code (string, lengths 5–1M) | repo_name (string, lengths 5–109) | path (string, lengths 6–208) | language (1 class) | license (15 classes) | size (int64, 5–1M)
---|---|---|---|---|---|
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import scala.util.Success
import cats.implicits._
object TaskExecuteWithLocalContextSuite extends BaseTestSuite {
test("cats' parSequence with LCP is stack safe") { implicit sc =>
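    // "LCP" is local context propagation. 2000 no-op tasks are combined with cats'
    // parSequence_ while the option is enabled; the assertion only checks that the
    // resulting future completes, i.e. the combinator does not blow the call stack.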
val f = List
.fill(2000)(Task.unit)
.parSequence_
.executeWithOptions(_.enableLocalContextPropagation)
.runToFuture
sc.tick()
assertEquals(f.value, Some(Success(())))
}
}
| alexandru/monifu | monix-eval/jvm/src/test/scala/monix/eval/TaskExecuteWithLocalContextSuite.scala | Scala | apache-2.0 | 1,089 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import java.lang.Thread.UncaughtExceptionHandler
import scala.annotation.implicitNotFound
import scala.concurrent.ExecutionContext
/** An exception reporter is a function that logs an uncaught error.
*
* Usually taken as an implicit when executing computations that could fail,
* but that must not blow up the call-stack, like asynchronous tasks.
*
* A default implicit is provided that simply logs the error on STDERR.
*/
@implicitNotFound(
"No ExceptionReporter was found in context for " +
"reporting uncaught errors, either build one yourself or use " +
"an implicit Scheduler (schedulers are ExceptionReporters)")
trait UncaughtExceptionReporter extends Serializable {
def reportFailure(ex: Throwable): Unit
}
/** See [[UncaughtExceptionReporter]]. */
object UncaughtExceptionReporter {
/** Builds a reporter out of the provided callback. */
def apply(reporter: Throwable => Unit): UncaughtExceptionReporter =
new UncaughtExceptionReporter {
def reportFailure(ex: Throwable) = reporter(ex)
}
/**
* Default instance that logs errors in a platform specific way.
*
* For the JVM logging is accomplished using the current
* [[https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.UncaughtExceptionHandler.html Thread.UncaughtExceptionHandler]].
* If an `UncaughtExceptionHandler` is not currently set,
* then the error is printed to stderr.
*
* For JS logging is done via `console.error`.
*/
val default: UncaughtExceptionReporter =
internal.DefaultUncaughtExceptionReporter
/**
* DEPRECATED - use [[default]] instead.
*/
@deprecated("Use UncaughtExceptionReporter.default", since = "3.0.0")
val LogExceptionsToStandardErr = {
// $COVERAGE-OFF$
UncaughtExceptionReporter(ExecutionContext.defaultReporter)
// $COVERAGE-ON$
}
implicit final class Extensions(val r: UncaughtExceptionReporter) extends AnyVal {
/**
* Converts [[UncaughtExceptionReporter]] to Java's
* [[https://docs.oracle.com/javase/8/docs/api/java/lang/Thread.UncaughtExceptionHandler.html UncaughtExceptionHandler]].
*/
def asJava: UncaughtExceptionHandler =
new UncaughtExceptionHandler {
override def uncaughtException(t: Thread, e: Throwable): Unit =
r.reportFailure(e)
}
}
}
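// A minimal usage sketch: build a reporter from a callback and install it as the
// JVM-wide default handler via the `asJava` extension above. The object name and
// message format are illustrative, not part of the Monix API.
object UncaughtExceptionReporterExample {
  val reporter: UncaughtExceptionReporter =
    UncaughtExceptionReporter(e => System.err.println(s"Uncaught error: ${e.getMessage}"))

  def installAsDefault(): Unit =
    Thread.setDefaultUncaughtExceptionHandler(reporter.asJava)
}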
| alexandru/monifu | monix-execution/shared/src/main/scala/monix/execution/UncaughtExceptionReporter.scala | Scala | apache-2.0 | 3,027 |
package org.pgscala
package builder
package converters
object PGByteArrayConverterBuilder
extends PGConverterBuilder {
val scalaClazz = "Array[Byte]"
override val imports = ""
override val scalaUpperType = "ByteArray"
override val javaUpperType = "ByteArray"
val defaultValue = "Array.empty[Byte]"
}
| melezov/pgscala | builder/src/main/scala/org/pgscala/builder/converters/scala/PGByteArrayConverterBuilder.scala | Scala | bsd-3-clause | 320 |
package com.anakiou.modbus.procimg
class DefaultProcessImageFactory extends ProcessImageFactory {
def createProcessImageImplementation(): ProcessImageImplementation = new SimpleProcessImage()
def createDigitalIn(): DigitalIn = new SimpleDigitalIn()
def createDigitalIn(state: Boolean): DigitalIn = new SimpleDigitalIn(state)
def createDigitalOut(): DigitalOut = new SimpleDigitalOut()
def createDigitalOut(b: Boolean): DigitalOut = new SimpleDigitalOut(b)
def createInputRegister(): InputRegister = new SimpleInputRegister()
def createInputRegister(b1: Byte, b2: Byte): InputRegister = new SimpleInputRegister(b1, b2)
def createRegister(): Register = new SimpleRegister()
def createRegister(b1: Byte, b2: Byte): Register = new SimpleRegister(b1, b2)
}
| anakiou/scamod | src/com/anakiou/modbus/procimg/DefaultProcessImageFactory.scala | Scala | apache-2.0 | 802 |
package com.cloudray.scalapress.plugin.listings.controller.renderer
import com.cloudray.scalapress.util.{WizardStep, WizardRenderer}
import com.cloudray.scalapress.plugin.listings.domain.ListingPackage
import scala.collection.mutable.ListBuffer
/** @author Stephen Samuel */
object ListingWizardRenderer {
object FoldersStep extends WizardStep("/listing/folder", "Sections")
object DetailsStep extends WizardStep("/listing/field", "Details")
object ImagesStep extends WizardStep("/listing/image", "Images")
object ConfirmationStep extends WizardStep("/listing/confirmation", "Confirm")
object VoucherStep extends WizardStep("/listing/voucher", "Voucher")
object PaymentStep extends WizardStep("/listing/payment", "Payment")
def steps(lp: ListingPackage, vouchers: Boolean) = {
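    // Folders are offered only when the package allows them and there is more than
    // one to choose from; images, voucher and payment steps are likewise optional.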
val buffer = new ListBuffer[WizardStep]
if (lp.maxFolders > 0 && lp.folders.split(",").size > 1)
buffer.append(FoldersStep)
buffer.append(DetailsStep)
if (lp.maxImages > 0)
buffer.append(ImagesStep)
buffer.append(ConfirmationStep)
if (vouchers)
buffer.append(VoucherStep)
if (lp.fee > 0)
buffer.append(PaymentStep)
buffer.toList
}
def render(lp: ListingPackage, active: WizardStep, vouchers: Boolean) =
WizardRenderer.render(steps(lp, vouchers), active)
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/plugin/listings/controller/renderer/ListingWizardRenderer.scala | Scala | apache-2.0 | 1,319 |
package model
import me.mtrupkin.core.Point
/**
* Created by mtrupkin on 4/25/2015.
*/
case class Vector(x: Double, y: Double) {
def +(p: Vector): Vector = Vector(x + p.x, y + p.y)
def +=(p: Vector): Vector = Vector(x + p.x, y + p.y)
def -(p: Vector): Vector = Vector(x - p.x, y - p.y)
def *(u: Double): Vector = Vector(x*u, y*u)
def /(u: Double): Vector = Vector(x/u, y/u)
def magnitude: Double = Math.sqrt(x*x + y*y)
def unit: Vector = /(magnitude)
def normal(n: Double) = unit*n
}
object Vector {
implicit def VectorToPoint(v: Vector): Point = Point((v.x).toInt, (v.y).toInt)
implicit def PointToVector(p: Point): Vector = Vector(p.x+0.5, p.y+0.5)
}
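// A minimal usage sketch of the arithmetic and the implicit Point round trip
// (a Vector truncates to a Point; a Point maps to the centre of its cell).
// The object name and values are illustrative.
object VectorExample {
  val velocity = Vector(3.0, 4.0)       // magnitude 5.0
  val step = velocity.normal(2.0)       // same direction, length 2.0
  val cell: Point = Vector(2.9, 4.2)    // VectorToPoint truncates to Point(2, 4)
  val centre: Vector = Point(1, 1)      // PointToVector yields Vector(1.5, 1.5)
}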
| mtrupkin/tomorrow | src/main/scala/model/Vector.scala | Scala | mit | 706 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.spark
import java.io.File
import java.nio.file.{Files, Paths}
import org.apache.spark.SparkConf
import org.apache.spark.repl.SparkILoop
import org.apache.spark.repl.SparkILoop._
import org.apache.zeppelin.interpreter.thrift.InterpreterCompletion
import org.apache.zeppelin.interpreter.util.InterpreterOutputStream
import org.apache.zeppelin.interpreter.{InterpreterContext, InterpreterResult}
import org.slf4j.{Logger, LoggerFactory}
import scala.tools.nsc.Settings
import scala.tools.nsc.interpreter._
/**
* SparkInterpreter for scala-2.10
*/
class SparkScala210Interpreter(override val conf: SparkConf,
override val depFiles: java.util.List[String])
extends BaseSparkScalaInterpreter(conf, depFiles) {
lazy override val LOGGER: Logger = LoggerFactory.getLogger(getClass)
private var sparkILoop: SparkILoop = _
override val interpreterOutput =
new InterpreterOutputStream(LoggerFactory.getLogger(classOf[SparkScala210Interpreter]))
override def open(): Unit = {
super.open()
// redirect the output of open() to InterpreterOutputStream, so that the user
// gets more diagnostic info in the frontend
if (InterpreterContext.get() != null) {
interpreterOutput.setInterpreterOutput(InterpreterContext.get().out)
}
val rootDir = conf.get("spark.repl.classdir", System.getProperty("java.io.tmpdir"))
val outputDir = Files.createTempDirectory(Paths.get(rootDir), "spark").toFile
outputDir.deleteOnExit()
conf.set("spark.repl.class.outputDir", outputDir.getAbsolutePath)
// Only Spark 1 requires creating an HTTP server; Spark 2 removed the HttpServer class.
startHttpServer(outputDir).foreach { case (server, uri) =>
sparkHttpServer = server
conf.set("spark.repl.class.uri", uri)
}
val settings = new Settings()
settings.embeddedDefaults(Thread.currentThread().getContextClassLoader())
settings.usejavacp.value = true
settings.classpath.value = getUserJars.mkString(File.pathSeparator)
Console.setOut(interpreterOutput)
sparkILoop = new SparkILoop(null, new JPrintWriter(Console.out, true))
setDeclaredField(sparkILoop, "settings", settings)
callMethod(sparkILoop, "createInterpreter")
sparkILoop.initializeSynchronous()
callMethod(sparkILoop, "postInitialization")
val reader = callMethod(sparkILoop,
"org$apache$spark$repl$SparkILoop$$chooseReader",
Array(settings.getClass), Array(settings)).asInstanceOf[InteractiveReader]
setDeclaredField(sparkILoop, "org$apache$spark$repl$SparkILoop$$in", reader)
scalaCompleter = reader.completion.completer()
createSparkContext()
}
override def close(): Unit = {
super.close()
if (sparkILoop != null) {
callMethod(sparkILoop, "org$apache$spark$repl$SparkILoop$$closeInterpreter")
}
}
protected override def interpret(code: String, context: InterpreterContext): InterpreterResult = {
if (context != null) {
interpreterOutput.setInterpreterOutput(context.out)
context.out.clear()
} else {
interpreterOutput.setInterpreterOutput(null)
}
Console.withOut(if (context != null) context.out else Console.out) {
interpreterOutput.ignoreLeadingNewLinesFromScalaReporter()
// add print("") at the end in case the last line is a comment, which would lead to INCOMPLETE
val lines = code.split("\\n") ++ List("print(\"\")")
var incompleteCode = ""
var lastStatus: InterpreterResult.Code = null
for (line <- lines if !line.trim.isEmpty) {
val nextLine = if (incompleteCode != "") {
incompleteCode + "\n" + line
} else {
line
}
scalaInterpret(nextLine) match {
case scala.tools.nsc.interpreter.IR.Success =>
// continue the next line
incompleteCode = ""
lastStatus = InterpreterResult.Code.SUCCESS
case scala.tools.nsc.interpreter.IR.Error =>
return new InterpreterResult(InterpreterResult.Code.ERROR)
case scala.tools.nsc.interpreter.IR.Incomplete =>
// put this line into incompleteCode for the next execution
incompleteCode = incompleteCode + "\n" + line
lastStatus = InterpreterResult.Code.INCOMPLETE
}
}
// flush all output before returning result to frontend
Console.flush()
interpreterOutput.setInterpreterOutput(null)
return new InterpreterResult(lastStatus)
}
}
def scalaInterpret(code: String): scala.tools.nsc.interpreter.IR.Result =
sparkILoop.interpret(code)
protected def bind(name: String, tpe: String, value: Object, modifier: List[String]): Unit = {
sparkILoop.beQuietDuring {
sparkILoop.bind(name, tpe, value, modifier)
}
}
}
| tinkoff-dwh/zeppelin | spark/scala-2.10/src/main/scala/org/apache/zeppelin/spark/SparkScala210Interpreter.scala | Scala | apache-2.0 | 5,605 |
package com.alpha.crm.ui.menu
/**
* Created by pnagarjuna on 20/02/16.
*/
trait MenuComposer {
}
| pamu/alpha-crm | src/main/scala/com/alpha/crm/ui/menu/MenuComposer.scala | Scala | apache-2.0 | 103 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.action.cookie
import io.gatling.core.session._
import io.gatling.http.cache.BaseUrlSupport
import io.gatling.http.protocol.HttpProtocol
object CookieActionBuilder {
private val NoBaseUrlFailure = "Neither cookie domain nor baseUrl nor wsBaseUrl".expressionFailure
val DefaultPath: String = "/"
def defaultDomain(httpProtocol: HttpProtocol): Expression[String] =
BaseUrlSupport.defaultDomain(httpProtocol, NoBaseUrlFailure)
}
| gatling/gatling | gatling-http/src/main/scala/io/gatling/http/action/cookie/CookieActionBuilder.scala | Scala | apache-2.0 | 1,081 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008.boxes.micro
import uk.gov.hmrc.ct.accounts.frsse2008.calculations.ProfitOrLossCalculator
import uk.gov.hmrc.ct.accounts.frsse2008.retriever.Frsse2008AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.box.retriever.FilingAttributesBoxValueRetriever
case class AC435(value: Option[Int]) extends CtBoxIdentifier(name = "Current Profit or loss") with CtOptionalInteger
object AC435 extends ProfitOrLossCalculator {
def calculate(boxRetriever: Frsse2008AccountsBoxRetriever): AC435 = {
calculateCurrentProfitOrLoss(ac12 = boxRetriever.ac12(),
ac405 = boxRetriever.ac405(),
ac410 = boxRetriever.ac410(),
ac415 = boxRetriever.ac415(),
ac420 = boxRetriever.ac420(),
ac425 = boxRetriever.ac425(),
ac34 = boxRetriever.ac34(),
microEntityFiling = boxRetriever.filingAttributesBoxValueRetriever.microEntityFiling())
}
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frsse2008/boxes/micro/AC435.scala | Scala | apache-2.0 | 1,717 |
package ch.wsl.fireindices.metadata
import ch.wsl.fireindices.log.DataLog
import java.io.File
import java.text.NumberFormat
import ch.wsl.fireindices.functions.Utils
import scala.collection.mutable.HashMap
import scala.collection.mutable.MapLike
import scala.collection.generic._
import scala.collection.mutable.MutableList
/**
* Collection of Parameter objects.
*
**/
class Parameters(that: TraversableOnce[Parameter]) extends KeySet[Variable,Parameter](that) {
//with Map[Variable,Parameter]
//with MapLike[Variable,Parameter, Parameters]{
import Parameters._
/**
* simple constructor
*
*/
def this() = this(Nil)
/**
* builds a key to insert a new Parameter in the KeySet
*
* @param par Parameter[Any] > the new parameter
* @return Variable[Any] > the key, i.e. the parameter's variable
*/
def buildKey(par: Parameter)= par.variable
/**
* add a value to Parameters object if it's not null
*
* @param name Variable > the variable for key building
* @param value A > the new value
* @return Parameters
*/
def addIfNotNull(name: Variable, value: Double)={
if (!value.equals(Double.NaN)) this += new Parameter(name, value, DataLog(name.abbr))
}
/**
* add a value in String format to Parameters object if it's not null
*
* @param name Variable > the variable for key building and for determining the type
* @param value the new value
* @return Parameters
*/
def addIfNotNull_Txt(name: Variable, txt: String)={
if (txt != null && !txt.equals("") && txt!=Null.getNullVal)
this += new Parameter(name,name.toValue(txt), DataLog(name.abbr))
}
// /**
// * add a value in String format to Parameters object if it's not null
// *
// * @param name Variable > the variable for key building & for determinate the type
// * @param value the new value
// * @return Parameters
// */
// def addIfNotNull_TxtA(name: Variable, txt: String)={
// if (txt != null && !txt.equals("") && txt!=Null.getNullVal)
// this += new Parameter(name,name.toValue(txt))
//
//// val d:Double = NumberFormat.getInstance.parse(txt).doubleValue
////// val doubleTxt:String = d.toString
//// if (txt != null && !txt.equals("") && txt!=Null.getNullVal) {
//// val value = name.dataManifest.toString match{
//// case "Double"=>d
//// case "Long" =>d.toLong
//// case "Int" =>d.toInt
//// case _ => d
//// }
//// this += new Parameter[A](name,value.asInstanceOf[A] )
//// }
//
// }
/**
* Returns a Parameters with the order specified in Variable.parameters
*
* @return Parameters
*/
def ordered: Parameters ={
val ordered = Variable.parameters.intersect(this.keySet.toSeq)
new Parameters(ordered.map(this(_)))
}
/**
* read the parameters values from a file
*
* @param file parameter file
*
*/
def read(file:File):Boolean ={
import resource._
for(input <- managed(io.Source.fromFile(file))) {
for (line <- io.Source.fromFile(file).getLines){
val s=line.split(" ").filter(_.size>0)
if (s.length>0){
val v=Variable.getByAbbrCaseInsensitive(s(0))
if (v!=null)
try{
val value = v match {
case FireSeasonStart => Utils.solarDate2Long(s(1),"dd.MM")
case FireSeasonEnd => Utils.solarDate2Long(s(1),"dd.MM")
case M68VegCorrStep3Start=> Utils.solarDate2Long(s(1),"dd.MM")
case M68VegCorrStep3End=> Utils.solarDate2Long(s(1),"dd.MM")
case XbirchLeaves => Utils.solarDate2Long(s(1),"dd.MM")
case XrobiniaBlossom => Utils.solarDate2Long(s(1),"dd.MM")
case XsnowcoverStart=> Utils.solarDate2Long(s(1),"dd.MM")
case XsnowcoverEnd => Utils.solarDate2Long(s(1),"dd.MM")
case _ => s(1)
}
v match{
case Climate => this.addIfNotNull(v.asInstanceOf[Variable] , value.toString.toInt) //I
case FireSeasonStart =>this.addIfNotNull(v.asInstanceOf[Variable] , value.toString.toLong)
case FireSeasonEnd =>this.addIfNotNull(v.asInstanceOf[Variable] , value.toString.toLong)
case M68VegCorrStep3Start=>this.addIfNotNull(v.asInstanceOf[Variable] ,value.toString.toLong)
case M68VegCorrStep3End=>this.addIfNotNull(v.asInstanceOf[Variable] ,value.toString.toLong)
case XbirchLeaves =>this.addIfNotNull(v.asInstanceOf[Variable] ,value.toString.toLong)
case XrobiniaBlossom =>this.addIfNotNull(v.asInstanceOf[Variable] ,value.toString.toLong)
case XsnowcoverStart=>this.addIfNotNull(v.asInstanceOf[Variable] ,value.toString.toLong)
case XsnowcoverEnd =>this.addIfNotNull(v.asInstanceOf[Variable] ,value.toString.toLong)
case _ => this.addIfNotNull_Txt(v,value.toString)
}
} catch {
case e:Throwable => //do nothing
}
}
}
}
file.exists
}
/**
* add the default values for the missing parameters
*
* @return String with the list of parameters added
*/
def completeWithDefaults:List[String] ={
val missing = defaults.filterKeys(!this.keySet.contains(_))
var toPrint=new MutableList[String]
missing.foreach((kv)=>{this.addIfNotNull(kv._1.asInstanceOf[Variable],kv._2)
toPrint += kv._1.abbr})
toPrint.toList
}
/**
* clones all Parameter objects
*
* @return Parameters
*/
def cloneAll:Parameters = new Parameters(this.valuesIterator.map(_.clone))
/**
* returns a String with all parameters contained in the Parameters object
*
* @param prefix a prefix for all parameters printed
* @return String
*/
def print(prefix:String=""):String ={
this.values.map((x) => prefix + x.variable.abbr +" = "+
(if(x.variable.c.toString.equals("long")) Utils.solarDate2String(x.value.asInstanceOf[Long],"dd-MMM") else x.value)
).mkString("\n") + "\n"
}
}
object Parameters {
val defaults = new HashMap[Variable, Double]
defaults += (Altitude -> 273.0,
Latitude -> 46.0,
RainyDayThreshold -> 1.27,
Rs_a -> 0.25,
Rs_b -> 0.50,
Krs -> 0.16,
Albedo -> 0.25,
RainyWeekThreshold -> 30.0,
FFMCstart -> 85.0,
DMCstart -> 6.0,
DCstart -> 15.0,
Climate -> 3,
FireSeasonStart -> Utils.solarDate2Long("15.02","dd.MM"),
FireSeasonEnd -> Utils.solarDate2Long("30.09","dd.MM"),
M68VegCorrStep3Start -> Utils.solarDate2Long("15.08","dd.MM"),
M68VegCorrStep3End -> Utils.solarDate2Long("01.09","dd.MM"),
XbirchLeaves -> Utils.solarDate2Long("01.04","dd.MM"),
XrobiniaBlossom -> Utils.solarDate2Long("01.06","dd.MM"),
XsnowcoverStart -> Utils.solarDate2Long("01.01","dd.MM"),
XsnowcoverEnd -> Utils.solarDate2Long("01.01","dd.MM"),
Aspect -> 180.0,
Slope -> 0.0,
Risico_v0 -> 140,
Risico_d0 -> 1.0,
Risico_d1 -> 3.0,
Risico_T0 -> 6.0,
Risico_sat -> 40,
Risico_hhv -> 21000.0,
Risico_humidity -> 45.0
)
}
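// A minimal usage sketch: collect one explicit parameter, fall back to the
// defaults above for everything else, and render the result. The object name
// and the Altitude value are illustrative.
object ParametersExample {
  def demo(): String = {
    val params = new Parameters()
    params.addIfNotNull(Altitude, 500.0)   // explicit value for one variable
    params.completeWithDefaults            // fill anything missing from `defaults`
    params.print(prefix = "  ")            // one "abbr = value" line per parameter
  }
}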
| Insubric/fire-calculator | fireindiceslib/src/main/scala/ch/wsl/fireindices/metadata/Parameters.scala | Scala | gpl-2.0 | 7,804 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.File
import kafka.utils._
import junit.framework.Assert._
import java.util.Properties
import kafka.consumer.SimpleConsumer
import org.junit.{After, Before, Test}
import kafka.message.{NoCompressionCodec, ByteBufferMessageSet, Message}
import kafka.zk.ZooKeeperTestHarness
import org.scalatest.junit.JUnit3Suite
import kafka.api.{OffsetCommitRequest, OffsetFetchRequest}
import kafka.utils.TestUtils._
import kafka.common.{ErrorMapping, TopicAndPartition, OffsetMetadataAndError}
import scala.util.Random
import kafka.admin.AdminUtils
class OffsetCommitTest extends JUnit3Suite with ZooKeeperTestHarness {
val random: Random = new Random()
var logDir: File = null
var topicLogDir: File = null
var server: KafkaServer = null
var logSize: Int = 100
val brokerPort: Int = 9099
var simpleConsumer: SimpleConsumer = null
var time: Time = new MockTime()
@Before
override def setUp() {
super.setUp()
val config: Properties = createBrokerConfig(1, brokerPort)
val logDirPath = config.getProperty("log.dir")
logDir = new File(logDirPath)
time = new MockTime()
server = TestUtils.createServer(new KafkaConfig(config), time)
simpleConsumer = new SimpleConsumer("localhost", brokerPort, 1000000, 64*1024, "test-client")
}
@After
override def tearDown() {
simpleConsumer.close
server.shutdown
Utils.rm(logDir)
super.tearDown()
}
@Test
def testUpdateOffsets() {
val topic = "topic"
// Commit an offset
val topicAndPartition = TopicAndPartition(topic, 0)
val expectedReplicaAssignment = Map(0 -> List(1))
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic, expectedReplicaAssignment)
val leaderIdOpt = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic, 0, 1000)
assertTrue("Leader should be elected after topic creation", leaderIdOpt.isDefined)
val commitRequest = OffsetCommitRequest("test-group", Map(topicAndPartition -> OffsetMetadataAndError(offset=42L)))
val commitResponse = simpleConsumer.commitOffsets(commitRequest)
assertEquals(ErrorMapping.NoError, commitResponse.requestInfo.get(topicAndPartition).get)
// Fetch it and verify
val fetchRequest = OffsetFetchRequest("test-group", Seq(topicAndPartition))
val fetchResponse = simpleConsumer.fetchOffsets(fetchRequest)
assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(topicAndPartition).get.error)
//assertEquals(OffsetMetadataAndError.NoMetadata, fetchResponse.requestInfo.get(topicAndPartition).get.metadata)
assertEquals(42L, fetchResponse.requestInfo.get(topicAndPartition).get.offset)
// Commit a new offset
val commitRequest1 = OffsetCommitRequest("test-group", Map(topicAndPartition -> OffsetMetadataAndError(
offset=100L,
metadata="some metadata"
)))
val commitResponse1 = simpleConsumer.commitOffsets(commitRequest1)
assertEquals(ErrorMapping.NoError, commitResponse1.requestInfo.get(topicAndPartition).get)
// Fetch it and verify
val fetchRequest1 = OffsetFetchRequest("test-group", Seq(topicAndPartition))
val fetchResponse1 = simpleConsumer.fetchOffsets(fetchRequest1)
assertEquals(ErrorMapping.NoError, fetchResponse1.requestInfo.get(topicAndPartition).get.error)
//assertEquals("some metadata", fetchResponse1.requestInfo.get(topicAndPartition).get.metadata)
assertEquals(100L, fetchResponse1.requestInfo.get(topicAndPartition).get.offset)
}
@Test
def testCommitAndFetchOffsets() {
val topic1 = "topic-1"
val topic2 = "topic-2"
val topic3 = "topic-3"
val topic4 = "topic-4"
val expectedReplicaAssignment = Map(0 -> List(1))
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic1, expectedReplicaAssignment)
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic2, expectedReplicaAssignment)
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic3, expectedReplicaAssignment)
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topic4, expectedReplicaAssignment)
var leaderIdOpt = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic1, 0, 1000)
assertTrue("Leader should be elected after topic creation", leaderIdOpt.isDefined)
leaderIdOpt = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic2, 0, 1000)
assertTrue("Leader should be elected after topic creation", leaderIdOpt.isDefined)
leaderIdOpt = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic3, 0, 1000)
assertTrue("Leader should be elected after topic creation", leaderIdOpt.isDefined)
leaderIdOpt = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topic4, 0, 1000)
assertTrue("Leader should be elected after topic creation", leaderIdOpt.isDefined)
val commitRequest = OffsetCommitRequest("test-group", Map(
TopicAndPartition(topic1, 0) -> OffsetMetadataAndError(offset=42L, metadata="metadata one"),
TopicAndPartition(topic2, 0) -> OffsetMetadataAndError(offset=43L, metadata="metadata two"),
TopicAndPartition(topic3, 0) -> OffsetMetadataAndError(offset=44L, metadata="metadata three"),
TopicAndPartition(topic2, 1) -> OffsetMetadataAndError(offset=45L)
))
val commitResponse = simpleConsumer.commitOffsets(commitRequest)
assertEquals(ErrorMapping.NoError, commitResponse.requestInfo.get(TopicAndPartition(topic1, 0)).get)
assertEquals(ErrorMapping.NoError, commitResponse.requestInfo.get(TopicAndPartition(topic2, 0)).get)
assertEquals(ErrorMapping.NoError, commitResponse.requestInfo.get(TopicAndPartition(topic3, 0)).get)
assertEquals(ErrorMapping.NoError, commitResponse.requestInfo.get(TopicAndPartition(topic2, 1)).get)
val fetchRequest = OffsetFetchRequest("test-group", Seq(
TopicAndPartition(topic1, 0),
TopicAndPartition(topic2, 0),
TopicAndPartition(topic3, 0),
TopicAndPartition(topic2, 1),
TopicAndPartition(topic3, 1), // An unknown partition
TopicAndPartition(topic4, 0) // An unknown topic
))
val fetchResponse = simpleConsumer.fetchOffsets(fetchRequest)
assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic1, 0)).get.error)
assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic2, 0)).get.error)
assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 0)).get.error)
assertEquals(ErrorMapping.NoError, fetchResponse.requestInfo.get(TopicAndPartition(topic2, 1)).get.error)
assertEquals(ErrorMapping.UnknownTopicOrPartitionCode, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 1)).get.error)
assertEquals(ErrorMapping.UnknownTopicOrPartitionCode, fetchResponse.requestInfo.get(TopicAndPartition(topic4, 0)).get.error)
//assertEquals("metadata one", fetchResponse.requestInfo.get(TopicAndPartition(topic1, 0)).get.metadata)
//assertEquals("metadata two", fetchResponse.requestInfo.get(TopicAndPartition(topic2, 0)).get.metadata)
//assertEquals("metadata three", fetchResponse.requestInfo.get(TopicAndPartition(topic3, 0)).get.metadata)
//assertEquals(OffsetMetadataAndError.NoMetadata, fetchResponse.requestInfo.get(TopicAndPartition(topic2, 1)).get.metadata)
//assertEquals(OffsetMetadataAndError.NoMetadata, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 1)).get.metadata)
//assertEquals(OffsetMetadataAndError.NoMetadata, fetchResponse.requestInfo.get(TopicAndPartition(topic4, 0)).get.metadata)
assertEquals(42L, fetchResponse.requestInfo.get(TopicAndPartition(topic1, 0)).get.offset)
assertEquals(43L, fetchResponse.requestInfo.get(TopicAndPartition(topic2, 0)).get.offset)
assertEquals(44L, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 0)).get.offset)
assertEquals(45L, fetchResponse.requestInfo.get(TopicAndPartition(topic2, 1)).get.offset)
assertEquals(OffsetMetadataAndError.InvalidOffset, fetchResponse.requestInfo.get(TopicAndPartition(topic3, 1)).get.offset)
assertEquals(OffsetMetadataAndError.InvalidOffset, fetchResponse.requestInfo.get(TopicAndPartition(topic4, 0)).get.offset)
}
@Test
def testLargeMetadataPayload() {
val topicAndPartition = TopicAndPartition("large-metadata", 0)
val expectedReplicaAssignment = Map(0 -> List(1))
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topicAndPartition.topic, expectedReplicaAssignment)
var leaderIdOpt = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topicAndPartition.topic, 0, 1000)
assertTrue("Leader should be elected after topic creation", leaderIdOpt.isDefined)
val commitRequest = OffsetCommitRequest("test-group", Map(topicAndPartition -> OffsetMetadataAndError(
offset=42L,
metadata=random.nextString(server.config.offsetMetadataMaxSize)
)))
val commitResponse = simpleConsumer.commitOffsets(commitRequest)
assertEquals(ErrorMapping.NoError, commitResponse.requestInfo.get(topicAndPartition).get)
val commitRequest1 = OffsetCommitRequest("test-group", Map(topicAndPartition -> OffsetMetadataAndError(
offset=42L,
metadata=random.nextString(server.config.offsetMetadataMaxSize + 1)
)))
val commitResponse1 = simpleConsumer.commitOffsets(commitRequest1)
assertEquals(ErrorMapping.OffsetMetadataTooLargeCode, commitResponse1.requestInfo.get(topicAndPartition).get)
}
@Test
def testNullMetadata() {
val topicAndPartition = TopicAndPartition("null-metadata", 0)
val expectedReplicaAssignment = Map(0 -> List(1))
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkClient, topicAndPartition.topic, expectedReplicaAssignment)
var leaderIdOpt = TestUtils.waitUntilLeaderIsElectedOrChanged(zkClient, topicAndPartition.topic, 0, 1000)
assertTrue("Leader should be elected after topic creation", leaderIdOpt.isDefined)
val commitRequest = OffsetCommitRequest("test-group", Map(topicAndPartition -> OffsetMetadataAndError(
offset=42L,
metadata=null
)))
val commitResponse = simpleConsumer.commitOffsets(commitRequest)
assertEquals(ErrorMapping.NoError, commitResponse.requestInfo.get(topicAndPartition).get)
}
}
| unix1986/universe | tool/kafka-0.8.1.1-src/core/src/test/scala/unit/kafka/server/OffsetCommitTest.scala | Scala | bsd-2-clause | 11,129 |
package io.finch.response
import com.twitter.finagle.httpx.{Request, Status}
import com.twitter.finagle.httpx.path.Root
import com.twitter.util.Await
import org.scalatest.{Matchers, FlatSpec}
class RedirectSpec extends FlatSpec with Matchers {
"A Redirect" should "create a service from a string url that generates a redirect" in {
val redirect = Redirect("/some/route")
val request = Request()
val futureResponse = redirect(request)
val response = Await.result(futureResponse)
response.status shouldBe Status.SeeOther
response.headerMap shouldBe Map("Location" -> "/some/route")
}
it should "create a service from a path that generates a redirect" in {
val redirect = Redirect(Root / "some" / "route")
val request = Request()
val futureResponse = redirect(request)
val response = Await.result(futureResponse)
response.status shouldBe Status.SeeOther
response.headerMap shouldBe Map("Location" -> "/some/route")
}
}
| peel/finch | core/src/test/scala/io/finch/response/RedirectSpec.scala | Scala | apache-2.0 | 978 |
package kv.kissmetrics
import scala.language.dynamics
trait BaseService {
def alias(person1Id: String, person2Id: String)
def event(personId: String, eventName: String, properties: Map[String, String] = Map())
def properties(personId: String, properties: Map[String, String])
}
trait DynamicService extends BaseService with Dynamic {
val EVENT_METHOD_PATTERN = "^e_(.*)".r
val PROPERTY_METHOD_PATTERN = "^p_(.*)".r
def applyDynamic(trigger: String)(personId: String, args: String*) {
val signature = trigger.split("_").toList
if (signature.size < 2) throw new IllegalArgumentException("Method name requires at least an event name or one property name.")
signature match {
case "e" :: rest => handleEvent(personId, rest, args)
case "p" :: rest => handleProperty(personId, rest, args)
case _ => throw new IllegalArgumentException(s"$trigger is neither an event nor a property method. Please see the docs for more information.")
}
}
private def handleEvent(personId: String, sig: List[String], propertyValues: Seq[String]) {
val (propertySignature, name) = parseTo(sig, "with")
val propertyNames = parsePropertyNames(propertySignature)
if (propertyNames.size != propertyValues.size) throw new IllegalArgumentException("A value must be specified for each property.")
val map = propertyNames zip propertyValues toMap;
event(personId, name, map)
}
private def handleProperty(personId: String, sig: List[String], propertyValues: Seq[String]) {
val propertyNames = parsePropertyNames(sig)
if (propertyNames.size != propertyValues.size) throw new IllegalArgumentException("A value must be specified for each property.")
val map = propertyNames zip propertyValues toMap;
properties(personId, map)
}
private def parseTo(sig: List[String], stop: String): (List[String], String) = {
val value = sig takeWhile (p => p != stop) mkString(" ")
val rest = sig dropWhile (p => p != stop) drop 1
(rest, value)
}
private def parsePropertyNames(sig: List[String]): List[String] = {
if (sig isEmpty) return sig
val (rest, value) = parseTo(sig, "and")
value :: parsePropertyNames(rest)
}
}
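// A minimal sketch of how the dynamic dispatch is meant to be used: a console-backed
// service (the object name and output format are illustrative). A call such as
//   ConsoleKissmetrics.e_signup_with_plan_and_source("user-1", "pro", "ad")
// is routed through applyDynamic, parsed as event "signup" with properties "plan"
// and "source", and ends up invoking
//   event("user-1", "signup", Map("plan" -> "pro", "source" -> "ad")).
object ConsoleKissmetrics extends DynamicService {
  def alias(person1Id: String, person2Id: String): Unit =
    println(s"alias: $person1Id -> $person2Id")
  def event(personId: String, eventName: String, properties: Map[String, String]): Unit =
    println(s"event '$eventName' for $personId with $properties")
  def properties(personId: String, properties: Map[String, String]): Unit =
    println(s"properties for $personId: $properties")
}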
| kodemaniak/scala-kissmetrics | src/main/scala/kv/kissmetrics/base.scala | Scala | apache-2.0 | 2,134 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.test
import java.io.File
import java.util.{Set => JavaSet}
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.language.implicitConversions
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.ql.exec.FunctionRegistry
import org.apache.hadoop.hive.ql.processors._
import org.apache.hadoop.hive.serde2.`lazy`.LazySimpleSerDe
import org.apache.spark.sql.{SQLContext, SQLConf}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.CacheTableCommand
import org.apache.spark.sql.hive._
import org.apache.spark.sql.hive.execution.HiveNativeCommand
import org.apache.spark.util.{ShutdownHookManager, Utils}
import org.apache.spark.{SparkConf, SparkContext}
// SPARK-3729: Test key required to check for initialization errors with config.
object TestHive
extends TestHiveContext(
new SparkContext(
System.getProperty("spark.sql.test.master", "local[32]"),
"TestSQLContext",
new SparkConf()
.set("spark.sql.test", "")
.set("spark.sql.hive.metastore.barrierPrefixes",
"org.apache.spark.sql.hive.execution.PairSerDe")
// SPARK-8910
.set("spark.ui.enabled", "false")))
trait TestHiveSingleton {
protected val sqlContext: SQLContext = TestHive
protected val hiveContext: TestHiveContext = TestHive
}
/**
* A locally running test instance of Spark's Hive execution engine.
*
* Data from [[testTables]] will be automatically loaded whenever a query is run over those tables.
* Calling [[reset]] will delete all tables and other state in the database, leaving the database
* in a "clean" state.
*
* TestHive is the singleton object version of this class because instantiating multiple copies of the
* hive metastore seems to lead to weird non-deterministic failures. Therefore, the execution of
* test cases that rely on TestHive must be serialized.
*/
class TestHiveContext(sc: SparkContext) extends HiveContext(sc) {
self =>
import HiveContext._
// By clearing the port we force Spark to pick a new one. This allows us to rerun tests
// without restarting the JVM.
System.clearProperty("spark.hostPort")
CommandProcessorFactory.clean(hiveconf)
hiveconf.set("hive.plan.serialization.format", "javaXML")
lazy val warehousePath = Utils.createTempDir(namePrefix = "warehouse-")
lazy val scratchDirPath = {
val dir = Utils.createTempDir(namePrefix = "scratch-")
dir.delete()
dir
}
private lazy val temporaryConfig = newTemporaryConfiguration()
/** Sets up the system initially or after a RESET command */
protected override def configure(): Map[String, String] = {
super.configure() ++ temporaryConfig ++ Map(
ConfVars.METASTOREWAREHOUSE.varname -> warehousePath.toURI.toString,
ConfVars.METASTORE_INTEGER_JDO_PUSHDOWN.varname -> "true",
ConfVars.SCRATCHDIR.varname -> scratchDirPath.toURI.toString,
ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY.varname -> "1"
)
}
val testTempDir = Utils.createTempDir()
// For some Hive test cases which contain ${system:test.tmp.dir}
System.setProperty("test.tmp.dir", testTempDir.getCanonicalPath)
/** The location of the compiled hive distribution */
lazy val hiveHome = envVarToFile("HIVE_HOME")
/** The location of the hive source code. */
lazy val hiveDevHome = envVarToFile("HIVE_DEV_HOME")
// Override so we can intercept relative paths and rewrite them to point at hive.
override def runSqlHive(sql: String): Seq[String] =
super.runSqlHive(rewritePaths(substitutor.substitute(this.hiveconf, sql)))
override def executePlan(plan: LogicalPlan): this.QueryExecution =
new this.QueryExecution(plan)
protected[sql] override lazy val conf: SQLConf = new SQLConf {
// The super.getConf(SQLConf.DIALECT) is "sql" by default, we need to set it as "hiveql"
override def dialect: String = super.getConf(SQLConf.DIALECT, "hiveql")
override def caseSensitiveAnalysis: Boolean = getConf(SQLConf.CASE_SENSITIVE, false)
clear()
override def clear(): Unit = {
super.clear()
TestHiveContext.overrideConfs.map {
case (key, value) => setConfString(key, value)
}
}
}
/**
* Returns the value of the specified environment variable as a [[java.io.File]] after checking
* to ensure it exists
*/
private def envVarToFile(envVar: String): Option[File] = {
Option(System.getenv(envVar)).map(new File(_))
}
/**
* Replaces relative paths to the parent directory "../" with hiveDevHome since this is how the
* hive test cases assume the system is set up.
*/
private def rewritePaths(cmd: String): String =
if (cmd.toUpperCase contains "LOAD DATA") {
val testDataLocation =
hiveDevHome.map(_.getCanonicalPath).getOrElse(inRepoTests.getCanonicalPath)
cmd.replaceAll("\\.\\./\\.\\./", testDataLocation + "/")
} else {
cmd
}
val hiveFilesTemp = File.createTempFile("catalystHiveFiles", "")
hiveFilesTemp.delete()
hiveFilesTemp.mkdir()
ShutdownHookManager.registerShutdownDeleteDir(hiveFilesTemp)
val inRepoTests = if (System.getProperty("user.dir").endsWith("sql" + File.separator + "hive")) {
new File("src" + File.separator + "test" + File.separator + "resources" + File.separator)
} else {
new File("sql" + File.separator + "hive" + File.separator + "src" + File.separator + "test" +
File.separator + "resources")
}
def getHiveFile(path: String): File = {
val stripped = path.replaceAll("""\.\.\/""", "").replace('/', File.separatorChar)
hiveDevHome
.map(new File(_, stripped))
.filter(_.exists)
.getOrElse(new File(inRepoTests, stripped))
}
val describedTable = "DESCRIBE (\\w+)".r
/**
* Override QueryExecution with special debug workflow.
*/
class QueryExecution(logicalPlan: LogicalPlan)
extends super.QueryExecution(logicalPlan) {
def this(sql: String) = this(parseSql(sql))
override lazy val analyzed = {
val describedTables = logical match {
case HiveNativeCommand(describedTable(tbl)) => tbl :: Nil
case CacheTableCommand(tbl, _, _) => tbl :: Nil
case _ => Nil
}
// Make sure any test tables referenced are loaded.
val referencedTables =
describedTables ++
logical.collect { case UnresolvedRelation(tableIdent, _) => tableIdent.table }
val referencedTestTables = referencedTables.filter(testTables.contains)
logDebug(s"Query references test tables: ${referencedTestTables.mkString(", ")}")
referencedTestTables.foreach(loadTestTable)
// Proceed with analysis.
analyzer.execute(logical)
}
}
case class TestTable(name: String, commands: (() => Unit)*)
protected[hive] implicit class SqlCmd(sql: String) {
def cmd: () => Unit = {
() => new QueryExecution(sql).stringResult(): Unit
}
}
/**
* A list of test tables and the DDL required to initialize them. A test table is loaded on
* demand when a query is run against it.
*/
@transient
lazy val testTables = new mutable.HashMap[String, TestTable]()
def registerTestTable(testTable: TestTable): Unit = {
testTables += (testTable.name -> testTable)
}
// The test tables that are defined in the Hive QTestUtil.
// /itests/util/src/main/java/org/apache/hadoop/hive/ql/QTestUtil.java
// https://github.com/apache/hive/blob/branch-0.13/data/scripts/q_test_init.sql
@transient
val hiveQTestUtilTables = Seq(
TestTable("src",
"CREATE TABLE src (key INT, value STRING)".cmd,
s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/kv1.txt")}' INTO TABLE src".cmd),
TestTable("src1",
"CREATE TABLE src1 (key INT, value STRING)".cmd,
s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/kv3.txt")}' INTO TABLE src1".cmd),
TestTable("srcpart", () => {
runSqlHive(
"CREATE TABLE srcpart (key INT, value STRING) PARTITIONED BY (ds STRING, hr STRING)")
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- Seq("11", "12")) {
runSqlHive(
s"""LOAD DATA LOCAL INPATH '${getHiveFile("data/files/kv1.txt")}'
|OVERWRITE INTO TABLE srcpart PARTITION (ds='$ds',hr='$hr')
""".stripMargin)
}
}),
TestTable("srcpart1", () => {
runSqlHive("CREATE TABLE srcpart1 (key INT, value STRING) PARTITIONED BY (ds STRING, hr INT)")
for (ds <- Seq("2008-04-08", "2008-04-09"); hr <- 11 to 12) {
runSqlHive(
s"""LOAD DATA LOCAL INPATH '${getHiveFile("data/files/kv1.txt")}'
|OVERWRITE INTO TABLE srcpart1 PARTITION (ds='$ds',hr='$hr')
""".stripMargin)
}
}),
TestTable("src_thrift", () => {
import org.apache.hadoop.hive.serde2.thrift.ThriftDeserializer
import org.apache.hadoop.mapred.{SequenceFileInputFormat, SequenceFileOutputFormat}
import org.apache.thrift.protocol.TBinaryProtocol
runSqlHive(
s"""
|CREATE TABLE src_thrift(fake INT)
|ROW FORMAT SERDE '${classOf[ThriftDeserializer].getName}'
|WITH SERDEPROPERTIES(
| 'serialization.class'='org.apache.spark.sql.hive.test.Complex',
| 'serialization.format'='${classOf[TBinaryProtocol].getName}'
|)
|STORED AS
|INPUTFORMAT '${classOf[SequenceFileInputFormat[_, _]].getName}'
|OUTPUTFORMAT '${classOf[SequenceFileOutputFormat[_, _]].getName}'
""".stripMargin)
runSqlHive(
s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/complex.seq")}' INTO TABLE src_thrift")
}),
TestTable("serdeins",
s"""CREATE TABLE serdeins (key INT, value STRING)
|ROW FORMAT SERDE '${classOf[LazySimpleSerDe].getCanonicalName}'
|WITH SERDEPROPERTIES ('field.delim'='\\t')
""".stripMargin.cmd,
"INSERT OVERWRITE TABLE serdeins SELECT * FROM src".cmd),
TestTable("episodes",
s"""CREATE TABLE episodes (title STRING, air_date STRING, doctor INT)
|STORED AS avro
|TBLPROPERTIES (
| 'avro.schema.literal'='{
| "type": "record",
| "name": "episodes",
| "namespace": "testing.hive.avro.serde",
| "fields": [
| {
| "name": "title",
| "type": "string",
| "doc": "episode title"
| },
| {
| "name": "air_date",
| "type": "string",
| "doc": "initial date"
| },
| {
| "name": "doctor",
| "type": "int",
| "doc": "main actor playing the Doctor in episode"
| }
| ]
| }'
|)
""".stripMargin.cmd,
s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/episodes.avro")}' INTO TABLE episodes".cmd
),
// THIS TABLE IS NOT THE SAME AS THE HIVE TEST TABLE episodes_partitioned AS DYNAMIC PARTITIONING
// IS NOT YET SUPPORTED
TestTable("episodes_part",
s"""CREATE TABLE episodes_part (title STRING, air_date STRING, doctor INT)
|PARTITIONED BY (doctor_pt INT)
|STORED AS avro
|TBLPROPERTIES (
| 'avro.schema.literal'='{
| "type": "record",
| "name": "episodes",
| "namespace": "testing.hive.avro.serde",
| "fields": [
| {
| "name": "title",
| "type": "string",
| "doc": "episode title"
| },
| {
| "name": "air_date",
| "type": "string",
| "doc": "initial date"
| },
| {
| "name": "doctor",
| "type": "int",
| "doc": "main actor playing the Doctor in episode"
| }
| ]
| }'
|)
""".stripMargin.cmd,
// WORKAROUND: Required to pass schema to SerDe for partitioned tables.
// TODO: Pass this automatically from the table to partitions.
s"""
|ALTER TABLE episodes_part SET SERDEPROPERTIES (
| 'avro.schema.literal'='{
| "type": "record",
| "name": "episodes",
| "namespace": "testing.hive.avro.serde",
| "fields": [
| {
| "name": "title",
| "type": "string",
| "doc": "episode title"
| },
| {
| "name": "air_date",
| "type": "string",
| "doc": "initial date"
| },
| {
| "name": "doctor",
| "type": "int",
| "doc": "main actor playing the Doctor in episode"
| }
| ]
| }'
|)
""".stripMargin.cmd,
s"""
INSERT OVERWRITE TABLE episodes_part PARTITION (doctor_pt=1)
SELECT title, air_date, doctor FROM episodes
""".cmd
),
TestTable("src_json",
s"""CREATE TABLE src_json (json STRING) STORED AS TEXTFILE
""".stripMargin.cmd,
s"LOAD DATA LOCAL INPATH '${getHiveFile("data/files/json.txt")}' INTO TABLE src_json".cmd)
)
hiveQTestUtilTables.foreach(registerTestTable)
private val loadedTables = new collection.mutable.HashSet[String]
var cacheTables: Boolean = false
def loadTestTable(name: String) {
if (!(loadedTables contains name)) {
// Marks the table as loaded first to prevent infinite mutually recursive table loading.
loadedTables += name
logDebug(s"Loading test table $name")
val createCmds =
testTables.get(name).map(_.commands).getOrElse(sys.error(s"Unknown test table $name"))
createCmds.foreach(_())
if (cacheTables) {
cacheTable(name)
}
}
}
/**
* Records the UDFs present when the server starts, so we can delete ones that are created by
* tests.
*/
protected val originalUDFs: JavaSet[String] = FunctionRegistry.getFunctionNames
/**
* Resets the test instance by deleting any tables that have been created.
* TODO: also clear out UDFs, views, etc.
*/
def reset() {
try {
// HACK: Hive is too noisy by default.
org.apache.log4j.LogManager.getCurrentLoggers.asScala.foreach { log =>
log.asInstanceOf[org.apache.log4j.Logger].setLevel(org.apache.log4j.Level.WARN)
}
cacheManager.clearCache()
loadedTables.clear()
catalog.cachedDataSourceTables.invalidateAll()
catalog.client.reset()
catalog.unregisterAllTables()
FunctionRegistry.getFunctionNames.asScala.filterNot(originalUDFs.contains(_)).
foreach { udfName => FunctionRegistry.unregisterTemporaryUDF(udfName) }
// Some tests corrupt this value on purpose, which breaks the RESET call below.
hiveconf.set("fs.default.name", new File(".").toURI.toString)
// It is important that we RESET first as broken hooks that might have been set could break
// other sql exec here.
executionHive.runSqlHive("RESET")
metadataHive.runSqlHive("RESET")
// For some reason, RESET does not reset the following variables...
// https://issues.apache.org/jira/browse/HIVE-9004
runSqlHive("set hive.table.parameters.default=")
runSqlHive("set datanucleus.cache.collections=true")
runSqlHive("set datanucleus.cache.collections.lazy=true")
// Lots of tests fail if we do not change the partition whitelist from the default.
runSqlHive("set hive.metastore.partition.name.whitelist.pattern=.*")
configure().foreach {
case (k, v) =>
metadataHive.runSqlHive(s"SET $k=$v")
}
defaultOverrides()
runSqlHive("USE default")
// Just loading src makes a lot of tests pass. This is because some tests do something like
// drop an index on src at the beginning. Since we just pass DDL to hive this bypasses our
// Analyzer and thus the test table auto-loading mechanism.
// Remove after we handle more DDL operations natively.
loadTestTable("src")
loadTestTable("srcpart")
} catch {
case e: Exception =>
logError("FATAL ERROR: Failed to reset TestDB state.", e)
}
}
}
private[hive] object TestHiveContext {
/**
* A map used to store all confs that need to be overridden in sql/hive unit tests.
*/
val overrideConfs: Map[String, String] =
Map(
// Fewer shuffle partitions to speed up testing.
SQLConf.SHUFFLE_PARTITIONS.key -> "5"
)
}
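// A minimal usage sketch (illustrative object name): a query against a registered
// test table triggers its on-demand loading, and reset() restores a clean state.
private[hive] object TestHiveExample {
  def demo(): Long = {
    val rowCount = TestHive.sql("SELECT COUNT(*) FROM src").collect().head.getLong(0)
    TestHive.reset()
    rowCount
  }
}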
| pronix/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/test/TestHive.scala | Scala | apache-2.0 | 17,578 |
/**
* Created by Tran Huu Cuong on 2015-11-17 18:51:00.
*/
import com.datastax.spark.connector._
import org.apache.spark._
object HelloCassandra {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("HelloCassandra")
val sc = new SparkContext(conf)
val hello = sc.cassandraTable("test", "hello")
val first = hello.first()
println(first)
sc.stop
}
}
| tranhuucuong91/spark-cassandra-example | src/main/scala/HelloCassandra.scala | Scala | mit | 441 |
package com.num.brain.rest.app
import com.num.brain.rest.api.NumBrainServer
import com.typesafe.config.ConfigFactory
import akka.stream.ActorMaterializer
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.event.Logging
/**
* @author ramithp
*/
object RestServiceApp extends App with NumBrainServer {
implicit val system = ActorSystem()
//override implicit val executor = system.dispatcher
implicit val materializer = ActorMaterializer()
val config = ConfigFactory.load()
Http().bindAndHandle(route, config.getString("http.interface"), config.getInt("http.port"))
}
| Ra41P/NumBrain | src/main/scala/com/num/brain/rest/app/RestServiceApp.scala | Scala | mit | 601 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.concurrent.CountDownLatch
import org.apache.commons.lang3.RandomStringUtils
import org.mockito.Mockito._
import org.scalactic.TolerantNumerics
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.mockito.MockitoSugar
import org.apache.spark.SparkException
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.util.{BlockingSource, MockSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.ManualClock
class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging with MockitoSugar {
import AwaitTerminationTester._
import testImplicits._
// To make === between double tolerate inexact values
implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
after {
sqlContext.streams.active.foreach(_.stop())
}
test("name unique in active queries") {
withTempDir { dir =>
def startQuery(name: Option[String]): StreamingQuery = {
val writer = MemoryStream[Int].toDS.writeStream
name.foreach(writer.queryName)
writer
.foreach(new TestForeachWriter)
.start()
}
// No name by default, multiple active queries can have no name
val q1 = startQuery(name = None)
assert(q1.name === null)
val q2 = startQuery(name = None)
assert(q2.name === null)
// Can be set by user
val q3 = startQuery(name = Some("q3"))
assert(q3.name === "q3")
// Multiple active queries cannot have same name
val e = intercept[IllegalArgumentException] {
startQuery(name = Some("q3"))
}
q1.stop()
q2.stop()
q3.stop()
}
}
test(
"id unique in active queries + persists across restarts, runId unique across start/restarts") {
val inputData = MemoryStream[Int]
withTempDir { dir =>
var cpDir: String = null
def startQuery(restart: Boolean): StreamingQuery = {
if (cpDir == null || !restart) cpDir = s"$dir/${RandomStringUtils.randomAlphabetic(10)}"
MemoryStream[Int].toDS().groupBy().count()
.writeStream
.format("memory")
.outputMode("complete")
.queryName(s"name${RandomStringUtils.randomAlphabetic(10)}")
.option("checkpointLocation", cpDir)
.start()
}
// id and runId unique for new queries
val q1 = startQuery(restart = false)
val q2 = startQuery(restart = false)
assert(q1.id !== q2.id)
assert(q1.runId !== q2.runId)
q1.stop()
q2.stop()
// id persists across restarts, runId unique across restarts
val q3 = startQuery(restart = false)
q3.stop()
val q4 = startQuery(restart = true)
q4.stop()
assert(q3.id === q4.id)
assert(q3.runId !== q4.runId)
// Only one query with same id can be active
val q5 = startQuery(restart = false)
val e = intercept[IllegalStateException] {
startQuery(restart = true)
}
}
}
testQuietly("isActive, exception, and awaitTermination") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive === true),
AssertOnQuery(_.exception.isEmpty),
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
TestAwaitTermination(ExpectBlocked),
TestAwaitTermination(ExpectBlocked, timeoutMs = 2000),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = false),
StopStream,
AssertOnQuery(_.isActive === false),
AssertOnQuery(_.exception.isEmpty),
TestAwaitTermination(ExpectNotBlocked),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 2000, expectedReturnValue = true),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = true),
StartStream(),
AssertOnQuery(_.isActive === true),
AddData(inputData, 0),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
TestAwaitTermination(ExpectException[SparkException]),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 2000),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 10),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("OneTime trigger, commit log, and exception") {
import Trigger.Once
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive === true),
StopStream,
AddData(inputData, 1, 2),
StartStream(trigger = Once),
CheckAnswer(6, 3),
StopStream, // clears out StreamTest state
AssertOnQuery { q =>
// both commit log and offset log contain the same (latest) batch id
q.batchCommitLog.getLatest().map(_._1).getOrElse(-1L) ==
q.offsetLog.getLatest().map(_._1).getOrElse(-2L)
},
AssertOnQuery { q =>
// blow away commit log and sink result
q.batchCommitLog.purge(1)
q.sink.asInstanceOf[MemorySink].clear()
true
},
StartStream(trigger = Once),
CheckAnswer(6, 3), // ensure we fall back to offset log and reprocess batch
StopStream,
AddData(inputData, 3),
StartStream(trigger = Once),
CheckLastBatch(2), // commit log should be back in place
StopStream,
AddData(inputData, 0),
StartStream(trigger = Once),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("status, lastProgress, and recentProgress") {
import StreamingQuerySuite._
clock = new StreamManualClock
/** Custom MemoryStream that waits for manual clock to reach a time */
val inputData = new MemoryStream[Int](0, sqlContext) {
// getOffset should take 50 ms the first time it is called
override def getOffset: Option[Offset] = {
val offset = super.getOffset
if (offset.nonEmpty) {
clock.waitTillTime(1050)
}
offset
}
// getBatch should take 100 ms the first time it is called
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
if (start.isEmpty) clock.waitTillTime(1150)
super.getBatch(start, end)
}
}
// query execution should take 350 ms the first time it is called
val mapped = inputData.toDS.coalesce(1).as[Long].map { x =>
clock.waitTillTime(1500) // this will only wait the first time when clock < 1500
10 / x
}.agg(count("*")).as[Long]
case class AssertStreamExecThreadIsWaitingForTime(targetTime: Long)
extends AssertOnQuery(q => {
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingFor(targetTime))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}, "") {
override def toString: String = s"AssertStreamExecThreadIsWaitingForTime($targetTime)"
}
case class AssertClockTime(time: Long)
extends AssertOnQuery(q => clock.getTimeMillis() === time, "") {
override def toString: String = s"AssertClockTime($time)"
}
var lastProgressBeforeStop: StreamingQueryProgress = null
testStream(mapped, OutputMode.Complete)(
StartStream(ProcessingTime(1000), triggerClock = clock),
AssertStreamExecThreadIsWaitingForTime(1000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while offset is being fetched
AddData(inputData, 1, 2),
AdvanceManualClock(1000), // time = 1000 to start new trigger, will block on getOffset
AssertStreamExecThreadIsWaitingForTime(1050),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === true),
AssertOnQuery(_.status.message.startsWith("Getting offsets from")),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch is being fetched
AdvanceManualClock(50), // time = 1050 to unblock getOffset
AssertClockTime(1050),
AssertStreamExecThreadIsWaitingForTime(1150), // will block on getBatch that needs 1150
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === true),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch is being processed
AdvanceManualClock(100), // time = 1150 to unblock getBatch
AssertClockTime(1150),
AssertStreamExecThreadIsWaitingForTime(1500), // will block in Spark job that needs 1500
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === true),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch processing has completed
AssertOnQuery { _ => clock.getTimeMillis() === 1150 },
AdvanceManualClock(350), // time = 1500 to unblock job
AssertClockTime(1500),
CheckAnswer(2),
AssertStreamExecThreadIsWaitingForTime(2000),
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.lastProgress != null)
assert(query.recentProgress.exists(_.numInputRows > 0))
assert(query.recentProgress.last.eq(query.lastProgress))
val progress = query.lastProgress
assert(progress.id === query.id)
assert(progress.name === query.name)
assert(progress.batchId === 0)
      assert(progress.timestamp === "1970-01-01T00:00:01.000Z") // 1000 ms in UTC
assert(progress.numInputRows === 2)
assert(progress.processedRowsPerSecond === 4.0)
assert(progress.durationMs.get("getOffset") === 50)
assert(progress.durationMs.get("getBatch") === 100)
assert(progress.durationMs.get("queryPlanning") === 0)
assert(progress.durationMs.get("walCommit") === 0)
assert(progress.durationMs.get("triggerExecution") === 500)
assert(progress.sources.length === 1)
assert(progress.sources(0).description contains "MemoryStream")
assert(progress.sources(0).startOffset === null)
assert(progress.sources(0).endOffset !== null)
assert(progress.sources(0).processedRowsPerSecond === 4.0) // 2 rows processed in 500 ms
assert(progress.stateOperators.length === 1)
assert(progress.stateOperators(0).numRowsUpdated === 1)
assert(progress.stateOperators(0).numRowsTotal === 1)
assert(progress.sink.description contains "MemorySink")
true
},
// Test whether input rate is updated after two batches
AssertStreamExecThreadIsWaitingForTime(2000), // blocked waiting for next trigger time
AddData(inputData, 1, 2),
AdvanceManualClock(500), // allow another trigger
AssertClockTime(2000),
AssertStreamExecThreadIsWaitingForTime(3000), // will block waiting for next trigger time
CheckAnswer(4),
AssertOnQuery(_.status.isDataAvailable === true),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.recentProgress.last.eq(query.lastProgress))
assert(query.lastProgress.batchId === 1)
assert(query.lastProgress.inputRowsPerSecond === 2.0)
assert(query.lastProgress.sources(0).inputRowsPerSecond === 2.0)
true
},
// Test status and progress after data is not available for a trigger
AdvanceManualClock(1000), // allow another trigger
AssertStreamExecThreadIsWaitingForTime(4000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
// Test status and progress after query stopped
AssertOnQuery { query =>
lastProgressBeforeStop = query.lastProgress
true
},
StopStream,
AssertOnQuery(_.lastProgress.json === lastProgressBeforeStop.json),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Stopped"),
// Test status and progress after query terminated with error
StartStream(ProcessingTime(1000), triggerClock = clock),
AdvanceManualClock(1000), // ensure initial trigger completes before AddData
AddData(inputData, 0),
AdvanceManualClock(1000), // allow another trigger
ExpectFailure[SparkException](),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message.startsWith("Terminated with exception"))
)
}
test("lastProgress should be null when recentProgress is empty") {
BlockingSource.latch = new CountDownLatch(1)
withTempDir { tempDir =>
val sq = spark.readStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.load()
.writeStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.option("checkpointLocation", tempDir.toString)
.start()
// Creating source is blocked so recentProgress is empty and lastProgress should be null
assert(sq.lastProgress === null)
// Release the latch and stop the query
BlockingSource.latch.countDown()
sq.stop()
}
}
test("codahale metrics") {
val inputData = MemoryStream[Int]
    /** Whether the metrics of a query are registered for reporting */
def isMetricsRegistered(query: StreamingQuery): Boolean = {
val sourceName = s"spark.streaming.${query.id}"
val sources = spark.sparkContext.env.metricsSystem.getSourcesByName(sourceName)
require(sources.size <= 1)
sources.nonEmpty
}
// Disabled by default
assert(spark.conf.get("spark.sql.streaming.metricsEnabled").toBoolean === false)
withSQLConf("spark.sql.streaming.metricsEnabled" -> "false") {
testStream(inputData.toDF)(
AssertOnQuery { q => !isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
// Registered when enabled
withSQLConf("spark.sql.streaming.metricsEnabled" -> "true") {
testStream(inputData.toDF)(
AssertOnQuery { q => isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
}
test("input row calculation with mixed batch and streaming sources") {
val streamingTriggerDF = spark.createDataset(1 to 10).toDF
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF).toDF("value")
val staticInputDF = spark.createDataFrame(Seq(1 -> "1", 2 -> "2")).toDF("value", "anotherValue")
// Trigger input has 10 rows, static input has 2 rows,
// therefore after the first trigger, the calculated input rows should be 10
val progress = getFirstProgress(streamingInputDF.join(staticInputDF, "value"))
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
test("input row calculation with trigger input DF having multiple leaves") {
val streamingTriggerDF =
spark.createDataset(1 to 5).toDF.union(spark.createDataset(6 to 10).toDF)
require(streamingTriggerDF.logicalPlan.collectLeaves().size > 1)
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF)
// After the first trigger, the calculated input rows should be 10
val progress = getFirstProgress(streamingInputDF)
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
testQuietly("StreamExecution metadata garbage collection") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map(6 / _)
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "1") {
      // Run 3 batches, and then assert that only 2 metadata files are left at the end
      // since the first should have been purged.
testStream(mapped)(
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
AddData(inputData, 1, 2),
CheckAnswer(6, 3, 6, 3),
AddData(inputData, 4, 6),
CheckAnswer(6, 3, 6, 3, 1, 1),
AssertOnQuery("metadata log should contain only two files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 2 && toTest.head == "1")
true
}
)
}
val inputData2 = MemoryStream[Int]
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "2") {
      // Run 5 batches, and then assert that 3 metadata files are left at the end
      // since the first two should have been purged.
testStream(inputData2.toDS())(
AddData(inputData2, 1, 2),
CheckAnswer(1, 2),
AddData(inputData2, 1, 2),
CheckAnswer(1, 2, 1, 2),
AddData(inputData2, 3, 4),
CheckAnswer(1, 2, 1, 2, 3, 4),
AddData(inputData2, 5, 6),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6),
AddData(inputData2, 7, 8),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6, 7, 8),
AssertOnQuery("metadata log should contain three files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 3 && toTest.head == "2")
true
}
)
}
}
testQuietly("StreamingQuery should be Serializable but cannot be used in executors") {
def startQuery(ds: Dataset[Int], queryName: String): StreamingQuery = {
ds.writeStream
.queryName(queryName)
.format("memory")
.start()
}
val input = MemoryStream[Int]
val q1 = startQuery(input.toDS, "stream_serializable_test_1")
val q2 = startQuery(input.toDS.map { i =>
      // Emulate that `StreamingQuery` gets captured with normal usage unintentionally.
// It should not fail the query.
q1
i
}, "stream_serializable_test_2")
val q3 = startQuery(input.toDS.map { i =>
// Emulate that `StreamingQuery` is used in executors. We should fail the query with a clear
// error message.
q1.explain()
i
}, "stream_serializable_test_3")
try {
input.addData(1)
// q2 should not fail since it doesn't use `q1` in the closure
q2.processAllAvailable()
// The user calls `StreamingQuery` in the closure and it should fail
val e = intercept[StreamingQueryException] {
q3.processAllAvailable()
}
assert(e.getCause.isInstanceOf[SparkException])
assert(e.getCause.getCause.isInstanceOf[IllegalStateException])
assert(e.getMessage.contains("StreamingQuery cannot be used in executors"))
} finally {
q1.stop()
q2.stop()
q3.stop()
}
}
test("StreamExecution should call stop() on sources when a stream is stopped") {
var calledStop = false
val source = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(StopStream)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
testQuietly("SPARK-19774: StreamExecution should call stop() on sources when a stream fails") {
var calledStop = false
val source1 = new Source {
override def stop(): Unit = {
throw new RuntimeException("Oh no!")
}
override def getOffset: Option[Offset] = Some(LongOffset(1))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.range(2).toDF(MockSourceProvider.fakeSchema.fieldNames: _*)
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
val source2 = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source1, source2) {
val df1 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
val df2 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
testStream(df1.union(df2).map(i => i / 0))(
AssertOnQuery { sq =>
intercept[StreamingQueryException](sq.processAllAvailable())
sq.exception.isDefined && !sq.isActive
}
)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
test("get the query id in source") {
@volatile var queryId: String = null
val source = new Source {
override def stop(): Unit = {}
override def getOffset: Option[Offset] = {
queryId = spark.sparkContext.getLocalProperty(StreamExecution.QUERY_ID_KEY)
None
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = spark.emptyDataFrame
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(
AssertOnQuery { sq =>
sq.processAllAvailable()
assert(sq.id.toString === queryId)
assert(sq.runId.toString !== queryId)
true
}
)
}
}
test("processAllAvailable should not block forever when a query is stopped") {
val input = MemoryStream[Int]
input.addData(1)
val query = input.toDF().writeStream
.trigger(Trigger.Once())
.format("console")
.start()
failAfter(streamingTimeout) {
query.processAllAvailable()
}
}
  /** Create a streaming DF that only executes one batch, in which it returns the given static DF */
private def createSingleTriggerStreamingDF(triggerDF: DataFrame): DataFrame = {
require(!triggerDF.isStreaming)
    // A streaming Source that generates only one trigger and returns the given DataFrame as its batch
val source = new Source() {
override def schema: StructType = triggerDF.schema
override def getOffset: Option[Offset] = Some(LongOffset(0))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
sqlContext.internalCreateDataFrame(
triggerDF.queryExecution.toRdd, triggerDF.schema, isStreaming = true)
}
override def stop(): Unit = {}
}
StreamingExecutionRelation(source, spark)
}
/** Returns the query progress at the end of the first trigger of streaming DF */
private def getFirstProgress(streamingDF: DataFrame): StreamingQueryProgress = {
try {
val q = streamingDF.writeStream.format("memory").queryName("test").start()
q.processAllAvailable()
q.recentProgress.head
} finally {
spark.streams.active.map(_.stop())
}
}
/**
* A [[StreamAction]] to test the behavior of `StreamingQuery.awaitTermination()`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
case class TestAwaitTermination(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int = -1,
expectedReturnValue: Boolean = false
) extends AssertOnQuery(
TestAwaitTermination.assertOnQueryCondition(expectedBehavior, timeoutMs, expectedReturnValue),
"Error testing awaitTermination behavior"
) {
override def toString(): String = {
s"TestAwaitTermination($expectedBehavior, timeoutMs = $timeoutMs, " +
s"expectedReturnValue = $expectedReturnValue)"
}
}
object TestAwaitTermination {
/**
* Tests the behavior of `StreamingQuery.awaitTermination`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
def assertOnQueryCondition(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int,
expectedReturnValue: Boolean
)(q: StreamExecution): Boolean = {
def awaitTermFunc(): Unit = {
if (timeoutMs <= 0) {
q.awaitTermination()
} else {
val returnedValue = q.awaitTermination(timeoutMs)
assert(returnedValue === expectedReturnValue, "Returned value does not match expected")
}
}
AwaitTerminationTester.test(expectedBehavior, awaitTermFunc)
true // If the control reached here, then everything worked as expected
}
}
}
object StreamingQuerySuite {
// Singleton reference to clock that does not get serialized in task closures
var clock: StreamManualClock = null
}
|
minixalpha/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala
|
Scala
|
apache-2.0
| 28,872 |
package smarthouse.restapi.services
import java.util.Date
import smarthouse.restapi.models.DeviceEntity
import smarthouse.restapi.models.db.DeviceEntityTable
import smarthouse.restapi.utils.DatabaseService
import scala.concurrent.{ExecutionContext, Future}
class DevicesService(val databaseService: DatabaseService)
(implicit executionContext: ExecutionContext) extends DeviceEntityTable {
import databaseService._
import databaseService.driver.api._
def getDevices(): Future[Seq[DeviceEntity]] = db.run(devices.result)
def createDevice(entity: DeviceEntity): Future[DeviceEntity] = {
db.run(devices returning devices += entity.copy(created = new Date()))
}
}
|
andrewobukhov/smart-house
|
src/main/scala/smarthouse/restapi/services/DevicesService.scala
|
Scala
|
mit
| 700 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.optim
import com.intel.analytics.bigdl._
import com.intel.analytics.bigdl.dataset.segmentation.{MaskUtils, RLEMasks}
import com.intel.analytics.bigdl.nn.ClassNLLCriterion
import com.intel.analytics.bigdl.nn.AbsCriterion
import com.intel.analytics.bigdl.nn.abstractnn.Activity
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.transform.vision.image.label.roi.RoiLabel
import com.intel.analytics.bigdl.utils.Table
import org.apache.commons.lang3.SerializationUtils
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
/**
* A method defined to evaluate the model.
 * This trait can be extended by user-defined methods, such
 * as Top1Accuracy.
*/
trait ValidationMethod[T] extends Serializable {
def apply(output: Activity, target: Activity): ValidationResult
// return the name of this method
protected def format(): String
// return the name of this method
override def toString(): String = format()
// deep clone the object
override def clone(): ValidationMethod[T] = SerializationUtils.clone(this)
}
/**
 * A result that calculates the numeric value of a validation method.
 * User-defined validation results must override the + operation and the result() method.
* It is executed over the samples in each batch.
*/
trait ValidationResult extends Serializable {
// return the calculation results over all the samples in the batch
def result(): (Float, Int) // (Result, TotalNum)
// scalastyle:off methodName
def +(other: ValidationResult): ValidationResult
// return the name of this trait
protected def format(): String
// return the name of this trait
override def toString(): String = format()
}
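// Illustrative sketch (added for clarity, not part of the original file): a
// minimal user-defined ValidationMethod that reports the fraction of strictly
// positive outputs, reusing the AccuracyResult defined below as its
// ValidationResult. It ignores the target and only demonstrates the extension
// points (apply and format) described above.
private[optim] class PositiveRate[T: ClassTag]()(implicit ev: TensorNumeric[T])
  extends ValidationMethod[T] {
  override def apply(output: Activity, target: Activity): ValidationResult = {
    val out = output.asInstanceOf[Tensor[T]]
    var positive = 0
    out.apply1 { x =>
      if (ev.isGreater(x, ev.zero)) positive += 1
      x // apply1 maps in place; returning x leaves the tensor unchanged
    }
    new AccuracyResult(positive, out.nElement())
  }
  override protected def format(): String = "PositiveRate"
}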
/**
 * Represents an accuracy result. Accuracy is the ratio of the correct count to the total count.
* @param correct correct number
* @param count total count number
*/
class AccuracyResult(private var correct: Int, private var count: Int)
extends ValidationResult {
override def result(): (Float, Int) = (correct.toFloat/count, count)
// scalastyle:off methodName
override def +(other: ValidationResult): ValidationResult = {
val otherResult = other.asInstanceOf[AccuracyResult]
this.correct += otherResult.correct
this.count += otherResult.count
this
}
// scalastyle:on methodName
override protected def format(): String = {
s"Accuracy(correct: $correct, count: $count, accuracy: ${correct.toDouble / count})"
}
override def equals(obj: Any): Boolean = {
if (obj == null) {
return false
}
if (!obj.isInstanceOf[AccuracyResult]) {
return false
}
val other = obj.asInstanceOf[AccuracyResult]
if (this.eq(other)) {
return true
}
this.correct == other.correct && this.count == other.count
}
override def hashCode(): Int = {
val seed = 37
var hash = 1
hash = hash * seed + this.correct
hash = hash * seed + this.count
hash
}
}
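// Worked example (added for clarity, not part of the original file): per-batch
// results are aggregated with `+`, so
//   new AccuracyResult(3, 10) + new AccuracyResult(2, 10)
// accumulates to Accuracy(correct: 5, count: 20, accuracy: 0.25).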
/**
* This is a metric to measure the accuracy of Tree Neural Network/Recursive Neural Network
*
*/
class TreeNNAccuracy[T: ClassTag]()(
implicit ev: TensorNumeric[T])
extends ValidationMethod[T] {
override def apply(output: Activity, target: Activity):
ValidationResult = {
var correct = 0
var count = 0
var _output = output.asInstanceOf[Tensor[T]]
val _target = target.asInstanceOf[Tensor[T]].select(2, 1)
if (_output.dim() == 3) {
_output = _output.select(2, 1)
(if (_output.size(2) == 1) {
_output.apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
} else {
_output.max(2)._2.squeeze()
}).map(_target, (a, b) => {
if (a == b) {
correct += 1
}
a
})
count += _output.size(1)
} else if (_output.dim == 2) {
_output = _output.select(1, 1)
require(_target.size(1) == 1)
(if (_output.size(1) == 1) {
_output.apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
} else {
_output.max(1)._2.squeeze()
}).map(_target, (a, b) => {
if (a == b) {
correct += 1
}
a
})
count += 1
} else {
throw new IllegalArgumentException
}
new AccuracyResult(correct, count)
}
override def format(): String =
s"TreeNNAccuracy()"
}
/**
 * Calculate the percentage of samples whose output's max-probability index equals the target
*/
class Top1Accuracy[T: ClassTag](
implicit ev: TensorNumeric[T])
extends ValidationMethod[T] {
override def apply(output: Activity, target: Activity):
ValidationResult = {
var correct = 0
var count = 0
val _target = target.asInstanceOf[Tensor[T]]
val _output = if (output.toTensor[T].nDimension() != 1 &&
output.toTensor[T].size().head != _target.size().head) {
output.toTensor[T].narrow(1, 1, _target.size().head)
} else {
output.toTensor[T]
}
if (_output.dim() == 2) {
(if (_output.size(2) == 1) {
_output.apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
} else {
_output.max(2)._2.squeeze()
}).map(_target, (a, b) => {
if (a == b) {
correct += 1
}
a
})
count += _output.size(1)
} else if (_output.dim == 1) {
require(_target.size(1) == 1)
(if (_output.size(1) == 1) {
_output.apply1(x => if (ev.isGreater(ev.fromType(0.5), x)) ev.zero else ev.one)
} else {
_output.max(1)._2
}).map(_target, (a, b) => {
if (a == b) {
correct += 1
}
a
})
count += 1
} else {
throw new IllegalArgumentException
}
new AccuracyResult(correct, count)
}
override def format(): String = "Top1Accuracy"
}
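// Illustrative usage sketch (added for clarity, not part of the original file):
// Top1Accuracy compares the per-row argmax of a [batch x classes] score tensor
// with 1-based class labels. The tensors are built with the usual BigDL
// Tensor(dims) / setValue helpers to stay within this file's imports.
private[optim] object Top1AccuracyExample {
  def run(): Unit = {
    val output = Tensor[Float](2, 3)
    output.setValue(1, 2, 0.8f) // row 1: class 2 has the highest score
    output.setValue(2, 1, 0.7f) // row 2: class 1 has the highest score
    val target = Tensor[Float](2)
    target.setValue(1, 2f)
    target.setValue(2, 1f)
    val metric = new Top1Accuracy[Float]
    println(metric(output, target)) // Accuracy(correct: 2, count: 2, accuracy: 1.0)
  }
}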
/**
* Calculate the Mean Average Precision (MAP). The algorithm follows VOC Challenge after 2007
* Require class label beginning with 0
* @param k Take top-k confident predictions into account. If k=-1, calculate on all predictions
* @param classes The number of classes
*/
class MeanAveragePrecision[T: ClassTag](k: Int, classes: Int)(
implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
require(classes > 0 && classes <= classes, s"The number of classes should be "
+ s"> 0 and <= $classes, but got $classes")
require(k > 0, s"k should be > 0, but got $k")
override def apply(output: Activity, target: Activity): ValidationResult = {
var _target = target.asInstanceOf[Tensor[T]].squeezeNewTensor()
val outTensor = output.toTensor[T]
val _output = if (outTensor.nDimension() != 1 &&
outTensor.size(1) != _target.size(1)) {
outTensor.narrow(1, 1, _target.size().head)
} else {
outTensor
}
require(_output.dim()==1 && _target.nElement() == 1 ||
_output.size(1) == _target.nElement(), "The number of samples in the output should " +
"be the same as in the target")
val posCnt = new Array[Int](classes)
for (i <- 1 to _target.nElement()) {
val clazz = ev.toType[Float](_target.valueAt(i))
require(clazz == math.ceil(clazz), s"The class for $i-th test sample should be an integer, "
+ s"got $clazz")
val intClazz = clazz.toInt
require(intClazz >= 0 && intClazz < classes, s"The class for $i-th test sample should be "
+ s">= 0 and < $classes, but got $intClazz")
posCnt(intClazz) += 1
}
val confidenceArr = (0 until classes).map(_ => new ArrayBuffer[(Float, Boolean)]).toArray
if (_output.nDimension() == 2) {
(1 to _output.size(1)).foreach(i => {
val row = _output.select(1, i)
val gtClz = ev.toType[Float](_target.valueAt(i))
for(clz <- 0 until classes) {
confidenceArr(clz) += ((ev.toType[Float](row.valueAt(clz + 1)), gtClz == clz))
}
})
} else {
require(_output.dim() == 1, "The output should have 1 or 2 dimensions")
val row = _output
val gtClz = ev.toType[Float](_target.valueAt(1))
for(clz <- 0 until classes) {
confidenceArr(clz) += ((ev.toType[Float](row.valueAt(clz + 1)), gtClz == clz))
}
}
new MAPValidationResult(classes, k, confidenceArr, posCnt)
}
override def format(): String = s"MAP@$k"
}
object MAPUtil {
// find top k values & indices in a column of a matrix
def findTopK(k: Int, arr: Array[Array[Float]], column: Int): Array[(Int, Float)] = {
val q = collection.mutable.PriorityQueue[(Int, Float)]()(Ordering.by[(Int, Float), Float](_._2))
arr.indices.foreach(i => {
q.enqueue((i, arr(i)(column)))
})
val end = Math.min(k, q.size)
(1 to end).map(_ => q.dequeue()).toArray
}
/**
* convert the ground truth into parsed GroundTruthRegions
* @param gtTable
* @param classes
* @param isCOCO if using COCO's algorithm for IOU computation
* @param isSegmentation
* @return (array of GT BBoxes of images, # of GT bboxes for each class)
*/
def gtTablesToGroundTruthRegions(gtTable: Table, classes: Int, numIOU: Int, isCOCO: Boolean,
isSegmentation: Boolean): (Array[ArrayBuffer[GroundTruthRegion]], Array[Int]) = {
// the number of GT bboxes for each class
val gtCntByClass = new Array[Int](classes)
// one image may contain multiple Ground truth bboxes
val gtImages = (1 to gtTable.length()).map { i =>
val gtImage = new ArrayBuffer[GroundTruthRegion]()
val roiLabel = gtTable[Table](i)
if (roiLabel.length() > 0) {
val bbox = RoiLabel.getBBoxes(roiLabel)
val tclasses = RoiLabel.getClasses(roiLabel)
val isCrowd = RoiLabel.getIsCrowd(roiLabel)
val masks = if (isSegmentation) RoiLabel.getMasks(roiLabel) else null
val bboxCnt = bbox.size(1)
require(bboxCnt == tclasses.size(1), "CLASSES of target tables should have the" +
"same size of the bbox counts")
require(bboxCnt == isCrowd.nElement(), "ISCROWD of target tables should have the" +
"same size of the bbox counts")
require(masks == null || bboxCnt == masks.length, "MASKS of target tables should have the" +
"same size of the bbox counts")
for (j <- 1 to bboxCnt) {
val (label, _diff) = if (tclasses.dim() == 2) {
(tclasses.valueAt(1, j).toInt, tclasses.valueAt(2, j))
} else {
(tclasses.valueAt(j).toInt, 0f)
}
val diff = if (isCrowd.valueAt(j) != 0 || _diff != 0) 1f else 0f
val newGt = if (isSegmentation) {
new GroundTruthRLE(numIOU, label, diff, masks(j - 1))
} else {
new GroundTruthBBox(isCOCO, numIOU, label, diff, bbox.valueAt(j, 1),
bbox.valueAt(j, 2), bbox.valueAt(j, 3), bbox.valueAt(j, 4))
}
gtImage += newGt
require(label >= 0 && label < classes, s"Bad label id $label")
if (diff == 0) {
gtCntByClass(label) += 1
}
}
}
gtImage
}.toArray
(gtImages, gtCntByClass)
}
/**
* For a detection, match it with all GT boxes. Record the match in "predictByClass"
*/
def parseDetection(gtBbox: ArrayBuffer[GroundTruthRegion], label: Int, score: Float, x1: Float,
y1: Float, x2: Float, y2: Float, mask: RLEMasks, classes: Int, iou: Array[Float],
predictByClasses: Array[Array[ArrayBuffer[(Float, Boolean)]]]): Unit = {
require(label >= 0 && label < classes, s"Bad label id $label")
for (i <- iou.indices) {
// for each GT boxes, try to find a matched one with current prediction
val matchedGt = gtBbox.toIterator.filter(gt => label == gt.label && gt.canOccupy(i))
.flatMap(gt => { // calculate and filter out the bbox
val iouRate = gt.getIOURate(x1, y1, x2, y2, mask)
if (iouRate >= iou(i)) Iterator.single((gt, iouRate)) else Iterator.empty
})
.reduceOption((gtArea1, gtArea2) => { // find max IOU bbox
if (gtArea1._1.diff != gtArea2._1.diff) {
if (gtArea1._1.diff > gtArea2._1.diff) gtArea2 else gtArea1
} else {
if (gtArea1._2 > gtArea2._2) gtArea1 else gtArea2
}
})
.map(bbox => { // occupy the bbox
bbox._1.occupy(i)
bbox._1
})
if (matchedGt.isEmpty || matchedGt.get.diff == 0) {
predictByClasses(i)(label).append((score, matchedGt.isDefined))
}
// else: when the prediction matches a "difficult" GT, do nothing
// it is neither TP nor FP
// "difficult" is defined in PASCAL VOC dataset, meaning the image is difficult to detect
}
}
def parseSegmentationTensorResult(outTensor: Tensor[Float],
func: (Int, Int, Float, Float, Float, Float, Float) => Unit): Unit = {
require(outTensor.dim() == 2, "the output tensor should have 2 dimensions")
for (imgId <- 0 until outTensor.size(1)) {
// for each image
val batch = outTensor.select(1, imgId + 1)
val batchSize = batch.valueAt(1).toInt
var offset = 2
for (bboxIdx <- 0 until batchSize) {
// for each predicted bboxes
val label = batch.valueAt(offset).toInt
val score = batch.valueAt(offset + 1)
val x1 = batch.valueAt(offset + 2)
val y1 = batch.valueAt(offset + 3)
val x2 = batch.valueAt(offset + 4)
val y2 = batch.valueAt(offset + 5)
func(imgId, label, score, x1, y1, x2, y2)
offset += 6
}
}
}
}
class MAPType extends Serializable
object MAPPascalVoc2007 extends MAPType
object MAPPascalVoc2010 extends MAPType
object MAPCOCO extends MAPType
/**
* The MAP Validation Result. The results are not calculated until result() or format() is called
* require class label beginning with 0
*/
class MAPValidationResult(
private val nClass: Int,
// take the first k samples, or -1 for all samples
private val k: Int,
// the predicts for each classes. (Confidence, GT)
private[bigdl] var predictForClass: Array[ArrayBuffer[(Float, Boolean)]],
private[bigdl] var gtCntForClass: Array[Int],
private val theType: MAPType = MAPPascalVoc2010,
private val skipClass: Int = -1,
private val isSegmentation: Boolean = false
)
extends ValidationResult {
if (skipClass < 0) {
require(skipClass == -1, s"Invalid skipClass $skipClass")
} else {
require(skipClass >= 0 && skipClass < nClass, s"Invalid skipClass $skipClass")
}
private def sortPredictions(p: ArrayBuffer[(Float, Boolean)]): ArrayBuffer[(Float, Boolean)] = {
    p.sortBy(v => v._1)(Ordering.Float.reverse) // descending order
}
private[bigdl] def calculateClassAP(clz: Int): Float = {
val posCnt = gtCntForClass
// for each class, first find top k confident samples
val sorted = sortPredictions(predictForClass(clz))
var tp = 0
val refinedK = if (k > 0) k else sorted.size
// calculate the max precision for each different recall
// for each top-j items, calculate the (precision, recall)
val PnR = sorted.take(refinedK).zipWithIndex.flatMap { case (predict, j) =>
if (predict._2) {
// if it is a hit
tp += 1
// j + 1 is the total number of samples marked positive by the model
val precision = tp.toFloat / (j + 1)
val recall = tp.toFloat / posCnt(clz)
Iterator.single(recall, precision)
} else {
Iterator.empty
}
}
// get Average precision over each different recall
theType match {
case _: MAPPascalVoc2007.type =>
(0 to 10).map(r => {
val recall = 0.1f * r
// for every (R,P), where R>=recall, get max(P)
PnR.filter(_._1 >= recall).map(_._2).reduceOption(_ max _).getOrElse(0f)
})
.reduceOption(_ + _)
.map(_ / 11)
.getOrElse(0f)
case _: MAPPascalVoc2010.type =>
(1 to posCnt(clz)).map(r => {
val recall = r.toFloat / posCnt(clz)
// for every (R,P), where R>=recall, get max(P)
PnR.filter(_._1 >= recall).map(_._2).reduceOption(_ max _).getOrElse(0f)
})
.reduceOption(_ + _)
.map(_ / posCnt(clz))
.getOrElse(0f)
case _: MAPCOCO.type =>
if (posCnt(clz) == 0) {
-1f
} else {
(0 to 100).map(r => {
val recall = 0.01f * r
// for every (R,P), where R>=recall, get max(P)
PnR.filter(_._1 >= recall).map(_._2).reduceOption(_ max _).getOrElse(0f)
})
.reduceOption(_ + _)
.map(_ / 101)
.getOrElse(0f)
}
}
}
override def result(): (Float, Int) = {
// get the indices of top-k confident samples
val AP = (0 until nClass).filter(_ != skipClass).map { clz => calculateClassAP(clz) }
    // APs for all classes are computed; now average them into the MAP
val result = theType match {
case t: MAPCOCO.type =>
val filtered = AP.filter(_ != -1f)
filtered.sum / filtered.length
case _ => AP.sum / (nClass - (if (skipClass == -1) 0 else 1))
}
(result, 1)
}
private[optim] def mergeWithoutGtCnt(o: MAPValidationResult): MAPValidationResult = {
require(predictForClass.length == o.predictForClass.length)
require(gtCntForClass.length == o.gtCntForClass.length)
for (i <- predictForClass.indices) {
val (left, right) = (predictForClass(i), o.predictForClass(i))
left ++= right
predictForClass(i) = if (k < 0) {
left
} else {
val sorted = sortPredictions(left)
sorted.take(k)
}
}
this
}
// scalastyle:off methodName
override def +(other: ValidationResult): ValidationResult = {
val o = other.asInstanceOf[MAPValidationResult]
mergeWithoutGtCnt(o)
gtCntForClass.indices.foreach( i => gtCntForClass(i) += o.gtCntForClass(i))
this
}
// scalastyle:on methodName
override protected def format(): String = {
val segOrBbox = if (isSegmentation) "segm" else "bbox"
val resultStr = (0 until nClass).map { clz => calculateClassAP(clz) }.zipWithIndex
.map { t => s"AP of class ${t._2} = ${t._1}\\n"}.reduceOption( _ + _).getOrElse("")
s"MeanAveragePrecision_$segOrBbox@$k(${result()._1})\\n $resultStr"
}
}
abstract private[bigdl] class GroundTruthRegion(isCOCO: Boolean, numIOU: Int, val label: Int,
val diff: Float) {
  // isOccupied(i) is false while the region has not been matched by any prediction
  // at the i-th IOU threshold
private val isOccupied = new Array[Boolean](numIOU)
/**
   * Returns whether this region can still be matched by a new prediction at the given IOU threshold index
*
* @return
*/
def canOccupy(iouIdx: Int): Boolean = (isCOCO && diff == 1) || !isOccupied(iouIdx)
def occupy(iouIdx: Int): Unit = {
isOccupied(iouIdx) = true
}
/** get the IOU rate of another region with the current region
*
* @param x1 the min x
* @param y1 the min y
* @param x2 the max x
* @param y2 the max y
* @param rle RLE mask data, can be null
* @return
*/
def getIOURate(x1: Float, y1: Float, x2: Float, y2: Float, rle: RLEMasks = null): Float
}
private[bigdl] class GroundTruthBBox(isCOCO: Boolean, numIOU: Int, label: Int, diff: Float,
val xmin: Float, val ymin: Float, val xmax: Float, val ymax: Float)
extends GroundTruthRegion(isCOCO, numIOU, label, diff) {
private val area = (xmax - xmin + 1) * (ymax - ymin + 1)
override def getIOURate(x1: Float, y1: Float, x2: Float, y2: Float,
rle: RLEMasks = null): Float = {
val ixmin = Math.max(xmin, x1)
val iymin = Math.max(ymin, y1)
val ixmax = Math.min(xmax, x2)
val iymax = Math.min(ymax, y2)
val inter = Math.max(ixmax - ixmin + 1, 0) * Math.max(iymax - iymin + 1, 0)
val detectionArea = (x2 - x1 + 1) * (y2 - y1 + 1)
val union = if (isCOCO && diff != 0) detectionArea else (detectionArea + area - inter)
inter / union
}
}
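// Worked example (added for clarity, not part of the original file): with the
// inclusive-pixel convention above (width = xmax - xmin + 1), a 10x10 GT box at
// (0, 0, 9, 9) and a 10x10 detection at (5, 5, 14, 14) share a 5x5 intersection,
// so IOU = 25 / (100 + 100 - 25) ≈ 0.143.
private[bigdl] object GroundTruthBBoxExample {
  def run(): Unit = {
    val gt = new GroundTruthBBox(isCOCO = false, numIOU = 1, label = 1, diff = 0f,
      xmin = 0f, ymin = 0f, xmax = 9f, ymax = 9f)
    println(gt.getIOURate(5f, 5f, 14f, 14f)) // ≈ 0.143
  }
}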
private[bigdl] class GroundTruthRLE(numIOU: Int, label: Int, diff: Float, rle: RLEMasks)
extends GroundTruthRegion(true, numIOU, label, diff) {
override def getIOURate(x1: Float, y1: Float, x2: Float, y2: Float,
detRLE: RLEMasks): Float = {
MaskUtils.rleIOU(detRLE, rle, diff != 0)
}
}
class MAPMultiIOUValidationResult(
private val nClass: Int,
// take the first k samples, or -1 for all samples
private val k: Int,
// the predicts for each classes.
// predictForClassIOU(iouIdx)(cls) is an array of (Confidence, GT)
private val predictForClassIOU: Array[Array[ArrayBuffer[(Float, Boolean)]]],
private var gtCntForClass: Array[Int],
private val iouRange: (Float, Float),
private val theType: MAPType = MAPPascalVoc2010,
private val skipClass: Int = -1,
private val isSegmentation: Boolean = false) extends ValidationResult {
val impl = predictForClassIOU.map(predictForClass => {
new MAPValidationResult(nClass, k, predictForClass,
gtCntForClass, theType, skipClass, isSegmentation)
})
override def result(): (Float, Int) = (impl.map(_.result()._1).sum / impl.length, 1)
// scalastyle:off methodName
override def +(other: ValidationResult): ValidationResult = {
val o = other.asInstanceOf[MAPMultiIOUValidationResult]
require(o.predictForClassIOU.length == predictForClassIOU.length,
"To merge MAPMultiIOUValidationResult, the length of predictForClassIOU should be" +
"the same")
impl.zip(o.impl).foreach { case (v1, v2) => v1.mergeWithoutGtCnt(v2) }
gtCntForClass.indices.foreach( i => gtCntForClass(i) += o.gtCntForClass(i))
this
}
// scalastyle:on methodName
override protected def format(): String = {
val step = (iouRange._2 - iouRange._1) / (predictForClassIOU.length - 1)
val results = impl.map(_.result()._1)
val resultStr = results.zipWithIndex
.map { t => s"\\t IOU(${iouRange._1 + t._2 * step}) = ${t._1}\\n"}
.reduceOption( _ + _).getOrElse("")
val segOrBbox = if (isSegmentation) "segm" else "bbox"
f"MAP_$segOrBbox@IOU(${iouRange._1}%1.3f:$step%1.3f:${iouRange._2}%1.3f)=" +
s"${results.sum / impl.length}\\n$resultStr"
}
}
/** MeanAveragePrecision for Object Detection
* The class label begins with 0
*
* The expected output from the last layer should be a Tensor[Float] or a Table
* If output is a tensor, it should be [num_of_batch X (1 + maxDetection * 6)] matrix
* The format of the matrix should be [<batch>, <batch>, ...], where each row vector is
* <batch> = [<size_of_batch>, <sample>,...]. Each sample has format:
* <sample> = <label, score, bbox x4>
* imgId is the batch number of the sample. imgId begins with 0.
* Multiple samples may share one imgId
*
* If output is a table, it is a table of tables.
* output(i) is the results of the i-th image in the batch, where i = 1 to sizeof(batch)
* output(i) is a table, which contains the same keys (fields) of image info in the "target"
* Please refer to RoiMiniBatch/RoiImageInfo's documents. Besides, the inner tables also contain
* the scores for the detections in the image.
*
* The "target" (Ground truth) is a table with the same structure of "output", except that
* it does not have "score" field
*
* @param classes the number of classes
* @param topK only take topK confident predictions (-1 for all predictions)
* @param iouThres the IOU thresholds
* @param theType the type of MAP algorithm. (voc2007/voc2010/COCO)
* @param skipClass skip calculating on a specific class (e.g. background)
* the class index starts from 0, or is -1 if no skipping
* @param isSegmentation if check the IOU of segmentations instead of bounding boxes. If true,
* the output and target must have "masks" data
*/
class MeanAveragePrecisionObjectDetection[T: ClassTag](
classes: Int, topK: Int = -1, iouThres: Array[Float] = Array(0.5f),
theType: MAPType = MAPPascalVoc2010, skipClass: Int = -1, isSegmentation: Boolean = false)(
implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
override def apply(output: Activity, target: Activity): ValidationResult = {
// one image may contain multiple Ground truth bboxes
val (gtImages, gtCntByClass) =
MAPUtil.gtTablesToGroundTruthRegions(target.toTable, classes, iouThres.length,
theType.isInstanceOf[MAPCOCO.type], isSegmentation)
// the predicted bboxes for each classes
// predictByClasses(iouIdx)(classIdx)(bboxNum) is (Confidence, GT)
val predictByClasses = iouThres.map(_iou => {
(0 until classes).map(_ => new ArrayBuffer[(Float, Boolean)]).toArray
})
output match {
case _outTensor: Tensor[_] =>
require(!isSegmentation, "Cannot get segmentation data from tensor output for MAP")
val outTensor = _outTensor.asInstanceOf[Tensor[Float]]
MAPUtil.parseSegmentationTensorResult(outTensor,
(imgIdx, label, score, x1, y1, x2, y2) => {
val gtBbox = gtImages(imgIdx)
MAPUtil.parseDetection(gtBbox, label, score, x1, y1, x2, y2, null, classes, iouThres,
predictByClasses = predictByClasses)
})
case outTable: Table =>
require(gtImages.length == outTable.length(), "The number of images in the output and " +
"in the target should be the same")
for (imgId <- 1 to outTable.length()) {
val gtBbox = gtImages(imgId - 1)
val imgOut = outTable[Table](imgId)
// if the image contains empty predictions, do nothing
if (imgOut.length() > 0) {
val bboxes = RoiLabel.getBBoxes(imgOut)
val scores = RoiLabel.getScores(imgOut)
val labels = RoiLabel.getClasses(imgOut)
require(bboxes.dim() == 2, "the bbox tensor should have 2 dimensions")
val masks = if (isSegmentation) Some(RoiLabel.getMasks(imgOut)) else None
val batchSize = bboxes.size(1)
            require(batchSize == labels.size(1), "CLASSES of output tables should have the" +
              " same size as the bbox counts")
            require(batchSize == scores.nElement(), "SCORES of output tables should have the" +
              " same size as the bbox counts")
            require(masks.isEmpty || batchSize == masks.get.length, "MASKS of output tables " +
              "should have the same size as the bbox counts")
val detections = new ArrayBuffer[(Int, Float, Float, Float, Float,
Float, RLEMasks)]()
for (bboxIdx <- 1 to batchSize) {
val score = scores.valueAt(bboxIdx)
val x1 = bboxes.valueAt(bboxIdx, 1)
val y1 = bboxes.valueAt(bboxIdx, 2)
val x2 = bboxes.valueAt(bboxIdx, 3)
val y2 = bboxes.valueAt(bboxIdx, 4)
val label = labels.valueAt(bboxIdx).toInt
val mask = masks.map(_ (bboxIdx - 1)).orNull
detections.append((label, score, x1, y1, x2, y2, mask))
}
detections.sortBy(v => v._2)(Ordering.Float.reverse).foreach {
case (label, score, x1, y1, x2, y2, mask) =>
MAPUtil.parseDetection(gtBbox, label, score, x1, y1, x2, y2, mask, classes,
iouThres, predictByClasses)
}
}
}
}
if (iouThres.length != 1) {
new MAPMultiIOUValidationResult(classes, topK, predictByClasses, gtCntByClass,
(iouThres.head, iouThres.last), theType, skipClass, isSegmentation)
} else {
new MAPValidationResult(classes, topK, predictByClasses.head, gtCntByClass, theType,
skipClass, isSegmentation)
}
}
override protected def format(): String = s"MAPObjectDetection"
}
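// Worked example of the tensor output layout described above (added for
// clarity, not part of the original file): one row of the
// [num_of_batch x (1 + maxDetection * 6)] matrix holding two detections is
//   [2, label1, score1, x1, y1, x2, y2, label2, score2, x1, y1, x2, y2, 0, ...]
// where element 1 is the number of valid detections, each detection occupies
// 6 consecutive slots, and the remaining slots are padding that is ignored.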
object MeanAveragePrecision {
/**
* Create MeanAveragePrecision validation method using COCO's algorithm for object detection.
* IOU computed by the segmentation masks
*
* @param nClasses the number of classes (including skipped class)
* @param topK only take topK confident predictions (-1 for all predictions)
* @param skipClass skip calculating on a specific class (e.g. background)
* the class index starts from 0, or is -1 if no skipping
* @param iouThres the IOU thresholds, (rangeStart, stepSize, numOfThres), inclusive
* @return MeanAveragePrecisionObjectDetection
*/
def cocoSegmentation(nClasses: Int, topK: Int = -1, skipClass: Int = 0,
iouThres: (Float, Float, Int) = (0.5f, 0.05f, 10))
: MeanAveragePrecisionObjectDetection[Float] = {
createCOCOMAP(nClasses, topK, skipClass, iouThres, true)
}
/**
* Create MeanAveragePrecision validation method using COCO's algorithm for object detection.
* IOU computed by the bounding boxes
*
* @param nClasses the number of classes (including skipped class)
* @param topK only take topK confident predictions (-1 for all predictions)
* @param skipClass skip calculating on a specific class (e.g. background)
* the class index starts from 0, or is -1 if no skipping
* @param iouThres the IOU thresholds, (rangeStart, stepSize, numOfThres), inclusive
* @return MeanAveragePrecisionObjectDetection
*/
def cocoBBox(nClasses: Int, topK: Int = -1, skipClass: Int = 0,
iouThres: (Float, Float, Int) = (0.5f, 0.05f, 10))
: MeanAveragePrecisionObjectDetection[Float] = {
createCOCOMAP(nClasses, topK, skipClass, iouThres, false)
}
/**
* Calculate the Mean Average Precision (MAP) for classification output and target
* The algorithm follows VOC Challenge after 2007
* Require class label beginning with 0
*
* @param nClasses The number of classes
* @param topK Take top-k confident predictions into account. If k=-1,calculate on all predictions
*/
def classification(nClasses: Int, topK: Int = -1)
: MeanAveragePrecision[Float] = new MeanAveragePrecision[Float](topK, nClasses)
private def createCOCOMAP(nClasses: Int, topK: Int, skipClass: Int,
iouThres: (Float, Float, Int), isSegmentation: Boolean)
: MeanAveragePrecisionObjectDetection[Float] = {
new MeanAveragePrecisionObjectDetection[Float](nClasses, topK,
(0 until iouThres._3).map(iouThres._1 + _ * iouThres._2).toArray,
MAPCOCO, skipClass, isSegmentation)
}
/**
* Create MeanAveragePrecision validation method using Pascal VOC's algorithm for object detection
*
* @param nClasses the number of classes
* @param useVoc2007 if using the algorithm in Voc2007 (11 points). Otherwise, use Voc2010
* @param topK only take topK confident predictions (-1 for all predictions)
* @param skipClass skip calculating on a specific class (e.g. background)
* the class index starts from 0, or is -1 if no skipping
* @return MeanAveragePrecisionObjectDetection
*/
def pascalVOC(nClasses: Int, useVoc2007: Boolean = false, topK: Int = -1,
skipClass: Int = 0) : MeanAveragePrecisionObjectDetection[Float] = {
new MeanAveragePrecisionObjectDetection[Float](nClasses, topK,
theType = if (useVoc2007) MAPPascalVoc2007 else MAPPascalVoc2010,
skipClass = skipClass)
}
}
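// Illustrative usage sketch (added for clarity, not part of the original file):
// typical ways to construct the detection/classification mAP metrics above.
// These instances are normally handed to a validator rather than applied
// directly; 81 and 10 are arbitrary class counts chosen for the example.
private[optim] object MeanAveragePrecisionExamples {
  // PASCAL VOC 2010-style box mAP over 81 classes, skipping class 0 (background)
  val vocMAP: MeanAveragePrecisionObjectDetection[Float] =
    MeanAveragePrecision.pascalVOC(nClasses = 81)
  // COCO-style box mAP averaged over the IOU thresholds 0.5, 0.55, ..., 0.95
  val cocoMAP: MeanAveragePrecisionObjectDetection[Float] =
    MeanAveragePrecision.cocoBBox(nClasses = 81)
  // Plain classification mAP (VOC post-2007 algorithm), top-5 predictions only
  val clsMAP: MeanAveragePrecision[Float] =
    MeanAveragePrecision.classification(nClasses = 10, topK = 5)
}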
/**
 * Calculate the percentage of samples whose target is among the output's top-5 probability indexes
*/
class Top5Accuracy[T: ClassTag](
implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
override def apply(output: Activity, target: Activity):
AccuracyResult = {
var _target = target.asInstanceOf[Tensor[T]].squeezeNewTensor()
val _output = if (output.toTensor[T].nDimension() != 1 &&
output.toTensor[T].size(1) != _target.size(1)) {
output.toTensor[T].narrow(1, 1, _target.size().head)
} else {
output.toTensor[T]
}
var correct = 0
var count = 0
if (_output.dim() == 2) {
val indices = _output.topk(5, 2, false)._2
var i = 1
while (i <= _output.size(1)) {
if (indices.valueAt(i, 1) == _target.valueAt(i)
|| indices.valueAt(i, 2) == _target.valueAt(i)
|| indices.valueAt(i, 3) == _target.valueAt(i)
|| indices.valueAt(i, 4) == _target.valueAt(i)
|| indices.valueAt(i, 5) == _target.valueAt(i)) {
correct += 1
}
i += 1
}
count += _output.size(1)
} else if (_output.dim == 1) {
require(_target.size(1) == 1)
val indices = _output.topk(5, 1, false)._2
if (indices.valueAt(1) == _target.valueAt(1) || indices.valueAt(2) == _target.valueAt(1)
|| indices.valueAt(3) == _target.valueAt(1) || indices.valueAt(4) == _target.valueAt(1)
|| indices.valueAt(5) == _target.valueAt(1)) {
correct += 1
}
count += 1
} else {
throw new IllegalArgumentException
}
new AccuracyResult(correct, count)
}
override def format(): String = "Top5Accuracy"
}
/**
* Hit Ratio(HR).
* HR intuitively measures whether the test item is present on the top-k list.
*
* @param k top k.
* @param negNum number of negative items.
*/
class HitRatio[T: ClassTag](k: Int = 10, negNum: Int = 100)(
implicit ev: TensorNumeric[T])
extends ValidationMethod[T] {
/**
   * Output and target should belong to the same user and have
   * (negNum + 1) elements.
   * Target should have exactly one positive label, meaning one element is 1
   * and the others are all 0.
   * Each pair of output and target is counted as one record.
*/
override def apply(output: Activity, target: Activity): ValidationResult = {
require(output.toTensor[T].nElement() == negNum + 1,
s"negNum is $negNum, output's nElement should be ${negNum}, but got" +
s" ${output.toTensor[T].nElement()}")
require(target.toTensor[T].nElement() == negNum + 1,
s"negNum is $negNum, target's nElement should be ${negNum}, but got" +
s" ${output.toTensor[T].nElement()}")
val o = output.toTensor[T].resize(1 + negNum)
val t = target.toTensor[T].resize(1 + negNum)
var positiveItem = 0
var positiveCount = 0
var i = 1
while(i <= t.nElement()) {
if (t.valueAt(i) == 1) {
positiveItem = i
positiveCount += 1
}
i += 1
}
require(positiveItem != 0, s"${format()}: no positive item.")
    require(positiveCount == 1, s"${format()}: too many positive items, expected 1," +
s" but got $positiveCount")
val hr = calHitRate(positiveItem, o, k)
new ContiguousResult(hr, 1, s"HitRatio@$k")
}
// compute hit rate
private def calHitRate(index: Int, o: Tensor[T], k: Int): Float = {
var topK = 1
var i = 1
val precision = ev.toType[Float](o.valueAt(index))
while (i < o.nElement() && topK <= k) {
if (ev.toType[Float](o.valueAt(i)) > precision) {
topK += 1
}
i += 1
}
if(topK <= k) {
1
} else {
0
}
}
override def format(): String = "HitRate@10"
}
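// Illustrative usage sketch (added for clarity, not part of the original file):
// HitRatio@2 with one positive and negNum = 4 negative items, built with the
// usual BigDL Tensor(dim) / setValue helpers. The positive item is ranked 2nd
// by score, so it lands inside the top 2 and the hit rate is 1.
private[optim] object HitRatioExample {
  def run(): Unit = {
    val output = Tensor[Float](5)
    output.setValue(1, 0.9f)
    output.setValue(2, 0.8f) // the positive item, ranked 2nd overall
    output.setValue(3, 0.1f)
    output.setValue(4, 0.3f)
    output.setValue(5, 0.2f)
    val target = Tensor[Float](5)
    target.setValue(2, 1f) // exactly one element is 1, the rest stay 0
    val hr = new HitRatio[Float](k = 2, negNum = 4).apply(output, target)
    println(hr.result()) // prints (1.0,1)
  }
}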
/**
* Normalized Discounted Cumulative Gain(NDCG).
* NDCG accounts for the position of the hit by assigning higher scores to hits at top ranks.
*
* @param k top k.
* @param negNum number of negative items.
*/
class NDCG[T: ClassTag](k: Int = 10, negNum: Int = 100)(
implicit ev: TensorNumeric[T])
extends ValidationMethod[T] {
/**
   * Output and target should belong to the same user and have
   * (negNum + 1) elements.
   * Target should have exactly one positive label, meaning one element is 1
   * and the others are all 0.
   * Each pair of output and target is counted as one record.
*/
override def apply(output: Activity, target: Activity): ValidationResult = {
require(output.toTensor[T].nElement() == negNum + 1,
s"negNum is $negNum, output's nElement should be ${negNum}, but got" +
s" ${output.toTensor[T].nElement()}")
require(target.toTensor[T].nElement() == negNum + 1,
s"negNum is $negNum, target's nElement should be ${negNum}, but got" +
s" ${output.toTensor[T].nElement()}")
val o = output.toTensor[T].resize(1 + negNum)
val t = target.toTensor[T].resize(1 + negNum)
var positiveItem = 0
var positiveCount = 0
var i = 1
while(i <= t.nElement()) {
if (t.valueAt(i) == 1) {
positiveItem = i
positiveCount += 1
}
i += 1
}
require(positiveItem != 0, s"${format()}: no positive item.")
    require(positiveCount == 1, s"${format()}: too many positive items, expected 1," +
s" but got $positiveCount")
val ndcg = calNDCG(positiveItem, o, k)
new ContiguousResult(ndcg, 1, s"NDCG")
}
// compute NDCG
private def calNDCG(index: Int, o: Tensor[T], k: Int): Float = {
var ranking = 1
var i = 1
val precision = ev.toType[Float](o.valueAt(index))
while (i < o.nElement() && ranking <= k) {
if (ev.toType[Float](o.valueAt(i)) > precision) {
ranking += 1
}
i += 1
}
if(ranking <= k) {
(math.log(2) / math.log(ranking + 1)).toFloat
} else {
0
}
}
override def format(): String = "NDCG"
}
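// Worked example (added for clarity, not part of the original file): with a
// single relevant item, the score above reduces to log(2) / log(rank + 1).
// A positive item ranked 1st scores 1.0, ranked 2nd scores log(2)/log(3) ≈ 0.63,
// ranked 3rd scores log(2)/log(4) = 0.5, and anything outside the top k scores 0.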
/**
* Use loss as a validation result
*
* @param loss loss calculated by forward function
* @param count recording the times of calculating loss
*/
class LossResult(private var loss: Float, private var count: Int)
extends ContiguousResult(loss, count, name = "Loss")
/**
 * A generic result type whose data is contiguous float.
*
* @param contiResult loss calculated by forward function
* @param count recording the times of calculating loss
* @param name name of the result
*/
class ContiguousResult(
private var contiResult: Float,
private var count: Int,
private val name: String)
extends ValidationResult {
override def result(): (Float, Int) = (contiResult.toFloat / count, count)
// scalastyle:off methodName
override def +(other: ValidationResult): ValidationResult = {
val otherResult = other.asInstanceOf[ContiguousResult]
this.contiResult += otherResult.contiResult
this.count += otherResult.count
this
}
// scalastyle:on methodName
override protected def format(): String = {
s"($name: $contiResult, count: $count, Average $name: ${contiResult.toFloat / count})"
}
override def equals(obj: Any): Boolean = {
if (obj == null) {
return false
}
if (!obj.isInstanceOf[ContiguousResult]) {
return false
}
val other = obj.asInstanceOf[ContiguousResult]
if (this.eq(other)) {
return true
}
this.contiResult == other.contiResult && this.count == other.count
}
override def hashCode(): Int = {
val seed = 37
var hash = 1
hash = hash * seed + this.contiResult.toInt
hash = hash * seed + this.count
hash
}
}
/**
 * This evaluation method calculates the loss of the output with respect to the target
*
* @param criterion criterion method for evaluation
* The default criterion is [[ClassNLLCriterion]]
*/
class Loss[@specialized(Float, Double)T: ClassTag](
var criterion: Criterion[T] = null)
(implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
if (criterion == null) criterion = ClassNLLCriterion[T]()
override def apply(output: Activity, target: Activity): LossResult = {
val _target = target.asInstanceOf[Tensor[T]]
val _output = if (output.toTensor[T].nDimension() != 1 &&
output.toTensor[T].size().head != _target.size().head) {
output.toTensor[T].narrow(1, 1, _target.size().head)
} else {
output.toTensor[T]
}
val loss = ev.toType[Float](criterion.forward(_output, _target))
val count = 1
new LossResult(loss, count)
}
override def format(): String = "Loss"
}
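// Illustrative usage sketch (added for clarity, not part of the original file):
// the default criterion is ClassNLLCriterion, which expects log-probabilities
// and 1-based class labels; any other Criterion[T] could be passed instead.
// Tensors are built with the usual BigDL Tensor(dims) / fill / setValue helpers.
private[optim] object LossExample {
  def run(): Unit = {
    // two samples, three classes, uniform log-probabilities log(1/3)
    val logProb = math.log(1.0 / 3).toFloat
    val output = Tensor[Float](2, 3).fill(logProb)
    val target = Tensor[Float](2)
    target.setValue(1, 1f)
    target.setValue(2, 3f)
    val loss = new Loss[Float]().apply(output, target)
    println(loss.result()) // prints roughly (1.0986, 1), i.e. -ln(1/3)
  }
}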
/**
 * This evaluation method calculates the mean absolute error of the output with respect to the target
*
*/
class MAE[@specialized(Float, Double)T: ClassTag]()
(implicit ev: TensorNumeric[T]) extends ValidationMethod[T] {
private val criterion = AbsCriterion[T]()
override def apply(output: Activity, target: Activity): LossResult = {
val _output = output.asInstanceOf[Tensor[T]]
val (max_prob, max_index) = _output.max(2)
val _target = target.asInstanceOf[Tensor[T]]
val loss = ev.toType[Float](criterion.forward(max_index, _target))
val count = 1
new LossResult(loss, count)
}
override def format(): String = "MAE"
}
|
i8run/BigDL-1
|
spark/dl/src/main/scala/com/intel/analytics/bigdl/optim/ValidationMethod.scala
|
Scala
|
apache-2.0
| 40,633 |
package io.taig.phoenix.models
import io.circe.DecodingFailure
import io.circe.syntax._
class TopicTest extends Suite {
it should "have a String representation" in {
Topic( "foo", "bar" ).toString shouldBe "Topic(foo:bar)"
Topic( "foo" ).toString shouldBe "Topic(foo)"
}
it should "have a JSON encoder" in {
Topic( "foo", "bar" ).asJson shouldBe "foo:bar".asJson
Topic( "foo" ).asJson shouldBe "foo".asJson
}
it should "have a JSON decoder" in {
"foo:bar".asJson.as[Topic] shouldBe Right( Topic( "foo", "bar" ) )
"foo".asJson.as[Topic] shouldBe Right( Topic( "foo" ) )
"foo§$%baz".asJson.as[Topic] shouldBe
Left( DecodingFailure( "Invalid format", List.empty ) )
}
"isSubscribedTo" should "be valid when comparing equal topics" in {
Topic( "foo", "bar" ) isSubscribedTo Topic( "foo", "bar" ) shouldBe true
Topic( "foo" ) isSubscribedTo Topic( "foo" ) shouldBe true
}
it should "be valid when comparing a specific topic with a broadcast" in {
Topic( "foo", "bar" ) isSubscribedTo Topic( "foo" ) shouldBe true
}
it should "be invalid when comparing topics with different identifiers" in {
Topic( "foo", "bar" ) isSubscribedTo Topic( "foo", "baz" ) shouldBe false
}
it should "be invalid when comparing a broadcast with a specific topic" in {
Topic( "foo" ) isSubscribedTo Topic( "foo", "bar" ) shouldBe false
}
}
|
Taig/phoenix-models
|
src/test/scala/io/taig/phoenix/models/TopicTest.scala
|
Scala
|
mit
| 1,483 |
package net.tixxit.contract.util
import scalaz._
import scalaz.effect._
final object IOComonad extends Comonad[IO] {
def copoint[A](p: IO[A]): A = p.unsafePerformIO
def cobind[A, B](fa: IO[A])(f: IO[A] => B): IO[B] = IO(f(fa))
def map[A, B](fa: IO[A])(f: A => B): IO[B] = Functor[IO].map(fa)(f)
def cojoin[A](a: IO[A]): IO[IO[A]] = IO(a)
}
|
tixxit/contract
|
src/test/scala/net/tixxit/contract/util/IOComonad.scala
|
Scala
|
mit
| 351 |
package org.dsa.time
import org.dsa.core.CloudSWASMWithTop
import org.dsa.utils.ArgsDefault
/**
* Created by xubo on 2016/12/11.
*/
object CloudSWASMWithTopSubjectHDFSTime {
def main(args: Array[String]) {
var subject = ArgsDefault.DSWSubjectHDFS
for (j <- 0 until subject.length) {
for (i <- 0 until 3) {
subject(j).foreach { each =>
print(each + "\\t")
}
println()
CloudSWASMWithTop.main(subject(j))
}
}
}
}
|
xubo245/CloudSW
|
src/main/scala/org/dsa/time/CloudSWASMWithTopSubjectHDFSTime.scala
|
Scala
|
gpl-2.0
| 486 |
package com.getjenny.starchat.entities.io
/**
* Created by Angelo Leto <[email protected]> on 20/11/17.
*/
case class UserId(id: String/** user id */)
|
GetJenny/starchat
|
src/main/scala/com/getjenny/starchat/entities/io/UserId.scala
|
Scala
|
gpl-2.0
| 157 |
package com.idyria.osi.ooxoo.db.store
import com.idyria.osi.ooxoo.db.Document
import com.idyria.osi.ooxoo.core.buffers.structural.ElementBuffer
import scala.reflect.ClassTag
/**
* Base Trait for Document Store
*
* A Document Store is a main interface to fetch documents
*/
trait DocumentStore {
// Container Interface
//--------------------------
/**
* Returns a container for the provided id
*/
def container(id: String): DocumentContainer
/**
* Returns a list of all known containers
*/
def containers() : Iterable[DocumentContainer]
// Document Interface
//-----------------------
/**
* path format: "containerid"/"documentid"
*/
def document(path: String): Option[Document] = {
path.split("/") match {
      case splitted if (splitted.length != 2) => throw new RuntimeException(s"""DocumentStore document path $path does not conform to the containerid/documentid format """)
case splitted => this.container(splitted(0)).getDocument(splitted(1))
}
}
def document[T <: ElementBuffer : ClassTag](path: String, topElement: T): Option[T] = {
path.split("/") match {
      case splitted if (splitted.length != 2) => throw new RuntimeException(s"""DocumentStore document path $path does not conform to the containerid/documentid format """)
case splitted => this.container(splitted(0)).document[T](splitted(1), topElement)
}
}
// XPath Interface
//---------------------
}
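// Hypothetical usage sketch (not part of the original file): fetching a document by its
// "containerid/documentid" path, as described above. The container and document ids are made up.
object DocumentStoreExample {
  def firstUser(store: DocumentStore): Option[Document] =
    store.document("users/user-1")
}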
|
richnou/ooxoo-db
|
src/main/scala/com/idyria/osi/ooxoo/db/store/DocumentStore.scala
|
Scala
|
lgpl-3.0
| 1,509 |
/*
* The MIT License
*
* Copyright (c) 2010 Vladimir Kirichenko <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package butter4s.date
import java.util.{GregorianCalendar, Locale, Calendar}
import java.util.Calendar._
/**
* @author Vladimir Kirichenko <[email protected]>
*/
case class Year( year: Int ) extends Seq[Month] {
  private[date] val calendar = new GregorianCalendar( year, 0, 1 )
private lazy val monthes = for ( m <- calendar.getActualMinimum( MONTH ) to calendar.getActualMaximum( MONTH ) ) yield Month( this, m )
def iterator = monthes.toIterator
def length = monthes.length
def apply( idx: Int ) = monthes( idx )
override def toString() = "Year(" + year + ")"
}
case class Month( year: Year, number: Int ) extends Seq[Day] {
private[date] val calendar = year.calendar.clone.asInstanceOf[Calendar]
calendar.set( MONTH, number )
lazy val name:String = calendar.getDisplayName( MONTH, LONG, Locale.getDefault )
private lazy val days = for ( d <- calendar.getActualMinimum( DATE ) to calendar.getActualMaximum( DATE ) ) yield Day( this, d )
def iterator = days.toIterator
def length = days.length
def apply( idx: Int ) = days( idx )
override def toString() = name
}
case class Day( month: Month, number: Int ) {
private[date] val calendar = month.calendar.clone.asInstanceOf[Calendar]
calendar.set( DATE, number )
lazy val ofWeek = calendar.get( DAY_OF_WEEK ) match {
case MONDAY => Monday
case TUESDAY => Tuesday
case WEDNESDAY => Wednesday
case THURSDAY => Thursday
case FRIDAY => Friday
case SATURDAY => Saturday
case SUNDAY => Sunday
}
override def toString = ofWeek + "(" + month + "," + number + ")"
}
sealed trait DayOfWeek
case object Monday extends DayOfWeek
case object Tuesday extends DayOfWeek
case object Wednesday extends DayOfWeek
case object Thursday extends DayOfWeek
case object Friday extends DayOfWeek
case object Saturday extends DayOfWeek
case object Sunday extends DayOfWeek
|
vladimirk/butter4s
|
lang/src/butter4s/date/Year.scala
|
Scala
|
mit
| 3,035 |
/*
Copyright (c) 2014 by Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ml.dmlc.xgboost4j.scala.spark.params
import scala.collection.immutable.HashSet
import org.apache.spark.ml.param.{DoubleParam, IntParam, Param, Params}
trait LearningTaskParams extends Params {
/**
   * number of classes
*/
val numClasses = new IntParam(this, "num_class", "number of classes")
/**
* Specify the learning task and the corresponding learning objective.
* options: reg:linear, reg:logistic, binary:logistic, binary:logitraw, count:poisson,
* multi:softmax, multi:softprob, rank:pairwise, reg:gamma. default: reg:linear
*/
val objective = new Param[String](this, "objective", "objective function used for training," +
s" options: {${LearningTaskParams.supportedObjective.mkString(",")}",
(value: String) => LearningTaskParams.supportedObjective.contains(value))
/**
* the initial prediction score of all instances, global bias. default=0.5
*/
val baseScore = new DoubleParam(this, "base_score", "the initial prediction score of all" +
" instances, global bias")
/**
* evaluation metrics for validation data, a default metric will be assigned according to
* objective(rmse for regression, and error for classification, mean average precision for
* ranking). options: rmse, mae, logloss, error, merror, mlogloss, auc, ndcg, map, gamma-deviance
*/
val evalMetric = new Param[String](this, "eval_metric", "evaluation metrics for validation" +
" data, a default metric will be assigned according to objective (rmse for regression, and" +
" error for classification, mean average precision for ranking), options: " +
s" {${LearningTaskParams.supportedEvalMetrics.mkString(",")}}",
(value: String) => LearningTaskParams.supportedEvalMetrics.contains(value))
/**
* group data specify each group sizes for ranking task. To correspond to partition of
* training data, it is nested.
*/
val groupData = new Param[Seq[Seq[Int]]](this, "groupData", "group data specify each group size" +
" for ranking task. To correspond to partition of training data, it is nested.")
setDefault(objective -> "reg:linear", baseScore -> 0.5, numClasses -> 2, groupData -> null)
}
private[spark] object LearningTaskParams {
val supportedObjective = HashSet("reg:linear", "reg:logistic", "binary:logistic",
"binary:logitraw", "count:poisson", "multi:softmax", "multi:softprob", "rank:pairwise",
"reg:gamma")
val supportedEvalMetrics = HashSet("rmse", "mae", "logloss", "error", "merror", "mlogloss",
"auc", "ndcg", "map", "gamma-deviance")
}
|
RPGOne/Skynet
|
xgboost-master/jvm-packages/xgboost4j-spark/src/main/scala/ml/dmlc/xgboost4j/scala/spark/params/LearningTaskParams.scala
|
Scala
|
bsd-3-clause
| 3,136 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.config.base.traits
import com.datamountaineer.streamreactor.connect.config.base.const.TraitConfigConst.ALLOW_PARALLEL_WRITE_PROP_SUFFIX
trait AllowParallelizationSettings extends BaseSettings {
val allowParallelConstant: String = s"$connectorPrefix.$ALLOW_PARALLEL_WRITE_PROP_SUFFIX"
def getAllowParallel: java.lang.Boolean = getBoolean(allowParallelConstant)
}
|
datamountaineer/kafka-connect-common
|
src/main/scala/com/datamountaineer/streamreactor/connect/config/base/traits/AllowParallelizationSettings.scala
|
Scala
|
apache-2.0
| 1,026 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.util.concurrent.TimeUnit
import scala.annotation.meta.getter
import org.apache.spark.internal.config.ConfigBuilder
import org.apache.spark.util.kvstore.KVIndex
private[spark] object config {
/** Use this to annotate constructor params to be used as KVStore indices. */
type KVIndexParam = KVIndex @getter
val DEFAULT_LOG_DIR = "file:/tmp/spark-events"
val EVENT_LOG_DIR = ConfigBuilder("spark.history.fs.logDirectory")
.stringConf
.createWithDefault(DEFAULT_LOG_DIR)
val MAX_LOG_AGE_S = ConfigBuilder("spark.history.fs.cleaner.maxAge")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("7d")
val LOCAL_STORE_DIR = ConfigBuilder("spark.history.store.path")
.doc("Local directory where to cache application history information. By default this is " +
"not set, meaning all history information will be kept in memory.")
.stringConf
.createOptional
}
|
minixalpha/spark
|
core/src/main/scala/org/apache/spark/deploy/history/config.scala
|
Scala
|
apache-2.0
| 1,760 |
package org.oxyjen.test
import org.scalatest.{FlatSpec, ShouldMatchers}
abstract class AbstractUnitSpec extends FlatSpec with ShouldMatchers
|
skinny85/oxyjen
|
oxyjen-cli/src/test/scala/org/oxyjen/test/AbstractUnitSpec.scala
|
Scala
|
mit
| 143 |
/*
* Copyright 2012-2015 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.internal
import akka.testkit.TestActorRef
import com.comcast.money.test.AkkaTestJawn
import com.comcast.money.core.{ Span, SpanId }
import com.comcast.money.internal.EmitterProtocol.{ EmitMetricLong, EmitMetricDouble, EmitSpan }
import com.comcast.money.util.DateTimeUtil
import org.scalatest.WordSpecLike
import org.scalatest.mock.MockitoSugar
class EmitterSpec extends AkkaTestJawn with WordSpecLike with MockitoSugar {
"An Emitter" when {
val emitterBus = new EmitterBus()
val underTest = TestActorRef(new Emitter(emitterBus) with TestProbeMaker)
"sending a span message" should {
val data = Span(SpanId(1L), "record", "app", "host", 2L, true, 35L, Map())
val span = EmitSpan(data)
underTest ! span
"deliver the message to all children" in {
child(underTest, "graphite-emitter").expectMsg(span)
child(underTest, "log-emitter").expectMsg(span)
}
}
"sending a metric" should {
val metric = EmitMetricDouble("path", 1.0)
underTest ! metric
"only deliver the message to graphite" in {
child(underTest, "graphite-emitter").expectMsg(metric)
child(underTest, "log-emitter").expectNoMsg()
}
}
"sending a metric long" should {
val metric = EmitMetricLong("path", 2L)
underTest ! metric
"only deliver the message to graphite" in {
child(underTest, "graphite-emitter").expectMsg(metric)
child(underTest, "log-emitter").expectNoMsg()
}
}
"getting props" should {
val props = Emitter.props()
props.actorClass() shouldBe a[Class[Emitter]]
}
}
"Creating an EmitMetric instance" should {
"not divide the timestamp by 1000" in {
DateTimeUtil.timeProvider = () => 1000L
val em = EmitMetricDouble("path", 1.0)
em.timestamp shouldEqual 1000L
}
}
}
|
ipapa/money
|
money-core/src/test/scala/com/comcast/money/internal/EmitterSpec.scala
|
Scala
|
apache-2.0
| 2,511 |
package so.modernized.whip.sparql
import org.openanzo.client.IAnzoClient
import org.openanzo.glitter.query.PatternSolution
import org.openanzo.rdf.{URI => AnzoURI}
import scala.collection.JavaConverters._
class QueryIterator(anzo:IAnzoClient, datasets:Set[AnzoURI])(query:(Int, Int) => String, batchSize:Int=100) extends Iterator[Seq[PatternSolution]] {
private var cursor = 0
private var lastReturnedRows = batchSize
println("query iter inst")
override def hasNext: Boolean = lastReturnedRows == batchSize
override def next(): Seq[PatternSolution] = {
val q = query(cursor, batchSize)
println(q)
val res = anzo.serverQuery(null, null, datasets.asJava, q).getSelectResults.asScala
cursor += batchSize
lastReturnedRows = res.size
res
}
}
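// Hypothetical usage sketch (not part of the original file): paging through a SPARQL SELECT in
// fixed-size batches. The query template is made up; `anzo` and `datasets` mirror the constructor above.
object QueryIteratorExample {
  def page(anzo: IAnzoClient, datasets: Set[AnzoURI]): Iterator[Seq[PatternSolution]] =
    new QueryIterator(anzo, datasets)((offset, limit) =>
      s"SELECT ?s ?p ?o WHERE { ?s ?p ?o } OFFSET $offset LIMIT $limit")
}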
|
JackSullivan/whip
|
src/main/scala/so/modernized/whip/sparql/QueryIterator.scala
|
Scala
|
apache-2.0
| 777 |
package org.dagre.euler
import org.scalatest.{FlatSpec, Matchers}
class Euler30Spec extends FlatSpec with Matchers {
"Euler 30" should "return the sum of numbers for which the sum of their digits raised to the n is equal to the number itself" in {
Euler30(4) should be (1634 + 8208 + 9474)
}
it should "solve the problem for the given input" in {
Euler30(5) should be (443839)
}
"Upper bound for power" should "return the maximum number that can be a candidate for the problem" in {
// 6*(9^4) has 5 digits, therefore the maximum should be 5*(9^4)
Euler30.upperBoundForPower(4) should be ((5 * BigInt(9).pow(4)).toInt)
// 7*(9^5) has 6 digits, therefore the maximum should be 6*(9^5)
Euler30.upperBoundForPower(5) should be ((6 * BigInt(9).pow(5)).toInt)
}
}
|
dagre/euler
|
src/test/scala-2.12/org/dagre/euler/Euler30Spec.scala
|
Scala
|
apache-2.0
| 801 |
package coursera
import scala.concurrent.duration._
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import _root_.rx.lang.scala.Observable
import _root_.rx.lang.scala.Notification
object Utils {
/**
* Print an observable to stdout, blocking the calling thread.
*/
def displayObservable[T](o: Observable[T]): Unit = {
println()
toPrintableNotifications(o).toBlockingObservable.foreach(println(_))
println()
}
def toPrintableNotifications[T](o: Observable[T]): Observable[String] = {
val t0 = System.currentTimeMillis
for ((millis, notif) <- o.materialize.timestamp)
yield f"t = ${(millis-t0)/1000.0}%.3f: ${notificationToString(notif)}"
}
/**
* does what Notification.toString (or its subclasses) should do
*/
def notificationToString[T](n: Notification[T]): String = n match {
case Notification.OnNext(value) => s"OnNext($value)"
case Notification.OnError(err) => s"OnError($err)"
case Notification.OnCompleted(()) => "OnCompleted()"
}
implicit class ExtendedObservable[T](o: Observable[T]) {
def doOnEach(onNext: T => Unit): Observable[T] = {
val action: _root_.rx.util.functions.Action1[T] = _root_.rx.lang.scala.ImplicitFunctionConversions.scalaFunction1ProducingUnitToAction1(onNext)
// casting because Java Observable lacks "? super T"
val jObs: _root_.rx.Observable[T] = o.asJavaObservable.asInstanceOf[_root_.rx.Observable[T]].doOnEach(action)
Observable[T](jObs)
}
}
}
|
ericpony/scala-examples
|
reactive/src/test/scala/coursera/Utils.scala
|
Scala
|
mit
| 1,506 |
package fpinscala.errorhandling
import scala.{Option => _, Some => _, Either => _, _} // hide std library `Option`, `Some` and `Either`, since we are writing our own in this chapter
sealed trait Option[+A] {
def map[B](f: A => B): Option[B] = this match {
case Some(a) => Some(f(a))
case None => None
}
def getOrElse[B>:A](default: => B): B = this match {
case Some(a) => a
case None => default
}
def flatMap[B](f: A => Option[B]): Option[B] =
map(f).getOrElse(None)
def orElse[B>:A](ob: => Option[B]): Option[B] =
map (Some(_)) getOrElse ob
def filter(f: A => Boolean): Option[A] =
flatMap { a => if (f(a)) Some(a) else None }
}
case class Some[+A](get: A) extends Option[A]
case object None extends Option[Nothing]
object Option {
def failingFn(i: Int): Int = {
val y: Int = throw new Exception("fail!") // `val y: Int = ...` declares `y` as having type `Int`, and sets it equal to the right hand side of the `=`.
try {
val x = 42 + 5
x + y
}
catch { case e: Exception => 43 } // A `catch` block is just a pattern matching block like the ones we've seen. `case e: Exception` is a pattern that matches any `Exception`, and it binds this value to the identifier `e`. The match returns the value 43.
}
def failingFn2(i: Int): Int = {
try {
val x = 42 + 5
x + ((throw new Exception("fail!")): Int) // A thrown Exception can be given any type; here we're annotating it with the type `Int`
}
catch { case e: Exception => 43 }
}
def mean(xs: Seq[Double]): Option[Double] =
if (xs.isEmpty) None
else Some(xs.sum / xs.length)
def variance(xs: Seq[Double]): Option[Double] = {
mean(xs) flatMap { m =>
mean(xs map(x => math.pow(m - x, 2)))
}
}
def map2[A,B,C](oa: Option[A], ob: Option[B])(f: (A, B) => C): Option[C] =
// oa flatMap { a => ob map {b => f(a,b) } }
for {
a <- oa
b <- ob
} yield f(a,b)
def sequence[A](as: List[Option[A]]): Option[List[A]] = as match {
case Nil => Some(Nil)
case h :: t => h flatMap (a => sequence(t) map (a :: _))
}
// def sequence_1[A](a: List[Option[A]]): Option[List[A]] =
// a.foldRight[Option[List[A]]](Some(Nil))((x,y) => map2(x,y)(_ :: _))
def traverse[A, B](a: List[A])(f: A => Option[B]): Option[List[B]] = a match {
case Nil => Some(Nil)
case h :: t => map2(f(h), traverse(t)(f))(_ :: _)
}
}
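// Illustrative checks (not part of the original exercises): expected behaviour of sequence and
// traverse on this hand-rolled Option.
object OptionExamples {
  val allDefined: Option[List[Int]] = Option.sequence(List(Some(1), Some(2))) // Some(List(1, 2))
  val withNone: Option[List[Int]] = Option.sequence(List(Some(1), None))      // None
  val parsed: Option[List[Int]] = Option.traverse(List("1", "2"))(s => Some(s.toInt)) // Some(List(1, 2))
}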
|
fpinscala-muc/fpinscala-abo64
|
exercises/src/main/scala/fpinscala/errorhandling/Option.scala
|
Scala
|
mit
| 2,423 |
package jp.seraphr.collection.builder
import jp.seraphr.collection.wrapper.Wrapper
trait WrapperBuilder[_Elem, _Wrapper <: Wrapper[_Elem, _Wrapper]] extends Builder[_Elem, _Wrapper] {
}
|
seraphr/collection-wrapper
|
src/main/scala/jp/seraphr/collection/builder/WrapperBuilder.scala
|
Scala
|
bsd-2-clause
| 187 |
package com.tribbloids.spookystuff.uav.actions
import com.tribbloids.spookystuff.actions.Export
import com.tribbloids.spookystuff.doc.{Doc, DocOption, DocUID}
import com.tribbloids.spookystuff.session.{NoPythonDriverException, Session}
import com.tribbloids.spookystuff.uav.utils.UAVViews.SessionView
import org.apache.http.entity.ContentType
/**
* Mark current vehicle status
*/
case class Mark() extends Export with UAVAction {
override def doExeNoName(session: Session): Seq[DocOption] = {
try {
val exe = new SessionView(session)
val location = exe.link.status().currentLocation
val jsonStr = location.prettyJSON
Seq(
new Doc(
DocUID((session.backtrace :+ this).toList, this)(),
exe.link.uav.uris.head,
jsonStr.getBytes("UTF8"),
Some(s"${ContentType.APPLICATION_JSON}; charset=UTF-8")
))
} catch {
case e: NoPythonDriverException =>
Nil
}
}
}
|
tribbloid/spookystuff
|
uav/src/main/scala/com/tribbloids/spookystuff/uav/actions/Mark.scala
|
Scala
|
apache-2.0
| 966 |
package scheduler
import org.quartz.CronScheduleBuilder._
import org.quartz.JobBuilder.newJob
import org.quartz.TriggerBuilder._
import org.quartz.impl.StdSchedulerFactory
object Scheduler {
val scheduler = StdSchedulerFactory.getDefaultScheduler()
def printMessageDaily(hour: Int, minute: Int, message: String) {
val job = newJob(classOf[PrintMessage]).build()
job.getJobDataMap().put("message", message)
val trigger = newTrigger().withSchedule(dailyAtHourAndMinute(hour, minute)).build()
scheduler.scheduleJob(job, trigger)
}
}
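// Hypothetical usage sketch (not part of the original file): register a daily job and start the
// underlying Quartz scheduler (jobs only fire once it has been started). Time and message are made up.
object SchedulerExample {
  def main(args: Array[String]): Unit = {
    Scheduler.printMessageDaily(9, 30, "Daily stand-up in 15 minutes")
    Scheduler.scheduler.start()
  }
}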
|
hynek-jemelik/scheduler
|
app/scheduler/Scheduler.scala
|
Scala
|
gpl-3.0
| 555 |
/*******************************************************************************/
/* */
/* Copyright (C) 2017 by Max Lv <[email protected]> */
/* Copyright (C) 2017 by Mygod Studio <[email protected]> */
/* */
/* This program is free software: you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, either version 3 of the License, or */
/* (at your option) any later version. */
/* */
/* This program is distributed in the hope that it will be useful, */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */
/* GNU General Public License for more details. */
/* */
/* You should have received a copy of the GNU General Public License */
/* along with this program. If not, see <http://www.gnu.org/licenses/>. */
/* */
/*******************************************************************************/
package com.github.shadowsocks
import android.content.{BroadcastReceiver, Context, Intent}
import com.github.shadowsocks.utils.{TaskerSettings, Utils}
import com.github.shadowsocks.ShadowsocksApplication.app
/**
* @author CzBiX
*/
class TaskerReceiver extends BroadcastReceiver {
override def onReceive(context: Context, intent: Intent) {
val settings = TaskerSettings.fromIntent(intent)
app.profileManager.getProfile(settings.profileId) match {
case Some(_) => app.switchProfile(settings.profileId)
case _ =>
}
if (settings.switchOn) Utils.startSsService(context) else Utils.stopSsService(context)
}
}
|
hangox/shadowsocks-android
|
mobile/src/main/scala/com/github/shadowsocks/TaskerReceiver.scala
|
Scala
|
gpl-3.0
| 2,200 |
package com.danielwestheide.kontextfrei.scalatest
import org.apache.spark.rdd.RDD
import org.scalatest.enablers.Collecting
import org.scalatest.{Inspectors, PropSpec, PropSpecLike}
import org.scalatest.prop.GeneratorDrivenPropertyChecks
trait CollectingInstancesProperties[DColl[_]]
extends PropSpecLike
with GeneratorDrivenPropertyChecks
with KontextfreiSpec[DColl]
with CollectingInstances {
property("There is a Collecting instance for DCollection") {
forAll { (xs: List[String]) =>
val dcoll = ops.unit(xs)
Inspectors.forAll(dcoll) { x =>
assert(xs.contains(x))
}
}
}
property(
"Collecting nature of DCollection returns the original size of the input sequence") {
forAll { (xs: List[String]) =>
val dcoll = ops.unit(xs)
assert(
implicitly[Collecting[String, DColl[String]]]
.sizeOf(dcoll) === xs.size)
}
}
property(
"Collecting nature of DCollection returns the Some loneElement if input sequence has exactly one element") {
forAll { (x: String) =>
val dcoll = ops.unit(List(x))
assert(
implicitly[Collecting[String, DColl[String]]]
.loneElementOf(dcoll) === Some(x))
}
}
property(
"Collecting nature of DCollection returns the None as loneElement if input sequence as more than one element") {
forAll { (xs: List[String]) =>
whenever(xs.size > 1) {
val dcoll = ops.unit(xs)
assert(
implicitly[Collecting[String, DColl[String]]]
.loneElementOf(dcoll)
.isEmpty)
}
}
}
property(
"Collecting nature of DCollection returns the None as loneElement if input sequence is empty") {
val dcoll = ops.unit(List.empty[String])
assert(
implicitly[Collecting[String, DColl[String]]]
.loneElementOf(dcoll)
.isEmpty)
}
}
class CollectionInstancesStreamSpec
extends CollectingInstancesProperties[Stream]
with StreamSpec
class CollectionInstancesRDDSpec
extends CollectingInstancesProperties[RDD]
with RDDSpec
|
dwestheide/kontextfrei
|
scalatest/src/test/scala/com/danielwestheide/kontextfrei/scalatest/CollectingInstancesProperties.scala
|
Scala
|
apache-2.0
| 2,084 |
package views.html
import play.twirl.api._
import play.twirl.api.TemplateMagic._
import play.api.templates.PlayMagic._
import models._
import controllers._
import java.lang._
import java.util._
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import play.api.i18n._
import play.core.j.PlayMagicForJava._
import play.mvc._
import play.data._
import play.api.data.Field
import play.mvc.Http.Context.Implicit._
import views.html._
/**/
object main extends BaseScalaTemplate[play.twirl.api.HtmlFormat.Appendable,Format[play.twirl.api.HtmlFormat.Appendable]](play.twirl.api.HtmlFormat) with play.twirl.api.Template3[User,Html,Html,play.twirl.api.HtmlFormat.Appendable] {
/**/
def apply/*1.2*/(user: User = null, scripts: Html = Html(""))(content: Html):play.twirl.api.HtmlFormat.Appendable = {
_display_ {
Seq[Any](format.raw/*1.62*/("""
"""),format.raw/*3.1*/("""<!DOCTYPE html>
<html>
<head>
<title>"""),_display_(/*7.17*/Messages("title")),format.raw/*7.34*/("""</title>
<link rel="stylesheet" media="screen" href=""""),_display_(/*8.54*/routes/*8.60*/.Assets.at("stylesheets/bootstrap.css")),format.raw/*8.99*/("""">
<link rel="stylesheet" media="screen" href=""""),_display_(/*9.54*/routes/*9.60*/.Assets.at("stylesheets/main.css")),format.raw/*9.94*/("""">
<link rel="shortcut icon" type="image/png" href=""""),_display_(/*10.59*/routes/*10.65*/.Assets.at("images/favicon.png")),format.raw/*10.97*/("""">
<script src=""""),_display_(/*11.23*/routes/*11.29*/.Assets.at("javascripts/jquery/jquery-2.1.0.min.js")),format.raw/*11.81*/("""" type="text/javascript"></script>
<script src=""""),_display_(/*12.23*/routes/*12.29*/.Assets.at("javascripts/bootstrap.js")),format.raw/*12.67*/("""" type="text/javascript"></script>
<link rel="stylesheet" media="screen" href=""""),_display_(/*13.54*/routes/*13.60*/.Assets.at("stylesheets/font-awesome.min.css")),format.raw/*13.106*/("""">
"""),_display_(/*14.10*/scripts),format.raw/*14.17*/("""
"""),format.raw/*15.5*/("""</head>
<body>
<div ng-controller="MenuCtrl" class="navbar navbar-inverse navbar-default" role="navigation">
<div class="navbar-header">
<button type="button" class="navbar-toggle" data-toggle="collapse" data-target=".navbar-ex1-collapse">
<span class="sr-only">Toggle navigation</span>
<span class="fa fa-bars fa-lg fa-inverse"></span>
</button>
<a class="navbar-brand" href=""""),_display_(/*25.48*/routes/*25.54*/.Application.index()),format.raw/*25.74*/("""">
<i class="fa fa-rocket"></i> Project name
</a>
<ul class="nav navbar-nav navbar-right">
<li class=""><a href=""""),_display_(/*29.44*/routes/*29.50*/.Application.index()),format.raw/*29.70*/("""">Home</a></li>
</ul>
</div>
"""),_display_(/*32.14*/logged(user)),format.raw/*32.26*/("""
"""),format.raw/*33.9*/("""</div>
<div class="container">
<div class="row">
"""),_display_(/*37.18*/content),format.raw/*37.25*/("""
"""),format.raw/*38.13*/("""</div>
</div>
<hr>
<div class="footer text-center">
<div>
<small>
Hello! I'm your friendly footer. If you're actually reading this, I'm impressed....
<a href="https://github.com/yesnault/PlayStartApp">Fork me on Github</a> <i class="fa fa-github fa-1"></i> <a href="https://github.com/yesnault/PlayStartApp">https://github.com/yesnault/PlayStartApp</a>
</small>
</div>
</div>
</body>
</html>
"""))}
}
def render(user:User,scripts:Html,content:Html): play.twirl.api.HtmlFormat.Appendable = apply(user,scripts)(content)
def f:((User,Html) => (Html) => play.twirl.api.HtmlFormat.Appendable) = (user,scripts) => (content) => apply(user,scripts)(content)
def ref: this.type = this
}
/*
-- GENERATED --
DATE: Tue Mar 31 22:50:07 PDT 2015
SOURCE: /Users/admin/Box Documents/PlayStartApp/app/views/main.scala.html
HASH: 753e695af9ce5d7e51d9a4331d7a327196bfda59
MATRIX: 730->1|878->61|906->63|983->114|1020->131|1108->193|1122->199|1181->238|1263->294|1277->300|1331->334|1419->395|1434->401|1487->433|1539->458|1554->464|1627->516|1711->573|1726->579|1785->617|1900->705|1915->711|1983->757|2022->769|2050->776|2082->781|2601->1273|2616->1279|2657->1299|2870->1485|2885->1491|2926->1511|3023->1581|3056->1593|3092->1602|3206->1689|3234->1696|3275->1709
LINES: 26->1|29->1|31->3|35->7|35->7|36->8|36->8|36->8|37->9|37->9|37->9|38->10|38->10|38->10|39->11|39->11|39->11|40->12|40->12|40->12|41->13|41->13|41->13|42->14|42->14|43->15|53->25|53->25|53->25|57->29|57->29|57->29|60->32|60->32|61->33|65->37|65->37|66->38
-- GENERATED --
*/
|
stjokro/Project-1
|
target/scala-2.10/twirl/main/views/html/main.template.scala
|
Scala
|
bsd-3-clause
| 5,114 |
package wandou.math.classifier
/**
*
* @author Caoyuan Deng
*/
final case class AttributeValue[+T](value: T) extends Cloneable {
private var _count = 0
def count = _count
def increaseOne = {
_count += 1
this
}
def decreaseOne = {
_count -= 1
this
}
def reset = {
_count = 0
this
}
/**
   * This should not be an abstract method so that scalac knows it's an override of
* @cloneable instead of java.lang.Object#clone
*/
override def clone: AttributeValue[T] = { super.clone.asInstanceOf[AttributeValue[T]] }
}
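// Illustrative sketch (not part of the original file): counting how often a value is observed.
// The value below is made up.
object AttributeValueExample {
  val blue: AttributeValue[String] = AttributeValue("blue").increaseOne.increaseOne
  // blue.count == 2; blue.reset sets the counter back to 0
}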
|
wandoulabs/wandou-math
|
wandou-math/src/main/scala/wandou/math/classifier/AttributeValue.scala
|
Scala
|
apache-2.0
| 567 |
/*
* Copyright (c) 2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich
package hadoop
package shredder
// Jackson
import com.github.fge.jackson.JacksonUtils
import com.fasterxml.jackson.databind.JsonNode
// Scala
import scala.collection.JavaConversions._
// Scalaz
import scalaz._
import Scalaz._
// json4s
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
// Snowplow Common Enrich
import common._
// This project
import hadoop.utils.JsonUtils
/**
* Companion object contains helpers.
*/
object TypeHierarchy {
private val NodeFactory = JacksonUtils.nodeFactory()
}
/**
* Expresses the hierarchy of types for this type.
*/
case class TypeHierarchy(
val rootId: String,
val rootTstamp: String,
val refRoot: String,
val refTree: List[String],
val refParent: String
) {
/**
* Converts a TypeHierarchy into a JSON containing
* each element.
*
* @return the TypeHierarchy as a Jackson JsonNode
*/
def toJsonNode: JsonNode =
asJsonNode(this.toJValue)
/**
* Converts a TypeHierarchy into a JSON containing
* each element.
*
* @return the TypeHierarchy as a json4s JValue
*/
def toJValue: JValue =
("rootId" -> rootId) ~
("rootTstamp" -> rootTstamp) ~
("refRoot" -> refRoot) ~
("refTree" -> refTree) ~
("refParent" -> refParent)
/**
* Completes a partial TypeHierarchy with
* the supplied refTree elements, and uses
* the final refTree to replace the refParent
* too.
*
* @param refTree the rest of the type tree
* to append onto existing refTree
* @return the completed TypeHierarchy
*/
def complete(
refTree: List[String]): TypeHierarchy =
partialHierarchyLens.set(this, refTree)
/**
* A Scalaz Lens to complete the refTree within
* a TypeHierarchy object.
*/
private val partialHierarchyLens: Lens[TypeHierarchy, List[String]] =
Lens.lensu((ph, rt) => {
val full = ph.refTree ++ rt
ph.copy(
refTree = full,
refParent = secondTail(full)
)}, _.refTree
)
/**
* Get the last-but-one element ("tail-tail")
* from a list.
*
* @param ls The list to return the last-but-one
* element from
* @return the last-but-one element from this list
*/
private[shredder] def secondTail[A](ls: List[A]): A = ls match {
case h :: _ :: Nil => h
case _ :: tail => secondTail(tail)
case _ => throw new NoSuchElementException
}
}
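// Illustrative sketch (not part of the original Snowplow code): completing a partial hierarchy.
// The ids, timestamp and type names below are made up.
object TypeHierarchyExample {
  val partial = TypeHierarchy("evt-1", "2014-01-01 00:00:00", "events", List("events"), "events")
  val full = partial.complete(List("link_click"))
  // full.refTree == List("events", "link_click"); full.refParent == "events"
}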
|
wesley1001/snowplow
|
3-enrich/scala-hadoop-shred/src/main/scala/com.snowplowanalytics.snowplow.enrich/hadoop/shredder/TypeHierarchy.scala
|
Scala
|
apache-2.0
| 3,186 |
// Master.scala
//
//
package com.example.picalc
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import scala.concurrent.duration._
import scala.math.abs
object Master {
case object Calculate
case class Work(start: Int, numberOfElements: Int)
case class Result(value: Double)
}
class Master (listener: ActorRef ) extends Actor with ActorLogging {
import Master._
import Listener._
var pi: Double = _
val start: Long = System.currentTimeMillis
// Step length of the calculation
val numberOfElements: Int = 1000
var startElement: Int = 0
var previousPi: Double = 0.0
// Used to calculate the accuracy of the value of Pi
val error: Double = 0.0000000001
val worker = context.actorOf(Props[Worker], name = "worker")
def receive = {
case Calculate => worker ! Work(startElement, numberOfElements)
case Result(value) => pi += value
if( abs( previousPi - pi ) <= error ) {
log.info("Value of Pi found after:" + startElement)
listener ! PiApproximation(pi, duration = (System.currentTimeMillis - start).millis)
context.stop(self)
} else {
previousPi = pi
startElement += numberOfElements
worker ! Work( startElement, numberOfElements )
}
}
}
|
dtinblack/Scala-AKKACluster
|
PiSingleWorker/src/main/scala/Master.scala
|
Scala
|
mit
| 1,532 |
package chrome.tts.bindings
import scala.scalajs.js
object SpeakOptions {
def apply(enqueue: js.UndefOr[Boolean] = js.undefined,
voiceName: js.UndefOr[String] = js.undefined,
extensionId: js.UndefOr[String] = js.undefined,
lang: js.UndefOr[String] = js.undefined,
gender: js.UndefOr[TTSVoice.Gender] = js.undefined,
rate: js.UndefOr[Double] = js.undefined,
pitch: js.UndefOr[Double] = js.undefined,
volume: js.UndefOr[Double] = js.undefined,
requiredEventTypes: js.UndefOr[TTSEvent.Type] = js.undefined,
desiredEventTypes: js.UndefOr[TTSEvent.Type] = js.undefined,
onEvent: js.UndefOr[String] = js.undefined): SpeakOptions = {
js.Dynamic.literal(
enqueue = enqueue,
voiceName = voiceName,
extensionId = extensionId,
lang = lang,
gender = gender,
rate = rate,
pitch = pitch,
volume = volume,
requiredEventTypes = requiredEventTypes,
desiredEventTypes = desiredEventTypes,
onEvent = onEvent
).asInstanceOf[SpeakOptions]
}
}
class SpeakOptions extends js.Object {
val enqueue: js.UndefOr[Boolean] = js.native
val voiceName: js.UndefOr[String] = js.native
val extensionId: js.UndefOr[String] = js.native
val lang: js.UndefOr[String] = js.native
val gender: js.UndefOr[TTSVoice.Gender] = js.native
val rate: js.UndefOr[Double] = js.native
val pitch: js.UndefOr[Double] = js.native
val volume: js.UndefOr[Double] = js.native
val requiredEventTypes: js.UndefOr[TTSEvent.Type] = js.native
val desiredEventTypes: js.UndefOr[TTSEvent.Type] = js.native
val onEvent: js.UndefOr[String] = js.native
}
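// Hypothetical usage sketch (not part of the original bindings): building a SpeakOptions literal
// with a few fields set; the values are made up.
object SpeakOptionsExample {
  val opts: SpeakOptions = SpeakOptions(rate = 1.2, pitch = 0.9, enqueue = true)
}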
|
amsayk/scala-js-chrome
|
bindings/src/main/scala/chrome/tts/bindings/SpeakOptions.scala
|
Scala
|
mit
| 1,715 |
package fpinscala.laziness
import Stream._
trait Stream[+A] {
// The natural recursive solution
def toListRecursive: List[A] = this match {
case Cons(h,t) => h() :: t().toListRecursive
case _ => List()
}
/*
The above solution will stack overflow for large streams, since it's
not tail-recursive. Here is a tail-recursive implementation. At each
step we cons onto the front of the `acc` list, which will result in the
reverse of the stream. Then at the end we reverse the result to get the
correct order again.
[:ben] are the line breaks above okay? I'm unclear on whether these "hints" are supposed to go in the book or not
*/
def toList: List[A] = {
@annotation.tailrec
def go(s: Stream[A], acc: List[A]): List[A] = s match {
case Cons(h,t) => go(t(), h() :: acc)
case _ => acc
}
go(this, List()).reverse
}
/*
In order to avoid the `reverse` at the end, we could write it using a
mutable list buffer and an explicit loop instead. Note that the mutable
list buffer never escapes our `toList` method, so this function is
still _pure_.
*/
def toListFast: List[A] = {
val buf = new collection.mutable.ListBuffer[A]
@annotation.tailrec
def go(s: Stream[A]): List[A] = s match {
case Cons(h,t) =>
buf += h()
go(t())
case _ => buf.toList
}
go(this)
}
/*
`take` first checks if n==0. In that case we need not look at the stream at all.
*/
def take(n: Int): Stream[A] =
if (n > 0) this match {
case Cons(h, t) => cons(h(), t().take(n-1))
case _ => Stream.empty // we can say Stream.empty
}
else Stream() // or Stream()
/*
Unlike `take`, `drop` is not incremental. That is, it doesn't generate the
answer lazily. It must traverse the first `n` elements of the stream eagerly.
*/
def drop(n: Int): Stream[A] = {
@annotation.tailrec
def go(s: Stream[A], n: Int): Stream[A] =
if (n <= 0) s
else s match {
case Cons(h,t) => go(t(), n-1)
case _ => Stream()
}
go(this, n)
}
/*
It's a common Scala style to write method calls without `.` notation, as in `t() takeWhile f`.
*/
def takeWhile(f: A => Boolean): Stream[A] = this match {
case Cons(h,t) if f(h()) => cons(h(), t() takeWhile f)
case _ => empty
}
def foldRight[B](z: => B)(f: (A, => B) => B): B = // The arrow `=>` in front of the argument type `B` means that the function `f` takes its second argument by name and may choose not to evaluate it.
this match {
case Cons(h,t) => f(h(), t().foldRight(z)(f)) // If `f` doesn't evaluate its second argument, the recursion never occurs.
case _ => z
}
def exists(p: A => Boolean): Boolean =
foldRight(false)((a, b) => p(a) || b) // Here `b` is the unevaluated recursive step that folds the tail of the stream. If `p(a)` returns `true`, `b` will never be evaluated and the computation terminates early.
/*
Since `&&` is non-strict in its second argument, this terminates the traversal as soon as a nonmatching element is found.
*/
def forAll(f: A => Boolean): Boolean =
foldRight(true)((a,b) => f(a) && b)
def takeWhile_1(f: A => Boolean): Stream[A] =
foldRight(empty[A])((h,t) =>
if (f(h)) cons(h,t)
else empty)
def headOption: Option[A] =
foldRight(None: Option[A])((h,_) => Some(h))
def map[B](f: A => B): Stream[B] =
foldRight(empty[B])((h,t) => cons(f(h), t))
def filter[B](f: A => Boolean): Stream[A] =
foldRight(empty[A])((h,t) =>
if (f(h)) cons(h, t)
else t)
def append[B>:A](s: => Stream[B]): Stream[B] =
foldRight(s)((h,t) => cons(h,t))
def flatMap[B](f: A => Stream[B]): Stream[B] =
foldRight(empty[B])((h,t) => f(h) append t)
def mapViaUnfold[B](f: A => B): Stream[B] =
unfold(this) {
case Cons(h,t) => Some((f(h()), t()))
case _ => None
}
def takeViaUnfold(n: Int): Stream[A] =
unfold((this,n)) {
case (Cons(h,t), n) if n > 0 => Some((h(), (t(), n-1)))
case _ => None
}
def takeWhileViaUnfold(f: A => Boolean): Stream[A] =
unfold(this) {
case Cons(h,t) if f(h()) => Some((h(), t()))
case _ => None
}
def zipWith[B,C](s2: Stream[B])(f: (A,B) => C): Stream[C] =
unfold((this, s2)) {
case (Cons(h1,t1), Cons(h2,t2)) =>
Some((f(h1(), h2()), (t1(), t2())))
case _ => None
}
// special case of `zip`
def zip[B](s2: Stream[B]): Stream[(A,B)] =
zipWith(s2)((_,_))
def zipAll[B](s2: Stream[B]): Stream[(Option[A],Option[B])] =
zipWithAll(s2)((_,_))
/*
There are a number of edge cases with this function. We can deal with some of these edge cases by treating each stream as an infinite series of `Option` values, which become `None` when the stream is exhausted.
*/
def zipWithAll[B,C](s2: Stream[B])(f: (Option[A],Option[B]) => C): Stream[C] = {
val a = this map (Some(_)) append (constant(None))
val b = s2 map (Some(_)) append (constant(None))
unfold((a, b)) {
case (Empty, Empty) => None
case (s1, s2) => for {
h1 <- s1.headOption
h2 <- s2.headOption
} yield (f(h1,h2), (s1 drop 1, s2 drop 1))
}
}
/*
`s startsWith s2` when corresponding elements of `s` and `s2` are all equal, until the point that `s2` is exhausted. If `s` is exhausted first, or we find an element that doesn't match, we terminate early. Using non-strictness, we can compose these three separate logical steps--the zipping, the termination when the second stream is exhausted, and the termination if a nonmatching element is found or the first stream is exhausted.
*/
def startsWith[A](s: Stream[A]): Boolean =
zipAll(s).takeWhile(!_._2.isEmpty) forAll {
case (h,h2) => h == h2
}
/*
The last element of `tails` is always the empty `Stream`, so we handle this as a special case, by appending it to the output.
*/
def tails: Stream[Stream[A]] =
unfold(this) {
case Empty => None
case s => Some((s, s drop 1))
} append (Stream(empty))
def hasSubsequence[A](s: Stream[A]): Boolean =
tails exists (_ startsWith s)
/*
The function can't be implemented using `unfold`, since `unfold` generates elements of the `Stream` from left to right. It can be implemented using `foldRight` though.
The implementation is just a `foldRight` that keeps the accumulated value and the stream of intermediate results, which we `cons` onto during each iteration. When writing folds, it's common to have more state in the fold than is needed to compute the result. Here, we simply extract the accumulated list once finished.
*/
def scanRight[B](z: B)(f: (A,=>B) => B): Stream[B] =
foldRight((z, Stream(z)))((a,p) => {
val b2 = f(a,p._1)
(b2, cons(b2,p._2))
})._2
@annotation.tailrec
final def find(f: A => Boolean): Option[A] = this match {
case Empty => None
case Cons(h, t) => if (f(h())) Some(h()) else t().find(f)
}
}
case object Empty extends Stream[Nothing]
case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A]
object Stream {
def cons[A](hd: => A, tl: => Stream[A]): Stream[A] = {
lazy val head = hd
lazy val tail = tl
Cons(() => head, () => tail)
}
def empty[A]: Stream[A] = Empty
def apply[A](as: A*): Stream[A] =
if (as.isEmpty) empty
else cons(as.head, apply(as.tail: _*))
val ones: Stream[Int] = Stream.cons(1, ones)
// This is more efficient than `cons(a, constant(a))` since it's just
// one object referencing itself.
def constant[A](a: A): Stream[A] = {
lazy val tail: Stream[A] = Cons(() => a, () => tail)
tail
}
def from(n: Int): Stream[Int] =
cons(n, from(n+1))
val fibs = {
def go(f0: Int, f1: Int): Stream[Int] =
cons(f0, go(f1, f0+f1))
go(0, 1)
}
def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] =
f(z) match {
case Some((h,s)) => cons(h, unfold(s)(f))
case None => empty
}
/*
Scala provides shorter syntax when the first action of a function literal is to match on an expression. The function passed to `unfold` in `fibsViaUnfold` is equivalent to `p => p match { case (f0,f1) => ... }`, but we avoid having to choose a name for `p`, only to pattern match on it.
*/
val fibsViaUnfold =
unfold((0,1)) { case (f0,f1) => Some((f0,(f1,f0+f1))) }
def fromViaUnfold(n: Int) =
unfold(n)(n => Some((n,n+1)))
def constantViaUnfold[A](a: A) =
unfold(a)(_ => Some((a,a)))
// could also of course be implemented as constant(1)
val onesViaUnfold = unfold(1)(_ => Some((1,1)))
}
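// Illustrative checks (not part of the original answers): laziness lets us take a finite prefix
// of an infinite stream.
object StreamExamples {
  val firstTen: List[Int] = Stream.from(1).take(10).toList   // List(1, 2, ..., 10)
  val fibs5: List[Int] = Stream.fibsViaUnfold.take(5).toList // List(0, 1, 1, 2, 3)
}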
|
fpinscala-muc/fpinscala-cdanning
|
answers/src/main/scala/fpinscala/laziness/Stream.scala
|
Scala
|
mit
| 8,713 |
package com.bigchange.config
/**
* Created by C.J.YOU on 2016/1/18.
* 本地目录配置文件
*/
object FileConfig {
val ROOT_DIR = "/home/telecom/data"
val test_dir = "F:\\\\datatest\\\\telecom"
}
|
bigchange/AI
|
src/main/scala/com/bigchange/config/FileConfig.scala
|
Scala
|
apache-2.0
| 208 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon) and Zenexity
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package reactivemongo.api.gridfs
import java.io._
import java.util.Arrays
import play.api.libs.iteratee._
import reactivemongo.api._
import reactivemongo.bson._
import reactivemongo.core.commands.LastError
import reactivemongo.utils._
import scala.concurrent.{ ExecutionContext, Future }
import reactivemongo.core.netty.ChannelBufferWritableBuffer
import reactivemongo.api.collections.GenericCollectionProducer
import reactivemongo.api.collections.GenericCollection
object `package` {
private[gridfs] val logger = LazyLogger("reactivemongo.api.gridfs")
}
object Implicits {
/** A default `BSONReader` for `ReadFile`. */
implicit object DefaultReadFileReader extends BSONDocumentReader[ReadFile[BSONValue]] {
import DefaultBSONHandlers._
def read(doc: BSONDocument) = {
DefaultReadFile(
doc.getAs[BSONValue]("_id").get,
doc.getAs[BSONString]("contentType").map(_.value),
doc.getAs[BSONString]("filename").map(_.value).get,
doc.getAs[BSONNumberLike]("uploadDate").map(_.toLong),
doc.getAs[BSONNumberLike]("chunkSize").map(_.toInt).get,
doc.getAs[BSONNumberLike]("length").map(_.toInt).get,
doc.getAs[BSONString]("md5").map(_.value),
doc.getAs[BSONDocument]("metadata").getOrElse(BSONDocument()),
doc)
}
}
}
/** Metadata that cannot be customized. */
trait ComputedMetadata {
/** Length of the file. */
def length: Int
/** Size of the chunks of this file. */
def chunkSize: Int
/** MD5 hash of this file. */
def md5: Option[String]
}
/**
* Common metadata.
* @tparam Id Type of the id of this file (generally `BSONObjectID` or `BSONValue`).
*/
trait BasicMetadata[+Id <: BSONValue] {
/** Id of this file. */
def id: Id
/** Name of this file. */
def filename: String
/** Date when this file was uploaded. */
def uploadDate: Option[Long]
/** Content type of this file. */
def contentType: Option[String]
}
/** Custom metadata (generic trait) */
trait CustomMetadata {
/** A BSONDocument holding all the metadata that are not standard. */
def metadata: BSONDocument
}
/**
* A file that will be saved in a GridFS store.
* @tparam Id Type of the id of this file (generally `BSONObjectID` or `BSONValue`).
*/
trait FileToSave[+Id <: BSONValue] extends BasicMetadata[Id] with CustomMetadata
/** A default implementation of `FileToSave[BSONValue]`. */
case class DefaultFileToSave(
filename: String,
contentType: Option[String] = None,
uploadDate: Option[Long] = None,
metadata: BSONDocument = BSONDocument(),
id: BSONValue = BSONObjectID.generate) extends FileToSave[BSONValue]
/**
* A file read from a GridFS store.
* @tparam Id Type of the id of this file (generally `BSONObjectID` or `BSONValue`).
*/
trait ReadFile[+Id <: BSONValue] extends BasicMetadata[Id] with CustomMetadata with ComputedMetadata
/** A default implementation of `ReadFile[BSONValue]`. */
case class DefaultReadFile(
id: BSONValue,
contentType: Option[String],
filename: String,
uploadDate: Option[Long],
chunkSize: Int,
length: Int,
md5: Option[String],
metadata: BSONDocument,
original: BSONDocument) extends ReadFile[BSONValue]
/**
* A GridFS store.
* @param db The database where this store is located.
* @param prefix The prefix of this store. The `files` and `chunks` collections will be actually named `prefix.files` and `prefix.chunks`.
*/
class GridFS[Structure, Reader[_], Writer[_]](db: DB with DBMetaCommands, prefix: String = "fs")(implicit producer: GenericCollectionProducer[Structure, Reader, Writer, GenericCollection[Structure, Reader, Writer]] = collections.default.BSONCollectionProducer) {
import indexes._
import IndexType._
/** The `files` collection */
def files[C <: Collection](implicit collProducer: CollectionProducer[C] = producer) = db(prefix + ".files")(collProducer)
/** The `chunks` collection */
def chunks[C <: Collection](implicit collProducer: CollectionProducer[C] = producer) = db(prefix + ".chunks")(collProducer)
/**
* Finds the files matching the given selector.
*
* @param selector The document to select the files to return
*
* @tparam S The type of the selector document. An implicit `Writer[S]` must be in the scope.
*/ // TODO More generic deserializers ?
def find[S, T <: ReadFile[_]](selector: S)(implicit sWriter: Writer[S], readFileReader: Reader[T], ctx: ExecutionContext): Cursor[T] = {
files.find(selector).cursor
}
/**
* Saves the content provided by the given enumerator with the given metadata.
*
* @param enumerator Producer of content.
* @param file Metadata of the file to store.
* @param chunkSize Size of the chunks. Defaults to 256kB.
*
* @return A future of a ReadFile[Id].
*/
def save[Id <: BSONValue](enumerator: Enumerator[Array[Byte]], file: FileToSave[Id], chunkSize: Int = 262144)(implicit readFileReader: Reader[ReadFile[Id]], ctx: ExecutionContext): Future[ReadFile[Id]] = {
(enumerator |>>> iteratee(file, chunkSize)).flatMap(f => f)
}
import reactivemongo.api.collections.default.{ BSONCollection, BSONCollectionProducer }
/**
* Gets an `Iteratee` that will consume data to put into a GridFS store.
* @param file Metadata of the file to store.
* @param chunkSize Size of the chunks. Defaults to 256kB.
*
* @tparam Id Type of the id of this file (generally `BSONObjectID` or `BSONValue`).
*
* @return An `Iteratee` that will consume data to put into a GridFS store.
*/
def iteratee[Id <: BSONValue](file: FileToSave[Id], chunkSize: Int = 262144)(implicit readFileReader: Reader[ReadFile[Id]], ctx: ExecutionContext): Iteratee[Array[Byte], Future[ReadFile[Id]]] = {
implicit val ec = db.connection.actorSystem
case class Chunk(
previous: Array[Byte] = new Array(0),
n: Int = 0,
md: java.security.MessageDigest = java.security.MessageDigest.getInstance("MD5"),
length: Int = 0) {
def feed(chunk: Array[Byte]): Future[Chunk] = {
val wholeChunk = concat(previous, chunk)
val normalizedChunkNumber = wholeChunk.length / chunkSize
logger.debug("wholeChunk size is " + wholeChunk.length + " => " + normalizedChunkNumber)
val zipped = for (i <- 0 until normalizedChunkNumber) yield Arrays.copyOfRange(wholeChunk, i * chunkSize, (i + 1) * chunkSize) -> i
val left = Arrays.copyOfRange(wholeChunk, normalizedChunkNumber * chunkSize, wholeChunk.length)
Future.traverse(zipped) { ci =>
writeChunk(n + ci._2, ci._1)
}.map { _ =>
logger.debug("all futures for the last given chunk are redeemed.")
Chunk(
if (left.isEmpty) Array.empty else left,
n + normalizedChunkNumber,
md, //{ md.update(chunk) ; md },
length + chunk.length)
}
}
def finish(): Future[ReadFile[Id]] = {
import DefaultBSONHandlers._
logger.debug("writing last chunk (n=" + n + ")!")
val uploadDate = file.uploadDate.getOrElse(System.currentTimeMillis)
writeChunk(n, previous).flatMap { f =>
val bson = BSONDocument(
"_id" -> file.id.asInstanceOf[BSONValue],
"filename" -> BSONString(file.filename),
"chunkSize" -> BSONInteger(chunkSize),
"length" -> BSONInteger(length),
"uploadDate" -> BSONDateTime(uploadDate),
"contentType" -> file.contentType.map(BSONString(_)),
"metadata" -> option(!file.metadata.isEmpty, file.metadata))
files[BSONCollection].insert(bson).map(_ =>
files(producer).BufferReaderInstance(readFileReader).read(
files[BSONCollection].StructureBufferWriter.write(bson, ChannelBufferWritableBuffer()).toReadableBuffer))
}
}
def writeChunk(n: Int, array: Array[Byte]) = {
logger.debug("writing chunk " + n)
val bson = {
import DefaultBSONHandlers._
BSONDocument(
"files_id" -> file.id.asInstanceOf[BSONValue],
"n" -> BSONInteger(n),
"data" -> BSONBinary(array, Subtype.GenericBinarySubtype))
}
chunks[BSONCollection].insert(bson)
}
}
Iteratee.foldM(Chunk()) { (previous, chunk: Array[Byte]) =>
logger.debug("processing new enumerated chunk from n=" + previous.n + "...\\n")
previous.feed(chunk)
}.map(_.finish)
}
/** Produces an enumerator of chunks of bytes from the `chunks` collection matching the given file metadata. */
def enumerate(file: ReadFile[_ <: BSONValue])(implicit ctx: ExecutionContext): Enumerator[Array[Byte]] = {
import DefaultBSONHandlers._
val selector = BSONDocument(
"$query" -> BSONDocument(
"files_id" -> file.id,
"n" -> BSONDocument(
"$gte" -> BSONInteger(0),
"$lte" -> BSONInteger(file.length / file.chunkSize + (if (file.length % file.chunkSize > 0) 1 else 0)))),
"$orderby" -> BSONDocument(
"n" -> BSONInteger(1)))
val cursor = chunks[BSONCollection].find(selector).cursor
cursor.enumerate() &> Enumeratee.map { doc =>
doc.get("data").flatMap {
case BSONBinary(data, _) => {
val array = new Array[Byte](data.readable)
data.slice(data.readable).readBytes(array)
Some(array)
}
case _ => None
}.getOrElse {
logger.error("not a chunk! failed assertion: data field is missing")
throw new RuntimeException("not a chunk! failed assertion: data field is missing")
}
}
}
/** Reads the given file and writes its contents to the given OutputStream */
def readToOutputStream(file: ReadFile[_ <: BSONValue], out: OutputStream)(implicit ctx: ExecutionContext): Future[Unit] = {
enumerate(file) |>>> Iteratee.foreach { chunk =>
out.write(chunk)
}
}
/** Writes the data provided by the given InputStream to the given file. */
def writeFromInputStream[Id <: BSONValue](file: FileToSave[Id], input: InputStream, chunkSize: Int = 262144)(implicit readFileReader: Reader[ReadFile[Id]], ctx: ExecutionContext): Future[ReadFile[Id]] = {
save(Enumerator.fromStream(input, chunkSize), file)
}
/**
* Removes a file from this store.
   * Note that if the file does not actually exist, the returned future will not hold an error.
*
* @param file The file entry to remove from this store.
*/
def remove[Id <: BSONValue](file: BasicMetadata[Id])(implicit ctx: ExecutionContext): Future[LastError] = remove(file.id)
/**
* Removes a file from this store.
   * Note that if the file does not actually exist, the returned future will not hold an error.
*
* @param id The file id to remove from this store.
*/
def remove(id: BSONValue)(implicit ctx: ExecutionContext): Future[LastError] = {
import DefaultBSONHandlers._
chunks[BSONCollection].remove(BSONDocument("files_id" -> id)).flatMap { _ =>
files[BSONCollection].remove(BSONDocument("_id" -> id))
}
}
/**
   * Creates the required index on the `chunks` collection, if it does not already exist.
*
* Please note that you should really consider reading [[http://www.mongodb.org/display/DOCS/Indexes]] before doing this, especially in production.
*
* @return A future containing true if the index was created, false if it already exists.
*/
def ensureIndex()(implicit ctx: ExecutionContext): Future[Boolean] =
db.indexesManager.onCollection(prefix + ".chunks").ensure(Index(List("files_id" -> Ascending, "n" -> Ascending), unique = true))
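  // Illustrative call (the `gridFS` value is an assumption for the example):
  //   gridFS.ensureIndex().foreach(created => logger.info(s"chunks index created: $created"))
  // Calling it repeatedly is harmless; the future simply yields false once the index exists.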
}
object GridFS {
def apply[Structure, Reader[_], Writer[_]](db: DB with DBMetaCommands, prefix: String = "fs")(implicit producer: GenericCollectionProducer[Structure, Reader, Writer, GenericCollection[Structure, Reader, Writer]] = collections.default.BSONCollectionProducer) =
new GridFS(db, prefix)(producer)
}
|
qubell/ReactiveMongo
|
driver/src/main/scala/api/gridfs.scala
|
Scala
|
apache-2.0
| 12,508 |
package com.rasterfoundry.database
import com.rasterfoundry.datamodel._
import com.rasterfoundry.common.Generators.Implicits._
import com.rasterfoundry.common.SceneWithProjectIdLayerId
import com.rasterfoundry.database.Implicits._
import doobie._, doobie.implicits._
import cats.implicits._
import doobie.postgres.implicits._
import org.scalacheck.Prop.forAll
import org.scalatest._
import org.scalatestplus.scalacheck.Checkers
class SceneToLayerDaoSpec
extends FunSuite
with Matchers
with Checkers
with DBTestConfig
with PropTestHelpers {
test("Insert scenes to a project and accept them") {
check {
forAll {
(user: User.Create,
org: Organization.Create,
platform: Platform,
project: Project.Create,
scenes: List[Scene.Create],
dsCreate: Datasource.Create) =>
{
val acceptedSceneAndStlIO = for {
(dbUser, _, _, dbProject) <- insertUserOrgPlatProject(user,
org,
platform,
project)
datasource <- DatasourceDao.create(dsCreate.toDatasource(dbUser),
dbUser)
scenesInsert <- (scenes map {
fixupSceneCreate(dbUser, datasource, _)
}).traverse(
(scene: Scene.Create) => SceneDao.insert(scene, dbUser)
)
_ <- ProjectDao.addScenesToProject(scenesInsert map { _.id },
dbProject.id,
dbProject.defaultLayerId,
false)
acceptedSceneCount <- SceneToLayerDao.acceptScenes(
dbProject.defaultLayerId,
scenesInsert map { _.id })
stls <- SceneToLayerDao.query
.filter(fr"project_layer_id = ${dbProject.defaultLayerId}")
.list
} yield (acceptedSceneCount, stls)
val (acceptedSceneCount, stls) =
acceptedSceneAndStlIO.transact(xa).unsafeRunSync
acceptedSceneCount == scenes.length &&
stls.length == scenes.length &&
stls.filter(_.accepted).length == scenes.length
}
}
}
}
test("Verify scenes are returned in correct order for mosaic definition") {
check {
forAll {
(user: User.Create,
org: Organization.Create,
platform: Platform,
project: Project.Create,
scenes: List[Scene.Create],
dsCreate: Datasource.Create) =>
{
val mdAndStpsIO = for {
(dbUser, _, _, dbProject) <- insertUserOrgPlatProject(user,
org,
platform,
project)
datasource <- DatasourceDao.create(dsCreate.toDatasource(dbUser),
dbUser)
scenesInsert <- (scenes map {
fixupSceneCreate(dbUser, datasource, _)
}).traverse(
(scene: Scene.Create) => SceneDao.insert(scene, dbUser)
)
selectedSceneIds = scenesInsert.take(2) map { _.id }
_ <- ProjectDao.addScenesToProject(scenesInsert map { _.id },
dbProject.id,
dbProject.defaultLayerId,
false)
_ <- SceneToLayerDao.setManualOrder(dbProject.id,
dbProject.defaultLayerId,
scenesInsert map { _.id })
mds <- SceneToLayerDao
.getMosaicDefinition(dbProject.defaultLayerId,
None,
sceneIdSubset = selectedSceneIds)
.compile
.to[List]
stls <- SceneToLayerDao.query
.filter(fr"project_layer_id = ${dbProject.defaultLayerId}")
.filter(selectedSceneIds.toNel map {
Fragments.in(fr"scene_id", _)
})
.list
} yield (mds, stls, selectedSceneIds)
val (mds, stls, _) =
mdAndStpsIO.transact(xa).unsafeRunSync
// Mapping of scene ids to scene order
val sceneMap =
stls.map(s => (s.sceneId, s.sceneOrder.getOrElse(-1))).toMap
// List of scene orders, ordered by the mosaic definitions
val sceneOrders = mds.map(md => sceneMap.getOrElse(md.sceneId, -1))
// If the scenes are returned in the correct order,
// the scene orders of the mosaic definitions will be in order
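          // e.g. with sceneMap = Map(a -> 0, b -> 1, c -> 2) and mds returned as [a, b, c],
          // sceneOrders is List(0, 1, 2), which equals its sorted form; an out-of-order result
          // such as [b, a, c] would yield List(1, 0, 2) and fail the check below.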
sceneOrders.sameElements(sceneOrders.sorted)
}
}
}
}
test("Get layer ID and project ID of a scene") {
check {
forAll {
(user: User.Create,
org: Organization.Create,
platform: Platform,
projectCreate: Project.Create,
scene: Scene.Create,
dsCreate: Datasource.Create) =>
{
val sceneLayerProjectIO
: ConnectionIO[(Scene.WithRelated,
List[SceneWithProjectIdLayerId],
Project)] = for {
(dbUser, _, _, dbProject) <- insertUserOrgPlatProject(
user,
org,
platform,
projectCreate)
datasource <- DatasourceDao.create(dsCreate.toDatasource(dbUser),
dbUser)
sceneInsert <- SceneDao.insert(fixupSceneCreate(dbUser,
datasource,
scene),
dbUser)
_ <- ProjectDao.addScenesToProject(List(sceneInsert.id),
dbProject.id,
dbProject.defaultLayerId,
true)
slp <- SceneToLayerDao.getProjectsAndLayersBySceneId(
sceneInsert.id)
} yield { (sceneInsert, slp, dbProject) }
val (sceneInsert, slp, dbProject) =
sceneLayerProjectIO.transact(xa).unsafeRunSync
slp.toSet == Set(
SceneWithProjectIdLayerId(sceneInsert.id,
dbProject.id,
dbProject.defaultLayerId))
}
}
}
}
}
|
azavea/raster-foundry
|
app-backend/db/src/test/scala/com/azavea/rf/database/SceneToLayerDaoSpec.scala
|
Scala
|
apache-2.0
| 7,062 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//import scala.sys.process.processInternal.File
// comment
import java.io.{ByteArrayInputStream, File, PrintWriter}
import java.nio.file.Files
import java.util.Date
import cmwell.ctrl.client.CtrlClient
import cmwell.ctrl.hc.{ActiveNodes, ClusterStatus}
import cmwell.util.build.BuildInfo
import k.grid.{GridConnection, Grid => AkkaGrid}
import org.apache.commons.io.FileUtils
import scala.collection.{GenSeq, GenSet}
import scala.collection.parallel.{ParMap, ParSeq}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.io.Source
import scala.language.postfixOps
import scala.sys.process._
import scala.util.parsing.json._
import scala.util.{Failure, Success, Try}
//todo: make sure that some applications are installed.
trait Info {
def info(msg: String) = println(s"Info: $msg")
}
object ResourceBuilder {
def getIndexedName(name: String, index: Int): String = {
index match {
case 1 => name
case _ => s"$name$index"
}
}
private def replaceTemplates(text: String, templates: Map[String, String]): String =
"""\\{\\{([^{}]*)\\}\\}""".r replaceSomeIn(text, {
case scala.util.matching.Regex.Groups(name) => templates get name
})
def getResource(path: String, map: Map[String, String]): String = {
val fileContent = Source.fromFile(path).mkString
replaceTemplates(fileContent, map)
}
}
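// Illustrative use of the template substitution above (path and keys are made up for the example):
//   ResourceBuilder.getResource("scripts/templates/some.conf", Map("user" -> "u"))
// returns the file's text with every "{{user}}" placeholder replaced by "u"; placeholders that
// have no matching key in the map are left untouched.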
abstract class ModuleLock(checkCount: Int = 50) extends Info {
val delay = 5
def name: String
def com(host: String): Try[String]
def continueCondition(v: String, waitForModuleFor: Int): Boolean
  private var prevRes = "UNLIKELY RES"
def fail = {
println("failed to check " + name)
throw new Exception("failed to check " + name)
}
def waitForModule(host: String, waitForModuleFor: Int, tries: Int = checkCount): Boolean = {
if (tries == 0) {
fail
false
} else {
val res = com(host)
res match {
case Success(v) =>
if (continueCondition(v.trim, waitForModuleFor)) {
Thread.sleep(delay * 1000)
val t = if (prevRes != v.trim) {
prevRes = v.trim
if (v.trim.length < 40)
info(s" $name in progress (${v.trim})")
else
info(s" $name in progress")
checkCount
} else tries - 1
waitForModule(host, waitForModuleFor, t)
}
case Failure(e) =>
Thread.sleep(delay * 1000)
waitForModule(host, waitForModuleFor, tries - 1)
}
true
}
}
def waitForModuleIndefinitely(host: String, waitForModuleFor: Int = 0): Boolean = {
val res = com(host)
res match {
case Success(v) =>
if (continueCondition(v.trim, waitForModuleFor)) {
if (v.trim.length < 40)
info(s" $name in progress (${v.trim})")
else
info(s" $name in progress")
Thread.sleep(delay * 1000)
waitForModuleIndefinitely(host, waitForModuleFor)
}
case Failure(e) =>
Thread.sleep(delay * 1000)
waitForModuleIndefinitely(host, waitForModuleFor)
}
true
}
}
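// A sketch of a concrete lock, to illustrate the polling contract (the names are made up):
// waitForModule re-runs `com` every `delay` seconds, resets its retry budget whenever the
// observed value changes, and returns once `continueCondition` becomes false.
//
//   case class ExampleServiceLock() extends ModuleLock {
//     def name = "Example service boot"
//     def com(host: String) = Try("3")                                   // e.g. nodes seen so far
//     def continueCondition(v: String, waitFor: Int) = v.toInt < waitFor // keep waiting until enough
//   }
//   // ExampleServiceLock().waitForModule("node01", waitForModuleFor = 3) returns true immediately,
//   // while waitForModuleFor = 5 would keep polling and eventually fail once the retries run out.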
case class BatchResult(name: String, indexed: BigInt, fileSize: BigInt)
case class BatchQuery(name: String, head: String, log: String)
case class HostBatchStatus(name: String, br: ParSeq[BatchResult])
//List("/mnt/d1/cas", "/mnt/d1/cas2", "/mnt/d1/cas3", "/mnt/d1/cas4")
case class DataDirs(casDataDirs: GenSeq[String],
casCommitLogDirs: GenSeq[String],
esDataDirs: GenSeq[String],
tlogDataDirs: GenSeq[String],
kafkaDataDirs: GenSeq[String],
zookeeperDataDir: String,
logsDataDir: String)
//case class DataDirs(m : Map[String,String])
case class InstDirs(intallationDir: String = "~/cm-well-new/cm-well", globalLocation: String = "/opt")
case class HaProxy(host: String, sitedown: String = "cm-well:8080")
object Host {
var connectedToAkkaGrid = false
def createHostsNames(name: String, fromIndex: Int, toIndex: Int): List[String] = {
val digitNum = toIndex.toString.length
val range = fromIndex to toIndex
range.toList.map(index => s"${name}%0${digitNum}d".format(index))
}
def ctrl = cmwell.ctrl.client.CtrlClient
def getIndexTxt(moduleIndex: Int) = {
if (moduleIndex == 1) "" else s"$moduleIndex"
}
}
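// Worked example for createHostsNames: Host.createHostsNames("node", 8, 12) yields
// List("node08", "node09", "node10", "node11", "node12"); the index is zero-padded to the width
// of the highest index (two digits for 12).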
trait OsType
case object Oracle extends OsType
case object Ubuntu extends OsType
abstract class Host(user: String,
password: String,
ipMappings: IpMappings,
size: Int,
inet: String,
val cn: String,
val dc: String,
dataDirs: DataDirs,
instDirs: InstDirs,
wsPerMachine: Int,
allocationPlan: ModuleAllocations,
useAuthorization: Boolean,
deployJava: Boolean,
production: Boolean,
su: Boolean,
ctrlService: Boolean = false,
minMembers: Option[Int] = None,
haProxy: Option[HaProxy],
withElk: Boolean = false,
val withZkKfk: Boolean = false,
val withOldBg: Boolean = true,
isDebug:Boolean = false) {
var sudoerCredentials: Option[Credentials] = None
def getUser = user
def getIpMappings = ipMappings
def getInet = inet
def getDataDirs = dataDirs
def getInstDirs = instDirs
def getAllocationPlan = allocationPlan
def getUseAuthorization = useAuthorization
def getDeployJava = deployJava
def getProduction = production
def getSu = su
def getCtrlSerice = ctrlService
def getHaProxy = haProxy
def getWithElk = withElk
/*
var useAuthorization = false
var deployJava = false
var production = false
var devMode = false
*/
def getMinMembers = minMembers.getOrElse(ips.size / 2 + 1)
val esRegPort = 9201
val esMasterPort = 9200
def currentDir = command("pwd").get
def getOs(host: String): OsType = {
val osStr = command("""cat /etc/*-release""", host, false) match {
case Success(str) => str.trim
case Failure(err) => "oracle"
}
osStr match {
case str: String if str.toLowerCase().contains("ubuntu") => Ubuntu
case str: String if str.toLowerCase().contains("oracle") => Oracle
case str: String => Oracle
}
}
def cssh = {
checkProduction
Future {
command(s"cssh --username $user ${ips.mkString(" ")}")
}
}
def jstat = {
ips.par.map { ip =>
ip -> command(s"ps aux | grep java | egrep -v 'starter|grep' | awk '{print $$2}' | xargs -I zzz ${getInstDirs.globalLocation}/cm-well/app/java/bin/jstat -gcutil zzz", ip, false).map(_.trim)
}.toMap
}
def jstat(comp: String) = {
ips.par.map { ip =>
ip -> command(s"ps aux | grep java | egrep -v 'starter|grep' | grep $comp | awk '{print $$2}' | xargs -I zzz ${getInstDirs.globalLocation}/cm-well/app/java/bin/jstat -gcutil zzz", ip, false).map(_.trim)
}.toMap
}
def jstat(comp: String, ip: String): Unit = {
ParMap(ip -> command(s"ps aux | grep java | egrep -v 'starter|grep' | grep $comp | awk '{print $$2}' | xargs -I zzz ${getInstDirs.globalLocation}/cm-well/app/java/bin/jstat -gcutil zzz", ip, false).map(_.trim))
}
private val componentToJmxMapping = Map(
"ws" -> PortManagers.ws.jmxPortManager.initialPort,
"batch" -> PortManagers.batch.jmxPortManager.initialPort,
"ctrl" -> PortManagers.ctrl.jmxPortManager.initialPort,
"dc" -> PortManagers.dc.jmxPortManager.initialPort
)
def jconsole(component: String, dualmonitor: Boolean, host1: String, hosts: String*): Unit = jconsole(component, dualmonitor, Seq(host1) ++ hosts)
def jconsole(component: String, dualmonitor: Boolean = false, hosts: GenSeq[String] = ips): Unit = {
if (!dualmonitor) {
val com = hosts.map(h => s"$h:${componentToJmxMapping(component)}").mkString(" ")
info(com)
Future {
command(s"jconsole $com")
}
} else {
val (hosts1, hosts2) = hosts.splitAt(hosts.size / 2)
val com1 = hosts1.map(h => s"$h:${componentToJmxMapping(component)}").mkString(" ")
val com2 = hosts2.map(h => s"$h:${componentToJmxMapping(component)}").mkString(" ")
info(com1)
info(com2)
Future {
command(s"jconsole $com1")
}
Future {
command(s"jconsole $com2")
}
}
}
def dcSync(remoteHost: String, dc: String): Unit = {
command(s"""curl -XPOST "http://${ips(0)}:9000/meta/sys/dc/$dc" -H "X-CM-Well-Type:Obj" -H "Content-Type:application/json" --data-binary '{"type":"remote" , "location" : "$remoteHost" , "id" : "$dc"}'""")
}
def ips = ipMappings.getIps
def getSize = size
def createFile(path: String, content: String, hosts: GenSeq[String] = ips, sudo: Boolean = false, sudoer: Option[Credentials] = None) {
if (sudo)
command(s"""echo -e '$content' | sudo tee $path > /dev/null""", hosts, true, sudoer)
else
command(s"""echo $$'$content' > $path""", hosts, false)
}
val shipperConfLocation = s"${instDirs.globalLocation}/cm-well/conf/logstash"
val logstashJarLocation = s"${instDirs.globalLocation}/cm-well/app/logstash"
val logstashConfName = "logstash.conf"
val logstashJarName = "logstash-1.2.2-flatjar.jar"
def addLogstash(esHost: String, hosts: GenSeq[String] = ips) {
createLogstashConfFile(esHost, hosts)
deployLogstash(hosts)
startSendingLogsToLogstash(hosts)
}
def createLogstashConfFile(esHost: String, hosts: GenSeq[String] = ips) {
val str = genLogstashConfFile(esHost, Map("BU" -> "TMS", "serviceID" -> "cm-well", "environmentID" -> "cm-well", "appID" -> "cm-well", "cluster" -> cn))
command(s"mkdir -p $shipperConfLocation", hosts, false)
createFile(s"$shipperConfLocation/$logstashConfName", str, hosts)
}
def deployLogstash(hosts: GenSeq[String] = ips) {
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/app/logstash", hosts, false)
rsync(s"components-extras/$logstashJarName", logstashJarLocation, hosts)
val startFile = s"java -jar $logstashJarName agent -f $shipperConfLocation/$logstashConfName > /dev/null 2> /dev/null &"
createFile(s"$logstashJarLocation/start.sh", startFile, hosts)
command(s"cd $logstashJarLocation; chmod +x start.sh", hosts, false)
}
def startSendingLogsToLogstash(hosts: GenSeq[String] = ips) {
command(s"cd $logstashJarLocation; ./start.sh", hosts, false)
}
def stopSendingLogsToLogstash(hosts: GenSeq[String] = ips) {
killProcess("logstash", "")
}
def genLogstashConfFile(esHost: String, globalFields: Map[String, String]): String = LogstashConf.genLogstashConfFile(cn, esHost, globalFields, s"${instDirs.globalLocation}/cm-well/log", dataDirs.esDataDirs.size)
private def resolveIndex(index: Int): String = {
index match {
case 1 => ""
case _ => s"${index}"
}
}
object tests {
def loadTest(port: Int = 9000, host: String = ips(0), amount: Int = 1000000, path: String = "/test/lorem") {
val com = Seq("java", "-jar", "components/lorem-ipsum-agent-executable.jar", "-hosts", host, "-p", path, "-a", "5", "-v", "3", "-n", s"$amount", "--port", s"$port").run
val startTime: Long = System.currentTimeMillis / 1000
var currentAmount = 0
while (currentAmount < amount) {
currentAmount = (Seq("curl", "-s", s"http://$host:$port$path?format=atom&op=search&length=0") #| Seq("grep", "-o", "<os:totalResults>[0-9]*</os:totalResults>") #| Seq("grep", "-o", "[0-9]*") !!).trim.toInt
val timePassed = (System.currentTimeMillis / 1000 - startTime)
println(s"infotons uploaded: $currentAmount, time passed: ${Math.round(timePassed / 60)}:${timePassed % 60}")
Thread.sleep(5000)
}
}
def printBatchStatus {
val results = batchStatus
results.toList.foreach {
hostRes =>
println(s"---------------------------------------${hostRes.name}---------------------------------------")
hostRes.br.toList.foreach {
res =>
println(res.name)
if (res.indexed == res.fileSize) {
println("Nothing to proccess")
} else {
println(s"${res.fileSize - res.indexed} bytes to index")
}
}
}
}
def batchStatus: ParSeq[HostBatchStatus] = {
val batchQuries = List(BatchQuery("Imp", "imp_UpdateTLog", "UpdateTLog_updatesPar"), BatchQuery("Indexer", "indexer_UuidsTLogupdatesPar", "UuidsTLog_uuidsPar"))
val y = ips.par.map {
host =>
val res = batchQuries.par.map { query =>
//val res = command( """cd """ + instDirs.globalLocation + """/cm-well/data/tlog/data/tlog ; cat """ + query.head + """*; printf "," ;ls -alF | grep """ + query.log + """ | awk '{print $5}'""", host, false)
val res = command( """cd """ + instDirs.globalLocation + """/cm-well/data/tlog ; cat """ + query.head + """*; printf "," ;ls -alF | grep """ + query.log + """ | awk '{print $5}'""", host, false)
val x = res match {
case Success(str) =>
val t = Try {
val vals = str.split("\\n")(0).split(",")
BatchResult(query.name, BigInt(vals(0)), BigInt(vals(1)))
}
t match {
case Success(br) => br
case Failure(ex) => BatchResult(s"${query.name} - problem!!", BigInt(1), BigInt(0))
}
case Failure(ex) =>
throw new Exception("can't execute command on this host")
}
x
}
HostBatchStatus(host, res)
}
y
}
def infotonChangeAvg: BigDecimal = infotonChangeAvg()
def infotonChangeAvg(checkCount: Int = 5): BigDecimal = {
val checks = (1 to checkCount).toList
val res = checks.map { x => Thread.sleep(1000); infotonCount }
val x = (res.drop(1) zip res.dropRight(1)).map { item => item._1 - item._2 }
val y = x.foldRight[BigDecimal](0)(_ + _) / checkCount
y
}
def infotonCount: BigDecimal = {
val res = command(s"curl -s ${pingAddress}:$esRegPort/_status", ips(0), false).get
val json = JSON.parseFull(res).get.asInstanceOf[Map[String, Any]]
BigDecimal(json("indices").asInstanceOf[Map[String, Any]]("cmwell0_current").asInstanceOf[Map[String, Any]]("docs").asInstanceOf[Map[String, Any]]("num_docs").toString)
}
def getCpuUsage: List[Double] = {
// top -bn 2 -d 0.01 | grep '^%Cpu' | tail -n 1 | awk '{print $2+$4+$6}'
val ret1 = ips.par.map {
ip =>
val res = command("top -bn 2 -d 0.01 | grep '^%Cpu' | tail -n 1 | awk '{print $2+$4+$6}'", ip, false)
val ret2 = res match {
case Success(str) => str.toDouble
case Failure(err) => -1.0d
}
ret2
}
ret1.toList
}
}
val deployment = new Deployment(this)
  //FIXME: was an object inside a class, which caused this nifty, hard-to-track exception:
/*
* java.lang.NoSuchFieldError: LogLevel$module
* at Host.LogLevel(ctl.scala:415)
* at Main$$anon$1.<init>(scalacmd4970030751979185144.scala:10)
* at Main$.main(scalacmd4970030751979185144.scala:1)
* at Main.main(scalacmd4970030751979185144.scala)
* at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
* at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
* at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
* at java.lang.reflect.Method.invoke(Method.java:497)
* at scala.reflect.internal.util.ScalaClassLoader$$anonfun$run$1.apply(ScalaClassLoader.scala:70)
* at scala.reflect.internal.util.ScalaClassLoader$class.asContext(ScalaClassLoader.scala:31)
* at scala.reflect.internal.util.ScalaClassLoader$URLClassLoader.asContext(ScalaClassLoader.scala:101)
* at scala.reflect.internal.util.ScalaClassLoader$class.run(ScalaClassLoader.scala:70)
* at scala.reflect.internal.util.ScalaClassLoader$URLClassLoader.run(ScalaClassLoader.scala:101)
* at scala.tools.nsc.CommonRunner$class.run(ObjectRunner.scala:22)
* at scala.tools.nsc.ObjectRunner$.run(ObjectRunner.scala:39)
* at scala.tools.nsc.CommonRunner$class.runAndCatch(ObjectRunner.scala:29)
* at scala.tools.nsc.ObjectRunner$.runAndCatch(ObjectRunner.scala:39)
* at scala.tools.nsc.ScriptRunner.scala$tools$nsc$ScriptRunner$$runCompiled(ScriptRunner.scala:175)
* at scala.tools.nsc.ScriptRunner$$anonfun$runCommand$1.apply(ScriptRunner.scala:222)
* at scala.tools.nsc.ScriptRunner$$anonfun$runCommand$1.apply(ScriptRunner.scala:222)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1$$anonfun$apply$mcZ$sp$1.apply(ScriptRunner.scala:161)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1.apply$mcZ$sp(ScriptRunner.scala:161)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1.apply(ScriptRunner.scala:129)
* at scala.tools.nsc.ScriptRunner$$anonfun$withCompiledScript$1.apply(ScriptRunner.scala:129)
* at scala.tools.nsc.util.package$.trackingThreads(package.scala:43)
* at scala.tools.nsc.util.package$.waitingForThreads(package.scala:27)
* at scala.tools.nsc.ScriptRunner.withCompiledScript(ScriptRunner.scala:128)
* at scala.tools.nsc.ScriptRunner.runCommand(ScriptRunner.scala:222)
* at scala.tools.nsc.MainGenericRunner.run$1(MainGenericRunner.scala:85)
* at scala.tools.nsc.MainGenericRunner.process(MainGenericRunner.scala:98)
* at scala.tools.nsc.MainGenericRunner$.main(MainGenericRunner.scala:103)
* at scala.tools.nsc.MainGenericRunner.main(MainGenericRunner.scala)
*
* changed it to an anon val, which is ugly, but works.
* this is a TEMPORARY PATCH!
* @michaelirzh : please refactor!!!
*/
val LogLevel = new {
def warn = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.warn)
def error = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.error)
def info = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.info)
def debug = deployment.componentProps.collect { case lc: LoggingComponent => lc }.foreach(lc => lc.LogLevel.debug)
}
private val jwt = sys.env.getOrElse("PUSER_TOKEN", "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiJwVXNlciIsImV4cCI6NDYzODkwMjQwMDAwMCwicmV2IjoxfQ.j-tJCGnWHbJ-XAUJ1wyHxMlnMaLvO6IO0fKVjsXOzYM")
private val rootDigest = sys.env.getOrElse("ROOT_DIGEST", "$2a$10$MKrHtymBOyfE67dZnbEdeOEB336uOXwYetVU28djINKjUTs2da6Km")
private val rootDigest2 = sys.env.getOrElse("ROOT_DIGEST2", "199245447fd82dd38f84c000da94cf1d")
var verbose = false
var deb = isDebug
def debug: Boolean = deb
def debug_=(v: Boolean) = {
deb = v
println("The ports are:\\nws: 5010\\nbatch: 5009\\nctrl: 5011\\ncw: 5012\\ndc: 5013\\nbg: 5014")
}
var doInfo = true
def info(msg: String) = if (doInfo) println(s"Info: $msg")
def warn(msg: String) = {
println(s"Warning: $msg")
}
def warnPrompt = {
println("Warning: Are you sure you want to continue: (yes/no)")
val ln = scala.io.StdIn.readLine()
if (ln != "yes") {
throw new Exception("You chose to not continue the process.")
}
}
def getMode: String
def getCassandraHostIDs(host: String): String
def getElasticsearchMasters: Int
def isSu = su
def help = println(Source.fromFile("readme").mkString)
//def hosts = ips.map(ip => s"${user}@${ip}")
def getSeedNodes: List[String]
def javaPath = s"${instDirs.globalLocation}/cm-well/app/java/bin"
def utilsPath = s"${instDirs.globalLocation}/cm-well/bin/utils"
def homeBinPath = "~/bin"
def path: String = s"$javaPath:$utilsPath:$homeBinPath:$$PATH"
private def ipsToSsh(u: String = user, ips: GenSeq[String]) = ips.map(ip => if (ip.indexOf("@") == -1) s"${u}@${ip}" else ip)
private def timeStamp = System.currentTimeMillis / 1000
private var lastProdCheckTimeStamp = 0L
private def checkProduction {
val interval = 60 * 60
if (production && (timeStamp - lastProdCheckTimeStamp > interval)) {
println("This is a production cluster. Are you sure you want to do this operation: (yes/no)")
val ln = scala.io.StdIn.readLine()
if (ln != "yes") {
throw new Exception("This operation is not allowed on a production environment. Please remove: production = true from this cluster's definition file")
} else {
lastProdCheckTimeStamp = timeStamp
}
}
}
// var intallationDir = "~/cm-well-new/cm-well"
// var globalLocation = "/opt"
case class Credentials(name: String, pass: String)
def gainTrust: Unit = gainTrust()
def gainTrust(u: String = user, p: String = "", hosts: GenSeq[String] = ips) {
val sshLocation = s"${sys.env("HOME")}/.ssh"
val privateKey = s"$sshLocation/id_rsa"
val publicKey = s"$privateKey.pub"
val hasPrivateKey = new java.io.File(privateKey).exists
val hasPublicKey = new java.io.File(publicKey).exists
val hasKey = hasPrivateKey && hasPublicKey
if (!hasKey) {
info(" key not found, generating install key.")
//ssh-keygen asks for overwrite in case the private key exists, so deleting before.
Seq("bash", "-c", s"rm -f $privateKey; ssh-keygen -q -t rsa -b 4096 -N '' -C '' -f $privateKey").!!
}
val pass = if (p != "") p else scala.io.StdIn.readLine(s"Please enter password for $u\\n")
val sshHosts = ipsToSsh(u, hosts)
sshHosts.seq.foreach { sshHost => Seq("ssh-keygen", "-R", sshHost).!! }
sshHosts.foreach { sshHost =>
val cmd = Seq("bash", "-c", s"read PASS; ${UtilCommands.sshpass} -p $$PASS ssh-copy-id -i $privateKey -o StrictHostKeyChecking=no $sshHost")
if (verbose) println("command: " + cmd.mkString(" "))
(s"echo -e -n $pass\\\\n" #| cmd).!!
}
}
def refreshUserState(user: String, sudoer: Option[Credentials], hosts: GenSeq[String] = ips): Unit = {
// temp disabled for OSX till new cons available...
val pubKeyOpt = sys.env.get("SSH_DEV_KEY")
if(!UtilCommands.isOSX && pubKeyOpt.isDefined) {
val pubKey = pubKeyOpt.get
val userSshDir = s"/home/$user/.ssh"
val rootSshDir = "/root/.ssh"
val fileName = "authorized_keys"
val rootVarMap = Map("STR" -> pubKey, "DIR" -> rootSshDir, "FILE" -> fileName)
val cmdTemplate = "%smkdir -p $DIR; %ssed -i -e '\\\\$a\\\\' $DIR/$FILE 2> /dev/null; %sgrep -q '$STR' $DIR/$FILE 2> /dev/null || echo -e '$STR' | %stee -a $DIR/$FILE > /dev/null"
val rootCmd = cmdTemplate.format(Seq.fill(4)("sudo "): _ *)
val userCmd = cmdTemplate.format(Seq.fill(4)(""): _ *)
sudoer.foreach(_ => command(rootCmd, hosts, sudo = true, sudoer, rootVarMap))
val userVarMap = Map("STR" -> pubKey, "DIR" -> userSshDir, "FILE" -> fileName)
command(userCmd, hosts, sudo = false, sudoer = None, variables = userVarMap)
//add the file that removes the annoying ssh log in message
command("touch ~/.hushlogin", hosts, sudo = false)
}
}
//def gainTrustNoPass(u : String = user , p : String = "", hosts : GenSeq[String] = ips.par)
def validateNumberOfMasterNodes(num: Int, size: Int): Boolean = (num % 2) == 1 && num <= size && num >= 3
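  // e.g. validateNumberOfMasterNodes(3, 5) is true, whereas an even count such as 4, or 3 masters
  // on a 2-node cluster, is rejected.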
def absPath(path: String) = Seq("bash", "-c", s"cd ${path}; pwd").!!.replace("\\n", "")
val nodeToolLocation = s"${instDirs.globalLocation}/cm-well/app/cas/cur/bin/nodetool"
def nodeToolPath = nodeToolLocation
def pingAddress = ips(0)
def esHealthAddress = ":9200/_cluster/health?pretty=true"
var mappingFile = "mapping.json"
def cassandraStatus(host: String): Try[String] = {
command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep UN | wc -l", host, false).map(_.trim)
}
case class CassandraLock() extends ModuleLock {
def name: String = "Cassandra boot"
def com(host: String): Try[String] = cassandraStatus(host)
def continueCondition(v: String, waitFor: Int): Boolean = v.toInt < waitFor
}
case class CassandraDNLock() extends ModuleLock {
def name: String = "CassandraDownNodes counter"
def com(host: String): Try[String] = command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep DN | wc -l", host, false)
def continueCondition(v: String, waitFor: Int): Boolean = v.toInt < waitFor
}
case class ElasticsearchLock(checkHost: String = ips(0)) extends ModuleLock {
def name: String = "Elasticsearch boot"
def com(host: String): Try[String] = {
val r = command("curl -sX GET http://" + checkHost + esHealthAddress, host, false)
r match {
case Success(v) =>
Try(JSON.parseFull(v.trim).get.asInstanceOf[Map[String, Any]]("number_of_nodes").toString.trim.split('.')(0))
case Failure(e) => Failure(e)
}
}
def continueCondition(v: String, waitFor: Int): Boolean = v.trim.toInt < waitFor
}
case class ElasticsearchStatusLock(colors: String*) extends ModuleLock {
override val delay = 20
def name: String = s"Waiting for Elasticsearch ${colors.mkString(", ")} status"
def com(host: String): Try[String] = elasticsearchStatus(host)
def continueCondition(v: String, waitFor: Int): Boolean = !colors.contains(v.toLowerCase)
}
case class NoKnownHostsLock() extends ModuleLock {
override def name: String = "Updating known hosts"
override def continueCondition(v: String, waitForModuleFor: Int): Boolean = v.contains("known-cmwell-hosts")
override def com(host: String): Try[String] = command("curl -sX GET http://" + host + ":9000/meta/sys?format=ntriples")
}
def webServerStatus(host: String) = command("curl -Is http://" + host + ":9000/ | head -1")
case class WebServiceLock() extends ModuleLock {
def name: String = "Web Service boot"
def com(host: String): Try[String] = webServerStatus(host)
def continueCondition(v: String, waitFor: Int): Boolean = !v.contains("200") && !v.contains("404") && !v.contains("503")
}
val dataInitializer = new DataInitializer(this, jwt, rootDigest, rootDigest2)
def shutDownDataInitializer() = dataInitializer.shutdownMaterializerAndActorSystem()
implicit class StringExtensions(s: String) {
def takeRightWhile(p: (Char) => Boolean): String = s.takeRight(s.reverseIterator.takeWhile(p).length)
}
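  // e.g. "node17".takeRightWhile(_.isDigit) == "17" (an illustrative call: only the trailing run
  // of characters satisfying the predicate is kept).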
def createCassandraRackProperties(hosts: GenSeq[String] = ips.par) {
hosts.zipWithIndex.foreach {
ip =>
val content = s"dc=DC1\\nrack=RAC${ip._2 + 1}"
command(s"""echo "$content" > ${instDirs.globalLocation}/cm-well/conf/cas/cassandra-rackdc.properties""", ip._1, false)
for (i <- 2 to dataDirs.casDataDirs.size)
command(s"""echo "$content" > ${instDirs.globalLocation}/cm-well/conf/cas$i/cassandra-rackdc.properties""", ip._1, false)
}
}
def createUser(user: String = "u", pass: String = "said2000", hosts: GenSeq[String] = ips.par, sudoer: Credentials) {
command(s"sudo useradd $user", hosts, true, Some(sudoer))
command(s"echo '$user:$$USERPASS' | sudo chpasswd", hosts, true, Some(sudoer), Map("USERPASS" -> pass))
}
def sudoComm(com: String) = s"""sudo bash -c \\"\\"\\"${com}\\"\\"\\""""
def elasticsearchStatus(host: String) = {
val r = command("curl -sX GET http://" + ips(0) + esHealthAddress, host, false)
r match {
case Success(v) =>
Try(JSON.parseFull(v.trim).get.asInstanceOf[Map[String, Any]]("status").toString.trim.split('.')(0))
case Failure(e) => Failure(e)
}
}
def command(com: String, hosts: GenSeq[String], sudo: Boolean): GenSeq[Try[String]] = {
command(com, hosts, sudo, None)
}
def command(com: String, hosts: GenSeq[String], sudo: Boolean, sudoer: Option[Credentials]): GenSeq[Try[String]] = {
hosts.map {
host =>
command(com, host, sudo, sudoer)
}
}
def command(com: String, hosts: GenSeq[String], sudo: Boolean, sudoer: Option[Credentials], variables: Map[String, String]): GenSeq[Try[String]] = {
hosts.map(host => command(com, host, sudo, sudoer, variables))
}
def command(com: String, host: String, sudo: Boolean): Try[String] = {
command(com, host, sudo, None)
}
def command(com: String, host: String, sudo: Boolean, sudoer: Option[Credentials]): Try[String] = {
command(com, host, sudo, sudoer, Map[String, String]())
}
def command(com: String, host: String, sudo: Boolean, sudoer: Option[Credentials], variables: Map[String, String]): Try[String] = {
if (sudo && isSu && sudoer.isEmpty) throw new Exception(s"Sudoer credentials must be available in order to use sudo")
if (!ips.contains(host) && host != haProxy.map(x => x.host).getOrElse("")) throw new Exception(s"The host $host is not part of this cluster")
val (readVarsLine, varValues) = variables.fold(("", "")) {
case ((readVarsStr, varValuesForEcho), (varName, value)) =>
(s"$readVarsStr read $varName;", s"$varValuesForEcho$value\\\\n")
}
val (commandLine, process) = if (sudo && isSu) {
val cmd = s"""ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR ${sudoer.get.name}@$host export PATH=$path;$readVarsLine read PASS; sshpass -p $$PASS bash -c "${escapedCommand(com)}"""" //old version that get stuck sometimes - val command = s"""ssh -o StrictHostKeyChecking=no ${sudoer.get.name}@$host bash -c $$'{ export PATH=$path; read PASS; ./sshpass -p $$PASS bash -c "${escapedCommand(com)}"; }'"""
(cmd, s"echo -e -n $varValues${sudoer.get.pass}\\\\n" #| cmd)
} else {
if (variables.nonEmpty) {
val cmd = s"""ssh -o StrictHostKeyChecking=no -o LogLevel=ERROR $user@$host export PATH=$path;$readVarsLine bash -c "${escapedCommand(com)}""""
(cmd, s"echo -e -n $varValues" #| cmd)
}
else {
val cmd = Seq("ssh", "-o", "StrictHostKeyChecking=no", "-o", "LogLevel=ERROR", s"$user@$host", s"PATH=$path $com")
(cmd.mkString(" "), Process(cmd))
}
}
if (verbose) println("command: " + commandLine)
Try(process.!!)
}
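  // Note on the `variables` mechanism above: each map entry turns into a remote `read VAR`, and
  // its value is piped in through the local `echo -e`, so the value is never embedded in the ssh
  // command string itself; see the chpasswd call in createUser above for a real use of it.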
private def escapedCommand(cmd: String) = cmd.replace("\\"", "\\\\\\"") // old version for $'..' bash string: cmd.replace("\\"", "\\\\\\\\\\"").replace("'", "\\\\'")
def command(com: String, sudo: Boolean = false): Try[String] = {
if (sudo && isSu)
Try(sudoComm(com).!!)
else {
val seq = Seq("bash", "-c", com)
if (verbose) println("command: " + seq.mkString(" "))
Try(seq.!!)
}
}
def rsync(from: String, to: String, hosts: GenSeq[String], sudo: Boolean = false): GenSeq[Try[String]] = {
val h = hosts.map(host => if (host.indexOf("@") == -1) s"${user}@${host}" else host)
h.map {
host =>
_rsync(from, to, host, sudo = sudo)
}
}
def _rsync(from: String, to: String, host: String, tries: Int = 10, sudo: Boolean): Try[String] = {
val seq = Seq("rsync", "-Paz", "--delete", from, host + ":" + to)
if (verbose) println("command: " + seq.mkString(" "))
val res = Try(seq.!!)
res match {
case Success(r) => res
case Failure(err) => if (tries == 0) res else _rsync(from, to, host, tries - 1, sudo)
}
}
def removeDataDirs: Unit = removeDataDirs()
def removeDataDirs(i: GenSeq[String] = ips.par) {
command(s"rm -rf ${instDirs.intallationDir}", i, false)
dataDirs.casDataDirs.foreach {
cas => command(s"rm -rf ${cas}", i, false)
}
dataDirs.casCommitLogDirs.foreach {
ccl => command(s"rm -rf ${ccl}", i, false)
}
dataDirs.esDataDirs.foreach {
es => command(s"rm -rf ${es}", i, false)
}
dataDirs.tlogDataDirs.foreach {
tlog =>
command(s"rm -rf $tlog", i, false)
}
command(s"rm -rf ${dataDirs.logsDataDir}", i, false)
}
def createDataDirs(): Unit = createDataDirs(ips.par)
def createDataDirs(hosts: GenSeq[String]) {
info("creating data directories")
info(" creating installation directory")
command(s"mkdir -p ${instDirs.intallationDir}/", hosts, false)
deployment.componentProps.collect { case cp: DataComponent => cp } foreach {
_.createDataDirectories(hosts)
}
info(" creating log data directory")
command(s"mkdir -p ${dataDirs.logsDataDir}", hosts, false)
info("finished creating data directories")
}
def deployComponents(hosts: GenSeq[String] = ips.par) {
deployment.componentProps.foreach(_.deployComponent(hosts = hosts))
}
def genResources(hosts: GenSeq[String] = ips) {
deployment.createResources(mkScripts(hosts))
}
def genEsResources(hosts: GenSeq[String]) {
deployment.createResources(mkScripts(hosts).filter(_.isInstanceOf[ElasticsearchConf]))
}
def genCtrlResources(hosts: GenSeq[String]) {
deployment.createResources(mkScripts(hosts).filter(_.isInstanceOf[CtrlConf]))
}
def deployApplication: Unit = deployApplication()
def deployApplication(hosts: GenSeq[String] = ips.par) {
syncLib(hosts)
info("deploying application")
info(" creating application directories")
//command(s"mkdir -p ${instDirs.intallationDir}/", hosts, false)
command(s"mkdir ${instDirs.intallationDir}/app ${instDirs.intallationDir}/conf ${instDirs.intallationDir}/data ${instDirs.intallationDir}/bin", hosts, false)
command(s"mkdir ${instDirs.intallationDir}/app/batch ${instDirs.intallationDir}/app/bg ${instDirs.intallationDir}/app/ctrl ${instDirs.intallationDir}/app/dc ${instDirs.intallationDir}/app/cas ${instDirs.intallationDir}/app/es ${instDirs.intallationDir}/app/ws ${instDirs.intallationDir}/app/scripts ${instDirs.intallationDir}/app/tools", hosts, false)
command(s"ln -s ${dataDirs.logsDataDir} ${instDirs.intallationDir}/log", hosts, false)
info(" deploying components")
deployComponents(hosts)
//info(" extracting components")
//extractComponents(hosts)
info(" creating symbolic links")
deployment.componentProps.collect { case cp: DataComponent => cp } foreach {
_.linkDataDirectories(hosts)
}
deployment.componentProps.collect { case cp: LoggingComponent => cp } foreach {
_.createLoggingDirectories(hosts)
}
deployment.componentProps.collect { case cp: ConfigurableComponent => cp } foreach {
_.createConigurationsDirectoires(hosts)
}
rsync("./scripts/", s"${instDirs.intallationDir}/app/scripts/", hosts)
info(" creating links in app directory")
createAppLinks(hosts)
rsync(s"./components/mx4j-tools-3.0.1.jar", s"${instDirs.intallationDir}/app/cas/cur/lib/", hosts)
info(" creating scripts")
genResources(hosts)
info(" deploying plugins")
rsyncPlugins(hosts)
info(" linking libs")
linkLibs(hosts)
info("finished deploying application")
}
private def createAppLinks(hosts: GenSeq[String]) = {
command(s"test -L ${instDirs.globalLocation}/cm-well/app/batch/logs || ln -s ${instDirs.globalLocation}/cm-well/log/batch/ ${instDirs.globalLocation}/cm-well/app/batch/logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/bg/logs || ln -s ${instDirs.globalLocation}/cm-well/log/bg/ ${instDirs.globalLocation}/cm-well/app/bg/logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/logs || ln -s ${instDirs.globalLocation}/cm-well/log/ws/ ${instDirs.globalLocation}/cm-well/app/ws/logs", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/log/cw/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/cw-logs || ln -s ${instDirs.globalLocation}/cm-well/log/cw/ ${instDirs.globalLocation}/cm-well/app/ws/cw-logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ctrl/logs || ln -s ${instDirs.globalLocation}/cm-well/log/ctrl/ ${instDirs.globalLocation}/cm-well/app/ctrl/logs", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/dc/logs || ln -s ${instDirs.globalLocation}/cm-well/log/dc/ ${instDirs.globalLocation}/cm-well/app/dc/logs", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/batch/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/batch/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/batch/ ${instDirs.globalLocation}/cm-well/app/batch/conf", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/bg/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/bg/ ${instDirs.globalLocation}/cm-well/app/bg/conf", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/ws/ ${instDirs.globalLocation}/cm-well/app/ws/conf", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/cw/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ws/cw-conf || ln -s ${instDirs.globalLocation}/cm-well/conf/cw/ ${instDirs.globalLocation}/cm-well/app/ws/cw-conf", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/ctrl/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/ctrl/ ${instDirs.globalLocation}/cm-well/app/ctrl/conf", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/dc/", hosts, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/app/dc/conf || ln -s ${instDirs.globalLocation}/cm-well/conf/dc/ ${instDirs.globalLocation}/cm-well/app/dc/conf", hosts, false)
}
def mkScripts(ips: GenSeq[String] = ips): GenSeq[ComponentConf] = {
null
}
def redeploy: Unit = redeploy()
def redeploy(hosts: GenSeq[String] = ips.par) {
checkProduction
stop(false, hosts)
clearApp(hosts)
deployApplication(hosts)
}
def updateBatch: Unit = updateBatch()
def updateBatch(hosts: GenSeq[String] = ips) {
hosts.foreach { h =>
rsync(s"./components/cmwell-batch_2.10-1.0.1-SNAPSHOT-selfexec.jar", s"${instDirs.globalLocation}/cm-well/app/bg/_cmwell-batch_2.10-1.0.1-SNAPSHOT-selfexec.jar", List(h))
//stopWebservice(List(hosts))
stopBatch(List(h))
command(s"mv ${instDirs.globalLocation}/cm-well/app/bg/_cmwell-batch_2.10-1.0.1-SNAPSHOT-selfexec.jar ${instDirs.globalLocation}/cm-well/app/bg/cmwell-batch_2.10-1.0.1-SNAPSHOT-selfexec.jar", List(h), false)
startBatch(List(h))
//startWebservice(List(hosts))
}
}
def updateWebService: Unit = updateWebService()
def updateWebService(hosts: GenSeq[String] = ips) {
hosts.foreach { h =>
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws; mkdir tmp", List(h), false)
rsync("./components/cmwell-ws_2.10-1.0.1-SNAPSHOT-dist.zip", s"${instDirs.globalLocation}/cm-well/app/ws/tmp/cmwell-ws_2.10-1.0.1-SNAPSHOT-dist.zip", List(h))
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws/tmp; unzip cmwell-ws_2.10-1.0.1-SNAPSHOT-dist.zip", hosts, false)
stopWebservice(List(h))
command(s"rm -rf ${instDirs.intallationDir}/cm-well/app/ws/cmwell-ws-1.0.1-SNAPSHOT", List(h), false)
command(s"rm ${instDirs.globalLocation}/cm-well/app/ws/RUNNING_PID", List(h), false)
command(s"mv ${instDirs.globalLocation}/cm-well/app/ws/tmp/cmwell-ws-1.0.1-SNAPSHOT ${instDirs.globalLocation}/cm-well/app/ws/cmwell-ws-1.0.1-SNAPSHOT", List(h), false)
startWebservice(List(h))
}
}
def removeCmwellSymLink(): Unit = removeCmwellSymLink(ips.par)
def removeCmwellSymLink(hosts: GenSeq[String]) {
command(s"unlink ${instDirs.globalLocation}/cm-well 2> /dev/null", hosts, false)
}
def createCmwellSymLink(sudoer: Option[Credentials]): Unit = createCmwellSymLink(ips.par, sudoer)
def createCmwellSymLink(hosts: GenSeq[String], sudoer: Option[Credentials] = None) {
removeCmwellSymLink(hosts)
command(s"sudo ln -s ${instDirs.intallationDir} ${instDirs.globalLocation}/cm-well; sudo chown -h $user:$user ${instDirs.globalLocation}/cm-well", hosts, true, sudoer)
}
def registerCtrlService(hosts: GenSeq[String], sudoer: Credentials) {
if (ctrlService) {
//remove the old ctrl (the link one) - if exists
command("sudo rm -f /etc/init.d/ctrl", hosts, true, Some(sudoer))
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/conf/ctrl", hosts, false)
createFile(s"${instDirs.globalLocation}/cm-well/conf/ctrl/ctrl", Source.fromFile("scripts/templates/ctrl").mkString.replace("{{user}}", user), hosts)
command(s"chmod +x ${instDirs.globalLocation}/cm-well/conf/ctrl/ctrl", hosts, false)
val cmwellRunner = Source.fromFile("scripts/templates/cmwell-runner").mkString.replace("\\n", "\\\\\\\\n") // it's used inside echo -e that will remove the \\\\ to \\ and then another echo -e that will make the actual new line
createFile("/etc/init.d/cmwell-runner", cmwellRunner, hosts, true, Some(sudoer))
command("sudo chmod +x /etc/init.d/cmwell-runner", hosts, true, Some(sudoer))
hosts.foreach {
host =>
getOs(host) match {
case Ubuntu =>
command("sudo update-rc.d cmwell-runner defaults", host, true, Some(sudoer))
case Oracle =>
command("sudo chkconfig --add cmwell-runner", host, true, Some(sudoer))
command("sudo chkconfig cmwell-runner on", host, true, Some(sudoer))
}
}
command("sudo service cmwell-runner start", hosts, true, Some(sudoer))
}
}
def disks: GenSet[String] = {
val DataDirs(casDataDirs, casCommitLogDirs, esDataDirs, tlogDataDirs, kafkaDataDirs, zookeeperDataDir, logsDataDir) = dataDirs
val dirs = casDataDirs ++ casCommitLogDirs ++ esDataDirs ++ tlogDataDirs ++ kafkaDataDirs ++ Seq(zookeeperDataDir, logsDataDir, instDirs.intallationDir)
dirs.map(dir => dir.substring(0, dir.lastIndexOf("/"))).toSet
}
def disksWithAncestors(disks: GenSet[String]): GenSet[String] = {
def addSlash(p: String) = p match {
case "" => "";
case "/" => "/";
case _ => p + "/"
}
disks.flatten { disk =>
val splitted = disk.split("/").map(p => if (p.isEmpty) "/" else p)
val ancestors = splitted.scan("")((p, dir) => addSlash(p) + dir)
ancestors.filterNot(p => p.isEmpty || p == "/")
}
}
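  // Worked example: disksWithAncestors(Set("/mnt/d1/cas")) returns
  // Set("/mnt", "/mnt/d1", "/mnt/d1/cas"), i.e. the directory itself plus every ancestor except "/".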
def unprepareMachines(): Unit = unprepareMachines(ips.par)
def unprepareMachines(hosts: GenSeq[String]) {
purge(hosts)
removeDataDirs(hosts)
removeCmwellSymLink(hosts)
}
def changeOwnerAndAddExcutePermission(hosts: GenSeq[String], dirs: GenSeq[String], user: String, sudoer: Credentials): Unit = {
dirs.foreach(dir => command(s"sudo chmod +x $dir; sudo chown $user:$user $dir", hosts, true, Some(sudoer)))
}
def prepareMachines(): Unit = prepareMachines(ips.par, "", "", "")
def prepareMachines(hosts: String*): Unit = prepareMachines(hosts, "", "", "")
def prepareMachines(hosts: GenSeq[String], sudoerName: String, sudoerPass: String, userPass: String) {
val sudoerNameFinal: String = if (sudoerName != "") sudoerName else scala.io.StdIn.readLine("Please enter sudoer username\\n")
val sudoerPassword: String = if (sudoerPass != "") sudoerPass else scala.io.StdIn.readLine(s"Please enter $sudoerNameFinal password\\n")
println(s"Gaining trust of sudoer account: $sudoerNameFinal")
gainTrust(sudoerNameFinal, sudoerPassword, hosts)
sudoerCredentials = Some(Credentials(sudoerNameFinal, sudoerPassword))
val sudoer = sudoerCredentials.get
copySshpass(hosts, sudoer)
println("We will now create a local user 'u' for this cluster")
val pass = if (userPass != "") userPass else scala.io.StdIn.readLine(s"Please enter $user password\\n")
createUser(user, pass, hosts, sudoer)
println(s"Gaining trust of the account $user")
gainTrust(user, pass, hosts)
refreshUserState(user, Some(sudoer), hosts)
changeOwnerAndAddExcutePermission(hosts, disksWithAncestors(disks).toSeq, user, sudoer)
createDataDirs(hosts)
createCmwellSymLink(hosts, Some(sudoer))
registerCtrlService(hosts, sudoer)
finishPrepareMachines(hosts, sudoer)
}
protected def finishPrepareMachines(hosts: GenSeq[String], sudoer: Credentials) = {
// deleteSshpass(hosts, sudoer)
info("Machine preparation was done. Please look at the console output to see if there were any errors.")
}
private def copySshpass(hosts: GenSeq[String], sudoer: Credentials): Unit = {
//only copy sshpass if it's an internal one
if (UtilCommands.linuxSshpass == "bin/utils/sshpass") {
hosts.foreach(host => Seq("rsync", "-z", "-e", "ssh -o StrictHostKeyChecking=no", UtilCommands.linuxSshpass, s"${sudoer.name}@$host:~/bin/") !!)
}
}
private def deleteSshpass(hosts: GenSeq[String], sudoer: Credentials): Unit = {
command("sudo rm sshpass", hosts, true, Some(sudoer))
}
def prepareMachinesNonInteractive: Unit = prepareMachinesNonInteractive()
def prepareMachinesNonInteractive(sudoerName: String = "mySudoer", sudoerPass: String = "said2000", uPass: String = "said2000", hosts: GenSeq[String] = ips.par) {
gainTrust(sudoerName, sudoerPass, hosts)
val sudoer = Credentials(sudoerName, sudoerPass)
sudoerCredentials = Some(sudoer)
copySshpass(hosts, sudoer)
createUser(pass = uPass, hosts = hosts, sudoer = sudoer)
gainTrust("u", uPass, hosts)
refreshUserState("u", Some(sudoer))
changeOwnerAndAddExcutePermission(hosts, disksWithAncestors(disks).toSeq, user, sudoer)
createDataDirs()
createCmwellSymLink(Some(sudoer))
registerCtrlService(hosts, sudoer)
// deleteSshpass(hosts, sudoer)
}
def deploy: Unit = deploy()
def deploy(hosts: GenSeq[String] = ips.par) {
checkProduction
deployApplication(hosts)
}
def getNewHostInstance(ipms: IpMappings): Host
def cassandraNetstats = {
println(command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath netstats 2> /dev/null", ips(0), false).get)
}
def removeNode(host: String): Host = {
checkProduction
connectToGrid
if (CtrlClient.currentHost == host) CtrlClient.init((ips.toSet - host).head)
purge(Seq(host))
Host.ctrl.waitForHealth
Thread.sleep(20000)
info("Removing node from Grid")
Host.ctrl.removeNode(host)
ipMappings.filePath match {
case Some(fp) =>
IpMappingController.writeMapping(ipMappings.remove(List(host)), fp)
IpMappingController.writeMapping(ipMappings.remove(ips.diff(List(host))), s"${fp}_$host")
case None => // Do nothing.
}
getNewHostInstance(ipMappings.remove(List(host)))
}
@deprecated
def removeNodeOld(host: String): Host = {
//decommissionCassandraNodes(hosts)
connectToGrid
if (CtrlClient.currentHost == host) CtrlClient.init((ips.toSet - host).head)
val hosts = Seq(host)
val numberOfCassandraDownNodes = command("ps aux | grep Cassandra | grep -v starter | grep -v grep | wc -l", hosts, false).map(r => r.get.trim.toInt).foldLeft(0)(_ + _)
//purge(hosts)
val taskResult = Await.result(CtrlClient.clearNode(host), 1.hours)
stopCtrl(host)
val newInstance = getNewHostInstance(ipMappings.remove(hosts.toList))
//Try(newInstance.CassandraDNLock().waitForModule(newInstance.ips(0), numberOfCassandraDownNodes, 20))
//newInstance.rebalanceCassandraDownNodes
info("Regenerating resource files")
newInstance.genResources()
if (hosts.contains(ips(0))) {
val newIndexerMaster = newInstance.ips(0)
newInstance.stopBatch(List(newIndexerMaster))
newInstance.startBatch(List(newIndexerMaster))
info(s"${newInstance.ips(0)}'s indexer will be promoted to be master.")
}
ipMappings.filePath match {
case Some(fp) =>
IpMappingController.writeMapping(ipMappings.remove(hosts.toList), fp)
IpMappingController.writeMapping(ipMappings.remove(ips.diff(hosts.toList)), s"${fp}_$host")
case None => // Do nothing.
}
info("Waiting for Health control.")
Host.ctrl.waitForHealth
Thread.sleep(20000)
Host.ctrl.removeNode(host)
newInstance.dataInitializer.updateKnownHosts
info(s"The node $host is removed from the cluster.")
newInstance
}
def addNodesSH(path: String) {
addNodes(path)
sys.exit(0)
}
def removeNodeSH(ip: String) {
removeNode(ip)
sys.exit(0)
}
def addNodes(path: String): Host = {
addNodes(IpMappingController.readMapping(path))
}
def addNodes(ipms: IpMappings, sudoerName: String = "", sudoerPass: String = "", userPass: String = ""): Host = {
connectToGrid
val activeNodes = Try(Await.result(Host.ctrl.getActiveNodes, 10 seconds)).getOrElse(ActiveNodes(Set.empty[String]))
val addedInstances = getNewHostInstance(ipms)
    //Due to Dudi's request, prepareMachines isn't run by default and must be run manually (to avoid requiring passwords)
//addedInstances.prepareMachines(addedInstances.ips.par, sudoerName = sudoerName, sudoerPass = sudoerPass, userPass = userPass)
addedInstances.purge()
val hostsToRemove = Set.empty[String] //ipMappings.m.map(_.ip).toSet -- activeNodes.an
val withoutDownNodesMapping = ipMappings.remove(hostsToRemove.toList)
val combinedMappings = withoutDownNodesMapping combine ipms
val combinedInstances = getNewHostInstance(combinedMappings)
combinedInstances.deploy(addedInstances.ips)
combinedInstances.startCtrl(addedInstances.ips)
Thread.sleep(20000)
ipms.getIps.foreach(Host.ctrl.addNode)
combinedInstances.startDcForced(addedInstances.ips)
// combinedInstances.startCassandra(addedInstances.ips)
// combinedInstances.startElasticsearch(addedInstances.ips)
//
//
// Retry{
// try{
// combinedInstances.CassandraLock().waitForModule(combinedInstances.ips(0), combinedInstances.getSize)
// } catch {
// case t : Throwable =>
// info("Trying to reinit Cassandra")
// combinedInstances.startCassandra(addedInstances.ips)
// throw t
// }
// }
//
// Retry{
// try{
// combinedInstances.ElasticsearchLock().waitForModule(combinedInstances.ips(0), combinedInstances.getSize)
// } catch {
// case t : Throwable =>
// info("Trying to reinit Elasticsearch")
// combinedInstances.startElasticsearch(addedInstances.ips)
// throw t
// }
// }
//
// combinedInstances.startCtrl(addedInstances.ips)
// combinedInstances.startBatch(addedInstances.ips)
// combinedInstances.startWebservice(addedInstances.ips)
// combinedInstances.startCW(addedInstances.ips)
// combinedInstances.startDc(addedInstances.ips)
//
// update the ip mappings file.
ipMappings.filePath match {
case Some(fp) => IpMappingController.writeMapping(combinedMappings, fp)
case None => // Do nothing.
}
//combinedInstances.dataInitializer.updateKnownHosts
combinedInstances
}
def killProcess(name: String, flag: String, hosts: GenSeq[String] = ips.par, tries: Int = 5) {
if (tries > 0) {
command(s"ps aux | grep -v grep | grep $name | awk '{print $$2}' | xargs -I zzz kill $flag zzz 2> /dev/null", hosts, false)
val died = command(s"ps aux | grep java | grep -v grep | grep $name | wc -l ", hosts, false).map(s => s.get.trim.toInt).filterNot(_ == 0).length == 0
if (!died) {
Thread.sleep(500)
killProcess(name, flag, hosts, tries - 1)
}
} else {
command(s"ps aux | grep java | grep " + name + " | awk '{print $2}' | xargs -I zzz kill -9 zzz 2> /dev/null", hosts, false)
}
}
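  // The method above is best-effort: it sends the given signal, checks whether any matching java
  // process is still alive, retries up to `tries` times with a 500 ms pause between attempts, and
  // only after the retry budget is exhausted does it fall back to an unconditional kill -9.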
// todo: kill with -9 if it didn't work.
// todo: remove es with its command.
def stop: Unit = stop(false, ips.par)
def stop(hosts: String*): Unit = stop(false, hosts.par)
def stop(force: Boolean, hosts: GenSeq[String]) {
checkProduction
val tries = if (force) 0 else 5
stopWebservice(hosts, tries)
stopBatch(hosts, tries)
stopBg(hosts, tries)
stopElasticsearch(hosts, tries)
stopCassandra(hosts, tries)
stopCtrl(hosts, tries)
stopCW(hosts, tries)
stopDc(hosts, tries)
stopKafka(hosts, tries)
stopZookeeper(hosts, tries)
stopLogstash(hosts, tries)
stopKibana(hosts, tries)
}
def clearData: Unit = clearData()
def clearData(hosts: GenSeq[String] = ips.par) {
checkProduction
dataDirs.casDataDirs.foreach {
cas => command(s"rm -rf ${cas}/*", hosts, false)
}
dataDirs.casCommitLogDirs.foreach {
ccl => command(s"rm -rf ${ccl}/*", hosts, false)
}
dataDirs.esDataDirs.foreach {
es => command(s"rm -rf ${es}/*", hosts, false)
}
dataDirs.tlogDataDirs.foreach {
tlog =>
command(s"rm -rf $tlog/*", hosts, false)
}
dataDirs.kafkaDataDirs.foreach {
kafka =>
command(s"rm -rf $kafka/*", hosts, false)
}
command(s"rm -rf ${dataDirs.zookeeperDataDir}/*", hosts, false)
command(s"rm -rf ${dataDirs.logsDataDir}/*", hosts, false)
}
def clearApp: Unit = clearApp()
def clearApp(hosts: GenSeq[String] = ips.par) {
checkProduction
command(s"rm -rf ${instDirs.intallationDir}/*", hosts, false)
}
def purge: Unit = purge()
def purge(hosts: GenSeq[String] = ips) {
checkProduction
info("purging cm-well")
info(" stopping processes")
stop(true, hosts)
info(" clearing application data")
clearApp(hosts)
info(" clearing data")
clearData(hosts)
info("finished purging cm-well")
}
def injectMetaData: Unit = injectMetaData(ips(0))
def injectMetaData(host: String) {
dataInitializer.uploadMetaData()
dataInitializer.uploadNameSpaces()
}
def injectSampleData = {
dataInitializer.uploadSampleData()
}
def casHealth: Try[String] = casHealth()
def casHealth(hosts: GenSeq[String] = ips.par): Try[String] = {
command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin" + nodeToolPath + " status", hosts(0), false)
}
def esHealth: Try[String] = {
command("curl -sX GET http://" + pingAddress + esHealthAddress, ips(0), false)
}
def stopBg: Unit = stopBg(ips.par)
def stopBg(hosts: String*): Unit = stopBg(hosts.par)
def stopBg(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("cmwell.bg.Runner", "", hosts, tries)
}
def stopBatch: Unit = stopBatch(ips.par)
def stopBatch(hosts: String*): Unit = stopBatch(hosts.par)
def stopBatch(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("batch", "", hosts, tries)
}
def stopWebservice: Unit = stopWebservice(ips.par)
def stopWebservice(hosts: String*): Unit = stopWebservice(hosts.par)
def stopWebservice(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("psId=Webserver", "", hosts, tries)
}
def stopCW: Unit = stopCW(ips.par)
def stopCW(hosts: String*): Unit = stopCW(hosts.par)
def stopCW(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("crashableworker", "", hosts, tries)
}
def stopDc: Unit = stopDc(ips.par)
def stopDc(hosts: String*): Unit = stopDc(hosts.par)
def stopDc(hosts: GenSeq[String], tries: Int = 5) = {
checkProduction
killProcess("app/dc", "", hosts, tries)
}
def stopCassandra: Unit = stopCassandra(ips.par)
def stopCassandra(hosts: String*): Unit = stopCassandra(hosts.par)
def stopCassandra(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("CassandraDaemon", "", hosts, tries)
}
def esSyncedFlush(host: String, port: Int = 9200): Unit = {
command(s"curl -sX POST 'http://$host:$port/_all/_flush/synced'")
}
def stopElasticsearch: Unit = stopElasticsearch(ips.par)
def stopElasticsearch(hosts: String*): Unit = stopElasticsearch(hosts.par)
def stopElasticsearch(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
esSyncedFlush(hosts(0))
killProcess("Elasticsearch", "", hosts, tries)
}
def startBg: Unit = startBg(ips.par)
  def startBg(hosts: String*): Unit = startBg(hosts.par)
def startBg(hosts: GenSeq[String]) {
checkProduction
if (withZkKfk)
command(s"cd ${instDirs.globalLocation}/cm-well/app/bg; ${startScript("./start.sh")}", hosts, false)
}
def startBatch: Unit = startBatch(ips.par)
def startBatch(hosts: String*): Unit = startBatch(hosts.par)
def startBatch(hosts: GenSeq[String]) {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/batch; ${startScript("./start.sh")}", hosts, false)
}
def startWebservice: Unit = startWebservice(ips.par)
def startWebservice(hosts: String*): Unit = startWebservice(hosts.par)
def startWebservice(hosts: GenSeq[String]) {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws/; ${startScript("./start.sh")}", hosts, false)
}
def startCW: Unit = startCW(ips.par)
def startCW(hosts: String*): Unit = startCW(hosts.par)
def startCW(hosts: GenSeq[String]) {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/ws; ${startScript("./cw-start.sh")}", hosts, false)
}
def startDc: Unit = startDc(ips.par)
def startDc(hosts: String*): Unit = startDc(hosts.par)
def startDc(hosts: GenSeq[String]): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/dc; ${startScript("./start.sh")}", hosts, false)
}
def startDcForced(hosts: GenSeq[String]): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/dc; HAL=9000 FORCE=MAJOUR ./start.sh", hosts, false)
}
//def startScript(script : String) = s"""bash -c "starter '$script'" > /dev/null 2> /dev/null & """
//def startScript(script : String) = s"""HAL=9000 $script"""
def startScript(script: String) =
s"""HAL=9000 ${if (deb) "CMWELL_DEBUG=true" else ""} $script"""
def start: Unit = start(ips.par)
def start(hosts: String*): Unit = start(hosts.par)
def start(hosts: GenSeq[String]) {
checkProduction
startCassandra(hosts)
startElasticsearch(hosts)
Try(CassandraLock().waitForModule(hosts(0), size))
Try(ElasticsearchLock().waitForModule(hosts(0), size))
startZookeeper
startKafka(hosts)
startCtrl(hosts)
startBatch(hosts)
startCW(hosts)
startWebservice(hosts)
startDc(hosts)
if (withElk) {
startLogstash(hosts)
startKibana(hosts)
}
}
def startCtrl: Unit = startCtrl(ips)
def startCtrl(hosts: String*): Unit = startCtrl(hosts.par)
def startCtrl(hosts: GenSeq[String]) = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/ctrl; ${startScript("./start.sh")}", hosts, false)
}
def stopCtrl: Unit = stopCtrl(ips.par)
def stopCtrl(hosts: String*): Unit = stopCtrl(hosts.par)
def stopCtrl(hosts: GenSeq[String], tries: Int = 5) {
checkProduction
killProcess("CtrlServer", "", hosts, tries)
}
def createManager: Unit = createManager()
def createManager(machineName: String = ips(0), path: String = "~/cmwell/") {
rsync("./", path, List(machineName))
}
def readTime(targ: String = "meta/ns/oa") {
}
def init: Unit = init()
def init(hosts: GenSeq[String] = ips.par) {
checkProduction
info("starting controller")
startCtrl(hosts)
info("initializing cm-well")
info(" initializing cassandra")
initCassandra(hosts)
info(" initializing elasticsearch")
initElasticsearch(hosts)
info(" waiting for cassandra and elasticsearch")
Retry {
try {
CassandraLock().waitForModule(hosts(0), size)
} catch {
case t: Throwable =>
info("Trying to reinit Cassandra")
initCassandra(hosts)
throw t
}
}
Retry {
try {
ElasticsearchLock().waitForModule(hosts(0), size)
} catch {
case t: Throwable =>
info("Trying to reinit Elasticsearch")
initElasticsearch(hosts)
throw t
}
}
info(" starting zookeeper")
startZookeeper
info(" starting kafka")
startKafka
info(" inserting schemas")
initSchemes(hosts)
// wait until all the schemas are written.
Thread.sleep(10000)
info(" starting batch")
startBatch(hosts)
info(" starting bg")
startBg(hosts)
info(" starting cw")
startCW(hosts)
info(" starting web service")
startWebservice(hosts)
uploadInitialContent(hosts(0))
info(" starting dc controller")
startDc(hosts)
info("finished initializing cm-well")
if (withElk) {
startLogstash(hosts)
startKibana(hosts)
}
}
def uploadInitialContent(host: String = ips(0)): Unit = {
checkProduction
Try(WebServiceLock().waitForModule(host, 1))
info(" waiting for ws...")
dataInitializer.waitForWs()
info(" inserting meta data")
injectMetaData(host)
info(" uploading SPAs to meta/app")
dataInitializer.uploadDirectory("data", s"http://$host:9000/meta/app/")
info(" uploading sys")
dataInitializer.uploadDirectory("sys", s"http://$host:9000/meta/sys/wb/")
info(" uploading docs")
dataInitializer.uploadDirectory("docs", s"http://$host:9000/meta/docs/")
info(" uploading basic userInfotons (if not exist)")
dataInitializer.uploadBasicUserInfotons(host)
info(" updating version history")
dataInitializer.logVersionUpgrade(host)
}
def initCassandra: Unit = initCassandra()
def initCassandra(hosts: GenSeq[String] = ips.par)
def initElasticsearch: Unit = initElasticsearch()
def initElasticsearch(hosts: GenSeq[String] = ips.par)
def initSchemes: Unit = initSchemes()
def initSchemes(hosts: GenSeq[String] = ips.par) {
val aliases =
"""{
"actions" : [
{ "add" : { "index" : "cmwell_current_0", "alias" : "cmwell_current" } },
{ "add" : { "index" : "cmwell_history_0", "alias" : "cmwell_history" } },
{ "add" : { "index" : "cmwell_current_0", "alias" : "cmwell_current_latest" } },
{ "add" : { "index" : "cmwell_history_0", "alias" : "cmwell_history_latest" } },
{ "add" : { "index" : "cm_well_p0_0", "alias" : "cm_well_all" } }
]
}""".replace("\\n", "")
command(s"cd ${instDirs.globalLocation}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${instDirs.globalLocation}/cm-well/conf/cas/cassandra-cql-init-cluster", hosts(0), false)
command(s"cd ${instDirs.globalLocation}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${instDirs.globalLocation}/cm-well/conf/cas/cassandra-cql-init-cluster-new", hosts(0), false)
command(s"cd ${instDirs.globalLocation}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${instDirs.globalLocation}/cm-well/conf/cas/zstore-cql-init-cluster", hosts(0), false)
command(s"""curl -s -X POST http://${pingAddress}:$esRegPort/_template/cmwell_indices_template -H "Content-Type: application/json" --data-ascii @${instDirs.globalLocation}/cm-well/conf/es/mapping.json""", hosts(0), false)
command(s"""curl -s -X POST http://${pingAddress}:$esRegPort/_template/cmwell_index_template -H "Content-Type: application/json" --data-ascii @${instDirs.globalLocation}/cm-well/conf/es/indices_template_new.json""", hosts(0), false)
command(s"curl -s -X POST http://${pingAddress}:$esRegPort/cmwell_current_0/;curl -s -X POST http://${pingAddress}:$esRegPort/cmwell_history_0/", hosts(0), false)
command(s"curl -s -X POST http://${pingAddress}:$esRegPort/cm_well_p0_0/", hosts(0), false)
// command(s"curl -s -X POST http://${pingAddress}:$esRegPort/cm_well_0/", hosts(0), false)
command(s"""curl -s -X POST http://${pingAddress}:$esRegPort/_aliases -H "Content-Type: application/json" --data-ascii '${aliases}'""", hosts(0), false)
// create kafka topics
if (withZkKfk) {
val replicationFactor = math.min(hosts.size, 3)
val createTopicCommandPrefix = s"cd ${instDirs.globalLocation}/cm-well/app/kafka/cur; export PATH=/opt/cm-well/app/java/bin:$$PATH ; sh bin/kafka-topics.sh --create --zookeeper ${pingAddress}:2181 --replication-factor $replicationFactor --partitions ${hosts.size} --topic"
      // Kafka may still be starting up, so retry each topic creation a few times before moving on.
      def createTopic(topic: String): Unit = {
        var tryNum: Int = 1
        var ret = command(s"$createTopicCommandPrefix $topic", hosts(0), false)
        while ((ret.isFailure || !ret.get.contains("Created topic")) && tryNum < 6) {
          tryNum += 1
          Thread.sleep(5000)
          ret = command(s"$createTopicCommandPrefix $topic", hosts(0), false)
        }
      }
      createTopic("persist_topic")
      createTopic("persist_topic.priority")
      createTopic("index_topic")
      createTopic("index_topic.priority")
}
}
val withZookeeper = withZkKfk
val withKafka = withZkKfk
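  // Hosts from the configured IP list that currently answer a single ping probe.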
def avaiableHosts = {
ips.filter {
ip =>
command(s"ping -c 1 $ip > /dev/null 2> /dev/null").isSuccess
}
}
def brokerId(host: String) = ips.indexOf(host)
def startZookeeper: Unit = {
checkProduction
if (withZookeeper) command(s"cd ${instDirs.globalLocation}/cm-well/app/zookeeper; ${startScript("./start.sh")}", avaiableHosts.take(3), false)
}
def startZookeeper(host: String): Unit = {
if (withZookeeper) command(s"cd ${instDirs.globalLocation}/cm-well/app/zookeeper; ${startScript("./start.sh")}", host, false)
}
def startZookeeper(hosts: GenSeq[String]): Unit = {
checkProduction
if (withZookeeper)
command(s"cd ${instDirs.globalLocation}/cm-well/app/zookeeper; ${startScript("./start.sh")}", hosts.intersect(avaiableHosts), false)
}
def stopZookeeper: Unit = stopZookeeper()
def stopZookeeper(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
//if(withZookeeper)
killProcess("zookeeper", "", hosts, tries = tries)
}
def startKafka: Unit = startKafka()
def startKafka(hosts: GenSeq[String] = ips.par): Unit = {
checkProduction
if (withKafka) {
command(s"cd ${instDirs.globalLocation}/cm-well/app/kafka; ${startScript("./start.sh")}", hosts, false)
}
}
def startKafka(host: String): Unit = {
checkProduction
if (withKafka) {
command(s"cd ${instDirs.globalLocation}/cm-well/app/kafka; ${startScript("./start.sh")}", host, false)
}
}
def stopKafka: Unit = stopKafka()
def stopKafka(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
//if(withKafka)
killProcess("kafka.Kafka", "", hosts, tries = tries)
}
def startElasticsearch: Unit = startElasticsearch(ips.par)
def startElasticsearch(hosts: String*): Unit = startElasticsearch(hosts.par)
def startElasticsearch(hosts: GenSeq[String]): Unit
def startCassandra: Unit = startCassandra(ips.par)
def startCassandra(hosts: String*): Unit = startCassandra(hosts.par)
def startCassandra(hosts: GenSeq[String])
def startKibana: Unit = startKibana()
def startKibana(hosts: GenSeq[String] = ips.par): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/kibana; ${startScript("./start.sh")}", hosts, false)
}
def stopKibana: Unit = stopKibana()
def stopKibana(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
killProcess("kibana", "", hosts, tries = tries)
}
def startLogstash: Unit = startLogstash()
def startLogstash(hosts: GenSeq[String] = ips.par): Unit = {
checkProduction
command(s"cd ${instDirs.globalLocation}/cm-well/app/logstash; ${startScript("./start.sh")}", hosts, false)
}
def stopLogstash: Unit = stopLogstash()
def stopLogstash(hosts: GenSeq[String] = ips.par, tries: Int = 5): Unit = {
checkProduction
killProcess("logstash", "", hosts, tries = tries)
}
def quickInstall: Unit = {
checkProduction
}
def install: Unit = install(ips.par)
def install(hosts: String*): Unit = install(hosts.par)
def install(hosts: GenSeq[String]) {
checkProduction
refreshUserState(user, None, hosts)
purge(hosts)
deploy(hosts)
init(hosts)
//setElasticsearchUnassignedTimeout()
}
def disableElasticsearchUpdate: Unit = disableElasticsearchUpdate(ips(0))
def disableElasticsearchUpdate(ip: String) {
command(s"""curl -s -X PUT http://${pingAddress}:$esRegPort/_cluster/settings -d '{"transient" : {"cluster.routing.allocation.enable" : "none"}}'""", ip, false)
}
def enableElasticsearchUpdate: Unit = enableElasticsearchUpdate(ips(0))
def enableElasticsearchUpdate(ip: String) {
command(s"""curl -s -X PUT http://${pingAddress}:$esRegPort/_cluster/settings -d '{"transient" : {"cluster.routing.allocation.enable" : "all"}}'""", ip, false)
}
def findEsMasterNode(hosts: GenSeq[String] = ips): Option[String] = {
hosts.par.find(host => command(s"curl -s $host:$esMasterPort > /dev/null 2> /dev/null").isSuccess)
}
def findEsMasterNodes(hosts: GenSeq[String] = ips): GenSeq[String] = {
hosts.par.filter(host => command(s"curl -s $host:$esMasterPort > /dev/null 2> /dev/null").isSuccess)
}
def setElasticsearchUnassignedTimeout(host: String = ips.head, timeout: String = "15m"): Unit = {
info(s"setting index.unassigned.node_left.delayed_timeout to $timeout")
val com =
s"""curl -s -X PUT 'http://$host:$esRegPort/_all/_settings' -d '{
| "settings": {
| "index.unassigned.node_left.delayed_timeout": "$timeout"
| }
|}'""".stripMargin
command(com, host, false)
}
def getCassandraHostId(addr: String): String = {
command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep $addr | awk '{print $$7}'", ips(0), false).get.trim
}
def rebalanceCassandraDownNodes {
// grep DN | awk '{print $2 " " $7}'
Retry {
val downNodes = command(s"""JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep DN | awk '{print $$2 " " $$7}'""", ips(0), false).get.trim.split("\\n").toList.map {
dn =>
val dnsplt = dn.split(" ")
dnsplt(0) -> dnsplt(1)
}
downNodes.par.foreach(dn => command(s"JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath removenode ${dn._2} 2> /dev/null", ips(0), false))
if (command( s"""JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolPath status 2> /dev/null | grep DN | awk '{print $$2 " " $$7}'""", ips(0), false).get.trim.split("\\n").toList.size > 0)
throw new Exception("Failed to remove down nodes")
info(s"Cassandra nodes were removed from the cluster. The cluster now will rebalance its data.")
}
}
def getCassandraAddresses(host: String): Seq[String] = Seq(host)
def decommissionCassandraNodes(hosts: GenSeq[String]) {
hosts.foreach {
host =>
getCassandraAddresses(host).foreach {
ip =>
command( s"""JAVA_HOME=${instDirs.globalLocation}/cm-well/app/java/bin $nodeToolLocation -h $ip decommission 2> /dev/null""", host, false)
}
}
}
def shutdown: Unit = shutdown()
def shutdown(hosts: GenSeq[String] = ips): Unit = {
disableElasticsearchUpdate
stop(false, hosts)
}
def updateCasKeyspace: Unit = {
command(s"cd ${absPath(instDirs.globalLocation)}/cm-well/app/cas/cur; sh bin/cqlsh ${pingAddress} -f ${absPath(instDirs.globalLocation)}/cm-well/conf/cas/cassandra-cql-init-cluster-new", ips(0), false)
}
def updateKafkaScemas: Unit = {
val replicationFactor = math.min(ips.size, 3)
val createTopicCommandPrefix = s"cd ${absPath(instDirs.globalLocation)}/cm-well/app/kafka/cur; export PATH=/opt/cm-well/app/java/bin:$$PATH ; sh bin/kafka-topics.sh --create --zookeeper ${pingAddress}:2181 --replication-factor $replicationFactor --partitions ${ips.size} --topic"
command(s"$createTopicCommandPrefix persist_topic", ips(0), false)
command(s"$createTopicCommandPrefix index_topic", ips(0), false)
}
def checkPreUpgradeStatus(host: String): Unit = {
val esStatusTry = elasticsearchStatus(host)
val casStatusTry = cassandraStatus(host).map(_.toInt)
val wsStatusTry = webServerStatus(host)
var hasProblem = false
esStatusTry match {
case Success(color) => if (color.toLowerCase != "green") {
hasProblem = true
warn(s"Elasticsearch status is $color.")
}
case Failure(err) =>
hasProblem = true
warn(s"Couldn't retrieve Elasticsearch status.")
}
casStatusTry match {
case Success(uns) => if (uns < size) {
hasProblem = true
warn(s"Number of Cassandra up nodes is $uns/$size.")
}
case Failure(err) =>
hasProblem = true
warn(s"Couldn't retrieve Cassandra status.")
}
wsStatusTry match {
case Success(v) => if (!v.contains("200") && !v.contains("404") && !v.contains("503")) {
hasProblem = true
warn(s"Webservice doesn't respond with a good code.")
}
case Failure(err) =>
hasProblem = true
warn(s"Webservice doesn't respond.")
}
if (hasProblem) warnPrompt
}
def upgradeDc = upgrade(List(DcProps(this)), uploadSpa = false, uploadDocs = false)
def upgradeCtrl = upgrade(List(CtrlProps(this)), uploadSpa = false, uploadDocs = false)
def upgradeBG = upgrade(List(BatchProps(this)), uploadSpa = false, uploadDocs = false)
def upgradeWS = upgrade(List(WebserviceProps(this)))
def quickUpgrade: Unit = quickUpgrade()
def quickUpgrade(hosts: GenSeq[String] = ips): Unit = {
refreshUserState(user, None, hosts)
syncLib(hosts)
linkLibs(hosts)
hosts.par.foreach(host => restartApp(host))
}
def noDownTimeQuickUpgrade(hosts: GenSeq[String] = ips): Unit = {
refreshUserState(user, None, hosts)
info("syncing libs")
syncLib(hosts)
linkLibs(hosts)
info("generating resources")
genResources(hosts)
info("stopping CM-WELL components")
stopBatch(hosts)
stopBg(hosts)
stopDc(hosts)
stopCW(hosts)
stopCtrl(hosts)
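    // Restart the web services in two halves (hosts at even and odd indices) so that
    // roughly half of the nodes keep serving requests throughout the upgrade.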
val (h1, h2) = hosts.zipWithIndex.partition(x => x._2 % 2 == 0)
val hosts1 = h1.map(_._1)
val hosts2 = h2.map(_._1)
info(s"restarting web services on ${hosts1.mkString(",")}")
stopWebservice(hosts1)
startWebservice(hosts1)
hosts1.foreach { host => info(s"waiting for $host to respond"); WebServiceLock().com(host) }
info(s"restarting web services on ${hosts2.mkString(",")}")
stopWebservice(hosts2)
startWebservice(hosts2)
hosts2.foreach { host => info(s"waiting for $host to respond"); WebServiceLock().com(host) }
startBatch(hosts)
startBg(hosts)
startDc(hosts)
startCW(hosts)
startCtrl(hosts)
}
def upgrade: Unit = upgrade()
def upgrade(baseProps: List[ComponentProps] = List(CassandraProps(this), ElasticsearchProps(this), KafkaProps(this), ZooKeeperProps(this), BgProps(this), BatchProps(this), WebserviceProps(this), CtrlProps(this), DcProps(this), TlogProps(this)), clearTlogs: Boolean = false, uploadSpa: Boolean = true, uploadDocs: Boolean = true, uploadUserInfotons: Boolean = true, withUpdateSchemas: Boolean = false, hosts: GenSeq[String] = ips) {
checkProduction
refreshUserState(user, None, hosts)
//checkPreUpgradeStatus(hosts(0))
val esMasterNode = findEsMasterNode(hosts) match {
case Some(emn) =>
info(s"found Elasticsearch master node: $emn")
emn
case None => throw new Exception("Couldn't find elasticsearch master node")
}
val dateStr = deployment.getCurrentDateStr
var props = baseProps
if (deployJava) props = props ++ List(JavaProps(this))
if (withElk) props = props ++ List(LogstashProps(this), KibanaProps(this))
info("deploying components and checking what should be upgraded.")
syncLib(hosts)
linkLibs(hosts)
rsyncPlugins(hosts)
BinsProps(this).deployComponent(hosts)
//println(s"props: $props")
// get for each component its unsynced hosts and redeploy the new version of the component.
val updatedHosts = props.map(prop => (prop, prop.getUnsyncedHosts(hosts.par))).filter(t => t._2.size > 0).map(t => (t._1, t._2, t._1.redeployComponent(t._2)))
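    // Each element of updatedHosts is (component, hosts that need the new version, name of the freshly deployed version).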
if (updatedHosts.size > 0) {
//todo: FIX THIS!!!
doInfo = false
deployment.createDirs(hosts, props)
doInfo = true
val updatedComponents = updatedHosts.map(_._1).toSet
val preUpgradeComponents = props.collect { case r: RunnableComponent if r.upgradeMethod == PreUpgrade => r }.filter(r => updatedComponents.contains(r) || !updatedComponents.intersect(r.upgradeDependency).isEmpty)
val nonRollingComponents = props.collect { case r: RunnableComponent if r.upgradeMethod == NonRolling => r }.filter(r => updatedComponents.contains(r) || !updatedComponents.intersect(r.upgradeDependency).isEmpty)
val rollingComponents = props.collect { case r: RunnableComponent if r.upgradeMethod == Rolling => r }.filter(r => updatedComponents.contains(r) || !updatedComponents.intersect(r.upgradeDependency).isEmpty)
val nonRunningComponents = props.filter(p => !p.isInstanceOf[RunnableComponent])
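      // Upgrade order by component type: NonRolling components are stopped on all hosts before the
      // per-host pass and started again on all hosts at the end; Rolling components are restarted
      // host by host inside the loop; PreUpgrade components are relinked and restarted across all
      // hosts after the rolling pass, before the optional schema updates.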
updatedHosts.filter { el => nonRunningComponents.contains(el._1) && el._1.symLinkName.isDefined }.foreach {
el =>
val component = el._1
val hostsToUpdate = el._2
val newName = el._3
info(s"updating ${component.getName} on all hosts")
component.relink(newName, hostsToUpdate)
}
// stopping all the components that are not upgraded in rolling style.
nonRollingComponents.foreach {
nrc =>
info(s"stopping ${nrc.getName} on all hosts.")
nrc.stop(hosts)
}
hosts.foreach {
h =>
        // The components that were updated on this host.
val updatedHostComponents = updatedHosts.filter(uh => uh._2.toVector.contains(h)).map(uh => uh._1 -> (uh._2, uh._3)).toMap
val casUpdated = updatedComponents.contains(CassandraProps(this))
val esUpdated = updatedComponents.contains(ElasticsearchProps(this))
val javaUpdated = updatedComponents.contains(JavaProps(this))
//if(esUpdated || javaUpdated) {
Try(ElasticsearchLock().waitForModule(esMasterNode, size))
Try(ElasticsearchStatusLock("green", "yellow").waitForModuleIndefinitely(esMasterNode))
// if we encounter status yellow lets sleep for 10 minutes.
//if(elasticsearchStatus(ips(0)).getOrElse("N/A") == "yellow") Thread.sleep(10 * 1000 * 60)
//}
info(s"updating ${(updatedComponents -- nonRunningComponents -- preUpgradeComponents).map(_.getName).mkString(", ")} on $h")
val updatedComponentsSet = updatedComponents
// stopping all the components that are upgraded in rolling style.
rollingComponents.foreach {
rc =>
info(s" restarting ${rc.getName}")
rc.stop(List(h))
}
if (clearTlogs) {
removeTlogs(List(h))
}
// relinking the new components.
(updatedComponentsSet -- preUpgradeComponents -- nonRunningComponents).foreach(cp => if (cp.symLinkName.isDefined) cp.relink(updatedHostComponents.get(cp).get._2, List(h)))
createAppLinks(List(h))
genResources(List(h))
// starting all the components that are upgraded in rolling style.
rollingComponents.foreach(_.start(List(h)))
// wait for cassandra and elasticsearch to be stable before starting cmwell components.
if (javaUpdated || casUpdated) {
Try(CassandraLock().waitForModule(ips(0), size))
}
}
hosts.par.foreach(host => Try(WebServiceLock().waitForModule(host, 1)))
preUpgradeComponents.foreach {
puc =>
info(s"restarting ${puc.getName} on all hosts")
puc.stop(hosts)
}
updatedHosts.filter { el => preUpgradeComponents.contains(el._1) && el._1.symLinkName.isDefined }.foreach {
el =>
val component = el._1
val hostsToUpdate = el._2
val newName = el._3
info(s"updating ${component.getName} on all hosts.")
component.relink(newName, hostsToUpdate)
}
// todo: make more generic.
genEsResources(hosts)
preUpgradeComponents.foreach(_.start(hosts))
// starting all the components that are not upgraded in rolling style.
Try(ElasticsearchLock(esMasterNode).waitForModule(esMasterNode, size))
Try(ElasticsearchStatusLock("green", "yellow").waitForModuleIndefinitely(esMasterNode))
if (withUpdateSchemas) {
updateCasKeyspace
reloadEsMappings
updateKafkaScemas
}
nonRollingComponents.par.foreach {
nrc =>
info(s"starting ${nrc.getName} on all hosts.")
nrc.start(hosts)
}
}
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" waiting for ws...")
dataInitializer.waitForWs()
if (uploadSpa) {
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" uploading SPA to meta/app")
dataInitializer.uploadDirectory("data", s"http://${hosts.head}:9000/meta/app/")
info(" uploading sys")
dataInitializer.uploadDirectory("sys", s"http://${hosts.head}:9000/meta/sys/wb/")
}
if (uploadDocs) {
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" uploading docs")
dataInitializer.uploadDirectory("docs", s"http://${hosts.head}:9000/meta/docs/")
}
if (uploadUserInfotons) {
Try(WebServiceLock().waitForModule(ips(0), 1))
info(" uploading basic userInfotons (if not exist)")
dataInitializer.uploadBasicUserInfotons(hosts(0))
}
info(" updating version history")
dataInitializer.logVersionUpgrade(hosts(0))
}
def reloadEsMappings: Unit = reloadEsMappings()
def reloadEsMappings(createNewIndices: Boolean = true) {
info("reloading Elasticsearch mappings")
command(s"""curl -s -X POST http://${pingAddress}:$esRegPort/_template/cmwell_index_template -H "Content-Type: application/json" --data-ascii @${absPath(instDirs.globalLocation)}/cm-well/conf/es/indices_template_new.json""", ips(0), false)
if (createNewIndices) {
Thread.sleep(5000)
createEsIndices
}
}
def createEsIndices: Unit = {
// val numberOfShards = getSize
// val numberOfReplicas = 2
//
// val settingsJson =
// s"""
// |{
// | "settings" : {
// | "index" : {
// | "number_of_shards" : $numberOfShards,
// | "number_of_replicas" : $numberOfReplicas
// | }
// | }
// |}
// """.stripMargin
//
// command(s"""curl -s -XPUT 'http://${pingAddress}:$esRegPort/cm_well_0/' -d '$settingsJson'""", ips.head, false)
// val actionsJson =
// s"""
// |{
// | "actions" : [
// | {
// | "add" : { "index" : "cm_well_0", "alias" : "cm_well_latest" }
// | },
// | {
// | "add" : { "index" : "cm_well_0", "alias" : "cm_well_all" }
// | }
// | ]
// |}
// """.stripMargin
//
//
// command(s"""curl -s -X POST 'http://${pingAddress}:$esRegPort/_aliases' -d '$actionsJson'""", ips.head, false)
}
def createNewEsIndices: Unit = {
info("creating new indices")
val numberOfShards = getSize
val numberOfReplicas = 2
val settingsJson =
s"""
|{
| "settings" : {
| "index" : {
| "number_of_shards" : $numberOfShards,
| "number_of_replicas" : $numberOfReplicas
| }
| }
|}
""".stripMargin
val json = command(s""" curl -s http://${ips.head}:9000/health/es""").get
val (currents, histories) = JSON.parseFull(json.trim).get.asInstanceOf[Map[String, Any]]("indices").asInstanceOf[Map[String, Any]].keySet.partition {
_.contains("current")
}
val currentIndex = currents.map(_.split("_")(2).toInt).max
val historyIndex = histories.map(_.split("_")(2).toInt).max
val newCurrentIndex = s"cmwell_current_${currentIndex + 1}"
val newHistoryIndex = s"cmwell_history_${historyIndex + 1}"
val oldCurrentIndex = s"cmwell_current_$currentIndex"
val oldHistoryIndex = s"cmwell_history_$historyIndex"
command(s"""curl -s -XPUT 'http://${pingAddress}:$esRegPort/$newCurrentIndex/' -d '$settingsJson'""", ips.head, false)
command(s"""curl -s -XPUT 'http://${pingAddress}:$esRegPort/$newHistoryIndex/' -d '$settingsJson'""", ips.head, false)
val actionsJson =
s"""
|{
| "actions" : [
| {
| "add" : { "index" : "$newCurrentIndex", "alias" : "cmwell_current" }
| },
| {
| "add" : { "index" : "$newCurrentIndex", "alias" : "cmwell_current_latest" }
| },
| {
| "add" : { "index" : "$newHistoryIndex", "alias" : "cmwell_history" }
| },
| {
| "add" : { "index" : "$newHistoryIndex", "alias" : "cmwell_history_latest" }
| },
| {
| "remove" : { "index" : "$oldCurrentIndex", "alias" : "cmwell_current_latest" }
| },
| {
| "remove" : { "index" : "$oldHistoryIndex", "alias" : "cmwell_history_latest" }
| }
| ]
|}
""".stripMargin
command(s"""curl -s -X POST 'http://${pingAddress}:$esRegPort/_aliases' -d '$actionsJson'""", ips.head, false)
}
def restartApp = {
stopCtrl
startCtrl
Thread.sleep(5000)
restartWebservice
restartCW
restartDc
stopBatch
startBatch
}
def restartApp(host: String) = {
stopCtrl(host)
startCtrl(host)
Thread.sleep(5000)
stopWebservice(host)
startWebservice(host)
stopCW(host)
startCW(host)
stopDc(host)
startDc(host)
stopBatch(host)
startBatch(host)
}
def restartWebservice {
ips.foreach {
ip =>
info(s"Restarting Webservice on $ip")
stopWebservice(Seq(ip))
startWebservice(Seq(ip))
Try(WebServiceLock().waitForModule(ip, 1))
}
}
def restartDc {
stopDc
startDc
}
def restartCW {
stopCW
startCW
}
def restartCassandra {
ips.foreach {
ip =>
info(s"Restarting Cassandra on $ip")
stopCassandra(Seq(ip))
startCassandra(Seq(ip))
Try(CassandraLock().waitForModule(ips(0), size))
}
}
def restartElasticsearch: Unit = restartElasticsearch(ips)
def restartElasticsearch(hosts: Seq[String]) {
hosts.foreach {
host =>
Try(ElasticsearchStatusLock("green").waitForModule(hosts(0), 1000))
info(s"Restarting Elasticsearch on $host")
disableElasticsearchUpdate(ips((ips.indexOf(host) + 1) % ips.size))
Thread.sleep(10000)
stopElasticsearch(Seq(host))
startElasticsearch(Seq(host))
enableElasticsearchUpdate(ips((ips.indexOf(host) + 1) % ips.size))
}
}
//def createNetwork : Unit = createNetwork(ips.par,topology, persistentAliases)
def createNetwork(topology: NetTopology, persistent: Boolean, hosts : GenSeq[String], sudoer: Credentials) {
val ipMappingsOfPreparedOnly = ipMappings.remove(ipMappings.getIps.filterNot(hosts.seq.contains))
topology match {
case n: VLanTopology =>
val tag = n.tag
val m = topology.getTopologyMap(ipMappingsOfPreparedOnly)
m.foreach {
tuple =>
var index = 0
command(s"echo '/sbin/modprobe 8021q' | sudo tee /etc/sysconfig/modules/vlan.modules > /dev/null", tuple._1, true, Some(sudoer))
command(s"sudo chmod +x /etc/sysconfig/modules/vlan.modules", tuple._1, true, Some(sudoer))
command(s"sudo modprobe 8021q", tuple._1, true, Some(sudoer))
command(s"sudo ip link add link $inet name $inet.$tag type vlan id $tag", tuple._1, true, Some(sudoer))
command(s"sudo ifconfig $inet.$tag up", tuple._1, true, Some(sudoer))
val fileName = s"ifcfg-$inet.$tag"
val path = "/etc/sysconfig/network-scripts"
val fileContent =
s"""
|DEVICE=$inet.$tag
|BOOTPROTO=none
|ONBOOT=yes
|VLAN=yes
""".stripMargin
command(s"echo '$fileContent' | sudo tee $path/$fileName > /dev/null", tuple._1, true, Some(sudoer))
tuple._2.foreach {
ip =>
val mask = topology.getNetMask
val fileName = s"ifcfg-$inet.$tag:$index"
val path = "/etc/sysconfig/network-scripts"
val fileContent =
s"""
|DEVICE=${inet}.${tag}:${index}
|IPADDR=${ip}
|NETMASK=$mask
|ONBOOT=yes
""".stripMargin
command(s"echo '$fileContent' | sudo tee $path/$fileName > /dev/null", tuple._1, true, Some(sudoer))
command(s"sudo ifconfig $inet.$tag:$index $ip netmask $mask", tuple._1, true, Some(sudoer))
index += 1
}
}
case _ =>
val m = topology.getTopologyMap(ipMappingsOfPreparedOnly)
m.foreach {
tuple =>
var index = 0
tuple._2.foreach {
ip =>
command(s"sudo ifconfig $inet:$index $ip/${topology.getCidr} up", tuple._1, true, Some(sudoer))
if (persistent) {
val path = "/etc/sysconfig/network-scripts"
val fileName = s"ifcfg-$inet:$index"
val fileContent =
s"""
|DEVICE=$inet:$index
|IPADDR=$ip
|NETMASK=${topology.getNetMask}
|ONBOOT=yes
""".stripMargin
command(s"echo '$fileContent' | sudo tee $path/$fileName > /dev/null", tuple._1, true, Some(sudoer))
}
index += 1
}
}
}
}
def removeTlogs(ips: GenSeq[String] = ips.par) {
command(s"rm ${dataDirs.tlogDataDirs(0)}/*", ips, false)
}
/*def findIpToConnectWithToGrid : String = {
Await.result(Future.firstCompletedOf(ips map getIpInGrid), Duration.Inf)
}
def getIpInGrid(ipToCheckAgainst : String) : Future[String] = {
import java.net.NetworkInterface
import java.util
import collection.JavaConversions._
import collection.JavaConverters._
Future {
val interfaces: Seq[java.net.NetworkInterface] = util.Collections.list(NetworkInterface.getNetworkInterfaces())
val validInterfaceOpt = interfaces.collectFirst { case i if (command(s"ping -c 1 -I ${i.getName} $ipToCheckAgainst ; echo $$?").get.split("\\n").toList.last.trim == "0") => i}
validInterfaceOpt match {
case Some(validInterface) =>
validInterface.getInterfaceAddresses.asScala.collectFirst {
case inetAddr if (inetAddr.getAddress.getHostAddress.matches( """\\d+.\\d+.\\d+.\\d+""")) =>
inetAddr.getAddress.getHostAddress
}.get
}
}
}*/
def findIpToConnectWithToGrid: String = {
ips.foreach {
ip =>
val res = getIpInGrid(ip)
if (res.isDefined) return res.get
}
ips(0)
}
def getIpInGrid(ipToCheckAgainst: String): Option[String] = {
import java.net.NetworkInterface
import java.util
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
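    // Ping the given address through each local network interface; the first interface that
    // succeeds determines which of this machine's IPv4 addresses belongs to the grid network.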
val interfaces: Seq[java.net.NetworkInterface] = util.Collections.list(NetworkInterface.getNetworkInterfaces())
val validInterfaceOpt = interfaces.collectFirst { case i if (command(s"ping -c 1 -I ${i.getName} $ipToCheckAgainst ; echo $$?").get.split("\\n").toList.last.trim == "0") => i }
validInterfaceOpt match {
case Some(validInterface) =>
validInterface.getInterfaceAddresses.asScala.collectFirst {
case inetAddr if (inetAddr.getAddress.getHostAddress.matches( """\\d+.\\d+.\\d+.\\d+""")) =>
inetAddr.getAddress.getHostAddress
}
case None => None
}
}
def connectToGrid: Unit = connectToGrid()
def connectToGrid(ip: String = "") {
if (!Host.connectedToAkkaGrid) {
val useIp = if (ip == "") {
val uIp = findIpToConnectWithToGrid
info(s"Connecting to grid with ip: $uIp")
uIp
} else ip
AkkaGrid.setGridConnection(GridConnection(memberName = "CONS", clusterName = cn, hostName = useIp, port = 0, seeds = ips.take(3).map(seedIp => s"$seedIp:7777").toSet))
AkkaGrid.joinClient
CtrlClient.init(ips(0))
Host.connectedToAkkaGrid = true
Thread.sleep(5000)
}
}
def restartHaproxy(sudoer: Credentials) {
haProxy match {
case Some(HaProxy(host, sitedown)) =>
command("sudo service haproxy restart", Seq(host), true, Some(sudoer))
case None =>
}
}
def stopHaproxy(sudoer: Credentials) {
haProxy match {
case Some(HaProxy(host, sitedown)) =>
command("sudo service haproxy stop", Seq(host), true, Some(sudoer))
case None =>
}
}
def deployHaproxy(sudoer: Credentials) {
throw new Exception("deploy haproxy currently cancelled")
/*
haProxy match {
case Some(HaProxy(host, sitedown)) =>
command("sudo apt-get -q -y install haproxy", Seq(host), true, Some(sudoer))
val servers = ips.map(ip => s"""server $ip $ip:9000 check inter 10000 rise 5 fall 3""").mkString("\\n")
val content = ResourceBuilder.getResource("scripts/templates/haproxy.cfg", Map("cluster" -> cn, "sitedown" -> sitedown, "servers" -> servers))
createFile("/etc/haproxy/haproxy.cfg", content, Seq(host), true, Some(sudoer))
restartHaproxy(sudoer)
case None =>
}
*/
}
def getClusterStatus: ClusterStatus = {
connectToGrid
Await.result(Host.ctrl.getClusterStatus, 30 seconds)
}
private val elkImageName = "cmwell-elk"
private val elkContainerName = "cmwell-elk-container"
private val elkClusterNameSuffix = "elk"
private val elkDirName = "elk"
private val elkEsWebPort = 9220
private val elkEsTransportPort = 9320
private val elkWebPort = 8080
def deployElk: Unit = {
???
info(s"copying files to remote hosts.")
ips.par.foreach {
ip =>
info(s"copying files to $ip")
command(s"rsync -Paz scripts/docker-elk $user@$ip:${instDirs.intallationDir}/app/")
}
info(s"creating docker image")
ips.par.foreach {
ip =>
val res = command(s"sudo cd ${instDirs.intallationDir}/app/docker-elk/; sudo docker build -t $elkImageName .", ip, true)
if (res.isSuccess)
info(s"image was created at $ip")
else
info(s"failed to create image at $ip")
}
info("creating elk log directory")
command(s"mkdir -p ${instDirs.intallationDir}/log/$elkDirName", ips, false)
}
def createLogstashConfig: Unit = {
info("creating logstash config file")
ips.par.foreach {
ip =>
createLogstashConfFile(s"$ip:$elkEsWebPort", Seq(ip))
}
}
def startElk: Unit = {
def getSeeds: String = {
ips.take(3).map(ip => s"$ip:$elkEsTransportPort").mkString(",")
}
//docker run -e elk_cluster='docker-elk' -v /home/michael/me/projects/elk-docker/conf:/etc/logstash -v /home/michael/app/cm-well/log:/cm-well/log -p 8080:80 -p 9200:9220 -p 9300:9320 elk
//command(s"docker run -d --net=host --name=$elkContainerName -e elk_cluster='$cn-$elkClusterNameSuffix' -e elk_hosts='$getSeeds' -v ${instDirs.intallationDir}/conf/logstash/:/etc/logstash -v ${instDirs.intallationDir}/log:/opt/cm-well/log -v ${instDirs.intallationDir}/log/$elkDirName:/usr/share/elasticsearch/data -p $elkWebPort:80 -p $elkEsWebPort:$elkEsWebPort -p $elkEsTransportPort:$elkEsTransportPort $elkImageName", ips, true)
???
}
def stopElk: Unit = {
//command(s"docker rm -f $elkContainerName", ips, true)
???
}
def removeOldPackages(hosts: GenSeq[String] = ips): Unit = {
val packs = deployment.componentProps.filter(_.symLinkName.isDefined)
val loc = instDirs.globalLocation
for {
host <- hosts
pack <- packs
} {
val target = pack.targetLocation
val compName = pack.getName
val symLinkName = pack.symLinkName.get
val currentPack = command(s"readlink -e $loc/cm-well/$target/$symLinkName | xargs basename", host, false).get.trim
val com = s"ls -1 $loc/cm-well/$target | grep $compName | grep -v $currentPack | xargs -I zzz rm -rf $loc/cm-well/$target/zzz"
command(com, host, false)
}
}
def syncLib(hosts: GenSeq[String] = ips) = {
def getCurrentDateStr = {
val format = new java.text.SimpleDateFormat("yyyyMMdd_hhmmss")
val date = new Date()
format.format(date)
}
val currentDate = getCurrentDateStr
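      // Snapshot the currently linked lib directory into lib-<timestamp> using hard links (cp -al),
      // repoint the "lib" symlink at the new snapshot, and rsync the fresh jars into it;
      // earlier lib-<timestamp> directories are left untouched.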
hosts.foreach {
host =>
val comStr =
s"""test -L ${instDirs.globalLocation}/cm-well/lib &&
|cp -al `readlink ${instDirs.globalLocation}/cm-well/lib`/ ${instDirs.globalLocation}/cm-well/lib-$currentDate/ ||
|mkdir -p ${instDirs.globalLocation}/cm-well/lib-$currentDate""".stripMargin
command(comStr, host, false)
command(s"test -L ${instDirs.globalLocation}/cm-well/lib && rm ${instDirs.globalLocation}/cm-well/lib", host, false)
command(s"ln -s ${instDirs.globalLocation}/cm-well/lib-$currentDate ${instDirs.globalLocation}/cm-well/lib", host, false)
rsync("lib/", s"${instDirs.globalLocation}/cm-well/lib/", Seq(host))
}
}
def linkLibs(hosts: GenSeq[String] = ips.par) = {
val dir = new File("dependencies")
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/dependencies", hosts, false)
rsync(s"dependencies/", s"${instDirs.globalLocation}/cm-well/dependencies/", hosts)
dir.listFiles().toVector.par.foreach {
file =>
linkLib(file.getName, hosts)
}
}
def linkLib(component: String, hosts: GenSeq[String] = ips) = {
val target = component //if(component == "batch") "bg" else component
//val content = Source.fromFile(s"dependencies/$component").getLines().toVector
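    // Rebuild the component's lib directory as symlinks into the shared cm-well/lib pool,
    // using the jar list recorded in the dependencies/<component> file.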
command(s"rm ${instDirs.globalLocation}/cm-well/app/$target/lib/* > /dev/null 2> /dev/null", hosts, false)
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/app/$target/lib", hosts, false)
hosts.foreach {
host =>
command(s"cat ${instDirs.globalLocation}/cm-well/dependencies/$component | xargs -I zzz ln -s ${instDirs.globalLocation}/cm-well/lib/zzz ${instDirs.globalLocation}/cm-well/app/$target/lib/zzz", host, false)
}
}
sys.addShutdownHook {
Try(k.grid.Grid.shutdown)
}
def rsyncPlugins(hosts: GenSeq[String] = ips) = {
command(s"mkdir -p ${instDirs.globalLocation}/cm-well/app/ws/plugins/sg-engines/", hosts, false)
rsync(s"plugins/", s"${instDirs.globalLocation}/cm-well/app/ws/plugins/sg-engines/", hosts)
}
}
|
nruppin/CM-Well
|
server/cmwell-cons/src/main/scala/ctl.scala
|
Scala
|
apache-2.0
| 101,173 |
package akka.contrib.persistence.mongodb
import java.util.concurrent.atomic.AtomicBoolean
import akka.actor.Actor
import akka.pattern.{CircuitBreakerOpenException, CircuitBreaker}
import scala.collection.immutable
import akka.persistence.journal.AsyncWriteJournal
import akka.persistence.{AtomicWrite, PersistentRepr}
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
import nl.grons.metrics.scala.InstrumentedBuilder
import nl.grons.metrics.scala.Timer
import scala.util.Try
import scala.concurrent.duration._
class MongoJournal extends AsyncWriteJournal {
private[this] val impl = MongoPersistenceExtension(context.system).journaler
private[this] implicit val ec = context.dispatcher
/**
* Plugin API: asynchronously writes a batch (`Seq`) of persistent messages to the
* journal.
*
* The batch is only for performance reasons, i.e. all messages don't have to be written
* atomically. Higher throughput can typically be achieved by using batch inserts of many
   * records compared to inserting records one-by-one, but this aspect depends on the
   * underlying data store and a journal implementation can implement it as efficiently as
* possible with the assumption that the messages of the batch are unrelated.
*
* Each `AtomicWrite` message contains the single `PersistentRepr` that corresponds to
* the event that was passed to the `persist` method of the `PersistentActor`, or it
   * contains several `PersistentRepr` that correspond to the events that were passed
* to the `persistAll` method of the `PersistentActor`. All `PersistentRepr` of the
* `AtomicWrite` must be written to the data store atomically, i.e. all or none must
* be stored. If the journal (data store) cannot support atomic writes of multiple
* events it should reject such writes with a `Try` `Failure` with an
* `UnsupportedOperationException` describing the issue. This limitation should
* also be documented by the journal plugin.
*
* If there are failures when storing any of the messages in the batch the returned
* `Future` must be completed with failure. The `Future` must only be completed with
* success when all messages in the batch have been confirmed to be stored successfully,
* i.e. they will be readable, and visible, in a subsequent replay. If there is
* uncertainty about if the messages were stored or not the `Future` must be completed
* with failure.
*
* Data store connection problems must be signaled by completing the `Future` with
* failure.
*
* The journal can also signal that it rejects individual messages (`AtomicWrite`) by
* the returned `immutable.Seq[Try[Unit]]`. The returned `Seq` must have as many elements
* as the input `messages` `Seq`. Each `Try` element signals if the corresponding
* `AtomicWrite` is rejected or not, with an exception describing the problem. Rejecting
* a message means it was not stored, i.e. it must not be included in a later replay.
* Rejecting a message is typically done before attempting to store it, e.g. because of
* serialization error.
*
* Data store connection problems must not be signaled as rejections.
*
* Note that it is possible to reduce number of allocations by
* caching some result `Seq` for the happy path, i.e. when no messages are rejected.
*/
override def asyncWriteMessages(messages: immutable.Seq[AtomicWrite]): Future[immutable.Seq[Try[Unit]]] =
impl.batchAppend(messages)
/**
* Plugin API: asynchronously deletes all persistent messages up to `toSequenceNr`
* (inclusive).
*/
override def asyncDeleteMessagesTo(persistenceId: String, toSequenceNr: Long): Future[Unit] =
impl.deleteFrom(persistenceId, toSequenceNr)
/**
* Plugin API
*
* Allows plugin implementers to use `f pipeTo self` and
* handle additional messages for implementing advanced features
*/
override def receivePluginInternal: Actor.Receive = Actor.emptyBehavior // No advanced features yet. Stay tuned!
/**
* Plugin API: asynchronously replays persistent messages. Implementations replay
* a message by calling `replayCallback`. The returned future must be completed
* when all messages (matching the sequence number bounds) have been replayed.
* The future must be completed with a failure if any of the persistent messages
* could not be replayed.
*
* The `replayCallback` must also be called with messages that have been marked
* as deleted. In this case a replayed message's `deleted` method must return
* `true`.
*
* The channel ids of delivery confirmations that are available for a replayed
* message must be contained in that message's `confirms` sequence.
*
* @param processorId processor id.
* @param fromSequenceNr sequence number where replay should start (inclusive).
* @param toSequenceNr sequence number where replay should end (inclusive).
* @param max maximum number of messages to be replayed.
* @param replayCallback called to replay a single message. Can be called from any
* thread.
*/
override def asyncReplayMessages(processorId: String, fromSequenceNr: Long, toSequenceNr: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit): Future[Unit] =
impl.replayJournal(processorId, fromSequenceNr, toSequenceNr, max)(replayCallback)
/**
* Plugin API: asynchronously reads the highest stored sequence number for the
* given `processorId`.
*
* @param processorId processor id.
* @param fromSequenceNr hint where to start searching for the highest sequence
* number.
*/
override def asyncReadHighestSequenceNr(processorId: String, fromSequenceNr: Long): Future[Long] =
impl.maxSequenceNr(processorId, fromSequenceNr)
}
trait JournallingFieldNames {
final val PROCESSOR_ID = "pid"
final val SEQUENCE_NUMBER = "sn"
final val CONFIRMS = "cs"
final val DELETED = "dl"
final val SERIALIZED = "pr"
final val PayloadKey = "p"
final val SenderKey = "s"
final val RedeliveriesKey = "r"
final val ConfirmableKey = "c"
final val ConfirmMessageKey = "cm"
final val ConfirmTargetKey = "ct"
final val VERSION = "v"
final val EVENTS = "events"
final val FROM = "from"
final val TO = "to"
final val MANIFEST = "manifest"
final val WRITER_UUID = "_w"
final val TYPE = "_t"
final val HINT = "_h"
}
object JournallingFieldNames extends JournallingFieldNames
trait MongoPersistenceJournallingApi {
private[mongodb] def batchAppend(writes: immutable.Seq[AtomicWrite])(implicit ec: ExecutionContext): Future[immutable.Seq[Try[Unit]]]
private[mongodb] def deleteFrom(persistenceId: String, toSequenceNr: Long)(implicit ec: ExecutionContext): Future[Unit]
private[mongodb] def replayJournal(pid: String, from: Long, to: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit)(implicit ec: ExecutionContext): Future[Unit]
private[mongodb] def maxSequenceNr(pid: String, from: Long)(implicit ec: ExecutionContext): Future[Long]
}
trait MongoPersistenceJournalFailFast extends MongoPersistenceJournallingApi {
private[mongodb] val breaker: CircuitBreaker
private lazy val cbOpen = {
val ab = new AtomicBoolean(false)
breaker.onOpen(ab.set(true))
breaker.onHalfOpen(ab.set(false))
breaker.onClose(ab.set(false))
ab
}
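  // cbOpen mirrors the breaker state so that replayJournal can fail fast synchronously
  // (via onlyWhenClosed) instead of going through the asynchronous circuit breaker;
  // the replay callback itself is still wrapped with withSyncCircuitBreaker.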
private def onlyWhenClosed[A](thunk: => A) = {
if (cbOpen.get()) throw new CircuitBreakerOpenException(0.seconds)
else thunk
}
private[mongodb] abstract override def batchAppend(writes: immutable.Seq[AtomicWrite])(implicit ec: ExecutionContext): Future[immutable.Seq[Try[Unit]]] =
breaker.withCircuitBreaker(super.batchAppend(writes))
private[mongodb] abstract override def deleteFrom(persistenceId: String, toSequenceNr: Long)(implicit ec: ExecutionContext): Future[Unit] =
breaker.withCircuitBreaker(super.deleteFrom(persistenceId, toSequenceNr))
private[mongodb] abstract override def replayJournal(pid: String, from: Long, to: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit)(implicit ec: ExecutionContext) =
    onlyWhenClosed(super.replayJournal(pid, from, to, max)(repr => breaker.withSyncCircuitBreaker(replayCallback(repr))))
private[mongodb] abstract override def maxSequenceNr(pid: String, from: Long)(implicit ec: ExecutionContext) =
breaker.withCircuitBreaker(super.maxSequenceNr(pid,from))
}
trait MongoPersistenceJournalMetrics extends MongoPersistenceJournallingApi with InstrumentedBuilder {
val metricRegistry = MongoPersistenceDriver.registry
def driverName: String
private def fullyQualifiedName(metric: String, metricType: String) = s"akka-persistence-mongo.journal.$driverName.$metric-$metricType"
private def timerName(metric: String) = fullyQualifiedName(metric,"timer")
private def histName(metric: String) = fullyQualifiedName(metric, "histo")
// Timers
private lazy val appendTimer = metrics.timer(timerName("write.append"))
private lazy val deleteTimer = metrics.timer(timerName("write.delete-range"))
private lazy val replayTimer = metrics.timer(timerName("read.replay"))
private lazy val maxTimer = metrics.timer(timerName("read.max-seq"))
// Histograms
private lazy val writeBatchSize = metrics.histogram(histName("write.append.batch-size"))
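  // Times an asynchronous block: the timer context is opened before the block runs and
  // stopped when the returned Future completes, successfully or not.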
private def timeIt[A](timer: Timer)(block: => Future[A])(implicit ec: ExecutionContext): Future[A] = {
val ctx = timer.timerContext()
val result = block
result.onComplete(_ => ctx.stop())
result
}
private[mongodb] abstract override def batchAppend(writes: immutable.Seq[AtomicWrite])(implicit ec: ExecutionContext): Future[immutable.Seq[Try[Unit]]] = timeIt (appendTimer) {
writeBatchSize += writes.map(_.size).sum
super.batchAppend(writes)
}
private[mongodb] abstract override def deleteFrom(persistenceId: String, toSequenceNr: Long)(implicit ec: ExecutionContext): Future[Unit] = timeIt (deleteTimer) {
super.deleteFrom(persistenceId, toSequenceNr)
}
private[mongodb] abstract override def replayJournal(pid: String, from: Long, to: Long, max: Long)(replayCallback: PersistentRepr ⇒ Unit)(implicit ec: ExecutionContext): Future[Unit]
= timeIt (replayTimer) { super.replayJournal(pid, from, to, max)(replayCallback) }
private[mongodb] abstract override def maxSequenceNr(pid: String, from: Long)(implicit ec: ExecutionContext): Future[Long]
= timeIt (maxTimer) { super.maxSequenceNr(pid, from) }
}
|
twillouer/akka-persistence-mongo
|
common/src/main/scala/akka/contrib/persistence/mongodb/MongoJournal.scala
|
Scala
|
apache-2.0
| 10,480 |
/*
* Copyright 2009-2016 DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*
*/
package org.mrgeo.mapalgebra
import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import org.apache.spark.{SparkConf, SparkContext}
import org.mrgeo.data.rdd.VectorRDD
import org.mrgeo.data.vector.FeatureIdWritable
import org.mrgeo.geometry.GeometryFactory
import org.mrgeo.image.MrsPyramidMetadata
import org.mrgeo.job.JobArguments
import org.mrgeo.mapalgebra.parser.{ParserException, ParserNode}
import org.mrgeo.mapalgebra.raster.RasterMapOp
import org.mrgeo.mapalgebra.vector.VectorMapOp
object LeastCostPathMapOp extends MapOpRegistrar {
override def register: Array[String] = {
Array[String]("leastCostPath", "lcp")
}
override def apply(node:ParserNode, variables: String => Option[ParserNode]): MapOp =
new LeastCostPathMapOp(node, variables)
}
class LeastCostPathMapOp extends VectorMapOp with Externalizable
{
var costDistanceMapOp: Option[RasterMapOp] = None
var costDistanceMetadata: MrsPyramidMetadata = null
var pointsMapOp: Option[VectorMapOp] = None
var zoom: Int = -1
var vectorrdd: Option[VectorRDD] = None
private[mapalgebra] def this(node: ParserNode, variables: String => Option[ParserNode]) = {
this()
if (node.getNumChildren != 2 && node.getNumChildren != 3)
{
throw new ParserException(
"LeastCostPath takes the following arguments ([cost zoom level], cost raster, destination points")
}
var nodeIndex: Int = 0
if (node.getNumChildren == 3)
{
zoom = MapOp.decodeInt(node.getChild(nodeIndex), variables).getOrElse(
throw new ParserException("Invalid zoom specified for least cost path: " +
MapOp.decodeString(node.getChild(nodeIndex), variables))
)
nodeIndex += 1
}
costDistanceMapOp = RasterMapOp.decodeToRaster(node.getChild(nodeIndex), variables)
nodeIndex += 1
pointsMapOp = VectorMapOp.decodeToVector(node.getChild(nodeIndex), variables)
}
override def registerClasses(): Array[Class[_]] = {
GeometryFactory.getClasses ++ Array[Class[_]](classOf[FeatureIdWritable])
}
override def setup(job: JobArguments, conf: SparkConf): Boolean = {
// conf.set("spark.kryo.registrationRequired", "true")
true
}
override def teardown(job: JobArguments, conf: SparkConf): Boolean = true
override def execute(context: SparkContext): Boolean = {
//var destPoints: String = null
costDistanceMetadata =
costDistanceMapOp.getOrElse(throw new IOException("Invalid cost distance input")).
metadata().getOrElse(throw new IOException("Missing metadata for cost distance input"))
if (zoom < 0)
{
zoom = costDistanceMetadata.getMaxZoomLevel
}
//TODO: Need to instantiate and run LeastCostPathCalculator here
// It currently writes the output tsv file directly. That should ideally
// be done by the VectorDataProvider, and the LCP calculator (and this map op)
// should only create a VectorRDD
val cdrdd = costDistanceMapOp.getOrElse(throw new IOException("Invalid cost distance input"))
.rdd(zoom).getOrElse(throw new IOException("Invalid RDD for cost distance input"))
val destrdd = pointsMapOp.getOrElse(throw new IOException("Invalid points input"))
.rdd().getOrElse(throw new IOException("Invalid RDD for points input"))
vectorrdd = Some(LeastCostPathCalculator.run(cdrdd, costDistanceMetadata, zoom, destrdd, context))
true
}
override def readExternal(in: ObjectInput): Unit = {
zoom = in.readInt()
}
override def writeExternal(out: ObjectOutput): Unit = {
out.writeInt(zoom)
}
override def rdd(): Option[VectorRDD] = vectorrdd
}
|
akarmas/mrgeo
|
mrgeo-mapalgebra/mrgeo-mapalgebra-costdistance/src/main/scala/org/mrgeo/mapalgebra/LeastCostPathMapOp.scala
|
Scala
|
apache-2.0
| 4,222 |
package org.jetbrains.plugins.scala
package lang
package autoImport
import java.io.File
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.testFramework.UsefulTestCase
import org.jetbrains.plugins.scala.annotator.intention.ScalaImportTypeFix
import org.jetbrains.plugins.scala.annotator.intention.ScalaImportTypeFix.ClassTypeToImport
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.util.ScalaUtils
/**
* User: Alexander Podkhalyuzin
* Date: 15.03.2009
*/
abstract class AutoImportTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter {
private val refMarker = "/*ref*/"
protected def folderPath = baseRootPath() + "autoImport/"
protected override def rootPath(): String = folderPath
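  // Each test fixture marks the unresolved reference with /*ref*/ and stores the expected
  // file text (after the import has been added) in the trailing comment of the fixture file.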
protected def doTest() {
import junit.framework.Assert._
val filePath = folderPath + getTestName(false) + ".scala"
val file = LocalFileSystem.getInstance.refreshAndFindFileByPath(filePath.replace(File.separatorChar, '/'))
assert(file != null, "file " + filePath + " not found")
val fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
configureFromFileTextAdapter(getTestName(false) + ".scala", fileText)
val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
val offset = fileText.indexOf(refMarker)
val refOffset = offset + refMarker.length
assert(offset != -1, "Not specified ref marker in test case. Use /*ref*/ in scala file for this.")
val ref: ScReferenceElement = PsiTreeUtil.
getParentOfType(scalaFile.findElementAt(refOffset), classOf[ScReferenceElement])
assert(ref != null, "Not specified reference at marker.")
ref.resolve() match {
case null =>
case _ => assert(assertion = false, message = "Reference must be unresolved.")
}
val classes = ScalaImportTypeFix.getTypesToImport(ref, getProjectAdapter)
assert(classes.length > 0, "Haven't classes to import")
var res: String = null
val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
try {
ScalaUtils.runWriteAction(new Runnable {
def run() {
classes(0) match {
case ClassTypeToImport(clazz) =>
org.jetbrains.plugins.scala.annotator.intention.ScalaImportTypeFix.
getImportHolder(ref, getProjectAdapter).addImportForClass(clazz)
case ta =>
org.jetbrains.plugins.scala.annotator.intention.ScalaImportTypeFix.
getImportHolder(ref, getProjectAdapter).addImportForPath(ta.qualifiedName, ref)
}
UsefulTestCase.doPostponedFormatting(getProjectAdapter)
}
}, getProjectAdapter, "Test")
      res = scalaFile.getText.substring(0, lastPsi.getTextOffset).trim // getImportStatements.map(_.getText()).mkString("\n")
assert(ref.resolve != null, "reference is unresolved after import action")
}
catch {
case e: Exception =>
println(e)
assert(assertion = false, message = e.getMessage + "\n" + e.getStackTrace)
}
val text = lastPsi.getText
val output = lastPsi.getNode.getElementType match {
case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
text.substring(2, text.length - 2).trim
case _ =>
assertTrue("Test result must be in last comment statement.", false)
""
}
assertEquals(output, res)
}
}
|
LPTK/intellij-scala
|
test/org/jetbrains/plugins/scala/lang/autoImport/AutoImportTestBase.scala
|
Scala
|
apache-2.0
| 3,886 |
/**
* Copyright (C) 2012 Typesafe, Inc. <http://www.typesafe.com>
*/
package org.pantsbuild.zinc
import java.io.File
import java.util.{ List => JList, Map => JMap }
import sbt.compiler.IC
import sbt.inc.{ Analysis, Locate }
import sbt.Path._
import scala.collection.JavaConverters._
import xsbti.compile.CompileOrder
/**
* All inputs for a compile run.
*/
case class Inputs(
classpath: Seq[File],
sources: Seq[File],
classesDirectory: File,
scalacOptions: Seq[String],
javacOptions: Seq[String],
cacheFile: File,
analysisMap: Map[File, Analysis],
forceClean: Boolean,
definesClass: File => String => Boolean,
javaOnly: Boolean,
compileOrder: CompileOrder,
incOptions: IncOptions,
outputRelations: Option[File],
outputProducts: Option[File],
mirrorAnalysis: Boolean)
object Inputs {
/**
* Create inputs based on command-line settings.
*/
def apply(log: LoggerRaw, settings: Settings): Inputs = {
import settings._
inputs(
log,
classpath,
sources,
classesDirectory,
scalacOptions,
javacOptions,
analysis.cache,
analysis.cacheMap,
analysis.forceClean,
javaOnly,
compileOrder,
incOptions,
analysis.outputRelations,
analysis.outputProducts,
analysis.mirrorAnalysis)
}
/** An overridden definesClass to use analysis for an input directory if it is available. */
def definesClass(log: LoggerRaw, analysisMap: Map[File, Analysis], entry: File): String => Boolean =
analysisMap.get(entry).map { analysis =>
log.debug(s"Hit analysis cache for class definitions with ${entry}")
(s: String) => analysis.relations.definesClass(s).nonEmpty
}.getOrElse {
Locate.definesClass(entry)
}
/**
* Create normalised and defaulted Inputs.
*/
def inputs(
log: LoggerRaw,
classpath: Seq[File],
sources: Seq[File],
classesDirectory: File,
scalacOptions: Seq[String],
javacOptions: Seq[String],
analysisCache: Option[File],
analysisCacheMap: Map[File, File],
forceClean: Boolean,
javaOnly: Boolean,
compileOrder: CompileOrder,
incOptions: IncOptions,
outputRelations: Option[File],
outputProducts: Option[File],
mirrorAnalysis: Boolean): Inputs =
{
val normalise: File => File = { _.getAbsoluteFile }
val cp = classpath map normalise
val srcs = sources map normalise
val classes = normalise(classesDirectory)
val cacheFile = normalise(analysisCache.getOrElse(defaultCacheLocation(classesDirectory)))
val upstreamAnalysis = analysisCacheMap map { case (k, v) => (normalise(k), normalise(v)) }
// Use only existing upstream analysis files for class lookups.
val validUpstreamAnalysis =
upstreamAnalysis.flatMap {
case (k, _) if k == classes =>
// ignore our own analysis
None
case (k, v) =>
// use analysis only if it was valid/non-empty
Compiler.analysisOption(v).map { analysis =>
k -> analysis
}
}
val analysisMap = (cp map { file => (file, allAnalysisFor(file, classes, upstreamAnalysis)) }).toMap
val incOpts = updateIncOptions(incOptions, classesDirectory, normalise)
val printRelations = outputRelations map normalise
val printProducts = outputProducts map normalise
new Inputs(
cp, srcs, classes, scalacOptions, javacOptions, cacheFile, analysisMap, forceClean, definesClass(log, validUpstreamAnalysis, _),
javaOnly, compileOrder, incOpts, printRelations, printProducts, mirrorAnalysis
)
}
/**
* Java API for creating Inputs.
*/
def create(
log: LoggerRaw,
classpath: JList[File],
sources: JList[File],
classesDirectory: File,
scalacOptions: JList[String],
javacOptions: JList[String],
analysisCache: File,
analysisMap: JMap[File, File],
compileOrder: String,
incOptions: IncOptions,
mirrorAnalysisCache: Boolean): Inputs =
inputs(
log,
classpath.asScala,
sources.asScala,
classesDirectory,
scalacOptions.asScala,
javacOptions.asScala,
Option(analysisCache),
analysisMap.asScala.toMap,
forceClean = false,
javaOnly = false,
Settings.compileOrder(compileOrder),
incOptions,
outputRelations = None,
outputProducts = None,
mirrorAnalysis = mirrorAnalysisCache
)
@deprecated("Use the variant that takes `incOptions` parameter, instead.", "0.3.5.3")
def create(
log: LoggerRaw,
classpath: JList[File],
sources: JList[File],
classesDirectory: File,
scalacOptions: JList[String],
javacOptions: JList[String],
analysisCache: File,
analysisMap: JMap[File, File],
compileOrder: String,
mirrorAnalysisCache: Boolean): Inputs =
create(log, classpath, sources, classesDirectory, scalacOptions, javacOptions,
analysisCache, analysisMap, compileOrder, IncOptions(), mirrorAnalysisCache)
/**
* By default the cache location is relative to the classes directory (for example, target/classes/../cache/classes).
*/
def defaultCacheLocation(classesDir: File) = {
classesDir.getParentFile / "cache" / classesDir.getName
}
/**
* Get the possible cache location for a classpath entry. Checks the upstream analysis map
* for the cache location, otherwise uses the default location for output directories.
*/
def cacheFor(file: File, exclude: File, mapped: Map[File, File]): Option[File] = {
mapped.get(file) orElse {
if (file.isDirectory && file != exclude) Some(defaultCacheLocation(file)) else None
}
}
/**
* Get the analysis for a compile run, based on a classpath entry.
* If not cached in memory, reads from the cache file, or creates empty analysis.
*/
def allAnalysisFor(file: File, exclude: File, mapped: Map[File, File]): Analysis = {
cacheFor(file, exclude, mapped) map Compiler.analysis getOrElse Analysis.Empty
}
/**
* Normalise files and default the backup directory.
*/
def updateIncOptions(incOptions: IncOptions, classesDir: File, normalise: File => File): IncOptions = {
incOptions.copy(
apiDumpDirectory = incOptions.apiDumpDirectory map normalise,
backup = getBackupDirectory(incOptions, classesDir, normalise)
)
}
/**
* Get normalised, default if not specified, backup directory. If transactional.
*/
def getBackupDirectory(incOptions: IncOptions, classesDir: File, normalise: File => File): Option[File] = {
if (incOptions.transactional)
Some(normalise(incOptions.backup.getOrElse(defaultBackupLocation(classesDir))))
else
None
}
/**
* By default the backup location is relative to the classes directory (for example, target/classes/../backup/classes).
*/
def defaultBackupLocation(classesDir: File) = {
classesDir.getParentFile / "backup" / classesDir.getName
}
/**
* Verify inputs and update if necessary.
* Currently checks that the cache file is writable.
*/
def verify(inputs: Inputs): Inputs = {
inputs.copy(cacheFile = verifyCacheFile(inputs.cacheFile, inputs.classesDirectory))
}
/**
* Check that the cache file is writable.
* If not writable then the fallback is within the zinc cache directory.
*
*/
def verifyCacheFile(cacheFile: File, classesDir: File): File = {
if (Util.checkWritable(cacheFile)) cacheFile
else Setup.zincCacheDir / "analysis-cache" / Util.pathHash(classesDir)
}
/**
* Debug output for inputs.
*/
def debug(inputs: Inputs, log: xsbti.Logger): Unit = {
show(inputs, s => log.debug(sbt.Logger.f0(s)))
}
/**
* Debug output for inputs.
*/
def show(inputs: Inputs, output: String => Unit): Unit = {
import inputs._
val incOpts = Seq(
"transitive step" -> incOptions.transitiveStep,
"recompile all fraction" -> incOptions.recompileAllFraction,
"debug relations" -> incOptions.relationsDebug,
"debug api" -> incOptions.apiDebug,
"api dump" -> incOptions.apiDumpDirectory,
"api diff context size" -> incOptions.apiDiffContextSize,
"transactional" -> incOptions.transactional,
"backup directory" -> incOptions.backup,
"recompile on macro def" -> incOptions.recompileOnMacroDef,
"name hashing" -> incOptions.nameHashing
)
val values = Seq(
"classpath" -> classpath,
"sources" -> sources,
"output directory" -> classesDirectory,
"scalac options" -> scalacOptions,
"javac options" -> javacOptions,
"cache file" -> cacheFile,
"analysis map" -> analysisMap,
"force clean" -> forceClean,
"java only" -> javaOnly,
"compile order" -> compileOrder,
"incremental compiler options" -> incOpts,
"output relations" -> outputRelations,
"output products" -> outputProducts)
Util.show(("Inputs", values), output)
}
}
|
pgroudas/pants
|
src/scala/org/pantsbuild/zinc/Inputs.scala
|
Scala
|
apache-2.0
| 9,194 |
package com.chrisrebert.lmvtfy.validation
import java.util.regex.Pattern
package object markdown {
private object MarkdownMessagePart {
private val uberCodeQuote = "````"
private val tooManyBackticks = Pattern.compile(uberCodeQuote + "+")
}
implicit class MarkdownMessagePart(part: MessagePart) {
import MarkdownMessagePart._
def markdown: String = {
// escape backticks
part match {
        case PlainText(plain) => plain.replace("`", "\\`")
case CodeText(code) => {
val sanitized = tooManyBackticks.matcher(code).replaceAll("[backticks]")
uberCodeQuote + s" ${sanitized} " + uberCodeQuote
}
case _:Link => "" // ignoring links for now
}
}
}
implicit class MarkdownValidationMessage(msg: ValidationMessage) {
def markdown: String = {
msg.locationSpan.map{ _.toString + ": " }.getOrElse("") + msg.parts.map{ _.markdown }.mkString
}
}
}
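// Hedged usage sketch (not part of the original file): assuming PlainText/CodeText parts from
// the enclosing validation package, the implicit classes above render roughly as follows --
// backticks in plain text get escaped with a backslash, and runs of four or more backticks
// inside code are collapsed to "[backticks]" so they cannot terminate the ```` fence:
//
//   PlainText("use `alt` here").markdown   // backticks escaped: use \`alt\` here
//   CodeText("<div>`````</div>").markdown  // => "```` <div>[backticks]</div> ````"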
|
cvrebert/lmvtfy
|
src/main/scala/com/chrisrebert/lmvtfy/validation/markdown/package.scala
|
Scala
|
mit
| 948 |
//#imports
import com.twitter.zipkin.conversions.thrift._
import com.twitter.finagle.Http
import com.twitter.server.TwitterServer
import com.twitter.util.Await
import com.twitter.zipkin.cassandra.CassandraSpanStoreFactory
import com.twitter.zipkin.collector.SpanReceiver
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.{thriftscala => thrift}
import com.twitter.zipkin.receiver.scribe.ScribeSpanReceiverFactory
import com.twitter.zipkin.web.ZipkinWebFactory
import com.twitter.zipkin.query.ThriftQueryService
import com.twitter.zipkin.query.constants.DefaultAdjusters
//#imports
//#web_main
object WebMain extends TwitterServer
with ZipkinWebFactory
with CassandraSpanStoreFactory
{
override def newQueryClient(): ZipkinQuery.FutureIface =
new ThriftQueryService(newCassandraSpanStore(), adjusters = DefaultAdjusters)
def main() {
val web = Http.serve(webServerPort(), newWebServer())
closeOnExit(web)
Await.ready(web)
}
}
//#web_main
//#collector_main
object CollectorMain extends TwitterServer
with ZipkinCollectorFactory
with CassandraSpanStoreFactory
with ScribeSpanReceiverFactory
{
def main() {
val store = newCassandraSpanStore()
val convert: Seq[thrift.Span] => Seq[Span] = { _.map(_.toSpan) }
val receiver = newScribeSpanReceiver(convert andThen store)
closeOnExit(receiver)
Await.ready(receiver)
}
}
//#collector_main
|
jstanier/zipkin
|
doc/src/sphinx/code/quickstart/WebAndCollector.scala
|
Scala
|
apache-2.0
| 1,403 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.nlp.hcoref
import cc.factorie.infer.SettingsSampler
import cc.factorie.variable.{DiffList, SettingIterator}
/**
* User: escher, John Sullivan, akobren
* Date: 10/28/13
*
*/
trait MoveSettingIterator[Vars <: NodeVariables[Vars]] extends SettingIterator{
def moves:IndexedSeq[Move[Vars]]
var i = 0
def hasNext = i < moves.size
def next(diff:DiffList) = {val d = newDiffList; moves(i).perform(d); i += 1; d}
def reset = i = 0
}
trait MoveGenerator[Vars <: NodeVariables[Vars]] {
this: SettingsSampler[(Node[Vars], Node[Vars])] =>
def newInstance(implicit d:DiffList):Node[Vars]
}
trait Move[Vars <: NodeVariables[Vars]] {
def name: String
def perform(d:DiffList):Unit
def isSymmetric(node1:Node[Vars], node2:Node[Vars]): Boolean // is the move symmetric for this pair of nodes?
def isValid(node1: Node[Vars], node2:Node[Vars]): Boolean
def operation(node1: Node[Vars], node2:Node[Vars])(d:DiffList): DiffList
final def apply(node1:Node[Vars], node2:Node[Vars])(d:DiffList):DiffList = Option(d) match {
case Some(diff) => operation(node1, node2)(diff)
case None => operation(node1, node2)(new DiffList)
}
}
class NoMove[Vars <: NodeVariables[Vars]] extends Move[Vars] {
def name = "No Move"
  def perform(d:DiffList): Unit = ()
def isSymmetric(node1: Node[Vars], node2: Node[Vars]): Boolean = true
def isValid(node1: Node[Vars], node2: Node[Vars]): Boolean = true
def operation(node1: Node[Vars], node2: Node[Vars])(d:DiffList) = {
d
}
}
class MergeLeft[Vars <: NodeVariables[Vars]](val left:Node[Vars], val right:Node[Vars]) extends Move[Vars] {
def this() = this(null.asInstanceOf[Node[Vars]], null.asInstanceOf[Node[Vars]])
def perform(d:DiffList) {
operation(right, left)(d)
}
def name = "Merge Left"
def isValid(right: Node[Vars], left: Node[Vars]) = right.root != left.root && !left.isMention && left.mentionCountVar.value >= right.mentionCountVar.value
def isSymmetric(node1: Node[Vars], node2: Node[Vars]): Boolean = false
def operation(right: Node[Vars], left: Node[Vars])(d:DiffList) = {
right.alterParent(Option(left))(d)
d
}
}
class SplitRight[Vars <: NodeVariables[Vars]](val left:Node[Vars], val right:Node[Vars]) extends Move[Vars] {
def this() = this(null.asInstanceOf[Node[Vars]], null.asInstanceOf[Node[Vars]])
def perform(d:DiffList) {
operation(right, left)(d)
}
def name = "Split Right"
def isValid(right: Node[Vars], left: Node[Vars]): Boolean = left.root == right.root && right.mentionCountVar.value >= left.mentionCountVar.value
def isSymmetric(node1: Node[Vars], node2: Node[Vars]): Boolean = false
def operation(right:Node[Vars], left: Node[Vars])(d:DiffList) = {
right.alterParent(None)(d)
d
}
}
class MergeUp[Vars <: NodeVariables[Vars]](val left:Node[Vars], val right:Node[Vars])(newInstance:(DiffList => Node[Vars])) extends Move[Vars] {
def this(newInstance:(DiffList => Node[Vars])) = this(null.asInstanceOf[Node[Vars]], null.asInstanceOf[Node[Vars]])(newInstance)
def perform(d:DiffList) {
operation(right, left)(d)
}
def name = "Merge Up"
def isValid(right: Node[Vars], left: Node[Vars]): Boolean = left.root != right.root && (left.isRoot && right.isRoot) && (left.isMention && right.isMention)
def isSymmetric(node1: Node[Vars], node2: Node[Vars]): Boolean = true
def operation(right: Node[Vars], left: Node[Vars])(d:DiffList) = {
val newParent = newInstance(d)
right.alterParent(Some(newParent))(d)
left.alterParent(Some(newParent))(d)
d
}
}
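// Hedged sketch (not part of the original file): a sampler would typically assemble its
// SettingIterator from the moves above for a candidate node pair. `n1`, `n2` and `mkNode`
// are assumed to be supplied by the surrounding SettingsSampler/MoveGenerator:
//
//   def settingsFor[Vars <: NodeVariables[Vars]](n1: Node[Vars], n2: Node[Vars])
//                                               (mkNode: DiffList => Node[Vars]) =
//     new MoveSettingIterator[Vars] {
//       val moves = IndexedSeq[Move[Vars]](
//         new NoMove[Vars],
//         new MergeLeft[Vars](n1, n2),
//         new SplitRight[Vars](n1, n2),
//         new MergeUp[Vars](n1, n2)(mkNode)
//       ).filter(_.isValid(n1, n2))
//     }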
|
Craigacp/factorie
|
src/main/scala/cc/factorie/app/nlp/hcoref/Move.scala
|
Scala
|
apache-2.0
| 4,321 |
package marge.la
class Scalar(d: Double) {
def *[E](v: Vector[E]): Vector[E] = v * d
def /[E](v: Vector[E]): Vector[E] = v * 1/d
}
object Scalar {
def apply(d: Double) = new Scalar(d)
}
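// Hedged usage note (not part of the original file): the wrapper lets a scalar appear on the
// left-hand side of vector arithmetic, e.g. for some Vector[Int] `v`:
//
//   Scalar(2.0) * v   // forwards to v * 2.0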
|
mikiobraun/marge
|
src/main/scala/marge/la/Scalar.scala
|
Scala
|
mit
| 194 |
package com.haskforce.run.stack.task
import java.io.File
import java.nio.file.Files
import java.util.regex.Pattern
import com.haskforce.settings.HaskellBuildSettings
import com.haskforce.utils.FileUtil
import com.intellij.execution.configurations.{CommandLineState, GeneralCommandLine, ParametersList}
import com.intellij.execution.filters._
import com.intellij.execution.impl.ConsoleViewImpl
import com.intellij.execution.process.{OSProcessHandler, ProcessEvent, ProcessListener}
import com.intellij.execution.runners.ExecutionEnvironment
import com.intellij.execution.ui.{ConsoleView, ConsoleViewContentType}
import com.intellij.openapi.project.{Project, ProjectUtil}
import com.intellij.openapi.util.Key
import com.intellij.openapi.vfs.LocalFileSystem
import scala.collection.JavaConverters._
class StackTaskCommandLineState(
environment: ExecutionEnvironment,
config: StackTaskConfiguration
) extends CommandLineState(environment) {
setConsoleBuilder(new TextConsoleBuilderImpl(config.getProject) {
override def getConsole: ConsoleView = {
val consoleView = new ConsoleViewImpl(config.getProject, true)
consoleView.addMessageFilter(new PatternBasedFileHyperlinkFilter(
config.getProject,
config.getProject.getBasePath,
new RelativeDiscoveryFileHyperlinkRawDataFinder(getProject, Array(
new PatternHyperlinkFormat(
StackTaskCommandLineState.LINK_TO_SOURCE_REGEX, false, false,
PatternHyperlinkPart.PATH, PatternHyperlinkPart.LINE, PatternHyperlinkPart.COLUMN
)
))
))
consoleView
}
})
override def startProcess(): OSProcessHandler = {
val configState = config.getConfigState
val commandLine: GeneralCommandLine = new GeneralCommandLine
// Set up the working directory for the process
// TODO: Make this configurable
commandLine.setWorkDirectory(getEnvironment.getProject.getBasePath)
val buildSettings: HaskellBuildSettings = HaskellBuildSettings.getInstance(config.getProject)
// Set the path to `stack`
commandLine.setExePath(buildSettings.getStackPath)
// Build the parameters list
val parametersList: ParametersList = commandLine.getParametersList
parametersList.addParametersString(configState.task)
// Set the env vars
val environment = commandLine.getEnvironment
if (configState.environmentVariables.isPassParentEnvs) environment.putAll(System.getenv())
if (configState.useCurrentSSHAgentVars) environment.putAll(extractCurrentSSHAgentVars())
commandLine.getEnvironment.putAll(configState.environmentVariables.getEnvs)
// Start and return the process
val procHandler = new OSProcessHandler(commandLine)
// TODO: This doesn't seem to work...the message doesn't get printed...
procHandler.addProcessListener(new ProcessListener {
override def startNotified(event: ProcessEvent): Unit = ()
override def processWillTerminate(event: ProcessEvent, willBeDestroyed: Boolean): Unit = ()
override def onTextAvailable(event: ProcessEvent, outputType: Key[_]): Unit = ()
override def processTerminated(event: ProcessEvent): Unit = {
if (event.getExitCode == 0) {
getConsoleBuilder.getConsole.print(
s"Stack task '${config.getConfigState.task}' completed successfully (exit code 0)",
ConsoleViewContentType.NORMAL_OUTPUT
)
}
}
})
procHandler
}
private def extractCurrentSSHAgentVars(): java.util.Map[String, String] = {
def empty = java.util.Collections.emptyMap[String, String]()
val home = sys.props.get("user.home").getOrElse { return empty }
val sshDir = new File(home, ".ssh")
if (!sshDir.isDirectory) return empty
val envFile = sshDir.listFiles().find(_.getName.startsWith("environment-")).getOrElse { return empty }
Files.readAllLines(envFile.toPath).iterator().asScala
.filter(s => s.startsWith("SSH_AUTH_SOCK=") || s.startsWith("SSH_AGENT_PID="))
.map(_.split(';').head.split('=') match {
case Array(k, v) => (k, v)
case _ => return empty
})
.toMap.asJava
}
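  // Hedged illustration (not part of the original file): the parser above expects files such as
  // ~/.ssh/environment-<host> in the format ssh-agent itself emits, for example:
  //
  //   SSH_AUTH_SOCK=/tmp/ssh-abc123/agent.4242; export SSH_AUTH_SOCK;
  //   SSH_AGENT_PID=4243; export SSH_AGENT_PID;
  //   echo Agent pid 4243;
  //
  // Only the SSH_AUTH_SOCK and SSH_AGENT_PID lines are kept, split on ';' and '='.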
}
object StackTaskCommandLineState {
  // [\\w\\-]*>? detects a dependency building; issue #409
  private val LINK_TO_SOURCE_REGEX = Pattern.compile("^[\\w\\-\\s]*>?\\s*([^:]+):(\\d+):(\\d+):")
}
/**
* Attempts to discover relative paths and convert them into canonical hyperlinks.
* Normal absolute paths are unaffected by this process.
*/
class RelativeDiscoveryFileHyperlinkRawDataFinder(
project: Project,
linkFormats: Array[PatternHyperlinkFormat]
) extends PatternBasedFileHyperlinkRawDataFinder(linkFormats) {
override def find(line: String): java.util.List[FileHyperlinkRawData] = {
val res = super.find(line)
if (res.isEmpty) return res
val fs = LocalFileSystem.getInstance()
res.iterator().asScala.foreach { data =>
if (fs.findFileByPath(data.getFilePath) != null) return res
}
// Infer relative path, would be nice if we could figure this out somehow from the command
// or output to determine base relative dir instead of just guessing like this.
res.iterator().asScala.map { data =>
val found = FileUtil.findFilesRecursively(
ProjectUtil.guessProjectDir(project),
_.getCanonicalPath.endsWith(data.getFilePath)
)
// Abort if we found 0 or more than 1 match since that would be ambiguous.
if (found.length != 1) None else {
Some(new FileHyperlinkRawData(
found.head.getCanonicalPath,
data.getDocumentLine,
data.getDocumentColumn,
data.getHyperlinkStartInd,
data.getHyperlinkEndInd
))
}
}.collectFirst {
case Some(x) => java.util.Collections.singletonList(x)
}.getOrElse(res)
}
}
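// Hedged walkthrough (not part of the original file): given a compiler line such as
// "src/Lib/Foo.hs:12:7: error: ...", the base finder yields the relative path "src/Lib/Foo.hs".
// If that path does not resolve directly, the override above searches the guessed project root
// for a file whose canonical path ends with it and, only when exactly one candidate matches,
// rewrites the hyperlink to that canonical path; otherwise the original result is returned.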
|
carymrobbins/intellij-haskforce
|
src/com/haskforce/run/stack/task/StackTaskCommandLineState.scala
|
Scala
|
apache-2.0
| 5,808 |
package org.jetbrains.jps.incremental.scala.local.worksheet.repl_interface
import java.io.{File, Flushable, PrintWriter}
import org.jetbrains.jps.incremental.scala.local.worksheet.repl_interface.ILoopWrapper
import org.jetbrains.jps.incremental.scala.local.worksheet.repl_interface.ILoopWrapper213Impl._
import scala.reflect.classTag
import scala.reflect.internal.util.Position
import scala.tools.nsc.Settings
import scala.tools.nsc.interpreter.NamedParam.Typed
import scala.tools.nsc.interpreter.StdReplTags.tagOfIMain
import scala.tools.nsc.interpreter.shell.{ILoop, ReplReporterImpl, ShellConfig}
import scala.tools.nsc.interpreter.{IMain, Results}
import scala.jdk.CollectionConverters._
/**
* ATTENTION: when editing ensure to increase the version in ILoopWrapperFactoryHandler
*/
class ILoopWrapper213Impl(
myOut: PrintWriter,
wrapperReporter: ILoopWrapperReporter,
projectFullCp: java.util.List[String],
scalaOptions: java.util.List[String]
) extends ILoop(new DummyConfig, out = myOut)
with ILoopWrapper {
override def getOutput: Flushable = myOut
override def init(): Unit = {
val mySettings = new Settings
mySettings.processArguments(scalaOptions.asScala.toList, processAll = true)
mySettings.classpath.append(projectFullCp.asScala.mkString(File.pathSeparator))
    // do not use the java classpath because it contains scala library jars with a version
    // different from the one used during compilation (it is passed from the plugin classpath)
mySettings.usejavacp.tryToSetFromPropertyValue(false.toString)
createInterpreter(mySettings)
val itp = intp.asInstanceOf[IMain]
itp.initializeCompiler()
itp.quietBind(new Typed[IMain]("$intp", itp)(tagOfIMain, classTag[IMain]))
}
override def createInterpreter(interpreterSettings: Settings): Unit = {
val reporter = new ReplReporterImpl(new DummyConfig, interpreterSettings, out) {
override def doReport(pos: Position, msg: String, severity: Severity): Unit =
wrapperReporter.report(severity.toString, pos.line, pos.column, pos.lineContent, msg)
}
intp = new IMain(interpreterSettings, None, interpreterSettings, reporter)
}
override def reset(): Unit = {
intp.reset()
}
override def shutdown(): Unit = {
closeInterpreter()
}
override def processChunk(code: String): Boolean =
intp.interpret(code) match {
case Results.Success => true
case _ => false
}
}
object ILoopWrapper213Impl {
class DummyConfig extends ShellConfig {
override def filesToPaste: List[String] = List.empty
override def filesToLoad: List[String] = List.empty
override def batchText: String = ""
override def batchMode: Boolean = false
override def doCompletion: Boolean = false
override def haveInteractiveConsole: Boolean = false
}
}
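// Hedged lifecycle sketch (not part of the original file): the wrapper is assumed to be driven
// by the worksheet infrastructure roughly as follows --
//
//   val wrapper = new ILoopWrapper213Impl(printWriter, reporter, classpathEntries, scalacOptions)
//   wrapper.init()                                   // creates and initialises the underlying IMain
//   val ok = wrapper.processChunk("val x = 1 + 1")   // true on Results.Success
//   wrapper.reset()                                  // clears interpreter state between runs
//   wrapper.shutdown()                               // closes the interpreter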
|
JetBrains/intellij-scala
|
scala/worksheet-repl-interface/resources/ILoopWrapper213Impl.scala
|
Scala
|
apache-2.0
| 2,822 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.run
import quasar._
import quasar.api.QueryEvaluator
import quasar.common.PhaseResultTell
import quasar.qscript._
import matryoshka._
import scalaz.Monad
final class RegressionQueryEvaluator[
T[_[_]]: BirecursiveT: EqualT: ShowT: RenderTreeT,
F[_]: Monad: MonadPlannerErr: PhaseResultTell]
extends QueryEvaluator[F, T[QScriptEducated[T, ?]], QScriptCount] {
val regressionEvaluator = new RegressionQScriptEvaluator[T, F]
def evaluate(qs: T[QScriptEducated[T, ?]]): F[QScriptCount] =
regressionEvaluator.evaluate(qs)
}
|
slamdata/slamengine
|
run/src/test/scala/quasar/RegressionQueryEvaulator.scala
|
Scala
|
apache-2.0
| 1,163 |
object K1 {
class Foo[T]
class Bar[F[_]]
object Bar {
implicit def barF[F[_]](implicit fooF: Foo[Bar[F]]): Bar[F] = null
}
class A[T]
object A {
implicit def fooA[F[_[_]]](implicit barB: F[B]): Foo[F[A]] = null
}
class B[T]
object B {
implicit def fooB[F[_[_]]]: Foo[F[B]] = null
}
}
object K1U {
class Foo[T]
class Bar[F[_ <: Int]]
object Bar {
implicit def barF[F[_ <: Int]](implicit fooF: Foo[Bar[F]]): Bar[F] = null
}
class A[T <: Int]
object A {
implicit def fooA[F[_[_ <: Int]]](implicit barB: F[B]): Foo[F[A]] = null
}
class B[T <: Int]
object B {
implicit def fooB[F[_[_ <: Int]]]: Foo[F[B]] = null
}
}
object K1L {
class Foo[T]
class Bar[F[_ >: Int]]
object Bar {
implicit def barF[F[_ >: Int]](implicit fooF: Foo[Bar[F]]): Bar[F] = null
}
class A[T >: Int]
object A {
implicit def fooA[F[_[_ >: Int]]](implicit barB: F[B]): Foo[F[A]] = null
}
class B[T >: Int]
object B {
implicit def fooB[F[_[_ >: Int]]]: Foo[F[B]] = null
}
}
object K11 {
class Foo[T]
class Bar[F[_[_]]]
object Bar {
implicit def barF[F[_[_]]](implicit fooF: Foo[Bar[F]]): Bar[F] = null
}
class A[T[_]]
object A {
implicit def fooA[F[_[_[_]]]](implicit barB: F[B]): Foo[F[A]] = null
}
class B[T[_]]
object B {
implicit def fooB[F[_[_[_]]]]: Foo[F[B]] = null
}
}
object K2 {
class Foo[T]
class Bar[F[_, _]]
object Bar {
implicit def barF[F[_, _]](implicit fooF: Foo[Bar[F]]): Bar[F] = null
}
class A[T, U]
object A {
implicit def fooA[F[_[_, _]]](implicit barB: F[B]): Foo[F[A]] = null
}
class B[T, U]
object B {
implicit def fooB[F[_[_, _]]]: Foo[F[B]] = null
}
}
object Test {
{
import K1._
implicitly[Bar[A]]
}
{
import K1U._
implicitly[Bar[A]]
}
{
import K1L._
implicitly[Bar[A]]
}
{
import K11._
implicitly[Bar[A]]
}
{
import K2._
implicitly[Bar[A]]
}
}
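// Hedged note (not part of the original file): each `implicitly[Bar[A]]` above is expected to
// resolve through the same chain, e.g. for K1:
//   implicitly[Bar[A]]  ~>  Bar.barF[A]   (needs Foo[Bar[A]])
//                       ~>  A.fooA[Bar]   (needs Bar[B], i.e. F[B] with F = Bar)
//                       ~>  Bar.barF[B]   (needs Foo[Bar[B]])
//                       ~>  B.fooB[Bar]
// The K1U/K1L/K11/K2 variants repeat the pattern with bounded and higher-kinded parameters.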
|
som-snytt/dotty
|
tests/pos/i6238.scala
|
Scala
|
apache-2.0
| 1,981 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import java.util
import org.apache.calcite.rel.`type`._
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.sql.`type`.SqlTypeName
import org.apache.calcite.sql.`type`.SqlTypeName._
import org.apache.calcite.sql.fun._
import org.apache.calcite.sql.{SqlAggFunction, SqlKind}
import org.apache.flink.api.common.functions.{MapFunction, RichGroupReduceFunction, AggregateFunction => DataStreamAggFunction, _}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, TypeInformation}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.functions.windowing.{AllWindowFunction, WindowFunction}
import org.apache.flink.streaming.api.windowing.windows.{Window => DataStreamWindow}
import org.apache.flink.table.api.dataview.DataViewSpec
import org.apache.flink.table.api.{StreamQueryConfig, TableException}
import org.apache.flink.table.calcite.FlinkRelBuilder.NamedWindowProperty
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.codegen.AggregationCodeGenerator
import org.apache.flink.table.expressions.ExpressionUtils.isTimeIntervalLiteral
import org.apache.flink.table.expressions._
import org.apache.flink.table.functions.aggfunctions._
import org.apache.flink.table.functions.utils.AggSqlFunction
import org.apache.flink.table.functions.utils.UserDefinedFunctionUtils._
import org.apache.flink.table.functions.{AggregateFunction => TableAggregateFunction}
import org.apache.flink.table.plan.logical._
import org.apache.flink.table.runtime.types.{CRow, CRowTypeInfo}
import org.apache.flink.table.typeutils.TypeCheckUtils._
import org.apache.flink.table.typeutils.{RowIntervalTypeInfo, TimeIntervalTypeInfo}
import org.apache.flink.types.Row
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
object AggregateUtil {
type CalcitePair[T, R] = org.apache.calcite.util.Pair[T, R]
type JavaList[T] = java.util.List[T]
/**
* Create an [[org.apache.flink.streaming.api.functions.ProcessFunction]] for unbounded OVER
* window to evaluate final aggregate value.
*
* @param generator code generator instance
* @param namedAggregates Physical calls to aggregate functions and their output field names
* @param inputType Physical type of the row.
* @param inputTypeInfo Physical type information of the row.
* @param inputFieldTypeInfo Physical type information of the row's fields.
* @param rowTimeIdx The index of the rowtime field or None in case of processing time.
   * @param isPartitioned It is a tag that indicates whether the input is partitioned
   * @param isRowsClause It is a tag that indicates whether the OVER clause is a ROWS clause
*/
private[flink] def createUnboundedOverProcessFunction(
generator: AggregationCodeGenerator,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
inputType: RelDataType,
inputTypeInfo: TypeInformation[Row],
inputFieldTypeInfo: Seq[TypeInformation[_]],
queryConfig: StreamQueryConfig,
rowTimeIdx: Option[Int],
isPartitioned: Boolean,
isRowsClause: Boolean)
: ProcessFunction[CRow, CRow] = {
val (aggFields, aggregates, accTypes, accSpecs) =
transformToAggregateFunctions(
namedAggregates.map(_.getKey),
inputType,
needRetraction = false,
isStateBackedDataViews = true)
val aggregationStateType: RowTypeInfo = new RowTypeInfo(accTypes: _*)
val forwardMapping = (0 until inputType.getFieldCount).toArray
val aggMapping = aggregates.indices.map(x => x + inputType.getFieldCount).toArray
val outputArity = inputType.getFieldCount + aggregates.length
val genFunction = generator.generateAggregations(
"UnboundedProcessingOverAggregateHelper",
inputFieldTypeInfo,
aggregates,
aggFields,
aggMapping,
partialResults = false,
forwardMapping,
None,
outputArity,
needRetract = false,
needMerge = false,
needReset = false,
accConfig = Some(accSpecs)
)
if (rowTimeIdx.isDefined) {
if (isRowsClause) {
// ROWS unbounded over process function
new RowTimeUnboundedRowsOver(
genFunction,
aggregationStateType,
CRowTypeInfo(inputTypeInfo),
rowTimeIdx.get,
queryConfig)
} else {
// RANGE unbounded over process function
new RowTimeUnboundedRangeOver(
genFunction,
aggregationStateType,
CRowTypeInfo(inputTypeInfo),
rowTimeIdx.get,
queryConfig)
}
} else {
new ProcTimeUnboundedOver(
genFunction,
aggregationStateType,
queryConfig)
}
}
/**
* Create an [[org.apache.flink.streaming.api.functions.ProcessFunction]] for group (without
* window) aggregate to evaluate final aggregate value.
*
* @param generator code generator instance
* @param namedAggregates List of calls to aggregate functions and their output field names
* @param inputRowType Input row type
* @param inputFieldTypes Types of the physical input fields
* @param groupings the position (in the input Row) of the grouping keys
* @param queryConfig The configuration of the query to generate.
   * @param generateRetraction It is a tag that indicates whether to generate retraction records.
   * @param consumeRetraction  It is a tag that indicates whether to consume retraction records.
* @return [[org.apache.flink.streaming.api.functions.ProcessFunction]]
*/
private[flink] def createGroupAggregateFunction(
generator: AggregationCodeGenerator,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
inputRowType: RelDataType,
inputFieldTypes: Seq[TypeInformation[_]],
groupings: Array[Int],
queryConfig: StreamQueryConfig,
generateRetraction: Boolean,
consumeRetraction: Boolean): ProcessFunction[CRow, CRow] = {
val (aggFields, aggregates, accTypes, accSpecs) =
transformToAggregateFunctions(
namedAggregates.map(_.getKey),
inputRowType,
consumeRetraction,
isStateBackedDataViews = true)
val aggMapping = aggregates.indices.map(_ + groupings.length).toArray
val outputArity = groupings.length + aggregates.length
val aggregationStateType: RowTypeInfo = new RowTypeInfo(accTypes: _*)
val genFunction = generator.generateAggregations(
"NonWindowedAggregationHelper",
inputFieldTypes,
aggregates,
aggFields,
aggMapping,
partialResults = false,
groupings,
None,
outputArity,
consumeRetraction,
needMerge = false,
needReset = false,
accConfig = Some(accSpecs)
)
new GroupAggProcessFunction(
genFunction,
aggregationStateType,
generateRetraction,
queryConfig)
}
/**
* Create an [[org.apache.flink.streaming.api.functions.ProcessFunction]] for ROWS clause
* bounded OVER window to evaluate final aggregate value.
*
* @param generator code generator instance
* @param namedAggregates Physical calls to aggregate functions and their output field names
* @param inputType Physical type of the row.
* @param inputTypeInfo Physical type information of the row.
* @param inputFieldTypeInfo Physical type information of the row's fields.
* @param precedingOffset the preceding offset
   * @param isRowsClause It is a tag that indicates whether the OVER clause is a ROWS clause
* @param rowTimeIdx The index of the rowtime field or None in case of processing time.
* @return [[org.apache.flink.streaming.api.functions.ProcessFunction]]
*/
private[flink] def createBoundedOverProcessFunction(
generator: AggregationCodeGenerator,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
inputType: RelDataType,
inputTypeInfo: TypeInformation[Row],
inputFieldTypeInfo: Seq[TypeInformation[_]],
precedingOffset: Long,
queryConfig: StreamQueryConfig,
isRowsClause: Boolean,
rowTimeIdx: Option[Int])
: ProcessFunction[CRow, CRow] = {
val needRetract = true
val (aggFields, aggregates, accTypes, accSpecs) =
transformToAggregateFunctions(
namedAggregates.map(_.getKey),
inputType,
needRetract,
isStateBackedDataViews = true)
val aggregationStateType: RowTypeInfo = new RowTypeInfo(accTypes: _*)
val inputRowType = CRowTypeInfo(inputTypeInfo)
val forwardMapping = (0 until inputType.getFieldCount).toArray
val aggMapping = aggregates.indices.map(x => x + inputType.getFieldCount).toArray
val outputArity = inputType.getFieldCount + aggregates.length
val genFunction = generator.generateAggregations(
"BoundedOverAggregateHelper",
inputFieldTypeInfo,
aggregates,
aggFields,
aggMapping,
partialResults = false,
forwardMapping,
None,
outputArity,
needRetract,
needMerge = false,
needReset = false,
accConfig = Some(accSpecs)
)
if (rowTimeIdx.isDefined) {
if (isRowsClause) {
new RowTimeBoundedRowsOver(
genFunction,
aggregationStateType,
inputRowType,
precedingOffset,
rowTimeIdx.get,
queryConfig)
} else {
new RowTimeBoundedRangeOver(
genFunction,
aggregationStateType,
inputRowType,
precedingOffset,
rowTimeIdx.get,
queryConfig)
}
} else {
if (isRowsClause) {
new ProcTimeBoundedRowsOver(
genFunction,
precedingOffset,
aggregationStateType,
inputRowType,
queryConfig)
} else {
new ProcTimeBoundedRangeOver(
genFunction,
precedingOffset,
aggregationStateType,
inputRowType,
queryConfig)
}
}
}
/**
* Create a [[org.apache.flink.api.common.functions.MapFunction]] that prepares for aggregates.
   * The output of the function contains the grouping keys, the timestamp, and the intermediate
   * aggregate values of all aggregate functions. The timestamp field is aligned to the time window
   * start and is used as a grouping key in case of a time window. In case of a count window on
   * event-time, the timestamp is not aligned and is used for sorting.
*
* The output is stored in Row by the following format:
* {{{
* avg(x) count(z)
* | |
* v v
* +---------+---------+----------------+----------------+------------------+-------+
* |groupKey1|groupKey2| AvgAccumulator | SumAccumulator | CountAccumulator |rowtime|
* +---------+---------+----------------+----------------+------------------+-------+
* ^ ^
* | |
* sum(y) rowtime to group or sort
* }}}
*
   * NOTE: this function is only used for time-based windows on batch tables.
*/
def createDataSetWindowPrepareMapFunction(
generator: AggregationCodeGenerator,
window: LogicalWindow,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
groupings: Array[Int],
inputType: RelDataType,
inputFieldTypeInfo: Seq[TypeInformation[_]],
isParserCaseSensitive: Boolean)
: MapFunction[Row, Row] = {
val needRetract = false
val (aggFieldIndexes, aggregates, accTypes, _) = transformToAggregateFunctions(
namedAggregates.map(_.getKey),
inputType,
needRetract)
val mapReturnType: RowTypeInfo =
createRowTypeForKeysAndAggregates(
groupings,
aggregates,
accTypes,
inputType,
Some(Array(BasicTypeInfo.LONG_TYPE_INFO)))
val (timeFieldPos, tumbleTimeWindowSize) = window match {
case TumblingGroupWindow(_, time, size) =>
val timeFieldPos = getTimeFieldPosition(time, inputType, isParserCaseSensitive)
size match {
case Literal(value: Long, TimeIntervalTypeInfo.INTERVAL_MILLIS) =>
(timeFieldPos, Some(value))
case _ => (timeFieldPos, None)
}
case SessionGroupWindow(_, time, _) =>
(getTimeFieldPosition(time, inputType, isParserCaseSensitive), None)
case SlidingGroupWindow(_, time, size, slide) =>
val timeFieldPos = getTimeFieldPosition(time, inputType, isParserCaseSensitive)
size match {
case Literal(_: Long, TimeIntervalTypeInfo.INTERVAL_MILLIS) =>
// pre-tumble incremental aggregates on time-windows
val timeFieldPos = getTimeFieldPosition(time, inputType, isParserCaseSensitive)
val preTumblingSize = determineLargestTumblingSize(asLong(size), asLong(slide))
(timeFieldPos, Some(preTumblingSize))
case _ => (timeFieldPos, None)
}
case _ =>
throw new UnsupportedOperationException(s"$window is currently not supported on batch")
}
val aggMapping = aggregates.indices.toArray.map(_ + groupings.length)
val outputArity = aggregates.length + groupings.length + 1
val genFunction = generator.generateAggregations(
"DataSetAggregatePrepareMapHelper",
inputFieldTypeInfo,
aggregates,
aggFieldIndexes,
aggMapping,
partialResults = true,
groupings,
None,
outputArity,
needRetract,
needMerge = false,
needReset = true,
None
)
new DataSetWindowAggMapFunction(
genFunction,
timeFieldPos,
tumbleTimeWindowSize,
mapReturnType)
}
/**
* Create a [[org.apache.flink.api.common.functions.GroupReduceFunction]] that prepares for
* partial aggregates of sliding windows (time and count-windows).
* It requires a prepared input (with intermediate aggregate fields and aligned rowtime for
* pre-tumbling in case of time-windows), pre-aggregates (pre-tumbles) rows, aligns the
* window-start, and replicates or omits records for different panes of a sliding window.
*
   * The output of the function contains the grouping keys, the intermediate aggregate values of
   * all aggregate functions, and the aligned window start. The window start need not be a
   * timestamp; it can also be a count value in case of count-windows.
*
* The output is stored in Row by the following format:
*
* {{{
* avg(x) aggOffsetInRow = 2 count(z) aggOffsetInRow = 5
* | |
* v v
* +---------+---------+--------+--------+--------+--------+-------------+
* |groupKey1|groupKey2| sum1 | count1 | sum2 | count2 | windowStart |
* +---------+---------+--------+--------+--------+--------+-------------+
* ^ ^
* | |
* sum(y) aggOffsetInRow = 4 window start for pane mapping
* }}}
*
* NOTE: this function is only used for sliding windows with partial aggregates on batch tables.
*/
def createDataSetSlideWindowPrepareGroupReduceFunction(
generator: AggregationCodeGenerator,
window: LogicalWindow,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
groupings: Array[Int],
physicalInputRowType: RelDataType,
physicalInputTypes: Seq[TypeInformation[_]],
isParserCaseSensitive: Boolean)
: RichGroupReduceFunction[Row, Row] = {
val needRetract = false
val (aggFieldIndexes, aggregates, accTypes, _) = transformToAggregateFunctions(
namedAggregates.map(_.getKey),
physicalInputRowType,
needRetract)
val returnType: RowTypeInfo = createRowTypeForKeysAndAggregates(
groupings,
aggregates,
accTypes,
physicalInputRowType,
Some(Array(BasicTypeInfo.LONG_TYPE_INFO)))
val keysAndAggregatesArity = groupings.length + namedAggregates.length
window match {
case SlidingGroupWindow(_, _, size, slide) if isTimeInterval(size.resultType) =>
// sliding time-window for partial aggregations
val genFunction = generator.generateAggregations(
"DataSetAggregatePrepareMapHelper",
physicalInputTypes,
aggregates,
aggFieldIndexes,
aggregates.indices.map(_ + groupings.length).toArray,
partialResults = true,
groupings.indices.toArray,
Some(aggregates.indices.map(_ + groupings.length).toArray),
keysAndAggregatesArity + 1,
needRetract,
needMerge = true,
needReset = true,
None
)
new DataSetSlideTimeWindowAggReduceGroupFunction(
genFunction,
keysAndAggregatesArity,
asLong(size),
asLong(slide),
returnType)
case _ =>
throw new UnsupportedOperationException(s"$window is currently not supported on batch.")
}
}
/**
* Create a [[org.apache.flink.api.common.functions.FlatMapFunction]] that prepares for
* non-incremental aggregates of sliding windows (time-windows).
*
* It requires a prepared input (with intermediate aggregate fields), aligns the
* window-start, and replicates or omits records for different panes of a sliding window.
*
   * The output of the function contains the grouping keys, the intermediate aggregate values of
   * all aggregate functions, and the aligned window start.
*
* The output is stored in Row by the following format:
*
* {{{
* avg(x) aggOffsetInRow = 2 count(z) aggOffsetInRow = 5
* | |
* v v
* +---------+---------+--------+--------+--------+--------+-------------+
* |groupKey1|groupKey2| sum1 | count1 | sum2 | count2 | windowStart |
* +---------+---------+--------+--------+--------+--------+-------------+
* ^ ^
* | |
* sum(y) aggOffsetInRow = 4 window start for pane mapping
* }}}
*
* NOTE: this function is only used for time-based sliding windows on batch tables.
*/
def createDataSetSlideWindowPrepareFlatMapFunction(
window: LogicalWindow,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
groupings: Array[Int],
inputType: TypeInformation[Row],
isParserCaseSensitive: Boolean)
: FlatMapFunction[Row, Row] = {
window match {
case SlidingGroupWindow(_, _, size, slide) if isTimeInterval(size.resultType) =>
new DataSetSlideTimeWindowAggFlatMapFunction(
inputType.getArity - 1,
asLong(size),
asLong(slide),
inputType)
case _ =>
throw new UnsupportedOperationException(
s"$window is currently not supported in a batch environment.")
}
}
/**
* Create a [[org.apache.flink.api.common.functions.GroupReduceFunction]] to compute window
   * aggregates on batch tables. If all aggregates support partial aggregation and the window is a
   * time window, the [[org.apache.flink.api.common.functions.GroupReduceFunction]] implements
* [[org.apache.flink.api.common.functions.CombineFunction]] as well.
*
* NOTE: this function is only used for window on batch tables.
*/
def createDataSetWindowAggregationGroupReduceFunction(
generator: AggregationCodeGenerator,
window: LogicalWindow,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
physicalInputRowType: RelDataType,
physicalInputTypes: Seq[TypeInformation[_]],
outputType: RelDataType,
groupings: Array[Int],
properties: Seq[NamedWindowProperty],
isInputCombined: Boolean = false)
: RichGroupReduceFunction[Row, Row] = {
val needRetract = false
val (aggFieldIndexes, aggregates, _, _) = transformToAggregateFunctions(
namedAggregates.map(_.getKey),
physicalInputRowType,
needRetract)
val aggMapping = aggregates.indices.toArray.map(_ + groupings.length)
val genPreAggFunction = generator.generateAggregations(
"GroupingWindowAggregateHelper",
physicalInputTypes,
aggregates,
aggFieldIndexes,
aggMapping,
partialResults = true,
groupings,
Some(aggregates.indices.map(_ + groupings.length).toArray),
outputType.getFieldCount,
needRetract,
needMerge = true,
needReset = true,
None
)
val genFinalAggFunction = generator.generateAggregations(
"GroupingWindowAggregateHelper",
physicalInputTypes,
aggregates,
aggFieldIndexes,
aggMapping,
partialResults = false,
groupings.indices.toArray,
Some(aggregates.indices.map(_ + groupings.length).toArray),
outputType.getFieldCount,
needRetract,
needMerge = true,
needReset = true,
None
)
val keysAndAggregatesArity = groupings.length + namedAggregates.length
window match {
case TumblingGroupWindow(_, _, size) if isTimeInterval(size.resultType) =>
// tumbling time window
val (startPos, endPos, timePos) = computeWindowPropertyPos(properties)
if (doAllSupportPartialMerge(aggregates)) {
// for incremental aggregations
new DataSetTumbleTimeWindowAggReduceCombineFunction(
genPreAggFunction,
genFinalAggFunction,
asLong(size),
startPos,
endPos,
timePos,
keysAndAggregatesArity)
}
else {
// for non-incremental aggregations
new DataSetTumbleTimeWindowAggReduceGroupFunction(
genFinalAggFunction,
asLong(size),
startPos,
endPos,
timePos,
outputType.getFieldCount)
}
case TumblingGroupWindow(_, _, size) =>
// tumbling count window
new DataSetTumbleCountWindowAggReduceGroupFunction(
genFinalAggFunction,
asLong(size))
case SessionGroupWindow(_, _, gap) =>
val (startPos, endPos, timePos) = computeWindowPropertyPos(properties)
new DataSetSessionWindowAggReduceGroupFunction(
genFinalAggFunction,
keysAndAggregatesArity,
startPos,
endPos,
timePos,
asLong(gap),
isInputCombined)
case SlidingGroupWindow(_, _, size, _) if isTimeInterval(size.resultType) =>
val (startPos, endPos, timePos) = computeWindowPropertyPos(properties)
if (doAllSupportPartialMerge(aggregates)) {
// for partial aggregations
new DataSetSlideWindowAggReduceCombineFunction(
genPreAggFunction,
genFinalAggFunction,
keysAndAggregatesArity,
startPos,
endPos,
timePos,
asLong(size))
}
else {
// for non-partial aggregations
new DataSetSlideWindowAggReduceGroupFunction(
genFinalAggFunction,
keysAndAggregatesArity,
startPos,
endPos,
timePos,
asLong(size))
}
case SlidingGroupWindow(_, _, size, _) =>
new DataSetSlideWindowAggReduceGroupFunction(
genFinalAggFunction,
keysAndAggregatesArity,
None,
None,
None,
asLong(size))
case _ =>
throw new UnsupportedOperationException(s"$window is currently not supported on batch")
}
}
/**
   * Create a [[org.apache.flink.api.common.functions.MapPartitionFunction]] that computes the
   * aggregates.
   * The function returns the aggregate values of all aggregate functions, which are
   * organized in the following format:
*
* {{{
* avg(x) aggOffsetInRow = 2 count(z) aggOffsetInRow = 5
* | | windowEnd(max(rowtime)
* | | |
* v v v
* +--------+--------+--------+--------+-----------+---------+
* | sum1 | count1 | sum2 | count2 |windowStart|windowEnd|
* +--------+--------+--------+--------+-----------+---------+
* ^ ^
* | |
* sum(y) aggOffsetInRow = 4 windowStart(min(rowtime))
*
* }}}
*
*/
def createDataSetWindowAggregationMapPartitionFunction(
generator: AggregationCodeGenerator,
window: LogicalWindow,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
physicalInputRowType: RelDataType,
physicalInputTypes: Seq[TypeInformation[_]],
groupings: Array[Int]): MapPartitionFunction[Row, Row] = {
val needRetract = false
val (aggFieldIndexes, aggregates, accTypes, _) = transformToAggregateFunctions(
namedAggregates.map(_.getKey),
physicalInputRowType,
needRetract)
val aggMapping = aggregates.indices.map(_ + groupings.length).toArray
val keysAndAggregatesArity = groupings.length + namedAggregates.length
window match {
case SessionGroupWindow(_, _, gap) =>
val combineReturnType: RowTypeInfo =
createRowTypeForKeysAndAggregates(
groupings,
aggregates,
accTypes,
physicalInputRowType,
Option(Array(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.LONG_TYPE_INFO)))
val genFunction = generator.generateAggregations(
"GroupingWindowAggregateHelper",
physicalInputTypes,
aggregates,
aggFieldIndexes,
aggMapping,
partialResults = true,
groupings.indices.toArray,
Some(aggregates.indices.map(_ + groupings.length).toArray),
groupings.length + aggregates.length + 2,
needRetract,
needMerge = true,
needReset = true,
None
)
new DataSetSessionWindowAggregatePreProcessor(
genFunction,
keysAndAggregatesArity,
asLong(gap),
combineReturnType)
case _ =>
throw new UnsupportedOperationException(s"$window is currently not supported on batch")
}
}
/**
   * Create a [[org.apache.flink.api.common.functions.GroupCombineFunction]] that performs
   * pre-aggregation for the aggregates.
   * The function returns the intermediate aggregate values of all aggregate functions, which are
   * organized in the following format:
* {{{
* avg(x) windowEnd(max(rowtime)
* | |
* v v
* +---------+---------+----------------+----------------+-------------+-----------+
* |groupKey1|groupKey2| AvgAccumulator | SumAccumulator | windowStart | windowEnd |
* +---------+---------+----------------+----------------+-------------+-----------+
* ^ ^
* | |
* sum(y) windowStart(min(rowtime))
* }}}
*
*/
private[flink] def createDataSetWindowAggregationCombineFunction(
generator: AggregationCodeGenerator,
window: LogicalWindow,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
physicalInputRowType: RelDataType,
physicalInputTypes: Seq[TypeInformation[_]],
groupings: Array[Int])
: GroupCombineFunction[Row, Row] = {
val needRetract = false
val (aggFieldIndexes, aggregates, accTypes, _) = transformToAggregateFunctions(
namedAggregates.map(_.getKey),
physicalInputRowType,
needRetract)
val aggMapping = aggregates.indices.map(_ + groupings.length).toArray
val keysAndAggregatesArity = groupings.length + namedAggregates.length
window match {
case SessionGroupWindow(_, _, gap) =>
val combineReturnType: RowTypeInfo =
createRowTypeForKeysAndAggregates(
groupings,
aggregates,
accTypes,
physicalInputRowType,
Option(Array(BasicTypeInfo.LONG_TYPE_INFO, BasicTypeInfo.LONG_TYPE_INFO)))
val genFunction = generator.generateAggregations(
"GroupingWindowAggregateHelper",
physicalInputTypes,
aggregates,
aggFieldIndexes,
aggMapping,
partialResults = true,
groupings.indices.toArray,
Some(aggregates.indices.map(_ + groupings.length).toArray),
groupings.length + aggregates.length + 2,
needRetract,
needMerge = true,
needReset = true,
None
)
new DataSetSessionWindowAggregatePreProcessor(
genFunction,
keysAndAggregatesArity,
asLong(gap),
combineReturnType)
case _ =>
throw new UnsupportedOperationException(
s" [ ${window.getClass.getCanonicalName.split("\\.").last} ] is currently not " +
s"supported on batch")
}
}
/**
* Create functions to compute a [[org.apache.flink.table.plan.nodes.dataset.DataSetAggregate]].
* If all aggregation functions support pre-aggregation, a pre-aggregation function and the
* respective output type are generated as well.
*/
private[flink] def createDataSetAggregateFunctions(
generator: AggregationCodeGenerator,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
inputType: RelDataType,
inputFieldTypeInfo: Seq[TypeInformation[_]],
outputType: RelDataType,
groupings: Array[Int]): (Option[DataSetPreAggFunction],
Option[TypeInformation[Row]],
RichGroupReduceFunction[Row, Row]) = {
val needRetract = false
val (aggInFields, aggregates, accTypes, _) = transformToAggregateFunctions(
namedAggregates.map(_.getKey),
inputType,
needRetract)
val (gkeyOutMapping, aggOutMapping) = getOutputMappings(
namedAggregates,
groupings,
inputType,
outputType
)
val aggOutFields = aggOutMapping.map(_._1)
if (doAllSupportPartialMerge(aggregates)) {
// compute preaggregation type
val preAggFieldTypes = gkeyOutMapping.map(_._2)
.map(inputType.getFieldList.get(_).getType)
.map(FlinkTypeFactory.toTypeInfo) ++ accTypes
val preAggRowType = new RowTypeInfo(preAggFieldTypes: _*)
val genPreAggFunction = generator.generateAggregations(
"DataSetAggregatePrepareMapHelper",
inputFieldTypeInfo,
aggregates,
aggInFields,
aggregates.indices.map(_ + groupings.length).toArray,
partialResults = true,
groupings,
None,
groupings.length + aggregates.length,
needRetract,
needMerge = false,
needReset = true,
None
)
// compute mapping of forwarded grouping keys
val gkeyMapping: Array[Int] = if (gkeyOutMapping.nonEmpty) {
val gkeyOutFields = gkeyOutMapping.map(_._1)
val mapping = Array.fill[Int](gkeyOutFields.max + 1)(-1)
gkeyOutFields.zipWithIndex.foreach(m => mapping(m._1) = m._2)
mapping
} else {
new Array[Int](0)
}
val genFinalAggFunction = generator.generateAggregations(
"DataSetAggregateFinalHelper",
inputFieldTypeInfo,
aggregates,
aggInFields,
aggOutFields,
partialResults = false,
gkeyMapping,
Some(aggregates.indices.map(_ + groupings.length).toArray),
outputType.getFieldCount,
needRetract,
needMerge = true,
needReset = true,
None
)
(
Some(new DataSetPreAggFunction(genPreAggFunction)),
Some(preAggRowType),
new DataSetFinalAggFunction(genFinalAggFunction)
)
}
else {
val genFunction = generator.generateAggregations(
"DataSetAggregateHelper",
inputFieldTypeInfo,
aggregates,
aggInFields,
aggOutFields,
partialResults = false,
groupings,
None,
outputType.getFieldCount,
needRetract,
needMerge = false,
needReset = true,
None
)
(
None,
None,
new DataSetAggFunction(genFunction)
)
}
}
/**
* Create an [[AllWindowFunction]] for non-partitioned window aggregates.
*/
private[flink] def createAggregationAllWindowFunction(
window: LogicalWindow,
finalRowArity: Int,
properties: Seq[NamedWindowProperty])
: AllWindowFunction[Row, CRow, DataStreamWindow] = {
if (isTimeWindow(window)) {
val (startPos, endPos, timePos) = computeWindowPropertyPos(properties)
new IncrementalAggregateAllTimeWindowFunction(
startPos,
endPos,
timePos,
finalRowArity)
.asInstanceOf[AllWindowFunction[Row, CRow, DataStreamWindow]]
} else {
new IncrementalAggregateAllWindowFunction(
finalRowArity)
}
}
/**
* Create a [[WindowFunction]] for group window aggregates.
*/
private[flink] def createAggregationGroupWindowFunction(
window: LogicalWindow,
numGroupingKeys: Int,
numAggregates: Int,
finalRowArity: Int,
properties: Seq[NamedWindowProperty]):
WindowFunction[Row, CRow, Row, DataStreamWindow] = {
if (isTimeWindow(window)) {
val (startPos, endPos, timePos) = computeWindowPropertyPos(properties)
new IncrementalAggregateTimeWindowFunction(
numGroupingKeys,
numAggregates,
startPos,
endPos,
timePos,
finalRowArity)
.asInstanceOf[WindowFunction[Row, CRow, Row, DataStreamWindow]]
} else {
new IncrementalAggregateWindowFunction(
numGroupingKeys,
numAggregates,
finalRowArity)
}
}
private[flink] def createDataStreamAggregateFunction(
generator: AggregationCodeGenerator,
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
inputType: RelDataType,
inputFieldTypeInfo: Seq[TypeInformation[_]],
outputType: RelDataType,
groupingKeys: Array[Int],
needMerge: Boolean)
: (DataStreamAggFunction[CRow, Row, Row], RowTypeInfo, RowTypeInfo) = {
val needRetract = false
val (aggFields, aggregates, accTypes, _) =
transformToAggregateFunctions(
namedAggregates.map(_.getKey),
inputType,
needRetract)
val aggMapping = aggregates.indices.toArray
val outputArity = aggregates.length
val genFunction = generator.generateAggregations(
"GroupingWindowAggregateHelper",
inputFieldTypeInfo,
aggregates,
aggFields,
aggMapping,
partialResults = false,
groupingKeys,
None,
outputArity,
needRetract,
needMerge,
needReset = false,
None
)
val aggResultTypes = namedAggregates.map(a => FlinkTypeFactory.toTypeInfo(a.left.getType))
val accumulatorRowType = new RowTypeInfo(accTypes: _*)
val aggResultRowType = new RowTypeInfo(aggResultTypes: _*)
val aggFunction = new AggregateAggFunction(genFunction)
(aggFunction, accumulatorRowType, aggResultRowType)
}
/**
* Return true if all aggregates can be partially merged. False otherwise.
*/
private[flink] def doAllSupportPartialMerge(
aggregateCalls: Seq[AggregateCall],
inputType: RelDataType,
groupKeysCount: Int): Boolean = {
val aggregateList = transformToAggregateFunctions(
aggregateCalls,
inputType,
needRetraction = false)._2
doAllSupportPartialMerge(aggregateList)
}
/**
* Return true if all aggregates can be partially merged. False otherwise.
*/
private[flink] def doAllSupportPartialMerge(
aggregateList: Array[TableAggregateFunction[_ <: Any, _ <: Any]]): Boolean = {
aggregateList.forall(ifMethodExistInFunction("merge", _))
}
/**
    * @return A mapping of field positions from the input type to the output type for grouping keys and
* aggregates.
*/
private def getOutputMappings(
namedAggregates: Seq[CalcitePair[AggregateCall, String]],
groupings: Array[Int],
inputType: RelDataType,
outputType: RelDataType) : (Array[(Int, Int)], Array[(Int, Int)]) = {
val groupKeyNames: Seq[(String, Int)] =
groupings.map(g => (inputType.getFieldList.get(g).getName, g))
val aggNames: Seq[(String, Int)] =
namedAggregates.zipWithIndex.map(a => (a._1.right, a._2))
val groupOutMapping: Array[(Int, Int)] =
groupKeyNames.map(g => (outputType.getField(g._1, false, false).getIndex, g._2)).toArray
val aggOutMapping: Array[(Int, Int)] =
aggNames.map(a => (outputType.getField(a._1, false, false).getIndex, a._2)).toArray
(groupOutMapping, aggOutMapping)
}
private def isTimeWindow(window: LogicalWindow) = {
window match {
case TumblingGroupWindow(_, _, size) => isTimeIntervalLiteral(size)
case SlidingGroupWindow(_, _, size, _) => isTimeIntervalLiteral(size)
case SessionGroupWindow(_, _, _) => true
}
}
/**
* Computes the positions of (window start, window end, rowtime).
*/
private[flink] def computeWindowPropertyPos(
properties: Seq[NamedWindowProperty]): (Option[Int], Option[Int], Option[Int]) = {
val propPos = properties.foldRight(
(None: Option[Int], None: Option[Int], None: Option[Int], 0)) {
case (p, (s, e, rt, i)) => p match {
case NamedWindowProperty(_, prop) =>
prop match {
case WindowStart(_) if s.isDefined =>
throw TableException("Duplicate window start property encountered. This is a bug.")
case WindowStart(_) =>
(Some(i), e, rt, i - 1)
case WindowEnd(_) if e.isDefined =>
throw TableException("Duplicate window end property encountered. This is a bug.")
case WindowEnd(_) =>
(s, Some(i), rt, i - 1)
case RowtimeAttribute(_) if rt.isDefined =>
throw TableException("Duplicate window rowtime property encountered. This is a bug.")
case RowtimeAttribute(_) =>
(s, e, Some(i), i - 1)
case ProctimeAttribute(_) =>
              // ignore this property; the value at its position will be null later
(s, e, rt, i - 1)
}
}
}
(propPos._1, propPos._2, propPos._3)
}
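  // Illustrative trace of the fold above: for properties [WindowStart, WindowEnd] the rightmost
  // property (WindowEnd) is visited first and receives index 0, then WindowStart receives -1,
  // yielding (Some(-1), Some(0), None) -- positions counted backwards from the end of the
  // property list (presumably resolved against the end of the output row by the callers).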
private def transformToAggregateFunctions(
aggregateCalls: Seq[AggregateCall],
inputType: RelDataType,
needRetraction: Boolean,
isStateBackedDataViews: Boolean = false)
: (Array[Array[Int]],
Array[TableAggregateFunction[_, _]],
Array[TypeInformation[_]],
Array[Seq[DataViewSpec[_]]]) = {
    // store the aggregate fields of each aggregate function, in the same order as the aggregates.
val aggFieldIndexes = new Array[Array[Int]](aggregateCalls.size)
val aggregates = new Array[TableAggregateFunction[_ <: Any, _ <: Any]](aggregateCalls.size)
val accTypes = new Array[TypeInformation[_]](aggregateCalls.size)
// create aggregate function instances by function type and aggregate field data type.
aggregateCalls.zipWithIndex.foreach { case (aggregateCall, index) =>
val argList: util.List[Integer] = aggregateCall.getArgList
if (argList.isEmpty) {
if (aggregateCall.getAggregation.isInstanceOf[SqlCountAggFunction]) {
aggFieldIndexes(index) = Array[Int](0)
} else {
throw new TableException("Aggregate fields should not be empty.")
}
} else {
aggFieldIndexes(index) = argList.asScala.map(i => i.intValue).toArray
}
val relDataType = inputType.getFieldList.get(aggFieldIndexes(index)(0)).getType
val sqlTypeName = relDataType.getSqlTypeName
aggregateCall.getAggregation match {
case _: SqlSumAggFunction =>
if (needRetraction) {
aggregates(index) = sqlTypeName match {
case TINYINT =>
new ByteSumWithRetractAggFunction
case SMALLINT =>
new ShortSumWithRetractAggFunction
case INTEGER =>
new IntSumWithRetractAggFunction
case BIGINT =>
new LongSumWithRetractAggFunction
case FLOAT =>
new FloatSumWithRetractAggFunction
case DOUBLE =>
new DoubleSumWithRetractAggFunction
case DECIMAL =>
new DecimalSumWithRetractAggFunction
case sqlType: SqlTypeName =>
throw new TableException(s"Sum aggregate does no support type: '$sqlType'")
}
} else {
aggregates(index) = sqlTypeName match {
case TINYINT =>
new ByteSumAggFunction
case SMALLINT =>
new ShortSumAggFunction
case INTEGER =>
new IntSumAggFunction
case BIGINT =>
new LongSumAggFunction
case FLOAT =>
new FloatSumAggFunction
case DOUBLE =>
new DoubleSumAggFunction
case DECIMAL =>
new DecimalSumAggFunction
case sqlType: SqlTypeName =>
throw new TableException(s"Sum aggregate does no support type: '$sqlType'")
}
}
case _: SqlSumEmptyIsZeroAggFunction =>
if (needRetraction) {
aggregates(index) = sqlTypeName match {
case TINYINT =>
new ByteSum0WithRetractAggFunction
case SMALLINT =>
new ShortSum0WithRetractAggFunction
case INTEGER =>
new IntSum0WithRetractAggFunction
case BIGINT =>
new LongSum0WithRetractAggFunction
case FLOAT =>
new FloatSum0WithRetractAggFunction
case DOUBLE =>
new DoubleSum0WithRetractAggFunction
case DECIMAL =>
new DecimalSum0WithRetractAggFunction
case sqlType: SqlTypeName =>
throw new TableException(s"Sum0 aggregate does no support type: '$sqlType'")
}
} else {
aggregates(index) = sqlTypeName match {
case TINYINT =>
new ByteSum0AggFunction
case SMALLINT =>
new ShortSum0AggFunction
case INTEGER =>
new IntSum0AggFunction
case BIGINT =>
new LongSum0AggFunction
case FLOAT =>
new FloatSum0AggFunction
case DOUBLE =>
new DoubleSum0AggFunction
case DECIMAL =>
new DecimalSum0AggFunction
case sqlType: SqlTypeName =>
throw new TableException(s"Sum0 aggregate does no support type: '$sqlType'")
}
}
case _: SqlAvgAggFunction =>
aggregates(index) = sqlTypeName match {
case TINYINT =>
new ByteAvgAggFunction
case SMALLINT =>
new ShortAvgAggFunction
case INTEGER =>
new IntAvgAggFunction
case BIGINT =>
new LongAvgAggFunction
case FLOAT =>
new FloatAvgAggFunction
case DOUBLE =>
new DoubleAvgAggFunction
case DECIMAL =>
new DecimalAvgAggFunction
case sqlType: SqlTypeName =>
throw new TableException(s"Avg aggregate does no support type: '$sqlType'")
}
case sqlMinMaxFunction: SqlMinMaxAggFunction =>
aggregates(index) = if (sqlMinMaxFunction.getKind == SqlKind.MIN) {
if (needRetraction) {
sqlTypeName match {
case TINYINT =>
new ByteMinWithRetractAggFunction
case SMALLINT =>
new ShortMinWithRetractAggFunction
case INTEGER =>
new IntMinWithRetractAggFunction
case BIGINT =>
new LongMinWithRetractAggFunction
case FLOAT =>
new FloatMinWithRetractAggFunction
case DOUBLE =>
new DoubleMinWithRetractAggFunction
case DECIMAL =>
new DecimalMinWithRetractAggFunction
case BOOLEAN =>
new BooleanMinWithRetractAggFunction
case VARCHAR | CHAR =>
new StringMinWithRetractAggFunction
case TIMESTAMP =>
new TimestampMinWithRetractAggFunction
case DATE =>
new DateMinWithRetractAggFunction
case TIME =>
new TimeMinWithRetractAggFunction
case sqlType: SqlTypeName =>
throw new TableException(
s"Min with retract aggregate does no support type: '$sqlType'")
}
} else {
sqlTypeName match {
case TINYINT =>
new ByteMinAggFunction
case SMALLINT =>
new ShortMinAggFunction
case INTEGER =>
new IntMinAggFunction
case BIGINT =>
new LongMinAggFunction
case FLOAT =>
new FloatMinAggFunction
case DOUBLE =>
new DoubleMinAggFunction
case DECIMAL =>
new DecimalMinAggFunction
case BOOLEAN =>
new BooleanMinAggFunction
case VARCHAR | CHAR =>
new StringMinAggFunction
case TIMESTAMP =>
new TimestampMinAggFunction
case DATE =>
new DateMinAggFunction
case TIME =>
new TimeMinAggFunction
case sqlType: SqlTypeName =>
throw new TableException(s"Min aggregate does no support type: '$sqlType'")
}
}
} else {
if (needRetraction) {
sqlTypeName match {
case TINYINT =>
new ByteMaxWithRetractAggFunction
case SMALLINT =>
new ShortMaxWithRetractAggFunction
case INTEGER =>
new IntMaxWithRetractAggFunction
case BIGINT =>
new LongMaxWithRetractAggFunction
case FLOAT =>
new FloatMaxWithRetractAggFunction
case DOUBLE =>
new DoubleMaxWithRetractAggFunction
case DECIMAL =>
new DecimalMaxWithRetractAggFunction
case BOOLEAN =>
new BooleanMaxWithRetractAggFunction
case VARCHAR | CHAR =>
new StringMaxWithRetractAggFunction
case TIMESTAMP =>
new TimestampMaxWithRetractAggFunction
case DATE =>
new DateMaxWithRetractAggFunction
case TIME =>
new TimeMaxWithRetractAggFunction
case sqlType: SqlTypeName =>
throw new TableException(
s"Max with retract aggregate does no support type: '$sqlType'")
}
} else {
sqlTypeName match {
case TINYINT =>
new ByteMaxAggFunction
case SMALLINT =>
new ShortMaxAggFunction
case INTEGER =>
new IntMaxAggFunction
case BIGINT =>
new LongMaxAggFunction
case FLOAT =>
new FloatMaxAggFunction
case DOUBLE =>
new DoubleMaxAggFunction
case DECIMAL =>
new DecimalMaxAggFunction
case BOOLEAN =>
new BooleanMaxAggFunction
case VARCHAR | CHAR =>
new StringMaxAggFunction
case TIMESTAMP =>
new TimestampMaxAggFunction
case DATE =>
new DateMaxAggFunction
case TIME =>
new TimeMaxAggFunction
case sqlType: SqlTypeName =>
throw new TableException(s"Max aggregate does no support type: '$sqlType'")
}
}
}
case _: SqlCountAggFunction =>
aggregates(index) = new CountAggFunction
case collect: SqlAggFunction if collect.getKind == SqlKind.COLLECT =>
aggregates(index) = new CollectAggFunction(FlinkTypeFactory.toTypeInfo(relDataType))
accTypes(index) = aggregates(index).getAccumulatorType
case udagg: AggSqlFunction =>
aggregates(index) = udagg.getFunction
accTypes(index) = udagg.accType
case unSupported: SqlAggFunction =>
throw new TableException(s"unsupported Function: '${unSupported.getName}'")
}
}
val accSpecs = new Array[Seq[DataViewSpec[_]]](aggregateCalls.size)
// create accumulator type information for every aggregate function
aggregates.zipWithIndex.foreach { case (agg, index) =>
if (accTypes(index) != null) {
val (accType, specs) = removeStateViewFieldsFromAccTypeInfo(index,
agg,
accTypes(index),
isStateBackedDataViews)
if (specs.isDefined) {
accSpecs(index) = specs.get
accTypes(index) = accType
} else {
accSpecs(index) = Seq()
}
} else {
accSpecs(index) = Seq()
accTypes(index) = getAccumulatorTypeOfAggregateFunction(agg)
}
}
(aggFieldIndexes, aggregates, accTypes, accSpecs)
}
private def createRowTypeForKeysAndAggregates(
groupings: Array[Int],
aggregates: Array[TableAggregateFunction[_, _]],
aggTypes: Array[TypeInformation[_]],
inputType: RelDataType,
windowKeyTypes: Option[Array[TypeInformation[_]]] = None): RowTypeInfo = {
// get the field data types of group keys.
val groupingTypes: Seq[TypeInformation[_]] =
groupings
.map(inputType.getFieldList.get(_).getType)
.map(FlinkTypeFactory.toTypeInfo)
// concat group key types, aggregation types, and window key types
val allFieldTypes: Seq[TypeInformation[_]] = windowKeyTypes match {
case None => groupingTypes ++: aggTypes
case _ => groupingTypes ++: aggTypes ++: windowKeyTypes.get
}
new RowTypeInfo(allFieldTypes: _*)
}
private def getTimeFieldPosition(
timeField: Expression,
inputType: RelDataType,
isParserCaseSensitive: Boolean): Int = {
timeField match {
case ResolvedFieldReference(name, _) =>
// get the RelDataType referenced by the time-field
val relDataType = inputType.getFieldList.filter { r =>
if (isParserCaseSensitive) {
name.equals(r.getName)
} else {
name.equalsIgnoreCase(r.getName)
}
}
// should only match one
if (relDataType.length == 1) {
relDataType.head.getIndex
} else {
throw TableException(
s"Encountered more than one time attribute with the same name: '$relDataType'")
}
case e => throw TableException(
"The time attribute of window in batch environment should be " +
s"ResolvedFieldReference, but is $e")
}
}
private[flink] def asLong(expr: Expression): Long = expr match {
case Literal(value: Long, TimeIntervalTypeInfo.INTERVAL_MILLIS) => value
case Literal(value: Long, RowIntervalTypeInfo.INTERVAL_ROWS) => value
case _ => throw new IllegalArgumentException()
}
private[flink] def determineLargestTumblingSize(size: Long, slide: Long) = gcd(size, slide)
private def gcd(a: Long, b: Long): Long = {
if (b == 0) a else gcd(b, a % b)
}
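  // Illustrative example: for a sliding window of size 10s and slide 4s,
  // determineLargestTumblingSize(10000, 4000) = gcd(10000, 4000) = 2000 (2s), the largest
  // tumbling interval that evenly divides both the window size and the slide.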
}
|
zimmermatt/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/AggregateUtil.scala
|
Scala
|
apache-2.0
| 54,045 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.precog
package object common {
type JobId = String
implicit def stringExtensions(s: String): StringExtensions = new StringExtensions(s)
}
package common {
final class StringExtensions(s: String) {
def cpath = CPath(s)
}
}
|
jedesah/Quasar
|
blueeyes/src/main/scala/quasar/precog/common/package.scala
|
Scala
|
apache-2.0
| 856 |
package spire
package benchmark
import scala.util.Random
import Random._
import spire.math._
import spire.implicits._
import com.google.caliper.{ Runner, SimpleBenchmark, Param }
import org.apache.commons.math3.analysis.polynomials._
import org.apache.commons.math3.analysis.UnivariateFunction
object PolynomialBenchmarks extends MyRunner(classOf[PolynomialBenchmarks])
class PolynomialBenchmarks extends MyBenchmark {
@Param(Array("1", "2", "4", "8", "16"))
var size: Int = 0
def arbitraryRational = {
val d = nextLong() % 100
Rational(nextLong(), if (d == 0L) 1L else d)
}
var spireDenseRationalPolys: Array[Polynomial[Rational]] = null
var spireSparseRationalPolys: Array[Polynomial[Rational]] = null
var spireDenseDoublePolys: Array[Polynomial[Double]] = null
var spireSparseDoublePolys: Array[Polynomial[Double]] = null
var commonsDoublePolys: Array[PolynomialFunction] = null
override protected def setUp(): Unit = {
val coeffs: Array[Array[Rational]] =
init(100)(init(size)(arbitraryRational))
spireDenseRationalPolys = coeffs.map(cs => Polynomial.dense(cs))
spireSparseRationalPolys = spireDenseRationalPolys.map(_.toSparse)
spireDenseDoublePolys = coeffs.map(cs => Polynomial.dense(cs.map(_.toDouble)))
spireSparseDoublePolys = spireDenseDoublePolys.map(_.toSparse)
commonsDoublePolys = coeffs.map(cs => new PolynomialFunction(cs.map(_.toDouble)))
}
def addSpireRationalPolynomials(data: Array[Polynomial[Rational]]): Polynomial[Rational] = {
var total: Polynomial[Rational] = null
var i = 0
val len = data.length
while (i < len) { total = data(0) + data(i); i += 1 }
total
}
def addSpireDoublePolynomials(data: Array[Polynomial[Double]]): Polynomial[Double] = {
var total: Polynomial[Double] = null
var i = 0
val len = data.length
while (i < len) { total = data(0) + data(i); i += 1 }
total
}
def addCommonsDoublePolynomials(data: Array[PolynomialFunction]): PolynomialFunction = {
var total: PolynomialFunction = null
var i = 0
val len = data.length
while (i < len) { total = data(0).add(data(i)); i += 1 }
total
}
def multiplySpireRationalPolynomials(data: Array[Polynomial[Rational]]): Polynomial[Rational] = {
var total: Polynomial[Rational] = null
var i = 0
val len = data.length
while (i < len) { total = data(0) * data(i); i += 1 }
total
}
def multiplySpireDoublePolynomials(data: Array[Polynomial[Double]]): Polynomial[Double] = {
var total: Polynomial[Double] = null
var i = 0
val len = data.length
while (i < len) { total = data(0) * data(i); i += 1 }
total
}
def multiplyCommonsDoublePolynomials(data: Array[PolynomialFunction]): PolynomialFunction = {
var total: PolynomialFunction = null
var i = 0
val len = data.length
while (i < len) { total = data(0).multiply(data(i)); i += 1 }
total
}
def derivativeSpireRationalPolynomials(data: Array[Polynomial[Rational]]): Polynomial[Rational] = {
var total : Polynomial[Rational] = null
var i = 0
val len = data.length
while (i < len) { total = data(i).derivative; i += 1 }
total
}
def derivativeSpireDoublePolynomials(data: Array[Polynomial[Double]]): Polynomial[Double] = {
var total : Polynomial[Double] = null
var i = 0
val len = data.length
while (i < len) { total = data(i).derivative; i += 1 }
total
}
def derivativeCommonsDoublePolynomials(data: Array[PolynomialFunction]): PolynomialFunction = {
var total : PolynomialFunction = null
var i = 0
val len = data.length
while (i < len) { total = data(i).polynomialDerivative; i += 1 }
total
}
def evaluateSpireRationalPolynomials(data: Array[Polynomial[Rational]]): Rational = {
val testVariable = Rational(2, 1)
var total : Rational = Rational(1,1)
var i = 0
val len = data.length
while (i < len) { total = data(i).apply(testVariable); i += 1 }
total
}
def evaluateSpireDoublePolynomials(data: Array[Polynomial[Double]]): Double = {
val testVariable = 2.0
var total : Double = 0.0
var i = 0
val len = data.length
while (i < len) { total = data(i).apply(testVariable); i += 1 }
total
}
def evaluateCommonsDoublePolynomials(data: Array[PolynomialFunction]): Double = {
val testVariable = 2.0
var total : Double = 0.0
var i = 0
val len = data.length
while (i < len) { total = data(i).value(testVariable); i += 1 }
total
}
def quotModSpireRationalPolynomials(data: Array[Polynomial[Rational]]): (Polynomial[Rational], Polynomial[Rational]) = {
var total: (Polynomial[Rational], Polynomial[Rational]) = null
var i = 0
val len = data.length
while (i < len) { total = data(0) /% data(i); i += 1 }
total
}
def quotModSpireDoublePolynomials(data: Array[Polynomial[Double]]): (Polynomial[Double], Polynomial[Double]) = {
var total: (Polynomial[Double], Polynomial[Double]) = null
var i = 0
val len = data.length
while (i < len) { total = data(0) /% data(i); i += 1 }
total
}
def timeAddSpireRationalPolysSparse(reps: Int) = run(reps)(addSpireRationalPolynomials(spireSparseRationalPolys))
def timeAddSpireRationalPolysDense(reps: Int) = run(reps)(addSpireRationalPolynomials(spireDenseRationalPolys))
def timeAddSpireDoublePolysSparse(reps: Int) = run(reps)(addSpireDoublePolynomials(spireSparseDoublePolys))
def timeAddSpireDoublePolysDense(reps: Int) = run(reps)(addSpireDoublePolynomials(spireDenseDoublePolys))
def timeAddCommonsDoublePolynomials(reps: Int) = run(reps)(addCommonsDoublePolynomials(commonsDoublePolys))
def timeMultiplySpireRationalPolysSparse(reps: Int) = run(reps)(multiplySpireRationalPolynomials(spireSparseRationalPolys))
def timeMultiplySpireRationalPolysDense(reps: Int) = run(reps)(multiplySpireRationalPolynomials(spireDenseRationalPolys))
def timeMultiplySpireDoublePolysSparse(reps: Int) = run(reps)(multiplySpireDoublePolynomials(spireSparseDoublePolys))
def timeMultiplySpireDoublePolysDense(reps: Int) = run(reps)(multiplySpireDoublePolynomials(spireDenseDoublePolys))
def timeMultiplyCommonsDoublePolynomials(reps: Int) = run(reps)(multiplyCommonsDoublePolynomials(commonsDoublePolys))
def timeDerivativeSpireRationalPolysSparse(reps: Int) = run(reps)(derivativeSpireRationalPolynomials(spireSparseRationalPolys))
def timeDerivativeSpireRationalPolysDense(reps: Int) = run(reps)(derivativeSpireRationalPolynomials(spireDenseRationalPolys))
def timeDerivativeSpireDoublePolysSparse(reps: Int) = run(reps)(derivativeSpireDoublePolynomials(spireSparseDoublePolys))
def timeDerivativeSpireDoublePolysDense(reps: Int) = run(reps)(derivativeSpireDoublePolynomials(spireDenseDoublePolys))
def timeDerivativeCommonsDoublePolynomials(reps: Int) = run(reps)(derivativeCommonsDoublePolynomials(commonsDoublePolys))
def timeEvaluateSpireRationalPolysSparse(reps: Int) = run(reps)(evaluateSpireRationalPolynomials(spireSparseRationalPolys))
def timeEvaluateSpireRationalPolysDense(reps: Int) = run(reps)(evaluateSpireRationalPolynomials(spireDenseRationalPolys))
def timeEvaluateSpireDoublePolysSparse(reps: Int) = run(reps)(evaluateSpireDoublePolynomials(spireSparseDoublePolys))
def timeEvaluateSpireDoublePolysDense(reps: Int) = run(reps)(evaluateSpireDoublePolynomials(spireDenseDoublePolys))
def timeEvaluateCommonsDoublePolynomials(reps: Int) = run(reps)(evaluateCommonsDoublePolynomials(commonsDoublePolys))
def timeQuotModSpireRationalPolysSparse(reps: Int) = run(reps)(quotModSpireRationalPolynomials(spireSparseRationalPolys))
def timeQuotModSpireRationalPolysDense(reps: Int) = run(reps)(quotModSpireRationalPolynomials(spireDenseRationalPolys))
def timeQuotModSpireDoublePolysSparse(reps: Int) = run(reps)(quotModSpireDoublePolynomials(spireSparseDoublePolys))
def timeQuotModSpireDoublePolysDense(reps: Int) = run(reps)(quotModSpireDoublePolynomials(spireDenseDoublePolys))
}
|
tixxit/spire
|
benchmark/src/main/scala/spire/benchmark/PolynomialBenchmark.scala
|
Scala
|
mit
| 8,010 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg
import com.github.fommil.netlib.{BLAS => NetlibBLAS, F2jBLAS}
import com.github.fommil.netlib.BLAS.{getInstance => NativeBLAS}
import org.apache.spark.internal.Logging
/**
* BLAS routines for MLlib's vectors and matrices.
*/
private[spark] object BLAS extends Serializable with Logging {
@transient private var _f2jBLAS: NetlibBLAS = _
@transient private var _nativeBLAS: NetlibBLAS = _
private val nativeL1Threshold: Int = 256
// For level-1 function dspmv, use f2jBLAS for better performance.
private[mllib] def f2jBLAS: NetlibBLAS = {
if (_f2jBLAS == null) {
_f2jBLAS = new F2jBLAS
}
_f2jBLAS
}
private[mllib] def getBLAS(vectorSize: Int): NetlibBLAS = {
if (vectorSize < nativeL1Threshold) {
f2jBLAS
} else {
nativeBLAS
}
}
/**
* y += a * x
*/
def axpy(a: Double, x: Vector, y: Vector): Unit = {
require(x.size == y.size)
y match {
case dy: DenseVector =>
x match {
case sx: SparseVector =>
axpy(a, sx, dy)
case dx: DenseVector =>
axpy(a, dx, dy)
case _ =>
throw new UnsupportedOperationException(
s"axpy doesn't support x type ${x.getClass}.")
}
case _ =>
throw new IllegalArgumentException(
s"axpy only supports adding to a dense vector but got type ${y.getClass}.")
}
}
/**
* y += a * x
*/
private def axpy(a: Double, x: DenseVector, y: DenseVector): Unit = {
val n = x.size
getBLAS(n).daxpy(n, a, x.values, 1, y.values, 1)
}
/**
* y += a * x
*/
private def axpy(a: Double, x: SparseVector, y: DenseVector): Unit = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val nnz = xIndices.length
if (a == 1.0) {
var k = 0
while (k < nnz) {
yValues(xIndices(k)) += xValues(k)
k += 1
}
} else {
var k = 0
while (k < nnz) {
yValues(xIndices(k)) += a * xValues(k)
k += 1
}
}
}
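  // Worked example (illustrative values): with a = 2.0,
  // x = Vectors.sparse(4, Array(1, 3), Array(1.0, 5.0)) and y = Vectors.dense(1.0, 1.0, 1.0, 1.0),
  // only positions 1 and 3 of y are updated, giving y = [1.0, 3.0, 1.0, 11.0].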
/** Y += a * x */
private[spark] def axpy(a: Double, X: DenseMatrix, Y: DenseMatrix): Unit = {
require(X.numRows == Y.numRows && X.numCols == Y.numCols, "Dimension mismatch: " +
s"size(X) = ${(X.numRows, X.numCols)} but size(Y) = ${(Y.numRows, Y.numCols)}.")
getBLAS(X.values.length).daxpy(X.numRows * X.numCols, a, X.values, 1, Y.values, 1)
}
/**
* dot(x, y)
*/
def dot(x: Vector, y: Vector): Double = {
require(x.size == y.size,
"BLAS.dot(x: Vector, y:Vector) was given Vectors with non-matching sizes:" +
" x.size = " + x.size + ", y.size = " + y.size)
(x, y) match {
case (dx: DenseVector, dy: DenseVector) =>
dot(dx, dy)
case (sx: SparseVector, dy: DenseVector) =>
dot(sx, dy)
case (dx: DenseVector, sy: SparseVector) =>
dot(sy, dx)
case (sx: SparseVector, sy: SparseVector) =>
dot(sx, sy)
case _ =>
throw new IllegalArgumentException(s"dot doesn't support (${x.getClass}, ${y.getClass}).")
}
}
/**
* dot(x, y)
*/
private def dot(x: DenseVector, y: DenseVector): Double = {
val n = x.size
getBLAS(n).ddot(n, x.values, 1, y.values, 1)
}
/**
* dot(x, y)
*/
private def dot(x: SparseVector, y: DenseVector): Double = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val nnz = xIndices.length
var sum = 0.0
var k = 0
while (k < nnz) {
sum += xValues(k) * yValues(xIndices(k))
k += 1
}
sum
}
/**
* dot(x, y)
*/
private def dot(x: SparseVector, y: SparseVector): Double = {
val xValues = x.values
val xIndices = x.indices
val yValues = y.values
val yIndices = y.indices
val nnzx = xIndices.length
val nnzy = yIndices.length
var kx = 0
var ky = 0
var sum = 0.0
    // advance the cursor in y until it catches up with the current index of x
while (kx < nnzx && ky < nnzy) {
val ix = xIndices(kx)
while (ky < nnzy && yIndices(ky) < ix) {
ky += 1
}
if (ky < nnzy && yIndices(ky) == ix) {
sum += xValues(kx) * yValues(ky)
ky += 1
}
kx += 1
}
sum
}
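  // Worked example (illustrative values): x = Vectors.sparse(5, Array(0, 3), Array(1.0, 2.0)),
  // y = Vectors.sparse(5, Array(3, 4), Array(5.0, 6.0)). Only the shared index 3 contributes,
  // so dot(x, y) = 2.0 * 5.0 = 10.0; the kx/ky cursors each advance at most nnz times,
  // so the merge runs in O(nnzx + nnzy).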
/**
* y = x
*/
def copy(x: Vector, y: Vector): Unit = {
val n = y.size
require(x.size == n)
y match {
case dy: DenseVector =>
x match {
case sx: SparseVector =>
val sxIndices = sx.indices
val sxValues = sx.values
val dyValues = dy.values
val nnz = sxIndices.length
var i = 0
var k = 0
while (k < nnz) {
val j = sxIndices(k)
while (i < j) {
dyValues(i) = 0.0
i += 1
}
dyValues(i) = sxValues(k)
i += 1
k += 1
}
while (i < n) {
dyValues(i) = 0.0
i += 1
}
case dx: DenseVector =>
Array.copy(dx.values, 0, dy.values, 0, n)
}
case _ =>
throw new IllegalArgumentException(s"y must be dense in copy but got ${y.getClass}")
}
}
/**
* x = a * x
*/
def scal(a: Double, x: Vector): Unit = {
x match {
case sx: SparseVector =>
getBLAS(sx.values.length).dscal(sx.values.length, a, sx.values, 1)
case dx: DenseVector =>
getBLAS(dx.size).dscal(dx.values.length, a, dx.values, 1)
case _ =>
throw new IllegalArgumentException(s"scal doesn't support vector type ${x.getClass}.")
}
}
// For level-3 routines, we use the native BLAS.
private[mllib] def nativeBLAS: NetlibBLAS = {
if (_nativeBLAS == null) {
_nativeBLAS = NativeBLAS
}
_nativeBLAS
}
/**
* Adds alpha * v * v.t to a matrix in-place. This is the same as BLAS's ?SPR.
*
* @param U the upper triangular part of the matrix in a [[DenseVector]](column major)
*/
def spr(alpha: Double, v: Vector, U: DenseVector): Unit = {
spr(alpha, v, U.values)
}
/**
* Adds alpha * v * v.t to a matrix in-place. This is the same as BLAS's ?SPR.
*
* @param U the upper triangular part of the matrix packed in an array (column major)
*/
def spr(alpha: Double, v: Vector, U: Array[Double]): Unit = {
val n = v.size
v match {
case DenseVector(values) =>
NativeBLAS.dspr("U", n, alpha, values, 1, U)
case SparseVector(size, indices, values) =>
val nnz = indices.length
var colStartIdx = 0
var prevCol = 0
var col = 0
var j = 0
var i = 0
var av = 0.0
while (j < nnz) {
col = indices(j)
// Skip empty columns.
colStartIdx += (col - prevCol) * (col + prevCol + 1) / 2
av = alpha * values(j)
i = 0
while (i <= j) {
U(colStartIdx + indices(i)) += av * values(i)
i += 1
}
j += 1
prevCol = col
}
case _ =>
throw new IllegalArgumentException(s"Unknown vector type ${v.getClass}.")
}
}
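  // Storage note (illustrative, not from the original file): U holds the upper triangle in
  // packed column-major order, e.g. for n = 3 it is [A00, A01, A11, A02, A12, A22] and
  // column j starts at offset j * (j + 1) / 2. The running update
  // colStartIdx += (col - prevCol) * (col + prevCol + 1) / 2 computes exactly that offset
  // difference incrementally while skipping empty columns of the sparse vector.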
/**
* A := alpha * x * x^T^ + A
* @param alpha a real scalar that will be multiplied to x * x^T^.
* @param x the vector x that contains the n elements.
* @param A the symmetric matrix A. Size of n x n.
*/
def syr(alpha: Double, x: Vector, A: DenseMatrix): Unit = {
val mA = A.numRows
val nA = A.numCols
require(mA == nA, s"A is not a square matrix (and hence is not symmetric). A: $mA x $nA")
require(mA == x.size, s"The size of x doesn't match the rank of A. A: $mA x $nA, x: ${x.size}")
x match {
case dv: DenseVector => syr(alpha, dv, A)
case sv: SparseVector => syr(alpha, sv, A)
case _ =>
throw new IllegalArgumentException(s"syr doesn't support vector type ${x.getClass}.")
}
}
private def syr(alpha: Double, x: DenseVector, A: DenseMatrix): Unit = {
val nA = A.numRows
val mA = A.numCols
nativeBLAS.dsyr("U", x.size, alpha, x.values, 1, A.values, nA)
// Fill lower triangular part of A
var i = 0
while (i < mA) {
var j = i + 1
while (j < nA) {
A(j, i) = A(i, j)
j += 1
}
i += 1
}
}
private def syr(alpha: Double, x: SparseVector, A: DenseMatrix): Unit = {
val mA = A.numCols
val xIndices = x.indices
val xValues = x.values
val nnz = xValues.length
val Avalues = A.values
var i = 0
while (i < nnz) {
val multiplier = alpha * xValues(i)
val offset = xIndices(i) * mA
var j = 0
while (j < nnz) {
Avalues(xIndices(j) + offset) += multiplier * xValues(j)
j += 1
}
i += 1
}
}
/**
* C := alpha * A * B + beta * C
* @param alpha a scalar to scale the multiplication A * B.
* @param A the matrix A that will be left multiplied to B. Size of m x k.
* @param B the matrix B that will be left multiplied by A. Size of k x n.
* @param beta a scalar that can be used to scale matrix C.
* @param C the resulting matrix C. Size of m x n. C.isTransposed must be false.
*/
def gemm(
alpha: Double,
A: Matrix,
B: DenseMatrix,
beta: Double,
C: DenseMatrix): Unit = {
require(!C.isTransposed,
"The matrix C cannot be the product of a transpose() call. C.isTransposed must be false.")
if (alpha == 0.0 && beta == 1.0) {
logDebug("gemm: alpha is equal to 0 and beta is equal to 1. Returning C.")
} else if (alpha == 0.0) {
getBLAS(C.values.length).dscal(C.values.length, beta, C.values, 1)
} else {
A match {
case sparse: SparseMatrix => gemm(alpha, sparse, B, beta, C)
case dense: DenseMatrix => gemm(alpha, dense, B, beta, C)
case _ =>
throw new IllegalArgumentException(s"gemm doesn't support matrix type ${A.getClass}.")
}
}
}
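  // Hedged usage sketch (illustrative, not part of the original file):
  //   val A = new DenseMatrix(2, 2, Array(1.0, 0.0, 0.0, 1.0)) // 2x2 identity, column-major
  //   val B = new DenseMatrix(2, 2, Array(1.0, 2.0, 3.0, 4.0))
  //   val C = DenseMatrix.zeros(2, 2)
  //   gemm(1.0, A, B, 0.0, C) // C := 1.0 * A * B + 0.0 * C, i.e. C ends up equal to B
  // With beta = 0.0 the previous contents of C are overwritten rather than accumulated.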
/**
* C := alpha * A * B + beta * C
* For `DenseMatrix` A.
*/
private def gemm(
alpha: Double,
A: DenseMatrix,
B: DenseMatrix,
beta: Double,
C: DenseMatrix): Unit = {
val tAstr = if (A.isTransposed) "T" else "N"
val tBstr = if (B.isTransposed) "T" else "N"
val lda = if (!A.isTransposed) A.numRows else A.numCols
val ldb = if (!B.isTransposed) B.numRows else B.numCols
require(A.numCols == B.numRows,
s"The columns of A don't match the rows of B. A: ${A.numCols}, B: ${B.numRows}")
require(A.numRows == C.numRows,
s"The rows of C don't match the rows of A. C: ${C.numRows}, A: ${A.numRows}")
require(B.numCols == C.numCols,
s"The columns of C don't match the columns of B. C: ${C.numCols}, A: ${B.numCols}")
nativeBLAS.dgemm(tAstr, tBstr, A.numRows, B.numCols, A.numCols, alpha, A.values, lda,
B.values, ldb, beta, C.values, C.numRows)
}
/**
* C := alpha * A * B + beta * C
* For `SparseMatrix` A.
*/
private def gemm(
alpha: Double,
A: SparseMatrix,
B: DenseMatrix,
beta: Double,
C: DenseMatrix): Unit = {
val mA: Int = A.numRows
val nB: Int = B.numCols
val kA: Int = A.numCols
val kB: Int = B.numRows
require(kA == kB, s"The columns of A don't match the rows of B. A: $kA, B: $kB")
require(mA == C.numRows, s"The rows of C don't match the rows of A. C: ${C.numRows}, A: $mA")
require(nB == C.numCols,
s"The columns of C don't match the columns of B. C: ${C.numCols}, A: $nB")
val Avals = A.values
val Bvals = B.values
val Cvals = C.values
val ArowIndices = A.rowIndices
val AcolPtrs = A.colPtrs
// Slicing is easy in this case. This is the optimal multiplication setting for sparse matrices
if (A.isTransposed) {
var colCounterForB = 0
if (!B.isTransposed) { // Expensive to put the check inside the loop
while (colCounterForB < nB) {
var rowCounterForA = 0
val Cstart = colCounterForB * mA
val Bstart = colCounterForB * kA
while (rowCounterForA < mA) {
var i = AcolPtrs(rowCounterForA)
val indEnd = AcolPtrs(rowCounterForA + 1)
var sum = 0.0
while (i < indEnd) {
sum += Avals(i) * Bvals(Bstart + ArowIndices(i))
i += 1
}
val Cindex = Cstart + rowCounterForA
Cvals(Cindex) = beta * Cvals(Cindex) + sum * alpha
rowCounterForA += 1
}
colCounterForB += 1
}
} else {
while (colCounterForB < nB) {
var rowCounterForA = 0
val Cstart = colCounterForB * mA
while (rowCounterForA < mA) {
var i = AcolPtrs(rowCounterForA)
val indEnd = AcolPtrs(rowCounterForA + 1)
var sum = 0.0
while (i < indEnd) {
sum += Avals(i) * B(ArowIndices(i), colCounterForB)
i += 1
}
val Cindex = Cstart + rowCounterForA
Cvals(Cindex) = beta * Cvals(Cindex) + sum * alpha
rowCounterForA += 1
}
colCounterForB += 1
}
}
} else {
// Scale matrix first if `beta` is not equal to 1.0
if (beta != 1.0) {
getBLAS(C.values.length).dscal(C.values.length, beta, C.values, 1)
}
// Perform matrix multiplication and add to C. The rows of A are multiplied by the columns of
// B, and added to C.
var colCounterForB = 0 // the column to be updated in C
if (!B.isTransposed) { // Expensive to put the check inside the loop
while (colCounterForB < nB) {
var colCounterForA = 0 // The column of A to multiply with the row of B
val Bstart = colCounterForB * kB
val Cstart = colCounterForB * mA
while (colCounterForA < kA) {
var i = AcolPtrs(colCounterForA)
val indEnd = AcolPtrs(colCounterForA + 1)
val Bval = Bvals(Bstart + colCounterForA) * alpha
while (i < indEnd) {
Cvals(Cstart + ArowIndices(i)) += Avals(i) * Bval
i += 1
}
colCounterForA += 1
}
colCounterForB += 1
}
} else {
while (colCounterForB < nB) {
var colCounterForA = 0 // The column of A to multiply with the row of B
val Cstart = colCounterForB * mA
while (colCounterForA < kA) {
var i = AcolPtrs(colCounterForA)
val indEnd = AcolPtrs(colCounterForA + 1)
val Bval = B(colCounterForA, colCounterForB) * alpha
while (i < indEnd) {
Cvals(Cstart + ArowIndices(i)) += Avals(i) * Bval
i += 1
}
colCounterForA += 1
}
colCounterForB += 1
}
}
}
}
/**
* y := alpha * A * x + beta * y
* @param alpha a scalar to scale the multiplication A * x.
* @param A the matrix A that will be left multiplied to x. Size of m x n.
* @param x the vector x that will be left multiplied by A. Size of n x 1.
* @param beta a scalar that can be used to scale vector y.
* @param y the resulting vector y. Size of m x 1.
*/
def gemv(
alpha: Double,
A: Matrix,
x: Vector,
beta: Double,
y: DenseVector): Unit = {
require(A.numCols == x.size,
s"The columns of A don't match the number of elements of x. A: ${A.numCols}, x: ${x.size}")
require(A.numRows == y.size,
s"The rows of A don't match the number of elements of y. A: ${A.numRows}, y:${y.size}")
if (alpha == 0.0 && beta == 1.0) {
logDebug("gemv: alpha is equal to 0 and beta is equal to 1. Returning y.")
} else if (alpha == 0.0) {
scal(beta, y)
} else {
(A, x) match {
case (smA: SparseMatrix, dvx: DenseVector) =>
gemv(alpha, smA, dvx, beta, y)
case (smA: SparseMatrix, svx: SparseVector) =>
gemv(alpha, smA, svx, beta, y)
case (dmA: DenseMatrix, dvx: DenseVector) =>
gemv(alpha, dmA, dvx, beta, y)
case (dmA: DenseMatrix, svx: SparseVector) =>
gemv(alpha, dmA, svx, beta, y)
case _ =>
throw new IllegalArgumentException(s"gemv doesn't support running on matrix type " +
s"${A.getClass} and vector type ${x.getClass}.")
}
}
}
/**
* y := alpha * A * x + beta * y
* For `DenseMatrix` A and `DenseVector` x.
*/
private def gemv(
alpha: Double,
A: DenseMatrix,
x: DenseVector,
beta: Double,
y: DenseVector): Unit = {
val tStrA = if (A.isTransposed) "T" else "N"
val mA = if (!A.isTransposed) A.numRows else A.numCols
val nA = if (!A.isTransposed) A.numCols else A.numRows
nativeBLAS.dgemv(tStrA, mA, nA, alpha, A.values, mA, x.values, 1, beta,
y.values, 1)
}
/**
* y := alpha * A * x + beta * y
* For `DenseMatrix` A and `SparseVector` x.
*/
private def gemv(
alpha: Double,
A: DenseMatrix,
x: SparseVector,
beta: Double,
y: DenseVector): Unit = {
val mA: Int = A.numRows
val nA: Int = A.numCols
val Avals = A.values
val xIndices = x.indices
val xNnz = xIndices.length
val xValues = x.values
val yValues = y.values
if (A.isTransposed) {
var rowCounterForA = 0
while (rowCounterForA < mA) {
var sum = 0.0
var k = 0
while (k < xNnz) {
sum += xValues(k) * Avals(xIndices(k) + rowCounterForA * nA)
k += 1
}
yValues(rowCounterForA) = sum * alpha + beta * yValues(rowCounterForA)
rowCounterForA += 1
}
} else {
var rowCounterForA = 0
while (rowCounterForA < mA) {
var sum = 0.0
var k = 0
while (k < xNnz) {
sum += xValues(k) * Avals(xIndices(k) * mA + rowCounterForA)
k += 1
}
yValues(rowCounterForA) = sum * alpha + beta * yValues(rowCounterForA)
rowCounterForA += 1
}
}
}
/**
* y := alpha * A * x + beta * y
* For `SparseMatrix` A and `SparseVector` x.
*/
private def gemv(
alpha: Double,
A: SparseMatrix,
x: SparseVector,
beta: Double,
y: DenseVector): Unit = {
val xValues = x.values
val xIndices = x.indices
val xNnz = xIndices.length
val yValues = y.values
val mA: Int = A.numRows
val nA: Int = A.numCols
val Avals = A.values
val Arows = if (!A.isTransposed) A.rowIndices else A.colPtrs
val Acols = if (!A.isTransposed) A.colPtrs else A.rowIndices
if (A.isTransposed) {
var rowCounter = 0
while (rowCounter < mA) {
var i = Arows(rowCounter)
val indEnd = Arows(rowCounter + 1)
var sum = 0.0
var k = 0
while (i < indEnd && k < xNnz) {
if (xIndices(k) == Acols(i)) {
sum += Avals(i) * xValues(k)
k += 1
i += 1
} else if (xIndices(k) < Acols(i)) {
k += 1
} else {
i += 1
}
}
yValues(rowCounter) = sum * alpha + beta * yValues(rowCounter)
rowCounter += 1
}
} else {
if (beta != 1.0) scal(beta, y)
var colCounterForA = 0
var k = 0
while (colCounterForA < nA && k < xNnz) {
if (xIndices(k) == colCounterForA) {
var i = Acols(colCounterForA)
val indEnd = Acols(colCounterForA + 1)
val xTemp = xValues(k) * alpha
while (i < indEnd) {
yValues(Arows(i)) += Avals(i) * xTemp
i += 1
}
k += 1
}
colCounterForA += 1
}
}
}
/**
* y := alpha * A * x + beta * y
* For `SparseMatrix` A and `DenseVector` x.
*/
private def gemv(
alpha: Double,
A: SparseMatrix,
x: DenseVector,
beta: Double,
y: DenseVector): Unit = {
val xValues = x.values
val yValues = y.values
val mA: Int = A.numRows
val nA: Int = A.numCols
val Avals = A.values
val Arows = if (!A.isTransposed) A.rowIndices else A.colPtrs
val Acols = if (!A.isTransposed) A.colPtrs else A.rowIndices
// Slicing is easy in this case. This is the optimal multiplication setting for sparse matrices
if (A.isTransposed) {
var rowCounter = 0
while (rowCounter < mA) {
var i = Arows(rowCounter)
val indEnd = Arows(rowCounter + 1)
var sum = 0.0
while (i < indEnd) {
sum += Avals(i) * xValues(Acols(i))
i += 1
}
yValues(rowCounter) = beta * yValues(rowCounter) + sum * alpha
rowCounter += 1
}
} else {
if (beta != 1.0) scal(beta, y)
// Perform matrix-vector multiplication and add to y
var colCounterForA = 0
while (colCounterForA < nA) {
var i = Acols(colCounterForA)
val indEnd = Acols(colCounterForA + 1)
val xVal = xValues(colCounterForA) * alpha
while (i < indEnd) {
yValues(Arows(i)) += Avals(i) * xVal
i += 1
}
colCounterForA += 1
}
}
}
}
|
witgo/spark
|
mllib/src/main/scala/org/apache/spark/mllib/linalg/BLAS.scala
|
Scala
|
apache-2.0
| 21,941 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
import monix.execution.exceptions.DummyException
import concurrent.duration._
import scala.util.{Failure, Success}
object TaskTraverseSuite extends BaseTestSuite {
test("Task.traverse should not execute in parallel") { implicit s =>
val seq = Seq((1, 2), (2, 1), (3, 3))
val f = Task.traverse(seq) {
case (i, d) =>
Task.evalAsync(i + 1).delayExecution(d.seconds)
}.runToFuture
s.tick()
assertEquals(f.value, None)
s.tick(2.seconds)
assertEquals(f.value, None)
s.tick(1.second)
assertEquals(f.value, None)
s.tick(3.second)
assertEquals(f.value, Some(Success(Seq(2, 3, 4))))
}
test("Task.traverse should onError if one of the tasks terminates in error") { implicit s =>
val ex = DummyException("dummy")
val seq = Seq((1, 2), (-1, 0), (3, 3), (3, 1))
val f = Task.traverse(seq) {
case (i, d) =>
Task.evalAsync(if (i < 0) throw ex else i + 1)
.delayExecution(d.seconds)
}.runToFuture
// First
s.tick(1.second)
assertEquals(f.value, None)
// Second
s.tick(2.second)
assertEquals(f.value, Some(Failure(ex)))
}
test("Task.traverse should be canceled") { implicit s =>
val seq = Seq((1, 2), (2, 1), (3, 3))
val f = Task.traverse(seq) {
case (i, d) => Task.evalAsync(i + 1).delayExecution(d.seconds)
}.runToFuture
s.tick()
assertEquals(f.value, None)
s.tick(2.seconds)
assertEquals(f.value, None)
f.cancel()
s.tick(1.second)
assertEquals(f.value, None)
}
}
|
Wogan/monix
|
monix-eval/shared/src/test/scala/monix/eval/TaskTraverseSuite.scala
|
Scala
|
apache-2.0
| 2,231 |
package dundertext.ui.keyboard
object KeyCodes {
final val NumPad0 = 96
final val NumPad1 = 97
final val NumPad2 = 98
final val NumPad3 = 99
final val NumPad4 = 100
final val NumPad5 = 101
final val NumPad6 = 102
final val NumPad7 = 103
final val NumPad8 = 104
final val NumPad9 = 105
final val F13 = 124
final val F14 = 125
final val F15 = 126
final val F16 = 127
final val F17 = 128
final val F18 = 129
final val F19 = 130
}
|
dundertext/dundertext
|
ui/src/main/scala/dundertext/ui/keyboard/KeyCodes.scala
|
Scala
|
gpl-3.0
| 463 |
package org.coursera.naptime.actions
import org.coursera.common.jsonformat.JsonFormats
import org.coursera.common.stringkey.StringKeyFormat
import play.api.libs.json.Format
import scala.annotation.tailrec
/**
* A somewhat-human-readable identifier. This will generally look like "what-is-machine-learning".
* This is suitable for URLs, etc. which require unique but readable identifiers that may be
* (very infrequently) changed.
*
*
* This is a Coursera-defined notion of slug and you should not expect it to match any external standard.
 * Do not change the validation: lots of existing opencourse data relies on the current rules.
*/
case class Slug(string: String) {
require(Slug.ValidRegex.pattern.matcher(string).matches, s"Slug not allowed: $string")
}
object Slug {
implicit val stringKeyFormat: StringKeyFormat[Slug] =
StringKeyFormat.delegateFormat[Slug, String](
key =>
try {
Some(Slug(key))
} catch {
case e: IllegalArgumentException => None
},
_.string)
implicit val format: Format[Slug] = JsonFormats.stringKeyFormat
private val ValidRegex = """[a-z0-9\-]+""".r
private val MaxSlugLength = 80
/**
* Method to create a URL friendly slug string from a given input string
* Most of the logic for this has been copied from Play framework's Java
* extension implementation here:
* https://github.com/playframework/play1/blob/b835b790c795bddd7d41ac6a4a7a1eb6922fab2f/framework/src/play/templates/JavaExtensions.java#L368.
*/
def slugify(input: String): Slug = {
    // Convert the unicode input to ASCII so that non-Latin-based languages get reasonable
    // conversions (note: the Junidecode transliteration call is commented out below).
val slugString = input //Junidecode.unidecode(input)
.replaceAll("([a-z])'s([^a-z])", "$1s$2") // Convert apostrophes.
.replaceAll("[^a-zA-Z0-9]", "-") // Convert all non alphanumeric characters with hyphen.
.replaceAll("-{2,}", "-") // Collapse multiple hyphens into one
.stripPrefix("-")
.stripSuffix("-")
.toLowerCase
val words = slugString.split("-").toList
if (words.head.length() > MaxSlugLength) {
Slug(words.head.take(MaxSlugLength))
} else {
@tailrec
def buildTruncatedSlug(currentWord: String, wordList: List[String]): String = {
wordList match {
case firstWord :: tail =>
val candidate = currentWord + "-" + firstWord
if (candidate.length() > MaxSlugLength) {
currentWord
} else {
buildTruncatedSlug(candidate, tail)
}
case Nil => currentWord
}
}
Slug(buildTruncatedSlug(words.head, words.drop(1)))
}
}
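  // Illustrative example, consistent with the scaladoc above:
  //   Slug.slugify("What is Machine Learning?") == Slug("what-is-machine-learning")
  // Non-alphanumeric characters become hyphens, runs of hyphens are collapsed, the result is
  // lower-cased, and it is truncated word by word to at most MaxSlugLength characters.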
}
|
coursera/naptime
|
naptime/src/test/scala/org/coursera/naptime/actions/Slug.scala
|
Scala
|
apache-2.0
| 2,724 |
package lila.socket
import akka.actor._
import play.api.libs.json.JsObject
import scala.collection.mutable
import scala.concurrent.duration._
import actorApi.{ SocketLeave, SocketEnter }
import lila.hub.actorApi.{ SendTo, SendTos, WithUserIds }
private final class UserRegister extends Actor {
override def preStart() {
context.system.lilaBus.subscribe(self, 'users, 'socketDoor)
}
override def postStop() {
super.postStop()
context.system.lilaBus.unsubscribe(self)
}
type UID = String
type UserId = String
val users = mutable.Map.empty[UserId, mutable.Map[UID, SocketMember]]
def receive = {
case SendTo(userId, msg) => sendTo(userId, msg)
case SendTos(userIds, msg) => userIds foreach { sendTo(_, msg) }
case WithUserIds(f) => f(users.keys)
case SocketEnter(uid, member) => member.userId foreach { userId =>
users get userId match {
case None => users += (userId -> mutable.Map(uid -> member))
case Some(members) => members += (uid -> member)
}
}
case SocketLeave(uid, member) => member.userId foreach { userId =>
users get userId foreach { members =>
members -= uid
if (members.isEmpty) users -= userId
}
}
}
private def sendTo(userId: String, msg: JsObject) {
users get userId foreach { members =>
members.values foreach (_ push msg)
}
}
}
|
clarkerubber/lila
|
modules/socket/src/main/UserRegister.scala
|
Scala
|
agpl-3.0
| 1,408 |
package com.sksamuel.elastic4s
import play.api.libs.json.{Json, Reads, Writes}
import scala.annotation.implicitNotFound
import scala.util.Try
package object playjson {
@implicitNotFound("No Writes for type ${T} found. Bring an implicit Writes[T] instance in scope")
implicit def playJsonIndexable[T](implicit w: Writes[T]): Indexable[T] =
(t: T) => Json.stringify(Json.toJson(t)(w))
@implicitNotFound("No Reads for type ${T} found. Bring an implicit Reads[T] instance in scope")
implicit def playJsonHitReader[T](implicit r: Reads[T]): HitReader[T] =
(hit: Hit) => Try {
Json.parse(hit.sourceAsString).as[T]
}
@implicitNotFound("No Reads for type ${T} found. Bring an implicit Reads[T] instance in scope")
implicit def playJsonAggReader[T](implicit r: Reads[T]): AggReader[T] =
(json: String) => Try {
Json.parse(json).as[T]
}
@implicitNotFound("No Writes for type ${T} found. Bring an implicit Writes[T] instance in scope")
implicit def playJsonParamSerializer[T](implicit w: Writes[T]): ParamSerializer[T] =
(t: T) => Json.stringify(Json.toJson(t)(w))
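  // Hedged usage sketch (the City case class below is hypothetical, not part of this file):
  //   case class City(name: String)
  //   implicit val cityFormat: Format[City] = Json.format[City]
  //   // With a Format (Reads + Writes) in scope, the derived Indexable[City], HitReader[City]
  //   // and ParamSerializer[City] let elastic4s write and read City documents as JSON.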
}
|
sksamuel/elastic4s
|
elastic4s-json-play/src/main/scala/com/sksamuel/elastic4s/playjson/package.scala
|
Scala
|
apache-2.0
| 1,115 |
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import bintray.BintrayCredentials
import bintray.BintrayKeys.{ bintrayEnsureCredentials, bintrayOrganization, bintrayPackage }
import bintry.Client
import com.typesafe.sbt.packager.Keys._
import com.typesafe.sbt.packager.debian.DebianPlugin.autoImport.Debian
import com.typesafe.sbt.packager.rpm.RpmPlugin.autoImport.Rpm
import com.typesafe.sbt.packager.universal.UniversalPlugin.autoImport.Universal
import dispatch.{ FunctionHandler, Http }
import sbt.Keys._
import sbt._
object Bintray extends AutoPlugin {
object autoImport {
val publishRelease: TaskKey[Unit] = taskKey[Unit]("Publish binary in bintray")
val publishLatest: TaskKey[Unit] = taskKey[Unit]("Publish latest binary in bintray")
val publishDebian: TaskKey[Unit] = taskKey[Unit]("publish debian package in Bintray")
val publishRpm: TaskKey[Unit] = taskKey[Unit]("publish rpm package in Bintray")
val rpmReleaseFile = taskKey[File]("The rpm release package file")
}
import autoImport._
override lazy val projectSettings = Seq(
publishRelease in ThisBuild := {
val file = (packageBin in Universal).value
btPublish(file.getName,
file,
bintrayEnsureCredentials.value,
bintrayOrganization.value,
"binary",
bintrayPackage.value,
(version in ThisBuild).value,
sLog.value)
},
publishLatest in ThisBuild := Def.taskDyn {
if ((version in ThisBuild).value.endsWith("-SNAPSHOT")) sys.error("Snapshot version can't be released")
val file = (packageBin in Universal).value
val latestVersion = if (version.value.contains('-')) "latest-beta" else "latest"
val latestName = file.getName.replace(version.value, latestVersion)
if (latestName == file.getName)
Def.task {
sLog.value.warn(s"Latest package name can't be built using package name [$latestName], publish aborted")
}
else Def.task {
removeVersion(bintrayEnsureCredentials.value,
bintrayOrganization.value,
"binary",
bintrayPackage.value,
latestVersion,
sLog.value)
btPublish(latestName,
file,
bintrayEnsureCredentials.value,
bintrayOrganization.value,
"binary",
bintrayPackage.value,
latestVersion,
sLog.value)
}
}
.value,
publishDebian in ThisBuild := {
if ((version in ThisBuild).value.endsWith("-SNAPSHOT")) sys.error("Snapshot version can't be released")
val file = (debianSign in Debian).value
val bintrayCredentials = bintrayEnsureCredentials.value
btPublish(file.getName,
file,
bintrayCredentials,
bintrayOrganization.value,
"debian-beta",
bintrayPackage.value,
version.value,
sLog.value,
"deb_distribution" → "any",
"deb_component" → "main",
"deb_architecture" → "all"
)
if (!version.value.contains('-'))
btPublish(file.getName,
file,
bintrayCredentials,
bintrayOrganization.value,
"debian-stable",
bintrayPackage.value,
version.value,
sLog.value,
"deb_distribution" → "any",
"deb_component" → "main",
"deb_architecture" → "all"
)
},
publishRpm in ThisBuild := {
if ((version in ThisBuild).value.endsWith("-SNAPSHOT")) sys.error("Snapshot version can't be released")
val file = (packageBin in Rpm).value
val bintrayCredentials = bintrayEnsureCredentials.value
btPublish(file.getName,
file,
bintrayCredentials,
bintrayOrganization.value,
"rpm-beta",
bintrayPackage.value,
(version in Rpm).value + '-' + (rpmRelease in Rpm).value,
sLog.value)
if (!version.value.contains('-'))
btPublish(file.getName,
file,
bintrayCredentials,
bintrayOrganization.value,
"rpm-stable",
bintrayPackage.value,
(version in Rpm).value + '-' + (rpmRelease in Rpm).value,
sLog.value)
}
)
private def asStatusAndBody = new FunctionHandler({ r => (r.getStatusCode, r.getResponseBody) })
def removeVersion(credential: BintrayCredentials,
org: Option[String],
repoName: String,
packageName: String,
version: String,
log: Logger): Unit = {
val BintrayCredentials(user, key) = credential
val client: Client = Client(user, key, new Http())
val repo: Client#Repo = client.repo(org.getOrElse(user), repoName)
Await.result(repo.get(packageName).version(version).delete(asStatusAndBody), Duration.Inf) match {
case (status, body) => log.info(s"Delete version $packageName $version: $status ($body)")
}
}
private def btPublish(filename: String,
file: File,
credential: BintrayCredentials,
org: Option[String],
repoName: String,
packageName: String,
version: String,
log: Logger,
additionalParams: (String, String)*): Unit = {
val BintrayCredentials(user, key) = credential
val owner: String = org.getOrElse(user)
val client: Client = Client(user, key, new Http())
val repo: Client#Repo = client.repo(org.getOrElse(user), repoName)
val params = additionalParams
.map { case (k, v) => s"$k=$v" }
.mkString(";", ";", "")
val upload = repo.get(packageName).version(version).upload(filename + params, file)
log.info(s"Uploading $file ... (${org.getOrElse(user)}/$repoName/$packageName/$version/$filename$params)")
Await.result(upload(asStatusAndBody), Duration.Inf) match {
case (201, _) => log.info(s"$file was uploaded to $owner/$packageName@$version")
case (_, fail) => sys.error(s"failed to upload $file to $owner/$packageName@$version: $fail")
}
}
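  // Illustrative trace (example values only): with additionalParams
  // "deb_distribution" -> "any" and "deb_architecture" -> "all", `params` becomes
  // ";deb_distribution=any;deb_architecture=all", so the uploaded path ends in
  // "<filename>;deb_distribution=any;deb_architecture=all" -- matching how publishDebian
  // above passes the Debian attributes along with the file name.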
}
|
CERT-BDF/Cortex
|
project/Bintray.scala
|
Scala
|
agpl-3.0
| 6,205 |
package sharry.store.records
import cats.effect.Sync
import cats.implicits._
import fs2.Stream
import sharry.common._
import sharry.store.doobie.DoobieMeta._
import sharry.store.doobie._
import doobie._
import doobie.implicits._
case class RAlias(
id: Ident,
account: Ident,
name: String,
validity: Duration,
enabled: Boolean,
created: Timestamp
)
object RAlias {
val table = fr"alias_"
object Columns {
val id = Column("id")
val account = Column("account_id")
val name = Column("name_")
val validity = Column("validity")
val enabled = Column("enabled")
val created = Column("created")
val all = List(id, account, name, validity, enabled, created)
}
def createNew[F[_]: Sync](
account: Ident,
name: String,
validity: Duration,
enabled: Boolean
): F[RAlias] =
for {
id <- Ident.randomId[F]
now <- Timestamp.current[F]
} yield RAlias(id, account, name, validity, enabled, now)
import Columns._
def insert(v: RAlias): ConnectionIO[Int] = {
val sql = Sql.insertRow(
table,
all,
fr"${v.id},${v.account},${v.name},${v.validity},${v.enabled},${v.created}"
)
sql.update.run
}
def update(aid: Ident, acc: Ident, v: RAlias): ConnectionIO[Int] =
Sql
.updateRow(
table,
Sql.and(id.is(aid), account.is(acc)),
Sql.commas(
id.setTo(v.id),
name.setTo(v.name),
validity.setTo(v.validity),
enabled.setTo(v.enabled)
)
)
.update
.run
def findById(aliasId: Ident, accId: Ident): ConnectionIO[Option[(RAlias, Ident)]] = {
val aId = "a" :: id
find0(accId, aId.is(aliasId)).option
}
def findAll(acc: Ident, nameQ: String): Stream[ConnectionIO, (RAlias, Ident)] = {
val aName = "a" :: name
val q =
if (nameQ.isEmpty) Fragment.empty
else aName.like("%" + nameQ + "%")
find0(acc, q).stream
}
private def find0(accId: Ident, cond: Fragment) = {
val aId = "a" :: id
val aAccount = "a" :: account
val cId = "c" :: RAccount.Columns.id
val cLogin = "c" :: RAccount.Columns.login
val from =
table ++ fr"a" ++
fr"INNER JOIN" ++ RAccount.table ++ fr"c ON" ++ aAccount.is(cId)
Sql
.selectSimple(
all.map("a" :: _) :+ cLogin,
from,
Sql.and(
Sql.or(aAccount.is(accId), aId.in(RAliasMember.aliasMemberOf(accId))),
cond
)
)
.query[(RAlias, Ident)]
}
def existsById(aliasId: Ident): ConnectionIO[Boolean] =
Sql.selectCount(id, table, id.is(aliasId)).query[Int].map(_ > 0).unique
def delete(aliasId: Ident, accId: Ident): ConnectionIO[Int] =
Sql.deleteFrom(table, Sql.and(account.is(accId), id.is(aliasId))).update.run
def deleteForAccount(accountId: Ident): ConnectionIO[Int] =
Sql.deleteFrom(table, account.is(accountId)).update.run
}
|
eikek/sharry
|
modules/store/src/main/scala/sharry/store/records/RAlias.scala
|
Scala
|
gpl-3.0
| 2,931 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.api.services
import slamdata.Predef.{ -> => _, _ }
import quasar.Data
import quasar.api._, ToQResponse.ops._, ToApiError.ops._
import quasar.contrib.pathy._
import quasar.contrib.scalaz.disjunction._
import quasar.fp._, numeric._
import quasar.fs._
import java.nio.charset.StandardCharsets
import argonaut.Parse
import argonaut.Argonaut._
import argonaut.ArgonautScalaz._
import eu.timepit.refined.auto._
import org.http4s._
import org.http4s.dsl._
import org.http4s.headers.{`Content-Type`, Accept}
import pathy.Path._
import pathy.argonaut.PosixCodecJson._
import scalaz.{Zip => _, _}, Scalaz._
import scalaz.concurrent.Task
import scalaz.stream.Process
import scodec.bits.ByteVector
object data {
import ManageFile.PathPair
def service[S[_]](
implicit
R: ReadFile.Ops[S],
W: WriteFile.Ops[S],
M: ManageFile.Ops[S],
Q: QueryFile.Ops[S],
S0: Task :<: S,
S1: FileSystemFailure :<: S
): QHttpService[S] = QHttpService {
case req @ GET -> AsPath(path) :? Offset(offsetParam) +& Limit(limitParam) =>
respondT {
val offsetLimit: ApiErrT[Free[S, ?], (Natural, Option[Positive])] =
(offsetOrInvalid(offsetParam) |@| limitOrInvalid(limitParam)).tupled.liftT[Free[S, ?]]
val requestedFormat = MessageFormat.fromAccept(req.headers.get(Accept))
val zipped = req.headers.get(Accept).exists(_.values.exists(_.mediaRange == MediaType.`application/zip`))
offsetLimit >>= { case (offset, limit) =>
download[S](requestedFormat, path, offset, limit, zipped).liftM[ApiErrT]
}
}
case req @ POST -> AsFilePath(path) =>
upload(req, path, W.appendThese(_, _))
case req @ PUT -> AsPath(path) =>
upload(req, path, W.saveThese(_, _).as(Vector.empty))
case req @ Method.MOVE -> AsPath(path) =>
respond((for {
dstStr <- EitherT.fromDisjunction[M.FreeS](
requiredHeader(Destination, req) map (_.value))
dst <- EitherT.fromDisjunction[M.FreeS](parseDestination(dstStr))
pair <- EitherT.fromDisjunction[M.FreeS](pathPair(path, dst, "move"))
_ <- EitherT.fromDisjunction[M.FreeS](if (pair.src === pair.dst) sameDst(pair.src).left else ().right)
_ <- M.move(pair, MoveSemantics.FailIfExists)
.leftMap(_.toApiError)
} yield Created).run)
case req @ Method.COPY -> AsPath(path) =>
respond((for {
dstStr <- EitherT.fromDisjunction[M.FreeS](
requiredHeader(Destination, req) map (_.value))
dst <- EitherT.fromDisjunction[M.FreeS](parseDestination(dstStr))
pair <- EitherT.fromDisjunction[M.FreeS](pathPair(path, dst, "copy"))
_ <- EitherT.fromDisjunction[M.FreeS](if (pair.src === pair.dst) sameDst(pair.src).left else ().right)
_ <- M.copy(pair).leftMap(_.toApiError)
} yield Created).run)
case DELETE -> AsPath(path) =>
respond(M.delete(path).run)
}
////
private def sameDst(path: APath) = ApiError.fromMsg(
BadRequest withReason "Destination is same path as source",
s"Destination is same path as source",
"path" := path)
private def download[S[_]](
format: MessageFormat,
path: APath,
offset: Natural,
limit: Option[Positive],
zipped: Boolean
)(implicit
R: ReadFile.Ops[S],
Q: QueryFile.Ops[S],
S0: FileSystemFailure :<: S,
S1: Task :<: S
): Free[S, QResponse[S]] =
refineType(path).fold(
dirPath => {
val p = zippedContents[S](dirPath, format, offset, limit)
val headers =
`Content-Type`(MediaType.`application/zip`) ::
(format.disposition.toList: List[Header])
QResponse.streaming(p) ∘ (_.modifyHeaders(_ ++ headers))
},
filePath => {
Q.fileExists(filePath).flatMap { exists =>
if (exists) {
val d = R.scan(filePath, offset, limit)
zipped.fold(
formattedZipDataResponse(format, filePath, d),
formattedDataResponse(format, d))
// ToQResponse is called explicitly because Scala type inference fails otherwise...
} else ToQResponse[ApiError, S].toResponse(FileSystemError.pathErr(PathError.pathNotFound(filePath)).toApiError).point[Free[S, ?]]
}
})
private def parseDestination(dstString: String): ApiError \\/ APath = {
def absPathRequired(rf: pathy.Path[Rel, _, _]) = ApiError.fromMsg(
BadRequest withReason "Illegal move.",
"Absolute path required for Destination.",
"dstPath" := posixCodec.unsafePrintPath(rf)).left
UriPathCodec.parsePath(
absPathRequired,
unsafeSandboxAbs(_).right,
absPathRequired,
unsafeSandboxAbs(_).right
)(dstString)
}
  private def pathPair(src: APath, dst: APath, operation: String): ApiError \/ PathPair =
refineType(src).fold(
srcDir =>
refineType(dst).swap.bimap(
df => ApiError.fromMsg(
BadRequest withReason "Illegal move.",
s"Cannot $operation directory into a file",
"srcPath" := srcDir,
"dstPath" := df),
PathPair.dirToDir(srcDir, _)),
srcFile =>
refineType(dst).bimap(
dd => ApiError.fromMsg(
BadRequest withReason "Illegal move.",
s"Cannot $operation a file into a directory, must specify destination precisely",
"srcPath" := srcFile,
"dstPath" := dd),
PathPair.fileToFile(srcFile, _)))
// TODO: Streaming
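  // Accepts either a single file in any supported MessageFormat, or a zip
  // archive uploaded to a directory path; the archive must contain a hidden
  // metadata file mapping each entry to its content type.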
private def upload[S[_]](
req: Request,
path: APath,
by: (AFile, Vector[Data]) => FileSystemErrT[Free[S,?], Vector[FileSystemError]]
)(implicit S0: Task :<: S): Free[S, QResponse[S]] = {
type FreeS[A] = Free[S, A]
val inj = free.injectFT[Task, S]
def hoist = Hoist[EitherT[?[_], QResponse[S], ?]].hoist(inj)
def decodeUtf8(bytes: ByteVector): EitherT[FreeS, QResponse[S], String] =
EitherT(bytes.decodeUtf8.disjunction.point[FreeS])
.leftMap(err => InvalidMessageBodyFailure(err.toString).toResponse[S])
def errorsResponse(
decodeErrors: IndexedSeq[DecodeError],
persistErrors: FileSystemErrT[FreeS, Vector[FileSystemError]]
): OptionT[FreeS, QResponse[S]] =
OptionT(decodeErrors.toList.toNel.map(errs =>
respond_[S, ApiError](ApiError.apiError(
BadRequest withReason "Malformed upload data.",
"errors" := errs.map(_.shows)))).sequence)
.orElse(OptionT(persistErrors.fold[Option[QResponse[S]]](
_.toResponse[S].some,
errs => errs.toList.toNel.map(errs1 =>
errs1.toApiError.copy(status = InternalServerError.withReason(
"Error persisting uploaded data."
)).toResponse[S]))))
    def write(fPath: AFile, xs: IndexedSeq[(DecodeError \/ Data)]): FreeS[QResponse[S] \/ Unit] =
if (xs.isEmpty) {
respond_[S, ApiError](ApiError.fromStatus(BadRequest withReason "Request has no body.")).map(_.left)
} else {
val (errors, data) = xs.toVector.separate
errorsResponse(errors, by(fPath, data)).toLeft(()).run
}
def decodeContent(format: MessageFormat, strs: Process[Task, String])
      : EitherT[Task, DecodeFailure, Process[Task, DecodeError \/ Data]] =
EitherT(format.decode(strs).map(_.leftMap(err => InvalidMessageBodyFailure(err.msg): DecodeFailure)))
def writeOne(fPath: AFile, fmt: MessageFormat, strs: Process[Task, String])
: EitherT[FreeS, QResponse[S], Unit] = {
hoist(decodeContent(fmt, strs).leftMap(_.toResponse[S]))
.flatMap(dataStream => EitherT(inj(dataStream.runLog).flatMap(write(fPath, _))))
}
def writeAll(files: Map[RFile, ByteVector], meta: ArchiveMetadata, aDir: ADir) =
files.toList.traverse { case (rFile, contentBytes) =>
for {
// What's the metadata for this file
          fileMetadata <- EitherT((meta.files.get(rFile) \/> InvalidMessageBodyFailure(s"metadata file does not contain metadata for ${posixCodec.printPath(rFile)}").toResponse[S]).point[FreeS])
mdType = fileMetadata.contentType.mediaType
// Do we have a quasar format that corresponds to the content-type in the metadata
          fmt <- EitherT((MessageFormat.fromMediaType(mdType) \/> (InvalidMessageBodyFailure(s"Unsupported media type: $mdType for file: ${posixCodec.printPath(rFile)}").toResponse[S])).point[FreeS])
// Transform content from bytes to a String
content <- decodeUtf8(contentBytes)
// Write a single file with the specified format
_ <- writeOne(aDir </> rFile, fmt, Process.emit(content))
} yield ()
}
// We only support uploading zip files into directory paths
val uploadFormats = Set(MediaType.`application/zip`: MediaRange)
refineType(path).fold[EitherT[FreeS, QResponse[S], Unit]](
// Client is attempting to upload a directory
aDir =>
for {
// Make sure the request content-type is zip
          _ <- EitherT((req.headers.get(`Content-Type`) \/> (MediaTypeMissing(uploadFormats): DecodeFailure)).flatMap { contentType =>
val mdType = contentType.mediaType
if (mdType == MediaType.`application/zip`) ().right else MediaTypeMismatch(mdType, uploadFormats).left
}.leftMap(_.toResponse[S]).point[FreeS])
// Unzip the uploaded archive
filesToContent <- hoist(Zip.unzipFiles(req.body)
.leftMap(err => InvalidMessageBodyFailure(err).toResponse[S]))
          // Separate metadata file from all others
tuple <- filesToContent.get(ArchiveMetadata.HiddenFile).cata(
meta => decodeUtf8(meta).strengthR(filesToContent - ArchiveMetadata.HiddenFile),
EitherT.leftT[FreeS, QResponse[S], (String, Map[RelFile[Sandboxed], ByteVector])](InvalidMessageBodyFailure("metadata not found: " + posixCodec.printPath(ArchiveMetadata.HiddenFile)).toResponse[S].point[FreeS]))
(metaString, restOfFilesToContent) = tuple
          meta <- EitherT((Parse.decodeOption[ArchiveMetadata](metaString) \/> (InvalidMessageBodyFailure("metadata file has incorrect format").toResponse[S])).point[FreeS])
// Write each file if we can determine a format
_ <- writeAll(restOfFilesToContent, meta, aDir)
} yield (),
// Client is attempting to upload a single file
aFile => for {
// What's the format they want to upload
fmt <- EitherT(MessageFormat.forMessage(req).point[FreeS]).leftMap(_.toResponse[S])
// Write a single file with the specified format
_ <- writeOne(aFile, fmt, req.bodyAsText)
} yield ()).as(QResponse.ok[S]).run.map(_.merge) // Return 200 Ok if everything goes smoothly
}
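  // Zips every descendant file of `dir`, each encoded with `format`, prefixed
  // by a hidden metadata entry that records the content type of every file.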
private def zippedContents[S[_]](
dir: AbsDir[Sandboxed],
format: MessageFormat,
offset: Natural,
limit: Option[Positive]
)(implicit
R: ReadFile.Ops[S],
Q: QueryFile.Ops[S]
): Process[R.M, ByteVector] =
Process.await(Q.descendantFiles(dir)) { children =>
val files = children.collect {
case (f, Node.View) => f
case (f, Node.Data) => f
}.toList
val metadata = ArchiveMetadata(files.strengthR(FileMetadata(`Content-Type`(format.mediaType))).toMap)
val metaFileAndContent = (ArchiveMetadata.HiddenFile, Process.emit(metadata.asJson.spaces2))
val qFilesAndContent = files.map { file =>
val data = R.scan(dir </> file, offset, limit)
(file, format.encode(data))
}
val allFiles = metaFileAndContent :: qFilesAndContent
Zip.zipFiles(allFiles.toMap.mapValues(strContent => strContent.map(str => ByteVector.view(str.getBytes(StandardCharsets.UTF_8)))))
}
}
|
jedesah/Quasar
|
web/src/main/scala/quasar/api/services/data.scala
|
Scala
|
apache-2.0
| 12,426 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers.dsl
import org.scalatest._
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers._
class ResultOfLengthWordApplicationSpec extends AnyFunSpec {
describe("ResultOfLengthWordApplication ") {
it("should have pretty toString") {
val result = new ResultOfLengthWordApplication(8)
result.toString should be ("length (8)")
}
}
}
|
scalatest/scalatest
|
jvm/scalatest-test/src/test/scala/org/scalatest/matchers/dsl/ResultOfLengthWordApplicationSpec.scala
|
Scala
|
apache-2.0
| 1,028 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert
import java.nio.charset.StandardCharsets
import java.util.concurrent.ConcurrentHashMap
import java.util.{Date, DoubleSummaryStatistics, Locale, ServiceLoader, UUID}
import com.google.common.hash.Hashing
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom._
import org.apache.commons.codec.binary.Base64
import org.geotools.geometry.jts.{JTS, JTSFactoryFinder}
import org.geotools.referencing.CRS
import org.geotools.util.Converters
import org.locationtech.geomesa.curve.TimePeriod
import org.locationtech.geomesa.utils.text.{DateParsing, EnhancedTokenParsers, WKTUtils}
import org.locationtech.geomesa.utils.uuid.Z3UuidGenerator
import org.opengis.referencing.operation.MathTransform
import scala.collection.JavaConversions._
import scala.collection.mutable
import scala.util.Try
import scala.util.control.NonFatal
import scala.util.matching.Regex
object Transformers extends EnhancedTokenParsers with LazyLogging {
lazy val functionMap = {
val fn = mutable.HashMap[String, TransformerFn]()
ServiceLoader.load(classOf[TransformerFunctionFactory]).foreach { factory =>
factory.functions.foreach(f => f.names.foreach(fn.put(_, f)))
}
fn
}
val EQ = "Eq"
val LT = "LT"
val GT = "GT"
val LTEQ = "LTEq"
val GTEQ = "GTEq"
val NEQ = "NEq"
object TransformerParser {
private val OPEN_PAREN = "("
private val CLOSE_PAREN = ")"
def decimal = """-?\\d*\\.\\d+""".r
def string = quotedString ^^ { s => LitString(s) }
def int = wholeNumber ^^ { i => LitInt(i.toInt) }
def double = decimal <~ "[dD]?".r ^^ { d => LitDouble(d.toDouble) }
def long = wholeNumber <~ "L" ^^ { l => LitLong(l.toLong) }
def float = decimal <~ "[fF]".r ^^ { l => LitFloat(l.toFloat) }
def boolean = "false|true".r ^^ { l => LitBoolean(l.toBoolean) }
def nulls = "null" ^^ { _ => LitNull }
def lit = string | float | double | long | int | boolean | nulls // order is important - most to least specific
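    // e.g. "10L" parses as LitLong and "1.5f" as LitFloat before the plain int/double alternatives are tried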
def wholeRecord = "$0" ^^ { _ => WholeRecord }
def regexExpr = string <~ "::r" ^^ { case LitString(s) => RegexExpr(s) }
def column = "$" ~> "[1-9][0-9]*".r ^^ { i => Col(i.toInt) }
def cast2int = expr <~ "::int" ~ "(eger)?".r ^^ { e => Cast2Int(e) }
def cast2long = expr <~ "::long" ^^ { e => Cast2Long(e) }
def cast2float = expr <~ "::float" ^^ { e => Cast2Float(e) }
def cast2double = expr <~ "::double" ^^ { e => Cast2Double(e) }
def cast2boolean = expr <~ "::bool" ~ "(ean)?".r ^^ { e => Cast2Boolean(e) }
def cast2string = expr <~ "::string" ^^ { e => Cast2String(e) }
def fieldLookup = "$" ~> ident ^^ { i => FieldLookup(i) }
def noNsfnName = ident ^^ { n => LitString(n) }
def nsFnName = ident ~ ":" ~ ident ^^ { case ns ~ ":" ~ n => LitString(s"$ns:$n") }
def fnName = nsFnName | noNsfnName
def tryFn = ("try" ~ OPEN_PAREN) ~> (argument ~ "," ~ argument) <~ CLOSE_PAREN ^^ {
case arg ~ "," ~ fallback => TryFunctionExpr(arg, fallback)
}
def fn = (fnName <~ OPEN_PAREN) ~ (repsep(argument, ",") <~ CLOSE_PAREN) ^^ {
case LitString(name) ~ e => FunctionExpr(functionMap(name).getInstance, e.toArray)
}
def strEq = ("strEq" ~ OPEN_PAREN) ~> (transformExpr ~ "," ~ transformExpr) <~ CLOSE_PAREN ^^ {
case l ~ "," ~ r => strBinOps(EQ)(l, r)
}
def numericPredicate[I](fn: String, predBuilder: (Expr, Expr) => BinaryPredicate[I]) =
(fn ~ OPEN_PAREN) ~> (transformExpr ~ "," ~ transformExpr) <~ CLOSE_PAREN ^^ {
case l ~ "," ~ r => predBuilder(l, r)
}
def getBinPreds[T](t: String, bops: Map[String, ExprToBinPred[T]]) =
bops.map{case (n, op) => numericPredicate(t+n, op) }.reduce(_ | _)
def binaryPred =
strEq |
getBinPreds("int", intBinOps) |
getBinPreds("integer", intBinOps) |
getBinPreds("long", longBinOps) |
getBinPreds("float", floatBinOps) |
getBinPreds("double", doubleBinOps) |
getBinPreds("bool", boolBinOps) |
getBinPreds("boolean", boolBinOps)
def andPred = ("and" ~ OPEN_PAREN) ~> (pred ~ "," ~ pred) <~ CLOSE_PAREN ^^ {
case l ~ "," ~ r => And(l, r)
}
def orPred = ("or" ~ OPEN_PAREN) ~> (pred ~ "," ~ pred) <~ CLOSE_PAREN ^^ {
case l ~ "," ~ r => Or(l, r)
}
def notPred = ("not" ~ OPEN_PAREN) ~> pred <~ CLOSE_PAREN ^^ {
pred => Not(pred)
}
def logicPred = andPred | orPred | notPred
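    // e.g. strEq($1, 'foo'), intLTEq($2::int, 10), and(boolEq($4::bool, true), not(strEq($2, 'bar')))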
def pred: Parser[Predicate] = binaryPred | logicPred
def expr = tryFn | fn | wholeRecord | regexExpr | fieldLookup | column | lit
def transformExpr: Parser[Expr] = cast2double | cast2int | cast2boolean | cast2float | cast2long | cast2string | expr
def argument = transformExpr | string
}
sealed trait Expr {
def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any
/**
* Gets the field dependencies that this expr relies on
*
* @param stack current field stack, used to detect circular dependencies
* @param fieldNameMap fields lookup
* @return dependencies
*/
def dependenciesOf(stack: Set[Field], fieldNameMap: Map[String, Field]): Set[Field]
}
sealed trait Lit[T <: Any] extends Expr {
def value: T
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = value
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] = Set.empty
override def toString: String = String.valueOf(value)
}
case class LitString(value: String) extends Lit[String] {
override def toString: String = s"'${String.valueOf(value)}'"
}
case class LitInt(value: Integer) extends Lit[Integer]
case class LitLong(value: Long) extends Lit[Long]
case class LitFloat(value: java.lang.Float) extends Lit[java.lang.Float]
case class LitDouble(value: java.lang.Double) extends Lit[java.lang.Double]
case class LitBoolean(value: java.lang.Boolean) extends Lit[java.lang.Boolean]
case object LitNull extends Lit[AnyRef] { override def value = null }
sealed trait CastExpr extends Expr {
def e: Expr
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] =
e.dependenciesOf(stack, fieldMap)
}
case class Cast2Int(e: Expr) extends CastExpr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Int =
e.eval(args) match {
case int: Int => int
case double: Double => double.toInt
case float: Float => float.toInt
case long: Long => long.toInt
case any: Any => any.toString.toInt
}
override def toString: String = s"$e::int"
}
case class Cast2Long(e: Expr) extends CastExpr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Long =
e.eval(args) match {
case int: Int => int.toLong
case double: Double => double.toLong
case float: Float => float.toLong
case long: Long => long
case any: Any => any.toString.toLong
}
override def toString: String = s"$e::long"
}
case class Cast2Float(e: Expr) extends CastExpr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Float =
e.eval(args) match {
case int: Int => int.toFloat
case double: Double => double.toFloat
case float: Float => float
case long: Long => long.toFloat
case any: Any => any.toString.toFloat
}
override def toString: String = s"$e::float"
}
case class Cast2Double(e: Expr) extends CastExpr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Double =
e.eval(args) match {
case int: Int => int.toDouble
case double: Double => double
case float: Float => float.toDouble
case long: Long => long.toDouble
case any: Any => any.toString.toDouble
}
override def toString: String = s"$e::double"
}
case class Cast2Boolean(e: Expr) extends CastExpr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any =
e.eval(args).asInstanceOf[String].toBoolean
override def toString: String = s"$e::boolean"
}
case class Cast2String(e: Expr) extends CastExpr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any =
e.eval(args).toString
override def toString: String = s"$e::string"
}
case object WholeRecord extends Expr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = args(0)
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] = Set.empty
}
case class Col(i: Int) extends Expr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = args(i)
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] = Set.empty
override def toString: String = s"$$$i"
}
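  // Resolves the named field's index in the evaluation context on first use,
  // then rewrites itself into a direct index lookup (or a constant null if the
  // field is not present).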
case class FieldLookup(n: String) extends Expr {
var doEval: EvaluationContext => Any = { ctx =>
val idx = ctx.indexOf(n)
doEval =
if(idx < 0) _ => null
else ec => ec.get(idx)
doEval(ctx)
}
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = doEval(ctx)
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] = {
fieldMap.get(n) match {
case None => Set.empty
case Some(field) =>
if (stack.contains(field)) {
throw new IllegalArgumentException(s"Cyclical dependency detected in field $field")
} else {
Option(field.transform).toSeq.flatMap(_.dependenciesOf(stack + field, fieldMap)).toSet + field
}
}
}
override def toString: String = s"$$$n"
}
case class RegexExpr(s: String) extends Expr {
val compiled = s.r
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = compiled
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] = Set.empty
override def toString: String = s"$s::r"
}
case class FunctionExpr(f: TransformerFn, arguments: Array[Expr]) extends Expr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any =
f.eval(arguments.map(_.eval(args)))
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] =
arguments.flatMap(_.dependenciesOf(stack, fieldMap)).toSet
override def toString: String = s"${f.names.head}${arguments.mkString("(", ",", ")")}"
}
case class TryFunctionExpr(toTry: Expr, fallback: Expr) extends Expr {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
Try(toTry.eval(args)).getOrElse(fallback.eval(args))
}
override def dependenciesOf(stack: Set[Field], fieldMap: Map[String, Field]): Set[Field] =
toTry.dependenciesOf(stack, fieldMap) ++ fallback.dependenciesOf(stack, fieldMap)
override def toString: String = s"try($toTry,$fallback)"
}
sealed trait Predicate {
def eval(args: Array[Any])(implicit ctx: EvaluationContext): Boolean
}
class BinaryPredicate[T](left: Expr, right: Expr, isEqual: (T, T) => Boolean) extends Predicate {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Boolean = eval(left, right, args)
def eval(left: Expr, right: Expr, args: Array[Any])(implicit ctx: EvaluationContext): Boolean =
isEqual(left.eval(args).asInstanceOf[T], right.eval(args).asInstanceOf[T])
}
def buildPred[T](f: (T, T) => Boolean): ExprToBinPred[T] = new BinaryPredicate[T](_, _, f)
type ExprToBinPred[T] = (Expr, Expr) => BinaryPredicate[T]
val intBinOps = Map[String, ExprToBinPred[Int]](
EQ -> buildPred[Int](_ == _),
LT -> buildPred[Int](_ < _ ),
GT -> buildPred[Int](_ > _ ),
LTEQ -> buildPred[Int](_ <= _),
GTEQ -> buildPred[Int](_ >= _),
NEQ -> buildPred[Int](_ != _)
)
val longBinOps = Map[String, ExprToBinPred[Long]](
EQ -> buildPred[Long](_ == _),
LT -> buildPred[Long](_ < _ ),
GT -> buildPred[Long](_ > _ ),
LTEQ -> buildPred[Long](_ <= _),
GTEQ -> buildPred[Long](_ >= _),
NEQ -> buildPred[Long](_ != _)
)
val floatBinOps = Map[String, ExprToBinPred[Float]](
EQ -> buildPred[Float](_ == _),
LT -> buildPred[Float](_ < _ ),
GT -> buildPred[Float](_ > _ ),
LTEQ -> buildPred[Float](_ <= _),
GTEQ -> buildPred[Float](_ >= _),
NEQ -> buildPred[Float](_ != _)
)
val doubleBinOps = Map[String, ExprToBinPred[Double]](
EQ -> buildPred[Double](_ == _),
LT -> buildPred[Double](_ < _ ),
GT -> buildPred[Double](_ > _ ),
LTEQ -> buildPred[Double](_ <= _),
GTEQ -> buildPred[Double](_ >= _),
NEQ -> buildPred[Double](_ != _)
)
val boolBinOps = Map[String, ExprToBinPred[Boolean]](
EQ -> buildPred[Boolean](_ == _),
NEQ -> buildPred[Boolean](_ != _)
)
val strBinOps = Map[String, ExprToBinPred[String]](
EQ -> buildPred[String](_.equals(_)),
NEQ -> buildPred[String]((a, b) => a != b)
)
case class Not(p: Predicate) extends Predicate {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Boolean = !p.eval(args)
}
class BinaryLogicPredicate(l: Predicate, r: Predicate, f: (Boolean, Boolean) => Boolean) extends Predicate {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Boolean = f(l.eval(args), r.eval(args))
}
def buildBinaryLogicPredicate(f: (Boolean, Boolean) => Boolean): (Predicate, Predicate) => BinaryLogicPredicate =
new BinaryLogicPredicate(_, _, f)
val And = buildBinaryLogicPredicate(_ && _)
val Or = buildBinaryLogicPredicate(_ || _)
def parseTransform(s: String): Expr = {
logger.trace(s"Parsing transform $s")
parse(TransformerParser.transformExpr, s) match {
case Success(r, _) => r
case Failure(e, _) => throw new IllegalArgumentException(s"Error parsing expression '$s': $e")
case Error(e, _) => throw new RuntimeException(s"Error parsing expression '$s': $e")
}
}
def parsePred(s: String): Predicate = {
logger.trace(s"Parsing predicate $s")
parse(TransformerParser.pred, s) match {
case Success(p, _) => p
case Failure(e, _) => throw new IllegalArgumentException(s"Error parsing predicate '$s': $e")
case Error(e, _) => throw new RuntimeException(s"Error parsing predicate '$s': $e")
}
}
}
object TransformerFn {
def apply(n: String*)(f: (Array[Any]) => Any) = new TransformerFn {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = f(args)
override def names: Seq[String] = n
}
}
trait TransformerFn {
def names: Seq[String]
def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any
// some transformers cache arguments that don't change, override getInstance in order
// to return a new transformer that can cache args
def getInstance: TransformerFn = this
}
trait TransformerFunctionFactory {
def functions: Seq[TransformerFn]
}
class DefaultsFunctionFactory extends TransformerFunctionFactory {
override def functions: Seq[TransformerFn] = Seq(withDefault)
private val withDefault = TransformerFn("withDefault") { args =>
if (args(0) == null) { args(1) } else { args(0) }
}
}
class StringFunctionFactory extends TransformerFunctionFactory {
override def functions: Seq[TransformerFn] =
Seq(stripQuotes, strLen, trim, capitalize, lowercase, uppercase, regexReplace, concat, substr, string, mkstring, emptyToNull)
val string = TransformerFn("toString") { args => args(0).toString }
  val stripQuotes = TransformerFn("stripQuotes") { args => args(0).asInstanceOf[String].replaceAll("\"", "") }
val trim = TransformerFn("trim") { args => args(0).asInstanceOf[String].trim }
val capitalize = TransformerFn("capitalize") { args => args(0).asInstanceOf[String].capitalize }
val lowercase = TransformerFn("lowercase") { args => args(0).asInstanceOf[String].toLowerCase }
val uppercase = TransformerFn("uppercase") { args => args(0).asInstanceOf[String].toUpperCase }
val concat = TransformerFn("concat", "concatenate") { args => args.map(_.toString).mkString }
val mkstring = TransformerFn("mkstring") { args => args.drop(1).map(_.toString).mkString(args(0).toString) }
val emptyToNull = TransformerFn("emptyToNull") { args => Option(args(0)).map(_.toString).filterNot(_.trim.isEmpty).orNull }
val regexReplace = TransformerFn("regexReplace") {
args => args(0).asInstanceOf[Regex].replaceAllIn(args(2).asInstanceOf[String], args(1).asInstanceOf[String])
}
val substr = TransformerFn("substr", "substring") {
args => args(0).asInstanceOf[String].substring(args(1).asInstanceOf[Int], args(2).asInstanceOf[Int])
}
val strLen = TransformerFn("strlen", "stringLength", "length") {
args => args(0).asInstanceOf[String].length
}
}
class DateFunctionFactory extends TransformerFunctionFactory {
import java.time.{ZoneOffset, ZonedDateTime}
import java.time.format.{DateTimeFormatter, DateTimeFormatterBuilder}
import java.time.temporal.ChronoField
// yyyy-MM-dd'T'HH:mm:ss.SSSZZ (ZZ is time zone with colon)
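  // e.g. "2015-01-01T00:00:00.000Z" or "2015-01-01T00:00:00.000+00:00"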
private val dateTimeFormat =
new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.append(DateTimeFormatter.ISO_LOCAL_DATE)
.parseLenient()
.appendLiteral('T')
.appendValue(ChronoField.HOUR_OF_DAY, 2)
.appendLiteral(':')
.appendValue(ChronoField.MINUTE_OF_HOUR, 2)
.appendLiteral(':')
.appendValue(ChronoField.SECOND_OF_MINUTE, 2)
.appendFraction(ChronoField.MILLI_OF_SECOND, 3, 3, true)
.optionalStart()
.appendOffsetId()
.toFormatter(Locale.US)
.withZone(ZoneOffset.UTC)
// yyyyMMdd
private val basicDateFormat = DateTimeFormatter.BASIC_ISO_DATE.withZone(ZoneOffset.UTC)
// yyyyMMdd'T'HHmmss.SSSZ
private val basicDateTimeFormat =
new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.appendValue(ChronoField.YEAR, 4)
.appendValue(ChronoField.MONTH_OF_YEAR, 2)
.appendValue(ChronoField.DAY_OF_MONTH, 2)
.appendLiteral('T')
.appendValue(ChronoField.HOUR_OF_DAY, 2)
.appendValue(ChronoField.MINUTE_OF_HOUR, 2)
.appendValue(ChronoField.SECOND_OF_MINUTE, 2)
.appendFraction(ChronoField.MILLI_OF_SECOND, 3, 3, true)
.optionalStart()
.appendOffsetId()
.toFormatter(Locale.US)
.withZone(ZoneOffset.UTC)
// yyyyMMdd'T'HHmmssZ
private val basicDateTimeNoMillisFormat =
new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.appendValue(ChronoField.YEAR, 4)
.appendValue(ChronoField.MONTH_OF_YEAR, 2)
.appendValue(ChronoField.DAY_OF_MONTH, 2)
.appendLiteral('T')
.appendValue(ChronoField.HOUR_OF_DAY, 2)
.appendValue(ChronoField.MINUTE_OF_HOUR, 2)
.appendValue(ChronoField.SECOND_OF_MINUTE, 2)
.optionalStart()
.appendOffsetId()
.toFormatter(Locale.US)
.withZone(ZoneOffset.UTC)
// yyyy-MM-dd'T'HH:mm:ss.SSS
private val dateHourMinuteSecondMillisFormat =
new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.append(DateTimeFormatter.ISO_LOCAL_DATE)
.parseLenient()
.appendLiteral('T')
.appendValue(ChronoField.HOUR_OF_DAY, 2)
.appendLiteral(':')
.appendValue(ChronoField.MINUTE_OF_HOUR, 2)
.appendLiteral(':')
.appendValue(ChronoField.SECOND_OF_MINUTE, 2)
.appendFraction(ChronoField.MILLI_OF_SECOND, 3, 3, true)
.toFormatter(Locale.US)
.withZone(ZoneOffset.UTC)
override def functions: Seq[TransformerFn] =
Seq(now, customFormatDateParser, datetime, isodate, isodatetime, basicDateTimeNoMillis,
dateHourMinuteSecondMillis, millisToDate, secsToDate, dateToString)
private val now = TransformerFn("now") { _ => Date.from(ZonedDateTime.now(ZoneOffset.UTC).toInstant) }
private val millisToDate = TransformerFn("millisToDate") { args => new Date(args(0).asInstanceOf[Long]) }
private val secsToDate = TransformerFn("secsToDate") { args => new Date(args(0).asInstanceOf[Long] * 1000L) }
private val customFormatDateParser = CustomFormatDateParser()
private val datetime = StandardDateParser("datetime", "dateTime")(dateTimeFormat)
private val isodate = StandardDateParser("isodate", "basicDate")(basicDateFormat)
private val isodatetime = StandardDateParser("isodatetime", "basicDateTime")(basicDateTimeFormat)
private val basicDateTimeNoMillis = StandardDateParser("basicDateTimeNoMillis")(basicDateTimeNoMillisFormat)
private val dateHourMinuteSecondMillis = StandardDateParser("dateHourMinuteSecondMillis")(dateHourMinuteSecondMillisFormat)
private val dateToString = DateToString()
case class StandardDateParser(names: String*)(format: DateTimeFormatter) extends TransformerFn {
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any =
DateParsing.parseDate(args(0).toString, format)
}
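  // e.g. date('yyyyMMdd HHmmss', $2) parses with a caller-supplied pattern; the compiled formatter is cached per instance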
case class CustomFormatDateParser(var format: DateTimeFormatter = null) extends TransformerFn {
override val names = Seq("date")
override def getInstance: CustomFormatDateParser = CustomFormatDateParser()
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
if (format == null) {
format = DateTimeFormatter.ofPattern(args(0).asInstanceOf[String]).withZone(ZoneOffset.UTC)
}
DateParsing.parseDate(args(1).toString, format)
}
}
case class DateToString(var format: DateTimeFormatter = null) extends TransformerFn {
override val names = Seq("dateToString")
override def getInstance: DateToString = DateToString()
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
if (format == null) {
format = DateTimeFormatter.ofPattern(args(0).asInstanceOf[String]).withZone(ZoneOffset.UTC)
}
DateParsing.formatDate(args(1).asInstanceOf[java.util.Date], format)
}
}
}
class GeometryFunctionFactory extends TransformerFunctionFactory {
override def functions = Seq(pointParserFn,
multiPointParserFn,
lineStringParserFn,
multiLineStringParserFn,
polygonParserFn,
multiPolygonParserFn,
geometryParserFn,
geometryCollectionParserFn,
projectFromParserFn)
private val gf = JTSFactoryFinder.getGeometryFactory
val pointParserFn = TransformerFn("point") { args =>
args.length match {
case 1 =>
args(0) match {
case g: Geometry => g.asInstanceOf[Point]
case s: String => WKTUtils.read(s).asInstanceOf[Point]
}
case 2 =>
gf.createPoint(new Coordinate(args(0).asInstanceOf[Double], args(1).asInstanceOf[Double]))
case _ =>
throw new IllegalArgumentException(s"Invalid point conversion argument: ${args.toList}")
}
}
val multiPointParserFn = TransformerFn("multipoint") { args =>
args(0) match {
case g: Geometry => g.asInstanceOf[MultiPoint]
case s: String => WKTUtils.read(s).asInstanceOf[MultiPoint]
case _ =>
throw new IllegalArgumentException(s"Invalid multipoint conversion argument: ${args.toList}")
}
}
val lineStringParserFn = TransformerFn("linestring") { args =>
args(0) match {
case g: Geometry => g.asInstanceOf[LineString]
case s: String => WKTUtils.read(s).asInstanceOf[LineString]
case _ =>
throw new IllegalArgumentException(s"Invalid linestring conversion argument: ${args.toList}")
}
}
val multiLineStringParserFn = TransformerFn("multilinestring") { args =>
args(0) match {
case g: Geometry => g.asInstanceOf[MultiLineString]
case s: String => WKTUtils.read(s).asInstanceOf[MultiLineString]
case _ =>
throw new IllegalArgumentException(s"Invalid multilinestring conversion argument: ${args.toList}")
}
}
val polygonParserFn = TransformerFn("polygon") { args =>
args(0) match {
case g: Geometry => g.asInstanceOf[Polygon]
case s: String => WKTUtils.read(s).asInstanceOf[Polygon]
case _ =>
throw new IllegalArgumentException(s"Invalid polygon conversion argument: ${args.toList}")
}
}
val multiPolygonParserFn = TransformerFn("multipolygon") { args =>
args(0) match {
case g: Geometry => g.asInstanceOf[MultiPolygon]
case s: String => WKTUtils.read(s).asInstanceOf[MultiPolygon]
case _ =>
throw new IllegalArgumentException(s"Invalid multipolygon conversion argument: ${args.toList}")
}
}
val geometryParserFn = TransformerFn("geometry") { args =>
args(0) match {
case g: Geometry => g.asInstanceOf[Geometry]
case s: String => WKTUtils.read(s)
case _ =>
throw new IllegalArgumentException(s"Invalid geometry conversion argument: ${args.toList}")
}
}
val geometryCollectionParserFn = TransformerFn("geometrycollection") { args =>
args(0) match {
case g: Geometry => g.asInstanceOf[GeometryCollection]
case s: String => WKTUtils.read(s)
case _ =>
throw new IllegalArgumentException(s"Invalid geometrycollection conversion argument: ${args.toList}")
}
}
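  // e.g. projectFrom('EPSG:3857', geom) treats geom as being in the named CRS and reprojects
  // it to EPSG:4326; an optional third argument toggles lenient transforms (default true)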
val projectFromParserFn = new TransformerFn {
private val cache = new ConcurrentHashMap[String, MathTransform]
override val names: Seq[String] = Seq("projectFrom")
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
import org.locationtech.geomesa.utils.geotools.CRS_EPSG_4326
val epsg = args(0).asInstanceOf[String]
val geom = args(1).asInstanceOf[Geometry]
val lenient = if (args.length > 2) { java.lang.Boolean.parseBoolean(args(2).toString) } else { true }
// transforms should be thread safe according to https://sourceforge.net/p/geotools/mailman/message/32123017/
val transform = cache.getOrElseUpdate(s"$epsg:$lenient",
CRS.findMathTransform(CRS.decode(epsg), CRS_EPSG_4326, lenient))
JTS.transform(geom, transform)
}
}
}
class IdFunctionFactory extends TransformerFunctionFactory with LazyLogging {
override def functions = Seq(string2Bytes, md5, uuidFn, uuidZ3, uuidZ3Centroid, base64, murmur3_32, murmur3_64)
val string2Bytes: TransformerFn = TransformerFn("string2bytes", "stringToBytes") {
args => args(0).asInstanceOf[String].getBytes(StandardCharsets.UTF_8)
}
val uuidFn: TransformerFn = TransformerFn("uuid") { args => UUID.randomUUID().toString }
val uuidZ3: TransformerFn = TransformerFn("uuidZ3") { args =>
val geom = args(0).asInstanceOf[Point]
val date = args(1).asInstanceOf[Date]
val interval = TimePeriod.withName(args(2).asInstanceOf[String])
try { Z3UuidGenerator.createUuid(geom, date.getTime, interval).toString } catch {
case NonFatal(e) =>
logger.warn(s"Invalid z3 values for UUID: $geom $date $interval: $e")
UUID.randomUUID().toString
}
}
val uuidZ3Centroid: TransformerFn = TransformerFn("uuidZ3Centroid") { args =>
val geom = args(0).asInstanceOf[Geometry]
val date = args(1).asInstanceOf[Date]
val interval = TimePeriod.withName(args(2).asInstanceOf[String])
try { Z3UuidGenerator.createUuid(geom, date.getTime, interval).toString } catch {
case NonFatal(e) =>
logger.warn(s"Invalid z3 values for UUID: $geom $date $interval: $e")
UUID.randomUUID().toString
}
}
val base64: TransformerFn = TransformerFn("base64") {
args => Base64.encodeBase64URLSafeString(args(0).asInstanceOf[Array[Byte]])
}
val md5 = new MD5
class MD5 extends TransformerFn {
override val names = Seq("md5")
override def getInstance: MD5 = new MD5()
private val hasher = Hashing.md5()
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any =
hasher.hashBytes(args(0).asInstanceOf[Array[Byte]]).toString
}
class Murmur3_32 extends TransformerFn {
private val hasher = Hashing.murmur3_32()
override val names: Seq[String] = Seq("murmur3_32")
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
hasher.hashString(args(0).toString, StandardCharsets.UTF_8)
}
}
val murmur3_32 = new Murmur3_32
class Murmur3_64 extends TransformerFn {
private val hasher = Hashing.murmur3_128()
override val names: Seq[String] = Seq("murmur3_64")
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
hasher.hashString(args(0).toString, StandardCharsets.UTF_8).asLong()
}
}
val murmur3_64 = new Murmur3_64
}
class LineNumberFunctionFactory extends TransformerFunctionFactory {
override def functions = Seq(LineNumberFn())
case class LineNumberFn() extends TransformerFn {
override def getInstance: LineNumberFn = LineNumberFn()
override val names = Seq("lineNo", "lineNumber")
def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = ctx.counter.getLineCount
}
}
trait MapListParsing {
protected def determineClazz(s: String) = s.toLowerCase match {
case "string" | "str" => classOf[String]
case "int" | "integer" => classOf[java.lang.Integer]
case "long" => classOf[java.lang.Long]
case "double" => classOf[java.lang.Double]
case "float" => classOf[java.lang.Float]
case "bool" | "boolean" => classOf[java.lang.Boolean]
case "bytes" => classOf[Array[Byte]]
case "uuid" => classOf[UUID]
case "date" => classOf[java.util.Date]
}
}
class CollectionFunctionFactory extends TransformerFunctionFactory {
import scala.collection.JavaConverters._
override def functions = Seq(listFn, mapValueFunction)
val listFn = TransformerFn("list") { args => args.toList.asJava }
val mapValueFunction = TransformerFn("mapValue") {
args => args(0).asInstanceOf[java.util.Map[Any, Any]].get(args(1))
}
}
class StringMapListFunctionFactory extends TransformerFunctionFactory with MapListParsing {
override def functions = Seq(listParserFn, mapParserFn)
val defaultListDelim = ","
val defaultKVDelim = "->"
import scala.collection.JavaConverters._
def convert(value: Any, clazz: Class[_]) =
Option(Converters.convert(value, clazz))
      .getOrElse(throw new IllegalArgumentException(s"Could not convert value '$value' to type ${clazz.getName}"))
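  // e.g. parseList('int', '1,2,3') and parseMap('string->int', 'a->1,b->2') with the default delimiters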
val listParserFn = TransformerFn("parseList") { args =>
val clazz = determineClazz(args(0).asInstanceOf[String])
val s = args(1).asInstanceOf[String]
val delim = if (args.length >= 3) args(2).asInstanceOf[String] else defaultListDelim
if (s.isEmpty) {
List().asJava
} else {
s.split(delim).map(_.trim).map(convert(_, clazz)).toList.asJava
}
}
val mapParserFn = TransformerFn("parseMap") { args =>
val kv = args(0).asInstanceOf[String].split("->").map(_.trim)
val keyClazz = determineClazz(kv(0))
val valueClazz = determineClazz(kv(1))
val s: String = args(1).toString
val kvDelim: String = if (args.length >= 3) args(2).asInstanceOf[String] else defaultKVDelim
val pairDelim: String = if (args.length >= 4) args(3).asInstanceOf[String] else defaultListDelim
if (s.isEmpty) {
Map().asJava
} else {
s.split(pairDelim)
.map(_.split(kvDelim).map(_.trim))
.map { case Array(key, value) =>
(convert(key, keyClazz), convert(value, valueClazz))
}.toMap.asJava
}
}
}
class CastFunctionFactory extends TransformerFunctionFactory {
override def functions = Seq(stringToDouble, stringToInt, stringToFloat, stringToLong, stringToBoolean)
val stringToDouble = TransformerFn("stringToDouble") {
args => tryConvert(args(0).asInstanceOf[String], (s) => s.toDouble, args(1))
}
val stringToInt = TransformerFn("stringToInt", "stringToInteger") {
args => tryConvert(args(0).asInstanceOf[String], (s) => s.toInt, args(1))
}
val stringToFloat = TransformerFn("stringToFloat") {
args => tryConvert(args(0).asInstanceOf[String], (s) => s.toFloat, args(1))
}
val stringToLong = TransformerFn("stringToLong") {
args => tryConvert(args(0).asInstanceOf[String], (s) => s.toLong, args(1))
}
val stringToBoolean = TransformerFn("stringToBool", "stringToBoolean") {
args => tryConvert(args(0).asInstanceOf[String], (s) => s.toBoolean, args(1))
}
def tryConvert(s: String, conversion: (String) => Any, default: Any): Any = {
if (s == null || s.isEmpty) {
return default
}
try { conversion(s) } catch { case e: Exception => default }
}
}
class MathFunctionFactory extends TransformerFunctionFactory {
override def functions = Seq(add, subtract, multiply, divide, mean, min, max)
def parseDouble(v: Any): Double = {
v match {
case int: Int => int.toDouble
case double: Double => double
case float: Float => float.toDouble
case long: Long => long.toDouble
case string: String => string.toDouble
case any: Any => any.toString.toDouble
}
}
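  // e.g. subtract(10, 2, 3) == 5.0 and divide(100, 5, 2) == 10.0: both fold left over the remaining arguments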
val add = TransformerFn("add") { args =>
var s: Double = 0.0
args.foreach(s += parseDouble(_))
s
}
val multiply = TransformerFn("multiply") { args =>
var s: Double = 1.0
args.foreach(s *= parseDouble(_))
s
}
val subtract = TransformerFn("subtract") { args =>
var s: Double = parseDouble(args(0))
args.drop(1).foreach(s -= parseDouble(_))
s
}
val divide = TransformerFn("divide") { args =>
var s: Double = parseDouble(args(0))
args.drop(1).foreach(s /= parseDouble(_))
s
}
val mean = TransformerFn("mean") { args =>
val stats = new DoubleSummaryStatistics
args.map(parseDouble).foreach { d => stats.accept(d) }
stats.getAverage
}
val min = TransformerFn("min") { args =>
val stats = new DoubleSummaryStatistics
args.map(parseDouble).foreach { d => stats.accept(d) }
stats.getMin
}
val max = TransformerFn("max") { args =>
val stats = new DoubleSummaryStatistics
args.map(parseDouble).foreach { d => stats.accept(d) }
stats.getMax
}
}
class EnrichmentCacheFunctionFactory extends TransformerFunctionFactory {
override def functions = Seq(cacheLookup)
val cacheLookup = new TransformerFn {
override def names: Seq[String] = Seq("cacheLookup")
override def eval(args: Array[Any])(implicit ctx: EvaluationContext): Any = {
val cache = ctx.getCache(args(0).asInstanceOf[String])
cache.get(Array(args(1).asInstanceOf[String], args(2).asInstanceOf[String]))
}
}
}
|
boundlessgeo/geomesa
|
geomesa-convert/geomesa-convert-common/src/main/scala/org/locationtech/geomesa/convert/Transformers.scala
|
Scala
|
apache-2.0
| 35,741 |
/*
* Copyright 2011-2021 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler
package operator
package user
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.junit.JUnitRunner
import java.io.{ DataInput, DataOutput }
import java.util.function.Consumer
import scala.collection.JavaConversions._
import scala.language.reflectiveCalls
import org.apache.hadoop.io.Writable
import org.apache.spark.broadcast.{ Broadcast => Broadcasted }
import com.asakusafw.lang.compiler.model.description.{ ClassDescription, ImmediateDescription }
import com.asakusafw.lang.compiler.model.graph.{ Groups, MarkerOperator, Operator, OperatorInput }
import com.asakusafw.lang.compiler.model.testing.OperatorExtractor
import com.asakusafw.lang.compiler.planning.PlanMarker
import com.asakusafw.runtime.core.{ GroupView, View }
import com.asakusafw.runtime.model.DataModel
import com.asakusafw.runtime.value.{ IntOption, LongOption, StringOption }
import com.asakusafw.spark.compiler.broadcast.MockBroadcast
import com.asakusafw.spark.compiler.spi.{ OperatorCompiler, OperatorType }
import com.asakusafw.spark.runtime.fragment.{ Fragment, GenericOutputFragment }
import com.asakusafw.spark.runtime.graph.BroadcastId
import com.asakusafw.spark.runtime.io.WritableSerDe
import com.asakusafw.spark.runtime.rdd.ShuffleKey
import com.asakusafw.spark.tools.asm._
import com.asakusafw.vocabulary.operator.Convert
@RunWith(classOf[JUnitRunner])
class ConvertOperatorCompilerSpecTest extends ConvertOperatorCompilerSpec
class ConvertOperatorCompilerSpec extends FlatSpec with UsingCompilerContext {
import ConvertOperatorCompilerSpec._
behavior of classOf[ConvertOperatorCompiler].getSimpleName
for {
s <- Seq("s", null)
} {
it should s"compile Convert operator${if (s == null) " with argument null" else ""}" in {
val operator = OperatorExtractor
.extract(classOf[Convert], classOf[ConvertOperator], "convert")
.input("input", ClassDescription.of(classOf[Input]))
.output("original", ClassDescription.of(classOf[Input]))
.output("out", ClassDescription.of(classOf[Output]))
.argument("n", ImmediateDescription.of(10))
.argument("s", ImmediateDescription.of(s))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
val cls = context.loadClass[Fragment[Input]](thisType.getClassName)
val out1 = new GenericOutputFragment[Input]()
val out2 = new GenericOutputFragment[Output]()
val fragment = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]]).newInstance(Map.empty, out1, out2)
fragment.reset()
val input = new Input()
for (i <- 0 until 10) {
input.i.modify(i)
input.l.modify(i)
fragment.add(input)
}
out1.iterator.zipWithIndex.foreach {
case (input, i) =>
assert(input.i.get === i)
assert(input.l.get === i)
}
out2.iterator.zipWithIndex.foreach {
case (output, i) =>
assert(output.l.get === 10 * i)
if (s == null) {
assert(output.s.isNull)
} else {
assert(output.s.getAsString === s)
}
}
fragment.reset()
}
}
it should "compile Convert operator with projective model" in {
val operator = OperatorExtractor
.extract(classOf[Convert], classOf[ConvertOperator], "convertp")
.input("input", ClassDescription.of(classOf[Input]))
.output("original", ClassDescription.of(classOf[Input]))
.output("out", ClassDescription.of(classOf[Output]))
.argument("n", ImmediateDescription.of(10))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
val cls = context.loadClass[Fragment[Input]](thisType.getClassName)
val out1 = new GenericOutputFragment[Input]()
val out2 = new GenericOutputFragment[Output]()
val fragment = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]]).newInstance(Map.empty, out1, out2)
fragment.reset()
val input = new Input()
for (i <- 0 until 10) {
input.i.modify(i)
input.l.modify(i)
fragment.add(input)
}
out1.iterator.zipWithIndex.foreach {
case (input, i) =>
assert(input.i.get === i)
assert(input.l.get === i)
}
out2.iterator.zipWithIndex.foreach {
case (output, i) =>
assert(output.l.get === 10 * i)
assert(output.s.isNull)
}
fragment.reset()
}
it should "compile Convert operator modifying original port" in {
val operator = OperatorExtractor
.extract(classOf[Convert], classOf[ConvertOperator], "convertp")
.input("input", ClassDescription.of(classOf[Input]))
.output("original", ClassDescription.of(classOf[Input]))
.output("out", ClassDescription.of(classOf[Output]))
.argument("n", ImmediateDescription.of(10))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
val cls = context.loadClass[Fragment[Input]](thisType.getClassName)
val out1 = new Fragment[Input] {
val output = new GenericOutputFragment[Input]()
override def doAdd(result: Input): Unit = {
result.l.add(10)
output.add(result)
}
override def doReset(): Unit = {
output.reset()
}
}
val out2 = new GenericOutputFragment[Output]()
val fragment = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]]).newInstance(Map.empty, out1, out2)
fragment.reset()
val input = new Input()
for (i <- 0 until 10) {
input.i.modify(i)
input.l.modify(i)
fragment.add(input)
}
out1.output.iterator.zipWithIndex.foreach {
case (input, i) =>
assert(input.i.get === i)
assert(input.l.get === i + 10)
}
out2.iterator.zipWithIndex.foreach {
case (output, i) =>
assert(output.l.get === 10 * i)
assert(output.s.isNull)
}
fragment.reset()
}
it should "compile Convert operator with view" in {
val vMarker = MarkerOperator.builder(ClassDescription.of(classOf[Input]))
.attribute(classOf[PlanMarker], PlanMarker.BROADCAST).build()
val gvMarker = MarkerOperator.builder(ClassDescription.of(classOf[Input]))
.attribute(classOf[PlanMarker], PlanMarker.BROADCAST).build()
val operator = OperatorExtractor
.extract(classOf[Convert], classOf[ConvertOperator], "convertWithView")
.input("input", ClassDescription.of(classOf[Input]))
.input("v", ClassDescription.of(classOf[Input]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq.empty, Seq.empty))
.upstream(vMarker.getOutput)
}
})
.input("gv", ClassDescription.of(classOf[Input]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq("i"), Seq.empty))
.upstream(gvMarker.getOutput)
}
})
.output("original", ClassDescription.of(classOf[Input]))
.output("out", ClassDescription.of(classOf[Output]))
.argument("n", ImmediateDescription.of(10))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
context.addClass(context.broadcastIds)
val cls = context.loadClass[Fragment[Input]](thisType.getClassName)
val broadcastIdsCls = context.loadClass(context.broadcastIds.thisType.getClassName)
def getBroadcastId(marker: MarkerOperator): BroadcastId = {
val sn = marker.getSerialNumber
broadcastIdsCls.getField(context.broadcastIds.getField(sn)).get(null).asInstanceOf[BroadcastId]
}
val out1 = new GenericOutputFragment[Input]()
val out2 = new GenericOutputFragment[Output]()
val view = new MockBroadcast(0, Map(ShuffleKey.empty -> Seq(new Input())))
val groupview = new MockBroadcast(1,
(0 until 10).map { i =>
val input = new Input()
input.i.modify(i)
new ShuffleKey(WritableSerDe.serialize(input.i)) -> Seq(input)
}.toMap)
val fragment = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]])
.newInstance(
Map(
getBroadcastId(vMarker) -> view,
getBroadcastId(gvMarker) -> groupview),
out1, out2)
fragment.reset()
val input = new Input()
for (i <- 0 until 10) {
input.i.modify(i)
input.l.modify(i)
fragment.add(input)
}
out1.iterator.zipWithIndex.foreach {
case (input, i) =>
assert(input.i.get === i)
assert(input.l.get === i)
}
out2.iterator.zipWithIndex.foreach {
case (output, i) =>
assert(output.l.get === 10 * i)
assert(output.s.isNull)
}
fragment.reset()
}
}
object ConvertOperatorCompilerSpec {
trait InputP {
def getIOption: IntOption
def getLOption: LongOption
}
class Input extends DataModel[Input] with InputP with Writable {
val i: IntOption = new IntOption()
val l: LongOption = new LongOption()
override def reset: Unit = {
i.setNull()
l.setNull()
}
override def copyFrom(other: Input): Unit = {
i.copyFrom(other.i)
l.copyFrom(other.l)
}
override def readFields(in: DataInput): Unit = {
i.readFields(in)
l.readFields(in)
}
override def write(out: DataOutput): Unit = {
i.write(out)
l.write(out)
}
def getIOption: IntOption = i
def getLOption: LongOption = l
}
class Output extends DataModel[Output] with Writable {
val l: LongOption = new LongOption()
val s: StringOption = new StringOption()
override def reset: Unit = {
l.setNull()
s.setNull()
}
override def copyFrom(other: Output): Unit = {
l.copyFrom(other.l)
s.copyFrom(other.s)
}
override def readFields(in: DataInput): Unit = {
l.readFields(in)
s.readFields(in)
}
override def write(out: DataOutput): Unit = {
l.write(out)
s.write(out)
}
def getLOption: LongOption = l
}
class ConvertOperator {
private[this] val out = new Output()
@Convert
def convert(in: Input, n: Int, s: String): Output = {
out.reset()
out.l.modify(n * in.l.get)
if (s != null) {
out.s.modify(s)
}
out
}
@Convert
def convertp[I <: InputP](in: I, n: Int): Output = {
out.reset()
out.getLOption.modify(n * in.getLOption.get)
out
}
@Convert
def convertWithView(in: Input, v: View[Input], gv: GroupView[Input], n: Int): Output = {
out.reset()
out.l.modify(n * in.l.get)
out
}
}
}
|
asakusafw/asakusafw-spark
|
compiler/src/test/scala/com/asakusafw/spark/compiler/operator/user/ConvertOperatorCompilerSpec.scala
|
Scala
|
apache-2.0
| 12,045 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
package inc
import xsbti.api.Source
import java.io.File
import sbt.Util.counted
trait Analysis
{
val stamps: Stamps
val apis: APIs
val relations: Relations
def ++(other: Analysis): Analysis
def -- (sources: Iterable[File]): Analysis
def copy(stamps: Stamps = stamps, apis: APIs = apis, relations: Relations = relations): Analysis
def addSource(src: File, api: Source, stamp: Stamp, internalDeps: Iterable[File]): Analysis
def addBinaryDep(src: File, dep: File, className: String, stamp: Stamp): Analysis
def addExternalDep(src: File, dep: String, api: Source): Analysis
def addProduct(src: File, product: File, stamp: Stamp, name: String): Analysis
override lazy val toString = Analysis.summary(this)
}
object Analysis
{
lazy val Empty: Analysis = new MAnalysis(Stamps.empty, APIs.empty, Relations.empty)
def summary(a: Analysis): String =
{
val (j, s) = a.apis.allInternalSources.partition(_.getName.endsWith(".java"))
val c = a.stamps.allProducts
val ext = a.apis.allExternals
val jars = a.relations.allBinaryDeps.filter(_.getName.endsWith(".jar"))
val sections =
counted("Scala source", "", "s", s.size) ++
counted("Java source", "", "s", j.size) ++
counted("class", "", "es", c.size) ++
counted("external source dependenc", "y", "ies", ext.size) ++
counted("binary dependenc", "y", "ies", jars.size)
sections.mkString("Analysis: ", ", ", "")
}
}
private class MAnalysis(val stamps: Stamps, val apis: APIs, val relations: Relations) extends Analysis
{
def ++ (o: Analysis): Analysis = new MAnalysis(stamps ++ o.stamps, apis ++ o.apis, relations ++ o.relations)
def -- (sources: Iterable[File]): Analysis =
{
val newRelations = relations -- sources
def keep[T](f: (Relations, T) => Set[_]): T => Boolean = file => !f(newRelations, file).isEmpty
val newAPIs = apis.removeInternal(sources).filterExt( keep(_ usesExternal _) )
val newStamps = stamps.filter( keep(_ produced _), sources, keep(_ usesBinary _))
new MAnalysis(newStamps, newAPIs, newRelations)
}
def copy(stamps: Stamps, apis: APIs, relations: Relations): Analysis = new MAnalysis(stamps, apis, relations)
def addSource(src: File, api: Source, stamp: Stamp, internalDeps: Iterable[File]): Analysis =
copy( stamps.markInternalSource(src, stamp), apis.markInternalSource(src, api), relations.addInternalSrcDeps(src, internalDeps) )
def addBinaryDep(src: File, dep: File, className: String, stamp: Stamp): Analysis =
copy( stamps.markBinary(dep, className, stamp), apis, relations.addBinaryDep(src, dep) )
def addExternalDep(src: File, dep: String, depAPI: Source): Analysis =
copy( stamps, apis.markExternalAPI(dep, depAPI), relations.addExternalDep(src, dep) )
def addProduct(src: File, product: File, stamp: Stamp, name: String): Analysis =
copy( stamps.markProduct(product, stamp), apis, relations.addProduct(src, product, name) )
}
|
kuochaoyi/xsbt
|
compile/inc/Analysis.scala
|
Scala
|
bsd-3-clause
| 2,954 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.sql
trait Arbitraries extends
ExprArbitrary with
ScopedExprArbitrary with
StatementArbitrary with
CINameArbitrary
object Arbitraries extends Arbitraries
|
drostron/quasar
|
frontend/src/test/scala/quasar/sql/Arbitraries.scala
|
Scala
|
apache-2.0
| 783 |
// mutable
abstract class AbstractVector[A] {
var as = new Array[Any](10)
var size = 0
var copyCount = 0
def add(a: A):Unit = {
if (size == as.length) {
resize()
}
as(size) = a
size = size + 1
copyCount = copyCount + 1
}
def apply(i: Int): A = {
if (size <= i) throw new ArrayIndexOutOfBoundsException
as(i).asInstanceOf[A]
}
def first: A = {
as(0).asInstanceOf[A]
}
def last: A = {
as(size - 1).asInstanceOf[A]
}
override def toString: String = {
"[" + as.filter(_ != null).mkString(",") + "]"
}
protected def resize(): Unit = {
val bs = as
as = newArray(as)
size = 0
for (x <- bs) add(x.asInstanceOf[A])
}
protected def newArray(as: Array[Any]): Array[Any]
}
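// Doubles the backing array on resize, so the total number of element copies stays linear in the number of adds.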
class BetterVector[A] extends AbstractVector[A] {
override protected def newArray(as: Array[Any]): Array[Any] = {
new Array[Any](as.length * 2)
}
}
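// Grows the backing array by a fixed 10 slots, so the total copy work grows quadratically with the number of adds.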
class BadVector[A] extends AbstractVector[A] {
override protected def newArray(as: Array[Any]): Array[Any] = {
new Array[Any](as.length + 10)
}
}
|
bati11/study-algorithm
|
catalog/vector/vector.scala
|
Scala
|
mit
| 1,075 |
package scala.meta
package internal
package tokenizers
import scala.util.Try
import LegacyToken._
import Chars._
import scala.meta.syntactic._
private[meta] trait LegacyTokenData {
/** the input that is currently being tokenized */
var content: Content = null
/** the next token */
var token: LegacyToken = EMPTY
/** the offset of the first character of the current token */
var offset: Offset = 0
/** the offset of the character following the token preceding this one */
var lastOffset: Offset = 0
/** the offset of the last character of the current token */
var endOffset: Offset = 0
/** the name of an identifier */
var name: String = null
/** the string value of a literal */
var strVal: String = null
/** the base of a number */
var base: Int = 0
def copyFrom(td: LegacyTokenData): this.type = {
this.content = td.content
this.token = td.token
this.offset = td.offset
this.lastOffset = td.lastOffset
this.endOffset = td.endOffset
this.name = td.name
this.strVal = td.strVal
this.base = td.base
this
}
override def toString = s"{token = $token, position = $offset..$endOffset, lastOffset = $lastOffset, name = $name, strVal = $strVal, base = $base}"
lazy val reporter: Reporter = Reporter(content)
import reporter._
/** Convert current strVal to char value
*/
def charVal: Char = if (strVal.length > 0) strVal.charAt(0) else 0
/** Convert current strVal, base to an integer value
* This is tricky because of max negative value.
*/
private def integerVal: BigInt = {
var input = strVal
if (input.startsWith("0x") || input.startsWith("0X")) input = input.substring(2)
if (input.endsWith("l") || input.endsWith("L")) input = input.substring(0, input.length - 1)
var value: BigInt = 0
val divider = if (base == 10) 1 else 2
var i = 0
val len = input.length
while (i < len) {
val d = digit2int(input charAt i, base)
if (d < 0) {
syntaxError("malformed integer number", at = offset)
}
value = value * base + d
i += 1
}
value
}
/** Convert current strVal, base to double value
*/
private def floatingVal: BigDecimal = {
def isDeprecatedForm = {
val idx = strVal indexOf '.'
(idx == strVal.length - 1) || (
(idx >= 0)
&& (idx + 1 < strVal.length)
&& (!Character.isDigit(strVal charAt (idx + 1)))
)
}
if (isDeprecatedForm) {
syntaxError("floating point number is missing digit after dot", at = offset)
} else {
val designatorSuffixes = List('d', 'D', 'f', 'F')
val parsee = if (strVal.nonEmpty && designatorSuffixes.contains(strVal.last)) strVal.dropRight(1) else strVal
try BigDecimal(parsee)
catch { case ex: Exception => syntaxError("malformed floating point number", at = offset) }
}
}
def intVal: BigInt = integerVal
def longVal: BigInt = integerVal
def floatVal: BigDecimal = floatingVal
def doubleVal: BigDecimal = floatingVal
}
|
mdemarne/scalameta
|
tokens/src/main/scala/scala/meta/internal/tokenizers/LegacyTokenData.scala
|
Scala
|
bsd-3-clause
| 3,034 |
package com.aergonaut.lifeaquatic.item
import cofh.lib.util.helpers.ItemHelper
import com.aergonaut.lib.core.TInitializer
import com.aergonaut.lifeaquatic.constants.Names
import com.aergonaut.lifeaquatic.item.armor.{ItemArmorBase, LinenChest, LinenHelmet, SwimTrunkFaceMask, SwimTrunkFins, SwimTrunkShorts}
import com.aergonaut.lifeaquatic.item.manual.ItemAlmanac
import com.aergonaut.lifeaquatic.item.material._
import com.aergonaut.lifeaquatic.item.metal._
import com.aergonaut.lifeaquatic.item.tool.{LavaPearl, Lens, Wrench}
import cpw.mods.fml.common.registry.GameRegistry
import net.minecraft.init.{Items, Blocks}
import net.minecraft.item.ItemStack
import net.minecraftforge.oredict.OreDictionary
object ModItems extends TInitializer {
final val Pearl: ItemBase = new Pearl
final val SwimTrunkFaceMask: ItemArmorBase = new SwimTrunkFaceMask
final val SwimTrunkFins: ItemArmorBase = new SwimTrunkFins
final val SwimTrunkShorts: ItemArmorBase = new SwimTrunkShorts
final val LavaPearl: ItemBase = new LavaPearl
final val LinenHelmet: ItemArmorBase = new LinenHelmet
final val LinenChest: ItemArmorBase = new LinenChest
final val ToolManual: ItemBase = new ItemAlmanac
final val Wrench: ItemBase = new Wrench
final val Lens: ItemBase = new Lens
final val IronCasing: ItemBase = new IronCasing
final val BronzeCasing: ItemBase = new BronzeCasing
final val BrassCasing: ItemBase = new BrassCasing
final val Ingot: ItemBase = new ItemIngot
final val Calx: ItemBase = new ItemCalx
// Convenience constants for referencing the ingots
final val CopperIngot: ItemStack = ItemHelper.stack(Ingot, 1, 0)
final val TinIngot: ItemStack = ItemHelper.stack(Ingot, 1, 1)
final val NickelIngot: ItemStack = ItemHelper.stack(Ingot, 1, 2)
final val BronzeIngot: ItemStack = ItemHelper.stack(Ingot, 1, 3)
final val BrassIngot: ItemStack = ItemHelper.stack(Ingot, 1, 4)
// Convenience constants for referencing the dusts
final val IronAsh: ItemStack = ItemHelper.stack(Calx, 1, 0)
final val GoldAsh: ItemStack = ItemHelper.stack(Calx, 1, 1)
final val CopperAsh: ItemStack = ItemHelper.stack(Calx, 1, 2)
final val TinAsh: ItemStack = ItemHelper.stack(Calx, 1, 3)
final val NickelAsh: ItemStack = ItemHelper.stack(Calx, 1, 4)
override def preInit(): Boolean = {
GameRegistry.registerItem(ToolManual, Names.Items.Tool.Almanac)
GameRegistry.registerItem(Wrench, Names.Items.Tool.Wrench)
GameRegistry.registerItem(Lens, Names.Items.Tool.Lens)
GameRegistry.registerItem(Pearl, Names.Items.Material.Pearl)
GameRegistry.registerItem(SwimTrunkFaceMask, Names.Items.Armor.SwimTrunkFaceMask)
GameRegistry.registerItem(SwimTrunkFins, Names.Items.Armor.SwimTrunkFins)
GameRegistry.registerItem(SwimTrunkShorts, Names.Items.Armor.SwimTrunkShorts)
GameRegistry.registerItem(LavaPearl, Names.Items.Tool.LavaPearl)
GameRegistry.registerItem(LinenHelmet, Names.Items.Armor.LinenHelmet)
GameRegistry.registerItem(LinenChest, Names.Items.Armor.LinenChest)
GameRegistry.registerItem(IronCasing, Names.Items.IronCasing)
GameRegistry.registerItem(BronzeCasing, Names.Items.BronzeCasing)
GameRegistry.registerItem(BrassCasing, Names.Items.BrassCasing)
GameRegistry.registerItem(Ingot, Names.Items.Ingot)
GameRegistry.registerItem(Calx, Names.Items.Calx)
OreDictionary.registerOre("ingotCopper", ItemHelper.cloneStack(CopperIngot))
OreDictionary.registerOre("ingotTin", ItemHelper.cloneStack(TinIngot))
OreDictionary.registerOre("ingotNickel", ItemHelper.cloneStack(NickelIngot))
OreDictionary.registerOre("ingotBronze", ItemHelper.cloneStack(BronzeIngot))
OreDictionary.registerOre("ingotBrass", ItemHelper.cloneStack(BrassIngot))
OreDictionary.registerOre("dustIron", ItemHelper.cloneStack(IronAsh))
OreDictionary.registerOre("dustGold", ItemHelper.cloneStack(GoldAsh))
OreDictionary.registerOre("dustCopper", ItemHelper.cloneStack(CopperAsh))
OreDictionary.registerOre("dustTin", ItemHelper.cloneStack(TinAsh))
OreDictionary.registerOre("dustNickel", ItemHelper.cloneStack(NickelAsh))
true
}
override def initialize(): Boolean = {
ItemHelper.addSmelting(ItemHelper.getOre("ingotIron"), ItemHelper.cloneStack(IronAsh), 0.3F)
ItemHelper.addSmelting(ItemHelper.getOre("ingotGold"), ItemHelper.cloneStack(GoldAsh), 0.3F)
ItemHelper.addSmelting(ItemHelper.cloneStack(CopperIngot), ItemHelper.cloneStack(CopperAsh), 0.3F)
ItemHelper.addSmelting(ItemHelper.cloneStack(TinIngot), ItemHelper.cloneStack(TinAsh), 0.3F)
ItemHelper.addSmelting(ItemHelper.cloneStack(NickelIngot), ItemHelper.cloneStack(NickelAsh), 0.3F)
true
}
}
|
aergonaut/LifeAquatic
|
src/main/scala/com/aergonaut/lifeaquatic/item/ModItems.scala
|
Scala
|
mit
| 4,692 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.util
import scala.collection.mutable.ArrayBuffer
import org.apache.spark._
import org.apache.spark.sql.{functions, AnalysisException, QueryTest}
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, InsertIntoStatement, LogicalPlan, Project}
import org.apache.spark.sql.execution.{QueryExecution, WholeStageCodegenExec}
import org.apache.spark.sql.execution.datasources.{CreateTable, InsertIntoHadoopFsRelationCommand}
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.test.SharedSparkSession
class DataFrameCallbackSuite extends QueryTest with SharedSparkSession {
import testImplicits._
import functions._
test("execute callback functions when a DataFrame action finished successfully") {
val metrics = ArrayBuffer.empty[(String, QueryExecution, Long)]
val listener = new QueryExecutionListener {
// Only test successful case here, so no need to implement `onFailure`
override def onFailure(funcName: String, qe: QueryExecution, error: Throwable): Unit = {}
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
metrics += ((funcName, qe, duration))
}
}
spark.listenerManager.register(listener)
val df = Seq(1 -> "a").toDF("i", "j")
df.select("i").collect()
df.filter($"i" > 0).count()
sparkContext.listenerBus.waitUntilEmpty()
assert(metrics.length == 2)
assert(metrics(0)._1 == "collect")
assert(metrics(0)._2.analyzed.isInstanceOf[Project])
assert(metrics(0)._3 > 0)
assert(metrics(1)._1 == "count")
assert(metrics(1)._2.analyzed.isInstanceOf[Aggregate])
assert(metrics(1)._3 > 0)
spark.listenerManager.unregister(listener)
}
testQuietly("execute callback functions when a DataFrame action failed") {
val metrics = ArrayBuffer.empty[(String, QueryExecution, Throwable)]
val listener = new QueryExecutionListener {
override def onFailure(funcName: String, qe: QueryExecution, error: Throwable): Unit = {
metrics += ((funcName, qe, error))
}
// Only test failed case here, so no need to implement `onSuccess`
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {}
}
spark.listenerManager.register(listener)
val errorUdf = udf[Int, Int] { _ => throw new RuntimeException("udf error") }
val df = sparkContext.makeRDD(Seq(1 -> "a")).toDF("i", "j")
val e = intercept[SparkException](df.select(errorUdf($"i")).collect())
sparkContext.listenerBus.waitUntilEmpty()
assert(metrics.length == 1)
assert(metrics(0)._1 == "collect")
assert(metrics(0)._2.analyzed.isInstanceOf[Project])
assert(metrics(0)._3.getMessage == e.getMessage)
spark.listenerManager.unregister(listener)
}
test("get numRows metrics by callback") {
val metrics = ArrayBuffer.empty[Long]
val listener = new QueryExecutionListener {
// Only test successful case here, so no need to implement `onFailure`
override def onFailure(funcName: String, qe: QueryExecution, error: Throwable): Unit = {}
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
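        // When whole-stage codegen wraps the plan, the numOutputRows metric lives on the wrapped child plan.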
val metric = qe.executedPlan match {
case w: WholeStageCodegenExec => w.child.longMetric("numOutputRows")
case other => other.longMetric("numOutputRows")
}
metrics += metric.value
}
}
spark.listenerManager.register(listener)
val df = Seq(1 -> "a").toDF("i", "j").groupBy("i").count()
df.collect()
// Wait for the first `collect` to be caught by our listener. Otherwise the next `collect` will
// reset the plan metrics.
sparkContext.listenerBus.waitUntilEmpty()
df.collect()
Seq(1 -> "a", 2 -> "a").toDF("i", "j").groupBy("i").count().collect()
sparkContext.listenerBus.waitUntilEmpty()
assert(metrics.length == 3)
assert(metrics(0) === 1)
assert(metrics(1) === 1)
assert(metrics(2) === 2)
spark.listenerManager.unregister(listener)
}
// TODO: Currently some LongSQLMetric instances use -1 as the initial value, so if the accumulator
// is never updated, we can filter it out later. However, when we aggregate (sum) accumulator values
// at the driver side for SQL physical operators, these -1 values will make our result smaller.
// An easy fix is to create a new SQLMetric (including a new MetricValue, MetricParam, etc.), but we
// can do it later because the impact is just too small (1048576 tasks for 1 MB).
ignore("get size metrics by callback") {
val metrics = ArrayBuffer.empty[Long]
val listener = new QueryExecutionListener {
// Only test successful case here, so no need to implement `onFailure`
override def onFailure(funcName: String, qe: QueryExecution, error: Throwable): Unit = {}
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
metrics += qe.executedPlan.longMetric("dataSize").value
val bottomAgg = qe.executedPlan.children(0).children(0)
metrics += bottomAgg.longMetric("dataSize").value
}
}
spark.listenerManager.register(listener)
val sparkListener = new SaveInfoListener
spark.sparkContext.addSparkListener(sparkListener)
val df = (1 to 100).map(i => i -> i.toString).toDF("i", "j")
df.groupBy("i").count().collect()
def getPeakExecutionMemory(stageId: Int): Long = {
val peakMemoryAccumulator = sparkListener.getCompletedStageInfos(stageId).accumulables
.filter(_._2.name == Some(InternalAccumulator.PEAK_EXECUTION_MEMORY))
assert(peakMemoryAccumulator.size == 1)
peakMemoryAccumulator.head._2.value.get.asInstanceOf[Long]
}
assert(sparkListener.getCompletedStageInfos.length == 2)
val bottomAggDataSize = getPeakExecutionMemory(0)
val topAggDataSize = getPeakExecutionMemory(1)
// For this simple case, the peakExecutionMemory of a stage should be the data size of the
// aggregate operator, as we only have one memory consuming operator per stage.
sparkContext.listenerBus.waitUntilEmpty()
assert(metrics.length == 2)
assert(metrics(0) == topAggDataSize)
assert(metrics(1) == bottomAggDataSize)
spark.listenerManager.unregister(listener)
}
test("execute callback functions for DataFrameWriter") {
val commands = ArrayBuffer.empty[(String, LogicalPlan)]
val errors = ArrayBuffer.empty[(String, Throwable)]
val listener = new QueryExecutionListener {
override def onFailure(funcName: String, qe: QueryExecution, error: Throwable): Unit = {
errors += funcName -> error
}
override def onSuccess(funcName: String, qe: QueryExecution, duration: Long): Unit = {
commands += funcName -> qe.logical
}
}
spark.listenerManager.register(listener)
withTempPath { path =>
spark.range(10).write.format("json").save(path.getCanonicalPath)
sparkContext.listenerBus.waitUntilEmpty()
assert(commands.length == 1)
assert(commands.head._1 == "save")
assert(commands.head._2.isInstanceOf[InsertIntoHadoopFsRelationCommand])
assert(commands.head._2.asInstanceOf[InsertIntoHadoopFsRelationCommand]
.fileFormat.isInstanceOf[JsonFileFormat])
}
withTable("tab") {
sql("CREATE TABLE tab(i long) using parquet") // adds commands(1) via onSuccess
spark.range(10).write.insertInto("tab")
sparkContext.listenerBus.waitUntilEmpty()
assert(commands.length == 3)
assert(commands(2)._1 == "insertInto")
assert(commands(2)._2.isInstanceOf[InsertIntoStatement])
assert(commands(2)._2.asInstanceOf[InsertIntoStatement].table
.asInstanceOf[UnresolvedRelation].multipartIdentifier == Seq("tab"))
}
// exiting withTable adds commands(3) via onSuccess (drops tab)
withTable("tab") {
spark.range(10).select($"id", $"id" % 5 as "p").write.partitionBy("p").saveAsTable("tab")
sparkContext.listenerBus.waitUntilEmpty()
assert(commands.length == 5)
assert(commands(4)._1 == "saveAsTable")
assert(commands(4)._2.isInstanceOf[CreateTable])
assert(commands(4)._2.asInstanceOf[CreateTable].tableDesc.partitionColumnNames == Seq("p"))
}
withTable("tab") {
sql("CREATE TABLE tab(i long) using parquet")
val e = intercept[AnalysisException] {
spark.range(10).select($"id", $"id").write.insertInto("tab")
}
sparkContext.listenerBus.waitUntilEmpty()
assert(errors.length == 1)
assert(errors.head._1 == "insertInto")
assert(errors.head._2 == e)
}
}
}
|
caneGuy/spark
|
sql/core/src/test/scala/org/apache/spark/sql/util/DataFrameCallbackSuite.scala
|
Scala
|
apache-2.0
| 9,516 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.{Equality, Every, One, Many, Entry}
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
import exceptions.TestFailedException
class EveryShouldContainInOrderOnlyLogicalOrSpec extends Spec {
//ADDITIONAL//
val invertedStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
val invertedListOfStringEquality =
new Equality[Every[String]] {
def areEqual(a: Every[String], b: Any): Boolean = a != b
}
private def upperCase(value: Any): Any =
value match {
case l: Every[_] => l.map(upperCase(_))
case s: String => s.toUpperCase
case c: Char => c.toString.toUpperCase.charAt(0)
case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
case e: java.util.Map.Entry[_, _] =>
(e.getKey, e.getValue) match {
case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
case _ => value
}
case _ => value
}
val upperCaseStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = upperCase(a) == upperCase(b)
}
val fileName: String = "EveryShouldContainInOrderOnlyLogicalOrSpec.scala"
object `an Every` {
val fumList: Every[String] = Every("fum", "fum", "foe", "fie", "fee", "fee", "fee")
val toList: Every[String] = Every("you", "to", "to", "birthday", "birthday", "happy")
object `when used with (contain inOrderOnly xx or contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain inOrderOnly ("fum", "foe", "fie", "fee") or contain inOrderOnly ("fum", "foe", "fie", "fee"))
fumList should (contain inOrderOnly ("fee", "fie", "foe", "fum") or contain inOrderOnly ("fum", "foe", "fie", "fee"))
fumList should (contain inOrderOnly ("fum", "foe", "fie", "fee") or contain inOrderOnly ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderOnly ("fee", "fie", "foe", "fum") or contain inOrderOnly ("happy", "birthday", "to", "you"))
}
checkMessageStackDepth(e1, Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\"") + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"happy\\", \\"birthday\\", \\"to\\", \\"you\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or (contain inOrderOnly ("FIE", "FEE", "FAM", "FOE")))
}
checkMessageStackDepth(e1, Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FIE\\", \\"FEE\\", \\"FAM\\", \\"FOE\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or contain inOrderOnly ("FIE", "FEE", "FAM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FIE\\", \\"FEE\\", \\"FAM\\", \\"FOE\\""), fileName, thisLineNumber - 2)
(fumList should (contain inOrderOnly (" FUM ", " FOE ", " FIE ", " FEE ") or contain inOrderOnly (" FUM ", " FOE ", " FIE ", " FEE "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain inOrderOnly ("fee", "fie", "foe", "fie", "fum") or contain inOrderOnly ("fum", "foe", "fie", "fee"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (contain inOrderOnly ("fum", "foe", "fie", "fee") or contain inOrderOnly ("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (equal xx and contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (equal (fumList) or contain inOrderOnly ("fum", "foe", "fie", "fee"))
fumList should (equal (toList) or contain inOrderOnly ("fum", "foe", "fie", "fee"))
fumList should (equal (fumList) or contain inOrderOnly ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or contain inOrderOnly ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (equal (fumList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
fumList should (equal (toList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
fumList should (equal (fumList) or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (equal (toList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (fumList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (toList) or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (equal (fumList) or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.didNotEqual(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
(fumList should (equal (toList) or contain inOrderOnly (" FEE ", " FIE ", " FOE ", " FUM "))) (decided by invertedListOfStringEquality, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (equal (fumList) or contain inOrderOnly ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (be xx and contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (be_== (fumList) or contain inOrderOnly ("fum", "foe", "fie", "fee"))
fumList should (be_== (toList) or contain inOrderOnly ("fum", "foe", "fie", "fee"))
fumList should (be_== (fumList) or contain inOrderOnly ("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) or contain inOrderOnly ("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (be_== (fumList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
fumList should (be_== (toList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
fumList should (be_== (fumList) or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (be_== (toList) or (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (be_== (fumList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality)
(fumList should (be_== (toList) or contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality)
(fumList should (be_== (fumList) or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (be_== (toList) or contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\""), fileName, thisLineNumber - 2)
(fumList should (be_== (fumList) or contain inOrderOnly (" FUM ", " FOE ", " FIE ", " FEE "))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (be_== (fumList) or contain inOrderOnly ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (contain inOrderOnly xx and be xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (contain inOrderOnly ("fum", "foe", "fie", "fee") or be_== (fumList))
fumList should (contain inOrderOnly ("fee", "fie", "foe", "fum") or be_== (fumList))
fumList should (contain inOrderOnly ("fum", "foe", "fie", "fee") or be_== (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderOnly ("fee", "fie", "foe", "fum") or be_== (toList))
}
checkMessageStackDepth(e1, Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"fee\\", \\"fie\\", \\"foe\\", \\"fum\\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or be_== (fumList))
fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or be_== (fumList))
fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or be_== (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or be_== (toList))
}
checkMessageStackDepth(e1, Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or be_== (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or be_== (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or be_== (toList))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or be_== (toList))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.didNotContainInOrderOnlyElements(decorateToStringValue(fumList), "\\"FEE\\", \\"FIE\\", \\"FOE\\", \\"FUM\\"") + ", and " + Resources.wasNotEqualTo(decorateToStringValue(fumList), decorateToStringValue(toList)), fileName, thisLineNumber - 2)
(fumList should (contain inOrderOnly (" FUM ", " FOE ", " FIE ", " FEE ") or be_== (fumList))) (after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (contain inOrderOnly ("fee", "fie", "foe", "fie", "fum") or be_== (fumList))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (not contain inOrderOnly xx and not contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not contain inOrderOnly ("fee", "fie", "foe", "fum") or not contain inOrderOnly ("fee", "fie", "foe", "fum"))
fumList should (not contain inOrderOnly ("fum", "foe", "fie", "fee") or not contain inOrderOnly ("fee", "fie", "foe", "fum"))
fumList should (not contain inOrderOnly ("fee", "fie", "foe", "fum") or not contain inOrderOnly ("fum", "foe", "fie", "fee"))
val e1 = intercept[TestFailedException] {
fumList should (not contain inOrderOnly ("fum", "foe", "fie", "fee") or not contain inOrderOnly ("fum", "foe", "fie", "fee"))
}
checkMessageStackDepth(e1, Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"fum\\", \\"foe\\", \\"fie\\", \\"fee\\"") + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"fum\\", \\"foe\\", \\"fie\\", \\"fee\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
fumList should (not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
fumList should (not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
val e1 = intercept[TestFailedException] {
fumList should (not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
}
checkMessageStackDepth(e1, Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\"") + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM") or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE") or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\"") + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\""), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not contain inOrderOnly ("fee", "fie", "foe", "fie", "fum") or not contain inOrderOnly ("fee", "fie", "foe", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
fumList should (not contain inOrderOnly ("fee", "fie", "foe", "fum") or not contain inOrderOnly ("fee", "fie", "foe", "fie", "fum"))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (not equal xx and not contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not equal (toList) or not contain inOrderOnly ("fee", "fie", "foe", "fum"))
fumList should (not equal (fumList) or not contain inOrderOnly ("fee", "fie", "foe", "fum"))
fumList should (not equal (toList) or not contain inOrderOnly ("fum", "foe", "fie", "fee"))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) or not contain inOrderOnly ("fum", "foe", "fie", "fee"))
}
checkMessageStackDepth(e1, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"fum\\", \\"foe\\", \\"fie\\", \\"fee\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not equal (toList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
fumList should (not equal (fumList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
fumList should (not equal (toList) or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
val e2 = intercept[TestFailedException] {
fumList should (not equal (fumList) or (not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE")))
}
checkMessageStackDepth(e2, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not equal (fumList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (toList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (fumList) or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not equal (toList) or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.equaled(decorateToStringValue(fumList), decorateToStringValue(toList)) + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\""), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not equal (toList) or not contain inOrderOnly ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (not be xx and not contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
fumList should (not be_== (toList) or not contain inOrderOnly ("fee", "fie", "foe", "fum"))
fumList should (not be_== (fumList) or not contain inOrderOnly ("fee", "fie", "foe", "fum"))
fumList should (not be_== (toList) or not contain inOrderOnly ("fum", "foe", "fie", "fee"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) or not contain inOrderOnly ("fum", "foe", "fie", "fee"))
}
checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"fum\\", \\"foe\\", \\"fie\\", \\"fee\\""), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
fumList should (not be_== (toList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
fumList should (not be_== (fumList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))
fumList should (not be_== (toList) or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))
val e1 = intercept[TestFailedException] {
fumList should (not be_== (fumList) or (not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE")))
}
checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\""), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(fumList should (not be_== (toList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
(fumList should (not be_== (fumList) or not contain inOrderOnly ("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
(fumList should (not be_== (toList) or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not be_== (fumList) or not contain inOrderOnly ("FUM", "FOE", "FIE", "FEE"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, Resources.wasEqualTo(decorateToStringValue(fumList), decorateToStringValue(fumList)) + ", and " + Resources.containedInOrderOnlyElements(decorateToStringValue(fumList), "\\"FUM\\", \\"FOE\\", \\"FIE\\", \\"FEE\\""), fileName, thisLineNumber - 2)
(fumList should (not contain inOrderOnly (" FEE ", " FIE ", " FOE ", " FUU ") or not contain inOrderOnly (" FEE ", " FIE ", " FOE ", " FUU "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
fumList should (not be_== (toList) or not contain inOrderOnly ("fee", "fie", "foe", "fie", "fum"))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
}
object `every of Everys` {
val list1s: Every[Every[Int]] = Every(Every(1, 2, 2, 3), Every(1, 2, 2, 3), Every(1, 2, 2, 3))
val lists: Every[Every[Int]] = Every(Every(1, 2, 2, 3), Every(1, 1, 2, 3, 3), Every(2, 3, 4))
val hiLists: Every[Every[String]] = Every(Every("hi", "hello"), Every("hi", "hello"), Every("hi", "hello"))
val toLists: Every[Every[String]] = Every(Every("you", "to"), Every("you", "to"), Every("you", "to"))
def allErrMsg(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \\n" +
" at index " + index + ", " + message + " (" + fileName + ":" + (lineNumber) + ") \\n" +
"in " + decorateToStringValue(left)
object `when used with (contain inOrderOnly xx and contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (contain inOrderOnly (1, 2, 3) or contain inOrderOnly (1, 2, 3))
all (list1s) should (contain inOrderOnly (3, 2, 5) or contain inOrderOnly (1, 2, 3))
all (list1s) should (contain inOrderOnly (1, 2, 3) or contain inOrderOnly (2, 3, 4))
atLeast (2, lists) should (contain inOrderOnly (1, 2, 3) or contain inOrderOnly (1, 2, 3))
atLeast (2, lists) should (contain inOrderOnly (3, 6, 5) or contain inOrderOnly (1, 2, 3))
atLeast (2, lists) should (contain inOrderOnly (1, 2, 3) or contain inOrderOnly (8, 3, 4))
val e1 = intercept[TestFailedException] {
all (lists) should (contain inOrderOnly (1, 2, 3) or contain inOrderOnly (1, 2, 3))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(2, 3, 4)) + " did not contain only " + "(1, 2, 3)" + " in order" + ", and " + decorateToStringValue(Many(2, 3, 4)) + " did not contain only " + "(1, 2, 3)" + " in order", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (contain inOrderOnly ("HI", "HELLO") or contain inOrderOnly ("hi", "hello"))
all (hiLists) should (contain inOrderOnly ("HELLO", "HO") or contain inOrderOnly ("hi", "hello"))
all (hiLists) should (contain inOrderOnly ("HI", "HELLO") or contain inOrderOnly ("hello", "ho"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (contain inOrderOnly ("HELLO", "HO") or contain inOrderOnly ("hello", "ho"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\\"HELLO\\", \\"HO\\")" + " in order" + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\\"hello\\", \\"ho\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (contain inOrderOnly ("HI", "HELLO") or contain inOrderOnly ("hi", "hello"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain inOrderOnly ("HELLO", "HO") or contain inOrderOnly ("hi", "hello"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain inOrderOnly ("HI", "HELLO") or contain inOrderOnly ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (contain inOrderOnly ("HELLO", "HO") or contain inOrderOnly ("hello", "ho"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\\"HELLO\\", \\"HO\\")" + " in order" + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\\"hello\\", \\"ho\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain inOrderOnly (1, 2, 2, 3) or contain inOrderOnly (1, 2, 3))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (contain inOrderOnly (1, 2, 3) or contain inOrderOnly (1, 2, 2, 3))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (be xx and contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (be_== (Many(1, 2, 2, 3)) or contain inOrderOnly (1, 2, 3))
all (list1s) should (be_== (Many(2, 3, 4)) or contain inOrderOnly (1, 2, 3))
all (list1s) should (be_== (Many(1, 2, 2, 3)) or contain inOrderOnly (2, 3, 4))
val e1 = intercept[TestFailedException] {
all (list1s) should (be_== (Many(2, 3, 4)) or contain inOrderOnly (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many(1, 2, 2, 3)) + " was not equal to " + decorateToStringValue(Many(2, 3, 4)) + ", and " + decorateToStringValue(Many(1, 2, 2, 3)) + " did not contain only " + "(2, 3, 4)" + " in order", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (be_== (Many("hi", "hello")) or contain inOrderOnly ("HI", "HELLO"))
all (hiLists) should (be_== (Many("ho", "hello")) or contain inOrderOnly ("HI", "HELLO"))
all (hiLists) should (be_== (Many("hi", "hello")) or contain inOrderOnly ("HELLO", "HI"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (be_== (Many("ho", "hello")) or contain inOrderOnly ("HELLO", "HI"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(Many("ho", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\\"HELLO\\", \\"HI\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (be_== (Many("hi", "hello")) or contain inOrderOnly ("HI", "HELLO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be_== (Many("ho", "hello")) or contain inOrderOnly ("HI", "HELLO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be_== (Many("hi", "hello")) or contain inOrderOnly ("HELLO", "HI"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (be_== (Many("ho", "hello")) or contain inOrderOnly ("HELLO", "HI"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was not equal to " + decorateToStringValue(Many("ho", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " did not contain only " + "(\\"HELLO\\", \\"HI\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (be_== (Many(1, 2, 2, 3)) or contain inOrderOnly (1, 2, 2, 3))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (not contain inOrderOnly xx and not contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not contain inOrderOnly (3, 2, 8) or not contain inOrderOnly (8, 3, 4))
all (list1s) should (not contain inOrderOnly (1, 2, 3) or not contain inOrderOnly (8, 3, 4))
all (list1s) should (not contain inOrderOnly (3, 2, 8) or not contain inOrderOnly (1, 2, 3))
val e1 = intercept[TestFailedException] {
all (lists) should (not contain inOrderOnly (2, 3, 4) or not contain inOrderOnly (2, 3, 4))
}
checkMessageStackDepth(e1, allErrMsg(2, decorateToStringValue(Many(2, 3, 4)) + " contained only " + "(2, 3, 4)" + " in order" + ", and " + decorateToStringValue(Many(2, 3, 4)) + " contained only " + "(2, 3, 4)" + " in order", thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not contain inOrderOnly ("HELLO", "HI") or not contain inOrderOnly ("hello", "hi"))
all (hiLists) should (not contain inOrderOnly ("HI", "HELLO") or not contain inOrderOnly ("hello", "hi"))
all (hiLists) should (not contain inOrderOnly ("HELLO", "HI") or not contain inOrderOnly ("hi", "hello"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not contain inOrderOnly ("HI", "HELLO") or not contain inOrderOnly ("hi", "hello"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\\"HI\\", \\"HELLO\\")" + " in order" + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\\"hi\\", \\"hello\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not contain inOrderOnly ("HELLO", "HI") or not contain inOrderOnly ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain inOrderOnly ("HI", "HELLO") or not contain inOrderOnly ("hello", "hi"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain inOrderOnly ("HELLO", "HI") or not contain inOrderOnly ("hi", "hello"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not contain inOrderOnly ("HI", "HELLO") or not contain inOrderOnly ("hi", "hello"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\\"HI\\", \\"HELLO\\")" + " in order" + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\\"hi\\", \\"hello\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain inOrderOnly (1, 2, 2, 3) or not contain inOrderOnly (8, 3, 4))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
val e2 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not contain inOrderOnly (8, 3, 4) or not contain inOrderOnly (1, 2, 2, 3))
}
e2.failedCodeFileName.get should be (fileName)
e2.failedCodeLineNumber.get should be (thisLineNumber - 3)
e2.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
object `when used with (not be xx and not contain inOrderOnly xx)` {
def `should do nothing if valid, else throw a TFE with an appropriate error message` {
all (list1s) should (not be_== (One(2)) or not contain inOrderOnly (8, 3, 4))
all (list1s) should (not be_== (Many(1, 2, 2, 3)) or not contain inOrderOnly (8, 3, 4))
all (list1s) should (not be_== (One(2)) or not contain inOrderOnly (1, 2, 3))
val e1 = intercept[TestFailedException] {
all (list1s) should (not be_== (Many(1, 2, 2, 3)) or not contain inOrderOnly (1, 2, 3))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many(1, 2, 2, 3)) + " was equal to " + decorateToStringValue(Many(1, 2, 2, 3)) + ", and " + decorateToStringValue(Many(1, 2, 2, 3)) + " contained only " + "(1, 2, 3)" + " in order", thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
def `should use the implicit Equality in scope` {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not be_== (Many("hello", "ho")) or not contain inOrderOnly ("HELLO", "HO"))
all (hiLists) should (not be_== (Many("hi", "hello")) or not contain inOrderOnly ("HELLO", "HO"))
all (hiLists) should (not be_== (Many("hello", "ho")) or not contain inOrderOnly ("HI", "HELLO"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not be_== (Many("hi", "hello")) or not contain inOrderOnly ("HI", "HELLO"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\\"HI\\", \\"HELLO\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should use an explicitly provided Equality` {
(all (hiLists) should (not be_== (Many("hello", "ho")) or not contain inOrderOnly ("HELLO", "HO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be_== (Many("hi", "hello")) or not contain inOrderOnly ("HELLO", "HO"))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be_== (Many("hello", "ho")) or not contain inOrderOnly ("HI", "HELLO"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not be_== (Many("hi", "hello")) or not contain inOrderOnly ("HI", "HELLO"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(Many("hi", "hello")) + " was equal to " + decorateToStringValue(Many("hi", "hello")) + ", and " + decorateToStringValue(Many("hi", "hello")) + " contained only " + "(\\"HI\\", \\"HELLO\\")" + " in order", thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
def `should throw NotAllowedException with correct stack depth and message when RHS contain duplicated value` {
val e1 = intercept[exceptions.NotAllowedException] {
all (list1s) should (not be_== (One(2)) or not contain inOrderOnly (1, 2, 2, 3))
}
e1.failedCodeFileName.get should be (fileName)
e1.failedCodeLineNumber.get should be (thisLineNumber - 3)
e1.message should be (Some(Resources.inOrderOnlyDuplicate))
}
}
}
}
|
SRGOM/scalatest
|
scalatest-test/src/test/scala/org/scalatest/EveryShouldContainInOrderOnlyLogicalOrSpec.scala
|
Scala
|
apache-2.0
| 43,321 |
/*
* Copyright (c) 2015 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Test
import org.junit.Assert._
import test._
package Generic1TestsAux {
trait TC1[F[_]]
object TC1 extends TC10 {
implicit def tc1Id: TC1[Id] = new TC1[Id] {}
}
trait TC1sub[F[_]] extends TC1[F]
object TC1sub {
implicit def tc1sub[F[_]]: TC1sub[F] = new TC1sub[F] {}
}
trait TC10 {
implicit def tc1[F[_]]: TC1[F] = new TC1[F] {}
}
trait TC2[L[_]]
object TC2 {
implicit def tc2[L[_]]: TC2[L] = new TC2[L] {}
}
trait TC2sub[F[_]] extends TC2[F]
object TC2sub {
implicit def tc2sub[F[_]]: TC2sub[F] = new TC2sub[F] {}
}
trait TC3[F[_], G[_]]
object TC3 {
implicit def tc3[F[_], G[_]]: TC3[F, G] = new TC3[F, G] {}
}
trait Box[T]
case class Foo[T](t: T)
case class Bar[T](t: Box[T])
case class Baz[T](t: T, s: String)
sealed trait Cp[+T]
case class CpA[+T](t: T) extends Cp[T]
case class CpB[+T](t: T) extends Cp[T]
case object CpC extends Cp[Nothing]
case class CpD[+T](t: T, n: Cp[T]) extends Cp[T]
case class Prod[T](t: T, ts: List[T])
sealed trait IList[A]
final case class ICons[A](head: A, tail: IList[A]) extends IList[A]
final case class INil[A]() extends IList[A]
object IList {
def fromSeq[T](ts: Seq[T]): IList[T] =
ts.foldRight(INil[T](): IList[T])(ICons(_, _))
}
sealed trait Tree[T]
case class Leaf[T](t: T) extends Tree[T]
case class Node[T](l: Tree[T], r: Tree[T]) extends Tree[T]
sealed trait Overlapping1[+T]
sealed trait OA1[+T] extends Overlapping1[T]
case class OAC1[+T](t: T) extends OA1[T]
sealed trait OB1[+T] extends Overlapping1[T]
case class OBC1[+T](t: T) extends OB1[T]
case class OAB1[+T](t: T) extends OA1[T] with OB1[T]
trait Functor[F[_]] {
def map[A, B](fa: F[A])(f: A => B): F[B]
}
object Functor extends Functor0 {
def apply[F[_]](implicit f: Lazy[Functor[F]]): Functor[F] = f.value
implicit val idFunctor: Functor[Id] =
new Functor[Id] {
def map[A, B](a: A)(f: A => B): B = f(a)
}
// Induction step for products
implicit def hcons[F[_]](implicit ihc: IsHCons1[F, Functor, Functor]): Functor[F] =
new Functor[F] {
def map[A, B](fa: F[A])(f: A => B): F[B] = {
val (hd, tl) = ihc.unpack(fa)
ihc.pack((ihc.fh.map(hd)(f), ihc.ft.map(tl)(f)))
}
}
implicit def constFunctor[T]: Functor[Const[T]#λ] =
new Functor[Const[T]#λ] {
def map[A, B](t: T)(f: A => B): T = t
}
}
trait Functor0 {
// Induction step for coproducts
implicit def ccons[F[_]](implicit icc: IsCCons1[F, Functor, Functor]): Functor[F] =
new Functor[F] {
def map[A, B](fa: F[A])(f: A => B): F[B] =
icc.pack(icc.unpack(fa).fold(hd => Left(icc.fh.map(hd)(f)), tl => Right(icc.ft.map(tl)(f))))
}
implicit def generic[F[_]](implicit gen: Generic1[F, Functor]): Functor[F] =
new Functor[F] {
def map[A, B](fa: F[A])(f: A => B): F[B] =
gen.from(gen.fr.map(gen.to(fa))(f))
}
}
// Functor syntax
object functorSyntax {
implicit def apply[F[_]: Functor, A](fa: F[A]): FunctorOps[F, A] =
new FunctorOps[F, A](fa)
class FunctorOps[F[_], A](fa: F[A])(implicit F: Functor[F]) {
def map[B](f: A => B): F[B] = F.map(fa)(f)
}
}
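  // Illustrative sketch (not part of the original file): with the instances above in scope,
  // a Functor is derived generically for an ADT such as Tree and can be used through
  // functorSyntax, e.g.
  //   import functorSyntax._
  //   Functor[Tree].map(Node(Leaf(1), Leaf(2)))(_ + 1)   // Node(Leaf(2), Leaf(3))
  // The testFunctor test in Generic1Tests below exercises exactly this derivation.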
  /** This version of Pointed is incomplete and NOT working, but it is enough to expose bugs in IsHCons1/IsCCons1/Generic1 macro generation */
trait Pointed[F[_]] { def point[A](a: A): F[A] }
object Pointed extends Pointed0 {
def apply[F[_]](implicit f: Lazy[Pointed[F]]): Pointed[F] = f.value
implicit val idPointed: Pointed[Id] =
new Pointed[Id] {
def point[A](a: A): Id[A] = a
}
// Pointed can be built for Singleton types
implicit def constSingletonPointed[T](implicit w: Witness.Aux[T]): Pointed[Const[T]#λ] =
new Pointed[Const[T]#λ] {
def point[A](a: A): T = w.value
}
implicit def isCPointedSingleSingleton[C](
implicit w: Witness.Aux[C], pf: Lazy[Pointed[Const[C]#λ]]
): Pointed[({type λ[A] = Const[C]#λ[A] :+: Const[CNil]#λ[A] })#λ] =
new Pointed[({type λ[A] = Const[C]#λ[A] :+: Const[CNil]#λ[A] })#λ] {
def point[A](a: A): Const[C]#λ[A] :+: Const[CNil]#λ[A] = Inl(pf.value.point(a))
}
implicit def isCPointedSingle[F[_]](
implicit pf: Lazy[Pointed[F]]
): Pointed[({type λ[A] = F[A] :+: Const[CNil]#λ[A] })#λ] =
new Pointed[({type λ[A] = F[A] :+: Const[CNil]#λ[A] })#λ] {
def point[A](a: A): F[A] :+: Const[CNil]#λ[A] = Inl(pf.value.point(a))
}
}
trait Pointed0 extends Pointed1 {
implicit def hcons[F[_]](implicit ihc: IsHCons1[F, Pointed, Pointed]): Pointed[F] =
new Pointed[F] {
def point[A](a: A): F[A] = {
ihc.pack(ihc.fh.point(a), ihc.ft.point(a))
}
}
implicit def ccons[F[_]](implicit ihc: IsCCons1[F, Pointed, Pointed]): Pointed[F] =
new Pointed[F] {
def point[A](a: A): F[A] = {
ihc.pack(Left(ihc.fh.point(a)))
}
}
implicit def generic[F[_]](implicit gen: Generic1[F, Pointed]): Pointed[F] =
new Pointed[F] {
def point[A](a: A): F[A] = gen.from(gen.fr.point(a))
}
}
trait Pointed1 {
// HACKING the fact that CNil can't be pointed
implicit def isCPointedSimpleType: Pointed[({type λ[A] = A :+: Const[CNil]#λ[A] })#λ] =
new Pointed[({type λ[A] = A :+: Const[CNil]#λ[A] })#λ] {
def point[A](a: A): A :+: Const[CNil]#λ[A] = Inl(a)
}
implicit val constHNilPointed: Pointed[Const[HNil]#λ] =
new Pointed[Const[HNil]#λ] {
def point[A](a: A): HNil = HNil
}
}
// Pointed syntax
object pointedSyntax {
implicit def pointedOps[A](a: A): PointedOps[A] = new PointedOps(a)
class PointedOps[A](a: A) {
def point[F[_]](implicit F: Pointed[F]): F[A] = F.point(a)
}
}
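  // Illustrative sketch (assumption, not in the original file): with pointedSyntax in scope
  // and a derived Pointed[F], a value can be lifted with point, e.g.
  //   import pointedSyntax._
  //   1.point[Option]   // intended to produce Some(1)
  // Note the comment above Pointed: this encoding is incomplete and exists mainly to
  // exercise the IsHCons1/IsCCons1/Generic1 macros; testPointed below only checks that
  // Pointed[Option] can be materialized.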
trait Trivial1[F[_]]
object Trivial1 {
implicit def trivially[F[_]]: Trivial1[F] = new Trivial1[F] {}
}
trait Trivial10[F[_], T]
object Trivial10 {
implicit def trivially[F[_], T]: Trivial10[F, T] = new Trivial10[F, T] {}
}
trait Trivial01[T, F[_]]
object Trivial01 {
implicit def trivially[T, F[_]]: Trivial01[T, F] = new Trivial01[T, F] {}
}
trait Trivial11[F[_], T[_]]
object Trivial11 {
implicit def trivially[F[_], T[_]]: Trivial11[F, T] = new Trivial11[F, T] {}
}
}
class Generic1Tests {
import Generic1TestsAux._
@Test
def testGeneric1: Unit = {
Generic1[Foo, TC1]
Generic1[Bar, TC1]
Generic1[Baz, TC1]
Generic1[Cp, TC1]
Generic1[Some, TC1]
Generic1[Option, TC1]
Generic1[List, TC1]
Generic1[IList, TC1]
//
// type aliases required here: see https://issues.scala-lang.org/browse/SI-6895
type LList[T] = List[List[T]]
Generic1[LList, TC1]
type LPair[T] = IList[(T, T)]
Generic1[LPair, TC1]
type PList[T] = (IList[T], IList[T])
Generic1[PList, TC1]
type PIdList[T] = (T, List[T])
Generic1[PIdList, TC1]
type Either1[T] = Either[T, Int]
Generic1[Either1, TC1]
type Either2[T] = Either[Int, T]
Generic1[Either2, TC1]
val gen0 = Generic1[Prod, TC2]
val prod = Prod(23, List(1, 2, 3))
val r = gen0.to(prod)
typed[Int :: List[Int] :: HNil](r)
assertEquals((23 :: List(1, 2, 3) :: HNil), r)
val fr = gen0.fr
typed[TC2[gen0.R]](fr)
typed[TC2[({ type λ[t] = t :: List[t] :: HNil })#λ]](fr)
}
@Test
def testOverlappingCoproducts1: Unit = {
val gen = Generic1[Overlapping1, TC1]
val o: Overlapping1[Int] = OAB1(1)
val o0 = gen.to(o)
typed[OAB1[Int] :+: OAC1[Int] :+: OBC1[Int] :+: CNil](o0)
val s1 = gen.from(o0)
typed[Overlapping1[Int]](s1)
}
@Test
def testIsHCons1: Unit = {
type L[t] = Id[t] :: t :: String :: (t, t) :: List[Option[t]] :: Option[t] :: List[t] :: HNil
val ihc = the[IsHCons1[L, TC1, TC2]]
val l: L[Int] = 23 :: 13 :: "foo" :: (7, 13) :: List(Some(5)) :: Some(11) :: List(1, 2, 3) :: HNil
val (hd, tl) = ihc.unpack(l)
typed[Int](hd)
assertEquals(23, hd)
typed[Id[Int] :: String :: (Int, Int) :: List[Option[Int]] :: Option[Int] :: List[Int] :: HNil](tl)
assertEquals(13 :: "foo" :: (7, 13) :: List(Some(5)) :: Some(11) :: List(1, 2, 3) :: HNil, tl)
val cons = ihc.pack((hd, tl))
typed[L[Int]](cons)
assertEquals(l, cons)
type T[t] = (t, t) :: Option[t] :: HNil
val ihcT = implicitly[IsHCons1[T, TC1, TC2]]
}
trait Singleton1[T[_]]
object Singleton1 {
implicit val hnilInstance: Singleton1[Const[HNil]#λ] = new Singleton1[Const[HNil]#λ] {}
}
@Test
def testSingletons: Unit = {
type Unit1[t] = Unit
type None1[t] = None.type
implicitly[Generic1[Unit1, Singleton1]]
implicitly[Generic1[None1, Singleton1]]
}
@Test
def testFunctor: Unit = {
import functorSyntax._
type R0[t] = t :: HNil
type R1[t] = t :+: CNil
IsHCons1[R0, Functor, Functor]
IsCCons1[R1, Functor, Functor]
Functor[Id]
Functor[Const[Int]#λ]
Functor[Const[HNil]#λ]
Functor[Const[CNil]#λ]
Functor[R0]
Functor[R1]
Functor[Some]
Functor[Const[None.type]#λ]
Functor[Option]
Functor[List]
type Twin[t] = (t, t)
Functor[Twin]
type SS[t] = Some[Some[t]]
Functor[SS]
type SO[t] = Some[Option[t]]
Functor[SO]
type OS[t] = Option[Some[t]]
Functor[OS]
type OO[t] = Option[Option[t]]
Functor[OO]
type OL[t] = Option[List[t]]
Functor[OL]
type OT[t] = Option[(t, t)]
Functor[OT]
def transform[F[_]: Functor, A, B](ft: F[A])(f: A => B): F[B] = ft.map(f)
// Option has a Functor
val o = transform(Option("foo"))(_.length)
assertEquals(Some(3), o)
// List has a Functor
val l = transform(List("foo", "wibble", "quux"))(_.length)
assertEquals(List(3, 6, 4), l)
// Any case class has a Functor
val prod = Prod("Three", List("French", "Hens"))
val p0 = transform(prod)(_.length)
val p1 = prod.map(_.length) // they also have Functor syntax ...
val expectedProd = Prod(5, List(6, 4))
assertEquals(expectedProd, p0)
assertEquals(expectedProd, p1)
// Any ADT has a Functor ... even with recursion
val tree =
Node(
Leaf("quux"),
Node(
Leaf("foo"),
Leaf("wibble")
)
)
val t0 = transform(tree)(_.length)
val t1 = tree.map(_.length) // they also have Functor syntax ...
val expectedTree =
Node(
Leaf(4),
Node(
Leaf(3),
Leaf(6)
)
)
assertEquals(expectedTree, t0)
assertEquals(expectedTree, t1)
}
@Test
def testPointed: Unit = {
import pointedSyntax._
type R0[t] = None.type :: HNil
IsHCons1[R0, Pointed, Pointed]
Pointed[Option]
}
@Test
def testPartiallyApplied: Unit = {
implicitly[Trivial10[List, Int]]
type FI[f[_]] = Trivial10[f, Int]
implicitly[FI[List]]
val g0 = Generic1[Foo, FI]
typed[Trivial10[g0.R, Int]](g0.mkFrr)
implicitly[Trivial01[Int, List]]
type IF[f[_]] = Trivial01[Int, f]
implicitly[IF[List]]
val g1 = Generic1[Foo, IF]
typed[Trivial01[Int, g0.R]](g1.mkFrr)
implicitly[Trivial11[Set, List]]
type FL[f[_]] = Trivial11[f, List]
implicitly[FL[Set]]
val g2 = Generic1[Foo, FL]
typed[Trivial11[g2.R, List]](g2.mkFrr)
implicitly[Trivial11[List, Set]]
type LF[f[_]] = Trivial11[List, f]
implicitly[LF[Set]]
val g3 = Generic1[Foo, LF]
typed[Trivial11[List, g3.R]](g3.mkFrr)
type HC[t] = t :: HNil
val ih0 = IsHCons1[HC, FI, Trivial1]
typed[Trivial10[ih0.H, Int]](ih0.mkFhh)
typed[Trivial1[ih0.T]](ih0.mkFtt)
val ih1 = IsHCons1[HC, Trivial1, FI]
typed[Trivial1[ih1.H]](ih1.mkFhh)
typed[Trivial10[ih1.T, Int]](ih1.mkFtt)
type CC[t] = t :+: CNil
val ic0 = IsCCons1[CC, FI, Trivial1]
typed[Trivial10[ic0.H, Int]](ic0.mkFhh)
typed[Trivial1[ic0.T]](ic0.mkFtt)
val ic1 = IsCCons1[CC, Trivial1, FI]
typed[Trivial1[ic1.H]](ic1.mkFhh)
typed[Trivial10[ic1.T, Int]](ic1.mkFtt)
type LO[t] = List[Option[t]]
val s0 = Split1[LO, FI, Trivial1]
typed[Trivial10[s0.O, Int]](s0.mkFoo)
typed[Trivial1[s0.I]](s0.mkFii)
val s1 = Split1[LO, Trivial1, FI]
typed[Trivial1[s1.O]](s1.mkFoo)
typed[Trivial10[s1.I, Int]](s1.mkFii)
}
@Test
def testPartiallyApplied2: Unit = {
type CRepr[t] = t :: List[t] :: HNil
type LRepr[t] = scala.collection.immutable.::[t] :+: Nil.type :+: CNil
type LS[t] = List[Set[t]]
val g0 = Generic1[List, ({ type λ[t[_]] = TC3[t, Option] })#λ]
implicitly[g0.R[Int] =:= LRepr[Int]]
typed[TC3[LRepr, Option]](g0.fr)
val g1 = Generic1[List, ({ type λ[t[_]] = TC3[Option, t] })#λ]
implicitly[g1.R[Int] =:= LRepr[Int]]
typed[TC3[Option, LRepr]](g1.fr)
val h0 = IsHCons1[CRepr, ({ type λ[t[_]] = TC3[t, Option] })#λ, Trivial1]
typed[TC3[h0.H, Option]](h0.fh)
typed[Trivial1[h0.T]](h0.ft)
val h1 = IsHCons1[CRepr, ({ type λ[t[_]] = TC3[Option, t] })#λ, Trivial1]
typed[TC3[Option, h1.H]](h1.fh)
typed[Trivial1[h1.T]](h1.ft)
val h2 = IsHCons1[CRepr, Trivial1, ({ type λ[t[_]] = TC3[t, Option] })#λ]
typed[Trivial1[h2.H]](h2.fh)
typed[TC3[h2.T, Option]](h2.ft)
val h3 = IsHCons1[CRepr, Trivial1, ({ type λ[t[_]] = TC3[Option, t] })#λ]
typed[Trivial1[h3.H]](h3.fh)
typed[TC3[Option, h3.T]](h3.ft)
val c0 = IsCCons1[LRepr, ({ type λ[t[_]] = TC3[t, Option] })#λ, Trivial1]
typed[TC3[c0.H, Option]](c0.fh)
typed[Trivial1[c0.T]](c0.ft)
val c1 = IsCCons1[LRepr, ({ type λ[t[_]] = TC3[Option, t] })#λ, Trivial1]
typed[TC3[Option, c1.H]](c1.fh)
typed[Trivial1[c1.T]](c1.ft)
val c2 = IsCCons1[LRepr, Trivial1, ({ type λ[t[_]] = TC3[t, Option] })#λ]
typed[Trivial1[c2.H]](c2.fh)
typed[TC3[c2.T, Option]](c2.ft)
val c3 = IsCCons1[LRepr, Trivial1, ({ type λ[t[_]] = TC3[Option, t] })#λ]
typed[Trivial1[c3.H]](c3.fh)
typed[TC3[Option, c3.T]](c3.ft)
val s0 = Split1[LS, ({ type λ[t[_]] = TC3[t, Option] })#λ, Trivial1]
typed[TC3[s0.O, Option]](s0.fo)
typed[Trivial1[s0.I]](s0.fi)
val s1 = Split1[LS, ({ type λ[t[_]] = TC3[Option, t] })#λ, Trivial1]
typed[TC3[Option, s1.O]](s1.fo)
typed[Trivial1[s1.I]](s1.fi)
val s2 = Split1[LS, Trivial1, ({ type λ[t[_]] = TC3[t, Option] })#λ]
typed[Trivial1[s2.O]](s2.fo)
typed[TC3[s2.I, Option]](s2.fi)
val s3 = Split1[LS, Trivial1, ({ type λ[t[_]] = TC3[Option, t] })#λ]
typed[Trivial1[s3.O]](s3.fo)
typed[TC3[Option, s3.I]](s3.fi)
}
def testPartiallyApplied3: Unit = {
def materialize1[F[_]](implicit gen: Generic1[F, ({ type λ[r[_]] = TC3[r, Option]})#λ]): Unit = ()
def materialize2[F[_]](implicit gen: Generic1[F, ({ type λ[r[_]] = TC3[Option, r]})#λ]): Unit = ()
materialize1[List]
materialize2[List]
def materialize3[F[_]](implicit ihc: IsHCons1[F, Trivial1, ({ type λ[r[_]] = TC3[r, Option]})#λ]): Unit = ()
def materialize4[F[_]](implicit ihc: IsHCons1[F, Trivial1, ({ type λ[r[_]] = TC3[Option, r]})#λ]): Unit = ()
def materialize5[F[_]](implicit ihc: IsHCons1[F, ({ type λ[r[_]] = TC3[r, Option]})#λ, Trivial1]): Unit = ()
def materialize6[F[_]](implicit ihc: IsHCons1[F, ({ type λ[r[_]] = TC3[Option, r]})#λ, Trivial1]): Unit = ()
type H[t] = t :: scala.collection.immutable.List[t] :: HNil
materialize3[H]
materialize4[H]
materialize5[H]
materialize6[H]
def materialize7[F[_]](implicit ihc: IsCCons1[F, Trivial1, ({ type λ[r[_]] = TC3[r, Option]})#λ]): Unit = ()
def materialize8[F[_]](implicit ihc: IsCCons1[F, Trivial1, ({ type λ[r[_]] = TC3[Option, r]})#λ]): Unit = ()
def materialize9[F[_]](implicit ihc: IsCCons1[F, ({ type λ[r[_]] = TC3[r, Option]})#λ, Trivial1]): Unit = ()
def materialize10[F[_]](implicit ihc: IsCCons1[F, ({ type λ[r[_]] = TC3[Option, r]})#λ, Trivial1]): Unit = ()
type C[t] = scala.collection.immutable.::[t] :+: Nil.type :+: CNil
materialize7[C]
materialize8[C]
materialize9[C]
materialize10[C]
def materialize11[F[_]](implicit ihc: Split1[F, Trivial1, ({ type λ[r[_]] = TC3[r, Option]})#λ]): Unit = ()
def materialize12[F[_]](implicit ihc: Split1[F, Trivial1, ({ type λ[r[_]] = TC3[Option, r]})#λ]): Unit = ()
def materialize13[F[_]](implicit ihc: Split1[F, ({ type λ[r[_]] = TC3[r, Option]})#λ, Trivial1]): Unit = ()
def materialize14[F[_]](implicit ihc: Split1[F, ({ type λ[r[_]] = TC3[Option, r]})#λ, Trivial1]): Unit = ()
type S[t] = List[Option[t]]
materialize11[S]
materialize12[S]
materialize13[S]
materialize14[S]
}
@Test
def testCovariance: Unit = {
type L[A] = (A, A) :: List[A] :: HNil
type C[A] = (A, A) :+: List[A] :+: CNil
type N[A] = List[(A, A)]
typed[Generic1[Foo, TC2]](Generic1[Foo, TC2sub])
typed[IsHCons1[L, TC1, TC2]](IsHCons1[L, TC1sub, TC2sub])
typed[IsCCons1[C, TC1, TC2]](IsCCons1[C, TC1sub, TC2sub])
typed[Split1[N, TC1, TC2]](Split1[N, TC1sub, TC2sub])
}
}
object SplitTestDefns {
trait Dummy1[F[_]]
object Dummy1 {
implicit def mkDummy1[F[_]]: Dummy1[F] = new Dummy1[F] {}
}
trait Kleisli[F[_], A, B] extends (A => F[B])
}
class SplitTests {
import SplitTestDefns._
@Test
def testBasics: Unit = {
illTyped("""
Split1[List, Dummy1, Dummy1]
""")
Split1[({ type λ[t] = List[List[t]] })#λ, Dummy1, Dummy1]
Split1[({ type λ[t] = List[List[List[t]]] })#λ, Dummy1, Dummy1]
type LList[T] = List[List[T]]
Split1[LList, Dummy1, Dummy1]
type ListDiag[T] = List[(T, T)]
Split1[ListDiag, Dummy1, Dummy1]
type ListDiagL[T] = List[(T, List[T])]
Split1[ListDiagL, Dummy1, Dummy1]
illTyped("""
Split1[({ type λ[t] = Either[Int, t] })#λ, Dummy1, Dummy1]
""")
illTyped("""
Split1[({ type λ[t] = Either[t, Int] })#λ, Dummy1, Dummy1]
""")
Split1[({ type λ[t] = Either[Int, List[t]] })#λ, Dummy1, Dummy1]
Split1[({ type λ[t] = Either[List[t], Int] })#λ, Dummy1, Dummy1]
type DiagList[T] = (List[T], List[T])
Split1[DiagList, Dummy1, Dummy1]
illTyped("""
Split1[({ type λ[t] = (t, t) })#λ, Dummy1, Dummy1]
""")
type DiDiag[T] = ((T, T), (T, T))
Split1[DiDiag, Dummy1, Dummy1]
illTyped("""
      Split1[({ type λ[t] = Int => t })#λ, Dummy1, Dummy1]
""")
illTyped("""
Split1[({ type λ[t] = t => Int })#λ, Dummy1, Dummy1]
""")
Split1[({ type λ[t] = Int => List[t] })#λ, Dummy1, Dummy1]
Split1[({ type λ[t] = List[t] => Int })#λ, Dummy1, Dummy1]
type HNil1[t] = HNil
type HCons1[t] = t :: HNil
type CNil1[t] = CNil
    type CCons1[t] = t :+: CNil
illTyped("""
Split1[HNil1, Dummy1, Dummy1]
""")
illTyped("""
Split1[HCons1, Dummy1, Dummy1]
""")
illTyped("""
Split1[CNil1, Dummy1, Dummy1]
""")
illTyped("""
Split1[CCons1, Dummy1, Dummy1]
""")
Split1[({ type λ[t] = Kleisli[Id, String, Option[t]] })#λ, Dummy1, Dummy1]
}
}
|
wheaties/shapeless
|
core/src/test/scala/shapeless/generic1.scala
|
Scala
|
apache-2.0
| 19,848 |
package au.id.cxd.math.model.classification
import breeze.linalg.DenseMatrix
/**
* A data object to contain the outputs of Model assessment for
* classification
* Created by cd on 10/05/2016.
*/
class ModelAssessment(val className:String,
val mse: Double,
val loglikelihood: Double,
val beta:DenseMatrix[Double],
val accuracy:Double,
val oddsRatio:Double,
val truePositive:Int,
val falsePositive:Int,
val trueNegative:Int,
val falseNegative:Int,
val dataPositive:Int,
val dataNegative:Int) extends Serializable {
}
object ModelAssessment {
def apply(className:String,
mse: Double,
loglikelihoodDeviance: Double,
beta:DenseMatrix[Double],
accuracy:Double,
oddsRatio:Double,
tp:Int,
fp:Int,
tn:Int,
fn:Int,
dp:Int,
dn:Int) =
new ModelAssessment(className, mse, loglikelihoodDeviance, beta, accuracy, oddsRatio, tp, fp, tn, fn, dp, dn)
}
|
cxd/scala-au.id.cxd.math
|
math/src/main/scala/au/id/cxd/math/model/classification/ModelAssessment.scala
|
Scala
|
mit
| 1,227 |
package org.orbeon.sbt
import sbt.FileFunction.cached
import sbt.FilesInfo.{exists, lastModified}
import sbt.Keys._
import sbt._
object OrbeonSupport {
val MatchScalaJSFileNameFormatRE = """((.+)-(fastopt|opt)).js""".r
  val MatchJarNameRE = """(.+)\.jar""".r
  val MatchRawJarNameRE = """([^_]+)(?:_.*)?\.jar""".r
def dummyDependency(value: Any) = ()
// This is copied from the sbt source but doesn't seem to be exported publicly
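  // It gathers the files directly under `base` that match the filter, plus all matching
  // descendants of `base/<config name>`, and exposes them as a Classpath.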
def myFindUnmanagedJars(config: Configuration, base: File, filter: FileFilter, excl: FileFilter): Classpath = {
(base * (filter -- excl) +++ (base / config.name).descendantsExcept(filter, excl)).classpath
}
val FileIsMinifiedVersionFilter = new SimpleFileFilter(f ⇒ {
val path = f.absolutePath
val prefix = path.substring(0, path.length - ".js".length)
def endsWithMin = Seq("-min.js", ".min.js") exists path.endsWith
def existsSource = new File(prefix + "_src.js").exists
endsWithMin || existsSource
}
)
val FileHasNoMinifiedVersionFilter = new SimpleFileFilter(f ⇒ {
val path = f.absolutePath
val prefix = path.substring(0, path.length - ".js".length)
def hasNoMin = Seq("-min.js", ".min.js") forall (suffix ⇒ ! new File(prefix + suffix).exists)
def isNotSourceWithMin = ! (path.endsWith("_src.js") && new File(path.substring(0, path.length - "_src.js".length) + ".js").exists)
hasNoMin && isNotSourceWithMin
}
)
def copyJarFile(sourceJarFile: File, destination: String, excludes: String ⇒ Boolean, matchRawJarName: Boolean) = {
val sourceJarNameOpt = Some(sourceJarFile.name) collect {
case MatchRawJarNameRE(name) if matchRawJarName ⇒ name
case MatchJarNameRE(name) ⇒ name
}
sourceJarNameOpt flatMap { sourceJarName ⇒
val targetJarFile = new File(destination + '/' + sourceJarName + ".jar")
if (! sourceJarFile.name.contains("_sjs") &&
! excludes(sourceJarName) &&
(! targetJarFile.exists || sourceJarFile.lastModified > targetJarFile.lastModified)) {
println(s"Copying JAR ${sourceJarFile.name} to ${targetJarFile.absolutePath}.")
IO.copy(List(sourceJarFile → targetJarFile), overwrite = false, preserveLastModified = false)
Some(targetJarFile)
} else {
None
}
}
}
}
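// Illustrative sketch (assumption, not part of the original build code): `copyJarFile`
// copies a JAR into a flat destination directory, stripping any Scala version suffix when
// `matchRawJarName` is true, e.g. (hypothetical names)
//   copyJarFile(new File("some-module_2.12-1.0.jar"), "build/lib", Set("excluded"), matchRawJarName = true)
// would produce build/lib/some-module.jar, while Scala.js "_sjs" artifacts, excluded names,
// and up-to-date targets are skipped.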
// Custom version of `xsbt-web-plugin`'s `WebappPlugin` by Earl Douglas under BSD-3-Clause-license
object OrbeonWebappPlugin {
import OrbeonSupport._
lazy val webappPrepare = taskKey[Seq[(File, String)]]("prepare webapp contents for packaging")
lazy val webappPostProcess = taskKey[File ⇒ Unit]("additional task after preparing the webapp")
lazy val webappWebInfClasses = settingKey[Boolean]("use WEB-INF/classes instead of WEB-INF/lib")
def projectSettings: Seq[Setting[_]] =
Seq(
sourceDirectory in webappPrepare := (sourceDirectory in Compile).value / "webapp",
target in webappPrepare := (target in Compile).value / "webapp",
webappPrepare := webappPrepareTask.value,
webappPostProcess := { _ ⇒ () },
webappWebInfClasses := false,
watchSources ++= ((sourceDirectory in webappPrepare).value ** "*").get
) ++
Defaults.packageTaskSettings(Keys.`package`, webappPrepare)
private def webappPrepareTask = Def.task {
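    // cacheify wraps sbt's cached FileFunction: it tracks lastModified/exists changes under
    // the plugin's cache directory, deletes outputs whose inputs were removed, copies
    // added/modified inputs to their destination, and returns the set of files written.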
def cacheify(name: String, dest: File ⇒ Option[File], in: Set[File]): Set[File] =
cached(streams.value.cacheDirectory / "xsbt-orbeon-web-plugin" / name)(lastModified, exists)({
(inChanges, outChanges) ⇒
// toss out removed files
for {
removed ← inChanges.removed
toRemove ← dest(removed)
} locally {
IO.delete(toRemove)
}
// apply and report changes
for {
in ← inChanges.added ++ inChanges.modified -- inChanges.removed
out ← dest(in)
_ = IO.copyFile(in, out)
} yield
out
}).apply(in)
val webappSrcDir = (sourceDirectory in webappPrepare).value
val webappTarget = (target in webappPrepare).value
val isDevelopmentMode = webappSrcDir.getAbsolutePath.equals(webappTarget.getAbsolutePath)
val classpath = (fullClasspath in Runtime).value
val webInfDir = webappTarget / "WEB-INF"
val webappLibDir = webInfDir / "lib"
if (! isDevelopmentMode) {
cacheify(
"webapp",
{ in ⇒
for {
f ← Some(in)
if !f.isDirectory
r ← IO.relativizeFile(webappSrcDir, f)
} yield
IO.resolve(webappTarget, r)
},
(webappSrcDir ** "*").get.toSet
)
}
val thisArtifact = (packagedArtifact in (Compile, packageBin)).value._1
    // The following is largely the result of trial and error. We assume `exportJars := true` in projects. `fullClasspath` then contains
// only JAR files. From there, we collect those which are "artifacts". This includes our own artifacts, but also managed
// dependencies (but not unmanaged ones it seems). To discriminate, we find that our own artifacts contain the `Compile`
// configuration.
val onlyJars =
for {
item ← classpath.toList
if ! item.data.isDirectory
} yield
item
val candidates =
for {
item ← onlyJars
artifactOpt = item.metadata.entries collectFirst {
case AttributeEntry(key, value: Artifact) if value.configurations.to[Set].contains(Compile) ⇒ value
}
} yield
item → artifactOpt
val (compiled, notCompiled) =
candidates.partition(_._2.isDefined)
for {
(item, artifactOpt) ← compiled
artifact ← artifactOpt
if artifact != thisArtifact
} locally {
IO.copyFile(item.data, webappLibDir / (artifact.name + ".jar"))
}
val providedClasspath =
myFindUnmanagedJars(Provided,
unmanagedBase.value,
(includeFilter in unmanagedJars).value,
(excludeFilter in unmanagedJars).value
)
val providedJars = providedClasspath.to[List].map(_.data).to[Set]
cacheify(
"lib-deps",
{ in ⇒ Some(webappTarget / "WEB-INF" / "lib" / in.getName) },
// Include non-compiled dependencies but exclude "provided" JARs
notCompiled.map(_._1.data).to[Set] -- providedJars
)
if (isDevelopmentMode) {
streams.value.log.info("starting server in development mode, postProcess not available!")
} else {
webappPostProcess.value(webappTarget)
}
(webappTarget ** "*") pair (relativeTo(webappTarget) | flat)
}
}
|
brunobuzzi/orbeon-forms
|
project/Project.scala
|
Scala
|
lgpl-2.1
| 6,830 |
package org.aguo.civsim.controller
import org.aguo.civsim.model.World
import org.aguo.civsim.view._
object HelpController {
def handleInput(input: String, world: World): World = input match {
case "view" => HelpViewScreen.render(world)
case "examine" => HelpExamineScreen.render(world)
case "jobs" => HelpJobsScreen.render(world)
case _ => UnknownScreen.render(world)
}
}
|
aguo777/civ-sim
|
src/main/scala/org/aguo/civsim/controller/HelpController.scala
|
Scala
|
mit
| 394 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio.ByteBuffer
import java.nio.channels.GatheringByteChannel
import kafka.common.{TopicAndPartition, ErrorMapping}
import kafka.message.{MessageSet, ByteBufferMessageSet}
import kafka.network.{MultiSend, Send}
import kafka.api.ApiUtils._
object FetchResponsePartitionData {
def readFrom(buffer: ByteBuffer): FetchResponsePartitionData = {
val error = buffer.getShort
val hw = buffer.getLong
val messageSetSize = buffer.getInt
val messageSetBuffer = buffer.slice()
messageSetBuffer.limit(messageSetSize)
buffer.position(buffer.position + messageSetSize)
new FetchResponsePartitionData(error, hw, new ByteBufferMessageSet(messageSetBuffer))
}
val headerSize =
2 + /* error code */
8 + /* high watermark */
4 /* messageSetSize */
}
case class FetchResponsePartitionData(error: Short = ErrorMapping.NoError, hw: Long = -1L, messages: MessageSet) {
val sizeInBytes = FetchResponsePartitionData.headerSize + messages.sizeInBytes
def this(messages: MessageSet) = this(ErrorMapping.NoError, -1L, messages)
}
// SENDS
class PartitionDataSend(val partitionId: Int,
val partitionData: FetchResponsePartitionData) extends Send {
private val messageSize = partitionData.messages.sizeInBytes
private var messagesSentSize = 0
private val buffer = ByteBuffer.allocate( 4 /** partitionId **/ + FetchResponsePartitionData.headerSize)
buffer.putInt(partitionId)
buffer.putShort(partitionData.error)
buffer.putLong(partitionData.hw)
buffer.putInt(partitionData.messages.sizeInBytes)
buffer.rewind()
override def complete = !buffer.hasRemaining && messagesSentSize >= messageSize
override def writeTo(channel: GatheringByteChannel): Int = {
var written = 0
if(buffer.hasRemaining)
written += channel.write(buffer)
if(!buffer.hasRemaining && messagesSentSize < messageSize) {
val bytesSent = partitionData.messages.writeTo(channel, messagesSentSize, messageSize - messagesSentSize)
messagesSentSize += bytesSent
written += bytesSent
}
written
}
}
object TopicData {
def readFrom(buffer: ByteBuffer): TopicData = {
val topic = readShortString(buffer)
val partitionCount = buffer.getInt
val topicPartitionDataPairs = (1 to partitionCount).map(_ => {
val partitionId = buffer.getInt
val partitionData = FetchResponsePartitionData.readFrom(buffer)
(partitionId, partitionData)
})
TopicData(topic, Map(topicPartitionDataPairs:_*))
}
def headerSize(topic: String) =
shortStringLength(topic) +
4 /* partition count */
}
case class TopicData(topic: String, partitionData: Map[Int, FetchResponsePartitionData]) {
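  // Each partition entry is preceded by a 4-byte partition id on the wire, hence the `+ 4`
  // added per partition below (matching the buffer layout in PartitionDataSend).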
val sizeInBytes =
TopicData.headerSize(topic) + partitionData.values.foldLeft(0)(_ + _.sizeInBytes + 4)
val headerSize = TopicData.headerSize(topic)
}
class TopicDataSend(val topicData: TopicData) extends Send {
private val size = topicData.sizeInBytes
private var sent = 0
override def complete = sent >= size
private val buffer = ByteBuffer.allocate(topicData.headerSize)
writeShortString(buffer, topicData.topic)
buffer.putInt(topicData.partitionData.size)
buffer.rewind()
val sends = new MultiSend(topicData.partitionData.toList
.map(d => new PartitionDataSend(d._1, d._2))) {
val expectedBytesToWrite = topicData.sizeInBytes - topicData.headerSize
}
def writeTo(channel: GatheringByteChannel): Int = {
expectIncomplete()
var written = 0
if(buffer.hasRemaining)
written += channel.write(buffer)
if(!buffer.hasRemaining && !sends.complete) {
written += sends.writeCompletely(channel)
}
sent += written
written
}
}
object FetchResponse {
val headerSize =
4 + /* correlationId */
4 /* topic count */
def readFrom(buffer: ByteBuffer): FetchResponse = {
val correlationId = buffer.getInt
val topicCount = buffer.getInt
val pairs = (1 to topicCount).flatMap(_ => {
val topicData = TopicData.readFrom(buffer)
topicData.partitionData.map {
case (partitionId, partitionData) =>
(TopicAndPartition(topicData.topic, partitionId), partitionData)
}
})
FetchResponse(correlationId, Map(pairs:_*))
}
}
case class FetchResponse(correlationId: Int,
data: Map[TopicAndPartition, FetchResponsePartitionData]) {
/**
* Partitions the data into a map of maps (one for each topic).
*/
lazy val dataGroupedByTopic = data.groupBy(_._1.topic)
val sizeInBytes =
FetchResponse.headerSize +
dataGroupedByTopic.foldLeft(0) ((folded, curr) => {
val topicData = TopicData(curr._1, curr._2.map {
case (topicAndPartition, partitionData) => (topicAndPartition.partition, partitionData)
})
folded + topicData.sizeInBytes
})
private def partitionDataFor(topic: String, partition: Int): FetchResponsePartitionData = {
val topicAndPartition = TopicAndPartition(topic, partition)
data.get(topicAndPartition) match {
case Some(partitionData) => partitionData
case _ =>
throw new IllegalArgumentException(
"No partition %s in fetch response %s".format(topicAndPartition, this.toString))
}
}
def messageSet(topic: String, partition: Int): ByteBufferMessageSet =
partitionDataFor(topic, partition).messages.asInstanceOf[ByteBufferMessageSet]
def highWatermark(topic: String, partition: Int) = partitionDataFor(topic, partition).hw
def hasError = data.values.exists(_.error != ErrorMapping.NoError)
def errorCode(topic: String, partition: Int) = partitionDataFor(topic, partition).error
}
class FetchResponseSend(val fetchResponse: FetchResponse) extends Send {
private val size = fetchResponse.sizeInBytes
private var sent = 0
private val sendSize = 4 /* for size */ + size
override def complete = sent >= sendSize
private val buffer = ByteBuffer.allocate(4 /* for size */ + FetchResponse.headerSize)
buffer.putInt(size)
buffer.putInt(fetchResponse.correlationId)
buffer.putInt(fetchResponse.dataGroupedByTopic.size) // topic count
buffer.rewind()
val sends = new MultiSend(fetchResponse.dataGroupedByTopic.toList.map {
case(topic, data) => new TopicDataSend(TopicData(topic,
data.map{case(topicAndPartition, message) => (topicAndPartition.partition, message)}))
}) {
val expectedBytesToWrite = fetchResponse.sizeInBytes - FetchResponse.headerSize
}
def writeTo(channel: GatheringByteChannel):Int = {
expectIncomplete()
var written = 0
if(buffer.hasRemaining)
written += channel.write(buffer)
if(!buffer.hasRemaining && !sends.complete) {
written += sends.writeCompletely(channel)
}
sent += written
written
}
}
|
dchenbecker/kafka-sbt
|
core/src/main/scala/kafka/api/FetchResponse.scala
|
Scala
|
apache-2.0
| 7,667 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.vectorized
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.expressions.SpecificInternalRow
import org.apache.spark.sql.execution.columnar.ColumnAccessor
import org.apache.spark.sql.execution.columnar.compression.ColumnBuilderHelper
import org.apache.spark.sql.types._
import org.apache.spark.sql.vectorized.ColumnarArray
import org.apache.spark.unsafe.types.UTF8String
class ColumnVectorSuite extends SparkFunSuite with BeforeAndAfterEach {
private def withVector(
vector: WritableColumnVector)(
block: WritableColumnVector => Unit): Unit = {
try block(vector) finally vector.close()
}
private def withVectors(
size: Int,
dt: DataType)(
block: WritableColumnVector => Unit): Unit = {
withVector(new OnHeapColumnVector(size, dt))(block)
withVector(new OffHeapColumnVector(size, dt))(block)
}
private def testVectors(
name: String,
size: Int,
dt: DataType)(
block: WritableColumnVector => Unit): Unit = {
test(name) {
withVectors(size, dt)(block)
}
}
testVectors("boolean", 10, BooleanType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendBoolean(i % 2 == 0)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, BooleanType) === (i % 2 == 0))
assert(arrayCopy.get(i, BooleanType) === (i % 2 == 0))
}
}
testVectors("byte", 10, ByteType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendByte(i.toByte)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, ByteType) === i.toByte)
assert(arrayCopy.get(i, ByteType) === i.toByte)
}
}
testVectors("short", 10, ShortType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendShort(i.toShort)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, ShortType) === i.toShort)
assert(arrayCopy.get(i, ShortType) === i.toShort)
}
}
testVectors("int", 10, IntegerType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendInt(i)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, IntegerType) === i)
assert(arrayCopy.get(i, IntegerType) === i)
}
}
testVectors("date", 10, DateType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendInt(i)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, DateType) === i)
assert(arrayCopy.get(i, DateType) === i)
}
}
testVectors("long", 10, LongType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendLong(i)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, LongType) === i)
assert(arrayCopy.get(i, LongType) === i)
}
}
testVectors("timestamp", 10, TimestampType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendLong(i)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, TimestampType) === i)
assert(arrayCopy.get(i, TimestampType) === i)
}
}
testVectors("float", 10, FloatType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendFloat(i.toFloat)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, FloatType) === i.toFloat)
assert(arrayCopy.get(i, FloatType) === i.toFloat)
}
}
testVectors("double", 10, DoubleType) { testVector =>
(0 until 10).foreach { i =>
testVector.appendDouble(i.toDouble)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, DoubleType) === i.toDouble)
assert(arrayCopy.get(i, DoubleType) === i.toDouble)
}
}
testVectors("string", 10, StringType) { testVector =>
(0 until 10).map { i =>
val utf8 = s"str$i".getBytes("utf8")
testVector.appendByteArray(utf8, 0, utf8.length)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, StringType) === UTF8String.fromString(s"str$i"))
assert(arrayCopy.get(i, StringType) === UTF8String.fromString(s"str$i"))
}
}
testVectors("binary", 10, BinaryType) { testVector =>
(0 until 10).map { i =>
val utf8 = s"str$i".getBytes("utf8")
testVector.appendByteArray(utf8, 0, utf8.length)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
val utf8 = s"str$i".getBytes("utf8")
assert(array.get(i, BinaryType) === utf8)
assert(arrayCopy.get(i, BinaryType) === utf8)
}
}
DataTypeTestUtils.yearMonthIntervalTypes.foreach {
dt =>
testVectors(dt.typeName,
10,
dt) { testVector =>
(0 until 10).foreach { i =>
testVector.appendInt(i)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, dt) === i)
assert(arrayCopy.get(i, dt) === i)
}
}
}
DataTypeTestUtils.dayTimeIntervalTypes.foreach {
dt =>
testVectors(dt.typeName,
10,
dt) { testVector =>
(0 until 10).foreach { i =>
testVector.appendLong(i)
}
val array = new ColumnarArray(testVector, 0, 10)
val arrayCopy = array.copy()
(0 until 10).foreach { i =>
assert(array.get(i, dt) === i)
assert(arrayCopy.get(i, dt) === i)
}
}
}
testVectors("mutable ColumnarRow", 10, IntegerType) { testVector =>
val mutableRow = new MutableColumnarRow(Array(testVector))
(0 until 10).foreach { i =>
mutableRow.rowId = i
mutableRow.setInt(0, 10 - i)
}
(0 until 10).foreach { i =>
mutableRow.rowId = i
assert(mutableRow.getInt(0) === (10 - i))
}
}
val arrayType: ArrayType = ArrayType(IntegerType, containsNull = true)
testVectors("array", 10, arrayType) { testVector =>
val data = testVector.arrayData()
var i = 0
while (i < 6) {
data.putInt(i, i)
i += 1
}
// Populate it with arrays [0], [1, 2], [], [3, 4, 5]
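    // putArray(rowId, offset, length) points each row at a slice of arrayData():
    // row 0 -> offset 0 length 1, row 1 -> offset 1 length 2, row 2 is empty, row 3 -> offset 3 length 3.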
testVector.putArray(0, 0, 1)
testVector.putArray(1, 1, 2)
testVector.putArray(2, 3, 0)
testVector.putArray(3, 3, 3)
assert(testVector.getArray(0).toIntArray() === Array(0))
assert(testVector.getArray(1).toIntArray() === Array(1, 2))
assert(testVector.getArray(2).toIntArray() === Array.empty[Int])
assert(testVector.getArray(3).toIntArray() === Array(3, 4, 5))
}
testVectors("SPARK-35898: array append", 1, arrayType) { testVector =>
// Populate it with arrays [0], [1, 2], [], [3, 4, 5]
val data = testVector.arrayData()
testVector.appendArray(1)
data.appendInt(0)
testVector.appendArray(2)
data.appendInt(1)
data.appendInt(2)
testVector.appendArray(0)
testVector.appendArray(3)
data.appendInt(3)
data.appendInt(4)
data.appendInt(5)
assert(testVector.getArray(0).toIntArray === Array(0))
assert(testVector.getArray(1).toIntArray === Array(1, 2))
assert(testVector.getArray(2).toIntArray === Array.empty[Int])
assert(testVector.getArray(3).toIntArray === Array(3, 4, 5))
}
val mapType: MapType = MapType(IntegerType, StringType)
testVectors("SPARK-35898: map", 5, mapType) { testVector =>
val keys = testVector.getChild(0)
val values = testVector.getChild(1)
var i = 0
while (i < 6) {
keys.appendInt(i)
val utf8 = s"str$i".getBytes("utf8")
values.appendByteArray(utf8, 0, utf8.length)
i += 1
}
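    // For a map vector, putArray(rowId, offset, length) indexes the same slice of both the
    // key and value child vectors, so row 1 below covers entries 1..2 of keys and values alike.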
testVector.putArray(0, 0, 1)
testVector.putArray(1, 1, 2)
testVector.putArray(2, 3, 0)
testVector.putArray(3, 3, 3)
assert(testVector.getMap(0).keyArray().toIntArray === Array(0))
assert(testVector.getMap(0).valueArray().toArray[UTF8String](StringType) ===
Array(UTF8String.fromString(s"str0")))
assert(testVector.getMap(1).keyArray().toIntArray === Array(1, 2))
assert(testVector.getMap(1).valueArray().toArray[UTF8String](StringType) ===
(1 to 2).map(i => UTF8String.fromString(s"str$i")).toArray)
assert(testVector.getMap(2).keyArray().toIntArray === Array.empty[Int])
assert(testVector.getMap(2).valueArray().toArray[UTF8String](StringType) ===
Array.empty[UTF8String])
assert(testVector.getMap(3).keyArray().toIntArray === Array(3, 4, 5))
assert(testVector.getMap(3).valueArray().toArray[UTF8String](StringType) ===
(3 to 5).map(i => UTF8String.fromString(s"str$i")).toArray)
}
testVectors("SPARK-35898: map append", 1, mapType) { testVector =>
val keys = testVector.getChild(0)
val values = testVector.getChild(1)
def appendPair(i: Int): Unit = {
keys.appendInt(i)
val utf8 = s"str$i".getBytes("utf8")
values.appendByteArray(utf8, 0, utf8.length)
}
// Populate it with the maps [0 -> str0], [1 -> str1, 2 -> str2], [],
// [3 -> str3, 4 -> str4, 5 -> str5]
testVector.appendArray(1)
appendPair(0)
testVector.appendArray(2)
appendPair(1)
appendPair(2)
testVector.appendArray(0)
testVector.appendArray(3)
appendPair(3)
appendPair(4)
appendPair(5)
assert(testVector.getMap(0).keyArray().toIntArray === Array(0))
assert(testVector.getMap(0).valueArray().toArray[UTF8String](StringType) ===
Array(UTF8String.fromString(s"str0")))
assert(testVector.getMap(1).keyArray().toIntArray === Array(1, 2))
assert(testVector.getMap(1).valueArray().toArray[UTF8String](StringType) ===
(1 to 2).map(i => UTF8String.fromString(s"str$i")).toArray)
assert(testVector.getMap(2).keyArray().toIntArray === Array.empty[Int])
assert(testVector.getMap(2).valueArray().toArray[UTF8String](StringType) ===
Array.empty[UTF8String])
assert(testVector.getMap(3).keyArray().toIntArray === Array(3, 4, 5))
assert(testVector.getMap(3).valueArray().toArray[UTF8String](StringType) ===
(3 to 5).map(i => UTF8String.fromString(s"str$i")).toArray)
}
val structType: StructType = new StructType().add("int", IntegerType).add("double", DoubleType)
testVectors("struct", 10, structType) { testVector =>
val c1 = testVector.getChild(0)
val c2 = testVector.getChild(1)
c1.putInt(0, 123)
c2.putDouble(0, 3.45)
c1.putInt(1, 456)
c2.putDouble(1, 5.67)
assert(testVector.getStruct(0).get(0, IntegerType) === 123)
assert(testVector.getStruct(0).get(1, DoubleType) === 3.45)
assert(testVector.getStruct(1).get(0, IntegerType) === 456)
assert(testVector.getStruct(1).get(1, DoubleType) === 5.67)
}
test("[SPARK-22092] off-heap column vector reallocation corrupts array data") {
withVector(new OffHeapColumnVector(8, arrayType)) { testVector =>
val data = testVector.arrayData()
(0 until 8).foreach(i => data.putInt(i, i))
(0 until 8).foreach(i => testVector.putArray(i, i, 1))
// Increase vector's capacity and reallocate the data to new bigger buffers.
testVector.reserve(16)
// Check that none of the values got lost/overwritten.
(0 until 8).foreach { i =>
assert(testVector.getArray(i).toIntArray() === Array(i))
}
}
}
test("[SPARK-22092] off-heap column vector reallocation corrupts struct nullability") {
withVector(new OffHeapColumnVector(8, structType)) { testVector =>
(0 until 8).foreach(i => if (i % 2 == 0) testVector.putNull(i) else testVector.putNotNull(i))
testVector.reserve(16)
(0 until 8).foreach(i => assert(testVector.isNullAt(i) == (i % 2 == 0)))
}
}
test("CachedBatch boolean Apis") {
val dataType = BooleanType
val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
val row = new SpecificInternalRow(Array(dataType))
row.setNullAt(0)
columnBuilder.appendFrom(row, 0)
for (i <- 1 until 16) {
row.setBoolean(0, i % 2 == 0)
columnBuilder.appendFrom(row, 0)
}
withVectors(16, dataType) { testVector =>
val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
ColumnAccessor.decompress(columnAccessor, testVector, 16)
assert(testVector.isNullAt(0))
for (i <- 1 until 16) {
assert(testVector.isNullAt(i) == false)
assert(testVector.getBoolean(i) == (i % 2 == 0))
}
}
}
test("CachedBatch byte Apis") {
val dataType = ByteType
val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
val row = new SpecificInternalRow(Array(dataType))
row.setNullAt(0)
columnBuilder.appendFrom(row, 0)
for (i <- 1 until 16) {
row.setByte(0, i.toByte)
columnBuilder.appendFrom(row, 0)
}
withVectors(16, dataType) { testVector =>
val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
ColumnAccessor.decompress(columnAccessor, testVector, 16)
assert(testVector.isNullAt(0))
for (i <- 1 until 16) {
assert(testVector.isNullAt(i) == false)
assert(testVector.getByte(i) == i)
}
}
}
test("CachedBatch short Apis") {
val dataType = ShortType
val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
val row = new SpecificInternalRow(Array(dataType))
row.setNullAt(0)
columnBuilder.appendFrom(row, 0)
for (i <- 1 until 16) {
row.setShort(0, i.toShort)
columnBuilder.appendFrom(row, 0)
}
withVectors(16, dataType) { testVector =>
val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
ColumnAccessor.decompress(columnAccessor, testVector, 16)
assert(testVector.isNullAt(0))
for (i <- 1 until 16) {
assert(testVector.isNullAt(i) == false)
assert(testVector.getShort(i) == i)
}
}
}
test("CachedBatch int Apis") {
val dataType = IntegerType
val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
val row = new SpecificInternalRow(Array(dataType))
row.setNullAt(0)
columnBuilder.appendFrom(row, 0)
for (i <- 1 until 16) {
row.setInt(0, i)
columnBuilder.appendFrom(row, 0)
}
withVectors(16, dataType) { testVector =>
val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
ColumnAccessor.decompress(columnAccessor, testVector, 16)
assert(testVector.isNullAt(0))
for (i <- 1 until 16) {
assert(testVector.isNullAt(i) == false)
assert(testVector.getInt(i) == i)
}
}
}
test("CachedBatch long Apis") {
val dataType = LongType
val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
val row = new SpecificInternalRow(Array(dataType))
row.setNullAt(0)
columnBuilder.appendFrom(row, 0)
for (i <- 1 until 16) {
row.setLong(0, i.toLong)
columnBuilder.appendFrom(row, 0)
}
withVectors(16, dataType) { testVector =>
val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
ColumnAccessor.decompress(columnAccessor, testVector, 16)
assert(testVector.isNullAt(0))
for (i <- 1 until 16) {
assert(testVector.isNullAt(i) == false)
assert(testVector.getLong(i) == i.toLong)
}
}
}
test("CachedBatch float Apis") {
val dataType = FloatType
val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
val row = new SpecificInternalRow(Array(dataType))
row.setNullAt(0)
columnBuilder.appendFrom(row, 0)
for (i <- 1 until 16) {
row.setFloat(0, i.toFloat)
columnBuilder.appendFrom(row, 0)
}
withVectors(16, dataType) { testVector =>
val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
ColumnAccessor.decompress(columnAccessor, testVector, 16)
assert(testVector.isNullAt(0))
for (i <- 1 until 16) {
assert(testVector.isNullAt(i) == false)
assert(testVector.getFloat(i) == i.toFloat)
}
}
}
test("CachedBatch double Apis") {
val dataType = DoubleType
val columnBuilder = ColumnBuilderHelper(dataType, 1024, "col", true)
val row = new SpecificInternalRow(Array(dataType))
row.setNullAt(0)
columnBuilder.appendFrom(row, 0)
for (i <- 1 until 16) {
row.setDouble(0, i.toDouble)
columnBuilder.appendFrom(row, 0)
}
withVectors(16, dataType) { testVector =>
val columnAccessor = ColumnAccessor(dataType, columnBuilder.build)
ColumnAccessor.decompress(columnAccessor, testVector, 16)
assert(testVector.isNullAt(0))
for (i <- 1 until 16) {
assert(testVector.isNullAt(i) == false)
assert(testVector.getDouble(i) == i.toDouble)
}
}
}
DataTypeTestUtils.yearMonthIntervalTypes.foreach { dt =>
val structType = new StructType().add(dt.typeName, dt)
testVectors("ColumnarRow " + dt.typeName, 10, structType) { v =>
val column = v.getChild(0)
(0 until 10).foreach { i =>
column.putInt(i, i)
}
(0 until 10).foreach { i =>
val row = v.getStruct(i)
val rowCopy = row.copy()
assert(row.get(0, dt) === i)
assert(rowCopy.get(0, dt) === i)
}
}
}
DataTypeTestUtils.dayTimeIntervalTypes.foreach { dt =>
val structType = new StructType().add(dt.typeName, dt)
testVectors("ColumnarRow " + dt.typeName, 10, structType) { v =>
val column = v.getChild(0)
(0 until 10).foreach { i =>
column.putLong(i, i)
}
(0 until 10).foreach { i =>
val row = v.getStruct(i)
val rowCopy = row.copy()
assert(row.get(0, dt) === i)
assert(rowCopy.get(0, dt) === i)
}
}
}
}
|
holdenk/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/vectorized/ColumnVectorSuite.scala
|
Scala
|
apache-2.0
| 19,205 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.config.base.traits
import com.datamountaineer.streamreactor.connect.config.base.const.TraitConfigConst.RETRY_INTERVAL_PROP_SUFFIX
trait RetryIntervalSettings extends BaseSettings {
val retryIntervalConstant: String = s"$connectorPrefix.$RETRY_INTERVAL_PROP_SUFFIX"
def getRetryInterval: Int = getInt(retryIntervalConstant)
}
|
datamountaineer/kafka-connect-common
|
src/main/scala/com/datamountaineer/streamreactor/connect/config/base/traits/RetryIntervalSettings.scala
|
Scala
|
apache-2.0
| 989 |
/* Copyright 2014 UniCredit S.p.A.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.unicredit.reactive_aerospike.model
import eu.unicredit.reactive_aerospike.data.AerospikeKey
import eu.unicredit.reactive_aerospike.data.AerospikeValue._
import eu.unicredit.reactive_aerospike.model.experimental._
import eu.unicredit.reactive_aerospike.data.{ AerospikeKey, AerospikeBinProto, AerospikeRecord }
import play.api.libs.json._ // JSON library
import play.api.libs.json.Reads._ // Custom validation helpers
import play.api.libs.functional.syntax._ //
import scala.reflect.ClassTag
object JsonMailDao extends Dao[String, Mail] {
val namespace = "test"
val setName = "mails"
def getKeyDigest(obj: Mail): Array[Byte] =
Dao.macroKeyDigest[Mail](obj)
val objWrite: Seq[AerospikeBinProto[Mail, _]] =
Dao.macroObjWrite[Mail]
val objRead: (AerospikeKey[String], AerospikeRecord) => Mail =
Dao.macroObjRead[Mail][String]
implicit def aerospikeKeyWrites[T: ClassTag]: Writes[AerospikeKey[T]] = new Writes[AerospikeKey[T]] {
def writes(ak: AerospikeKey[T]) =
JsArray(ak.digest.map(b => Json.toJson(b)).toList)
}
implicit def AerospikeKeyReads[T: ClassTag](implicit keyConverter: AerospikeValueConverter[T]): Reads[AerospikeKey[T]] = new Reads[AerospikeKey[T]] {
def reads(json: JsValue) =
try {
new JsSuccess(
AerospikeKey(namespace, setName, json.validate[List[Byte]].map(_.toArray).get)(keyConverter))
} catch {
case err: Throwable => new JsError(Seq()) //To be improved
}
}
val mailJsonReads = Json.reads[Mail]
val mailJsonWrites = Json.writes[Mail]
}
|
unicredit/ReactiveAerospike
|
src/test/scala/eu/unicredit/reactive_aerospike/model/JsonMailDao.scala
|
Scala
|
apache-2.0
| 2,150 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.calcite.rex.RexNode
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.DataSet
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.codegen.{FunctionCodeGenerator, GeneratedFunction}
import org.apache.flink.table.plan.nodes.CommonScan
import org.apache.flink.table.plan.schema.RowSchema
import org.apache.flink.table.runtime.MapRunner
import org.apache.flink.table.typeutils.TimeIndicatorTypeInfo
import org.apache.flink.types.Row
trait BatchScan extends CommonScan[Row] with DataSetRel {
protected def convertToInternalRow[T](
schema: RowSchema,
input: DataSet[T],
fieldIdxs: Array[Int],
config: TableConfig,
rowtimeExpression: Option[RexNode]): DataSet[Row] = {
val inputType = input.getType
val internalType = schema.typeInfo
val hasTimeIndicator = fieldIdxs.exists(f =>
f == TimeIndicatorTypeInfo.ROWTIME_BATCH_MARKER ||
f == TimeIndicatorTypeInfo.PROCTIME_BATCH_MARKER)
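    // A generated MapFunction is only needed when the physical input type differs from the
    // internal row type or when rowtime/proctime indicator fields have to be materialized.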
// conversion
if (inputType != internalType || hasTimeIndicator) {
val function = generateConversionMapper(
config,
inputType,
internalType,
"DataSetSourceConversion",
schema.fieldNames,
fieldIdxs,
rowtimeExpression)
val runner = new MapRunner[T, Row](
function.name,
function.code,
function.returnType)
val opName = s"from: (${schema.fieldNames.mkString(", ")})"
input.map(runner).name(opName)
}
// no conversion necessary, forward
else {
input.asInstanceOf[DataSet[Row]]
}
}
private def generateConversionMapper(
config: TableConfig,
inputType: TypeInformation[_],
outputType: TypeInformation[Row],
conversionOperatorName: String,
fieldNames: Seq[String],
inputFieldMapping: Array[Int],
rowtimeExpression: Option[RexNode]): GeneratedFunction[MapFunction[_, Row], Row] = {
val generator = new FunctionCodeGenerator(
config,
false,
inputType,
None,
Some(inputFieldMapping))
val conversion = generator.generateConverterResultExpression(
outputType,
fieldNames,
rowtimeExpression)
val body =
s"""
|${conversion.code}
|return ${conversion.resultTerm};
|""".stripMargin
generator.generateFunction(
"DataSetSourceConversion",
classOf[MapFunction[_, Row]],
body,
outputType)
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/nodes/dataset/BatchScan.scala
|
Scala
|
apache-2.0
| 3,419 |
package com.lightning.walletapp.test
import java.nio.ByteOrder
import fr.acinq.bitcoin.Protocol
import scodec.bits.ByteVector
class FeaturesSpec {
import com.lightning.walletapp.ln.Features._
def allTests = {
{
println("features compatibility")
assert(isNodeSupported(Protocol.writeUInt64(1L << OPTION_DATA_LOSS_PROTECT_OPTIONAL, ByteOrder.BIG_ENDIAN)))
assert(!isNodeSupported(ByteVector.fromValidHex("14")))
assert(!isNodeSupported(ByteVector.fromValidHex("0141")))
      assert(isNodeSupported(Protocol.writeUInt64(1L << VARIABLE_LENGTH_ONION_OPTIONAL, ByteOrder.BIG_ENDIAN)))
assert(!isNodeSupported(ByteVector.fromValidHex("14")))
assert(!isNodeSupported(ByteVector.fromValidHex("0141")))
}
{
assert(isNodeSupported(ByteVector.fromLong(1L << OPTION_DATA_LOSS_PROTECT_MANDATORY)))
assert(isNodeSupported(ByteVector.fromLong(1L << OPTION_DATA_LOSS_PROTECT_OPTIONAL)))
assert(isNodeSupported(ByteVector.fromLong(1L << VARIABLE_LENGTH_ONION_OPTIONAL)))
assert(isNodeSupported(ByteVector.fromLong(1L << VARIABLE_LENGTH_ONION_MANDATORY)))
assert(isNodeSupported(ByteVector.fromLong(1L << PAYMENT_SECRET_MANDATORY)))
assert(isNodeSupported(ByteVector.fromLong(1L << PAYMENT_SECRET_OPTIONAL)))
assert(isNodeSupported(ByteVector.fromLong(1L << BASIC_MULTI_PART_PAYMENT_MANDATORY)))
assert(isNodeSupported(ByteVector.fromLong(1L << BASIC_MULTI_PART_PAYMENT_OPTIONAL)))
}
}
}
|
btcontract/lnwallet
|
app/src/main/java/com/lightning/walletapp/test/FeaturesSpec.scala
|
Scala
|
apache-2.0
| 1,481 |
package sampler.empirical
import org.scalatest.FreeSpec
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Gen.posNum
class EmpiricalTest extends FreeSpec with GeneratorDrivenPropertyChecks {
def buildFromProbTab[A](count: Int, probTab: Map[A, Double]) = new Empirical[A]{
val observationCount = count
val probabilityTable = probTab
}
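  // Generates an index -> weight map with non-negative weights normalised to sum to 1,
  // i.e. a valid probabilityTable for the Empirical instances built above.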
val weightsMapGen =
arbitrary[List[Double]]
.suchThat(l => l.sum > 0)
.map{list =>
val map = list.map(v => math.abs(v)).zipWithIndex.map(_.swap).toMap
val totalWeight = map.values.sum
map.mapValues(_ / totalWeight)
}
"Right tail of empty table" in {
assert(0.0 === buildFromProbTab(0, Map()).rightTail(null))
assert(0.0 === buildFromProbTab(0, Map()).rightTail(1))
}
"Right tail of uniform outcomes" in {
val empirical = buildFromProbTab(4, Map(
1 -> 0.25, 2 -> 0.25, 3 -> 0.25, 4 -> 0.25
))
assert(empirical.rightTail(1) === 1)
assert(empirical.rightTail(2) === 0.75)
assert(empirical.rightTail(3) === 0.5)
assert(empirical.rightTail(4) === 0.25)
assert(empirical.rightTail(5) === 0)
}
"Right tail of weighted outcomes" in {
val map = Map(1 -> 0.1, 2 -> 0.2, 3 -> 0.3, 4 -> 0.4)
assert(buildFromProbTab(3, map).rightTail(1) === 1)
assert(buildFromProbTab(3, map).rightTail(2) === 0.9)
assert(buildFromProbTab(3, map).rightTail(3) === 0.7)
assert(buildFromProbTab(3, map).rightTail(4) === 0.4)
}
"Right tail of generated map" in forAll(weightsMapGen, posNum[Int]){ (wtMap, int) =>
val index = int % wtMap.size
val sortedKeys = wtMap.keys.toIndexedSeq.sorted
val item = sortedKeys.apply(index)
val expected = sortedKeys.filter(_ >= item).map(wtMap).sum
assert(expected == buildFromProbTab(0, wtMap).rightTail(item))
}
"Percentile" in pending
"Percentile sequence" in pending
"Mean" in pending
"MeanDistanceTo" in pending
"MaxDistanceTo" in pending
// "Calculates empirical sequence mean" in {
// val empSeq = IndexedSeq[Double](1,2,3,4).toEmpiricalSeq
//
// mean(empSeq) should be(2.5 +- tolerance)
// }
//
// "Calculates empirical table mean" in {
// val empTable = IndexedSeq[Double](1,2,3,4).toEmpiricalTable
//
// mean(empTable) should be(2.5 +- tolerance)
// }
//
// "Quantiles from an empirical sequence" in {
// val empSeq = IndexedSeq[Double](1,2,3,4).toEmpiricalSeq
//
// assert(quantiles(empSeq, Seq(0.25, 0.5, 0.75)) === Seq(1.0,2.0,3.0))
// }
//
// "Quantiles from an empirical table" in {
// val empTable = IndexedSeq[Double](1,2,2,3,4).toEmpiricalTable
//
// assert(quantiles(empTable, Seq(0.25, 0.5, 0.75)) === Seq(2.0,2.0,3.0))
// }
//
// "Quantiles from an empirical sequence of 12 primes" in {
// val empSeq = IndexedSeq[Double](2,3,5,7,11,13,17,19,23,27,31,37).toEmpiricalSeq
//
// assert(quantiles(empSeq, Seq(0.25, 0.5, 0.75)) === Seq(5.0,13.0,23.0))
// }
//
// "Quantiles from an empirical table of 11 primes" in {
// val empTable = IndexedSeq[Double](2,3,5,7,11,13,17,19,23,27,31).toEmpiricalSeq
//
// assert(quantiles(empTable, Seq(0.25, 0.5, 0.75)) === Seq(5.0,13.0,23.0))
// }
//
// "Quantiles from an empirical sequence of 10 primes" in {
  //    val empSeq = IndexedSeq[Double](2,3,5,7,11,13,17,19,23,29).toEmpiricalSeq
//
// assert(quantiles(empSeq, Seq(0.25, 0.5, 0.75)) === Seq(5.0,11.0,19.0))
// }
//
  //  "Quantiles from an empirical table of 9 primes" in {
// val empTable = IndexedSeq[Double](2,3,5,7,11,13,17,19,23).toEmpiricalSeq
//
// assert(quantiles(empTable, Seq(0.25, 0.5, 0.75)) === Seq(5.0,11.0,17.0))
// }
//
// "Quantile accepts sequence of length 1" in {
// val empSeq = IndexedSeq[Double](1.0).toEmpiricalSeq
//
// assert(quantiles(empSeq, Seq(0.25, 0.5, 0.75)) === Seq(1.0,1.0,1.0))
// }
//
  //  "Error when sequence of length zero passed to quantile" in {
// val empSeq = IndexedSeq[Double]().toEmpiricalSeq
//
// intercept[AssertionError] {
// quantiles(empSeq, Seq(0.25))
// }
// }
//
// "Exception when negative probability supplied" in {
// val empSeq = IndexedSeq[Double](1,2,3,4).toEmpiricalSeq
//
// intercept[RangeException[Double]] {
// quantiles(empSeq, Seq(0.25, -0.5))
// }
// }
//
// "Exception when probability greater than one supplied" in {
// val empSeq = IndexedSeq[Double](1,2,3,4).toEmpiricalSeq
//
// intercept[RangeException[Double]] {
// quantiles(empSeq, Seq(0.25, 5.0))
// }
// }
//
  //  "calculatesAbsoluteDifferenceMetric" in {
// val instance1 = IndexedSeq[Double](1,2,3).toEmpiricalSeq // mean 2
// val instance2 = IndexedSeq[Double](4,5,6).toEmpiricalSeq // mean 5
//
// assert(meanDistance(instance1, instance2) === 3)
// }
//
// "calculatesMaximumDifferenceMetric" in {
// val instance1 = IndexedSeq(1,2,3,4).toEmpiricalSeq
// val instance2 = IndexedSeq(1,2,2,2).toEmpiricalSeq // biggest distance 4
//
// maxDistance(instance1, instance2) should be(0.5 +- tolerance)
// }
}
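// Illustrative sketch, not part of the original test file: the right-tail
// probability that the assertions above expect, computed directly from a
// probability table. The helper name and the Ordering bound are assumptions
// made for this example only.
object RightTailSketch {
  def rightTail[A: Ordering](probTab: Map[A, Double], threshold: A): Double =
    probTab.collect { case (k, p) if Ordering[A].gteq(k, threshold) => p }.sum

  def main(args: Array[String]): Unit = {
    val table = Map(1 -> 0.1, 2 -> 0.2, 3 -> 0.3, 4 -> 0.4)
    println(rightTail(table, 2)) // 0.9, the value the weighted-outcome test expects
  }
}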
|
tearne/Sampler
|
sampler-core/src/test/scala/sampler/empirical/EmpiricalTest.scala
|
Scala
|
apache-2.0
| 5,159 |
package lt.node.scandra.pirkimai.util
import android.os.Environment
import android.util.Log
//import lt.node.scandra.pirkimai.Constants
import java.io.{BufferedReader, BufferedWriter, File, FileInputStream}
import java.io.{FileOutputStream, FileReader, FileWriter, IOException}
import java.nio.channels.FileChannel
/**
* FileUtil methods.
*
* @author ccollins
* https://raw.github.com/michelou/android-examples/master/...
* ...android-in-practice/FileExplorer/src/com/manning/aip/fileexplorer/util/FileUtil.scala
*/
object FileUtil extends OrderPurchase {
final val LINE_SEP = System.getProperty("line.separator")
// from the Android docs, these are the recommended paths
private final val EXT_STORAGE_PATH_PREFIX = "/Android/data/"
private final val EXT_STORAGE_FILES_PATH_SUFFIX = "/files/"
private final val EXT_STORAGE_CACHE_PATH_SUFFIX = "/cache/"
  // Object for the intrinsic lock (per the docs, a 0-length array is "lighter" than a normal Object)
final val DATA_LOCK = new Array[Object](0)
/**
* Use Environment to check if external storage is writable.
*
* @return
*/
def isExternalStorageWritable: Boolean =
Environment.getExternalStorageState equals Environment.MEDIA_MOUNTED
/**
* Use environment to check if external storage is readable.
*
* @return
*/
def isExternalStorageReadable: Boolean =
isExternalStorageWritable ||
(Environment.getExternalStorageState equals Environment.MEDIA_MOUNTED_READ_ONLY)
/**
* Return the recommended external files directory, whether using API level 8 or lower.
* (Uses getExternalStorageDirectory and then appends the recommended path.)
*
* @param packageName
* @return
*/
def getExternalFilesDirAllApiLevels(packageName: String): File =
FileUtil.getExternalDirAllApiLevels(packageName, EXT_STORAGE_FILES_PATH_SUFFIX)
/**
* Return the recommended external cache directory, whether using API level 8 or lower.
* (Uses getExternalStorageDirectory and then appends the recommended path.)
*
* @param packageName
* @return
*/
def getExternalCacheDirAllApiLevels(packageName: String): File =
FileUtil.getExternalDirAllApiLevels(packageName, EXT_STORAGE_CACHE_PATH_SUFFIX)
private def getExternalDirAllApiLevels(packageName: String, suffixType: String): File = {
val dir = new File(Environment.getExternalStorageDirectory + EXT_STORAGE_PATH_PREFIX + packageName + suffixType)
    FileUtil.DATA_LOCK synchronized {
try {
dir.mkdirs()
dir.createNewFile()
} catch {
case e: IOException =>
Log.e(this.TAG, "Error creating file", e)
}
}
dir
}
/**
* Copy file, return true on success, false on failure.
*
* @param src
* @param dst
* @return
*/
def copyFile(src: File, dst: File): Boolean = {
var result = false
var inChannel: FileChannel = null
var outChannel: FileChannel = null
    FileUtil.DATA_LOCK synchronized {
try {
inChannel = new FileInputStream(src).getChannel
outChannel = new FileOutputStream(dst).getChannel
inChannel.transferTo(0, inChannel.size, outChannel)
result = true
} catch {
case e: IOException => // ignore
} finally {
if (inChannel != null && inChannel.isOpen) {
try inChannel.close()
catch { case e: IOException => /* ignore */ }
}
if (outChannel != null && outChannel.isOpen) {
try outChannel.close()
catch { case e: IOException => /* ignore */ }
}
}
}
result
}
/**
* Replace entire File with contents of String, return true on success, false on failure.
*
* @param fileContents
* @param file
* @return
*/
def writeStringAsFile(fileContents: String, file: File): Boolean = {
var result = false
try {
      FileUtil.DATA_LOCK synchronized {
if (file != null) {
file.createNewFile() // ok if returns false, overwrite
val out = new BufferedWriter(new FileWriter(file), 1024)
out.write(fileContents)
out.close()
result = true
}
}
} catch {
case e: IOException =>
Log.e(this.TAG, "Error writing string data to file " + e.getMessage, e)
}
result
}
/**
* Append String to end of File, return true on success, false on failure.
*
* @param appendContents
* @param file
* @return
*/
def appendStringToFile(appendContents: String, file: File): Boolean = {
var result = false
try {
FileUtil.DATA_LOCK synchronized {
if ((file != null) && file.canWrite) {
file.createNewFile() // ok if returns false, overwrite
val out = new BufferedWriter(new FileWriter(file, true), 1024)
out.write(appendContents)
out.close()
result = true
}
}
} catch {
case e: IOException =>
Log.e(this.TAG, "Error appending string data to file " + e.getMessage, e)
}
result
}
/**
* Read file as String, return null if file is not present or not readable.
*
* @param file
* @return
*/
def readFileAsString(file: File): String = {
var sb: StringBuilder = null
try {
FileUtil.DATA_LOCK synchronized {
if ((file != null) && file.canRead) {
sb = new StringBuilder()
val in = new BufferedReader(new FileReader(file), 1024)
var line: String = in.readLine()
while (line != null) {
sb append line
sb append LINE_SEP
line = in.readLine()
}
}
}
} catch {
case e: IOException =>
Log.e(this.TAG, "Error reading file " + e.getMessage, e)
}
if (sb != null) sb.toString
else null
}
/**
* Read file as array of String, return ("") if file is not present or not readable.
*
* @param file
* @return
*/
def readFileAsStringArray(file: File): Array[String] = {
//val fileContents = readFileAsString(file)
this.readFileAsString(file) match {
case null => Array[String] {""}
case contents => contents.split(LINE_SEP)
}
}
}
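// Hypothetical usage sketch, not part of the original file: FileUtil itself
// needs an Android runtime (android.util.Log, Environment, the OrderPurchase
// trait), so this only mirrors the plain java.io round trip that
// writeStringAsFile and readFileAsString perform, using a temporary file.
object FileRoundTripSketch {
  import java.io.{BufferedReader, BufferedWriter, File, FileReader, FileWriter}

  def main(args: Array[String]): Unit = {
    val lineSep = System.getProperty("line.separator")
    val file = File.createTempFile("sketch", ".txt")
    // Write two lines, as writeStringAsFile would.
    val out = new BufferedWriter(new FileWriter(file), 1024)
    out.write("first line" + lineSep + "second line")
    out.close()
    // Read them back line by line, as readFileAsString does.
    val in = new BufferedReader(new FileReader(file), 1024)
    val contents = Iterator.continually(in.readLine()).takeWhile(_ != null).mkString(lineSep)
    in.close()
    println(contents)
    file.delete()
  }
}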
|
vytasab/scandra
|
pirkimai/src/main/scala/lt/node/scandra/pirkimai/util/FileUtil.scala
|
Scala
|
apache-2.0
| 6,200 |
package com.seanshubin.scala.training.sample.data
import com.seanshubin.scala.training.core.Item
class ItemFormatterImpl extends ItemFormatter {
def format(item: Item): String = {
item match {
case Item(Some(color), name, sku) => s"$color $name with sku $sku"
case Item(None, name, sku) => s"$name with sku $sku"
}
}
}
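// Illustrative sketch, not from the original sources: the match above assumes
// Item(color: Option[String], name: String, sku: String). The local case class
// below stands in for com.seanshubin.scala.training.core.Item so the example
// runs on its own; field names and types are assumptions.
object ItemFormatterSketch {
  final case class Item(color: Option[String], name: String, sku: String)

  def format(item: Item): String = item match {
    case Item(Some(color), name, sku) => s"$color $name with sku $sku"
    case Item(None, name, sku) => s"$name with sku $sku"
  }

  def main(args: Array[String]): Unit = {
    println(format(Item(Some("red"), "ball", "sku-1"))) // red ball with sku sku-1
    println(format(Item(None, "widget", "sku-2"))) // widget with sku sku-2
  }
}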
|
SeanShubin/scala-training
|
sample-data/src/main/scala/com/seanshubin/scala/training/sample/data/ItemFormatterImpl.scala
|
Scala
|
unlicense
| 345 |
import edu.vanderbilt.accre.stackex._
import org.scalatest.WordSpec
/**
* Created by arnold-jr on 12/21/16.
*/
class TestStackExApp extends WordSpec {
val line =
"""<row Id="2435" PostTypeId="1" CreationDate="2016-12-06T19:50:13.853"
|Score="-1" ViewCount="12" Body="<p>If I have a dataset of images,
|and I extract all cnn feature vectors from them.
After that I generate
|the pca model of these features by doing:</p> 
 
 <pre>
|<code>pca.fit(ALL_features)
 </code> </pre> 

|
 <p>IF I have a new image and I need to check the similarity
|between this image and the whole dataset, what I have to do?</p>
|
 
 <ol> 
 <li>Extract cnn features from this
|image.</li> 
 <li>How to use the previous pca
|model?</li> 
 <li>How to check the similarity between
|the dataset features and the new image features?</li> 

|</ol> 
 
 <p>Is by doing this? or how?</p>
|
 
 <pre> <code>self.pca.transform(self.db_feats)

|</code> </pre> 
" OwnerUserId="1644"
|LastActivityDate="2016-12-06T19:50:13.853" Title="PCA pca.fit VS
|pca.transform" Tags="<machine-learning> <deep-learning>
|<image-recognition> <conv-neural-network>" AnswerCount="0"
|CommentCount="0"/>""".stripMargin
val tagText = """"<machine-learning><deep-learning>
|<image-recognition><conv-neural-network>""""
"getTags" when {
"applied to Tag text" should {
"return a list of tags as String" in {
assert(getTags(tagText) ==
List("machine-learning", "deep-learning", "image-recognition",
"conv-neural-network")
)
}
}
}
"getTextFromHtml" when {
"passed some nested html" should {
"return the body text in the correct order" in {
assert(
getTextFromHtml("<p><em>Emphatic</em><a>excluded </a> parallel</p>") ==
"Emphatic parallel"
)
}
}
"passed a valid html snippet" should {
"return the body text" in {
assert(getTextFromHtml("<p>some text.</p>") == "some text.")
}
}
"passed some nested html" should {
"return the body text" in {
assert(getTextFromHtml("<p><p>some text.</p></p>") == "some text.")
}
}
"passed some heterogeneous nested html" should {
"return the body text" in {
assert(getTextFromHtml("<i><p>some text.</p></i>") == "some text.")
}
}
"passed some heterogeneous 3-level-nested html" should {
"return the body text" in {
assert(getTextFromHtml("<p><em><i>some text.</i></em></p>") ==
"some text.")
}
}
"passed some excluded html" should {
"return the body text" in {
assert(
getTextFromHtml("<a>excluded text</a><p>some text.</p>") ==
"some text."
)
}
}
}
}
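// Hypothetical sketch, not from the original sources: getTags and
// getTextFromHtml are imported from edu.vanderbilt.accre.stackex and are not
// shown here. This stand-alone helper (getTagsSketch is an invented name)
// shows one way the tag extraction exercised above could work on the
// HTML-escaped attribute text.
object TagSketch {
  def getTagsSketch(tagText: String): List[String] =
    "&lt;(.+?)&gt;".r.findAllMatchIn(tagText).map(_.group(1)).toList

  def main(args: Array[String]): Unit = {
    println(getTagsSketch("&lt;machine-learning&gt;&lt;deep-learning&gt;"))
    // List(machine-learning, deep-learning)
  }
}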
|
bigdata-vandy/stack-ex
|
src/test/scala-2.10/TestStackExApp.scala
|
Scala
|
mit
| 3,116 |
package scalashop
import common._
import org.scalameter._
object VerticalBoxBlurRunner {
val standardConfig = config(
Key.exec.minWarmupRuns -> 5,
Key.exec.maxWarmupRuns -> 10,
Key.exec.benchRuns -> 10,
Key.verbose -> true
) withWarmer (new Warmer.Default)
def main(args: Array[String]): Unit = {
val radius = 3
val width = 1920
val height = 1080
val src = new Img(width, height)
val dst = new Img(width, height)
val seqtime = standardConfig measure {
VerticalBoxBlur.blur(src, dst, 0, width, radius)
}
println(s"sequential blur time: $seqtime ms")
val numTasks = 32
val partime = standardConfig measure {
VerticalBoxBlur.parBlur(src, dst, numTasks, radius)
}
println(s"fork/join blur time: $partime ms")
println(s"speedup: ${seqtime / partime}")
}
}
/** A simple, trivially parallelizable computation. */
object VerticalBoxBlur {
/** Blurs the columns of the source image `src` into the destination image
* `dst`, starting with `from` and ending with `end` (non-inclusive).
*
* Within each column, `blur` traverses the pixels by going from top to
* bottom.
*/
  def blur(src: Img, dst: Img, from: Int, end: Int, radius: Int): Unit = {
    // Side-effecting traversal: each pixel is written straight into `dst`,
    // and the guard keeps x inside the image when `end` overshoots src.width.
    for {
      x <- from until end
      y <- 0 until src.height
      if 0 <= x && x < src.width
    } dst(x, y) = boxBlurKernel(src, x, y, radius)
  }
/** Blurs the columns of the source image in parallel using `numTasks` tasks.
*
* Parallelization is done by stripping the source image `src` into
* `numTasks` separate strips, where each strip is composed of some number of
* columns.
*/
def parBlur(src: Img, dst: Img, numTasks: Int, radius: Int): Unit = {
val colsPerTask = math.max(src.width / numTasks, 1)
val startPoints = 0 to src.width by colsPerTask
val tasks = startPoints map { start =>
task {
blur(src, dst, start, start + colsPerTask, radius)
}
}
tasks foreach (_.join)
}
}
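// Illustrative sketch, not part of the original file: how parBlur's strip
// boundaries fall for the 1920-column image and 32 tasks used in the runner
// above. Pure arithmetic, so it needs neither Img nor the task construct.
object StripBoundariesSketch {
  def main(args: Array[String]): Unit = {
    val width = 1920
    val numTasks = 32
    val colsPerTask = math.max(width / numTasks, 1) // 60 columns per strip
    val startPoints = 0 to width by colsPerTask // 0, 60, 120, ..., 1920
    // Each strip covers [start, start + colsPerTask); the final start equals
    // `width`, so its range is filtered out entirely by blur's bounds guard.
    startPoints.foreach(s => println(s"strip [$s, ${s + colsPerTask})"))
  }
}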
|
yurii-khomenko/fpScalaSpec
|
c3w1scalashop/src/main/scala/scalashop/VerticalBoxBlur.scala
|
Scala
|
gpl-3.0
| 2,033 |
/*
* Copyright (c) 2014. Regents of the University of California
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.postprocessing
import org.apache.commons.configuration.HierarchicalConfiguration
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.ADAMVariantContext
import org.bdgenomics.avocado.stats.AvocadoConfigAndStats
object Postprocessor {
private val stages = List[PostprocessingStage](FilterStrandBias,
FilterDepth)
assert(stages.map(_.stageName).length == stages.map(_.stageName).distinct.length,
"Postprocessing stages have duplicated names.")
def apply(rdd: RDD[ADAMVariantContext],
stageName: String,
stageAlgorithm: String,
stats: AvocadoConfigAndStats,
config: HierarchicalConfiguration): RDD[ADAMVariantContext] = {
val stage = stages.find(_.stageName == stageAlgorithm)
stage match {
case Some(s) => {
val c = config.configurationAt(stageName)
s.apply(rdd, stats, c)
}
      case None => throw new IllegalArgumentException("Postprocessing stage " + stageAlgorithm + " does not exist.")
}
}
}
|
hammerlab/avocado
|
avocado-core/src/main/scala/org/bdgenomics/avocado/postprocessing/Postprocessor.scala
|
Scala
|
apache-2.0
| 1,681 |