code | repo_name | path | language | license | size
---|---|---|---|---|---|
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import java.io.IOException
import java.nio.charset.StandardCharsets
import java.util.concurrent.CountDownLatch
import java.util.concurrent.TimeUnit
import akka.NotUsed
import akka.actor.ActorSystem
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.model.HttpResponse
import akka.http.scaladsl.model.MediaTypes
import akka.http.scaladsl.model.StatusCodes
import akka.http.scaladsl.model.headers._
import akka.stream.KillSwitches
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Keep
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
import akka.util.ByteString
import munit.FunSuite
import scala.concurrent.Await
import scala.concurrent.Future
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import scala.util.Using
class HostSourceSuite extends FunSuite {
import scala.concurrent.duration._
  implicit val system: ActorSystem = ActorSystem(getClass.getSimpleName)
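  // Test helper: builds a HostSource whose HTTP "client" is a stubbed flow that answers every request
  // with the given `response`, so retry and decompression behaviour can be exercised without a real server.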
def source(response: => Try[HttpResponse]): Source[ByteString, NotUsed] = {
val client = Flow[HttpRequest].map(_ => response)
HostSource("http://localhost/api/test", client = client, delay = 1.milliseconds)
}
def compress(str: String): Array[Byte] = {
import com.netflix.atlas.core.util.Streams._
byteArray { out =>
Using.resource(gzip(out))(_.write(str.getBytes(StandardCharsets.UTF_8)))
}
}
test("ok") {
val response = HttpResponse(StatusCodes.OK, entity = ByteString("ok"))
val future = source(Success(response))
.take(5)
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
test("no size limit on data stream") {
val entity = HttpEntity(ByteString("ok")).withSizeLimit(1)
val response = HttpResponse(StatusCodes.OK, entity = entity)
val future = source(Success(response))
.take(5)
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
test("handles decompression") {
val headers = List(`Content-Encoding`(HttpEncodings.gzip))
val data = ByteString(compress("ok"))
val response = HttpResponse(StatusCodes.OK, headers = headers, entity = data)
val future = source(Success(response))
.take(5)
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
test("retries on error response from host") {
val response = HttpResponse(StatusCodes.BadRequest, entity = ByteString("error"))
val latch = new CountDownLatch(5)
val (switch, future) = source {
latch.countDown()
Success(response)
}.viaMat(KillSwitches.single)(Keep.right)
.toMat(Sink.ignore)(Keep.both)
.run()
// If it doesn't retry successfully this should time out and fail the test
latch.await(60, TimeUnit.SECONDS)
switch.shutdown()
Await.result(future, Duration.Inf)
}
test("retries on exception from host") {
val latch = new CountDownLatch(5)
val (switch, future) = source {
latch.countDown()
Failure(new IOException("cannot connect"))
}.viaMat(KillSwitches.single)(Keep.right)
.toMat(Sink.ignore)(Keep.both)
.run()
// If it doesn't retry successfully this should time out and fail the test
latch.await(60, TimeUnit.SECONDS)
switch.shutdown()
Await.result(future, Duration.Inf)
}
test("retries on exception from host entity source") {
val latch = new CountDownLatch(5)
val (switch, future) = source {
latch.countDown()
val source = Source.future(Future.failed[ByteString](new IOException("reset by peer")))
val entity = HttpEntity(MediaTypes.`text/event-stream`, source)
Success(HttpResponse(StatusCodes.OK, entity = entity))
}.viaMat(KillSwitches.single)(Keep.right)
.toMat(Sink.ignore)(Keep.both)
.run()
// If it doesn't retry successfully this should time out and fail the test
latch.await(60, TimeUnit.SECONDS)
switch.shutdown()
Await.result(future, Duration.Inf)
}
test("ref stops host source") {
val response = Success(HttpResponse(StatusCodes.OK, entity = ByteString("ok")))
val ref = EvaluationFlows.stoppableSource(source(response))
ref.stop()
val future = ref.source
.map(_.decodeString(StandardCharsets.UTF_8))
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assert(result.isEmpty)
}
test("ref host source works until stopped") {
val response = Success(HttpResponse(StatusCodes.OK, entity = ByteString("ok")))
val ref = EvaluationFlows.stoppableSource(source(response))
val future = ref.source
.map(_.decodeString(StandardCharsets.UTF_8))
.take(5)
.runWith(Sink.seq[String])
val result = Await.result(future, Duration.Inf).toList
assertEquals(result, (0 until 5).map(_ => "ok").toList)
}
}
| brharrington/atlas | atlas-eval/src/test/scala/com/netflix/atlas/eval/stream/HostSourceSuite.scala | Scala | apache-2.0 | 5,843 |
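For illustration only (not part of the suite above): a sketch of the same stub-client pattern with a client that fails twice before succeeding, to observe the retry behaviour the tests assert. It assumes the suite's imports and implicit ActorSystem; the names used here (e.g. `flakyClient`) are made up.

  // Hypothetical sketch reusing the suite's imports and implicit ActorSystem.
  import java.io.IOException
  import java.util.concurrent.atomic.AtomicInteger

  val attempts = new AtomicInteger()
  val flakyClient = Flow[HttpRequest].map { _ =>
    if (attempts.incrementAndGet() <= 2)
      Failure(new IOException("cannot connect")) // first two requests fail
    else
      Success(HttpResponse(StatusCodes.OK, entity = ByteString("ok")))
  }
  // HostSource keeps re-issuing the request after the configured delay, so the first element it emits
  // should be the body of the eventually successful response.
  val firstOk = HostSource("http://localhost/api/test", client = flakyClient, delay = 1.milliseconds)
    .take(1)
    .map(_.decodeString(StandardCharsets.UTF_8))
    .runWith(Sink.head)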
package org.jetbrains.plugins.scala.lang.psi.types.api
import com.intellij.psi.PsiTypeParameter
import org.jetbrains.plugins.scala.extensions.PsiNamedElementExt
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{PsiTypeParameterExt, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.types.{NamedType, ScSubstitutor, ScType, ScUndefinedSubstitutor}
import org.jetbrains.plugins.scala.project.ProjectContext
import scala.collection.Seq
sealed trait TypeParameterType extends ValueType with NamedType {
val arguments: Seq[TypeParameterType]
def lowerType: ScType
def upperType: ScType
def psiTypeParameter: PsiTypeParameter
override implicit def projectContext: ProjectContext = psiTypeParameter
override val name: String = psiTypeParameter.name
def nameAndId: (String, Long) = psiTypeParameter.nameAndId
def isInvariant: Boolean = psiTypeParameter match {
case typeParam: ScTypeParam => !typeParam.isCovariant && !typeParam.isContravariant
case _ => false
}
def isCovariant: Boolean = psiTypeParameter match {
case typeParam: ScTypeParam => typeParam.isCovariant
case _ => false
}
def isContravariant: Boolean = psiTypeParameter match {
case typeParam: ScTypeParam => typeParam.isContravariant
case _ => false
}
override def equivInner(`type`: ScType, substitutor: ScUndefinedSubstitutor, falseUndef: Boolean): (Boolean, ScUndefinedSubstitutor) =
(`type` match {
case that: TypeParameterType => (that.psiTypeParameter eq psiTypeParameter) || {
(psiTypeParameter, that.psiTypeParameter) match {
case (myBound: ScTypeParam, thatBound: ScTypeParam) =>
//TODO this is a temporary hack, so ignore substitutor for now
myBound.lowerBound.exists(_.equiv(thatBound.lowerBound.getOrNothing)) &&
myBound.upperBound.exists(_.equiv(thatBound.upperBound.getOrNothing)) &&
(myBound.name == thatBound.name || thatBound.isHigherKindedTypeParameter || myBound.isHigherKindedTypeParameter)
case _ => false
}
}
case _ => false
}, substitutor)
override def visitType(visitor: TypeVisitor): Unit = visitor.visitTypeParameterType(this)
}
object TypeParameterType {
def apply(tp: TypeParameter): TypeParameterType = LazyTpt(tp, Some(ScSubstitutor.empty))
def apply(psiTp: PsiTypeParameter, maybeSubstitutor: Option[ScSubstitutor] = Some(ScSubstitutor.empty)): TypeParameterType =
LazyTpt(TypeParameter(psiTp), maybeSubstitutor)
def apply(arguments: Seq[TypeParameterType],
lowerType: ScType,
upperType: ScType,
psiTypeParameter: PsiTypeParameter): TypeParameterType = StrictTpt(arguments, lowerType, upperType, psiTypeParameter)
def unapply(tpt: TypeParameterType): Option[(Seq[TypeParameterType], ScType, ScType, PsiTypeParameter)] =
Some((tpt.arguments, tpt.lowerType, tpt.upperType, tpt.psiTypeParameter))
private case class LazyTpt(typeParameter: TypeParameter, maybeSubstitutor: Option[ScSubstitutor] = Some(ScSubstitutor.empty))
extends TypeParameterType {
val arguments: Seq[TypeParameterType] = typeParameter.typeParameters.map(LazyTpt(_, maybeSubstitutor))
lazy val lowerType: ScType = lift(typeParameter.lowerType)
lazy val upperType: ScType = lift(typeParameter.upperType)
def psiTypeParameter: PsiTypeParameter = typeParameter.psiTypeParameter
private def lift(tp: ScType): ScType = maybeSubstitutor match {
case Some(s) => s.subst(tp)
case _ => tp
}
}
private case class StrictTpt(arguments: Seq[TypeParameterType],
override val lowerType: ScType,
override val upperType: ScType,
psiTypeParameter: PsiTypeParameter) extends TypeParameterType
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/types/api/TypeParameterType.scala | Scala | apache-2.0 | 3,837 |
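As a usage illustration (not from this repository): the `apply`/`unapply` pair above lets client code build and deconstruct `TypeParameterType`s by pattern matching; `boundsOf` below is a hypothetical helper.

  // Hypothetical helper exercising the extractor defined above.
  def boundsOf(tpe: ScType): Option[(ScType, ScType)] = tpe match {
    case TypeParameterType(_, lower, upper, _) => Some((lower, upper)) // arguments and PSI element ignored
    case _ => None
  }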
/*
* Copyright (C) 2010 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo.geogebra
import java.util.logging._
import geogebra.kernel.GeoElement
import geogebra.plugin.GgbAPI
import net.kogics.kojo.util.Utils
import net.kogics.kojo.core.Labelled
trait MwShape extends Labelled {
val ggbApi: GgbAPI
protected def geogebraElement: GeoElement
protected def ctorDone() {
ggbApi.getApplication.storeUndoInfo()
repaint()
}
def repaint() {
// geogebraElement.updateRepaint()
geogebraElement.updateCascade()
ggbApi.getKernel.notifyRepaint()
}
def hide() {
Utils.runInSwingThread {
geogebraElement.setEuclidianVisible(false)
repaint()
}
}
def show() {
Utils.runInSwingThread {
geogebraElement.setEuclidianVisible(true)
repaint()
}
}
def setColor(color: java.awt.Color) {
Utils.runInSwingThread {
geogebraElement.setObjColor(color)
repaint()
}
}
def showNameInLabel() {
Utils.runInSwingThread {
geogebraElement.setLabelMode(GeoElement.LABEL_NAME)
repaint()
}
}
def showNameValueInLabel() {
Utils.runInSwingThread {
geogebraElement.setLabelMode(GeoElement.LABEL_NAME_VALUE)
repaint()
}
}
def showValueInLabel() {
Utils.runInSwingThread {
geogebraElement.setLabelMode(GeoElement.LABEL_VALUE)
repaint()
}
}
def hideLabel() {
Utils.runInSwingThread {
geogebraElement.setLabelVisible(false)
repaint()
}
}
def showLabel() {
Utils.runInSwingThread {
geogebraElement.setLabelVisible(true)
repaint()
}
}
}
| richardfontana/fontana2007-t | KojoEnv/src/net/kogics/kojo/geogebra/MwShape.scala | Scala | gpl-3.0 | 2,138 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the HipchatBot entity.
*/
class HipchatBotGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://localhost:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connectionHeader("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authentication = Map(
"Content-Type" -> """application/json""",
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"Authorization" -> "${access_token}"
)
val scn = scenario("Test the HipchatBot entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))).exitHereIfFailed
.pause(10)
.exec(http("Authentication")
.post("/api/authenticate")
.headers(headers_http_authentication)
.body(StringBody("""{"username":"admin", "password":"admin"}""")).asJSON
.check(header.get("Authorization").saveAs("access_token"))).exitHereIfFailed
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10)
.repeat(2) {
exec(http("Get all hipchatBots")
.get("/gamecrafthipchatnotificationmanager/api/hipchat-bots")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new hipchatBot")
.post("/gamecrafthipchatnotificationmanager/api/hipchat-bots")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "hipchatBotName":"SAMPLE_TEXT", "hipchatBotDescription":"SAMPLE_TEXT", "hipchatBotToken":"SAMPLE_TEXT", "hipchatBotEnabled":null}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_hipchatBot_url"))).exitHereIfFailed
.pause(10)
.repeat(5) {
exec(http("Get created hipchatBot")
.get("/gamecrafthipchatnotificationmanager${new_hipchatBot_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created hipchatBot")
.delete("/gamecrafthipchatnotificationmanager${new_hipchatBot_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(Integer.getInteger("users", 100)) over (Integer.getInteger("ramp", 1) minutes))
).protocols(httpConf)
}
| iMartinezMateu/gamecraft | gamecraft-hipchat-notification-manager/src/test/gatling/user-files/simulations/HipchatBotGatlingTest.scala | Scala | mit | 3,631 |
package edu.rice.habanero.benchmarks.bndbuffer
import edu.rice.habanero.actors.{FuncJavaActor, FuncJavaActorState, FuncJavaPool}
import edu.rice.habanero.benchmarks.bndbuffer.ProdConsBoundedBufferConfig._
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import scala.collection.mutable.ListBuffer
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object ProdConsFuncJavaActorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new ProdConsFuncJavaActorBenchmark)
}
private final class ProdConsFuncJavaActorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
ProdConsBoundedBufferConfig.parseArgs(args)
}
def printArgInfo() {
ProdConsBoundedBufferConfig.printArgs()
}
def runIteration() {
val manager = new ManagerActor(
ProdConsBoundedBufferConfig.bufferSize,
ProdConsBoundedBufferConfig.numProducers,
ProdConsBoundedBufferConfig.numConsumers,
ProdConsBoundedBufferConfig.numItemsPerProducer)
manager.start()
FuncJavaActorState.awaitTermination()
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
if (lastIteration) {
FuncJavaPool.shutdown()
}
}
private class ManagerActor(bufferSize: Int, numProducers: Int, numConsumers: Int, numItemsPerProducer: Int) extends FuncJavaActor[AnyRef] {
private val adjustedBufferSize: Int = bufferSize - numProducers
private val availableProducers = new ListBuffer[ProducerActor]
private val availableConsumers = new ListBuffer[ConsumerActor]
private val pendingData = new ListBuffer[ProdConsBoundedBufferConfig.DataItemMessage]
private var numTerminatedProducers: Int = 0
private val producers = Array.tabulate[ProducerActor](numProducers)(i =>
new ProducerActor(i, this, numItemsPerProducer))
private val consumers = Array.tabulate[ConsumerActor](numConsumers)(i =>
new ConsumerActor(i, this))
override def onPostStart() {
consumers.foreach(loopConsumer => {
availableConsumers.append(loopConsumer)
loopConsumer.start()
})
producers.foreach(loopProducer => {
loopProducer.start()
loopProducer.send(ProduceDataMessage.ONLY)
})
}
override def onPreExit() {
consumers.foreach(loopConsumer => {
loopConsumer.send(ConsumerExitMessage.ONLY)
})
}
override def process(theMsg: AnyRef) {
theMsg match {
case dm: ProdConsBoundedBufferConfig.DataItemMessage =>
val producer: ProducerActor = dm.producer.asInstanceOf[ProducerActor]
if (availableConsumers.isEmpty) {
pendingData.append(dm)
} else {
availableConsumers.remove(0).send(dm)
}
if (pendingData.size >= adjustedBufferSize) {
availableProducers.append(producer)
} else {
producer.send(ProduceDataMessage.ONLY)
}
case cm: ProdConsBoundedBufferConfig.ConsumerAvailableMessage =>
val consumer: ConsumerActor = cm.consumer.asInstanceOf[ConsumerActor]
if (pendingData.isEmpty) {
availableConsumers.append(consumer)
tryExit()
} else {
consumer.send(pendingData.remove(0))
if (availableProducers.nonEmpty) {
availableProducers.remove(0).send(ProduceDataMessage.ONLY)
}
}
case _: ProdConsBoundedBufferConfig.ProducerExitMessage =>
numTerminatedProducers += 1
tryExit()
case msg =>
val ex = new IllegalArgumentException("Unsupported message: " + msg)
ex.printStackTrace(System.err)
}
}
def tryExit() {
if (numTerminatedProducers == numProducers && availableConsumers.size == numConsumers) {
exit()
}
}
}
private class ProducerActor(id: Int, manager: ManagerActor, numItemsToProduce: Int) extends FuncJavaActor[AnyRef] {
private var prodItem: Double = 0.0
private var itemsProduced: Int = 0
private def produceData() {
prodItem = processItem(prodItem, prodCost)
manager.send(new ProdConsBoundedBufferConfig.DataItemMessage(prodItem, this))
itemsProduced += 1
}
override def process(theMsg: AnyRef) {
if (theMsg.isInstanceOf[ProdConsBoundedBufferConfig.ProduceDataMessage]) {
if (itemsProduced == numItemsToProduce) {
exit()
} else {
produceData()
}
} else {
val ex = new IllegalArgumentException("Unsupported message: " + theMsg)
ex.printStackTrace(System.err)
}
}
override def onPreExit() {
manager.send(ProducerExitMessage.ONLY)
}
}
private class ConsumerActor(id: Int, manager: ManagerActor) extends FuncJavaActor[AnyRef] {
private val consumerAvailableMessage = new ProdConsBoundedBufferConfig.ConsumerAvailableMessage(this)
private var consItem: Double = 0
protected def consumeDataItem(dataToConsume: Double) {
consItem = processItem(consItem + dataToConsume, consCost)
}
override def process(theMsg: AnyRef) {
theMsg match {
case dm: ProdConsBoundedBufferConfig.DataItemMessage =>
consumeDataItem(dm.data)
manager.send(consumerAvailableMessage)
case _: ProdConsBoundedBufferConfig.ConsumerExitMessage =>
exit()
case msg =>
val ex = new IllegalArgumentException("Unsupported message: " + msg)
ex.printStackTrace(System.err)
}
}
}
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/bndbuffer/ProdConsFuncJavaActorBenchmark.scala | Scala | gpl-2.0 | 5,846 |
trait FlowExamples {
import flowlib._
import ProcessUtil._
import Producers._
import Gate._
import Flow._
type A
type A1 <: A
type B
type C
type C1 <: C
def s1: Source[A]
def s2: Sink[B]
def sa: Sink[Any]
def c1: Channel[A]
def c2: Channel[C1]
def g1: Gate[B, A1]
def p1: Source[A] => Sink[B] => Process[Nothing]
def p2: Source[C] => Source[A] => Sink[B] => Process[Nothing]
def l1: List[B]
def pr1: Producer[A]
def r1: Reducer[A, List[A]] = reducer(List[A]())((la, a: A) => Process.stop(a :: la))
def ps: Process[Any] = (
s1 ->: p1 :-> s2 &
c1 ->: p1 :-> s2 &
g1 ->: p1 :-> g1 &
s1 ->: p1 :-> g1 &
s1 ->: c2 ->: p2 :-> s2 &
c2 ->: p2 :<- s1 :-> (tee[B] :-> s2 :-> sa) &
emit(l1) :-> s2 &
emit(pr1) :-> c1 &
s1 ->: absorb(r1) &
g1 ->: absorb(r1)
)
}
| arnolddevos/FlowLib | src/test/scala/example/FlowExamples.scala | Scala | lgpl-2.1 | 841 |
package pl.touk.nussknacker.engine.management
import com.typesafe.scalalogging.LazyLogging
import org.apache.flink.api.common.JobStatus
import pl.touk.nussknacker.engine.{BaseModelData, ModelData}
import pl.touk.nussknacker.engine.api.ProcessVersion
import pl.touk.nussknacker.engine.api.deployment._
import pl.touk.nussknacker.engine.api.namespaces.{FlinkUsageKey, NamingContext}
import pl.touk.nussknacker.engine.api.process.{ProcessId, ProcessName, VersionId}
import pl.touk.nussknacker.engine.canonicalgraph.CanonicalProcess
import pl.touk.nussknacker.engine.deployment.{ExternalDeploymentId, User}
import pl.touk.nussknacker.engine.management.rest.HttpFlinkClient
import pl.touk.nussknacker.engine.management.rest.flinkRestModel.JobOverview
import sttp.client._
import java.io.File
import java.util.concurrent.TimeUnit
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
class FlinkRestManager(config: FlinkConfig, modelData: BaseModelData, mainClassName: String)
(implicit ec: ExecutionContext, backend: SttpBackend[Future, Nothing, NothingT])
extends FlinkDeploymentManager(modelData, config.shouldVerifyBeforeDeploy, mainClassName) with LazyLogging {
protected lazy val jarFile: File = new FlinkModelJar().buildJobJar(modelData)
private val client = new HttpFlinkClient(config)
private val slotsChecker = new FlinkSlotsChecker(client)
/*
It's ok to have many jobs with the same name, however:
- there MUST be at most 1 job in *non-terminal* state with given name
- deployment is possible IFF there is NO job in *non-terminal* state with given name
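   Example: job overviews [FINISHED, RUNNING] are fine (the RUNNING job is the current deployment),
   while [RUNNING, RUNNING] breaks the invariant and is surfaced below as MultipleJobsRunning.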
*/
override def findJobStatus(name: ProcessName): Future[Option[ProcessState]] = withJobOverview(name)(
whenNone = Future.successful(None),
whenDuplicates = duplicates => Future.successful(Some(processStateDefinitionManager.processState(
//we cannot have e.g. Failed here as we don't want to allow more jobs
FlinkStateStatus.MultipleJobsRunning,
Some(ExternalDeploymentId(duplicates.head.jid)),
version = Option.empty,
attributes = Option.empty,
startTime = Some(duplicates.head.`start-time`),
errors = List(s"Expected one job, instead: ${duplicates.map(job => s"${job.jid} - ${job.state}").mkString(", ")}"))
)),
whenSingle = job => withVersion(job.jid, name).map { version =>
//TODO: return error when there's no correct version in process
//currently we're rather lax on this, so that this change is backward-compatible
//we log debug here for now, since it's invoked v. often
if (version.isEmpty) {
logger.debug(s"No correct version in deployed scenario: ${job.name}")
}
Some(processStateDefinitionManager.processState(
mapJobStatus(job),
Some(ExternalDeploymentId(job.jid)),
version = version,
startTime = Some(job.`start-time`),
attributes = Option.empty,
errors = List.empty
))
}
)
private def withJobOverview[T](name: ProcessName)(whenNone: => Future[T], whenDuplicates: List[JobOverview] => Future[T], whenSingle: JobOverview => Future[T]): Future[T] = {
val preparedName = modelData.objectNaming.prepareName(name.value, modelData.processConfig, new NamingContext(FlinkUsageKey))
client.findJobsByName(preparedName).flatMap {
case Nil => whenNone
case duplicates if duplicates.count(isNotFinished) > 1 => whenDuplicates(duplicates)
case jobs => whenSingle(findRunningOrFirst(jobs))
}
}
private def findRunningOrFirst(jobOverviews: List[JobOverview]) = jobOverviews.find(isNotFinished).getOrElse(jobOverviews.head)
//NOTE: Flink <1.10 compatibility - protected to make it easier to work with Flink 1.9; JobStatus changed package, so we use a String in the case class
protected def isNotFinished(overview: JobOverview): Boolean = {
!toJobStatus(overview).isGloballyTerminalState
}
private def toJobStatus(overview: JobOverview): JobStatus = {
import org.apache.flink.api.common.JobStatus
JobStatus.valueOf(overview.state)
}
//NOTE: Flink <1.10 compatibility - protected to make it easier to work with Flink 1.9; JobStatus changed package, so we use a String in the case class
protected def mapJobStatus(overview: JobOverview): StateStatus = {
toJobStatus(overview) match {
case JobStatus.RUNNING if ensureTasksRunning(overview) => FlinkStateStatus.Running
case s if checkDuringDeployForNotRunningJob(s) => FlinkStateStatus.DuringDeploy
case JobStatus.FINISHED => FlinkStateStatus.Finished
case JobStatus.RESTARTING => FlinkStateStatus.Restarting
case JobStatus.CANCELED => FlinkStateStatus.Canceled
case JobStatus.CANCELLING => FlinkStateStatus.DuringCancel
//The job is not technically running, but should be in a moment
case JobStatus.RECONCILING | JobStatus.CREATED | JobStatus.SUSPENDED => FlinkStateStatus.Running
case JobStatus.FAILING => FlinkStateStatus.Failing
case JobStatus.FAILED => FlinkStateStatus.Failed
}
}
protected def ensureTasksRunning(overview: JobOverview): Boolean = {
// We sum running and finished tasks because for batch jobs some tasks can be already finished but the others are still running.
// We don't correctly handle the case when a job creates some tasks lazily, e.g. in the batch case. Without knowing what
// kind of job is deployed, we can't tell whether it is such a case or just a streaming job that is not fully running yet.
overview.tasks.running + overview.tasks.finished == overview.tasks.total
}
protected def checkDuringDeployForNotRunningJob(s: JobStatus): Boolean = {
// Flink returns RUNNING status even if some tasks are scheduled or initializing
s == JobStatus.RUNNING || s == JobStatus.INITIALIZING
}
//TODO: cache by jobId?
private def withVersion(jobId: String, name: ProcessName): Future[Option[ProcessVersion]] = {
client.getJobConfig(jobId).map { executionConfig =>
val userConfig = executionConfig.`user-config`
for {
version <- userConfig.get("versionId").flatMap(_.asString).map(_.toLong)
user <- userConfig.get("user").map(_.asString.getOrElse(""))
modelVersion = userConfig.get("modelVersion").flatMap(_.asString).map(_.toInt)
processId = userConfig.get("processId").flatMap(_.asString).map(_.toLong).getOrElse(-1L)
} yield {
ProcessVersion(VersionId(version), name, ProcessId(processId), user, modelVersion)
}
}
}
override def cancel(processName: ProcessName, user: User): Future[Unit] = {
def doCancel(overview: JobOverview) = {
val status = mapJobStatus(overview)
if (processStateDefinitionManager.statusActions(status).contains(ProcessActionType.Cancel) && isNotFinished(overview)) {
cancel(ExternalDeploymentId(overview.jid))
} else {
logger.warn(s"Trying to cancel ${processName.value} which is in status $status.")
Future.successful(())
}
}
withJobOverview(processName)(
whenNone = {
logger.warn(s"Trying to cancel ${processName.value} which is not present in Flink.")
Future.successful(())
},
whenDuplicates = { overviews =>
logger.warn(s"Found duplicate jobs of ${processName.value}: $overviews. Cancelling all in non terminal state.")
Future.sequence(overviews.map(doCancel)).map(_ => ())
},
whenSingle = doCancel
)
}
override protected def cancel(deploymentId: ExternalDeploymentId): Future[Unit] = {
client.cancel(deploymentId)
}
override protected def makeSavepoint(deploymentId: ExternalDeploymentId, savepointDir: Option[String]): Future[SavepointResult] = {
client.makeSavepoint(deploymentId, savepointDir)
}
override protected def stop(deploymentId: ExternalDeploymentId, savepointDir: Option[String]): Future[SavepointResult] = {
client.stop(deploymentId, savepointDir)
}
// this code is executed synchronously by ManagementActor thus we don't care that much about possible races
// and extraneous jar uploads introduced by asynchronous invocation
override protected def runProgram(processName: ProcessName, mainClass: String, args: List[String], savepointPath: Option[String]): Future[Option[ExternalDeploymentId]] = {
logger.debug(s"Starting to deploy scenario: $processName with savepoint $savepointPath")
client.runProgram(jarFile, mainClass, args, savepointPath)
}
override protected def checkRequiredSlotsExceedAvailableSlots(canonicalProcess: CanonicalProcess, currentlyDeployedJobId: Option[ExternalDeploymentId]): Future[Unit] = {
if (config.shouldCheckAvailableSlots)
slotsChecker.checkRequiredSlotsExceedAvailableSlots(canonicalProcess, currentlyDeployedJobId)
else
Future.successful(())
}
override def close(): Unit = Await.result(backend.close(), Duration(10, TimeUnit.SECONDS))
}
| TouK/nussknacker | engine/flink/management/src/main/scala/pl/touk/nussknacker/engine/management/FlinkRestManager.scala | Scala | apache-2.0 | 8,932 |
/*
* Licensed to Tuplejump Software Pvt. Ltd. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Tuplejump Software Pvt. Ltd. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.spark.sql
import com.datastax.driver.core.{KeyspaceMetadata, TableMetadata}
import com.tuplejump.calliope.sql.{CassandraAwareSQLContextFunctions, CassandraProperties, CassandraSchemaHelper}
import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.analysis.Catalog
import org.apache.spark.sql.catalyst.plans.logical.{Subquery, LogicalPlan}
protected[sql] trait CassandraCatalog extends Catalog with Logging{
protected def context: SQLContext with CassandraAwareSQLContextFunctions
abstract override def lookupRelation(mayBeDbName: Option[String], tableRef: String, alias: Option[String]): LogicalPlan = {
logInfo(s"LOOKING UP DB [$mayBeDbName] for CF [$tableRef]")
val (databaseName, tableName) = getDbAndTable(mayBeDbName, tableRef)
logInfo(s"INTERPRETED AS DB [$databaseName] for CF [$tableName]")
val cassandraProperties = CassandraProperties(context.sparkContext)
import cassandraProperties._
databaseName match {
case Some(dbname) =>
val metadata = CassandraSchemaHelper.getCassandraMetadata(cassandraHost, cassandraNativePort, cassandraUsername, cassandraPassword)
if(metadata != null){
metadata.getKeyspace(dbname) match {
case ksmeta: KeyspaceMetadata =>
ksmeta.getTable(tableName) match {
case tableMeta: TableMetadata =>
val cschema = new SchemaRDD(context,
CassandraRelation(cassandraHost,
cassandraNativePort,
cassandraRpcPort,
dbname,
tableName,
context,
cassandraUsername,
cassandraPassword,
mayUseStargate,
Some(sparkContext.hadoopConfiguration)))
logDebug(s"Resolved Cassandra relation attributes: ${cschema.baseLogicalPlan.output}")
val basePlan = cschema.baseLogicalPlan
val tableWithQualifers = Subquery(tableName, basePlan)
// If an alias was specified by the lookup, wrap the plan in a subquery so that attributes are
// properly qualified with this alias.
alias.map(a => Subquery(a, tableWithQualifers)).getOrElse(basePlan)
case null =>
super.lookupRelation(databaseName, tableName, alias)
}
case null =>
super.lookupRelation(databaseName, tableName, alias)
}
}else{
super.lookupRelation(databaseName, tableName, alias)
}
case None =>
// We cannot fetch a table without the keyspace name in Cassandra
super.lookupRelation(databaseName, tableName, alias)
}
}
private val dbtblRegex = "(.*)\\.(.*)".r
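  // e.g. getDbAndTable(None, "events.users") == (Some("events"), "users")
  // getDbAndTable(Some("events"), "users") == (Some("events"), "users")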
def getDbAndTable(dbname: Option[String], tablename: String): (Option[String], String) = {
dbname match {
case db@Some(name) => (db, tablename)
case None => tablename match {
case dbtblRegex(db, tbl) => (Some(db), tbl)
case _ => (dbname, tablename)
}
}
}
}
| tuplejump/calliope | sql/core/src/main/scala/org/apache/spark/sql/CassandraCatalog.scala | Scala | apache-2.0 | 3,926 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.kubernetes.integrationtest.jobs
import org.apache.spark.deploy.kubernetes.integrationtest.PiHelper
import org.apache.spark.sql.SparkSession
// Equivalent to SparkPi except does not stop the Spark Context
// at the end and spins forever, so other things can inspect the
// Spark UI immediately after the fact.
private[spark] object SparkPiWithInfiniteWait {
def main(args: Array[String]): Unit = {
val spark = SparkSession
.builder
.appName("Spark Pi")
.getOrCreate()
val slices = if (args.length > 0) args(0).toInt else 10
val n = math.min(100000L * slices, Int.MaxValue).toInt // avoid overflow
val count = spark.sparkContext.parallelize(1 until n, slices).map { _ =>
PiHelper.helpPi()
}.reduce(_ + _)
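    // Standard SparkPi-style Monte Carlo estimate: the fraction of random points that land inside the
    // circle approximates pi/4, hence the factor of 4 below.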
// scalastyle:off println
println("Pi is roughly " + 4.0 * count / (n - 1))
// scalastyle:on println
// Spin forever to keep the Spark UI active, so other things can inspect the job.
while (true) {
Thread.sleep(600000)
}
}
}
| kimoonkim/spark | resource-managers/kubernetes/integration-tests-spark-jobs/src/main/scala/org/apache/spark/deploy/kubernetes/integrationtest/jobs/SparkPiWithInfiniteWait.scala | Scala | apache-2.0 | 1,848 |
/*
* Copyright 2019 CJWW Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package forms.test
import forms.validation.CommonValidation
import models.test.TearDownUser
import play.api.data.Form
import play.api.data.Forms._
object TearDownUserForm {
val form = Form(
mapping(
"testUserName" -> CommonValidation.hasTextBeenEntered("testUserName"),
"credentialType" -> CommonValidation.hasTextBeenEntered("credentialType")
)(TearDownUser.apply)(TearDownUser.unapply)
)
}
| cjww-development/auth-service | app/forms/test/TearDownUserForm.scala | Scala | apache-2.0 | 1,020 |
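For illustration (not from this repository): a minimal sketch of binding this form from plain key/value data, e.g. in a test or a controller. The sample values are made up; only the field names come from the mapping above, and the success branch just prints the bound model.

  // Hypothetical usage sketch of TearDownUserForm.
  import forms.test.TearDownUserForm

  val bound = TearDownUserForm.form.bind(
    Map("testUserName" -> "testOrganisationUser", "credentialType" -> "individual")
  )

  bound.fold(
    formWithErrors => println(s"Invalid tear-down request: ${formWithErrors.errors}"),
    tearDownUser => println(s"Tearing down $tearDownUser")
  )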
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.{JobContext, TaskAttemptContext}
import org.apache.parquet.column.{Encoding, ParquetProperties}
import org.apache.parquet.example.data.{Group, GroupWriter}
import org.apache.parquet.example.data.simple.SimpleGroup
import org.apache.parquet.hadoop._
import org.apache.parquet.hadoop.api.WriteSupport
import org.apache.parquet.hadoop.api.WriteSupport.WriteContext
import org.apache.parquet.hadoop.metadata.CompressionCodecName
import org.apache.parquet.io.api.RecordConsumer
import org.apache.parquet.schema.{MessageType, MessageTypeParser}
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{InternalRow, ScalaReflection}
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, UnsafeRow}
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
// Write support class for nested groups: ParquetWriter initializes GroupWriteSupport
// with an empty configuration (it is after all not intended to be used in this way?)
// and members are private so we need to make our own in order to pass the schema
// to the writer.
private[parquet] class TestGroupWriteSupport(schema: MessageType) extends WriteSupport[Group] {
var groupWriter: GroupWriter = null
override def prepareForWrite(recordConsumer: RecordConsumer): Unit = {
groupWriter = new GroupWriter(recordConsumer, schema)
}
override def init(configuration: Configuration): WriteContext = {
new WriteContext(schema, new java.util.HashMap[String, String]())
}
override def write(record: Group) {
groupWriter.write(record)
}
}
/**
* A test suite that tests basic Parquet I/O.
*/
class ParquetIOSuite extends QueryTest with ParquetTest with SharedSQLContext {
import testImplicits._
/**
* Writes `data` to a Parquet file, reads it back and check file contents.
*/
protected def checkParquetFile[T <: Product : ClassTag: TypeTag](data: Seq[T]): Unit = {
withParquetDataFrame(data)(r => checkAnswer(r, data.map(Row.fromTuple)))
}
test("basic data types (without binary)") {
val data = (1 to 4).map { i =>
(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble)
}
checkParquetFile(data)
}
test("raw binary") {
val data = (1 to 4).map(i => Tuple1(Array.fill(3)(i.toByte)))
withParquetDataFrame(data) { df =>
assertResult(data.map(_._1.mkString(",")).sorted) {
df.collect().map(_.getAs[Array[Byte]](0).mkString(",")).sorted
}
}
}
test("SPARK-11694 Parquet logical types are not being tested properly") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 a(INT_8);
| required int32 b(INT_16);
| required int32 c(DATE);
| required int32 d(DECIMAL(1,0));
| required int64 e(DECIMAL(10,0));
| required binary f(UTF8);
| required binary g(ENUM);
| required binary h(DECIMAL(32,0));
| required fixed_len_byte_array(32) i(DECIMAL(32,0));
|}
""".stripMargin)
val expectedSparkTypes = Seq(ByteType, ShortType, DateType, DecimalType(1, 0),
DecimalType(10, 0), StringType, StringType, DecimalType(32, 0), DecimalType(32, 0))
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
readParquetFile(path.toString)(df => {
val sparkTypes = df.schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
})
}
}
test("string") {
val data = (1 to 4).map(i => Tuple1(i.toString))
// Property spark.sql.parquet.binaryAsString shouldn't affect Parquet files written by Spark SQL
// as we store Spark SQL schema in the extra metadata.
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "false")(checkParquetFile(data))
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true")(checkParquetFile(data))
}
testStandardAndLegacyModes("fixed-length decimals") {
def makeDecimalRDD(decimal: DecimalType): DataFrame = {
spark
.range(1000)
// Parquet doesn't allow column names with spaces, have to add an alias here.
// Minus 500 here so that negative decimals are also tested.
.select((('id - 500) / 100.0) cast decimal as 'dec)
.coalesce(1)
}
val combinations = Seq((5, 2), (1, 0), (1, 1), (18, 10), (18, 17), (19, 0), (38, 37))
for ((precision, scale) <- combinations) {
withTempPath { dir =>
val data = makeDecimalRDD(DecimalType(precision, scale))
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df => {
checkAnswer(df, data.collect().toSeq)
}}
}
}
}
test("date type") {
def makeDateRDD(): DataFrame =
sparkContext
.parallelize(0 to 1000)
.map(i => Tuple1(DateTimeUtils.toJavaDate(i)))
.toDF()
.select($"_1")
withTempPath { dir =>
val data = makeDateRDD()
data.write.parquet(dir.getCanonicalPath)
readParquetFile(dir.getCanonicalPath) { df =>
checkAnswer(df, data.collect().toSeq)
}
}
}
testStandardAndLegacyModes("map") {
val data = (1 to 4).map(i => Tuple1(Map(i -> s"val_$i")))
checkParquetFile(data)
}
testStandardAndLegacyModes("array") {
val data = (1 to 4).map(i => Tuple1(Seq(i, i + 1)))
checkParquetFile(data)
}
testStandardAndLegacyModes("array and double") {
val data = (1 to 4).map(i => (i.toDouble, Seq(i.toDouble, (i + 1).toDouble)))
checkParquetFile(data)
}
testStandardAndLegacyModes("struct") {
val data = (1 to 4).map(i => Tuple1((i, s"val_$i")))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("nested struct with array of array as field") {
val data = (1 to 4).map(i => Tuple1((i, Seq(Seq(s"val_$i")))))
withParquetDataFrame(data) { df =>
// Structs are converted to `Row`s
checkAnswer(df, data.map { case Tuple1(struct) =>
Row(Row(struct.productIterator.toSeq: _*))
})
}
}
testStandardAndLegacyModes("nested map with struct as value type") {
val data = (1 to 4).map(i => Tuple1(Map(i -> (i, s"val_$i"))))
withParquetDataFrame(data) { df =>
checkAnswer(df, data.map { case Tuple1(m) =>
Row(m.mapValues(struct => Row(struct.productIterator.toSeq: _*)))
})
}
}
test("nulls") {
val allNulls = (
null.asInstanceOf[java.lang.Boolean],
null.asInstanceOf[Integer],
null.asInstanceOf[java.lang.Long],
null.asInstanceOf[java.lang.Float],
null.asInstanceOf[java.lang.Double])
withParquetDataFrame(allNulls :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(5)(null): _*))
}
}
test("nones") {
val allNones = (
None.asInstanceOf[Option[Int]],
None.asInstanceOf[Option[Long]],
None.asInstanceOf[Option[String]])
withParquetDataFrame(allNones :: Nil) { df =>
val rows = df.collect()
assert(rows.length === 1)
assert(rows.head === Row(Seq.fill(3)(null): _*))
}
}
test("SPARK-10113 Support for unsigned Parquet logical types") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 c(UINT_32);
|}
""".stripMargin)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val errorMessage = intercept[Throwable] {
spark.read.parquet(path.toString).printSchema()
}.toString
assert(errorMessage.contains("Parquet type not supported"))
}
}
test("SPARK-11692 Support for Parquet logical types, JSON and BSON (embedded types)") {
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required binary a(JSON);
| required binary b(BSON);
|}
""".stripMargin)
val expectedSparkTypes = Seq(StringType, BinaryType)
withTempPath { location =>
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf)
val sparkTypes = spark.read.parquet(path.toString).schema.map(_.dataType)
assert(sparkTypes === expectedSparkTypes)
}
}
test("compression codec") {
val hadoopConf = spark.sessionState.newHadoopConf()
def compressionCodecFor(path: String, codecName: String): String = {
val codecs = for {
footer <- readAllFootersWithoutSummaryFiles(new Path(path), hadoopConf)
block <- footer.getParquetMetadata.getBlocks.asScala
column <- block.getColumns.asScala
} yield column.getCodec.name()
assert(codecs.distinct === Seq(codecName))
codecs.head
}
val data = (0 until 10).map(i => (i, i.toString))
def checkCompressionCodec(codec: CompressionCodecName): Unit = {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> codec.name()) {
withParquetFile(data) { path =>
assertResult(spark.conf.get(SQLConf.PARQUET_COMPRESSION).toUpperCase) {
compressionCodecFor(path, codec.name())
}
}
}
}
// Checks default compression codec
checkCompressionCodec(
CompressionCodecName.fromConf(spark.conf.get(SQLConf.PARQUET_COMPRESSION)))
checkCompressionCodec(CompressionCodecName.UNCOMPRESSED)
checkCompressionCodec(CompressionCodecName.GZIP)
checkCompressionCodec(CompressionCodecName.SNAPPY)
}
test("read raw Parquet file") {
def makeRawParquetFile(path: Path): Unit = {
val schema = MessageTypeParser.parseMessageType(
"""
|message root {
| required boolean _1;
| required int32 _2;
| required int64 _3;
| required float _4;
| required double _5;
|}
""".stripMargin)
val testWriteSupport = new TestGroupWriteSupport(schema)
/**
* Provide a builder for constructing a parquet writer - after PARQUET-248 directly
* constructing the writer is deprecated and should be done through a builder. The default
* builders include Avro - but for raw Parquet writing we must create our own builder.
*/
class ParquetWriterBuilder() extends
ParquetWriter.Builder[Group, ParquetWriterBuilder](path) {
override def getWriteSupport(conf: Configuration) = testWriteSupport
override def self() = this
}
val writer = new ParquetWriterBuilder().build()
(0 until 10).foreach { i =>
val record = new SimpleGroup(schema)
record.add(0, i % 2 == 0)
record.add(1, i)
record.add(2, i.toLong)
record.add(3, i.toFloat)
record.add(4, i.toDouble)
writer.write(record)
}
writer.close()
}
withTempDir { dir =>
val path = new Path(dir.toURI.toString, "part-r-0.parquet")
makeRawParquetFile(path)
readParquetFile(path.toString) { df =>
checkAnswer(df, (0 until 10).map { i =>
Row(i % 2 == 0, i, i.toLong, i.toFloat, i.toDouble) })
}
}
}
test("write metadata") {
val hadoopConf = spark.sessionState.newHadoopConf()
withTempPath { file =>
val path = new Path(file.toURI.toString)
val fs = FileSystem.getLocal(hadoopConf)
val schema = StructType.fromAttributes(ScalaReflection.attributesFor[(Int, String)])
writeMetadata(schema, path, hadoopConf)
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_COMMON_METADATA_FILE)))
assert(fs.exists(new Path(path, ParquetFileWriter.PARQUET_METADATA_FILE)))
val expectedSchema = new ParquetSchemaConverter().convert(schema)
val actualSchema = readFooter(path, hadoopConf).getFileMetaData.getSchema
actualSchema.checkContains(expectedSchema)
expectedSchema.checkContains(actualSchema)
}
}
test("save - overwrite") {
withParquetFile((1 to 10).map(i => (i, i.toString))) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Overwrite).save(file)
readParquetFile(file) { df =>
checkAnswer(df, newData.map(Row.fromTuple))
}
}
}
test("save - ignore") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Ignore).save(file)
readParquetFile(file) { df =>
checkAnswer(df, data.map(Row.fromTuple))
}
}
}
test("save - throw") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
val errorMessage = intercept[Throwable] {
newData.toDF().write.format("parquet").mode(SaveMode.ErrorIfExists).save(file)
}.getMessage
assert(errorMessage.contains("already exists"))
}
}
test("save - append") {
val data = (1 to 10).map(i => (i, i.toString))
withParquetFile(data) { file =>
val newData = (11 to 20).map(i => (i, i.toString))
newData.toDF().write.format("parquet").mode(SaveMode.Append).save(file)
readParquetFile(file) { df =>
checkAnswer(df, (data ++ newData).map(Row.fromTuple))
}
}
}
test("SPARK-6315 regression test") {
// Spark 1.1 and prior versions write Spark schema as case class string into Parquet metadata.
// This has been deprecated by JSON format since 1.2. Notice that, 1.3 further refactored data
// types API, and made StructType.fields an array. This makes the result of StructType.toString
// different from prior versions: there's no "Seq" wrapping the fields part in the string now.
val sparkSchema =
"StructType(Seq(StructField(a,BooleanType,false),StructField(b,IntegerType,false)))"
// The Parquet schema is intentionally made different from the Spark schema. Because the new
// Parquet data source simply falls back to the Parquet schema once it fails to parse the Spark
// schema. By making these two different, we are able to assert the old style case class string
// is parsed successfully.
val parquetSchema = MessageTypeParser.parseMessageType(
"""message root {
| required int32 c;
|}
""".stripMargin)
withTempPath { location =>
val extraMetadata = Map(ParquetReadSupport.SPARK_METADATA_KEY -> sparkSchema.toString)
val path = new Path(location.getCanonicalPath)
val conf = spark.sessionState.newHadoopConf()
writeMetadata(parquetSchema, path, conf, extraMetadata)
readParquetFile(path.toString) { df =>
assertResult(df.schema) {
StructType(
StructField("a", BooleanType, nullable = true) ::
StructField("b", IntegerType, nullable = true) ::
Nil)
}
}
}
}
test("SPARK-8121: spark.sql.parquet.output.committer.class shouldn't be overridden") {
val extraOptions = Map(
SQLConf.OUTPUT_COMMITTER_CLASS.key -> classOf[ParquetOutputCommitter].getCanonicalName,
"spark.sql.parquet.output.committer.class" ->
classOf[JobCommitFailureParquetOutputCommitter].getCanonicalName
)
withTempPath { dir =>
val message = intercept[SparkException] {
spark.range(0, 1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(message === "Intentional exception for testing purposes")
}
}
test("SPARK-6330 regression test") {
// In 1.3.0, save to fs other than file: without configuring core-site.xml would get:
// IllegalArgumentException: Wrong FS: hdfs://..., expected: file:///
intercept[Throwable] {
spark.read.parquet("file:///nonexistent")
}
val errorMessage = intercept[Throwable] {
spark.read.parquet("hdfs://nonexistent")
}.toString
assert(errorMessage.contains("UnknownHostException"))
}
test("SPARK-7837 Do not close output writer twice when commitTask() fails") {
// Using a output committer that always fail when committing a task, so that both
// `commitTask()` and `abortTask()` are invoked.
val extraOptions = Map[String, String](
"spark.sql.parquet.output.committer.class" ->
classOf[TaskCommitFailureParquetOutputCommitter].getCanonicalName
)
// Before fixing SPARK-7837, the following code results in an NPE because both
// `commitTask()` and `abortTask()` try to close output writers.
withTempPath { dir =>
val m1 = intercept[SparkException] {
spark.range(1).coalesce(1).write.options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m1.contains("Intentional exception for testing purposes"))
}
withTempPath { dir =>
val m2 = intercept[SparkException] {
val df = spark.range(1).select('id as 'a, 'id as 'b).coalesce(1)
df.write.partitionBy("a").options(extraOptions).parquet(dir.getCanonicalPath)
}.getCause.getMessage
assert(m2.contains("Intentional exception for testing purposes"))
}
}
test("SPARK-11044 Parquet writer version fixed as version1 ") {
// For dictionary encoding, Parquet changes the encoding types according to its writer
// version. So, this test checks one of the encoding types in order to ensure that
// the file is written with writer version2.
val extraOptions = Map[String, String](
// Write a Parquet file with writer version2.
ParquetOutputFormat.WRITER_VERSION -> ParquetProperties.WriterVersion.PARQUET_2_0.toString,
// By default, dictionary encoding is enabled from Parquet 1.2.0 but
// it is enabled just in case.
ParquetOutputFormat.ENABLE_DICTIONARY -> "true"
)
val hadoopConf = spark.sessionState.newHadoopConfWithOptions(extraOptions)
withSQLConf(ParquetOutputFormat.ENABLE_JOB_SUMMARY -> "true") {
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/part-r-0.parquet"
spark.range(1 << 16).selectExpr("(id % 4) AS i")
.coalesce(1).write.options(extraOptions).mode("overwrite").parquet(path)
val blockMetadata = readFooter(new Path(path), hadoopConf).getBlocks.asScala.head
val columnChunkMetadata = blockMetadata.getColumns.asScala.head
// If the file is written with version2, this should include
// Encoding.RLE_DICTIONARY type. For version1, it is Encoding.PLAIN_DICTIONARY
assert(columnChunkMetadata.getEncodings.contains(Encoding.RLE_DICTIONARY))
}
}
}
test("null and non-null strings") {
// Create a dataset where the first values are NULL and then some non-null values. The
// number of non-nulls needs to be bigger than the ParquetReader batch size.
val data: Dataset[String] = spark.range(200).map (i =>
if (i < 150) null
else "a"
)
val df = data.toDF("col")
assert(df.agg("col" -> "count").collect().head.getLong(0) == 50)
withTempPath { dir =>
val path = s"${dir.getCanonicalPath}/data"
df.write.parquet(path)
readParquetFile(path) { df2 =>
assert(df2.agg("col" -> "count").collect().head.getLong(0) == 50)
}
}
}
test("read dictionary encoded decimals written as INT32") {
("true" :: "false" :: Nil).foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized) {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i32.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(5, 2) as 'i32_dec))
}
}
}
test("read dictionary encoded decimals written as INT64") {
("true" :: "false" :: Nil).foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized) {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-i64.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'i64_dec))
}
}
}
test("read dictionary encoded decimals written as FIXED_LEN_BYTE_ARRAY") {
("true" :: "false" :: Nil).foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized) {
checkAnswer(
// Decimal column in this file is encoded using plain dictionary
readResourceParquetFile("test-data/dec-in-fixed-len.parquet"),
spark.range(1 << 4).select('id % 10 cast DecimalType(10, 2) as 'fixed_len_dec))
}
}
}
test("SPARK-12589 copy() on rows returned from reader works for strings") {
withTempPath { dir =>
val data = (1, "abc") ::(2, "helloabcde") :: Nil
data.toDF().write.parquet(dir.getCanonicalPath)
var hash1: Int = 0
var hash2: Int = 0
(false :: true :: Nil).foreach { v =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> v.toString) {
val df = spark.read.parquet(dir.getCanonicalPath)
val rows = df.queryExecution.toRdd.map(_.copy()).collect()
val unsafeRows = rows.map(_.asInstanceOf[UnsafeRow])
if (!v) {
hash1 = unsafeRows(0).hashCode()
hash2 = unsafeRows(1).hashCode()
} else {
assert(hash1 == unsafeRows(0).hashCode())
assert(hash2 == unsafeRows(1).hashCode())
}
}
}
}
}
test("VectorizedParquetRecordReader - direct path read") {
val data = (0 to 10).map(i => (i, (i + 'a').toChar.toString))
withTempPath { dir =>
spark.createDataFrame(data).repartition(1).write.parquet(dir.getCanonicalPath)
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0);
{
val reader = new VectorizedParquetRecordReader
try {
reader.initialize(file, null)
val result = mutable.ArrayBuffer.empty[(Int, String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getInt(0), row.getString(1))
result += v
}
assert(data == result)
} finally {
reader.close()
}
}
// Project just one column
{
val reader = new VectorizedParquetRecordReader
try {
reader.initialize(file, ("_2" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
result += row.getString(0)
}
assert(data.map(_._2) == result)
} finally {
reader.close()
}
}
// Project columns in opposite order
{
val reader = new VectorizedParquetRecordReader
try {
reader.initialize(file, ("_2" :: "_1" :: Nil).asJava)
val result = mutable.ArrayBuffer.empty[(String, Int)]
while (reader.nextKeyValue()) {
val row = reader.getCurrentValue.asInstanceOf[InternalRow]
val v = (row.getString(0), row.getInt(1))
result += v
}
assert(data.map { x => (x._2, x._1) } == result)
} finally {
reader.close()
}
}
// Empty projection
{
val reader = new VectorizedParquetRecordReader
try {
reader.initialize(file, List[String]().asJava)
var result = 0
while (reader.nextKeyValue()) {
result += 1
}
assert(result == data.length)
} finally {
reader.close()
}
}
}
}
test("VectorizedParquetRecordReader - partition column types") {
withTempPath { dir =>
Seq(1).toDF().repartition(1).write.parquet(dir.getCanonicalPath)
val dataTypes =
Seq(StringType, BooleanType, ByteType, ShortType, IntegerType, LongType,
FloatType, DoubleType, DecimalType(25, 5), DateType, TimestampType)
val constantValues =
Seq(
UTF8String.fromString("a string"),
true,
1.toByte,
2.toShort,
3,
Long.MaxValue,
0.25.toFloat,
0.75D,
Decimal("1234.23456"),
DateTimeUtils.fromJavaDate(java.sql.Date.valueOf("2015-01-01")),
DateTimeUtils.fromJavaTimestamp(java.sql.Timestamp.valueOf("2015-01-01 23:50:59.123")))
dataTypes.zip(constantValues).foreach { case (dt, v) =>
val schema = StructType(StructField("pcol", dt) :: Nil)
val vectorizedReader = new VectorizedParquetRecordReader
val partitionValues = new GenericInternalRow(Array(v))
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0)
try {
vectorizedReader.initialize(file, null)
vectorizedReader.initBatch(schema, partitionValues)
vectorizedReader.nextKeyValue()
val row = vectorizedReader.getCurrentValue.asInstanceOf[InternalRow]
        // Use a copied row rather than `ColumnarBatch` in order to use the get(...) method,
        // which is not implemented in `ColumnarBatch`.
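        // Note: ordinal 1 is the appended partition column; ordinal 0 is the data column read
        // from the file (assuming initBatch appends partition values after the file's columns).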
val actual = row.copy().get(1, dt)
val expected = v
assert(actual == expected)
} finally {
vectorizedReader.close()
}
}
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
withSQLConf(SQLConf.PARQUET_COMPRESSION.key -> "snappy") {
val option = new ParquetOptions(Map("Compression" -> "uncompressed"), spark.sessionState.conf)
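      // The option key is matched case-insensitively and overrides the session-level "snappy"
      // setting configured above.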
assert(option.compressionCodecClassName == "UNCOMPRESSED")
}
}
}
class JobCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitJob(jobContext: JobContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
class TaskCommitFailureParquetOutputCommitter(outputPath: Path, context: TaskAttemptContext)
extends ParquetOutputCommitter(outputPath, context) {
override def commitTask(context: TaskAttemptContext): Unit = {
sys.error("Intentional exception for testing purposes")
}
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetIOSuite.scala | Scala | apache-2.0 | 28,051 |
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.integrationtest
import eu.cdevreeze.yaidom.core.EName
import eu.cdevreeze.yaidom.core.QName
import eu.cdevreeze.yaidom.core.Scope
import eu.cdevreeze.yaidom.indexed
import eu.cdevreeze.yaidom.parse.DocumentParserUsingDom
import eu.cdevreeze.yaidom.resolved
import eu.cdevreeze.yaidom.simple
import eu.cdevreeze.yaidom.simple.Node
import eu.cdevreeze.yaidom.simple.Node._
import org.scalatest.funsuite.AnyFunSuite
/**
* XML creation test case.
*
* @author Chris de Vreeze
*/
class CreationTest extends AnyFunSuite {
test("testCreation") {
// 1. Parse XML file into Elem
val docParser = DocumentParserUsingDom.newInstance()
val is = classOf[CreationTest].getResourceAsStream("books-with-strange-namespaces.xml")
val doc1: simple.Document = docParser.parse(is)
val resolvedRootElm1: resolved.Elem = resolved.Elem.from(doc1.documentElement)
val expectedResolvedBookElm: resolved.Elem = {
import resolved._
Elem(
EName("{http://books}Book"),
Map(EName("ISBN") -> "ISBN-9-88-777777-6", EName("Price") -> "25"),
Vector(
Elem(EName("{http://books}Title"), Map(), Vector(Text("Jennifer's Economical Database Hints"))),
Elem(
EName("Authors"),
Map(),
Vector(
Elem(
EName("{http://bookstore}Author"),
Map(),
Vector(
Elem(EName("{http://ns}First_Name"), Map(), Vector(Text("Jennifer"))),
Elem(EName("{http://ns}Last_Name"), Map(), Vector(Text("Widom"))))
))
)
)
)
}
assertResult(Some(expectedResolvedBookElm)) {
resolvedRootElm1.removeAllInterElementWhitespace.findChildElem { e =>
e.localName == "Book" && e.attributeOption(EName("ISBN")).contains("ISBN-9-88-777777-6")
}
}
val scope: Scope = Scope.from("books" -> "http://books", "names" -> "http://names")
val otherScope: Scope = Scope.from("names" -> "http://names", "magazines" -> "http://magazines")
val yetAnotherScope: Scope = Scope.from("books" -> "http://bookstore", "names" -> "http://ns", "magazines" -> "http://magazines")
val elm2Builder: simple.Elem =
elem(
qname = QName("books:Book"),
attributes = Vector(QName("ISBN") -> "ISBN-9-88-777777-6", QName("Price") -> "25"),
scope = scope,
children = Vector(
textElem(QName("books:Title"), scope, "Jennifer's Economical Database Hints"),
elem(
qname = QName("Authors"),
scope = otherScope,
children = Vector(
elem(
qname = QName("books:Author"),
scope = yetAnotherScope,
children = Vector(
textElem(QName("names:First_Name"), yetAnotherScope, "Jennifer"),
textElem(QName("names:Last_Name"), yetAnotherScope, "Widom"))
))
)
)
)
val elm2: simple.Elem = elm2Builder
val resolvedElm2 = resolved.Elem.from(elm2)
assertResult(expectedResolvedBookElm) {
resolvedElm2
}
assertResult(false) {
elm2Builder.findAllElems.forall(_.scope == elm2Builder.scope)
}
val bookScope: Scope = Scope.from("books" -> "http://books")
val magazineScope: Scope = Scope.from("magazines" -> "http://magazines")
val bookAndNamesScope: Scope = Scope.from("books" -> "http://bookstore", "magazines" -> "http://magazines", "names" -> "http://ns")
val elm3Builder: simple.Elem =
elem(
qname = QName("books:Book"),
attributes = Vector(QName("ISBN") -> "ISBN-9-88-777777-6", QName("Price") -> "25"),
scope = bookScope,
children = Vector(
textElem(QName("books:Title"), bookScope, "Jennifer's Economical Database Hints"),
elem(
qname = QName("Authors"),
scope = magazineScope,
children = Vector(
elem(
qname = QName("books:Author"),
scope = bookAndNamesScope,
children = Vector(
textElem(QName("names:First_Name"), bookAndNamesScope, "Jennifer"),
textElem(QName("names:Last_Name"), bookAndNamesScope, "Widom"))
))
)
)
)
val prefixesUsed: Set[String] = {
elm3Builder.findAllElemsOrSelf.foldLeft(Set[String]()) { (acc, elemBuilder) =>
val qnames: Set[QName] = elemBuilder.attributes.toMap.keySet + elemBuilder.qname
val prefixes: Set[String] = qnames.flatMap { qname =>
qname.prefixOption
}
acc ++ prefixes
}
}
assertResult(Set("books", "names")) {
prefixesUsed
}
assertResult(false) {
elm3Builder.findAllElems.forall(_.scope == elm3Builder.scope)
}
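    // notUndeclaringPrefixes (roughly speaking) pushes the given parent scope down the element
    // tree so that no prefixed namespace undeclarations remain, without changing resolved content.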
val elm3: simple.Elem = elm3Builder.notUndeclaringPrefixes(Scope.from("books" -> "http://bookstore"))
val resolvedElm3 = resolved.Elem.from(elm3)
assertResult(expectedResolvedBookElm) {
resolvedElm3
}
val elm4: simple.Elem = {
import Node._
elem(
qname = QName("books:Book"),
attributes = Vector(QName("ISBN") -> "ISBN-9-88-777777-6", QName("Price") -> "25"),
scope = Scope.from("books" -> "http://books", "names" -> "http://names"),
children = Vector(
textElem(
QName("books:Title"),
Scope.from("books" -> "http://books", "names" -> "http://names"),
"Jennifer's Economical Database Hints"),
elem(
qname = QName("Authors"),
scope = Scope.from("magazines" -> "http://magazines"),
children = Vector(
elem(
qname = QName("books:Author"),
scope =
Scope.from("books" -> "http://bookstore", "names" -> "http://ns", "magazines" -> "http://magazines"),
children = Vector(
textElem(
QName("names:First_Name"),
Scope
.from("books" -> "http://bookstore", "names" -> "http://ns", "magazines" -> "http://magazines"),
"Jennifer"),
textElem(
QName("names:Last_Name"),
Scope
.from("books" -> "http://bookstore", "names" -> "http://ns", "magazines" -> "http://magazines"),
"Widom")
)
))
)
)
)
}
val resolvedElm4 = resolved.Elem.from(elm4)
assertResult(expectedResolvedBookElm) {
resolvedElm4
}
}
test("testNotUndeclaringPrefixes") {
val docParser = DocumentParserUsingDom.newInstance()
val is = classOf[CreationTest].getResourceAsStream("books-with-strange-namespaces.xml")
val doc1: simple.Document = docParser.parse(is)
val isbn = "ISBN-9-88-777777-6"
val bookElm1 = doc1.documentElement
.findElem(e => e.localName == "Book" && e.attributeOption(EName("ISBN")).contains(isbn))
.getOrElse(sys.error(s"No book with ISBN $isbn"))
val authorsElm1 = bookElm1.getChildElem(_.localName == "Authors")
val doc2: simple.Document = simple.Document(doc1.documentElement.notUndeclaringPrefixes(Scope.Empty))
val bookElm2 = doc2.documentElement
.findElem(e => e.localName == "Book" && e.attributeOption(EName("ISBN")).contains(isbn))
.getOrElse(sys.error(s"No book with ISBN $isbn"))
val authorsElm2 = bookElm2.getChildElem(_.localName == "Authors")
val doc3: simple.Document =
simple.Document(doc1.documentElement.notUndeclaringPrefixes(Scope.from("books" -> "http://bookstore")))
val bookElm3 = doc3.documentElement
.findElem(e => e.localName == "Book" && e.attributeOption(EName("ISBN")).contains(isbn))
.getOrElse(sys.error(s"No book with ISBN $isbn"))
val authorsElm3 = bookElm3.getChildElem(_.localName == "Authors")
val doc4: simple.Document =
simple.Document(doc1.documentElement.notUndeclaringPrefixes(Scope.from("books" -> "http://abc")))
val bookElm4 = doc4.documentElement
.findElem(e => e.localName == "Book" && e.attributeOption(EName("ISBN")).contains(isbn))
.getOrElse(sys.error(s"No book with ISBN $isbn"))
val authorsElm4 = bookElm4.getChildElem(_.localName == "Authors")
assertResult((bookElm1.scope ++ Scope.from("magazines" -> "http://magazines")) -- Set("books")) {
authorsElm1.scope
}
assertResult(bookElm1.scope ++ Scope.from("magazines" -> "http://magazines")) {
authorsElm2.scope
}
assertResult(bookElm1.scope ++ Scope.from("magazines" -> "http://magazines")) {
authorsElm3.scope
}
assertResult(bookElm1.scope ++ Scope.from("magazines" -> "http://magazines")) {
authorsElm4.scope
}
val resolvedRoot = resolved.Elem.from(doc1.documentElement)
assertResult(resolvedRoot) {
resolved.Elem.from(doc2.documentElement)
}
assertResult(resolvedRoot) {
resolved.Elem.from(doc3.documentElement)
}
assertResult(resolvedRoot) {
resolved.Elem.from(doc4.documentElement)
}
}
test("testNotUndeclaringPrefixesAgain") {
val docParser = DocumentParserUsingDom.newInstance()
val is = classOf[CreationTest].getResourceAsStream("books-with-strange-namespaces.xml")
val doc1: simple.Document = docParser.parse(is)
val resolvedRootElm1: resolved.Elem = resolved.Elem.from(doc1.documentElement)
// First call notUndeclaringPrefixes with an empty Scope
val parentScope2 = Scope.Empty
val rootElem2 = doc1.documentElement.notUndeclaringPrefixes(parentScope2)
assertResult(resolvedRootElm1) {
resolved.Elem.from(rootElem2)
}
// Now call notUndeclaringPrefixes with Scope.from("books" -> "http://bookstore", "names" -> "http://xyz")
val parentScope3 = Scope.from("books" -> "http://bookstore", "names" -> "http://xyz")
val rootElem3 = doc1.documentElement.notUndeclaringPrefixes(parentScope3)
assertResult(resolvedRootElm1) {
resolved.Elem.from(rootElem3)
}
// Next call notUndeclaringPrefixes with Scope.from("abcde" -> "http://abcde")
val parentScope4 = Scope.from("abcde" -> "http://abcde")
val rootElem4 = doc1.documentElement.notUndeclaringPrefixes(parentScope4)
assertResult(resolvedRootElm1) {
resolved.Elem.from(rootElem4)
}
// Finally call notUndeclaringPrefixes with Scope.from("books" -> "http://bookstore", "names" -> "http://xyz", "abcde" -> "http://abcde")
val parentScope5 = Scope.from("books" -> "http://bookstore", "names" -> "http://xyz", "abcde" -> "http://abcde")
val rootElem5 = doc1.documentElement.notUndeclaringPrefixes(parentScope5)
assertResult(resolvedRootElm1) {
resolved.Elem.from(rootElem5)
}
}
test("testInsertionWhileReusingPrefixes") {
val bookScope: Scope = Scope.from("books" -> "http://bookstore")
val booksElmBuilder: simple.Elem =
elem(
qname = QName("books:Book"),
attributes = Vector(QName("ISBN") -> "ISBN-9-88-777777-6", QName("Price") -> "25"),
scope = bookScope,
children = Vector(
textElem(QName("books:Title"), bookScope, "Jennifer's Economical Database Hints"),
emptyElem(qname = QName("books:Authors"), scope = bookScope))
)
assertResult(true) {
booksElmBuilder.findAllElemsOrSelf.forall(_.qname.prefixOption.isDefined)
}
assertResult(true) {
booksElmBuilder.findAllElems.forall(_.scope == booksElmBuilder.scope)
}
val booksElm: simple.Elem = booksElmBuilder.notUndeclaringPrefixes(Scope.Empty)
val prefixBooks = booksElm.scope.prefixesForNamespace("http://bookstore").headOption.getOrElse("bks")
assertResult("books") {
prefixBooks
}
val authorElmBuilder: simple.Elem =
elem(
qname = QName(prefixBooks, "Author"),
scope = Scope.from(prefixBooks -> "http://bookstore"),
children = Vector(
textElem(QName(prefixBooks, "First_Name"), Scope.from(prefixBooks -> "http://bookstore"), "Jennifer"),
textElem(QName(prefixBooks, "Last_Name"), Scope.from(prefixBooks -> "http://bookstore"), "Widom")
)
)
assertResult(true) {
authorElmBuilder.findAllElemsOrSelf.forall(e => e.qname.prefixOption.isDefined)
}
assertResult(true) {
authorElmBuilder.findAllElems.forall(_.scope == authorElmBuilder.scope)
}
val authorElm: simple.Elem = authorElmBuilder.notUndeclaringPrefixes(Scope.Empty)
// Let's functionally insert the author
val authorsPath = indexed
.Elem(booksElm)
.findElem(_.resolvedName == EName("{http://bookstore}Authors"))
.map(_.path)
.getOrElse(sys.error("No 'Authors' element found"))
val updatedBooksElm: simple.Elem = booksElm.updateElemOrSelf(authorsPath) { e =>
e.plusChild(authorElm)
}
assertResult(Some(authorsPath)) {
indexed.Elem(updatedBooksElm).findElemOrSelf(e => e.localName == "Author").flatMap(e => e.path.parentPathOption)
}
assertResult(true) {
updatedBooksElm.findAllElemsOrSelf.forall { e =>
e.scope == Scope.from("books" -> "http://bookstore")
}
}
}
}
| dvreeze/yaidom | jvm/src/test/scala/eu/cdevreeze/yaidom/integrationtest/CreationTest.scala | Scala | apache-2.0 | 13,886 |
package gnieh.pp.tests
import gnieh.pp._
class IndentTest extends PpTest {
"the indent operator" should "indent text as block" in {
val docs: List[Doc] = List("line1", "line2", "line3")
val finalDoc = indent(2)(vcat(docs))
    render80(finalDoc) should be("  line1\n  line2\n  line3")
}
it should "behave as original Haskell implementation" in {
val doc = indent(4)(fillSep(words("the indent combinator indents these words !")))
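    // render20 renders at width 20: indent(4) prefixes each line with four spaces and fillSep
    // wraps the words so that each line stays within the available width.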
val expectedRender =
""" the indent
| combinator
| indents these
| words !""".stripMargin
render20(doc) should be(expectedRender)
}
}
| gnieh/tekstlib | src/test/scala/gnieh/pp/tests/IndentTest.scala | Scala | apache-2.0 | 638 |
// Copyright 2015,2016,2017,2018,2019,2020 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package commbank.grimlock.test
import commbank.grimlock.framework._
import commbank.grimlock.framework.content._
import commbank.grimlock.framework.encoding._
import commbank.grimlock.framework.environment.implicits._
import commbank.grimlock.framework.metadata._
import commbank.grimlock.framework.position._
import commbank.grimlock.framework.window._
import commbank.grimlock.library.window._
import shapeless.{ ::, HNil }
import shapeless.nat.{ _0, _1 }
trait TestBatchMovingAverage extends TestGrimlock {
type P = Value[String] :: Value[String] :: Value[String] :: HNil
type S = Value[String] :: HNil
type R = Value[String] :: Value[String] :: HNil
// test prepare&initilise
val cell = Cell(Position("foo", "bar", "baz"), Content(ContinuousSchema[Long](), 1L))
val rem = Position("bar", "baz")
val in = 1.0
val first = (rem, in)
val second = (rem, in)
// test present
val sel = Position("sales")
def createCell(year: String, value: Double) = List(
Cell(Position("sales", year), Content(ContinuousSchema[Double](), value))
)
}
class TestSimpleMovingAverage extends TestBatchMovingAverage {
"A SimpleMovingAverage" should "prepare correctly" in {
SimpleMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), false)
.prepare(cell) shouldBe in
}
it should "initialise correctly" in {
SimpleMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), false)
.initialise(rem, in) shouldBe ((List(first), List()))
SimpleMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), true)
.initialise(rem, in) shouldBe ((List(first), List((rem, 1.0))))
SimpleMovingAverage(1, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), false)
.initialise(rem, in) shouldBe ((List(second), List()))
SimpleMovingAverage(1, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), true)
.initialise(rem, in) shouldBe ((List(second), List((rem, 1.0))))
SimpleMovingAverage(5, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), false)
.initialise(rem, in) shouldBe ((List(first), List()))
SimpleMovingAverage(5, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), true)
.initialise(rem, in) shouldBe ((List(first), List((rem, 1.0))))
SimpleMovingAverage(5, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), false)
.initialise(rem, in) shouldBe ((List(second), List()))
SimpleMovingAverage(5, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), true)
.initialise(rem, in) shouldBe ((List(second), List((rem, 1.0))))
}
it should "update correctly" in {
val obj = SimpleMovingAverage(
5,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0),
false
)
val init = obj.initialise(Position("2003"), 4.0)
init shouldBe ((List((Position("2003"), 4.0)), List()))
val first = obj.update(Position("2004"), 6.0, init._1)
first shouldBe ((List((Position("2003"), 4.0), (Position("2004"), 6.0)), List()))
val second = obj.update(Position("2005"), 5.0, first._1)
second shouldBe ((List((Position("2003"), 4.0), (Position("2004"), 6.0), (Position("2005"), 5.0)), List()))
val third = obj.update(Position("2006"), 8.0, second._1)
third shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0)
), List()))
val fourth = obj.update(Position("2007"), 9.0, third._1)
fourth shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0)
), List((Position("2007"), 6.4))))
val fifth = obj.update(Position("2008"), 5.0, fourth._1)
fifth shouldBe ((List(
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0),
(Position("2008"), 5.0)
), List((Position("2008"), 6.6))))
}
it should "update all correctly" in {
val obj = SimpleMovingAverage(
5,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0),
true
)
val init = obj.initialise(Position("2003"), 4.0)
init shouldBe ((List((Position("2003"), 4.0)), List((Position("2003"), 4.0))))
val first = obj.update(Position("2004"), 6.0, init._1)
first shouldBe ((List((Position("2003"), 4.0), (Position("2004"), 6.0)), List((Position("2004"), 5))))
val second = obj.update(Position("2005"), 5.0, first._1)
second shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0)
), List((Position("2005"), 5))))
val third = obj.update(Position("2006"), 8.0, second._1)
third shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0)
), List((Position("2006"), 5.75))))
val fourth = obj.update(Position("2007"), 9.0, third._1)
fourth shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0)
), List((Position("2007"), 6.4))))
val fifth = obj.update(Position("2008"), 5.0, fourth._1)
fifth shouldBe ((List(
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0),
(Position("2008"), 5.0)
), List((Position("2008"), 6.6))))
}
it should "present" in {
SimpleMovingAverage(
1,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0),
false
).present(sel, (Position("2008"), 6.6)) shouldBe createCell("2008", 6.6)
}
}
class TestCenteredMovingAverage extends TestBatchMovingAverage {
"A CenteredMovingAverage" should "prepare correctly" in {
CenteredMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0))
.prepare(cell) shouldBe in
}
it should "initialise correctly" in {
CenteredMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0))
.initialise(rem, in) shouldBe ((List(first), List()))
CenteredMovingAverage(1, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1))
.initialise(rem, in) shouldBe ((List(second), List()))
CenteredMovingAverage(5, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0))
.initialise(rem, in) shouldBe ((List(first), List()))
CenteredMovingAverage(5, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1))
.initialise(rem, in) shouldBe ((List(second), List()))
}
it should "update correctly" in {
val obj = CenteredMovingAverage(
2,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0)
)
val init = obj.initialise(Position("2003"), 4.0)
init shouldBe ((List((Position("2003"), 4.0)), List()))
val first = obj.update(Position("2004"), 6.0, init._1)
first shouldBe ((List((Position("2003"), 4.0), (Position("2004"), 6.0)), List()))
val second = obj.update(Position("2005"), 5.0, first._1)
second shouldBe ((List((Position("2003"), 4.0), (Position("2004"), 6.0), (Position("2005"), 5.0)), List()))
val third = obj.update(Position("2006"), 8.0, second._1)
third shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0)
), List()))
val fourth = obj.update(Position("2007"), 9.0, third._1)
fourth shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0)
), List((Position("2005"), 6.4))))
val fifth = obj.update(Position("2008"), 5.0, fourth._1)
fifth shouldBe ((List(
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0),
(Position("2008"), 5.0)
), List((Position("2006"), 6.6))))
}
it should "present correctly" in {
CenteredMovingAverage(
2,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0)
).present(sel, (Position("2006"), 6.6)) shouldBe createCell("2006", 6.6)
}
}
class TestWeightedMovingAverage extends TestBatchMovingAverage {
"A WeightedMovingAverage" should "prepare correctly" in {
WeightedMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), false)
.prepare(cell) shouldBe in
}
it should "initialise correctly" in {
WeightedMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), false)
.initialise(rem, in) shouldBe ((List(first), List()))
WeightedMovingAverage(1, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), true)
.initialise(rem, in) shouldBe ((List(first), List((rem, in))))
WeightedMovingAverage(1, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), false)
.initialise(rem, in) shouldBe ((List(second), List()))
WeightedMovingAverage(1, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), true)
.initialise(rem, in) shouldBe ((List(second), List((rem, in))))
WeightedMovingAverage(5, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), false)
.initialise(rem, in) shouldBe ((List(first), List()))
WeightedMovingAverage(5, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0), true)
.initialise(rem, in) shouldBe ((List(first), List((rem, in))))
WeightedMovingAverage(5, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), false)
.initialise(rem, in) shouldBe ((List(second), List()))
WeightedMovingAverage(5, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1), true)
.initialise(rem, in) shouldBe ((List(second), List((rem, in))))
}
it should "update correctly" in {
val obj = WeightedMovingAverage(
5,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0),
false
)
val init = obj.initialise(Position("2003"), 4.0)
init shouldBe ((List((Position("2003"), 4.0)), List()))
val first = obj.update(Position("2004"), 6.0, init._1)
first shouldBe ((List((Position("2003"), 4.0), (Position("2004"), 6.0)), List()))
val second = obj.update(Position("2005"), 5.0, first._1)
second shouldBe ((List((Position("2003"), 4.0), (Position("2004"), 6.0), (Position("2005"), 5.0)), List()))
val third = obj.update(Position("2006"), 8.0, second._1)
third shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0)
), List()))
val fourth = obj.update(Position("2007"), 9.0, third._1)
fourth shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0)
), List((Position("2007"), 7.2))))
val fifth = obj.update(Position("2008"), 5.0, fourth._1)
fifth shouldBe ((List(
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0),
(Position("2008"), 5.0)
), List((Position("2008"), 6.733333333333333))))
}
it should "update all correctly" in {
val obj = WeightedMovingAverage(
5,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0),
true
)
val init = obj.initialise(Position("2003"), 4.0)
init shouldBe ((List((Position("2003"), 4.0)), List((Position("2003"), 4.0))))
val first = obj.update(Position("2004"), 6.0, init._1)
first shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0)
), List((Position("2004"), 5.333333333333333))))
val second = obj.update(Position("2005"), 5.0, first._1)
second shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0)
), List((Position("2005"), 5.166666666666667))))
val third = obj.update(Position("2006"), 8.0, second._1)
third shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0)
), List((Position("2006"), 6.3))))
val fourth = obj.update(Position("2007"), 9.0, third._1)
fourth shouldBe ((List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0)
), List((Position("2007"), 7.2))))
val fifth = obj.update(Position("2008"), 5.0, fourth._1)
fifth shouldBe ((List(
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0),
(Position("2008"), 5.0)
), List((Position("2008"), 6.733333333333333))))
}
it should "present correctly" in {
WeightedMovingAverage(
5,
Locate.AppendRemainderDimension[Value[String] :: HNil, Value[String] :: HNil, _0, Value[String]](_0),
false
).present(sel, (Position("2008"), 6.733333333333333)) shouldBe createCell("2008", 6.733333333333333)
}
}
trait TestOnlineMovingAverage extends TestGrimlock {
type P = Value[String] :: Value[String] :: Value[String] :: HNil
type S = Value[String] :: HNil
type R = Value[String] :: Value[String] :: HNil
// test prepare&initilise
val cell = Cell(Position("foo", "bar", "baz"), Content(ContinuousSchema[Long](), 1L))
val rem = Position("bar", "baz")
val in = 1.0
val first = ((1.0, 1), List((rem, in)))
val second = ((1.0, 1), List((rem, in)))
// test present
val sel = Position()
def createCell(str: String, value: Double) = List(Cell(Position(str), Content(ContinuousSchema[Double](), value)))
}
class TestCumulativeMovingAverage extends TestOnlineMovingAverage {
"A CumulativeMovingAverage" should "prepare correctly" in {
CumulativeMovingAverage(Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0)).prepare(cell) shouldBe in
}
it should "initialise correctly" in {
CumulativeMovingAverage(Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0))
.initialise(rem, in) shouldBe first
CumulativeMovingAverage(Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1))
.initialise(rem, in) shouldBe second
}
it should "update correctly" in {
val obj = CumulativeMovingAverage(
Locate.AppendRemainderDimension[HNil, Value[String] :: HNil, _0, Value[String]](_0)
)
val init = obj.initialise(Position("val.1"), 1.0)
init shouldBe (((1.0, 1), List((Position("val.1"), 1.0))))
val first = obj.update(Position("val.2"), 2.0, init._1)
first shouldBe (((1.5, 2), List((Position("val.2"), 1.5))))
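    // Cumulative (running) mean of all values seen so far, e.g. 1.5 = (1.0 + 2.0) / 2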
val second = obj.update(Position("val.3"), 3.0, first._1)
second shouldBe (((2.0, 3), List((Position("val.3"), 2))))
val third = obj.update(Position("val.4"), 4.0, second._1)
third shouldBe (((2.5, 4), List((Position("val.4"), 2.5))))
val fourth = obj.update(Position("val.5"), 5.0, third._1)
fourth shouldBe (((3.0, 5), List((Position("val.5"), 3))))
}
it should "present correctly" in {
CumulativeMovingAverage(Locate.AppendRemainderDimension[HNil, Value[String] :: HNil, _0, Value[String]](_0))
.present(sel, (Position("val.5"), 3.0)) shouldBe createCell("val.5", 3)
}
}
class TestExponentialMovingAverage extends TestOnlineMovingAverage {
"A ExponentialMovingAverage" should "prepare correctly" in {
ExponentialMovingAverage(3, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0)).prepare(cell) shouldBe in
}
it should "initialise correctly" in {
ExponentialMovingAverage(0.33, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0))
.initialise(rem, in) shouldBe first
ExponentialMovingAverage(3, Locate.AppendRemainderDimension[S, R, _0, Value[String]](_0))
.initialise(rem, in) shouldBe first
ExponentialMovingAverage(0.33, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1))
.initialise(rem, in) shouldBe second
ExponentialMovingAverage(3, Locate.AppendRemainderDimension[S, R, _1, Value[String]](_1))
.initialise(rem, in) shouldBe second
}
it should "update correctly" in {
val obj = ExponentialMovingAverage(
0.33,
Locate.AppendRemainderDimension[HNil, Value[String] :: HNil, _0, Value[String]](_0)
)
val init = obj.initialise(Position("day.1"), 16.0)
init shouldBe (((16.0, 1), List((Position("day.1"), 16.0))))
val first = obj.update(Position("day.2"), 17.0, init._1)
first shouldBe (((16.33, 2), List((Position("day.2"), 16.33))))
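    // Each update is alpha * current + (1 - alpha) * previous, e.g. 16.33 = 0.33 * 17.0 + 0.67 * 16.0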
val second = obj.update(Position("day.3"), 17.0, first._1)
second shouldBe (((16.551099999999998, 3), List((Position("day.3"), 16.551099999999998))))
val third = obj.update(Position("day.4"), 10.0, second._1)
third shouldBe (((14.389236999999998, 4), List((Position("day.4"), 14.389236999999998))))
val fourth = obj.update(Position("day.5"), 17.0, third._1)
fourth shouldBe (((15.250788789999998, 5), List((Position("day.5"), 15.250788789999998))))
}
it should "present correctly" in {
ExponentialMovingAverage(
0.33,
Locate.AppendRemainderDimension[HNil, Value[String] :: HNil, _0, Value[String]](_0)
).present(sel, (Position("day.5"), 15.250788789999998)) shouldBe createCell("day.5", 15.250788789999998)
}
}
trait TestWindow extends TestGrimlock {
type P = Value[String] :: Value[String] :: Value[String] :: HNil
type S = Value[String] :: HNil
type R = Value[String] :: Value[String] :: HNil
val cell1 = Cell(Position("foo", "bar", "baz"), Content(ContinuousSchema[Long](), 1L))
val cell2 = Cell(Position("foo", "bar", "baz"), Content(NominalSchema[String](), "abc"))
val sel = Position("foo")
val rem = Position("bar", "baz")
val in1 = Option(1.0)
val in2f = None
val in2t = Option(Double.NaN)
}
class TestCumulativeSum extends TestWindow {
def createCell(value: Double) = List(Cell(Position("foo", "bar|baz"), Content(ContinuousSchema[Double](), value)))
"A CumulativeSum" should "prepare correctly" in {
CumulativeSums(Locate.AppendRemainderString[S, R]("|"), true).prepare(cell1) shouldBe in1
CumulativeSums(Locate.AppendRemainderString[S, R]("|"), false).prepare(cell1) shouldBe in1
CumulativeSums(Locate.AppendRemainderString[S, R]("|"), true)
.prepare(cell2)
.map(_.compare(Double.NaN)) shouldBe Option(0)
CumulativeSums(Locate.AppendRemainderString[S, R]("|"), false).prepare(cell2) shouldBe in2f
}
it should "initialise correctly" in {
val obj = CumulativeSums(Locate.AppendRemainderString[S, R]("|"), true)
obj.initialise(rem, Option(1.0)) shouldBe ((Option(1.0), List((rem, 1.0))))
obj.initialise(rem, None) shouldBe ((None, List()))
val init = obj.initialise(rem, Option(Double.NaN))
init._1.map(_.compare(Double.NaN)) shouldBe (Option(0))
init._2.toList.map { case (r, d) => (r, d.compare(Double.NaN)) } shouldBe (List((rem, 0)))
}
it should "update correctly strict" in {
val obj = CumulativeSums(Locate.AppendRemainderString[S, R]("|"), true)
val init = obj.initialise(rem, in1)
init shouldBe ((Option(1.0), List((rem, 1.0))))
val first = obj.update(rem, in1, init._1)
first shouldBe ((Option(2.0), List((rem, 2.0))))
val second = obj.update(rem, in2t, first._1)
second._1.map(_.compare(Double.NaN)) shouldBe (Option(0))
second._2.toList.map { case (r, d) => (r, d.compare(Double.NaN)) } shouldBe (List((rem, 0)))
val third = obj.update(rem, in1, second._1)
third._1.map(_.compare(Double.NaN)) shouldBe (Option(0))
third._2.toList.map { case (r, d) => (r, d.compare(Double.NaN)) } shouldBe (List((rem, 0)))
}
it should "update correctly strict on first" in {
val obj = CumulativeSums(Locate.AppendRemainderString[S, R]("|"), true)
val init = obj.initialise(rem, in2t)
init._1.map(_.compare(Double.NaN)) shouldBe (Option(0))
init._2.toList.map { case (r, d) => (r, d.compare(Double.NaN)) } shouldBe (List((rem, 0)))
val first = obj.update(rem, in1, init._1)
first._1.map(_.compare(Double.NaN)) shouldBe (Option(0))
first._2.toList.map { case (r, d) => (r, d.compare(Double.NaN)) } shouldBe (List((rem, 0)))
val second = obj.update(rem, in2t, first._1)
second._1.map(_.compare(Double.NaN)) shouldBe (Option(0))
second._2.toList.map { case (r, d) => (r, d.compare(Double.NaN)) } shouldBe (List((rem, 0)))
val third = obj.update(rem, in1, second._1)
third._1.map(_.compare(Double.NaN)) shouldBe (Option(0))
third._2.toList.map { case (r, d) => (r, d.compare(Double.NaN)) } shouldBe (List((rem, 0)))
}
it should "update correctly non-strict" in {
val obj = CumulativeSums(Locate.AppendRemainderString[S, R]("|"), false)
val init = obj.initialise(rem, in1)
init shouldBe ((Option(1.0), List((rem, 1.0))))
val first = obj.update(rem, in1, init._1)
first shouldBe ((Option(2.0), List((rem, 2.0))))
val second = obj.update(rem, in2f, first._1)
second shouldBe ((Option(2.0), List()))
val third = obj.update(rem, in1, second._1)
third shouldBe ((Option(3.0), List((rem, 3.0))))
}
it should "update correctly non-strict on first" in {
val obj = CumulativeSums(Locate.AppendRemainderString[S, R]("|"), false)
val init = obj.initialise(rem, in2f)
init shouldBe ((None, List()))
val first = obj.update(rem, in1, init._1)
first shouldBe ((Option(1.0), List((rem, 1.0))))
val second = obj.update(rem, in2f, first._1)
second shouldBe ((Option(1.0), List()))
val third = obj.update(rem, in1, second._1)
third shouldBe ((Option(2.0), List((rem, 2.0))))
}
it should "present correctly strict" in {
CumulativeSums(Locate.AppendRemainderString[S, R]("|"), true).present(sel, (rem, 1.0)) shouldBe createCell(1.0)
}
}
class TestBinaryOperator extends TestWindow {
def createCell(value: Double) = List(
Cell(Position("foo", "p(bar|baz, bar|baz)"), Content(ContinuousSchema[Double](), value))
)
"A BinaryOperator" should "prepare correctly" in {
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), true)
.prepare(cell1) shouldBe in1
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), false)
.prepare(cell1) shouldBe in1
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), true)
.prepare(cell2)
.map(_.compare(Double.NaN)) shouldBe Option(0)
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), false)
.prepare(cell2) shouldBe in2f
}
it should "initialise correctly" in {
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), true)
.initialise(rem, in1) shouldBe (((in1, rem), List()))
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), false)
.initialise(rem, in1) shouldBe (((in1, rem), List()))
val init = BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), true)
.initialise(rem, in2t)
init._1._1.map(_.compare(Double.NaN)) shouldBe Option(0)
init._1._2 shouldBe rem
init._2 shouldBe List()
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), false)
.initialise(rem, in2f) shouldBe (((None, rem), List()))
}
it should "update correctly strict" in {
val obj = BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), true)
val init = obj.initialise(rem, in1)
init shouldBe (((Option(1.0), rem), List()))
val first = obj.update(rem, in1, init._1)
first shouldBe (((Option(1.0), rem), List((2.0, rem, rem))))
val second = obj.update(rem, in2t, first._1)
second._1._1.map(_.compare(Double.NaN)) shouldBe Option(0)
second._1._2 shouldBe rem
second._2.toList.map { case (d, c, p) => (d.compare(Double.NaN), c, p) } shouldBe List((0, rem, rem))
val third = obj.update(rem, in1, second._1)
third._1._1.map(_.compare(Double.NaN)) shouldBe Option(0)
third._1._2 shouldBe rem
third._2.toList.map { case (d, c, p) => (d.compare(Double.NaN), c, p) } shouldBe List((0, rem, rem))
}
it should "update correctly strict on first" in {
val obj = BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), true)
val init = obj.initialise(rem, in2t)
init._1._1.map(_.compare(Double.NaN)) shouldBe Option(0)
init._1._2 shouldBe rem
init._2.toList shouldBe List()
val first = obj.update(rem, in1, init._1)
first._1._1.map(_.compare(Double.NaN)) shouldBe Option(0)
first._1._2 shouldBe rem
first._2.toList.map { case (d, c, p) => (d.compare(Double.NaN), c, p) } shouldBe List((0, rem, rem))
val second = obj.update(rem, in2t, first._1)
second._1._1.map(_.compare(Double.NaN)) shouldBe Option(0)
second._1._2 shouldBe rem
second._2.toList.map { case (d, c, p) => (d.compare(Double.NaN), c, p) } shouldBe List((0, rem, rem))
val third = obj.update(rem, in1, second._1)
third._1._1.map(_.compare(Double.NaN)) shouldBe Option(0)
third._1._2 shouldBe rem
third._2.toList.map { case (d, c, p) => (d.compare(Double.NaN), c, p) } shouldBe List((0, rem, rem))
}
it should "update correctly non-strict" in {
val obj = BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), false)
val init = obj.initialise(rem, in1)
init shouldBe (((Option(1.0), rem), List()))
val first = obj.update(rem, in1, init._1)
first shouldBe (((Option(1.0), rem), List((2.0, rem, rem))))
val second = obj.update(rem, in2f, first._1)
second shouldBe (((Option(1.0), rem), List()))
val third = obj.update(rem, in1, second._1)
third shouldBe (((Option(1.0), rem), List((2.0, rem, rem))))
}
it should "update correctly non-strict on first" in {
val obj = BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), false)
val init = obj.initialise(rem, in2f)
init shouldBe (((None, rem), List()))
val first = obj.update(rem, in1, init._1)
first shouldBe (((Option(1.0), rem), List()))
val second = obj.update(rem, in2f, first._1)
second shouldBe (((Option(1.0), rem), List()))
val third = obj.update(rem, in1, second._1)
third shouldBe (((Option(1.0), rem), List((2.0, rem, rem))))
}
it should "present correctly" in {
BinaryOperator(_ + _, Locate.AppendPairwiseRemainderString[S, R]("p(%1$s, %2$s)", "|"), false)
.present(sel, (1.0, rem, rem)) shouldBe createCell(1.0)
}
}
class TestCombinationWindow extends TestGrimlock {
type P = Value[String] :: Value[String] :: HNil
type S = Value[String] :: HNil
type R = Value[String] :: HNil
type Q = Value[String] :: Value[String] :: HNil
def renamer(name: String)(cell: Cell[P]): Option[Position[P]] = Option(
cell.position.update(_1, name.format(cell.position(_1).toShortString))
)
"A CombinationWindow" should "present correctly" in {
val sel = Position("sales")
val obj: Window[P, S, R, Q] = List(
SimpleMovingAverage[P, S, R, Q](
5,
Locate.AppendRemainderDimension(_0),
false
).andThenRelocate(renamer("%1$s.simple")),
WeightedMovingAverage[P, S, R, Q](
5,
Locate.AppendRemainderDimension(_0),
false
).andThenRelocate(renamer("%1$s.weighted"))
)
val prep3 = obj.prepare(Cell(Position("sales", "2003"), createContent(4)))
prep3 shouldBe List(4.0, 4.0)
val init = obj.initialise(Position("2003"), prep3)
init shouldBe ((List(List((Position("2003"), 4.0)), List((Position("2003"), 4.0))), List(List(List(), List()))))
val prep4 = obj.prepare(Cell(Position("sales", "2004"), createContent(6)))
prep4 shouldBe List(6.0, 6.0)
val first = obj.update(Position("2004"), prep4, init._1)
first shouldBe ((List(
List((Position("2003"), 4.0), (Position("2004"), 6.0)),
List((Position("2003"), 4.0), (Position("2004"), 6.0))
), List(List(List(), List()))))
val prep5 = obj.prepare(Cell(Position("sales", "2005"), createContent(5)))
prep5 shouldBe List(5.0, 5.0)
val second = obj.update(Position("2005"), prep5, first._1)
second shouldBe ((List(
List((Position("2003"), 4.0), (Position("2004"), 6.0), (Position("2005"), 5.0)),
List((Position("2003"), 4.0), (Position("2004"), 6.0), (Position("2005"), 5.0))
), List(List(List(), List()))))
val prep6 = obj.prepare(Cell(Position("sales", "2006"), createContent(8)))
prep6 shouldBe List(8.0, 8.0)
val third = obj.update(Position("2006"), prep6, second._1)
third shouldBe ((List(
List((Position("2003"), 4.0), (Position("2004"), 6.0), (Position("2005"), 5.0), (Position("2006"), 8.0)),
List((Position("2003"), 4.0), (Position("2004"), 6.0), (Position("2005"), 5.0), (Position("2006"), 8.0))
), List(List(List(), List()))))
val prep7 = obj.prepare(Cell(Position("sales", "2007"), createContent(9)))
prep7 shouldBe List(9.0, 9.0)
val fourth = obj.update(Position("2007"), prep7, third._1)
fourth shouldBe ((List(
List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0)
),
List(
(Position("2003"), 4.0),
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0)
)
), List(List(List((Position("2007"), 6.4)), List((Position("2007"), 7.2))))))
val prep8 = obj.prepare(Cell(Position("sales", "2008"), createContent(5)))
prep8 shouldBe List(5.0, 5.0)
val fifth = obj.update(Position("2008"), prep8, fourth._1)
fifth shouldBe ((List(
List(
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0),
(Position("2008"), 5.0)
),
List(
(Position("2004"), 6.0),
(Position("2005"), 5.0),
(Position("2006"), 8.0),
(Position("2007"), 9.0),
(Position("2008"), 5.0)
)
), List(List(List((Position("2008"), 6.6)), List((Position("2008"), 6.733333333333333))))))
val cells = obj.present(sel, fifth._2.toList(0))
cells shouldBe createCell("2008", 6.6, 6.733333333333333)
}
def createContent(value: Long): Content = Content(ContinuousSchema[Long](), value)
def createCell(year: String, value1: Double, value2: Double) = List(
Cell(Position("sales", year + ".simple"), Content(ContinuousSchema[Double](), value1)),
Cell(Position("sales", year + ".weighted"), Content(ContinuousSchema[Double](), value2))
)
}
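/**
 * Example window used by the tests below: update emits the difference between consecutive cell
 * values, and presentWithValue scales that delta by the external value stored for the selected
 * position.
 */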
case class DeltaWithValue() extends WindowWithValue[
Value[String] :: HNil,
Value[String] :: HNil,
HNil,
Value[String] :: HNil
] {
type V = Map[Position[Value[String] :: HNil], Content]
type I = Option[Double]
type T = Option[Double]
type O = Double
def prepareWithValue(cell: Cell[Value[String] :: HNil], ext: V): I = cell.content.value.as[Double]
def initialise(rem: Position[HNil], in: I): (T, TraversableOnce[O]) = (in, List())
def update(rem: Position[HNil], in: I, t: T): (T, TraversableOnce[O]) = (in, (in, t) match {
case (Some(dc), Some(dt)) => List(dc - dt)
case _ => List()
})
def presentWithValue(
pos: Position[Value[String] :: HNil],
out: O,
ext: V
): TraversableOnce[Cell[Value[String] :: HNil]] = List(
Cell(pos, Content(ContinuousSchema[Double](), ext(pos).value.as[Double].get * out))
)
}
class TestWithPrepareWindow extends TestGrimlock {
val str = Cell(Position("x"), getStringContent("foo"))
val dbl = Cell(Position("y"), getDoubleContent(3.14))
val lng = Cell(Position("z"), getLongContent(42))
val ext = Map(
Position("x") -> getDoubleContent(1),
Position("y") -> getDoubleContent(2),
Position("z") -> getDoubleContent(3)
)
def prepare(cell: Cell[Value[String] :: HNil]): Content = cell.content.value match {
case LongValue(_) => cell.content
case DoubleValue(_) => getStringContent("not.supported")
case StringValue(s) => getLongContent(s.length)
}
def prepareWithValue(
cell: Cell[Value[String] :: HNil],
ext: Map[Position[Value[String] :: HNil], Content]
): Content = (cell.content.value, ext(cell.position).value) match {
case (LongValue(l), DoubleValue(d)) => getLongContent(l * d.toLong)
case (DoubleValue(_), _) => getStringContent("not.supported")
case (StringValue(s), _) => getLongContent(s.length)
}
val locate = (sel: Position[Value[String] :: HNil], rem: Position[HNil]) => sel.toOption
def getLongContent(value: Long): Content = Content(DiscreteSchema[Long](), value)
def getDoubleContent(value: Double): Content = Content(ContinuousSchema[Double](), value)
def getStringContent(value: String): Content = Content(NominalSchema[String](), value)
"A Window" should "withPrepare prepare correctly" in {
val obj = CumulativeMovingAverage[
Value[String] :: HNil,
Value[String] :: HNil,
HNil,
Value[String] :: HNil
](locate).withPrepare(prepare)
obj.prepare(str) shouldBe 3.0
obj.prepare(dbl).compare(Double.NaN) shouldBe 0
obj.prepare(lng) shouldBe 42.0
}
it should "withPrepareWithValue correctly (without value)" in {
val obj = DeltaWithValue().withPrepare(prepare)
obj.prepareWithValue(str, ext) shouldBe Option(3.0)
obj.prepareWithValue(dbl, ext) shouldBe None
obj.prepareWithValue(lng, ext) shouldBe Option(42.0)
}
it should "withPrepareWithVaue correctly" in {
val obj = DeltaWithValue().withPrepareWithValue(prepareWithValue)
obj.prepareWithValue(str, ext) shouldBe Option(3.0)
obj.prepareWithValue(dbl, ext) shouldBe None
obj.prepareWithValue(lng, ext) shouldBe Option(3 * 42.0)
}
}
class TestAndThenMutateWindow extends TestGrimlock {
val str = Cell(Position("x"), getStringContent("foo"))
val dbl = Cell(Position("y"), getDoubleContent(3.14))
val lng = Cell(Position("z"), getLongContent(42))
val ext = Map(
Position("x") -> getDoubleContent(3),
Position("y") -> getDoubleContent(2),
Position("z") -> getDoubleContent(1)
)
def mutate(cell: Cell[Value[String] :: HNil]): Option[Content] = cell.position(_0) match {
case StringValue("x") => cell.content.toOption
case StringValue("y") => getStringContent("not.supported").toOption
case StringValue("z") => getLongContent(42).toOption
}
def mutateWithValue(
cell: Cell[Value[String] :: HNil],
ext: Map[Position[Value[String] :: HNil], Content]
): Option[Content] = (cell.position(_0), ext(cell.position).value) match {
case (StringValue("x"), DoubleValue(_)) => cell.content.toOption
case (StringValue("y"), _) => getStringContent("not.supported").toOption
case (StringValue("z"), DoubleValue(_)) => getLongContent(42).toOption
}
val locate = (sel: Position[Value[String] :: HNil], rem: Position[HNil]) => sel.toOption
def getLongContent(value: Long): Content = Content(DiscreteSchema[Long](), value)
def getDoubleContent(value: Double): Content = Content(ContinuousSchema[Double](), value)
def getStringContent(value: String): Content = Content(NominalSchema[String](), value)
"A Window" should "andThenMutate prepare correctly" in {
val obj = CumulativeMovingAverage[
Value[String] :: HNil,
Value[String] :: HNil,
HNil,
Value[String] :: HNil
](locate).andThenMutate(mutate)
obj.present(str.position, (Position(), 3.14)).toList shouldBe List(Cell(str.position, getDoubleContent(3.14)))
obj.present(dbl.position, (Position(), 3.14)).toList shouldBe List(
Cell(dbl.position, getStringContent("not.supported"))
)
obj.present(lng.position, (Position(), 3.14)).toList shouldBe List(Cell(lng.position, getLongContent(42)))
}
it should "andThenMutateWithValue correctly (without value)" in {
val obj = DeltaWithValue().andThenMutate(mutate)
obj.presentWithValue(str.position, 3.14, ext).toList shouldBe List(Cell(str.position, getDoubleContent(3 * 3.14)))
obj.presentWithValue(dbl.position, 3.14, ext).toList shouldBe List(
Cell(dbl.position, getStringContent("not.supported"))
)
obj.presentWithValue(lng.position, 3.14, ext).toList shouldBe List(Cell(lng.position, getLongContent(42)))
}
it should "andThenMutateWithVaue correctly" in {
val obj = DeltaWithValue().andThenMutateWithValue(mutateWithValue)
obj.presentWithValue(str.position, 3.14, ext).toList shouldBe List(Cell(str.position, getDoubleContent(3 * 3.14)))
obj.presentWithValue(dbl.position, 3.14, ext).toList shouldBe List(
Cell(dbl.position, getStringContent("not.supported"))
)
obj.presentWithValue(lng.position, 3.14, ext).toList shouldBe List(Cell(lng.position, getLongContent(42)))
}
}
| CommBank/grimlock | grimlock-core/src/test/scala/commbank/grimlock/TestWindows.scala | Scala | apache-2.0 | 38,073 |
package com.sksamuel.elastic4s.requests.searches.queries.term
import com.sksamuel.elastic4s.json.{XContentBuilder, XContentFactory}
import com.sksamuel.elastic4s.requests.searches.queries.FuzzyQuery
object FuzzyQueryBodyFn {
def apply(q: FuzzyQuery): XContentBuilder = {
val builder = XContentFactory.jsonBuilder()
builder.startObject("fuzzy")
builder.startObject(q.field)
builder.autofield("value", q.termValue)
q.maxExpansions.foreach(builder.field("max_expansions", _))
q.prefixLength.foreach(builder.field("prefix_length", _))
q.fuzziness.foreach(builder.field("fuzziness", _))
q.boost.foreach(builder.field("boost", _))
q.queryName.foreach(builder.field("_name", _))
builder.endObject()
builder.endObject()
builder.endObject()
builder
}
}
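// Illustrative only (not part of the original source): for a hypothetical query such as
// FuzzyQuery("user", "ki") with fuzziness "2" and boost 2.0 set, the builder above would emit
// JSON of roughly this shape:
// { "fuzzy": { "user": { "value": "ki", "fuzziness": "2", "boost": 2.0 } } }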
| stringbean/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/queries/term/FuzzyQueryBodyFn.scala | Scala | apache-2.0 | 803 |
package org.allenai.common.webapp
import org.allenai.common.testkit.UnitSpec
import spray.http.{ HttpHeader, HttpHeaders, HttpOrigin, SomeOrigins }
import spray.routing.HttpService
import spray.testkit.ScalatestRouteTest
/** Tests for our custom directives. */
class DirectivesSpec extends UnitSpec with ScalatestRouteTest with HttpService {
def actorRefFactory = system
// Test route. Has API and non-API routes.
// format: OFF
val testRoute =
get { path("foo") { complete { "foo" } } } ~
Directives.allowHosts("localhost", "ari.dev.allenai.org", "ari.prod.allenai.org") {
get { path("api") { complete { "api" } } }
} ~
Directives.allowHosts("localhost2") {
get{ path("api2") { complete { "api2" } } }
} ~
get { path("bar") { complete { "bar" } } }
// format: ON
def allowOriginHeader(hostname: String): HttpHeader = {
HttpHeaders.`Access-Control-Allow-Origin`(
SomeOrigins(Seq(HttpOrigin("http", HttpHeaders.Host(hostname))))
)
}
def addOriginHeader(origin: String): RequestTransformer = {
addHeader(HttpHeaders.Origin(Seq(HttpOrigin("http", HttpHeaders.Host(origin)))))
}
"jsonApi" should "complete without CORS headers by default" in {
Get("/api") ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(None)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(None)
responseAs[String] should be("api")
}
}
it should "complete directives before the api directive" in {
Get("/foo") ~> addOriginHeader("localhost") ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(None)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(None)
responseAs[String] should be("foo")
}
}
it should "complete directives after the api directive" in {
Get("/bar") ~> addOriginHeader("localhost") ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(None)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(None)
responseAs[String] should be("bar")
}
}
it should "complete with CORS headers when given a matching origin" in {
Get("/api") ~> addOriginHeader("localhost") ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(
Some(allowOriginHeader("localhost"))
)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(
Some(Headers.AccessControlAllowHeadersAll)
)
responseAs[String] should be("api")
}
}
it should "ignore ports and non-HTTP schemes" in {
val origin = HttpOrigin("https", HttpHeaders.Host("ari.dev.allenai.org", 8081))
Get("/api") ~> addHeader(HttpHeaders.Origin(Seq(origin))) ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(
Some(HttpHeaders.`Access-Control-Allow-Origin`(SomeOrigins(Seq(origin))))
)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(
Some(Headers.AccessControlAllowHeadersAll)
)
responseAs[String] should be("api")
}
}
it should "complete an OPTIONS request" in {
Options("/api") ~> addOriginHeader("localhost") ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(
Some(allowOriginHeader("localhost"))
)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(
Some(Headers.AccessControlAllowHeadersAll)
)
}
}
it should "complete properly to a secondary api" in {
Get("/api2") ~> addOriginHeader("localhost2") ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(
Some(allowOriginHeader("localhost2"))
)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(
Some(Headers.AccessControlAllowHeadersAll)
)
responseAs[String] should be("api2")
}
}
it should "complete an OPTIONS request to a seconary api" in {
Options("/api2") ~> addOriginHeader("localhost2") ~> testRoute ~> check {
header[HttpHeaders.`Access-Control-Allow-Origin`] should be(
Some(allowOriginHeader("localhost2"))
)
header[HttpHeaders.`Access-Control-Allow-Headers`] should be(
Some(Headers.AccessControlAllowHeadersAll)
)
}
}
}
| ryanai3/common | webapp/src/test/scala/org/allenai/common/webapp/DirectivesSpec.scala | Scala | apache-2.0 | 4,342 |
package net.combinatory.rtm
/* (c) rtm-scala contributors, 2012. All rights reserved. */
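/** No-op logging hooks; concrete components can override these to route messages to a real logger. */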
trait Logger {
protected[this] def logw(ref: AnyRef) = ()
protected[this] def logd(ref: AnyRef) = ()
protected[this] def loge(ref: AnyRef) = ()
protected[this] def loge(err: Throwable) = ()
}
| comb/rtm-scala | src/main/scala/net/combinatory/rtm/Logger.scala | Scala | apache-2.0 | 293 |
package org.apache.mesos.chronos.scheduler.config
import org.rogach.scallop.ScallopConf
trait CassandraConfiguration extends ScallopConf {
lazy val cassandraContactPoints = opt[String]("cassandra_contact_points",
descr = "Comma separated list of contact points for Cassandra",
default = None)
lazy val cassandraPort = opt[Int]("cassandra_port",
descr = "Port for Cassandra",
default = Some(9042))
lazy val cassandraKeyspace = opt[String]("cassandra_keyspace",
descr = "Keyspace to use for Cassandra",
default = Some("metrics"))
lazy val cassandraTable = opt[String]("cassandra_table",
descr = "Table to use for Cassandra",
default = Some("chronos"))
lazy val cassandraStatCountTable = opt[String]("cassandra_stat_count_table",
descr = "Table to track stat counts in Cassandra",
default = Some("chronos_stat_count"))
lazy val cassandraConsistency = opt[String]("cassandra_consistency",
descr = "Consistency to use for Cassandra",
default = Some("ANY"))
lazy val cassandraTtl = opt[Int]("cassandra_ttl",
descr = "TTL for records written to Cassandra",
default = Some(3600 * 24 * 365))
lazy val jobHistoryLimit = opt[Int]("job_history_limit",
descr = "Number of past job executions to show in history view",
default = Some(5))
}
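// Illustrative usage sketch (not part of the original file; the class name and arguments below
// are assumptions for demonstration only): a trait like this is typically mixed into a concrete
// ScallopConf and parsed from the command-line arguments, e.g.
//
// class MainConfiguration(args: Seq[String]) extends ScallopConf(args) with CassandraConfiguration
//
// val conf = new MainConfiguration(Seq("--cassandra_contact_points", "10.0.0.1,10.0.0.2"))
// conf.cassandraPort() // 9042 unless overridden on the command line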
| tony-kerz/chronos | src/main/scala/org/apache/mesos/chronos/scheduler/config/CassandraConfiguration.scala | Scala | apache-2.0 | 1,316 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import java.util.{Timer, TimerTask}
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.UI._
import org.apache.spark.status.api.v1.StageData
/**
 * ConsoleProgressBar shows the progress of stages in the next line of the console. It polls the
 * status of active stages from the app state store periodically; the progress bar is shown
 * once a stage has run for at least 500ms. If multiple stages run at the same time, their
 * statuses are combined and shown on one line.
 */
private[spark] class ConsoleProgressBar(sc: SparkContext) extends Logging {
// Carriage return
private val CR = '\\r'
// Update period of progress bar, in milliseconds
private val updatePeriodMSec = sc.getConf.get(UI_CONSOLE_PROGRESS_UPDATE_INTERVAL)
// Delay to show up a progress bar, in milliseconds
private val firstDelayMSec = 500L
// The width of terminal
private val TerminalWidth = sys.env.getOrElse("COLUMNS", "80").toInt
private var lastFinishTime = 0L
private var lastUpdateTime = 0L
private var lastProgressBar = ""
// Schedule a refresh thread to run periodically
private val timer = new Timer("refresh progress", true)
timer.schedule(new TimerTask{
override def run(): Unit = {
refresh()
}
}, firstDelayMSec, updatePeriodMSec)
/**
* Try to refresh the progress bar in every cycle
*/
private def refresh(): Unit = synchronized {
val now = System.currentTimeMillis()
if (now - lastFinishTime < firstDelayMSec) {
return
}
val stages = sc.statusStore.activeStages()
.filter { s => now - s.submissionTime.get.getTime() > firstDelayMSec }
if (stages.length > 0) {
      show(now, stages.take(3)) // display at most 3 stages at the same time
}
}
  /**
   * Show the progress bar in the console. The progress bar is displayed on the line following
   * your last output and keeps overwriting itself to stay on one line. Log output is printed
   * after the progress bar, and the bar is then redrawn on the next line without overwriting
   * the logs.
   */
private def show(now: Long, stages: Seq[StageData]): Unit = {
val width = TerminalWidth / stages.size
val bar = stages.map { s =>
val total = s.numTasks
val header = s"[Stage ${s.stageId}:"
val tailer = s"(${s.numCompleteTasks} + ${s.numActiveTasks}) / $total]"
val w = width - header.length - tailer.length
val bar = if (w > 0) {
val percent = w * s.numCompleteTasks / total
(0 until w).map { i =>
if (i < percent) "=" else if (i == percent) ">" else " "
}.mkString("")
} else {
""
}
header + bar + tailer
}.mkString("")
    // only refresh if it's changed OR after 1 minute (otherwise the ssh connection may be closed
    // after being idle for some time)
if (bar != lastProgressBar || now - lastUpdateTime > 60 * 1000L) {
System.err.print(CR + bar)
lastUpdateTime = now
}
lastProgressBar = bar
}
/**
   * Clear the progress bar if shown.
*/
private def clear(): Unit = {
if (!lastProgressBar.isEmpty) {
System.err.printf(CR + " " * TerminalWidth + CR)
lastProgressBar = ""
}
}
/**
   * Mark all the stages as finished and clear the progress bar if shown, so that the progress
   * does not interleave with the output of jobs.
*/
def finishAll(): Unit = synchronized {
clear()
lastFinishTime = System.currentTimeMillis()
}
/**
* Tear down the timer thread. The timer thread is a GC root, and it retains the entire
* SparkContext if it's not terminated.
*/
def stop(): Unit = timer.cancel()
}
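/**
 * Hypothetical usage sketch, not part of the original source: the bar drives itself from its
 * internal timer, so a caller only needs to construct it, clear it before printing results,
 * and cancel the timer on shutdown.
 */
private[spark] object ConsoleProgressBarExample {
  def withProgressBar[T](sc: SparkContext)(body: => T): T = {
    val bar = new ConsoleProgressBar(sc)
    try {
      body
    } finally {
      bar.finishAll() // clear the current line so job output does not interleave with the bar
      bar.stop()      // cancel the timer thread
    }
  }
}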
| rezasafi/spark | core/src/main/scala/org/apache/spark/ui/ConsoleProgressBar.scala | Scala | apache-2.0 | 4,484 |
package moe.pizza.eveapi.endpoints
import moe.pizza.eveapi.generated.account
import moe.pizza.eveapi._
import org.http4s.client.Client
class Account(baseurl: String, apikey: Option[XmlApiKey])(implicit c: Client) {
def AccountStatus() =
new ApiRequest[account.AccountStatus.Eveapi](baseurl, "Account/AccountStatus.xml.aspx", apikey)
.apply()
.map(r => new XMLApiResponse(r.currentTime.toDateTime, r.cachedUntil.toDateTime, r.result))
def APIKeyInfo() =
new ApiRequest[account.APIKeyInfo.Eveapi](baseurl, "Account/APIKeyInfo.xml.aspx", apikey)
.apply()
.map(r => new XMLApiResponse(r.currentTime.toDateTime, r.cachedUntil.toDateTime, r.result))
def Characters() =
new ApiRequest[account.Characters.Eveapi](baseurl, "Account/Characters.xml.aspx", apikey)
.apply()
.map(r => new XMLApiResponse(r.currentTime.toDateTime, r.cachedUntil.toDateTime, r.result))
}
| xxpizzaxx/pizza-eveapi | src/main/scala/moe/pizza/eveapi/endpoints/Account.scala | Scala | mit | 912 |
package com.jensraaby.restbucks.controllers
import javax.inject.Inject
import com.jensraaby.restbucks.orders.{Order, OrderService}
import com.twitter.finagle.http.Request
import com.twitter.finatra.http.Controller
import com.twitter.inject.domain.WrappedValue
case class WrappedOrder(data: OrderReq) extends WrappedValue[OrderReq]
case class OrderReq(id: String)
class OrderController @Inject()(orderService: OrderService) extends Controller {
post("/order") { order: Order =>
time("creating order took %d millis") {
orderService.create(order)
}
}
// get("/orders") { request: Request =>
// request.encodeString()
// }
get("/orders/:id") { request: Request =>
infoResult("Looked up order: %s") {
WrappedOrder(OrderReq(request.params("id")))
}
}
}
| jensraaby/restbucks-finatra | src/main/scala/com/jensraaby/restbucks/controllers/OrderController.scala | Scala | mit | 796 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.online.executor
import com.twitter.algebird.{SummingQueue, Semigroup, MapAlgebra}
import com.twitter.bijection.Injection
import com.twitter.util.Future
import com.twitter.summingbird.online.Externalizer
import com.twitter.summingbird.online.{FlatMapOperation, AsyncCache, CacheBuilder}
import com.twitter.summingbird.option.CacheSize
import com.twitter.summingbird.online.option.{
MaxWaitingFutures,
MaxFutureWaitTime,
MaxEmitPerExecute,
FlushFrequency
}
/**
* @author Oscar Boykin
* @author Sam Ritchie
* @author Ashu Singhal
* @author Ian O Connell
*/
// This is not a user-settable variable.
// It's usually supplied by the planning system to ensure it's large enough to cover the space
// used by the summers times some delta.
private[summingbird] case class KeyValueShards(get: Int) {
def summerIdFor[K](k: K): Int = k.hashCode % get
}
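// Illustrative sketch, not part of the original source: keys are routed to summer shards by
// hashCode modulo the shard count, so a given key always maps to the same shard. Note that a
// negative hashCode yields a negative shard id with this formula.
private[summingbird] object KeyValueShardsExample {
  val shards = KeyValueShards(8)
  val shardForKey: Int = shards.summerIdFor("user-42") // deterministic for a given key
}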
class FinalFlatMap[Event, Key, Value: Semigroup, S <: InputState[_], D](
@transient flatMapOp: FlatMapOperation[Event, (Key, Value)],
cacheBuilder: CacheBuilder[Int, (List[S], Map[Key, Value])],
maxWaitingFutures: MaxWaitingFutures,
maxWaitingTime: MaxFutureWaitTime,
maxEmitPerExec: MaxEmitPerExecute,
summerShards: KeyValueShards,
pDecoder: Injection[Event, D],
pEncoder: Injection[(Int, Map[Key, Value]), D]
)
extends AsyncBase[Event, (Int, Map[Key, Value]), S, D](maxWaitingFutures,
maxWaitingTime,
maxEmitPerExec) {
type InS = S
type OutputElement = (Int, Map[Key, Value])
val encoder = pEncoder
val decoder = pDecoder
val lockedOp = Externalizer(flatMapOp)
lazy val sCache = cacheBuilder(implicitly[Semigroup[(List[S], Map[Key, Value])]])
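  // Turns flushed cache contents (summer shard id -> (input states, aggregated key-values)) into
  // the tuples emitted downstream. An empty aggregated map produces no output element, but the
  // input states are still returned so they can be acknowledged.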
private def formatResult(outData: Map[Int, (List[S], Map[Key, Value])])
: TraversableOnce[(List[S], Future[TraversableOnce[OutputElement]])] = {
outData.iterator.map { case (outerKey, (tupList, valList)) =>
if(valList.isEmpty) {
(tupList, Future.value(Nil))
} else {
(tupList, Future.value(List((outerKey, valList))))
}
}
}
override def tick: Future[TraversableOnce[(List[S], Future[TraversableOnce[OutputElement]])]] = {
sCache.tick.map(formatResult(_))
}
def cache(state: S,
items: TraversableOnce[(Key, Value)]): Future[TraversableOnce[(List[S], Future[TraversableOnce[OutputElement]])]] = {
try {
val itemL = items.toList
if(itemL.size > 0) {
state.fanOut(itemL.size)
sCache.insert(itemL.map{case (k, v) =>
summerShards.summerIdFor(k) -> (List(state), Map(k -> v))
}).map(formatResult(_))
}
      else { // Here we handle mapping to nothing, option map, et al.
Future.value(
List(
(List(state), Future.value(Nil))
)
)
}
}
catch {
case t: Throwable => Future.exception(t)
}
}
override def apply(state: S,
tup: Event) =
lockedOp.get.apply(tup).map { cache(state, _) }.flatten
override def cleanup {
lockedOp.get.close
sCache.cleanup
}
}
| surabhiiyer/summingbird | summingbird-online/src/main/scala/com/twitter/summingbird/online/executor/FinalFlatMap.scala | Scala | apache-2.0 | 3,763 |
package concrete.generator.cspompatterns
import cspom.CSPOM._
import cspom.CSPOMConstraint
import cspom.compiler.ACCSE
import cspom.variable._
sealed trait MinMaxType {
def function: String
}
case object MinType extends MinMaxType {
def function = "min"
}
case object MaxType extends MinMaxType {
def function = "max"
}
object ACCSE_MinMax extends ACCSE[MinMaxType] {
def functions: Seq[String] = Seq("min", "max")
override def define(subexp: List[Arg], aux: CSPOMVariable[_]): (Arg, CSPOMConstraint[_]) = {
val typ = subexp.head._2
assert(subexp.forall(_._2 == typ))
val constraint = CSPOMConstraint(aux)(typ.function)(subexp.map(_._1): _*)
val arg = (aux, typ)
(arg, constraint)
}
override def replace(subexp: List[Arg], arg: Arg, constraint: Args): Option[Arg] = {
assert(subexp.forall(_._2 == arg._2))
assert(constraint.values.forall(_ == arg._2))
if (subexp.forall { case (expr, typ) => constraint(expr).contains(typ) }) {
constraint --= subexp.map(_._1)
constraint += arg
Some(arg)
} else {
None
}
}
override def constraintToArgs(c: CSPOMConstraint[_]): IndexedSeq[Arg] = {
c.function match {
case "min" => c.arguments.map(v => (v, MinType)).toIndexedSeq
case "max" => c.arguments.map(v => (v, MaxType)).toIndexedSeq
}
}
override def argsToConstraint(original: CSPOMConstraint[_], args: ACCSE_MinMax.Args): CSPOMConstraint[_] = {
val typ = args.head._2
require(args.forall(_._2 == typ))
CSPOMConstraint(original.result)(typ.function)(args.keys: _*)
}
override def canonize(args: List[Arg]): List[Arg] = args
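  // Builds the common sub-expression of `se1` and `se2` that must contain `including`: the
  // mandatory `including` args plus any further args present in both lists, kept in the order
  // in which they appear in `se2`.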
override def intersect(se1: List[Arg], se2: List[Arg], including: List[Arg]): List[Arg] = {
val typ = se1.head._2
assert(se1.forall(_._2 == typ))
assert(se2.forall(_._2 == typ))
val se1Set = se1.toSet
assert(including.forall(se1.contains))
assert {
val se2Set = se2.toSet
including.forall(se2Set.contains)
}
val additional1 = se1Set -- including
val additional2 = se2.filter(additional1)
including ++ additional2
}
} | concrete-cp/concrete | src/main/scala/concrete/generator/cspompatterns/ACCSE_MinMax.scala | Scala | lgpl-2.1 | 2,123 |
// scalac: -Ymacro-annotations -Wunused:params -Werror
@mymacro
class X
object Test {
println(X.f(123))
}
| lrytz/scala | test/files/pos/macro-annot-unused-param/Test_2.scala | Scala | apache-2.0 | 109 |
package lore.compiler.semantics.core
import lore.compiler.semantics.NamePath
import lore.compiler.types.{BasicType, TraitSchema, Type, TypeSchema}
/**
* A core trait is a trait that the compiler might directly depend on. For example, type introspection works with a
* `Type` trait that represents run-time types.
*
* @param schema The underlying schema. `None` if it cannot be resolved.
*/
class CoreTrait(val name: NamePath, val schema: Option[TraitSchema]) {
val schemaOrNothing: TypeSchema = schema.getOrElse(BasicType.Nothing)
val schemaOrAny: TypeSchema = schema.getOrElse(BasicType.Any)
val typeOrNothing: Type = schema.map(_.constantType).getOrElse(BasicType.Nothing)
val typeOrAny: Type = schema.map(_.constantType).getOrElse(BasicType.Any)
}
| marcopennekamp/lore | compiler/src/lore/compiler/semantics/core/CoreTrait.scala | Scala | mit | 771 |
package com.wavesplatform.it.async
import com.typesafe.config.{Config, ConfigFactory}
import com.wavesplatform.it.BaseFreeSpec
import com.wavesplatform.it.NodeConfigs.Default
import com.wavesplatform.it.api.AsyncHttpApi._
import com.wavesplatform.test._
import scala.concurrent.Future.traverse
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.util.Random
class MicroblocksFeeTestSuite extends BaseFreeSpec {
private def firstAddress = nodes(1).address
private def txRequestsGen(n: Int, fee: Long): Future[Unit] = {
val parallelRequests = 10
def requests(n: Int): Future[Unit] =
Future
.sequence {
          // A non-mining node sends transfer transactions to another non-mining node
          // Mining nodes collect the fees
(1 to n).map { _ =>
notMiner.transfer(notMiner.address, firstAddress, (1 + Random.nextInt(10)).waves, fee)
}
}
.map(_ => ())
val steps = (1 to n)
.sliding(parallelRequests, parallelRequests)
.map(_.size)
steps.foldLeft(Future.successful(())) { (r, numRequests) =>
r.flatMap(_ => requests(numRequests))
}
}
"fee distribution when NG activates" in {
val f = for {
_ <- traverse(nodes)(_.height).map(_.max)
_ <- traverse(nodes)(_.waitForHeight(microblockActivationHeight - 1))
_ <- txRequestsGen(200, 2.waves)
_ <- traverse(nodes)(_.waitForHeight(microblockActivationHeight + 3))
initialBalances <- notMiner.debugStateAt(microblockActivationHeight - 1) //100%
balancesBeforeActivation <- notMiner.debugStateAt(microblockActivationHeight) // 100%
blockBeforeActivation <- notMiner.blockHeadersAt(microblockActivationHeight)
balancesOnActivation <- notMiner.debugStateAt(microblockActivationHeight + 1) // 40%
blockOnActivation <- notMiner.blockHeadersAt(microblockActivationHeight + 1)
balancesAfterActivation <- notMiner.debugStateAt(microblockActivationHeight + 2) // 60% of previous + 40% of current
blockAfterActivation <- notMiner.blockHeadersAt(microblockActivationHeight + 2)
} yield {
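      // Under NG, a block's generator immediately receives the block reward plus 40% of that
      // block's fees; the remaining 60% is credited to the generator of the following block.
      // The three assertions below check this across the activation boundary.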
balancesBeforeActivation(blockBeforeActivation.generator) shouldBe {
nodes.head.settings.blockchainSettings.rewardsSettings.initial +
initialBalances(blockBeforeActivation.generator) + blockBeforeActivation.totalFee
}
balancesOnActivation(blockOnActivation.generator) shouldBe {
nodes.head.settings.blockchainSettings.rewardsSettings.initial +
balancesBeforeActivation(blockOnActivation.generator) + blockOnActivation.totalFee * 4 / 10
}
balancesAfterActivation(blockAfterActivation.generator) shouldBe {
nodes.head.settings.blockchainSettings.rewardsSettings.initial +
balancesOnActivation(blockAfterActivation.generator) + blockOnActivation.totalFee * 6 / 10 +
blockAfterActivation.totalFee * 4 / 10
}
}
Await.result(f, 5.minute)
}
private val microblockActivationHeight = 10
private val minerConfig = ConfigFactory.parseString(
s"""waves {
| blockchain.custom.functionality.pre-activated-features.2 = $microblockActivationHeight
| miner.quorum = 3
|}
""".stripMargin
)
private val notMinerConfig = ConfigFactory.parseString(
s"""waves {
| blockchain.custom.functionality.pre-activated-features.2 = $microblockActivationHeight
| miner.enable = no
|}
""".stripMargin
)
override protected val nodeConfigs: Seq[Config] = Seq(
minerConfig.withFallback(Default(0)),
notMinerConfig.withFallback(Default(1)),
notMinerConfig.withFallback(Default(2)),
notMinerConfig.withFallback(Default(3))
)
}
| wavesplatform/Waves | node-it/src/test/scala/com/wavesplatform/it/async/MicroblocksFeeTestSuite.scala | Scala | mit | 3,752 |
// This test case used to fail when mixin forwarders were generated before erasure,
// it doesn't anymore since the forwarders generated after erasure do not clash,
// the comments are preserved for posterity.
class Foo
trait One[X] {
def concat(suffix: Int): X = ???
}
trait Two[Y <: Foo] {
def concat[Dummy](suffix: Int): Y = ???
}
class Bar1 extends One[Foo]
// Because mixin forwarders are generated before erasure, we get:
// override def concat(suffix: Int): Foo
class Bar2 extends Bar1 with Two[Foo] // error
// We get a mixin forwarder for Two:
// override def concat[Dummy](suffix: Int): Foo
// which gets erased to:
// override def concat(suffix: Int): Foo
// This clashes with the forwarder generated in Bar1, and the compiler detects that:
//
// |class Bar2 extends Bar1 with Two[Foo]
// | ^
// | Name clash between defined and inherited member:
// | override def concat(suffix: Int): Foo in class Bar1 and
// | override def concat: [Dummy](suffix: Int): Foo in class Bar2
// | have the same type after erasure.
//
// But note that the compiler is able to see the mixin forwarder in Bar1
// only because of joint compilation, this doesn't work with separate
// compilation as in mixin-forwarder-clash2.
| som-snytt/dotty | tests/pos/mixin-forwarder-clash1.scala | Scala | apache-2.0 | 1,290 |
/**
* Copyright (c) 2016 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.trustedanalytics.sparktk.graph.internal.ops.orientdb
import com.tinkerpop.blueprints.Vertex
import com.tinkerpop.blueprints.impls.orient.OrientGraphNoTx
import org.apache.spark.sql.Row
import org.graphframes.GraphFrame
import scala.collection.JavaConverters._
import scala.collection.mutable
/**
 * exports graph frame vertices to OrientDB
*
* @param orientGraph OrientDB graph database
*/
class VertexWriter(orientGraph: OrientGraphNoTx) {
/**
* converts Spark SQL Row to OrientDB vertex
*
* @param row row
* @param vertexTypeColumnName the given column name for vertex type
*/
def create(row: Row, vertexTypeColumnName: Option[String] = None): Vertex = {
val propMap = mutable.Map[String, Any]()
val vertexType = if (vertexTypeColumnName.isDefined) {
row.getAs[String](vertexTypeColumnName.get)
}
else {
orientGraph.getVertexBaseType.getName
}
val propKeysIterator = orientGraph.getRawGraph.getMetadata.getSchema.getClass(vertexType).properties().iterator()
while (propKeysIterator.hasNext) {
val propKey = propKeysIterator.next().getName
if (propKey == exportGraphParam.vertexId) {
propMap.put(propKey, row.getAs(GraphFrame.ID))
}
else if (row.getAs(propKey) != null) {
propMap.put(propKey, row.getAs(propKey))
}
}
orientGraph.addVertex(s"class:$vertexType", propMap.asJava)
}
/**
* finds a vertex
*
* @param vertexId vertex ID
* @return OrientDB vertex if exists or null if not found
*/
def find(vertexId: Any): Option[Vertex] = {
val vertices = orientGraph.getVertices(exportGraphParam.vertexId, vertexId)
val vertexIterator = vertices.iterator()
if (vertexIterator.hasNext) {
val existingVertex = vertexIterator.next()
return Some(existingVertex)
}
None
}
/**
   * looks up a vertex in the OrientDB graph, or creates a new vertex if not found
*
* @param vertexId vertex ID
* @return OrientDB vertex
*/
def findOrCreate(vertexId: Any): Vertex = {
val vertexType = orientGraph.getVertexBaseType.getName
val vertex = find(vertexId)
if (vertex.isEmpty) {
orientGraph.addVertex(s"class:$vertexType", exportGraphParam.vertexId, vertexId.toString)
}
else {
vertex.get
}
}
}
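/**
 * Illustrative sketch, not part of the original source: a typical export loop creates one
 * OrientDB vertex per graph frame vertex row using the writer defined above.
 */
object VertexWriterExample {
  def exportVertices(orientGraph: OrientGraphNoTx, vertexRows: Iterator[Row]): Unit = {
    val writer = new VertexWriter(orientGraph)
    vertexRows.foreach(row => writer.create(row))
  }
}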
| ashaarunkumar/spark-tk | sparktk-core/src/main/scala/org/trustedanalytics/sparktk/graph/internal/ops/orientdb/VertexWriter.scala | Scala | apache-2.0 | 3,025 |
package lostvaults.tests
import org.scalatest.FunSuite
import lostvaults.server.Room
class RoomTest extends FunSuite {
// implicit lazy val system = ActorSystem("RoomSystem")
var room = new Room()
val TestMan = "testman"
val playerList: List[String] = List("Philip", "Jimmy", "Anna", "Felix", "Fredrik")
test("This test checks if non existing player is not in room!") {
assertResult(false) {
room.hasPlayer(TestMan)
}
}
test("This test checks if added player in playerlist is added!") {
room.addPlayer(TestMan)
assert(room.hasPlayer(TestMan))
}
test("This test checks if removed player in playerlist is removed!") {
room.removePlayer(TestMan)
assert(!room.hasPlayer(TestMan))
}
test("This test checks the playerlist of a room") {
room.addPlayer("Jimmy")
room.addPlayer("Anna")
room.addPlayer("Philip")
room.addPlayer("Fredrik")
room.addPlayer("Felix")
TestHelpFunctions.equalsWithoutOrder(room.getPlayerList, playerList)
}
test("This test checks if a player is in the room") {
room.addPlayer("Jimmy")
room.addPlayer("Anna")
assert(room.hasPlayer("Anna"))
assert(room.hasPlayer("Jimmy"))
}
test("This test "){
}
  /* test("This test checks if non existing item is not in room!") {
assertResult(false) {
room.hasItem(TestMan)
}
}
test("This test checks if added item in itemlist is added!") {
room.addItem(TestMan)
assert(room.hasItem(TestMan))
}
test("This test checks if removed item in itemlist is removed!") {
room.removeItem(TestMan)
assert(room.hasItem(TestMan))
}
test("This test checks if non existing NPC is not in room!") {
assertResult(false) {
room.hasNPC(TestMan)
}
}
test("This test checks if added NPC in NPC is added!") {
room.addNPC(TestMan)
assert(room.hasNPC(TestMan))
}
test("This test checks if removed NPC in NPClist is removed!") {
room.removeNPC(TestMan)
assert(room.hasNPC(TestMan))
}
*/
}
| senilica/LostVaults | src/lostvaults/tests/RoomTest.scala | Scala | mit | 2,064 |
package scife.enumeration
trait Empty extends Finite[Nothing] {
override def size = 0
override def apply(ind: Int) = throw new NoSuchElementException("no elements in Empty")
}
object Empty extends Empty
| kaptoxic/SciFe | src/main/scala/scife/enumeration/Empty.scala | Scala | gpl-2.0 | 212 |
/***
* Copyright 2014 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker
import javax.management.InstanceAlreadyExistsException
import com.rackspace.cloud.api.wadl.Converters._
import com.rackspace.com.papi.components.checker.handler.InstrumentedHandler
import org.junit.runner.RunWith
import org.scalatestplus.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ValidatorWADLDestroySuite extends BaseValidatorSuite {
val testWADL = <application xmlns="http://wadl.dev.java.net/2009/02">
<grammars/>
<resources base="https://test.api.openstack.com">
<resource/>
</resources>
</application>
test("If destroy is called the same WADL may be created twice with the same name") {
var validator1 = Validator("ATestWADL",testWADL, assertConfig)
validator1.destroy
validator1 = null
var validator2 = Validator("ATestWADL",testWADL, assertConfig)
validator2.destroy
validator2 = null
}
test("If destroy is called the same WADL may be created twice with the same name (with instrumented handler)") {
val instConfig = TestConfig()
instConfig.resultHandler = new InstrumentedHandler
var validator1 = Validator("ATestWADLInst",testWADL, instConfig)
validator1.destroy
validator1 = null
var validator2 = Validator("ATestWADLInst",testWADL, instConfig)
validator2.destroy
validator2 = null
}
  test("If destroy is *NOT* called then creating the same validator with the same name should cause an exception") {
var validator1 = Validator("ATestWADL",testWADL, assertConfig)
validator1 = null
intercept[InstanceAlreadyExistsException] {
var validator2 = Validator("ATestWADL",testWADL, assertConfig)
}
}
  test("If destroy is *NOT* called then creating the same validator with the same name should cause an exception (with instrumented handler)") {
val instConfig = TestConfig()
instConfig.resultHandler = new InstrumentedHandler
var validator1 = Validator("ATestWADLInst",testWADL, instConfig)
validator1 = null
intercept[InstanceAlreadyExistsException] {
var validator2 = Validator("ATestWADLInst",testWADL, instConfig)
}
}
}
| rackerlabs/api-checker | core/src/test/scala/com/rackspace/com/papi/components/checker/ValidatorWADLDestroySuite.scala | Scala | apache-2.0 | 2,842 |
package org.scalaide.ui.internal.preferences
import scala.collection.mutable.ListBuffer
import org.eclipse.core.runtime.preferences.AbstractPreferenceInitializer
import org.eclipse.jface.preference.ColorFieldEditor
import org.eclipse.jface.preference.PreferencePage
import org.eclipse.swt.SWT
import org.eclipse.swt.layout.GridData
import org.eclipse.swt.layout.GridLayout
import org.eclipse.swt.widgets.Composite
import org.eclipse.swt.widgets.Control
import org.eclipse.swt.widgets.Group
import org.eclipse.ui.IWorkbench
import org.eclipse.ui.IWorkbenchPreferencePage
import org.scalaide.core.IScalaPlugin
import EditorPreferencePage._
class EditorPreferencePage extends PreferencePage with IWorkbenchPreferencePage {
private val store = IScalaPlugin().getPreferenceStore()
private val preferencesToSave = ListBuffer[() => Unit]()
override def performOk(): Boolean = {
preferencesToSave foreach (_())
super.performOk()
}
override def init(workbench: IWorkbench): Unit = {}
override def createContents(parent: Composite): Control = {
setPreferenceStore(store)
val base = new Composite(parent, SWT.NONE)
base.setLayout(new GridLayout(1, true))
base.setLayoutData(new GridData(SWT.FILL, SWT.FILL, true, true))
createSettingsGroup(base)
createIndentGuideGroup(base)
base
}
private def createSettingsGroup(base: Composite): Unit = {
val typing = group("Typing", base)
checkBox(P_ENABLE_AUTO_CLOSING_COMMENTS, "Automatically close multi line comments and Scaladoc", typing)
checkBox(P_ENABLE_AUTO_ESCAPE_LITERALS, "Automatically escape \\" signs in string literals", typing)
checkBox(P_ENABLE_AUTO_ESCAPE_SIGN, "Automatically escape \\\\ signs in string and character literals", typing)
checkBox(P_ENABLE_AUTO_REMOVE_ESCAPED_SIGN, "Automatically remove complete escaped sign in string and character literals", typing)
checkBox(P_ENABLE_AUTO_BREAKING_COMMENTS, "Automatically break multi-line comments and Scaladoc after the Print Margin", typing)
val indent = group("Indentation", base)
checkBox(P_ENABLE_AUTO_INDENT_ON_TAB, "Automatically indent when tab is pressed", indent)
checkBox(P_ENABLE_AUTO_INDENT_MULTI_LINE_STRING, "Enable auto indent for multi line string literals", indent)
checkBox(P_ENABLE_AUTO_STRIP_MARGIN_IN_MULTI_LINE_STRING, "Automatically add strip margins when multi line string starts with a |", indent)
val highlighting = group("Highlighting", base)
    checkBox(P_ENABLE_MARK_OCCURRENCES, "Mark Occurrences of the selected element in the current file", highlighting)
checkBox(P_SHOW_INFERRED_SEMICOLONS, "Show inferred semicolons", highlighting)
val completion = group("Completion", base)
checkBox(P_ENABLE_HOF_COMPLETION, "Always insert lambdas when completing higher-order functions", completion)
val outline = group("Outline", base)
checkBox(P_INITIAL_IMPORT_FOLD, "Fold import nodes by default", outline)
}
private def group(text: String, parent: Composite): Group = {
val g = new Group(parent, SWT.NONE)
g.setText(text)
g.setLayout(new GridLayout(1, true))
g.setLayoutData(new GridData(SWT.FILL, SWT.DEFAULT, true, false))
g
}
import org.scalaide.util.eclipse.SWTUtils.CheckBox
private def checkBox(preference: String, labelText: String, parent: Composite): CheckBox = {
val b = new CheckBox(store, preference, labelText, parent)
preferencesToSave += { () => b.store() }
b
}
private def createIndentGuideGroup(base: Composite): Unit = {
val indentGuide = group("Indent Guide", base)
val enable = checkBox(INDENT_GUIDE_ENABLE, "Enable the indent guide", indentGuide)
val color = new Composite(indentGuide, SWT.NONE)
val c = new ColorFieldEditor(INDENT_GUIDE_COLOR, "Color:", color)
c.setPreferenceStore(store)
c.load()
preferencesToSave += { () => c.store() }
def enableControls(b: Boolean) = c.setEnabled(b, color)
enable += (_ => enableControls(enable.isChecked))
enableControls(enable.isChecked)
}
}
object EditorPreferencePage {
private final val BASE = "scala.tools.eclipse.editor."
final val P_ENABLE_AUTO_CLOSING_COMMENTS = BASE + "autoClosingComments"
final val P_ENABLE_AUTO_ESCAPE_LITERALS = BASE + "autoEscapeLiterals"
final val P_ENABLE_AUTO_ESCAPE_SIGN = BASE + "autoEscapeSign"
final val P_ENABLE_AUTO_REMOVE_ESCAPED_SIGN = BASE + "autoRemoveEscapedSign"
final val P_ENABLE_AUTO_INDENT_ON_TAB = BASE + "autoIndent"
final val P_ENABLE_AUTO_INDENT_MULTI_LINE_STRING = BASE + "autoIndentMultiLineString"
final val P_ENABLE_AUTO_STRIP_MARGIN_IN_MULTI_LINE_STRING = BASE + "autoStringMarginInMultiLineString"
final val P_ENABLE_AUTO_BREAKING_COMMENTS = BASE + "autoBreakingComments"
final val P_ENABLE_MARK_OCCURRENCES = BASE + "markOccurences"
final val P_SHOW_INFERRED_SEMICOLONS = BASE + "showInferredSemicolons"
final val INDENT_GUIDE_ENABLE = BASE + "indentGuideEnable"
final val INDENT_GUIDE_COLOR = BASE + "indentGuideColor"
final val P_ENABLE_HOF_COMPLETION = BASE + "completionAlwaysLambdas"
final val P_INITIAL_IMPORT_FOLD = BASE + "initialImportFold"
}
class EditorPreferenceInitializer extends AbstractPreferenceInitializer {
override def initializeDefaultPreferences(): Unit = {
val store = IScalaPlugin().getPreferenceStore
store.setDefault(P_ENABLE_AUTO_CLOSING_COMMENTS, true)
store.setDefault(P_ENABLE_AUTO_ESCAPE_LITERALS, false)
store.setDefault(P_ENABLE_AUTO_ESCAPE_SIGN, false)
store.setDefault(P_ENABLE_AUTO_REMOVE_ESCAPED_SIGN, false)
store.setDefault(P_ENABLE_AUTO_INDENT_ON_TAB, true)
store.setDefault(P_ENABLE_AUTO_INDENT_MULTI_LINE_STRING, false)
store.setDefault(P_ENABLE_AUTO_STRIP_MARGIN_IN_MULTI_LINE_STRING, false)
store.setDefault(P_ENABLE_AUTO_BREAKING_COMMENTS, false)
store.setDefault(P_ENABLE_MARK_OCCURRENCES, false)
store.setDefault(P_ENABLE_HOF_COMPLETION, true)
// TODO This preference is added in 4.0. Delete the former preference once support for the former release is dropped.
store.setDefault(P_SHOW_INFERRED_SEMICOLONS, store.getBoolean("actions.showInferredSemicolons"))
store.setDefault(INDENT_GUIDE_ENABLE, false)
store.setDefault(INDENT_GUIDE_COLOR, "72,72,72")
store.setDefault(P_INITIAL_IMPORT_FOLD, true)
}
}
| stephenh/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/ui/internal/preferences/EditorPreferencePage.scala | Scala | bsd-3-clause | 6,327 |
import sbt._
class TestProject(info: ProjectInfo) extends ParentProject(info)
{
val addRepo = "Extra Test Repository" at "http://dev.camptocamp.com/files/m2_repo/"
val sub = project("sub", "Sub Project", new SubProject(_))
def ivyCacheDirectory = outputPath / "ivy-cache"
override def updateOptions = CacheDirectory(ivyCacheDirectory) :: super.updateOptions.toList
class SubProject(info: ProjectInfo) extends DefaultProject(info)
{
def ivyCacheDirectory = outputPath / "ivy-cache"
override def updateOptions = CacheDirectory(ivyCacheDirectory) :: super.updateOptions.toList
override def ivyXML =
<dependencies>
<dependency org="com.camptocamp.tl.caltar" name="core" rev="0.5" transitive="false"/>
</dependencies>
}
} | sbt/sbt-zero-seven | src/sbt-test/dependency-management/inherit-repo/changes/CorrectProject.scala | Scala | bsd-3-clause | 743 |
package spark
import org.slf4j.Logger
import org.slf4j.LoggerFactory
/**
* Utility trait for classes that want to log data. Creates a SLF4J logger for the class and allows
* logging messages at different levels using methods that only evaluate parameters lazily if the
* log level is enabled.
*/
trait Logging {
// Make the log field transient so that objects with Logging can
// be serialized and used on another machine
@transient
private var log_ : Logger = null
// Method to get or create the logger for this object
protected def log: Logger = {
if (log_ == null) {
var className = this.getClass.getName
// Ignore trailing $'s in the class names for Scala objects
if (className.endsWith("$")) {
className = className.substring(0, className.length - 1)
}
log_ = LoggerFactory.getLogger(className)
}
return log_
}
// Log methods that take only a String
protected def logInfo(msg: => String) {
if (log.isInfoEnabled) log.info(msg)
}
protected def logDebug(msg: => String) {
if (log.isDebugEnabled) log.debug(msg)
}
protected def logTrace(msg: => String) {
if (log.isTraceEnabled) log.trace(msg)
}
protected def logWarning(msg: => String) {
if (log.isWarnEnabled) log.warn(msg)
}
protected def logError(msg: => String) {
if (log.isErrorEnabled) log.error(msg)
}
// Log methods that take Throwables (Exceptions/Errors) too
protected def logInfo(msg: => String, throwable: Throwable) {
if (log.isInfoEnabled) log.info(msg, throwable)
}
protected def logDebug(msg: => String, throwable: Throwable) {
if (log.isDebugEnabled) log.debug(msg, throwable)
}
protected def logTrace(msg: => String, throwable: Throwable) {
if (log.isTraceEnabled) log.trace(msg, throwable)
}
protected def logWarning(msg: => String, throwable: Throwable) {
if (log.isWarnEnabled) log.warn(msg, throwable)
}
protected def logError(msg: => String, throwable: Throwable) {
if (log.isErrorEnabled) log.error(msg, throwable)
}
// Method for ensuring that logging is initialized, to avoid having multiple
// threads do it concurrently (as SLF4J initialization is not thread safe).
protected def initLogging() { log }
}
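/**
 * Hypothetical usage sketch, not part of the original source: because the message parameters
 * are by-name, the string concatenations below are only evaluated when the corresponding log
 * level is enabled.
 */
private[spark] object LoggingExample extends Logging {
  def attempt(taskId: Int): Unit = {
    logDebug("attempting task " + taskId)
    try {
      // ... perform the work here ...
    } catch {
      case e: Exception => logError("task " + taskId + " failed", e)
    }
  }
}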
| joeywen/spark_cpp_api | core/src/main/scala/spark/Logging.scala | Scala | bsd-3-clause | 2,259 |
/*
* Copyright (c) 2014-16 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import scala.language.experimental.macros
import scala.reflect.macros.whitebox
object labelled {
/**
* The type of fields with keys of singleton type `K` and value type `V`.
*/
type FieldType[K, +V] = V with KeyTag[K, V]
trait KeyTag[K, +V]
/**
* Yields a result encoding the supplied value with the singleton type `K' of its key.
*/
def field[K] = new FieldBuilder[K]
class FieldBuilder[K] {
def apply[V](v : V): FieldType[K, V] = v.asInstanceOf[FieldType[K, V]]
}
}
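/**
 * Illustrative sketch, not part of the original source: `field` only tags the value with its
 * key type via a cast, so the tagged value is still usable wherever a plain `V` is expected.
 */
private[shapeless] object FieldExample {
  import labelled._
  trait NameKey // stand-in phantom key type for this sketch
  val tagged: FieldType[NameKey, String] = field[NameKey]("shapeless")
  val untagged: String = tagged // FieldType[K, V] is a subtype of V
}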
trait DefaultSymbolicLabelling[T] extends DepFn0 with Serializable { type Out <: HList }
object DefaultSymbolicLabelling {
type Aux[T, Out0] = DefaultSymbolicLabelling[T] { type Out = Out0 }
def apply[T](implicit lab: DefaultSymbolicLabelling[T]): Aux[T, lab.Out] = lab
implicit def mkDefaultSymbolicLabelling[T]: DefaultSymbolicLabelling[T] =
macro LabelledMacros.mkDefaultSymbolicLabellingImpl[T]
}
/**
* Polymorphic function that allows modifications on record fields while preserving the
* original key types.
*
* @author Dario Rexin
*/
trait FieldPoly extends Poly1 {
import labelled._
class FieldCaseBuilder[A, T] {
def apply[Res](fn: A => Res) = new Case[FieldType[T, A]] {
type Result = FieldType[T, Res]
val value: Function1[A :: HNil, FieldType[T, Res]] =
(l: A :: HNil) => field[T](fn(l.head))
}
}
def atField[A](w: Witness) = new FieldCaseBuilder[A, w.T]
}
/**
* Field with values of type `V`.
*
* Record keys of this form should be objects which extend this trait. Keys may also be arbitrary singleton typed
* values, however keys of this form enforce the type of their values.
*
* @author Miles Sabin
*/
trait FieldOf[V] {
import labelled._
def ->>(v: V): FieldType[this.type, V] = field[this.type](v)
}
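/**
 * Illustrative sketch, not part of the original source: a key object extending `FieldOf` fixes
 * the value type of its field, so `->>` only accepts values of that type.
 */
private[shapeless] object FieldOfExample {
  import labelled.FieldType
  object age extends FieldOf[Int]
  val ageField: FieldType[age.type, Int] = age ->> 23
}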
@macrocompat.bundle
class LabelledMacros(val c: whitebox.Context) extends SingletonTypeUtils with CaseClassMacros {
import labelled._
import c.universe._
def mkDefaultSymbolicLabellingImpl[T](implicit tTag: WeakTypeTag[T]): Tree = {
val tTpe = weakTypeOf[T]
val labels: List[String] =
if(isProduct(tTpe)) fieldsOf(tTpe).map { f => nameAsString(f._1) }
else if(isCoproduct(tTpe)) ctorsOf(tTpe).map { tpe => nameAsString(nameOf(tpe)) }
else c.abort(c.enclosingPosition, s"$tTpe is not case class like or the root of a sealed family of types")
val labelTpes = labels.map(SingletonSymbolType(_))
val labelValues = labels.map(mkSingletonSymbol)
val labelsTpe = mkHListTpe(labelTpes)
val labelsValue =
labelValues.foldRight(q"_root_.shapeless.HNil": Tree) {
case (elem, acc) => q"_root_.shapeless.::($elem, $acc)"
}
q"""
new _root_.shapeless.DefaultSymbolicLabelling[$tTpe] {
type Out = $labelsTpe
def apply(): $labelsTpe = $labelsValue
} : _root_.shapeless.DefaultSymbolicLabelling.Aux[$tTpe, $labelsTpe]
"""
}
def recordTypeImpl(tpeSelector: Tree): Tree =
labelledTypeImpl(tpeSelector, "record", hnilTpe, hconsTpe)
def unionTypeImpl(tpeSelector: Tree): Tree =
labelledTypeImpl(tpeSelector, "union", cnilTpe, cconsTpe)
def labelledTypeImpl(tpeSelector: Tree, variety: String, nilTpe: Type, consTpe: Type): Tree = {
def mkFieldTpe(keyTpe: Type, valueTpe: Type): Type =
appliedType(fieldTypeTpe, List(keyTpe, valueTpe))
val q"${tpeString: String}" = tpeSelector
val fields =
if (tpeString.trim.isEmpty)
Array.empty[(Type, Type)]
else
tpeString.split(",").map(_.trim).map(_.split("->").map(_.trim)).map {
case Array(key, value) =>
val keyTpe =
parseLiteralType(key)
.getOrElse(c.abort(c.enclosingPosition, s"Malformed literal type $key"))
val valueTpe =
parseType(value)
.getOrElse(c.abort(c.enclosingPosition, s"Malformed literal or standard type $value"))
(keyTpe, valueTpe)
case other =>
c.abort(c.enclosingPosition, s"Malformed $variety type $tpeString")
}
val labelledTpe =
fields.foldRight(nilTpe) { case ((keyTpe, valueTpe), acc) =>
val fieldTpe = mkFieldTpe(keyTpe, valueTpe)
appliedType(consTpe, List(fieldTpe, acc))
}
typeCarrier(labelledTpe)
}
def hlistTypeImpl(tpeSelector: Tree): Tree =
nonLabelledTypeImpl(tpeSelector, "hlist", hnilTpe, hconsTpe)
def coproductTypeImpl(tpeSelector: Tree): Tree =
nonLabelledTypeImpl(tpeSelector, "coproduct", cnilTpe, cconsTpe)
def nonLabelledTypeImpl(tpeSelector: Tree, variety: String, nilTpe: Type, consTpe: Type): Tree = {
val q"${tpeString: String}" = tpeSelector
val elemTypes =
if (tpeString.trim.isEmpty)
Array.empty[Type]
else
tpeString.split(",").map(_.trim).map { elemTypeStr =>
parseType(elemTypeStr)
.getOrElse(c.abort(c.enclosingPosition, s"Malformed literal or standard type $elemTypeStr"))
}
val tpe =
elemTypes.foldRight(nilTpe) { case (elemTpe, acc) =>
appliedType(consTpe, List(elemTpe, acc))
}
typeCarrier(tpe)
}
}
| liff/shapeless | core/src/main/scala/shapeless/labelled.scala | Scala | apache-2.0 | 5,756 |
package coursier.cache
sealed abstract class CachePolicy extends Product with Serializable {
def acceptChanging: CachePolicy.Mixed
def rejectChanging: CachePolicy.NoChanging
def acceptsChangingArtifacts: Boolean
}
object CachePolicy {
sealed abstract class Mixed extends CachePolicy {
def acceptChanging: Mixed = this
def acceptsChangingArtifacts: Boolean = true
}
/** Only pick local files, possibly from the cache. Don't try to download anything. */
case object LocalOnly extends Mixed {
def rejectChanging = NoChanging.LocalOnly
}
  /** Only pick local files, possibly from the cache. Don't return changing artifacts whose last
    * check is older than the TTL.
*/
case object LocalOnlyIfValid extends Mixed {
def rejectChanging = NoChanging.LocalOnly
}
/** Only pick local files. If one of these local files corresponds to a changing artifact, check
* for updates, and download these if needed.
*
    * If no local file is found, *don't* try to download it. Updates are only checked for files already
* in cache.
*
* Follows the TTL parameter (assumes no update is needed if the last one is recent enough).
*/
case object LocalUpdateChanging extends Mixed {
def rejectChanging = NoChanging.LocalOnly
}
/** Only pick local files, check if any update is available for them, and download these if
* needed.
*
    * If no local file is found, *don't* try to download it. Updates are only checked for files already
* in cache.
*
* Follows the TTL parameter (assumes no update is needed if the last one is recent enough).
*
* Unlike `LocalUpdateChanging`, all found local files are checked for updates, not just the
* changing ones.
*/
case object LocalUpdate extends Mixed {
def rejectChanging = NoChanging.LocalUpdate
}
/** Pick local files, and download the missing ones.
*
* For changing ones, check for updates, and download those if any.
*
* Follows the TTL parameter (assumes no update is needed if the last one is recent enough).
*/
case object UpdateChanging extends Mixed {
def rejectChanging = NoChanging.FetchMissing
}
/** Pick local files, download the missing ones, check for updates and download those if any.
*
* Follows the TTL parameter (assumes no update is needed if the last one is recent enough).
*
* Unlike `UpdateChanging`, all found local files are checked for updates, not just the changing
* ones.
*/
case object Update extends Mixed {
def rejectChanging = NoChanging.FetchMissing
}
/** Pick local files, download the missing ones.
*
* No updates are checked for files already downloaded.
*/
case object FetchMissing extends Mixed {
def rejectChanging = NoChanging.FetchMissing
}
/** (Re-)download all files.
*
* Erases files already in cache.
*/
case object ForceDownload extends Mixed {
def rejectChanging = NoChanging.ForceDownload
}
sealed abstract class NoChanging extends CachePolicy {
def rejectChanging: CachePolicy.NoChanging = this
def acceptsChangingArtifacts: Boolean = false
}
object NoChanging {
case object LocalOnly extends NoChanging {
def acceptChanging = CachePolicy.LocalOnly
}
case object LocalUpdate extends NoChanging {
def acceptChanging = CachePolicy.LocalUpdate
}
case object FetchMissing extends NoChanging {
def acceptChanging = CachePolicy.FetchMissing
}
case object ForceDownload extends NoChanging {
def acceptChanging = CachePolicy.ForceDownload
}
}
}
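/**
 * Illustrative sketch, not part of the original source: `rejectChanging` / `acceptChanging`
 * convert between the mixed policies and their no-changing counterparts, which is how a policy
 * is tightened or relaxed without pattern matching.
 */
private[cache] object CachePolicyExample {
  val strict: CachePolicy.NoChanging = CachePolicy.UpdateChanging.rejectChanging // NoChanging.FetchMissing
  val relaxed: CachePolicy.Mixed = strict.acceptChanging // CachePolicy.FetchMissing
  val acceptsChanging: Boolean = relaxed.acceptsChangingArtifacts // true
}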
| alexarchambault/coursier | modules/cache/jvm/src/main/scala/coursier/cache/CachePolicy.scala | Scala | apache-2.0 | 3,632 |
/*
* Copyright 2015 Textocat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.textocat.textokit.phrrecog
import com.textocat.textokit.morph.fs.{Word, Wordform}
import com.textocat.textokit.phrrecog.cas.NounPhrase
import com.textocat.textokit.phrrecog.input.AnnotationSpan
import com.textocat.textokit.phrrecog.parsing.{NP, NPParsers}
import com.textocat.textokit.segmentation.fstype.Sentence
import org.apache.uima.fit.component.JCasAnnotator_ImplBase
import org.apache.uima.fit.util.{FSCollectionFactory, JCasUtil}
import org.apache.uima.jcas.JCas
import org.apache.uima.jcas.cas.FSArray
import org.apache.uima.jcas.tcas.Annotation
import scala.collection.JavaConversions._
import scala.collection.immutable.TreeSet
import scala.util.parsing.input.Reader
/**
* @author Rinat Gareev
*
*/
class NPRecognizer extends JCasAnnotator_ImplBase with NPParsers {
override def process(jCas: JCas): Unit = {
JCasUtil.select(jCas, classOf[Sentence]).foreach(processSpan(jCas, _))
}
private def processSpan(jCas: JCas, span: Annotation) {
val spanWords = JCasUtil.selectCovered(jCas, classOf[Word], span).toList
if (spanWords.nonEmpty)
parseFrom(new AnnotationSpan(spanWords).reader)
}
private def parseFrom(reader: Reader[Word]): Unit =
if (!reader.atEnd)
np(reader) match {
case Success(np, rest) =>
addToCas(np)
parseFrom(rest)
case Failure(_, _) =>
// start from next anno
parseFrom(reader.rest)
}
private def addToCas(np: NP) {
val npAnno = createNPAnnotation(np)
npAnno.addToIndexes()
}
private def createNPAnnotation(np: NP): NounPhrase = {
val head = np.noun
val jCas = head.getCAS.getJCas
val phrase = new NounPhrase(jCas)
phrase.setBegin(head.getWord.getBegin)
phrase.setEnd(head.getWord.getEnd)
phrase.setHead(head)
np.prepOpt match {
case Some(prep) => phrase.setPreposition(prep)
case None =>
}
np.particleOpt match {
case Some(particle) => phrase.setParticle(particle)
case None =>
}
val depWordsFsArray = new FSArray(jCas, np.depWords.size)
FSCollectionFactory.fillArrayFS(depWordsFsArray, TreeSet.empty[Wordform](wfOffsetComp) ++ np.depWords)
phrase.setDependentWords(depWordsFsArray)
// TODO low priority: add a sanity check to avoid infinite recursion
val depAnnoQ = np.depNPs.map(createNPAnnotation)
val depNPsFsArray = new FSArray(jCas, depAnnoQ.size)
FSCollectionFactory.fillArrayFS(depNPsFsArray, depAnnoQ)
phrase.setDependentPhrases(depNPsFsArray)
phrase
}
} | Denis220795/Textokit | Textokit.PhraseRecognizer/src/main/scala/com/textocat/textokit/phrrecog/NPRecognizer.scala | Scala | apache-2.0 | 3,169 |
package chapter16
import scala.xml.Elem
object Exercise8 extends App {
def toMap(dl: Elem): Map[String, String] = {
val terms = (dl \\ "dt").map(_.text)
val descs = (dl \\ "dd").map(_.text)
(terms zip descs).toMap
}
val dl = <dl>
<dt>A</dt>
<dd>1</dd>
<dt>B</dt>
<dd>2</dd>
</dl>
println(toMap(dl))
}
| vsuharnikov/books-exercises | scala/scala-for-the-impatient/src/main/scala/chapter16/Exercise8.scala | Scala | mit | 354 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
// DO NOT EDIT, CHANGES WILL BE LOST
// This auto-generated code can be modified in "project/GenerateAnyVals.scala".
// Afterwards, running "sbt generateSources" regenerates this source file.
package scala
/** `Long`, a 64-bit signed integer (equivalent to Java's `long` primitive type) is a
* subtype of [[scala.AnyVal]]. Instances of `Long` are not
* represented by an object in the underlying runtime system.
*
* There is an implicit conversion from [[scala.Long]] => [[scala.runtime.RichLong]]
* which provides useful non-primitive operations.
*/
final abstract class Long private extends AnyVal {
def toByte: Byte
def toShort: Short
def toChar: Char
def toInt: Int
def toLong: Long
def toFloat: Float
def toDouble: Double
/**
* Returns the bitwise negation of this value.
* @example {{{
* ~5 == -6
* // in binary: ~00000101 ==
* // 11111010
* }}}
*/
def unary_~ : Long
/** Returns this value, unmodified. */
def unary_+ : Long
/** Returns the negation of this value. */
def unary_- : Long
@deprecated("Adding a number and a String is deprecated. Use the string interpolation `s\"$num$str\"`", "2.13.0")
def +(x: String): String
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
def <<(x: Int): Long
/**
* Returns this value bit-shifted left by the specified number of bits,
* filling in the new right bits with zeroes.
* @example {{{ 6 << 3 == 48 // in binary: 0110 << 3 == 0110000 }}}
*/
def <<(x: Long): Long
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* -21 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
def >>>(x: Int): Long
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling the new left bits with zeroes.
* @example {{{ 21 >>> 3 == 2 // in binary: 010101 >>> 3 == 010 }}}
* @example {{{
* -21 >>> 3 == 536870909
* // in binary: 11111111 11111111 11111111 11101011 >>> 3 ==
* // 00011111 11111111 11111111 11111101
* }}}
*/
def >>>(x: Long): Long
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling in the left bits with the same value as the left-most bit of this.
* The effect of this is to retain the sign of the value.
* @example {{{
* -21 >> 3 == -3
* // in binary: 11111111 11111111 11111111 11101011 >> 3 ==
* // 11111111 11111111 11111111 11111101
* }}}
*/
def >>(x: Int): Long
/**
* Returns this value bit-shifted right by the specified number of bits,
* filling in the left bits with the same value as the left-most bit of this.
* The effect of this is to retain the sign of the value.
* @example {{{
* -21 >> 3 == -3
* // in binary: 11111111 11111111 11111111 11101011 >> 3 ==
* // 11111111 11111111 11111111 11111101
* }}}
*/
def >>(x: Long): Long
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Byte): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Short): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Char): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Int): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Long): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Float): Boolean
/** Returns `true` if this value is equal to x, `false` otherwise. */
def ==(x: Double): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Byte): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Short): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Char): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Int): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Long): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Float): Boolean
/** Returns `true` if this value is not equal to x, `false` otherwise. */
def !=(x: Double): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Byte): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Short): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Char): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Int): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Long): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Float): Boolean
/** Returns `true` if this value is less than x, `false` otherwise. */
def <(x: Double): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Byte): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Short): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Char): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Int): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Long): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Float): Boolean
/** Returns `true` if this value is less than or equal to x, `false` otherwise. */
def <=(x: Double): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Byte): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Short): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Char): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Int): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Long): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Float): Boolean
/** Returns `true` if this value is greater than x, `false` otherwise. */
def >(x: Double): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Byte): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Short): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Char): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Int): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Long): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Float): Boolean
/** Returns `true` if this value is greater than or equal to x, `false` otherwise. */
def >=(x: Double): Boolean
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Byte): Long
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Short): Long
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Char): Long
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Int): Long
/**
* Returns the bitwise OR of this value and `x`.
* @example {{{
* (0xf0 | 0xaa) == 0xfa
* // in binary: 11110000
* // | 10101010
* // --------
* // 11111010
* }}}
*/
def |(x: Long): Long
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Byte): Long
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Short): Long
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Char): Long
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Int): Long
/**
* Returns the bitwise AND of this value and `x`.
* @example {{{
* (0xf0 & 0xaa) == 0xa0
* // in binary: 11110000
* // & 10101010
* // --------
* // 10100000
* }}}
*/
def &(x: Long): Long
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Byte): Long
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Short): Long
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Char): Long
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Int): Long
/**
* Returns the bitwise XOR of this value and `x`.
* @example {{{
* (0xf0 ^ 0xaa) == 0x5a
* // in binary: 11110000
* // ^ 10101010
* // --------
* // 01011010
* }}}
*/
def ^(x: Long): Long
/** Returns the sum of this value and `x`. */
def +(x: Byte): Long
/** Returns the sum of this value and `x`. */
def +(x: Short): Long
/** Returns the sum of this value and `x`. */
def +(x: Char): Long
/** Returns the sum of this value and `x`. */
def +(x: Int): Long
/** Returns the sum of this value and `x`. */
def +(x: Long): Long
/** Returns the sum of this value and `x`. */
def +(x: Float): Float
/** Returns the sum of this value and `x`. */
def +(x: Double): Double
/** Returns the difference of this value and `x`. */
def -(x: Byte): Long
/** Returns the difference of this value and `x`. */
def -(x: Short): Long
/** Returns the difference of this value and `x`. */
def -(x: Char): Long
/** Returns the difference of this value and `x`. */
def -(x: Int): Long
/** Returns the difference of this value and `x`. */
def -(x: Long): Long
/** Returns the difference of this value and `x`. */
def -(x: Float): Float
/** Returns the difference of this value and `x`. */
def -(x: Double): Double
/** Returns the product of this value and `x`. */
def *(x: Byte): Long
/** Returns the product of this value and `x`. */
def *(x: Short): Long
/** Returns the product of this value and `x`. */
def *(x: Char): Long
/** Returns the product of this value and `x`. */
def *(x: Int): Long
/** Returns the product of this value and `x`. */
def *(x: Long): Long
/** Returns the product of this value and `x`. */
def *(x: Float): Float
/** Returns the product of this value and `x`. */
def *(x: Double): Double
/** Returns the quotient of this value and `x`. */
def /(x: Byte): Long
/** Returns the quotient of this value and `x`. */
def /(x: Short): Long
/** Returns the quotient of this value and `x`. */
def /(x: Char): Long
/** Returns the quotient of this value and `x`. */
def /(x: Int): Long
/** Returns the quotient of this value and `x`. */
def /(x: Long): Long
/** Returns the quotient of this value and `x`. */
def /(x: Float): Float
/** Returns the quotient of this value and `x`. */
def /(x: Double): Double
/** Returns the remainder of the division of this value by `x`. */
def %(x: Byte): Long
/** Returns the remainder of the division of this value by `x`. */
def %(x: Short): Long
/** Returns the remainder of the division of this value by `x`. */
def %(x: Char): Long
/** Returns the remainder of the division of this value by `x`. */
def %(x: Int): Long
/** Returns the remainder of the division of this value by `x`. */
def %(x: Long): Long
/** Returns the remainder of the division of this value by `x`. */
def %(x: Float): Float
/** Returns the remainder of the division of this value by `x`. */
def %(x: Double): Double
// Provide a more specific return type for Scaladoc
override def getClass(): Class[Long] = ???
}
object Long extends AnyValCompanion {
/** The smallest value representable as a Long. */
final val MinValue = java.lang.Long.MIN_VALUE
/** The largest value representable as a Long. */
final val MaxValue = java.lang.Long.MAX_VALUE
/** Transform a value type into a boxed reference type.
*
* Runtime implementation determined by `scala.runtime.BoxesRunTime.boxToLong`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*
* @param x the Long to be boxed
* @return a java.lang.Long offering `x` as its underlying value.
*/
def box(x: Long): java.lang.Long = ???
/** Transform a boxed type into a value type. Note that this
* method is not typesafe: it accepts any Object, but will throw
* an exception if the argument is not a java.lang.Long.
*
* Runtime implementation determined by `scala.runtime.BoxesRunTime.unboxToLong`. See [[https://github.com/scala/scala src/library/scala/runtime/BoxesRunTime.java]].
*
* @param x the java.lang.Long to be unboxed.
* @throws ClassCastException if the argument is not a java.lang.Long
* @return the Long resulting from calling longValue() on `x`
*/
def unbox(x: java.lang.Object): Long = ???
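  // Illustrative sketch, not part of the original source: how box/unbox round-trip a primitive
  // (behaviour assumed from the scaladoc above; the actual bodies are compiler-provided).
  //   val boxed: java.lang.Long = Long.box(42L)     // wraps the primitive in a java.lang.Long
  //   val back: Long            = Long.unbox(boxed) // calls longValue() on the boxed argument
  //   Long.unbox("not a long")                      // compiles (any Object), but throws ClassCastException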
/** The String representation of the scala.Long companion object. */
override def toString = "object scala.Long"
/** Language mandated coercions from Long to "wider" types. */
import scala.language.implicitConversions
@deprecated("Implicit conversion from Long to Float is dangerous because it loses precision. Write `.toFloat` instead.", "2.13.1")
implicit def long2float(x: Long): Float = x.toFloat
@deprecated("Implicit conversion from Long to Double is dangerous because it loses precision. Write `.toDouble` instead.", "2.13.1")
implicit def long2double(x: Long): Double = x.toDouble
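  // Worked example (added for illustration) of the precision loss the deprecations above warn about:
  //   val n = (1L << 53) + 1              // 9007199254740993
  //   n.toDouble == (1L << 53).toDouble   // true: Double carries only 53 bits of mantissa, so the +1 is lost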
}
| scala/scala | src/library/scala/Long.scala | Scala | apache-2.0 | 16,552 |
/**
* Copyright 2016, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperations.readwritedataframe
import io.deepsense.deeplang.ExecutionContext
object FilePathFromLibraryPath {
def apply(path: FilePath)(implicit ctx: ExecutionContext): FilePath = {
require(path.fileScheme == FileScheme.Library)
val libraryPath = ctx.libraryPath + "/" + path.pathWithoutScheme
FilePath(FileScheme.File, libraryPath)
}
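  // Hedged illustration (example values assumed, not taken from this file): with
  // ctx.libraryPath == "/library" and path == FilePath(FileScheme.Library, "reports/data.csv"),
  // the result is FilePath(FileScheme.File, "/library/reports/data.csv").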
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/doperations/readwritedataframe/FilePathFromLibraryPath.scala | Scala | apache-2.0 | 986 |
package dotty.tools.dotc
package transform
import dotty.tools.dotc.transform.TreeTransforms.{TransformerInfo, TreeTransform, TreeTransformer}
import dotty.tools.dotc.ast.{Trees, tpd}
import scala.collection.{ mutable, immutable }
import ValueClasses._
import scala.annotation.tailrec
import core._
import Types._, Contexts._, Constants._, Names._, NameOps._, Flags._, DenotTransformers._
import SymDenotations._, Symbols._, StdNames._, Annotations._, Trees._, Scopes._, Denotations._
import util.Positions._
import Decorators._
import Symbols._, TypeUtils._
/** This class performs the following functions:
*
* (1) Adds super accessors for all super calls that either
* appear in a trait or have as a target a member of some outer class.
*
* (2) Adds protected accessors if the access to the protected member happens
* in a class which is not a subclass of the member's owner.
*
* It also checks that:
*
* (1) Symbols accessed from super are not abstract, or are overridden by
* an abstract override.
*
* (2) If a symbol accessed from super is defined in a real class (not a trait),
* there are no abstract members which override this member in Java's rules
* (see SI-4989; such an access would lead to illegal bytecode)
*
* (3) Super calls do not go to some synthetic members of Any (see isDisallowed)
*
* (4) Super calls do not go to synthetic field accessors
*/
class SuperAccessors(thisTransformer: DenotTransformer) {
import tpd._
/** Some parts of trees will get a new owner in subsequent phases.
* These are value class methods, which will become extension methods.
* (By-name arguments used to be included also, but these
* don't get a new class anymore, they are just wrapped in a new method).
*
* These regions will have to be treated specially for the purpose
* of adding accessors. For instance, super calls from these regions
* always have to go through an accessor.
*
* The `invalidEnclClass` field, if different from NoSymbol,
* contains the symbol that is not a valid owner.
*/
private var invalidEnclClass: Symbol = NoSymbol
private def withInvalidCurrentClass[A](trans: => A)(implicit ctx: Context): A = {
val saved = invalidEnclClass
invalidEnclClass = ctx.owner
try trans
finally invalidEnclClass = saved
}
private def validCurrentClass(implicit ctx: Context): Boolean =
ctx.owner.enclosingClass != invalidEnclClass
/** List buffers for new accessor definitions, indexed by class */
private val accDefs = mutable.Map[Symbol, mutable.ListBuffer[Tree]]()
/** A super accessor call corresponding to `sel` */
private def superAccessorCall(sel: Select)(implicit ctx: Context) = {
val Select(qual, name) = sel
val sym = sel.symbol
val clazz = qual.symbol.asClass
var supername = name.superName
if (clazz is Trait) supername = supername.expandedName(clazz)
val superAcc = clazz.info.decl(supername).suchThat(_.signature == sym.signature).symbol orElse {
ctx.debuglog(s"add super acc ${sym.showLocated} to $clazz")
val deferredOrPrivate = if (clazz is Trait) Deferred | ExpandedName else Private
val acc = ctx.newSymbol(
clazz, supername, SuperAccessor | Artifact | Method | deferredOrPrivate,
sel.tpe.widenSingleton.ensureMethodic, coord = sym.coord).enteredAfter(thisTransformer)
// Diagnostic for SI-7091
if (!accDefs.contains(clazz))
ctx.error(s"Internal error: unable to store accessor definition in ${clazz}. clazz.hasPackageFlag=${clazz is Package}. Accessor required for ${sel} (${sel.show})", sel.pos)
else accDefs(clazz) += DefDef(acc, EmptyTree)
acc
}
This(clazz).select(superAcc).withPos(sel.pos)
}
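  // Hedged illustration (trait/class names are assumptions, not taken from this file):
  //   trait B extends A { override def f: Int = super.f + 1 }
  // Because the super call occurs in a trait, `super.f` is rewritten to go through a generated
  // `super$f`-style accessor entered on the trait, which is what superAccessorCall constructs
  // above via `This(clazz).select(superAcc)`.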
/** Check selection `super.f` for conforming to rules. If necessary,
* replace by a super accessor call.
*/
private def transformSuperSelect(sel: Select)(implicit ctx: Context): Tree = {
val Select(sup @ Super(_, mix), name) = sel
val sym = sel.symbol
assert(sup.symbol.exists, s"missing symbol in $sel: ${sup.tpe}")
val clazz = sup.symbol.asClass
if ((sym.isTerm) && !(sym is Method) || (sym is Accessor))
ctx.error(s"super may be not be used on ${sym.underlyingSymbol}", sel.pos)
else if (isDisallowed(sym))
ctx.error(s"super not allowed here: use this.${sel.name.decode} instead", sel.pos)
else if (sym is Deferred) {
val member = sym.overridingSymbol(clazz)
if (mix != tpnme.EMPTY ||
!member.exists ||
!((member is AbsOverride) && member.isIncompleteIn(clazz)))
ctx.error(
i"${sym.showLocated} is accessed from super. It may not be abstract unless it is overridden by a member declared `abstract' and `override'",
sel.pos)
else println(i"ok super $sel ${sym.showLocated} $member $clazz ${member.isIncompleteIn(clazz)}")
}
else if (mix == tpnme.EMPTY && !(sym.owner is Trait))
// SI-4989 Check if an intermediate class between `clazz` and `sym.owner` redeclares the method as abstract.
for (intermediateClass <- clazz.info.baseClasses.tail.takeWhile(_ != sym.owner)) {
val overriding = sym.overridingSymbol(intermediateClass)
if ((overriding is (Deferred, butNot = AbsOverride)) && !(overriding.owner is Trait))
ctx.error(
s"${sym.showLocated} cannot be directly accessed from ${clazz} because ${overriding.owner} redeclares it as abstract",
sel.pos)
}
if (name.isTermName && mix == tpnme.EMPTY &&
((clazz is Trait) || clazz != ctx.owner.enclosingClass || !validCurrentClass))
superAccessorCall(sel)(ctx.withPhase(thisTransformer.next))
else sel
}
/** Disallow some super.XX calls targeting Any methods which would
* otherwise lead to either a compiler crash or runtime failure.
*/
private def isDisallowed(sym: Symbol)(implicit ctx: Context) = {
val d = defn
import d._
(sym eq Any_isInstanceOf) ||
(sym eq Any_asInstanceOf) ||
(sym eq Any_==) ||
(sym eq Any_!=) ||
(sym eq Any_##)
}
/** Replace `sel` (or `sel[targs]` if `targs` is nonempty) with a protected accessor
* call, if necessary.
*/
private def ensureProtectedAccessOK(sel: Select, targs: List[Tree])(implicit ctx: Context) = {
val sym = sel.symbol
if (sym.exists && needsProtectedAccessor(sym, sel.pos)) {
ctx.debuglog("Adding protected accessor for " + sel)
protectedAccessorCall(sel, targs)
} else sel
}
/** Add a protected accessor, if needed, and return a tree that calls
* the accessor and returns the same member. The result is already
* typed.
*/
private def protectedAccessorCall(sel: Select, targs: List[Tree])(implicit ctx: Context): Tree = {
val Select(qual, _) = sel
val sym = sel.symbol.asTerm
val clazz = hostForAccessorOf(sym, currentClass)
assert(clazz.exists, sym)
ctx.debuglog("Decided for host class: " + clazz)
val accName = sym.name.protectedAccessorName
def isThisType(tpe: Type): Boolean = tpe match {
case tpe: ThisType => !tpe.cls.is(PackageClass)
case tpe: TypeProxy => isThisType(tpe.underlying)
case _ => false
}
// if the result type depends on the this type of an enclosing class, the accessor
// has to take an object of exactly this type, otherwise it's more general
val receiverType =
if (isThisType(sym.info.finalResultType)) clazz.thisType
else clazz.classInfo.selfType
val accType = {
def accTypeOf(tpe: Type): Type = tpe match {
case tpe: PolyType =>
tpe.derivedPolyType(tpe.paramNames, tpe.paramBounds, accTypeOf(tpe.resultType))
case _ =>
MethodType(receiverType :: Nil)(mt => tpe.substThis(sym.owner.asClass, MethodParam(mt, 0)))
}
accTypeOf(sym.info)
}
val protectedAccessor = clazz.info.decl(accName).suchThat(_.signature == accType.signature).symbol orElse {
val newAcc = ctx.newSymbol(
clazz, accName, Artifact, accType, coord = sel.pos).enteredAfter(thisTransformer)
val code = polyDefDef(newAcc, trefs => vrefss => {
val (receiver :: _) :: tail = vrefss
val base = receiver.select(sym).appliedToTypes(trefs)
(base /: vrefss)(Apply(_, _))
})
ctx.debuglog("created protected accessor: " + code)
accDefs(clazz) += code
newAcc
}
val res = This(clazz)
.select(protectedAccessor)
.appliedToTypeTrees(targs)
.appliedTo(qual)
.withPos(sel.pos)
ctx.debuglog(s"Replaced $sel with $res")
res
}
def isProtectedAccessor(tree: Tree)(implicit ctx: Context): Boolean = tree match {
case Apply(TypeApply(Select(_, name), _), qual :: Nil) => name.isProtectedAccessorName
case _ => false
}
/** Add a protected accessor, if needed, and return a tree that calls
* the accessor and returns the same member. The result is already
* typed.
*/
private def protectedAccessor(tree: Select, targs: List[Tree])(implicit ctx: Context): Tree = {
val Select(qual, _) = tree
val sym = tree.symbol.asTerm
val clazz = hostForAccessorOf(sym, currentClass)
assert(clazz.exists, sym)
ctx.debuglog("Decided for host class: " + clazz)
val accName = sym.name.protectedAccessorName
// if the result type depends on the this type of an enclosing class, the accessor
// has to take an object of exactly this type, otherwise it's more general
val receiverType =
if (isThisType(sym.info.finalResultType)) clazz.thisType
else clazz.classInfo.selfType
def accTypeOf(tpe: Type): Type = tpe match {
case tpe: PolyType =>
tpe.derivedPolyType(tpe.paramNames, tpe.paramBounds, accTypeOf(tpe.resultType))
case _ =>
MethodType(receiverType :: Nil)(mt => tpe.substThis(sym.owner.asClass, MethodParam(mt, 0)))
}
val accType = accTypeOf(sym.info)
val protectedAccessor = clazz.info.decl(accName).suchThat(_.signature == accType.signature).symbol orElse {
val newAcc = ctx.newSymbol(
clazz, accName, Artifact, accType, coord = tree.pos).enteredAfter(thisTransformer)
val code = polyDefDef(newAcc, trefs => vrefss => {
val (receiver :: _) :: tail = vrefss
val base = receiver.select(sym).appliedToTypes(trefs)
(base /: vrefss)(Apply(_, _))
})
ctx.debuglog("created protected accessor: " + code)
accDefs(clazz) += code
newAcc
}
val res = This(clazz)
.select(protectedAccessor)
.appliedToTypeTrees(targs)
.appliedTo(qual)
.withPos(tree.pos)
ctx.debuglog(s"Replaced $tree with $res")
res
}
/** Add an accessor for field, if needed, and return a selection tree for it .
* The result is not typed.
*/
private def protectedSetter(tree: Select)(implicit ctx: Context): Tree = {
val field = tree.symbol.asTerm
val clazz = hostForAccessorOf(field, currentClass)
assert(clazz.exists, field)
ctx.debuglog("Decided for host class: " + clazz)
val accName = field.name.protectedSetterName
val accType = MethodType(clazz.classInfo.selfType :: field.info :: Nil, defn.UnitType)
val protectedAccessor = clazz.info.decl(accName).symbol orElse {
val newAcc = ctx.newSymbol(
clazz, accName, Artifact, accType, coord = tree.pos).enteredAfter(thisTransformer)
val code = DefDef(newAcc, vrefss => {
val (receiver :: value :: Nil) :: Nil = vrefss
Assign(receiver.select(field), value).withPos(tree.pos)
})
ctx.debuglog("created protected setter: " + code)
accDefs(clazz) += code
newAcc
}
This(clazz).select(protectedAccessor).withPos(tree.pos)
}
/** Does `sym` need an accessor when accessed from `currentClass`?
* A special case arises for classes with explicit self-types. If the
* self type is a Java class, and a protected accessor is needed, we issue
* an error. If the self type is a Scala class, we don't add an accessor.
* An accessor is not needed if the access boundary is larger than the
   * enclosing package, since that translates to 'public' on the host system
* (as Java has no real package nesting).
*
* If the access happens inside a 'trait', access is more problematic since
* the implementation code is moved to an '$class' class which does not
* inherit anything. Since we can't (yet) add accessors for 'required'
* classes, this has to be signaled as error.
* FIXME Need to better understand this logic
*/
private def needsProtectedAccessor(sym: Symbol, pos: Position)(implicit ctx: Context): Boolean = {
val clazz = currentClass
val host = hostForAccessorOf(sym, clazz)
val selfType = host.classInfo.selfType
def accessibleThroughSubclassing =
validCurrentClass && (selfType <:< sym.owner.typeRef) && !clazz.is(Trait)
val isCandidate = (
sym.is(Protected)
&& sym.is(JavaDefined)
&& !sym.effectiveOwner.is(Package)
&& !accessibleThroughSubclassing
&& (sym.enclosingPackageClass != currentClass.enclosingPackageClass)
&& (sym.enclosingPackageClass == sym.accessBoundary(sym.enclosingPackageClass))
)
def isSelfType = !(host.typeRef <:< selfType) && {
if (selfType.typeSymbol.is(JavaDefined))
ctx.restrictionError(s"cannot accesses protected $sym from within $clazz with self type $selfType", pos)
true
}
def isJavaProtected = host.is(Trait) && sym.is(JavaDefined) && {
ctx.restrictionError(
s"""$clazz accesses protected $sym inside a concrete trait method.
|Add an accessor in a class extending ${sym.enclosingClass} as a workaround.""".stripMargin,
pos
)
true
}
isCandidate && !host.is(Package) && !isSelfType && !isJavaProtected
}
/** Return the innermost enclosing class C of referencingClass for which either
* of the following holds:
* - C is a subclass of sym.owner or
* - C is declared in the same package as sym's owner
*/
private def hostForAccessorOf(sym: Symbol, referencingClass: ClassSymbol)(implicit ctx: Context): ClassSymbol =
if (referencingClass.derivesFrom(sym.owner)
|| referencingClass.classInfo.selfType <:< sym.owner.typeRef
|| referencingClass.enclosingPackageClass == sym.owner.enclosingPackageClass) {
assert(referencingClass.isClass, referencingClass)
referencingClass
}
else if (referencingClass.owner.enclosingClass.exists)
hostForAccessorOf(sym, referencingClass.owner.enclosingClass.asClass)
else
referencingClass
/** Is 'tpe' a ThisType, or a type proxy with a ThisType as transitively underlying type? */
private def isThisType(tpe: Type)(implicit ctx: Context): Boolean = tpe match {
case tpe: ThisType => !tpe.cls.is(PackageClass)
case tpe: TypeProxy => isThisType(tpe.underlying)
case _ => false
}
/** Transform select node, adding super and protected accessors as needed */
def transformSelect(tree: Tree, targs: List[Tree])(implicit ctx: Context) = {
val sel @ Select(qual, name) = tree
val sym = sel.symbol
qual match {
case _: This =>
/*
* A trait which extends a class and accesses a protected member
* of that class cannot implement the necessary accessor method
* because its implementation is in an implementation class (e.g.
* Foo$class) which inherits nothing, and jvm access restrictions
* require the call site to be in an actual subclass. So non-trait
* classes inspect their ancestors for any such situations and
* generate the accessors. See SI-2296.
*/
// FIXME (from scalac's SuperAccessors)
// - this should be unified with needsProtectedAccessor, but some
// subtlety which presently eludes me is foiling my attempts.
val shouldEnsureAccessor = (
(currentClass is Trait)
&& (sym is Protected)
&& sym.enclosingClass != currentClass
&& !(sym.owner is PackageClass) // SI-7091 no accessor needed package owned (ie, top level) symbols
&& !(sym.owner is Trait)
&& sym.owner.enclosingPackageClass != currentClass.enclosingPackageClass
&& qual.symbol.info.member(sym.name).exists
&& !needsProtectedAccessor(sym, sel.pos))
if (shouldEnsureAccessor) {
ctx.log("Ensuring accessor for call to protected " + sym.showLocated + " from " + currentClass)
superAccessorCall(sel)
} else
ensureProtectedAccessOK(sel, targs)
case Super(_, mix) =>
transformSuperSelect(sel)
case _ =>
ensureProtectedAccessOK(sel, targs)
}
}
/** Transform assignment, adding a protected setter if needed */
def transformAssign(tree: Tree)(implicit ctx: Context) = {
val Assign(lhs @ Select(qual, name), rhs) = tree
if ((lhs.symbol is Mutable) &&
(lhs.symbol is JavaDefined) &&
needsProtectedAccessor(lhs.symbol, tree.pos)) {
ctx.debuglog("Adding protected setter for " + tree)
val setter = protectedSetter(lhs)
ctx.debuglog("Replaced " + tree + " with " + setter)
setter.appliedTo(qual, rhs)
}
else tree
}
/** Wrap template to template transform `op` with needed initialization and finalization */
def wrapTemplate(tree: Template)(op: Template => Template)(implicit ctx: Context) = {
accDefs(currentClass) = new mutable.ListBuffer[Tree]
val impl = op(tree)
val accessors = accDefs.remove(currentClass).get
if (accessors.isEmpty) impl
else {
val (params, rest) = impl.body span {
case td: TypeDef => !td.isClassDef
case vd: ValOrDefDef => vd.symbol.flags is ParamAccessor
case _ => false
}
cpy.Template(impl)(body = params ++ accessors ++ rest)
}
}
/** Wrap `DefDef` producing operation `op`, potentially setting `invalidClass` info */
def wrapDefDef(ddef: DefDef)(op: => DefDef)(implicit ctx: Context) =
if (isMethodWithExtension(ddef.symbol)) withInvalidCurrentClass(op) else op
}
| yusuke2255/dotty | src/dotty/tools/dotc/transform/SuperAccessors.scala | Scala | bsd-3-clause | 18,889 |
package cromwell.util
import java.sql.SQLTransientException
import akka.actor.ActorSystem
import cromwell.core.retry.{Retry, SimpleExponentialBackoff}
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
object DatabaseUtil {
private def isTransient(throwable: Throwable): Boolean = throwable match {
case _: SQLTransientException => true
case _ => false
}
def withRetry[A](f: () => Future[A])(implicit actorSystem: ActorSystem): Future[A] = {
val RetryBackoff = SimpleExponentialBackoff(50 millis, 1 seconds, 1D)
Retry.withRetry(f, maxRetries = Option(10), backoff = RetryBackoff, isTransient = isTransient)
}
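  // Hedged usage sketch (the query function and actor system names are assumed for illustration):
  //   implicit val system: ActorSystem = ActorSystem("db-util-example")
  //   val result: Future[Int] = withRetry(() => runCountQuery())
  // Only SQLTransientException failures are retried (maxRetries = 10), with the backoff configured
  // between 50 milliseconds and 1 second between attempts.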
}
| ohsu-comp-bio/cromwell | core/src/main/scala/cromwell/util/DatabaseUtil.scala | Scala | bsd-3-clause | 688 |
// Copyright (c) 2013-2020 Rob Norris and Contributors
// This software is licensed under the MIT License (MIT).
// For more information see LICENSE or https://opensource.org/licenses/MIT
package doobie.postgres
import doobie.Meta
import doobie.enumerated.{JdbcType => JT}
import doobie.util.meta.MetaConstructors
import java.time.{OffsetDateTime, ZoneOffset} // Using database JDBC driver native support
/**
* Instances for JSR-310 date time types.
*
* Implementation is based on https://jdbc.postgresql.org/documentation/head/8-date-time.html, using
* native support for Postgres JDBC driver.
*/
trait JavaTimeInstances extends MetaConstructors {
/**
* This type should map to TIMESTAMP WITH TIMEZONE (TIMESTAMPTZ)
* When writing to the database, the same instant is preserved if your target column is of type TIMESTAMPTZ
* (The JDBC driver works out the timezone conversion for you). Note that since offset information is not stored in
* the database column, retrieving the same value will yield the same instant in time, but with offset = 0 (UTC)
*/
implicit val JavaTimeOffsetDateTimeMeta: Meta[java.time.OffsetDateTime] =
Basic.one[java.time.OffsetDateTime](
JT.Timestamp,
List(JT.Char, JT.VarChar, JT.LongVarChar, JT.Date, JT.Time),
_.getObject(_, classOf[java.time.OffsetDateTime]), _.setObject(_, _), _.updateObject(_, _))
/**
* This type should map to TIMESTAMP WITH TIMEZONE (TIMESTAMPTZ)
*/
implicit val JavaTimeInstantMeta: Meta[java.time.Instant] =
JavaTimeOffsetDateTimeMeta.timap(_.toInstant)(OffsetDateTime.ofInstant(_, ZoneOffset.UTC))
/**
* This type should map to TIMESTAMP WITH TIMEZONE (TIMESTAMPTZ)
* When writing to the database, the same instant is preserved if your target column is of type TIMESTAMPTZ
* (The JDBC driver works out the timezone conversion for you). Note that since zone information is not stored in
* the database column, retrieving the same value will yield the same instant in time, but in UTC.
*/
implicit val JavaTimeZonedDateTimeMeta: Meta[java.time.ZonedDateTime] =
JavaTimeOffsetDateTimeMeta.timap(_.atZoneSameInstant(ZoneOffset.UTC))(_.toOffsetDateTime)
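  // Illustrative consequence of the mapping above (example values assumed): writing
  // ZonedDateTime.of(2020, 1, 1, 12, 0, 0, 0, ZoneId.of("Europe/Paris")) and reading it back
  // yields the same instant, but carried in ZoneOffset.UTC, since the column stores no zone information.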
/**
* This type should map to TIMESTAMP
*/
implicit val JavaTimeLocalDateTimeMeta: Meta[java.time.LocalDateTime] =
Basic.one[java.time.LocalDateTime](
JT.Timestamp,
List(JT.Char, JT.VarChar, JT.LongVarChar, JT.Date, JT.Time),
_.getObject(_, classOf[java.time.LocalDateTime]), _.setObject(_, _), _.updateObject(_, _))
/**
* This type should map to DATE
*/
implicit val JavaTimeLocalDateMeta: Meta[java.time.LocalDate] =
Basic.one[java.time.LocalDate](
JT.Date,
List(JT.Char, JT.VarChar, JT.LongVarChar, JT.Timestamp),
_.getObject(_, classOf[java.time.LocalDate]), _.setObject(_, _), _.updateObject(_, _))
/**
* This type should map to TIME
*/
implicit val JavaTimeLocalTimeMeta: Meta[java.time.LocalTime] =
Basic.one[java.time.LocalTime](
JT.Time,
List(JT.Char, JT.VarChar, JT.LongVarChar, JT.Timestamp),
_.getObject(_, classOf[java.time.LocalTime]), _.setObject(_, _), _.updateObject(_, _))
}
| tpolecat/doobie | modules/postgres/src/main/scala/doobie/postgres/JavaTimeInstances.scala | Scala | mit | 3,188 |
package com.monsanto.arch.cloudformation.model.resource
import com.monsanto.arch.cloudformation.model.{ ResourceRef, Template, Token }
import org.scalatest.{ FunSpec, Matchers }
import spray.json._
class CodeCommit_UT extends FunSpec with Matchers {
val repo = `AWS::CodeCommit::Repository`(
name = "RepoFoo",
RepositoryDescription = Some(""),
RepositoryName = "RepoBar",
Triggers = Some(Seq(
CodeCommitTrigger(
Branches = Some(Seq("foo")),
CustomData = Some("bar"),
DestinationArn = Some("arn::::baz"),
Events = Some(Seq(
CodeCommitEvent.updateReference,
CodeCommitEvent.deleteReference
)),
Name = "BarTrigger"
)
))
)
describe("UsagePlan"){
it ("should serialize as expected") {
val expectedJson =
"""
|{
| "Resources": {
| "RepoFoo": {
| "Properties": {
| "RepositoryDescription": "",
| "RepositoryName": "RepoBar",
| "Triggers": [
| {
| "Branches": [
| "foo"
| ],
| "CustomData": "bar",
| "DestinationArn": "arn::::baz",
| "Events": [
| "updateReference",
| "deleteReference"
| ],
| "Name": "BarTrigger"
| }
| ]
| },
| "Type": "AWS::CodeCommit::Repository"
| }
| }
|}
""".stripMargin.parseJson
Template.fromResource(repo).toJson should be (expectedJson)
}
}
}
| MonsantoCo/cloudformation-template-generator | src/test/scala/com/monsanto/arch/cloudformation/model/resource/CodeCommit_UT.scala | Scala | bsd-3-clause | 1,745 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.hibernate.naming
import org.beangle.commons.config.Resources
import org.beangle.commons.lang.ClassLoaders
import org.beangle.data.hibernate.model.IdType
import org.beangle.data.orm.cfg.Profiles
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class RailsNamingPolicyTest extends AnyFunSpec with Matchers {
describe("RailsNamingPolicy") {
it("Get Module") {
System.setProperty("jpa_prefix", "public")
val profiles = new Profiles(new Resources(None, ClassLoaders.getResources("META-INF/beangle/orm.xml"), None))
val module = profiles.getProfile(classOf[NationBean])
assert(module.schema.contains("public_naming"))
assert(profiles.getPrefix(classOf[NationBean]) == "gb_")
val daoModule = profiles.getProfile(classOf[SchoolBean])
assert(daoModule.parent.nonEmpty)
assert(daoModule.parent.get.packageName == "org.beangle.data.hibernate")
assert(profiles.getSchema(classOf[IdType]).contains("school"))
}
}
}
| beangle/data | hibernate/src/test/scala/org/beangle/data/hibernate/naming/RailsNamingPolicyTest.scala | Scala | lgpl-3.0 | 1,756 |
package wow
import akka.actor.{Actor, ActorLogging, ActorSystem, Props, SupervisorStrategy}
import akka.http.scaladsl.settings.ServerSettings
import pureconfig._
import scalikejdbc.ConnectionPool
import wow.api.WebServer
import wow.auth.{AccountsState, AuthServer}
import wow.common.config.deriveIntMap
import wow.common.database.Database
import wow.realm.RealmServer
import wow.utils.Reflection
class Application extends Actor with ActorLogging {
Reflection.eagerLoadClasses()
Database.configure()
// This database access is required by both authserver and realmserver
// Can't rely on AuthServer actor existing for it to be initialized
AuthServer.initializeDatabase()
context.actorOf(AccountsState.props, AccountsState.PreferredName)
context.actorOf(AuthServer.props, AuthServer.PreferredName)
for (id <- Application.configuration.realms.keys) {
context.actorOf(RealmServer.props(id), RealmServer.PreferredName(id))
}
override def supervisorStrategy: SupervisorStrategy = SupervisorStrategy.stoppingStrategy
override def postStop(): Unit = {
// In case any latent connections remain, close them
// Should not be useful, as actors would close their own connections
ConnectionPool.closeAll()
super.postStop()
}
override def receive: Receive = PartialFunction.empty
}
object Application {
private var startTime: Long = _
val configuration: ApplicationConfiguration = loadConfigOrThrow[ApplicationConfiguration]("wow")
def main(args: Array[String]): Unit = {
val system = ActorSystem("wow")
system.actorOf(Props(new Application), "app")
startTime = System.currentTimeMillis()
WebServer.startServer(configuration.webServer.host, configuration.webServer.port, ServerSettings(system), system)
system.terminate()
}
def uptimeMillis(): Long = {
System.currentTimeMillis() - startTime
}
val ActorPath = "akka://wow/user/app/"
}
| SKNZ/SpinaciCore | wow/core/src/main/scala/wow/Application.scala | Scala | mit | 1,922 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.javadsl.testkit
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import java.util.Optional
import java.util.function.{ Function => JFunction }
import akka.actor.ActorSystem
import akka.annotation.ApiMayChange
import akka.japi.function.Effect
import akka.japi.function.Procedure
import akka.stream.Materializer
import com.lightbend.lagom.devmode.ssl.LagomDevModeSSLHolder
import com.lightbend.lagom.internal.javadsl.api.broker.TopicFactory
import com.lightbend.lagom.internal.javadsl.cluster.JoinClusterModule
import com.lightbend.lagom.internal.persistence.testkit.AwaitPersistenceInit.awaitPersistenceInit
import com.lightbend.lagom.internal.persistence.testkit.PersistenceTestConfig._
import com.lightbend.lagom.internal.testkit.TestkitSslSetup.Disabled
import com.lightbend.lagom.internal.testkit._
import com.lightbend.lagom.javadsl.api.Service
import com.lightbend.lagom.javadsl.api.ServiceLocator
import com.lightbend.lagom.javadsl.persistence.PersistenceModule
import com.lightbend.lagom.javadsl.pubsub.PubSubModule
import com.lightbend.lagom.spi.persistence.InMemoryOffsetStore
import com.lightbend.lagom.spi.persistence.OffsetStore
import javax.net.ssl.SSLContext
import play.Application
import play.api.inject.ApplicationLifecycle
import play.api.inject.BindingKey
import play.api.inject.DefaultApplicationLifecycle
import play.api.inject.{ bind => sBind }
import play.api.Configuration
import play.api.Play
import play.core.server.Server
import play.core.server.ServerConfig
import play.core.server.ServerProvider
import play.inject.Injector
import play.inject.guice.GuiceApplicationBuilder
import scala.annotation.tailrec
import scala.concurrent.Promise
import scala.concurrent.duration._
import scala.util.Try
import scala.util.control.NonFatal
/**
 * Support for writing functional tests for one service. The service runs in a
 * server, and in the test you interact with it using its service client,
 * i.e. by making calls to the service API.
*
* Dependencies to other services must be replaced by stub or mock implementations by
* overriding the bindings of the `GuiceApplicationBuilder` in the `Setup`.
*
 * The server is run standalone without persistence, pubsub or cluster features
 * enabled. Cassandra is also disabled by default. If your service requires any of these features, you
* can enable them in the `Setup`.
*
* There are two different styles that can be used. It is most convenient to use [[#withServer withServer]],
* since it automatically starts and stops the server before and after the given lambda.
 * When your test has several test methods, and especially when using persistence, it is
* faster to only [[#startServer start]] the server once in a static method annotated with `@BeforeClass`
* and stop it in a method annotated with `@AfterClass`.
*/
object ServiceTest {
// These are all specified as strings so that we can say they are disabled without having a dependency on them.
private val JdbcPersistenceModule = "com.lightbend.lagom.javadsl.persistence.jdbc.JdbcPersistenceModule"
private val CassandraPersistenceModule =
"com.lightbend.lagom.javadsl.persistence.cassandra.CassandraPersistenceModule"
private val KafkaBrokerModule = "com.lightbend.lagom.internal.javadsl.broker.kafka.KafkaBrokerModule"
private val KafkaClientModule = "com.lightbend.lagom.javadsl.broker.kafka.KafkaClientModule"
sealed trait Setup {
@deprecated(message = "Use withCassandra instead", since = "1.2.0")
def withPersistence(enabled: Boolean): Setup = withCassandra(enabled)
/**
* Enable or disable Cassandra.
*
* If enabled, this will start an embedded Cassandra server before the tests start, and shut it down afterwards.
* It will also configure Lagom to use the embedded Cassandra server. Enabling Cassandra will also enable the
* cluster.
*
* @param enabled True if Cassandra should be enabled, or false if disabled.
* @return A copy of this setup.
*/
def withCassandra(enabled: Boolean): Setup
/**
* Enable Cassandra.
*
* If enabled, this will start an embedded Cassandra server before the tests start, and shut it down afterwards.
* It will also configure Lagom to use the embedded Cassandra server. Enabling Cassandra will also enable the
* cluster.
*
* @return A copy of this setup.
*/
def withCassandra(): Setup = withCassandra(true)
/**
* Enable or disable JDBC.
*
* Enabling JDBC will also enable the cluster.
*
* @param enabled True if JDBC should be enabled, or false if disabled.
* @return A copy of this setup.
*/
def withJdbc(enabled: Boolean): Setup
/**
* Enable JDBC.
*
* Enabling JDBC will also enable the cluster.
*
* @return A copy of this setup.
*/
def withJdbc(): Setup = withJdbc(true)
@deprecated(message = "Use configureBuilder instead", since = "1.2.0")
def withConfigureBuilder(configureBuilder: JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder]) =
this.configureBuilder(configureBuilder)
/**
* Configure the builder.
*
* Allows a function to be supplied to configure the Play Guice application builder. This allows components to be
* mocked, modules to be enabled/disabled, and custom configuration to be supplied.
*
* @param configureBuilder The builder configuration function.
* @return A copy of this setup.
*/
def configureBuilder(configureBuilder: JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder]): Setup
/**
* Enable or disable clustering.
*
* Disabling this will automatically disable any persistence plugins, since persistence requires clustering.
*
* @param enabled True if clustering should be enabled, or false if disabled.
* @return A copy of this setup.
*/
def withCluster(enabled: Boolean): Setup
/**
* Enable clustering.
*
* Disabling this will automatically disable any persistence plugins, since persistence requires clustering.
*
* @return A copy of this setup.
*/
def withCluster(): Setup = withCluster(true)
/**
* Enable or disable the SSL port.
*
* @param enabled True if the server should bind an HTTP+TLS port, or false if only HTTP should be bound.
* @return A copy of this setup.
*/
@ApiMayChange
def withSsl(enabled: Boolean): Setup
/**
* Enable the SSL port.
*
* @return A copy of this setup.
*/
@ApiMayChange
def withSsl(): Setup = withSsl(true)
/**
* Whether Cassandra is enabled.
*/
def cassandra: Boolean
/**
* Whether JDBC is enabled.
*/
def jdbc: Boolean
/**
* Whether clustering is enabled.
*/
def cluster: Boolean
/**
* Whether HTTPS is enabled.
*/
def ssl: Boolean
/**
* The builder configuration function
*/
def configureBuilder: JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder]
}
private case class SetupImpl(
cassandra: Boolean,
jdbc: Boolean,
cluster: Boolean,
ssl: Boolean,
configureBuilder: JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder]
) extends Setup {
def this() = this(
cassandra = false,
jdbc = false,
cluster = false,
ssl = false,
configureBuilder = new JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder] {
override def apply(b: GuiceApplicationBuilder): GuiceApplicationBuilder = b
}
)
override def withCassandra(enabled: Boolean): Setup = {
if (enabled) {
copy(cassandra = true, cluster = true)
} else {
copy(cassandra = false)
}
}
override def withJdbc(enabled: Boolean): Setup =
if (enabled) {
copy(jdbc = true, cluster = true)
} else {
copy(jdbc = false)
}
override def withCluster(enabled: Boolean): Setup = {
if (enabled) {
copy(cluster = true)
} else {
copy(cluster = false, cassandra = false)
}
}
override def withSsl(enabled: Boolean): Setup = {
copy(ssl = enabled)
}
override def configureBuilder(
configureBuilder: JFunction[GuiceApplicationBuilder, GuiceApplicationBuilder]
): Setup = {
copy(configureBuilder = configureBuilder)
}
}
/**
* The default `Setup` configuration, which has persistence enabled.
*/
val defaultSetup: Setup = new SetupImpl()
/**
* When the server is started you can get the service client and other
* Guice bindings here.
*/
class TestServer(
val port: Int,
val app: Application,
server: Server,
@ApiMayChange val clientSslContext: Optional[SSLContext] = Optional.empty()
) {
@ApiMayChange val portSsl: Optional[Integer] = Optional.ofNullable(server.httpsPort.map(Integer.valueOf).orNull)
/**
* Get the service client for a service.
*/
def client[S <: Service](serviceClass: Class[S]): S =
app.injector().instanceOf(serviceClass)
/**
* Stream materializer. Useful for Akka Streams TestKit.
*/
def materializer: Materializer = injector.instanceOf(classOf[Materializer])
/**
* Current Akka `ActorSystem`. Useful for Akka Streams TestKit.
*/
def system: ActorSystem = injector.instanceOf(classOf[ActorSystem])
/**
* The Guice injector that can be used for retrieving anything
* that has been bound to Guice.
*/
def injector: Injector = app.injector()
/**
* If you use `startServer` you must also stop the server with
* this method when the test is finished. That is handled automatically
* by `withServer`.
*/
def stop(): Unit = {
Try(Play.stop(app.asScala()))
Try(server.stop())
}
}
/**
* Start the test server with the given `setup` and run the `block` (lambda). When
* the `block` returns or throws the test server will automatically be stopped.
*
* This method should be used when the server can be started and stopped for each test
   * method. When your test has several test methods, and especially when using persistence, it is
* faster to only start the server once with [[#startServer]].
*
* You can get the service client from the `TestServer` that is passed as parameter
* to the `block`.
*/
def withServer(
setup: Setup,
block: Procedure[TestServer]
): Unit = {
// using Procedure instead of Consumer to support throwing Exception
val testServer = startServer(setup)
try {
block(testServer)
} finally {
testServer.stop()
}
}
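  // Hedged usage sketch from a Java test (the service interface and assertions are assumptions):
  //   withServer(defaultSetup().withCassandra(), server -> {
  //     HelloService client = server.client(HelloService.class);
  //     // ... invoke the client and assert on the reply; the server is stopped automatically ...
  //   });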
/**
* Start the test server with the given `setup`. You must stop the server with
* the `stop` method of the returned `TestServer` when the test is finished.
*
   * When your test has several test methods, and especially when using persistence, it is
* faster to only start the server once in a static method annotated with `@BeforeClass`
* and stop it in a method annotated with `@AfterClass`. Otherwise [[#withServer]] is
* more convenient.
*
* You can get the service client from the returned `TestServer`.
*/
def startServer(setup: Setup): TestServer = {
val port = Promise[Int]()
val testServiceLocatorPort = TestServiceLocatorPort(port.future)
val now = DateTimeFormatter.ofPattern("yyMMddHHmmssSSS").format(LocalDateTime.now())
val testName = s"ServiceTest_$now"
val lifecycle = new DefaultApplicationLifecycle
val initialBuilder = new GuiceApplicationBuilder()
.bindings(sBind[TestServiceLocatorPort].to(testServiceLocatorPort))
.bindings(sBind[ServiceLocator].to(classOf[TestServiceLocator]))
.bindings(sBind[TopicFactory].to(classOf[TestTopicFactory]))
.overrides(sBind[ApplicationLifecycle].to(lifecycle))
.configure("play.akka.actor-system", testName)
val finalBuilder =
if (setup.cassandra) {
val cassandraPort = CassandraTestServer.run(testName, lifecycle)
initialBuilder
.configure(cassandraConfig(testName, cassandraPort))
.disableModules(JdbcPersistenceModule, KafkaClientModule, KafkaBrokerModule)
} else if (setup.jdbc) {
initialBuilder
.configure(JdbcConfig)
.disableModules(CassandraPersistenceModule, KafkaClientModule, KafkaBrokerModule)
} else if (setup.cluster) {
initialBuilder
.configure(ClusterConfig)
.disable(classOf[PersistenceModule])
.bindings(play.api.inject.bind[OffsetStore].to[InMemoryOffsetStore])
.disableModules(CassandraPersistenceModule, JdbcPersistenceModule, KafkaClientModule, KafkaBrokerModule)
} else {
initialBuilder
.configure(BasicConfig)
.disable(classOf[PersistenceModule], classOf[PubSubModule], classOf[JoinClusterModule])
.bindings(play.api.inject.bind[OffsetStore].to[InMemoryOffsetStore])
.disableModules(CassandraPersistenceModule, JdbcPersistenceModule, KafkaClientModule, KafkaBrokerModule)
}
val application = setup.configureBuilder(finalBuilder).build()
Play.start(application.asScala())
val sslSetup: TestkitSslSetup.TestkitSslSetup = if (setup.ssl) {
val sslHolder = new LagomDevModeSSLHolder(application.environment().asScala())
val clientSslContext: SSLContext = sslHolder.sslContext
// In tests we're using a self-signed certificate so we use the same keyStore for both
// the server and the client trustStore.
TestkitSslSetup.enabled(sslHolder.keyStoreMetadata, sslHolder.trustStoreMetadata, clientSslContext)
} else {
Disabled
}
val props = System.getProperties
val sslConfig: Configuration =
Configuration.load(this.getClass.getClassLoader, props, sslSetup.sslSettings, allowMissingApplicationConf = true)
val serverConfig: ServerConfig = new ServerConfig(
port = Some(0),
sslPort = sslSetup.sslPort,
mode = application.environment().mode.asScala(),
configuration = sslConfig,
rootDir = application.environment().rootPath,
address = "0.0.0.0",
properties = props
)
val srv = ServerProvider.defaultServerProvider.createServer(serverConfig, application.asScala())
val assignedPort = srv.httpPort.orElse(srv.httpsPort).get
port.success(assignedPort)
if (setup.cassandra || setup.jdbc) {
val system = application.injector().instanceOf(classOf[ActorSystem])
awaitPersistenceInit(system)
}
val javaSslContext = Optional.ofNullable(sslSetup.clientSslContext.orNull)
new TestServer(assignedPort, application, srv, javaSslContext)
}
/**
* Enriches [[GuiceApplicationBuilder]] with a `disableModules` method.
*/
private implicit class GuiceBuilderOps(val builder: GuiceApplicationBuilder) extends AnyVal {
def disableModules(classes: String*): GuiceApplicationBuilder = {
val loadedClasses = classes.flatMap { className =>
try {
Seq(getClass.getClassLoader.loadClass(className))
} catch {
case cfne: ClassNotFoundException =>
Seq.empty[Class[_]]
}
}
if (loadedClasses.nonEmpty) {
builder.disable(loadedClasses: _*)
} else {
builder
}
}
}
/**
   * Retry the given `block` (lambda) until it does not throw an exception or the timeout
* expires, whichever comes first. If the timeout expires the last exception
* is thrown. The `block` is retried with 100 milliseconds interval.
*/
def eventually(max: FiniteDuration, block: Effect): Unit =
eventually(max, 100.millis, block)
/**
   * Retry the given `block` (lambda) until it does not throw an exception or the timeout
* expires, whichever comes first. If the timeout expires the last exception
* is thrown. The `block` is retried with the given `interval`.
*/
def eventually(max: FiniteDuration, interval: FiniteDuration, block: Effect): Unit = {
def now = System.nanoTime.nanos
val stop = now + max
@tailrec
def poll(t: Duration): Unit = {
val failed =
try {
block(); false
} catch {
case NonFatal(e) =>
if ((now + t) >= stop) throw e
true
}
if (failed) {
Thread.sleep(t.toMillis)
poll((stop - now).min(interval))
}
}
poll(max.min(interval))
}
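  // Hedged illustration (the assertion and the `tenSeconds` FiniteDuration value are assumed):
  //   eventually(tenSeconds, () -> assertEquals(1, repository.count()));
  // The block is re-run every 100 milliseconds (the default interval) until it stops throwing,
  // or until the budget is exhausted, in which case the last exception is rethrown.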
/**
* Create a binding that can be used with the `GuiceApplicationBuilder`
* in the `Setup`, e.g. to override bindings to stub out dependencies to
* other services.
*/
def bind[T](clazz: Class[T]): BindingKey[T] =
play.inject.Bindings.bind(clazz)
}
| lagom/lagom | testkit/javadsl/src/main/scala/com/lightbend/lagom/javadsl/testkit/ServiceTest.scala | Scala | apache-2.0 | 16,935 |
package structures
import simulacrum.{ noop, op, typeclass }
@typeclass trait FlatMap[F[_]] extends Any with Apply[F] {
@op(">>=", alias = true)
def flatMap[A, B](fa: F[A])(f: A => F[B]): F[B]
def flatten[A, B](ffa: F[F[A]]): F[A] =
flatMap(ffa)(identity)
@noop override def apply[A, B](fa: F[A])(f: F[A => B]): F[B] =
flatMap(f)(map(fa))
}
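// Hedged sketch (not part of the library): a possible Option instance, assuming Apply/Functor leave
// only `map` (besides `flatMap`) abstract here. `flatten` and the overridden `apply` then come for
// free from `flatMap`:
//   implicit val optionFlatMap: FlatMap[Option] = new FlatMap[Option] {
//     def map[A, B](fa: Option[A])(f: A => B): Option[B] = fa.map(f)
//     def flatMap[A, B](fa: Option[A])(f: A => Option[B]): Option[B] = fa.flatMap(f)
//   }
//   FlatMap[Option].flatten(Option(Option(1)))  // Option(1)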
| mpilquist/Structures | core/shared/src/main/scala/structures/FlatMap.scala | Scala | bsd-3-clause | 362 |
package microtools.anyops
import org.scalatest.{MustMatchers, WordSpec}
import microtools.anyops.AutoAnyValNumeric._
import scala.math.Numeric.Implicits._
import scala.math.Fractional.Implicits._
case class Bauer(value: Int) extends AnyVal
case class Ralph(value: Double) extends AnyVal
class AnyValNumericOperationsSpec extends WordSpec with MustMatchers {
"AnyVal operations spec" should {
"apply operators on anyval" in {
Bauer(2) + Bauer(3) must equal(Bauer(5))
Bauer(2) - Bauer(3) must equal(Bauer(-1))
Bauer(2) * Bauer(3) must equal(Bauer(6))
Ralph(10.0) / Ralph(2.0d) must equal(Ralph(5.0d))
Bauer(2).toInt must equal(2)
Bauer(2).toDouble must equal(2.0d)
Bauer(2).toFloat must equal(2.0f)
Bauer(2).toLong must equal(2L)
-Bauer(2) must equal(Bauer(-2))
Bauer(2) must be <= Bauer(3)
Bauer(3) must be >= Bauer(2)
Bauer(2) must be < Bauer(3)
Bauer(3) must be > Bauer(2)
}
}
}
| 21re/play-micro-tools | src/test/scala/microtools/anyops/AnyValNumericOperationsSpec.scala | Scala | mit | 979 |
package mesosphere.mesos.simulation
import java.util
import java.util.Collections
import akka.actor.{ActorRef, ActorSystem, Props}
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.StrictLogging
import scala.jdk.CollectionConverters._
import org.apache.mesos.Protos._
import org.apache.mesos.scheduler.Protos.OfferConstraints
import org.apache.mesos.SchedulerDriver
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/**
* The facade to the mesos simulation.
*
* It starts/stops a new actor system for the simulation when the corresponding life-cycle methods of the
* [[org.apache.mesos.SchedulerDriver]] interface are called.
*
* The implemented commands of the driver interface are forwarded as messages to the
* [[mesosphere.mesos.simulation.DriverActor]].
* Unimplemented methods throw [[scala.NotImplementedError]]s.
*/
class SimulatedDriver(driverProps: Props) extends SchedulerDriver with StrictLogging {
private[this] def driverCmd(cmd: AnyRef): Status = {
driverActorRefOpt match {
case Some(driverActor) =>
logger.debug(s"send driver cmd $cmd")
driverActor ! cmd
case None =>
logger.debug("no driver actor configured")
}
status
}
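  // Hedged usage sketch (construction of `driverProps` is assumed): the facade is driven like a
  // real SchedulerDriver:
  //   val driver = new SimulatedDriver(driverProps)
  //   driver.run()  // start() boots the "mesos-simulation" actor system; join() blocks until it terminates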
override def declineOffer(offerId: OfferID): Status =
driverCmd(DriverActor.DeclineOffer(offerId))
override def launchTasks(offerIds: util.Collection[OfferID], tasks: util.Collection[TaskInfo]): Status =
driverCmd(DriverActor.LaunchTasks(offerIds.asScala.toSeq, tasks.asScala.toSeq))
// Mesos 0.23.x
override def acceptOffers(offerIds: util.Collection[OfferID], ops: util.Collection[Offer.Operation], filters: Filters): Status =
driverCmd(DriverActor.AcceptOffers(offerIds.asScala.toSeq, ops.asScala.toSeq, filters))
override def killTask(taskId: TaskID): Status = driverCmd(DriverActor.KillTask(taskId))
override def reconcileTasks(statuses: util.Collection[TaskStatus]): Status = {
driverCmd(DriverActor.ReconcileTask(statuses.asScala.toSeq))
}
override def suppressOffers(): Status = driverCmd(DriverActor.SuppressOffers)
override def reviveOffers(): Status = driverCmd(DriverActor.ReviveOffers)
override def reviveOffers(collection: util.Collection[String]): Status = ???
override def declineOffer(offerId: OfferID, filters: Filters): Status = Status.DRIVER_RUNNING
override def launchTasks(offerIds: util.Collection[OfferID], tasks: util.Collection[TaskInfo], filters: Filters): Status =
launchTasks(offerIds, tasks)
override def launchTasks(offerId: OfferID, tasks: util.Collection[TaskInfo], filters: Filters): Status =
launchTasks(Collections.singleton(offerId), tasks)
override def launchTasks(offerId: OfferID, tasks: util.Collection[TaskInfo]): Status =
launchTasks(Collections.singleton(offerId), tasks)
override def requestResources(requests: util.Collection[Request]): Status = ???
override def sendFrameworkMessage(executorId: ExecutorID, slaveId: SlaveID, data: Array[Byte]): Status = ???
override def acknowledgeStatusUpdate(ackStatus: TaskStatus): Status = status
// life cycle
@volatile
var system: Option[ActorSystem] = None
@volatile
var driverActorRefOpt: Option[ActorRef] = None
private def status: Status =
system match {
case None => Status.DRIVER_STOPPED
case Some(_) => Status.DRIVER_RUNNING
}
override def start(): Status = {
logger.info("Starting simulated Mesos")
val config: Config = ConfigFactory.load(getClass.getClassLoader, "mesos-simulation.conf")
val sys: ActorSystem = ActorSystem("mesos-simulation", config)
system = Some(sys)
driverActorRefOpt = Some(sys.actorOf(driverProps, "driver"))
driverCmd(this)
Status.DRIVER_RUNNING
}
override def stop(failover: Boolean): Status = stop()
override def stop(): Status = abort()
override def abort(): Status = {
system match {
case None => Status.DRIVER_NOT_STARTED
case Some(sys) =>
sys.terminate()
Status.DRIVER_ABORTED
}
}
override def run(): Status = {
start()
join()
}
override def join(): Status = {
system match {
case None => Status.DRIVER_NOT_STARTED
case Some(sys) =>
Await.result(sys.whenTerminated, Duration.Inf)
driverActorRefOpt = None
system = None
logger.info("Stopped simulated Mesos")
Status.DRIVER_STOPPED
}
}
override def suppressOffers(roles: util.Collection[String]): Status = ???
override def updateFramework(frameworkInfo: FrameworkInfo, suppressedRoles: util.Collection[String]): Status = ???
override def updateFramework(
frameworkInfo: FrameworkInfo,
suppressedRoles: util.Collection[String],
offerConstraints: OfferConstraints
): Status = ???
}
| mesosphere/marathon | mesos-simulation/src/main/scala/mesosphere/mesos/simulation/SimulatedDriver.scala | Scala | apache-2.0 | 4,814 |
import leon.lang._
import leon.lang.synthesis._
// Examples taken from http://lara.epfl.ch/~psuter/spt/
object SynthesisProceduresToolkit {
def e1(a: Nat, b: Nat, c: Nat): Nat = if ((b == c)) {
if ((a == c)) {
(choose { (x: Nat) =>
(x != a)
})
} else {
(choose { (x: Nat) =>
((x != a) && (x != b))
})
}
} else {
if ((a == b)) {
(choose { (x: Nat) =>
((x != a) && (x != c))
})
} else {
(choose { (x: Nat) =>
((x != a) && (x != b) && (x != c))
})
}
}
def e2(): (Nat, Nat, Nat) = (Z(), Succ(Z()), Succ(Succ(Succ(Z()))))
def e3(a1 : Nat, a2 : Nat, a3 : Nat, a4 : Nat): (Nat, Nat) = (a3, a4)
def e4(a1 : Nat, a2 : Nat, a3 : Nat, a4 : Nat): (Nat, Nat, NatList) = (Succ(a2), a1, Nil())
def e5(a1 : NatList, a2 : Nat, a3 : NatList): (Nat, NatList, Nat, NatList) = (choose { (x1: Nat, x2: NatList, x3: Nat, x4: NatList) =>
((Cons(Succ(x1), x2) == a1) && (Succ(x1) != a2) && (a3 == Cons(x3, Cons(x3, x4))))
})
def e6(a: Nat, b: Nat): (Nat, NatList) = if ((a == Succ(b))) {
(Z(), Nil())
} else {
leon.lang.error[(Nat, NatList)]("Precondition failed")
}
def e7(a1 : NatList, a2 : Nat, a3 : NatList): (Nat, NatList, Nat, NatList) = (choose { (x1: Nat, x2: NatList, x3: Nat, x4: NatList) =>
((Cons(Succ(x1), x2) == a1) && (Succ(x1) != a2) && (a3 == Cons(x3, Cons(x3, x4))))
})
def e8(a : Nat) = (a match {
case Succ(n150) =>
n150
case _ =>
leon.lang.error[(Nat)]("Precondition failed")
})
abstract class Nat
case class Z() extends Nat
case class Succ(n: Nat) extends Nat
abstract class NatList
case class Nil() extends NatList
case class Cons(head: Nat, tail: NatList) extends NatList
}
| ericpony/scala-examples | testcases/synthesis/Spt.scala | Scala | mit | 1,701 |
package nounou
///**
// * @author ktakagaki
// * // //@date 3/17/14.
// */
///**Base class for all options*/
//abstract class Opt
///Individual Options
/** Option values: flush behavior for spike detection. */
abstract class OptSpikeDetectorFlush extends Opt
object OptSpikeDetectorFlush {
case object All extends OptSpikeDetectorFlush {
override def toString = "Flush all spikes of appropriate trode from XSpikes object before detecting."
}
// implicit def optSpikeDetectorFlush_None_specialize( nounou.None ): OptSpikeDetectorFlush = OptSpikeDetectorFlush.None
case object None extends OptSpikeDetectorFlush {
override def toString = "Do not flush any spikes prior to detecting, just add on top."
}
// case object Range extends OptSpikeDetectorFlush {
// override def toString = "Just flush spikes within detection range, prior to adding new."
// }
}
| ktakagaki/nounou.rebooted150527 | src/main/scala/nounou/Options.scala | Scala | apache-2.0 | 877 |
package org.jetbrains.plugins.scala.highlighter
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.scala.settings.{ScalaProjectSettings, ScalaProjectSettingsUtil}
/**
* @author Roman.Shein
* Date: 11.01.2016
*/
object ScalaTestHighlighterUtil {
private val scalaTestKeywords =
Set("in", "ignore", "is", "be", "taggedAs", "when", "that", "which", "must", "can", "should", "behave", "feature",
"scenario", "like", "pending", "it", "they", "behavior", "describe", "property", "test")
//TODO it is possible for this to create some false-positives, but it is very unlikely
def isHighlightableScalaTestKeyword(classFqn: String, methodName: String, project: Project): Boolean =
classFqn != null && //CHANGED
classFqn.startsWith("org.scalatest") && scalaTestKeywords.contains(methodName)
}
| gtache/intellij-lsp | intellij-lsp-dotty/src/org/jetbrains/plugins/scala/highlighter/ScalaTestHighlighterUtil.scala | Scala | apache-2.0 | 848 |
package controllers
import scala.concurrent.duration._
import views._
import lila.app._
import lila.common.IpAddress
final class Search(env: Env) extends LilaController(env) {
def searchForm = env.gameSearch.forms.search
private val SearchRateLimitPerIP = new lila.memo.RateLimit[IpAddress](
credits = 50,
duration = 5.minutes,
key = "search.games.ip"
)
private val SearchConcurrencyLimitPerIP = new lila.memo.FutureConcurrencyLimit[IpAddress](
key = "search.games.concurrency.ip",
ttl = 10.minutes,
maxConcurrency = 1
)
def index(p: Int) =
OpenBody { implicit ctx =>
env.game.cached.nbTotal flatMap { nbGames =>
if (ctx.isAnon)
negotiate(
html = Unauthorized(html.search.login(nbGames)).fuccess,
api = _ => Unauthorized(jsonError("Login required")).fuccess
)
else
OnlyHumans {
val page = p atLeast 1
Reasonable(page, 100) {
val cost = scala.math.sqrt(page.toDouble).toInt
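            // e.g. page 1 -> cost 1, page 100 -> cost 10: deeper pages drain the 50-credit / 5-minute budget faster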
implicit def req = ctx.body
def limited =
fuccess {
val form = searchForm
.bindFromRequest()
.withError(
key = "",
message = "Please only send one request at a time per IP address"
)
TooManyRequests(html.search.index(form, none, nbGames))
}
SearchRateLimitPerIP(ctx.ip, cost = cost) {
SearchConcurrencyLimitPerIP(ctx.ip, limited = limited) {
negotiate(
html = searchForm
.bindFromRequest()
.fold(
failure => BadRequest(html.search.index(failure, none, nbGames)).fuccess,
data =>
data.nonEmptyQuery ?? { query =>
env.gameSearch.paginator(query, page) map some
} map { pager =>
Ok(html.search.index(searchForm fill data, pager, nbGames))
} recover { _ =>
InternalServerError("Sorry, we can't process that query at the moment")
}
),
api = _ =>
searchForm
.bindFromRequest()
.fold(
_ =>
BadRequest {
jsonError("Could not process search query")
}.fuccess,
data =>
data.nonEmptyQuery ?? { query =>
env.gameSearch.paginator(query, page) dmap some
} flatMap {
case Some(s) =>
env.api.userGameApi.jsPaginator(s) dmap {
Ok(_)
}
case None =>
BadRequest(jsonError("Could not process search query")).fuccess
} recover { _ =>
InternalServerError(
jsonError("Sorry, we can't process that query at the moment")
)
}
)
)
}
}(rateLimitedFu)
}
}
}
}
}
| luanlv/lila | app/controllers/Search.scala | Scala | mit | 3,599 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions.validation
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.expressions.utils.MapTypeTestBase
import org.junit.Test
class MapTypeValidationTest extends MapTypeTestBase {
@Test(expected = classOf[ValidationException])
def testWrongKeyType(): Unit = {
testAllApis('f2.at(12), "f2.at(12)", "f2[12]", "FAIL")
}
@Test(expected = classOf[ValidationException])
def testIncorrectMapTypeComparison(): Unit = {
testAllApis('f1 === 'f3, "f1 === f3", "f1 = f3", "FAIL")
}
@Test(expected = classOf[ValidationException])
def testUnsupportedComparisonType(): Unit = {
testAllApis('f6 !== 'f2, "f6 !== f2", "f6 != f2", "FAIL")
}
@Test(expected = classOf[ValidationException])
def testEmptyMap(): Unit = {
testAllApis("FAIL", "map()", "MAP[]", "FAIL")
}
@Test(expected = classOf[ValidationException])
def testUnsupportedMapImplicitTypeCastTableApi(): Unit = {
testTableApi(map("k1", 1.0, "k2", 2.0f), "map('k1', 1.0, 'k2', 2.0f)", "FAIL")
}
@Test(expected = classOf[ValidationException])
def testUnsupportedMapImplicitTypeCastSql(): Unit = {
testSqlApi("MAP['k1', 'string', 'k2', 12]", "FAIL")
}
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/expressions/validation/MapTypeValidationTest.scala | Scala | apache-2.0 | 2,076 |
/**
* *****************************************************************************
* Copyright 2014 Katja Hahn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ****************************************************************************
*/
package com.github.katjahahn.parser.sections.reloc
import com.github.katjahahn.parser.ScalaIOUtil.hex
import com.github.katjahahn.parser.IOUtil.NL
import com.github.katjahahn.parser.sections.SectionLoader.LoadInfo
import com.github.katjahahn.parser.optheader.WindowsEntryKey
import com.github.katjahahn.parser.optheader.StandardFieldEntryKey
import com.github.katjahahn.parser.optheader.DataDirectoryKey
import scala.collection.mutable.ListBuffer
import com.github.katjahahn.parser.sections.SpecialSection
import com.github.katjahahn.parser.Location
import scala.collection.JavaConverters._
import com.github.katjahahn.parser.PhysicalLocation
import org.apache.logging.log4j.LogManager
class RelocationSection(
private val blocks: List[BaseRelocBlock],
private val offset: Long) extends SpecialSection {
override def getInfo(): String = blocks.mkString(NL)
override def isEmpty(): Boolean = blocks.isEmpty
override def getOffset(): Long = 0
def getRelocBlocks(): java.util.List[BaseRelocBlock] = blocks.asJava
def getPhysicalLocations(): java.util.List[PhysicalLocation] =
blocks.flatMap(b => b.getLocations).asJava
}
object RelocationSection {
private val logger = LogManager.getLogger(RelocationSection.getClass().getName())
// set maximum to avoid endless parsing, e.g., in corkami's foldedhdr.exe
val maxblocks = 10000
// set maximum to avoid almost endless parsing, e.g., in corkami's reloccrypt.exe
val maxRelocsPerBlock = 10000
def apply(loadInfo: LoadInfo): RelocationSection = {
val opt = loadInfo.data.getOptionalHeader
val tableSize = opt.getDataDirectory().get(DataDirectoryKey.BASE_RELOCATION_TABLE).getDirectorySize()
val blocks = readBlocks(tableSize, loadInfo)
new RelocationSection(blocks, loadInfo.fileOffset)
}
private def readBlocks(tableSize: Long, loadInfo: LoadInfo): List[BaseRelocBlock] = {
val mmBytes = loadInfo.memoryMapped
val va = loadInfo.va
val blocks = ListBuffer[BaseRelocBlock]()
var offset = 0
while (offset < tableSize && blocks.size < maxblocks) {
val fileOffset = mmBytes.virtToPhysAddress(va + offset)
val length = 4
val fieldSize = 2
val pageRVA = mmBytes.getBytesLongValue(va + offset, length)
offset += length
val blockSize = mmBytes.getBytesLongValue(va + offset, length)
offset += length
val fields = ListBuffer[BlockEntry]()
val nrOfRelocs = ((blockSize - (length * 2)) / fieldSize).toInt
val limitedRelocs = if (nrOfRelocs <= maxRelocsPerBlock) nrOfRelocs else {
logger.warn(s"Too many relocations ($nrOfRelocs) for block at offset ${hex(fileOffset)}. Limit set.")
maxRelocsPerBlock
}
for (i <- 0 until limitedRelocs) {
val fieldValue = mmBytes.getBytesIntValue(va + offset, fieldSize)
fields += BlockEntry(fieldValue)
offset += fieldSize
}
blocks += new BaseRelocBlock(fileOffset, pageRVA, blockSize, fields.toList)
}
blocks.toList
}
def newInstance(loadInfo: LoadInfo): RelocationSection =
apply(loadInfo)
} | katjahahn/PortEx | src/main/java/com/github/katjahahn/parser/sections/reloc/RelocationSection.scala | Scala | apache-2.0 | 3,837 |
import _root_.io.gatling.core.scenario.Simulation
import ch.qos.logback.classic.{Level, LoggerContext}
import io.gatling.core.Predef._
import io.gatling.http.Predef._
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
/**
* Performance test for the File entity.
*/
class FileGatlingTest extends Simulation {
val context: LoggerContext = LoggerFactory.getILoggerFactory.asInstanceOf[LoggerContext]
// Log all HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("TRACE"))
// Log failed HTTP requests
//context.getLogger("io.gatling.http").setLevel(Level.valueOf("DEBUG"))
val baseURL = Option(System.getProperty("baseURL")) getOrElse """http://127.0.0.1:8080"""
val httpConf = http
.baseURL(baseURL)
.inferHtmlResources()
.acceptHeader("*/*")
.acceptEncodingHeader("gzip, deflate")
.acceptLanguageHeader("fr,fr-fr;q=0.8,en-us;q=0.5,en;q=0.3")
.connection("keep-alive")
.userAgentHeader("Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:33.0) Gecko/20100101 Firefox/33.0")
val headers_http = Map(
"Accept" -> """application/json"""
)
val headers_http_authenticated = Map(
"Accept" -> """application/json""",
"X-CSRF-TOKEN" -> "${csrf_token}"
)
val scn = scenario("Test the File entity")
.exec(http("First unauthenticated request")
.get("/api/account")
.headers(headers_http)
.check(status.is(401))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.exec(http("Authentication")
.post("/api/authentication")
.headers(headers_http_authenticated)
.formParam("j_username", "admin")
.formParam("j_password", "admin")
.formParam("remember-me", "true")
.formParam("submit", "Login"))
.pause(1)
.exec(http("Authenticated request")
.get("/api/account")
.headers(headers_http_authenticated)
.check(status.is(200))
.check(headerRegex("Set-Cookie", "CSRF-TOKEN=(.*); [P,p]ath=/").saveAs("csrf_token")))
.pause(10)
.repeat(2) {
exec(http("Get all files")
.get("/api/files")
.headers(headers_http_authenticated)
.check(status.is(200)))
.pause(10 seconds, 20 seconds)
.exec(http("Create new file")
.post("/api/files")
.headers(headers_http_authenticated)
.body(StringBody("""{"id":null, "name":"SAMPLE_TEXT", "date":"2020-01-01T00:00:00.000Z", "status":"SAMPLE_TEXT", "comments":"SAMPLE_TEXT"}""")).asJSON
.check(status.is(201))
.check(headerRegex("Location", "(.*)").saveAs("new_file_url")))
.pause(10)
.repeat(5) {
exec(http("Get created file")
.get("${new_file_url}")
.headers(headers_http_authenticated))
.pause(10)
}
.exec(http("Delete created file")
.delete("${new_file_url}")
.headers(headers_http_authenticated))
.pause(10)
}
val users = scenario("Users").exec(scn)
setUp(
users.inject(rampUsers(100) over (1 minutes))
).protocols(httpConf)
}
| eballerini/dictionary | src/test/gatling/simulations/FileGatlingTest.scala | Scala | mit | 3,335 |
package models
class DuplicatedUserNameException(val userName: String) extends Exception("User name '" + userName + "' is duplicated.")
| ruimo/store2 | app/models/DuplicatedUserNameException.scala | Scala | apache-2.0 | 137 |
package feh.phtpe
import feh.phtpe.Prefixes._
import org.specs2.Specification
class PhysTypedPrefixSpec extends Specification {
def is = "SI Prefixes\n".title ^
new PhysTypedIncrementalPrefixSpec ^
new PhysTypedDecrementalPrefixSpec ^
new NumericOverflow ^
new PhysTypedBundledPrefixSpec
}
class PhysTypedIncrementalPrefixSpec extends Specification {
def is = s2""" ${ "Incremental".title }
__ Int__
${ 1.of[Deca, Meter] phEquals 10.of[Meter] }
${ 1.of[Hecto, Meter] phEquals 100.of[Meter] }
${ 1.of[Kilo, Meter] phEquals 1000.of[Meter] }
${ 1.of[Mega, Meter] phEquals 1e6.of[Meter] }
${ 1.of[Giga, Meter] phEquals 1e9.of[Meter] }
__Long__
${ 1L.of[Deca, Meter] phEquals 10.of[Meter] }
${ 1L.of[Hecto, Meter] phEquals 100.of[Meter] }
${ 1L.of[Kilo, Meter] phEquals 1000.of[Meter] }
${ 1L.of[Mega, Meter] phEquals 1e6.of[Meter] }
${ 1L.of[Giga, Meter] phEquals 1e9.of[Meter] }
${ 1L.of[Tera, Meter] phEquals 1e12.of[Meter] }
__BigInt__
${ BigInt(1).of[Deca, Meter] phEquals 10.of[Meter] }
${ BigInt(1).of[Hecto, Meter] phEquals 100.of[Meter] }
${ BigInt(1).of[Kilo, Meter] phEquals 1000.of[Meter] }
${ BigInt(1).of[Mega, Meter] phEquals 1e6.of[Meter] }
${ BigInt(1).of[Giga, Meter] phEquals 1e9.of[Meter] }
${ BigInt(1).of[Tera, Meter] phEquals 1e12.of[Meter] }
__Float__
${ 1f.of[Deca, Meter] phEquals 10.of[Meter] }
${ 1f.of[Hecto, Meter] phEquals 100.of[Meter] }
${ 1f.of[Kilo, Meter] phEquals 1000.of[Meter] }
${ 1f.of[Mega, Meter] phEquals 1e6f.of[Meter] }
${ 1f.of[Giga, Meter] phEquals 1e9f.of[Meter] }
${ 1f.of[Tera, Meter] phEquals 1e12f.of[Meter] }
note that:
${ (1f.of[Tera, Meter] phEquals 1e12d.of[Meter]) isFailure }
because
${ 1e12f != 1e12d }
__Double__
${ 1d.of[Deca, Meter] phEquals 10.of[Meter] }
${ 1d.of[Hecto, Meter] phEquals 100.of[Meter] }
${ 1d.of[Kilo, Meter] phEquals 1000.of[Meter] }
${ 1d.of[Mega, Meter] phEquals 1e6.of[Meter] }
${ 1d.of[Giga, Meter] phEquals 1e9.of[Meter] }
${ 1d.of[Tera, Meter] phEquals 1e12.of[Meter] }
__BigDecimal__
${ BigDecimal(1).of[Deca, Meter] phEquals 10.of[Meter] }
${ BigDecimal(1).of[Hecto, Meter] phEquals 100.of[Meter] }
${ BigDecimal(1).of[Kilo, Meter] phEquals 1000.of[Meter] }
${ BigDecimal(1).of[Mega, Meter] phEquals 1e6.of[Meter] }
${ BigDecimal(1).of[Giga, Meter] phEquals 1e9.of[Meter] }
${ BigDecimal(1).of[Tera, Meter] phEquals 1e12.of[Meter] }
"""
}
class PhysTypedDecrementalPrefixSpec extends Specification {
def is = s2""" ${ "Decremental".title }
__Float__
${ 1f.of[Deci, Meter] phEquals 0.1f.of[Meter] }
${ 1f.of[Centi, Meter] phEquals 0.01f.of[Meter] }
${ 1f.of[Milli, Meter] phEquals 0.001f.of[Meter] }
${ 1f.of[Micro, Meter] phEquals 1e-6f.of[Meter] }
${ 1f.of[Nano, Meter] phEquals 1e-9f.of[Meter] }
${ 1f.of[Pico, Meter] phEquals 1e-12f.of[Meter] }
__Double__
${ 1d.of[Deci, Meter] phEquals 0.1.of[Meter] }
${ 1d.of[Centi, Meter] phEquals 0.01.of[Meter] }
${ 1d.of[Milli, Meter] phEquals 0.001.of[Meter] }
${ 1d.of[Micro, Meter] phEquals 1e-6.of[Meter] }
${ 1d.of[Nano, Meter] phEquals 1e-9.of[Meter] }
${ 1d.of[Pico, Meter] phEquals 1e-12.of[Meter] }
__BigDecimal__
${ BigDecimal(1).of[Deci, Meter] phEquals 0.1.of[Meter] }
${ BigDecimal(1).of[Centi, Meter] phEquals 0.01.of[Meter] }
${ BigDecimal(1).of[Milli, Meter] phEquals 0.001.of[Meter] }
${ BigDecimal(1).of[Micro, Meter] phEquals 1e-6.of[Meter] }
${ BigDecimal(1).of[Nano, Meter] phEquals 1e-9.of[Meter] }
${ BigDecimal(1).of[Pico, Meter] phEquals 1e-12.of[Meter] }
"""
}
class NumericOverflow extends Specification{
def is = s2""" ${ "WARNING".title }
__Prefixes might cause errors due to java's primitive types overflow__
${ 1000.of[Giga, Volt].value != 1L.of[Tera, Volt].value }
"""
}
class PhysTypedBundledPrefixSpec extends Specification{
def is = s2""" ${ "Prefixed Units".title }
__ __
${ type km = Prefixed[Kilo, Meter]; 1.of[km] phEquals 1000.of[Meter] }
${ type km = Kilo@@Meter; 1.of[km] phEquals 1000.of[Meter] }
${ type gr = Milli@@Kilogram; 1f.of[gr] phEquals 1e-3f.of[Kilogram] }
${ type pF = Pico@@Farad; 1e12.of[pF] phEquals 1.of[Farad] }
${ type GV = Giga@@Volt; type TV = Tera@@Volt; 1000L.of[GV] phEquals 1L.of[TV] }
${def foo(d: Int|Meter) = true; type km = Kilo@@Meter; foo(2.of[km])}
${def foo(d: Int|Meter) = true; foo(2.of[Kilo@@Meter])}
${def foo[X](x: X|Meter) = true; type km = Kilo@@Meter; foo(5f.of[km]); true}
${trait Foo[X]{ def x: X|Meter }; new Foo[Float]{ def x = 5f.of[Kilo@@Meter] }; true}
${class Foo[X](x: X|Meter); type km = Kilo@@Meter; new Foo[Float](5.of[km]); true}
${case class Foo[X](x: X|Meter); type km = Kilo@@Meter; Foo(5f.of[km]); true}
"""
}
| fehu/phtpe | phtpe/src/test/scala/feh/phtpe/PhysTypedPrefixSpec.scala | Scala | mit | 5,016 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.viz
import com.twitter.summingbird._
import scala.collection.mutable.{ Map => MMap }
case class ProducerViz[P <: Platform[P]](tail: Producer[P, _]) {
private val dependantState = Dependants(tail)
  // These caches are only kept/used for a short period;
  // access is single-threaded and mutation is tightly controlled.
  // Mutable maps are used here instead of immutable ones to simplify the code.
private val nodeLookupTable = MMap[Producer[P, _], String]()
private val nameLookupTable = MMap[String, Int]()
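  /** Returns a cached display name for a node; repeated class names get a numeric suffix (e.g. "[2]") to keep names unique. */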
def getName(node: Producer[P, _]): String = {
val preferredName = node match {
case NamedProducer(parent, name) => "NamedProducer(%s)".format(name)
case _ => node.getClass.getName.replaceFirst("com.twitter.summingbird.", "")
}
nodeLookupTable.get(node) match {
case Some(name) => name
case None =>
nameLookupTable.get(preferredName) match {
case Some(count) => {
val newNum = count + 1
val newName = preferredName + "[" + newNum + "]"
nodeLookupTable += (node -> newName)
nameLookupTable += (preferredName -> newNum)
newName
}
case None =>
nodeLookupTable += (node -> preferredName)
nameLookupTable += (preferredName -> 1)
preferredName
}
}
}
override def toString(): String = {
    val base = "digraph summingbirdGraph {\n"
val graphStr = dependantState.nodes.flatMap { evalNode =>
val children = dependantState.dependantsOf(evalNode).getOrElse(sys.error("Invalid node: %s, unable to find dependants".format(evalNode)))
val nodeName = getName(evalNode)
      children.map { c =>
        "\"%s\" -> \"%s\"\n".format(nodeName, getName(c))
      }
}.mkString("")
    base + graphStr + "\n}"
}
}
| rangadi/summingbird | summingbird-core/src/main/scala/com/twitter/summingbird/viz/ProducerViz.scala | Scala | apache-2.0 | 2,414 |
package io.scalac.seed.route
import akka.actor._
import io.scalac.seed.domain.VehicleAggregate
import io.scalac.seed.service._
import spray.httpx.Json4sSupport
import spray.routing._
import spray.routing.authentication.BasicAuth
object VehicleRoute {
case class UpdateVehicleData(value: String)
}
trait VehicleRoute extends HttpService with Json4sSupport with RequestHandlerCreator with UserAuthenticator {
import VehicleRoute._
import VehicleAggregateManager._
val vehicleAggregateManager: ActorRef
val vehicleRoute =
path("vehicles" / Segment / "regnumber" ) { id =>
post {
authenticate(BasicAuth(userAuthenticator _, realm = "secure site")) { user =>
entity(as[UpdateVehicleData]) { cmd =>
serveUpdate(UpdateRegNumber(id, cmd.value))
}
}
}
} ~
path("vehicles" / Segment / "color" ) { id =>
post {
authenticate(BasicAuth(userAuthenticator _, realm = "secure site")) { user =>
entity(as[UpdateVehicleData]) { cmd =>
serveUpdate(UpdateColor(id, cmd.value))
}
}
}
} ~
path("vehicles" / Segment ) { id =>
get {
serveGet(GetVehicle(id))
} ~
delete {
authenticate(BasicAuth(userAuthenticator _, realm = "secure site")) { user =>
serveDelete(DeleteVehicle(id))
}
}
} ~
path("vehicles") {
authenticate(BasicAuth(userAuthenticator _, realm = "secure site")) { user =>
post {
entity(as[RegisterVehicle]) { cmd =>
serveRegister(cmd)
}
}
}
}
private def serveUpdate(message : AggregateManager.Command): Route =
ctx => handleUpdate[VehicleAggregate.Vehicle](ctx, vehicleAggregateManager, message)
private def serveRegister(message : AggregateManager.Command): Route =
ctx => handleRegister[VehicleAggregate.Vehicle](ctx, vehicleAggregateManager, message)
private def serveDelete(message : AggregateManager.Command): Route =
ctx => handleDelete(ctx, vehicleAggregateManager, message)
private def serveGet(message : AggregateManager.Command): Route =
ctx => handleGet[VehicleAggregate.Vehicle](ctx, vehicleAggregateManager, message)
} | ScalaConsultants/akka-persistence-eventsourcing | src/main/scala/io/scalac/seed/route/VehicleRoute.scala | Scala | apache-2.0 | 2,237 |
package monocle.function
import monocle.{Iso, Optional}
import scala.annotation.{implicitNotFound, tailrec}
import scala.collection.immutable.{ListMap, SortedMap}
import scala.util.Try
/**
 * Typeclass that defines an [[Optional]] from an `S` to an `A` at an index `I`.
 * [[Index]] is less powerful than [[At]] as it cannot create or delete values.
* @tparam S source of [[Optional]]
* @tparam I index
* @tparam A target of [[Optional]], `A` is supposed to be unique for a given pair `(S, I)`
*/
@implicitNotFound(
"Could not find an instance of Index[${S},${I},${A}], please check Monocle instance location policy to " + "find out which import is necessary"
)
abstract class Index[S, I, A] extends Serializable {
def index(i: I): Optional[S, A]
}
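// Hypothetical usage sketch (not part of the library): it exercises the `listIndex`
// instance provided by the companion object below; the object and value names are
// illustrative only.
private[function] object IndexUsageSketch {
  val second: Optional[List[Int], Int] = Index.index[List[Int], Int, Int](1)
  val read: Option[Int] = second.getOption(List(10, 20, 30))  // Some(20)
  val missing: Option[Int] = second.getOption(List(10))       // None: index out of range
  val written: List[Int] = second.set(99)(List(10, 20, 30))   // List(10, 99, 30)
}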
trait IndexFunctions {
def index[S, I, A](i: I)(implicit ev: Index[S, I, A]): Optional[S, A] = ev.index(i)
@deprecated("use Index.fromAt", since = "1.4.0")
def atIndex[S, I, A](implicit ev: At[S, I, Option[A]]) = Index.fromAt[S, I, A]
}
object Index extends IndexFunctions with IndexInstancesScalaVersionSpecific {
def apply[S, I, A](optional: I => Optional[S, A]): Index[S, I, A] = (i: I) => optional(i)
/** lift an instance of [[Index]] using an [[Iso]] */
def fromIso[S, A, I, B](iso: Iso[S, A])(implicit ev: Index[A, I, B]): Index[S, I, B] =
Index(
iso composeOptional ev.index(_)
)
def fromAt[S, I, A](implicit ev: At[S, I, Option[A]]): Index[S, I, A] =
Index(
ev.at(_) composePrism monocle.std.option.some
)
/************************************************************************************************/
/** Std instances */
/************************************************************************************************/
implicit def listIndex[A]: Index[List[A], Int, A] =
Index(i =>
if (i < 0)
Optional[List[A], A](_ => None)(_ => identity)
else
Optional[List[A], A](_.drop(i).headOption)(a => s => Try(s.updated(i, a)).getOrElse(s))
)
implicit def listMapIndex[K, V]: Index[ListMap[K, V], K, V] = fromAt
implicit def mapIndex[K, V]: Index[Map[K, V], K, V] = fromAt
implicit def sortedMapIndex[K, V]: Index[SortedMap[K, V], K, V] = fromAt
implicit val stringIndex: Index[String, Int, Char] = Index(
monocle.std.string.stringToList composeOptional Index.index[List[Char], Int, Char](_)
)
implicit def vectorIndex[A]: Index[Vector[A], Int, A] =
Index(i =>
Optional[Vector[A], A](v => if (v.isDefinedAt(i)) Some(v(i)) else None)(a =>
v =>
if (v.isDefinedAt(i)) v.updated(i, a)
else v
)
)
/************************************************************************************************/
/** Cats instances */
/************************************************************************************************/
import cats.data.{Chain, NonEmptyChain, NonEmptyList, NonEmptyVector, OneAnd}
import monocle.function.Cons1.{necCons1, nelCons1, nevCons1, oneAndCons1}
implicit def chainIndex[A]: Index[Chain[A], Int, A] =
new Index[Chain[A], Int, A] {
def index(i: Int) =
Optional[Chain[A], A] { c =>
if (i < 0)
None
else {
val it = c.iterator.drop(i)
if (it.hasNext) Some(it.next)
else None
}
}(a =>
c => {
@tailrec
def go(cur: Int, oldC: Chain[A], newC: Chain[A]): Chain[A] =
oldC.uncons match {
case Some((h, t)) =>
if (cur == i)
newC.append(a).concat(t)
else
go(cur + 1, t, newC.append(h))
case None => newC
}
if (i >= 0 && i < c.length) go(0, c, Chain.empty) else c
}
)
}
implicit def necIndex[A]: Index[NonEmptyChain[A], Int, A] =
new Index[NonEmptyChain[A], Int, A] {
def index(i: Int): Optional[NonEmptyChain[A], A] =
i match {
case 0 => necCons1.head.asOptional
case _ => necCons1.tail composeOptional chainIndex.index(i - 1)
}
}
implicit def nelIndex[A]: Index[NonEmptyList[A], Int, A] =
new Index[NonEmptyList[A], Int, A] {
def index(i: Int): Optional[NonEmptyList[A], A] =
i match {
case 0 => nelCons1.head.asOptional
case _ => nelCons1.tail composeOptional listIndex.index(i - 1)
}
}
implicit def nevIndex[A]: Index[NonEmptyVector[A], Int, A] =
new Index[NonEmptyVector[A], Int, A] {
def index(i: Int): Optional[NonEmptyVector[A], A] =
i match {
case 0 => nevCons1.head.asOptional
case _ => nevCons1.tail composeOptional vectorIndex.index(i - 1)
}
}
implicit def oneAndIndex[T[_], A](implicit ev: Index[T[A], Int, A]): Index[OneAnd[T, A], Int, A] =
Index {
case 0 => oneAndCons1[T, A].head.asOptional
case i => oneAndCons1[T, A].tail composeOptional ev.index(i - 1)
}
}
| aoiroaoino/Monocle | core/shared/src/main/scala/monocle/function/Index.scala | Scala | mit | 5,167 |
object Test {
def f[F](f: [t] => t => F): Unit = ()
f([t] => (x: t) => x)
}
| dotty-staging/dotty | tests/pos/i8900-polyfunction.scala | Scala | apache-2.0 | 81 |
/**
* Copyright 2011 Cape Henry Technologies Inc.
*
* Licensed under the Apache License, Version 2.0
* (the "License"); You may not use this file except
* in compliance with the License. You may obtain a
* copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific
* language governing permissions and limitations under the
* License.
*
*/
package com.citechnical.model.acct
import com.citechnical.model.financial._
import com.citechnical.constant._
trait AccountBalance {
var currentBalance = new Money (1, "0.00", FinancialConstants.NUMERIC_DEC, 2, "USD") // default $0.00 US Currency
} | dlwhitehurst/accounting-model | src/main/java/com/citechnical/model/acct/AccountBalance.scala | Scala | apache-2.0 | 879 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert2.transforms
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MiscFunctionFactoryTest extends Specification {
"MiscFunctionFactory" >> {
"intToBoolean" should {
"convert a 0 to false" >> {
MiscFunctionFactory.intToBoolean(Array(Int.box(0))) mustEqual false
}
"convert a 1 to true" >> {
MiscFunctionFactory.intToBoolean(Array(Int.box(1))) mustEqual true
}
"convert any int other than 0 to true" >> {
MiscFunctionFactory.intToBoolean(Array(Int.box(1000))) mustEqual true
MiscFunctionFactory.intToBoolean(Array(Int.box(-2))) mustEqual true
}
"return null for null input" >> {
MiscFunctionFactory.intToBoolean(Array(null)) mustEqual null
}
"throw an error if faced with a non-int value" >> {
MiscFunctionFactory.intToBoolean(Array(Double.box(0.55567))) must throwA[ClassCastException]
MiscFunctionFactory.intToBoolean(Array("0")) must throwA[ClassCastException]
}
}
"withDefault" should {
"return the first argument, if it's not null" >> {
MiscFunctionFactory.withDefault(Array("a", Int.box(1))) mustEqual "a"
}
"return the default if the first argument is null" >> {
MiscFunctionFactory.withDefault(Array(null, Int.box(1))) mustEqual 1
}
}
"require" should {
"return the passed-in argument, if it exists" >> {
MiscFunctionFactory.require(Array("a")) mustEqual "a"
}
"throw an exception, if the passed-in argument is null" >> {
MiscFunctionFactory.require(Array(null)) must throwAn[IllegalArgumentException]
}
}
}
}
| locationtech/geomesa | geomesa-convert/geomesa-convert-common/src/test/scala/org/locationtech/geomesa/convert2/transforms/MiscFunctionFactoryTest.scala | Scala | apache-2.0 | 2,244 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal.bsp
import sbt.internal.bsp.codec.JsonProtocol.BspConnectionDetailsFormat
import sbt.io.IO
import sjsonnew.support.scalajson.unsafe.{ CompactPrinter, Converter }
import java.io.File
import java.nio.file.{ Files, Paths }
import scala.util.Properties
object BuildServerConnection {
final val name = "sbt"
final val bspVersion = "2.0.0-M5"
final val languages = Vector("scala")
  private final val SbtLaunchJar = "sbt-launch(-.*)?\\.jar".r
private[sbt] def writeConnectionFile(sbtVersion: String, baseDir: File): Unit = {
val bspConnectionFile = new File(baseDir, ".bsp/sbt.json")
val javaHome = System.getProperty("java.home")
val classPath = System.getProperty("java.class.path")
val sbtScript = Option(System.getProperty("sbt.script"))
.orElse(sbtScriptInPath)
.map(script => s"-Dsbt.script=$script")
// IntelliJ can start sbt even if the sbt script is not accessible from $PATH.
// To do so it uses its own bundled sbt-launch.jar.
// In that case, we must pass the path of the sbt-launch.jar to the BSP connection
// so that the server can be started.
// A known problem in that situation is that the .sbtopts and .jvmopts are not loaded.
val sbtLaunchJar = classPath
.split(File.pathSeparator)
.find(jar => SbtLaunchJar.findFirstIn(jar).nonEmpty)
.map(_.replace(" ", "%20"))
.map(jar => s"--sbt-launch-jar=$jar")
val argv =
Vector(
s"$javaHome/bin/java",
"-Xms100m",
"-Xmx100m",
"-classpath",
classPath,
) ++
sbtScript ++
Vector("xsbt.boot.Boot", "-bsp") ++
(if (sbtScript.isEmpty) sbtLaunchJar else None)
val details = BspConnectionDetails(name, sbtVersion, bspVersion, languages, argv)
val json = Converter.toJson(details).get
IO.write(bspConnectionFile, CompactPrinter(json), append = false)
}
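  // For illustration only (field names and order are inferred from BspConnectionDetails and its
  // sjson-new codec, so treat this as an approximation): the generated .bsp/sbt.json looks like
  //   { "name": "sbt", "version": "<sbtVersion>", "bspVersion": "2.0.0-M5",
  //     "languages": ["scala"], "argv": ["<java.home>/bin/java", "-Xms100m", ...] }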
private def sbtScriptInPath: Option[String] = {
// For those who use an old sbt script, the -Dsbt.script is not set
// As a fallback we try to find the sbt script in $PATH
val fileName = if (Properties.isWin) "sbt.bat" else "sbt"
val envPath = sys.env.getOrElse("PATH", "")
val allPaths = envPath.split(File.pathSeparator).map(Paths.get(_))
allPaths
.map(_.resolve(fileName))
.find(file => Files.exists(file) && Files.isExecutable(file))
.map(_.toString.replace(" ", "%20"))
}
}
| sbt/sbt | protocol/src/main/scala/sbt/internal/bsp/BuildServerConnection.scala | Scala | apache-2.0 | 2,579 |
import sbt._
import Keys._
import PlayProject._
object ApplicationBuild extends Build {
val appName = "sample"
val appVersion = "1.0"
val appDependencies = Seq(
// Add your project dependencies here,
)
val main = PlayProject(appName, appVersion, appDependencies, mainLang = JAVA).settings(
giter8.ScaffoldPlugin.scaffoldSettings:_*
)
}
| beni55/giter8 | sample/project/Build.scala | Scala | lgpl-3.0 | 390 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.SpatialAveragePooling
import com.intel.analytics.bigdl.dllib.nn.abstractnn.DataFormat
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import org.tensorflow.framework.NodeDef
import scala.reflect.ClassTag
class AvgPool extends TensorflowOpsLoader {
import Utils._
override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder,
context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
val attributes = nodeDef.getAttrMap
val format = getString(attributes, "data_format")
val strideList = getIntList(attributes, "strides")
val kernelList = getIntList(attributes, "ksize")
val (strideH, strideW, ksizeH, ksizeW) = format match {
case "NHWC" =>
        require(strideList(3) == 1, "strides on the depth dimension are not supported")
(strideList(1), strideList(2), kernelList(1), kernelList(2))
case "NCHW" =>
        require(strideList(1) == 1, "strides on the depth dimension are not supported")
(strideList(2), strideList(3), kernelList(2), kernelList(3))
case _ =>
throw new IllegalArgumentException(s"not supported data format: $format")
}
val (pW, pH) =
if (getString(attributes, "padding") == "SAME") {
(-1, -1)
} else {
(0, 0)
}
SpatialAveragePooling[T](ksizeW, ksizeH, strideW, strideH, pW, pH,
countIncludePad = false, format = DataFormat(format))
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/AvgPool.scala | Scala | apache-2.0 | 2,278 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status
import java.util.Date
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.JavaConverters._
import scala.collection.immutable.{HashSet, TreeSet}
import scala.collection.mutable.HashMap
import com.google.common.collect.Interners
import org.apache.spark.JobExecutionStatus
import org.apache.spark.executor.{ExecutorMetrics, TaskMetrics}
import org.apache.spark.resource.ResourceInformation
import org.apache.spark.scheduler.{AccumulableInfo, StageInfo, TaskInfo}
import org.apache.spark.status.api.v1
import org.apache.spark.storage.{RDDInfo, StorageLevel}
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.AccumulatorContext
import org.apache.spark.util.collection.OpenHashSet
/**
* A mutable representation of a live entity in Spark (jobs, stages, tasks, et al). Every live
* entity uses one of these instances to keep track of their evolving state, and periodically
* flush an immutable view of the entity to the app state store.
*/
private[spark] abstract class LiveEntity {
var lastWriteTime = -1L
def write(store: ElementTrackingStore, now: Long, checkTriggers: Boolean = false): Unit = {
// Always check triggers on the first write, since adding an element to the store may
// cause the maximum count for the element type to be exceeded.
store.write(doUpdate(), checkTriggers || lastWriteTime == -1L)
lastWriteTime = now
}
/**
* Returns an updated view of entity data, to be stored in the status store, reflecting the
* latest information collected by the listener.
*/
protected def doUpdate(): Any
}
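// Illustrative sketch (not part of Spark): a minimal LiveEntity subclass keeps mutable
// counters and renders an immutable snapshot in doUpdate(). Real entities return a
// dedicated *Wrapper object understood by the status store; the Map here is a stand-in.
private class LiveExampleCounter(name: String) extends LiveEntity {
  var hits = 0L
  override protected def doUpdate(): Any = Map("name" -> name, "hits" -> hits)
}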
private class LiveJob(
val jobId: Int,
name: String,
description: Option[String],
val submissionTime: Option[Date],
val stageIds: Seq[Int],
jobGroup: Option[String],
numTasks: Int,
sqlExecutionId: Option[Long]) extends LiveEntity {
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
// Holds both the stage ID and the task index, packed into a single long value.
val completedIndices = new OpenHashSet[Long]()
var killedTasks = 0
var killedSummary: Map[String, Int] = Map()
var skippedTasks = 0
var skippedStages = Set[Int]()
var status = JobExecutionStatus.RUNNING
var completionTime: Option[Date] = None
var completedStages: Set[Int] = Set()
var activeStages = 0
var failedStages = 0
override protected def doUpdate(): Any = {
val info = new v1.JobData(
jobId,
name,
description,
submissionTime,
completionTime,
stageIds,
jobGroup,
status,
numTasks,
activeTasks,
completedTasks,
skippedTasks,
failedTasks,
killedTasks,
completedIndices.size,
activeStages,
completedStages.size,
skippedStages.size,
failedStages,
killedSummary)
new JobDataWrapper(info, skippedStages, sqlExecutionId)
}
}
private class LiveTask(
var info: TaskInfo,
stageId: Int,
stageAttemptId: Int,
lastUpdateTime: Option[Long]) extends LiveEntity {
import LiveEntityHelpers._
// The task metrics use a special value when no metrics have been reported. The special value is
// checked when calculating indexed values when writing to the store (see [[TaskDataWrapper]]).
private var metrics: v1.TaskMetrics = createMetrics(default = -1L)
var errorMessage: Option[String] = None
/**
* Update the metrics for the task and return the difference between the previous and new
* values.
*/
def updateMetrics(metrics: TaskMetrics): v1.TaskMetrics = {
if (metrics != null) {
val old = this.metrics
val newMetrics = createMetrics(
metrics.executorDeserializeTime,
metrics.executorDeserializeCpuTime,
metrics.executorRunTime,
metrics.executorCpuTime,
metrics.resultSize,
metrics.jvmGCTime,
metrics.resultSerializationTime,
metrics.memoryBytesSpilled,
metrics.diskBytesSpilled,
metrics.peakExecutionMemory,
metrics.inputMetrics.bytesRead,
metrics.inputMetrics.recordsRead,
metrics.outputMetrics.bytesWritten,
metrics.outputMetrics.recordsWritten,
metrics.shuffleReadMetrics.remoteBlocksFetched,
metrics.shuffleReadMetrics.localBlocksFetched,
metrics.shuffleReadMetrics.fetchWaitTime,
metrics.shuffleReadMetrics.remoteBytesRead,
metrics.shuffleReadMetrics.remoteBytesReadToDisk,
metrics.shuffleReadMetrics.localBytesRead,
metrics.shuffleReadMetrics.recordsRead,
metrics.shuffleWriteMetrics.bytesWritten,
metrics.shuffleWriteMetrics.writeTime,
metrics.shuffleWriteMetrics.recordsWritten)
this.metrics = newMetrics
// Only calculate the delta if the old metrics contain valid information, otherwise
// the new metrics are the delta.
if (old.executorDeserializeTime >= 0L) {
subtractMetrics(newMetrics, old)
} else {
newMetrics
}
} else {
null
}
}
override protected def doUpdate(): Any = {
val duration = if (info.finished) {
info.duration
} else {
info.timeRunning(lastUpdateTime.getOrElse(System.currentTimeMillis()))
}
val hasMetrics = metrics.executorDeserializeTime >= 0
/**
     * SPARK-26260: For non-successful tasks, store the metrics as negative values to exclude
     * them from the task summary calculation. The `toApi` method in `TaskDataWrapper` converts
     * them back to the actual values.
*/
val taskMetrics: v1.TaskMetrics = if (hasMetrics && !info.successful) {
makeNegative(metrics)
} else {
metrics
}
new TaskDataWrapper(
info.taskId,
info.index,
info.attemptNumber,
info.launchTime,
if (info.gettingResult) info.gettingResultTime else -1L,
duration,
weakIntern(info.executorId),
weakIntern(info.host),
weakIntern(info.status),
weakIntern(info.taskLocality.toString()),
info.speculative,
newAccumulatorInfos(info.accumulables),
errorMessage,
hasMetrics,
taskMetrics.executorDeserializeTime,
taskMetrics.executorDeserializeCpuTime,
taskMetrics.executorRunTime,
taskMetrics.executorCpuTime,
taskMetrics.resultSize,
taskMetrics.jvmGcTime,
taskMetrics.resultSerializationTime,
taskMetrics.memoryBytesSpilled,
taskMetrics.diskBytesSpilled,
taskMetrics.peakExecutionMemory,
taskMetrics.inputMetrics.bytesRead,
taskMetrics.inputMetrics.recordsRead,
taskMetrics.outputMetrics.bytesWritten,
taskMetrics.outputMetrics.recordsWritten,
taskMetrics.shuffleReadMetrics.remoteBlocksFetched,
taskMetrics.shuffleReadMetrics.localBlocksFetched,
taskMetrics.shuffleReadMetrics.fetchWaitTime,
taskMetrics.shuffleReadMetrics.remoteBytesRead,
taskMetrics.shuffleReadMetrics.remoteBytesReadToDisk,
taskMetrics.shuffleReadMetrics.localBytesRead,
taskMetrics.shuffleReadMetrics.recordsRead,
taskMetrics.shuffleWriteMetrics.bytesWritten,
taskMetrics.shuffleWriteMetrics.writeTime,
taskMetrics.shuffleWriteMetrics.recordsWritten,
stageId,
stageAttemptId)
}
}
private[spark] class LiveExecutor(val executorId: String, _addTime: Long) extends LiveEntity {
var hostPort: String = null
var host: String = null
var isActive = true
var totalCores = 0
val addTime = new Date(_addTime)
var removeTime: Date = null
var removeReason: String = null
var rddBlocks = 0
var memoryUsed = 0L
var diskUsed = 0L
var maxTasks = 0
var maxMemory = 0L
var totalTasks = 0
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
var totalDuration = 0L
var totalGcTime = 0L
var totalInputBytes = 0L
var totalShuffleRead = 0L
var totalShuffleWrite = 0L
var isBlacklisted = false
var blacklistedInStages: Set[Int] = TreeSet()
var executorLogs = Map[String, String]()
var attributes = Map[String, String]()
var resources = Map[String, ResourceInformation]()
// Memory metrics. They may not be recorded (e.g. old event logs) so if totalOnHeap is not
// initialized, the store will not contain this information.
var totalOnHeap = -1L
var totalOffHeap = 0L
var usedOnHeap = 0L
var usedOffHeap = 0L
def hasMemoryInfo: Boolean = totalOnHeap >= 0L
// peak values for executor level metrics
val peakExecutorMetrics = new ExecutorMetrics()
def hostname: String = if (host != null) host else hostPort.split(":")(0)
override protected def doUpdate(): Any = {
val memoryMetrics = if (totalOnHeap >= 0) {
Some(new v1.MemoryMetrics(usedOnHeap, usedOffHeap, totalOnHeap, totalOffHeap))
} else {
None
}
val info = new v1.ExecutorSummary(
executorId,
if (hostPort != null) hostPort else host,
isActive,
rddBlocks,
memoryUsed,
diskUsed,
totalCores,
maxTasks,
activeTasks,
failedTasks,
completedTasks,
totalTasks,
totalDuration,
totalGcTime,
totalInputBytes,
totalShuffleRead,
totalShuffleWrite,
isBlacklisted,
maxMemory,
addTime,
Option(removeTime),
Option(removeReason),
executorLogs,
memoryMetrics,
blacklistedInStages,
Some(peakExecutorMetrics).filter(_.isSet),
attributes,
resources)
new ExecutorSummaryWrapper(info)
}
}
private class LiveExecutorStageSummary(
stageId: Int,
attemptId: Int,
executorId: String) extends LiveEntity {
import LiveEntityHelpers._
var taskTime = 0L
var succeededTasks = 0
var failedTasks = 0
var killedTasks = 0
var isBlacklisted = false
var metrics = createMetrics(default = 0L)
override protected def doUpdate(): Any = {
val info = new v1.ExecutorStageSummary(
taskTime,
failedTasks,
succeededTasks,
killedTasks,
metrics.inputMetrics.bytesRead,
metrics.inputMetrics.recordsRead,
metrics.outputMetrics.bytesWritten,
metrics.outputMetrics.recordsWritten,
metrics.shuffleReadMetrics.remoteBytesRead + metrics.shuffleReadMetrics.localBytesRead,
metrics.shuffleReadMetrics.recordsRead,
metrics.shuffleWriteMetrics.bytesWritten,
metrics.shuffleWriteMetrics.recordsWritten,
metrics.memoryBytesSpilled,
metrics.diskBytesSpilled,
isBlacklisted)
new ExecutorStageSummaryWrapper(stageId, attemptId, executorId, info)
}
}
private class LiveStage extends LiveEntity {
import LiveEntityHelpers._
var jobs = Seq[LiveJob]()
var jobIds = Set[Int]()
var info: StageInfo = null
var status = v1.StageStatus.PENDING
var description: Option[String] = None
var schedulingPool: String = SparkUI.DEFAULT_POOL_NAME
var activeTasks = 0
var completedTasks = 0
var failedTasks = 0
val completedIndices = new OpenHashSet[Int]()
var killedTasks = 0
var killedSummary: Map[String, Int] = Map()
var firstLaunchTime = Long.MaxValue
var localitySummary: Map[String, Long] = Map()
var metrics = createMetrics(default = 0L)
val executorSummaries = new HashMap[String, LiveExecutorStageSummary]()
val activeTasksPerExecutor = new HashMap[String, Int]().withDefaultValue(0)
var blackListedExecutors = new HashSet[String]()
// Used for cleanup of tasks after they reach the configured limit. Not written to the store.
@volatile var cleaning = false
var savedTasks = new AtomicInteger(0)
def executorSummary(executorId: String): LiveExecutorStageSummary = {
executorSummaries.getOrElseUpdate(executorId,
new LiveExecutorStageSummary(info.stageId, info.attemptNumber, executorId))
}
def toApi(): v1.StageData = {
new v1.StageData(
status = status,
stageId = info.stageId,
attemptId = info.attemptNumber,
numTasks = info.numTasks,
numActiveTasks = activeTasks,
numCompleteTasks = completedTasks,
numFailedTasks = failedTasks,
numKilledTasks = killedTasks,
numCompletedIndices = completedIndices.size,
submissionTime = info.submissionTime.map(new Date(_)),
firstTaskLaunchedTime =
if (firstLaunchTime < Long.MaxValue) Some(new Date(firstLaunchTime)) else None,
completionTime = info.completionTime.map(new Date(_)),
failureReason = info.failureReason,
executorDeserializeTime = metrics.executorDeserializeTime,
executorDeserializeCpuTime = metrics.executorDeserializeCpuTime,
executorRunTime = metrics.executorRunTime,
executorCpuTime = metrics.executorCpuTime,
resultSize = metrics.resultSize,
jvmGcTime = metrics.jvmGcTime,
resultSerializationTime = metrics.resultSerializationTime,
memoryBytesSpilled = metrics.memoryBytesSpilled,
diskBytesSpilled = metrics.diskBytesSpilled,
peakExecutionMemory = metrics.peakExecutionMemory,
inputBytes = metrics.inputMetrics.bytesRead,
inputRecords = metrics.inputMetrics.recordsRead,
outputBytes = metrics.outputMetrics.bytesWritten,
outputRecords = metrics.outputMetrics.recordsWritten,
shuffleRemoteBlocksFetched = metrics.shuffleReadMetrics.remoteBlocksFetched,
shuffleLocalBlocksFetched = metrics.shuffleReadMetrics.localBlocksFetched,
shuffleFetchWaitTime = metrics.shuffleReadMetrics.fetchWaitTime,
shuffleRemoteBytesRead = metrics.shuffleReadMetrics.remoteBytesRead,
shuffleRemoteBytesReadToDisk = metrics.shuffleReadMetrics.remoteBytesReadToDisk,
shuffleLocalBytesRead = metrics.shuffleReadMetrics.localBytesRead,
shuffleReadBytes =
metrics.shuffleReadMetrics.localBytesRead + metrics.shuffleReadMetrics.remoteBytesRead,
shuffleReadRecords = metrics.shuffleReadMetrics.recordsRead,
shuffleWriteBytes = metrics.shuffleWriteMetrics.bytesWritten,
shuffleWriteTime = metrics.shuffleWriteMetrics.writeTime,
shuffleWriteRecords = metrics.shuffleWriteMetrics.recordsWritten,
name = info.name,
description = description,
details = info.details,
schedulingPool = schedulingPool,
rddIds = info.rddInfos.map(_.id),
accumulatorUpdates = newAccumulatorInfos(info.accumulables.values),
tasks = None,
executorSummary = None,
killedTasksSummary = killedSummary)
}
override protected def doUpdate(): Any = {
new StageDataWrapper(toApi(), jobIds, localitySummary)
}
}
/**
* Data about a single partition of a cached RDD. The RDD storage level is used to compute the
* effective storage level of the partition, which takes into account the storage actually being
* used by the partition in the executors, and thus may differ from the storage level requested
* by the application.
*/
private class LiveRDDPartition(val blockName: String, rddLevel: StorageLevel) {
import LiveEntityHelpers._
// Pointers used by RDDPartitionSeq.
@volatile var prev: LiveRDDPartition = null
@volatile var next: LiveRDDPartition = null
var value: v1.RDDPartitionInfo = null
def executors: Seq[String] = value.executors
def memoryUsed: Long = value.memoryUsed
def diskUsed: Long = value.diskUsed
def update(
executors: Seq[String],
memoryUsed: Long,
diskUsed: Long): Unit = {
val level = StorageLevel(diskUsed > 0, memoryUsed > 0, rddLevel.useOffHeap,
if (memoryUsed > 0) rddLevel.deserialized else false, executors.size)
value = new v1.RDDPartitionInfo(
blockName,
weakIntern(level.description),
memoryUsed,
diskUsed,
executors)
}
}
private class LiveRDDDistribution(exec: LiveExecutor) {
import LiveEntityHelpers._
val executorId = exec.executorId
var memoryUsed = 0L
var diskUsed = 0L
var onHeapUsed = 0L
var offHeapUsed = 0L
// Keep the last update handy. This avoids recomputing the API view when not needed.
var lastUpdate: v1.RDDDataDistribution = null
def toApi(): v1.RDDDataDistribution = {
if (lastUpdate == null) {
lastUpdate = new v1.RDDDataDistribution(
weakIntern(exec.hostPort),
memoryUsed,
exec.maxMemory - exec.memoryUsed,
diskUsed,
if (exec.hasMemoryInfo) Some(onHeapUsed) else None,
if (exec.hasMemoryInfo) Some(offHeapUsed) else None,
if (exec.hasMemoryInfo) Some(exec.totalOnHeap - exec.usedOnHeap) else None,
if (exec.hasMemoryInfo) Some(exec.totalOffHeap - exec.usedOffHeap) else None)
}
lastUpdate
}
}
/**
* Tracker for data related to a persisted RDD.
*
* The RDD storage level is immutable, following the current behavior of `RDD.persist()`, even
* though it is mutable in the `RDDInfo` structure. Since the listener does not track unpersisted
* RDDs, this covers the case where an early stage is run on the unpersisted RDD, and a later stage
 * is started after the RDD is marked for caching.
*/
private class LiveRDD(val info: RDDInfo, storageLevel: StorageLevel) extends LiveEntity {
import LiveEntityHelpers._
var memoryUsed = 0L
var diskUsed = 0L
private val levelDescription = weakIntern(storageLevel.description)
private val partitions = new HashMap[String, LiveRDDPartition]()
private val partitionSeq = new RDDPartitionSeq()
private val distributions = new HashMap[String, LiveRDDDistribution]()
def partition(blockName: String): LiveRDDPartition = {
partitions.getOrElseUpdate(blockName, {
val part = new LiveRDDPartition(blockName, storageLevel)
part.update(Nil, 0L, 0L)
partitionSeq.addPartition(part)
part
})
}
def removePartition(blockName: String): Unit = {
partitions.remove(blockName).foreach(partitionSeq.removePartition)
}
def distribution(exec: LiveExecutor): LiveRDDDistribution = {
distributions.getOrElseUpdate(exec.executorId, new LiveRDDDistribution(exec))
}
def removeDistribution(exec: LiveExecutor): Boolean = {
distributions.remove(exec.executorId).isDefined
}
def distributionOpt(exec: LiveExecutor): Option[LiveRDDDistribution] = {
distributions.get(exec.executorId)
}
def getPartitions(): scala.collection.Map[String, LiveRDDPartition] = partitions
def getDistributions(): scala.collection.Map[String, LiveRDDDistribution] = distributions
override protected def doUpdate(): Any = {
val dists = if (distributions.nonEmpty) {
Some(distributions.values.map(_.toApi()).toSeq)
} else {
None
}
val rdd = new v1.RDDStorageInfo(
info.id,
info.name,
info.numPartitions,
partitions.size,
levelDescription,
memoryUsed,
diskUsed,
dists,
Some(partitionSeq))
new RDDStorageInfoWrapper(rdd)
}
}
private class SchedulerPool(name: String) extends LiveEntity {
var stageIds = Set[Int]()
override protected def doUpdate(): Any = {
new PoolData(name, stageIds)
}
}
private[spark] object LiveEntityHelpers {
private val stringInterner = Interners.newWeakInterner[String]()
private def accuValuetoString(value: Any): String = value match {
case list: java.util.List[_] =>
// SPARK-30379: For collection accumulator, string representation might
// takes much more memory (e.g. long => string of it) and cause OOM.
// So we only show first few elements.
if (list.size() > 5) {
list.asScala.take(5).mkString("[", ",", "," + "... " + (list.size() - 5) + " more items]")
} else {
list.toString
}
case _ => value.toString
}
def newAccumulatorInfos(accums: Iterable[AccumulableInfo]): Seq[v1.AccumulableInfo] = {
accums
.filter { acc =>
// We don't need to store internal or SQL accumulables as their values will be shown in
// other places, so drop them to reduce the memory usage.
!acc.internal && acc.metadata != Some(AccumulatorContext.SQL_ACCUM_IDENTIFIER)
}
.map { acc =>
new v1.AccumulableInfo(
acc.id,
acc.name.map(weakIntern).orNull,
acc.update.map(accuValuetoString),
acc.value.map(accuValuetoString).orNull)
}
.toSeq
}
/** String interning to reduce the memory usage. */
def weakIntern(s: String): String = {
stringInterner.intern(s)
}
// scalastyle:off argcount
def createMetrics(
executorDeserializeTime: Long,
executorDeserializeCpuTime: Long,
executorRunTime: Long,
executorCpuTime: Long,
resultSize: Long,
jvmGcTime: Long,
resultSerializationTime: Long,
memoryBytesSpilled: Long,
diskBytesSpilled: Long,
peakExecutionMemory: Long,
inputBytesRead: Long,
inputRecordsRead: Long,
outputBytesWritten: Long,
outputRecordsWritten: Long,
shuffleRemoteBlocksFetched: Long,
shuffleLocalBlocksFetched: Long,
shuffleFetchWaitTime: Long,
shuffleRemoteBytesRead: Long,
shuffleRemoteBytesReadToDisk: Long,
shuffleLocalBytesRead: Long,
shuffleRecordsRead: Long,
shuffleBytesWritten: Long,
shuffleWriteTime: Long,
shuffleRecordsWritten: Long): v1.TaskMetrics = {
new v1.TaskMetrics(
executorDeserializeTime,
executorDeserializeCpuTime,
executorRunTime,
executorCpuTime,
resultSize,
jvmGcTime,
resultSerializationTime,
memoryBytesSpilled,
diskBytesSpilled,
peakExecutionMemory,
new v1.InputMetrics(
inputBytesRead,
inputRecordsRead),
new v1.OutputMetrics(
outputBytesWritten,
outputRecordsWritten),
new v1.ShuffleReadMetrics(
shuffleRemoteBlocksFetched,
shuffleLocalBlocksFetched,
shuffleFetchWaitTime,
shuffleRemoteBytesRead,
shuffleRemoteBytesReadToDisk,
shuffleLocalBytesRead,
shuffleRecordsRead),
new v1.ShuffleWriteMetrics(
shuffleBytesWritten,
shuffleWriteTime,
shuffleRecordsWritten))
}
// scalastyle:on argcount
def createMetrics(default: Long): v1.TaskMetrics = {
createMetrics(default, default, default, default, default, default, default, default,
default, default, default, default, default, default, default, default,
default, default, default, default, default, default, default, default)
}
/** Add m2 values to m1. */
def addMetrics(m1: v1.TaskMetrics, m2: v1.TaskMetrics): v1.TaskMetrics = addMetrics(m1, m2, 1)
/** Subtract m2 values from m1. */
def subtractMetrics(m1: v1.TaskMetrics, m2: v1.TaskMetrics): v1.TaskMetrics = {
addMetrics(m1, m2, -1)
}
/**
* Convert all the metric values to negative as well as handle zero values.
* This method assumes that all the metric values are greater than or equal to zero
*/
def makeNegative(m: v1.TaskMetrics): v1.TaskMetrics = {
// To handle 0 metric value, add 1 and make the metric negative.
// To recover actual value do `math.abs(metric + 1)`
// Eg: if the metric values are (5, 3, 0, 1) => Updated metric values will be (-6, -4, -1, -2)
// To get actual metric value, do math.abs(metric + 1) => (5, 3, 0, 1)
def updateMetricValue(metric: Long): Long = {
metric * -1L - 1L
}
createMetrics(
updateMetricValue(m.executorDeserializeTime),
updateMetricValue(m.executorDeserializeCpuTime),
updateMetricValue(m.executorRunTime),
updateMetricValue(m.executorCpuTime),
updateMetricValue(m.resultSize),
updateMetricValue(m.jvmGcTime),
updateMetricValue(m.resultSerializationTime),
updateMetricValue(m.memoryBytesSpilled),
updateMetricValue(m.diskBytesSpilled),
updateMetricValue(m.peakExecutionMemory),
updateMetricValue(m.inputMetrics.bytesRead),
updateMetricValue(m.inputMetrics.recordsRead),
updateMetricValue(m.outputMetrics.bytesWritten),
updateMetricValue(m.outputMetrics.recordsWritten),
updateMetricValue(m.shuffleReadMetrics.remoteBlocksFetched),
updateMetricValue(m.shuffleReadMetrics.localBlocksFetched),
updateMetricValue(m.shuffleReadMetrics.fetchWaitTime),
updateMetricValue(m.shuffleReadMetrics.remoteBytesRead),
updateMetricValue(m.shuffleReadMetrics.remoteBytesReadToDisk),
updateMetricValue(m.shuffleReadMetrics.localBytesRead),
updateMetricValue(m.shuffleReadMetrics.recordsRead),
updateMetricValue(m.shuffleWriteMetrics.bytesWritten),
updateMetricValue(m.shuffleWriteMetrics.writeTime),
updateMetricValue(m.shuffleWriteMetrics.recordsWritten))
}
private def addMetrics(m1: v1.TaskMetrics, m2: v1.TaskMetrics, mult: Int): v1.TaskMetrics = {
createMetrics(
m1.executorDeserializeTime + m2.executorDeserializeTime * mult,
m1.executorDeserializeCpuTime + m2.executorDeserializeCpuTime * mult,
m1.executorRunTime + m2.executorRunTime * mult,
m1.executorCpuTime + m2.executorCpuTime * mult,
m1.resultSize + m2.resultSize * mult,
m1.jvmGcTime + m2.jvmGcTime * mult,
m1.resultSerializationTime + m2.resultSerializationTime * mult,
m1.memoryBytesSpilled + m2.memoryBytesSpilled * mult,
m1.diskBytesSpilled + m2.diskBytesSpilled * mult,
m1.peakExecutionMemory + m2.peakExecutionMemory * mult,
m1.inputMetrics.bytesRead + m2.inputMetrics.bytesRead * mult,
m1.inputMetrics.recordsRead + m2.inputMetrics.recordsRead * mult,
m1.outputMetrics.bytesWritten + m2.outputMetrics.bytesWritten * mult,
m1.outputMetrics.recordsWritten + m2.outputMetrics.recordsWritten * mult,
m1.shuffleReadMetrics.remoteBlocksFetched + m2.shuffleReadMetrics.remoteBlocksFetched * mult,
m1.shuffleReadMetrics.localBlocksFetched + m2.shuffleReadMetrics.localBlocksFetched * mult,
m1.shuffleReadMetrics.fetchWaitTime + m2.shuffleReadMetrics.fetchWaitTime * mult,
m1.shuffleReadMetrics.remoteBytesRead + m2.shuffleReadMetrics.remoteBytesRead * mult,
m1.shuffleReadMetrics.remoteBytesReadToDisk +
m2.shuffleReadMetrics.remoteBytesReadToDisk * mult,
m1.shuffleReadMetrics.localBytesRead + m2.shuffleReadMetrics.localBytesRead * mult,
m1.shuffleReadMetrics.recordsRead + m2.shuffleReadMetrics.recordsRead * mult,
m1.shuffleWriteMetrics.bytesWritten + m2.shuffleWriteMetrics.bytesWritten * mult,
m1.shuffleWriteMetrics.writeTime + m2.shuffleWriteMetrics.writeTime * mult,
m1.shuffleWriteMetrics.recordsWritten + m2.shuffleWriteMetrics.recordsWritten * mult)
}
}
/**
* A custom sequence of partitions based on a mutable linked list.
*
* The external interface is an immutable Seq, which is thread-safe for traversal. There are no
* guarantees about consistency though - iteration might return elements that have been removed
* or miss added elements.
*
* Internally, the sequence is mutable, and elements can modify the data they expose. Additions and
* removals are O(1). It is not safe to do multiple writes concurrently.
*/
private class RDDPartitionSeq extends Seq[v1.RDDPartitionInfo] {
@volatile private var _head: LiveRDDPartition = null
@volatile private var _tail: LiveRDDPartition = null
@volatile var count = 0
override def apply(idx: Int): v1.RDDPartitionInfo = {
var curr = 0
var e = _head
while (curr < idx && e != null) {
curr += 1
e = e.next
}
if (e != null) e.value else throw new IndexOutOfBoundsException(idx.toString)
}
override def iterator: Iterator[v1.RDDPartitionInfo] = {
new Iterator[v1.RDDPartitionInfo] {
var current = _head
override def hasNext: Boolean = current != null
override def next(): v1.RDDPartitionInfo = {
if (current != null) {
val tmp = current
current = tmp.next
tmp.value
} else {
throw new NoSuchElementException()
}
}
}
}
override def length: Int = count
def addPartition(part: LiveRDDPartition): Unit = {
part.prev = _tail
if (_tail != null) {
_tail.next = part
}
if (_head == null) {
_head = part
}
_tail = part
count += 1
}
def removePartition(part: LiveRDDPartition): Unit = {
count -= 1
// Remove the partition from the list, but leave the pointers unchanged. That ensures a best
// effort at returning existing elements when iterations still reference the removed partition.
if (part.prev != null) {
part.prev.next = part.next
}
if (part eq _head) {
_head = part.next
}
if (part.next != null) {
part.next.prev = part.prev
}
if (part eq _tail) {
_tail = part.prev
}
}
}
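// Illustrative sketch (not part of the original file): the same linked-list-backed Seq
// pattern in miniature, using plain Strings instead of LiveRDDPartition. It shows the
// O(1) append and the best-effort traversal described in the comment above; all names
// here are hypothetical.
private object RDDPartitionSeqSketch {
  final class Node(val value: String) {
    @volatile var next: Node = null
  }
  final class StringSeq extends Seq[String] {
    @volatile private var head: Node = null
    @volatile private var tail: Node = null
    @volatile private var count = 0
    /** O(1) append: only the tail pointer and the counter are touched. */
    def add(value: String): Unit = {
      val node = new Node(value)
      if (tail != null) tail.next = node
      if (head == null) head = node
      tail = node
      count += 1
    }
    override def length: Int = count
    override def apply(idx: Int): String = iterator.drop(idx).next()
    // Traversal just chases `next` pointers, so concurrent writers are tolerated on a
    // best-effort basis, exactly like the class above.
    override def iterator: Iterator[String] = new Iterator[String] {
      private var current = head
      override def hasNext: Boolean = current != null
      override def next(): String = {
        val tmp = current
        current = tmp.next
        tmp.value
      }
    }
  }
}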
| zuotingbing/spark | core/src/main/scala/org/apache/spark/status/LiveEntity.scala | Scala | apache-2.0 | 29,386 |
package models
import com.mongodb.casbah.Imports._
import core.utils.TypeUtils.nullToNone
/**
* Message container.
*/
case class Grunt(
id: Option[ObjectId],
userId: ObjectId,
originalMessage: Option[ObjectId],
regrunts: List[ObjectId],
favorites: List[ObjectId],
message: String,
timestamp: Long
) extends BaseTOModel {
/**
* Mapper to serialize object to MongoDBObject
* @return The serialized object.
*/
override def toMongoDBObject = MongoDBObject(
"_id"->id.getOrElse(new ObjectId),
"userId"->userId,
"originalMessage"->originalMessage,
"regrunts"->regrunts,
"favorites"->favorites,
"message"->message,
"timestamp"->timestamp
)
}
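/**
 * Illustrative sketch (not part of the original file): building a new Grunt and
 * serializing it for insertion into a collection. All field values are placeholders.
 */
object GruntExample {
  // Returns the MongoDB document representation produced by toMongoDBObject.
  def newGrunt(author: ObjectId) =
    Grunt(None, author, None, Nil, Nil, "hello walrus", System.currentTimeMillis)
      .toMongoDBObject
}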
/**
 * Helper object for the Grunt model.
* @author maximx1
*/
object Grunt extends BaseModel[Grunt] {
/**
* Row mapper to map the mongodb object to the case class.
* @param mongoObject The DBObject to deserialize.
* @return The deserialized object.
*/
override def fromMongoObject(mongoObject: DBObject): Grunt = Grunt(
Some(mongoObject.as[ObjectId]("_id")),
mongoObject.as[ObjectId]("userId"),
nullToNone(Some(mongoObject.as[ObjectId]("originalMessage"))),
mongoObject.as[List[ObjectId]]("regrunts"),
mongoObject.as[List[ObjectId]]("favorites"),
mongoObject.as[String]("message"),
mongoObject.as[Long]("timestamp")
)
} | maximx1/openwalrus | app/models/Grunt.scala | Scala | mit | 1,375 |
package incognito.utils
import scala.util.Random
import java.io.File
import java.io.PrintWriter
import org.apache.spark.rdd.RDD
import scala.collection.Map
import incognito.rdd.Data
import breeze.linalg.Vector
import org.apache.spark.broadcast.Broadcast
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.SparkContext
import incognito.archive.KMedoids
import java.math.BigDecimal
import java.math.RoundingMode
import com.google.common.hash.Hashing
class Utils extends Serializable {
val chars = ('a' to 'z') ++ ('A' to 'Z') ++ ('1' to '9')
val key = new ArrayBuffer[Int]
private val n = new java.util.concurrent.atomic.AtomicLong
def next = n.getAndIncrement()
def uniqueHashKey(length: Int = 22): Int = {
val newKey = (1 to length).map(x => chars(Random.nextInt(chars.length))).mkString.hashCode()
if (!key.contains(newKey)) { key += newKey; newKey } else uniqueHashKey()
}
/**
 * Generates a 64-bit identifier for a given string using Google's Guava hashing library.
 * @param obj the string to hash
*/
def hashId(obj: String): Long = {
Hashing.md5().hashString(obj).asLong
}
def shortCode(): String = {
val id = 15
val size = (math.log10(id) + 4).toInt
val timestamp: Long = System.currentTimeMillis
Random.alphanumeric.take(Random.nextInt(size) + 1).mkString + timestamp.toString
}
def deleteLocalFile(path: String) = {
val fileTemp = new File(path)
if (fileTemp.exists) {
fileTemp.delete()
}
}
def writeToLocalFile(data: Array[String], path: String) = {
val pw = new PrintWriter(new File(path))
data.foreach(d => pw.write(d + "\\n"))
pw.close
}
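  /**
   * Returns, for each categorical quasi-identifier column of an equivalence class,
   * the median value obtained by sorting that column's values lexicographically.
   */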
def getCategoricalQIMedian(ec: Array[Array[String]]): Array[String] = {
val nRows = ec.length
val nCols = ec.head.length
val ecT: Array[Array[String]] = new Array[Array[String]](nCols)
for (c <- Range(0, nCols)) {
val vals = new Array[String](nRows)
for (r <- Range(0, nRows)) {
vals(r) = ec(r)(c)
}
ecT(c) = vals
}
val medianQIs = ecT.map({ cols =>
val sCols = cols.sortBy { x => x }
sCols((nRows / 2).toInt)
})
medianQIs
}
def gcd(a: Int, b: Int): Int = {
if (b == 0) a
else
gcd(b, a % b)
}
def gcd(input: Array[Int]): Int = {
var result = input(0);
for (i <- 1 until input.length)
result = gcd(result, input(i));
result
}
def round(value: Double, places: Int = 2): Double = {
if (places < 0) throw new IllegalArgumentException();
var bd = new BigDecimal(value);
bd = bd.setScale(places, RoundingMode.HALF_UP);
bd.doubleValue();
}
} | achak1987/SparkAnonymizationToolkit | src/main/scala/incognito/utils/Utils.scala | Scala | apache-2.0 | 2,653 |
import stainless.annotation._
import stainless.lang._
import stainless.io._
object Nested {
@cCode.`export`
def main(): Int = {
f(100)
}
def f(x: Int): Int = {
require(0 <= x && x <= 100)
def gg(y: Int): Int = {
require(0 <= y && y <= 100)
x + y
}
val res = gg(15)
StdOut.println(res)(newState)
res
}
}
| epfl-lara/stainless | frontends/benchmarks/genc/valid/Nested.scala | Scala | apache-2.0 | 359 |
/*
* Licensed to SequoiaDB (C) under one or more contributor license agreements.
* See the NOTICE file distributed with this work for additional information
* regarding copyright ownership. The SequoiaDB (C) licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sequoiadb.spark
/**
* Source File Name = SequoiadbConfig.scala
* Description = SequoiaDB Configuration
* When/how to use = Used when initializing SequoiadbRDD
* Restrictions = N/A
* Change Activity:
* Date Who Description
* ======== ================== ================================================
* 20150305 Tao Wang Initial Draft
*/
import _root_.com.sequoiadb.spark.SequoiadbConfig.Property
import scala.reflect.ClassTag
import com.sequoiadb.base.DBQuery
import org.bson.util.JSON
import org.bson.BSONObject
import org.bson.BasicBSONObject
case class SequoiadbConfigBuilder(
val properties: Map[Property,Any] = Map()) extends Serializable { build =>
val requiredProperties: List[Property] = SequoiadbConfig.all
/**
* Instantiate a brand new Builder from given properties map
*
* @param props Map of any-type properties.
* @return The new builder
*/
def apply(props: Map[Property, Any]) =
SequoiadbConfigBuilder(props)
/**
* Set (override if exists) a single property value given a new one.
*
* @param property Property to be set
* @param value New value for given property
* @tparam T Property type
* @return A new builder that includes new value of the specified property
*/
def set[T](property: Property,value: T): SequoiadbConfigBuilder =
apply(properties + (property -> value))
/**
* Build the config object from current builder properties.
*
* @return The SequoiaDB configuration object.
*/
def build(): SequoiadbConfig = new SequoiadbConfig(properties) {
require(
requiredProperties.forall(properties.isDefinedAt),
s"Not all properties are defined! : ${
requiredProperties.diff(
properties.keys.toList.intersect(requiredProperties))
}")
}
}
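/**
 * Illustrative sketch (not part of the original file): assembling a configuration with
 * the builder. `build()` requires every key in `SequoiadbConfig.all` to be set; the host,
 * collection names and credentials below are placeholders.
 */
object SequoiadbConfigExample {
  def example(): SequoiadbConfig = SequoiadbConfigBuilder()
    .set(SequoiadbConfig.Host, List("localhost:11810"))
    .set(SequoiadbConfig.CollectionSpace, "sample_cs")
    .set(SequoiadbConfig.Collection, "sample_cl")
    .set(SequoiadbConfig.SamplingRatio, 1.0)
    .set(SequoiadbConfig.Preference, "M")
    .set(SequoiadbConfig.Username, "")
    .set(SequoiadbConfig.Password, "")
    .set(SequoiadbConfig.ScanType, "auto")
    .set(SequoiadbConfig.BulkSize, "512")
    .build()
}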
class SequoiadbConfig (
val properties: Map[Property,Any] = Map()) extends Serializable {
/**
* Gets specified property from current configuration object
* @param property Desired property
* @tparam T Property expected value type.
* @return An optional value of expected type
*/
def get[T:ClassTag](property: Property): Option[T] = {
val t = properties.get(property).map(_.asInstanceOf[T])
if (t == None || t == Option ("")) {
return SequoiadbConfig.Defaults.get (property).map(_.asInstanceOf[T])
}
if (property.equals(SequoiadbConfig.Preference)) {
val _preferenceValue = t.get.asInstanceOf[String]
// preferenceValue should equal "m"/"M"/"s"/"S"/"a"/"A"/"1-7"
val preferenceValue = _preferenceValue match {
case "m" => "m"
case "M" => "M"
case "s" => "s"
case "S" => "S"
case "a" => "a"
case "A" => "A"
case "1" => "1"
case "2" => "2"
case "3" => "3"
case "4" => "4"
case "5" => "5"
case "6" => "6"
case "7" => "7"
case "r" => "r"
case "R" => "r"
case _ => SequoiadbConfig.DefaultPreference
}
val preference_tmp = new BasicBSONObject ();
preference_tmp.put ("PreferedInstance", preferenceValue);
return Option (
preference_tmp.toString.asInstanceOf[T]
)
}
if (property.equals(SequoiadbConfig.BulkSize)) {
val _bulkSizeValue = t.get.asInstanceOf[String].toInt
if (_bulkSizeValue <= 0) {
return Option (
(SequoiadbConfig.DefaultBulkSize).asInstanceOf[T]
)
}
}
t
}
/**
* Gets specified property from current configuration object.
* It will fail if property is not previously set.
* @param property Desired property
* @tparam T Property expected value type
* @return Expected type value
*/
def apply[T:ClassTag](property: Property): T =
get[T](property).get
}
object SequoiadbConfig {
type Property = String
def notFound[T](key: String): T =
throw new IllegalStateException(s"Parameter $key not specified")
// Parameter names
val Host = "host"
val CollectionSpace = "collectionspace"
val Collection = "collection"
val SamplingRatio = "samplingRatio"
val Preference = "preference" // "m"/"M"/"s"/"S"/"a"/"A"/"1-7"/"r"/"R"
val Username = "username"
val Password = "password"
val ScanType = "scantype" // auto/ixscan/tbscan
val BulkSize = "bulksize" // default 512
val scanTypeExplain = 0
val scanTypeGetQueryMeta = 1
val QUERYRETURNBSON = 0
val QUERYRETURNCSV = DBQuery.FLG_QUERY_STRINGOUT
val all = List(
Host,
CollectionSpace,
Collection,
SamplingRatio,
Preference,
Username,
Password,
ScanType,
BulkSize
)
// Default values
val DefaultSamplingRatio = 1.0
val DefaultPreference = "r"
val DefaultPort = "11810"
val DefaultHost = "localhost"
val DefaultUsername = ""
val DefaultPassword = ""
val DefaultScanType = "auto"
val DefaultBulkSize = "512"
val preference_defaultObj = new BasicBSONObject ();
preference_defaultObj.put ("PreferedInstance", DefaultPreference);
val Defaults = Map(
SamplingRatio -> DefaultSamplingRatio,
Preference -> (preference_defaultObj.toString),
ScanType -> DefaultScanType,
Host -> List(DefaultHost + ":" + DefaultPort),
Username -> (DefaultUsername),
Password -> (DefaultPassword),
BulkSize -> (DefaultBulkSize)
)
}
| SequoiaDB/spark-sequoiadb | src/main/scala/com/sequoiadb/spark/SequoiadbConfig.scala | Scala | apache-2.0 | 6,238 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml._
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.util._
import org.apache.spark.mllib.feature
import org.apache.spark.mllib.linalg.{Vector, VectorUDT}
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StructType
/**
* Params for [[IDF]] and [[IDFModel]].
*/
private[feature] trait IDFBase extends Params with HasInputCol with HasOutputCol {
/**
   * The minimum number of documents in which a term should appear.
* Default: 0
* @group param
*/
final val minDocFreq = new IntParam(
this, "minDocFreq", "minimum of documents in which a term should appear for filtering")
setDefault(minDocFreq -> 0)
/** @group getParam */
def getMinDocFreq: Int = $(minDocFreq)
/**
* Validate and transform the input schema.
*/
protected def validateAndTransformSchema(schema: StructType): StructType = {
SchemaUtils.checkColumnType(schema, $(inputCol), new VectorUDT)
SchemaUtils.appendColumn(schema, $(outputCol), new VectorUDT)
}
}
/**
* :: Experimental ::
* Compute the Inverse Document Frequency (IDF) given a collection of documents.
*/
@Experimental
final class IDF(override val uid: String) extends Estimator[IDFModel] with IDFBase
with DefaultParamsWritable {
def this() = this(Identifiable.randomUID("idf"))
/** @group setParam */
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
def setOutputCol(value: String): this.type = set(outputCol, value)
/** @group setParam */
def setMinDocFreq(value: Int): this.type = set(minDocFreq, value)
@Since("2.0.0")
override def fit(dataset: Dataset[_]): IDFModel = {
transformSchema(dataset.schema, logging = true)
val input = dataset.select($(inputCol)).rdd.map { case Row(v: Vector) => v }
val idf = new feature.IDF($(minDocFreq)).fit(input)
copyValues(new IDFModel(uid, idf).setParent(this))
}
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
override def copy(extra: ParamMap): IDF = defaultCopy(extra)
}
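// Illustrative usage sketch (not part of the original file). It assumes an existing
// SparkSession; the column names and the feature dimension are arbitrary placeholders.
private object IDFUsageSketch {
  def example(spark: org.apache.spark.sql.SparkSession): Unit = {
    val docs = spark.createDataFrame(Seq(
      (0, Seq("a", "b", "c")),
      (1, Seq("a", "a", "d"))
    )).toDF("id", "words")
    // Turn the token sequences into term-frequency vectors first.
    val tf = new HashingTF().setInputCol("words").setOutputCol("rawFeatures").setNumFeatures(32)
    val featurized = tf.transform(docs)
    // Fit the IDF weights on the corpus, then rescale the term frequencies.
    val rescaled = new IDF().setInputCol("rawFeatures").setOutputCol("features")
      .setMinDocFreq(1).fit(featurized).transform(featurized)
    rescaled.select("features").show(false)
  }
}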
@Since("1.6.0")
object IDF extends DefaultParamsReadable[IDF] {
@Since("1.6.0")
override def load(path: String): IDF = super.load(path)
}
/**
* :: Experimental ::
* Model fitted by [[IDF]].
*/
@Experimental
class IDFModel private[ml] (
override val uid: String,
idfModel: feature.IDFModel)
extends Model[IDFModel] with IDFBase with MLWritable {
import IDFModel._
/** @group setParam */
def setInputCol(value: String): this.type = set(inputCol, value)
/** @group setParam */
def setOutputCol(value: String): this.type = set(outputCol, value)
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema, logging = true)
val idf = udf { vec: Vector => idfModel.transform(vec) }
dataset.withColumn($(outputCol), idf(col($(inputCol))))
}
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
override def copy(extra: ParamMap): IDFModel = {
val copied = new IDFModel(uid, idfModel)
copyValues(copied, extra).setParent(parent)
}
/** Returns the IDF vector. */
@Since("1.6.0")
def idf: Vector = idfModel.idf
@Since("1.6.0")
override def write: MLWriter = new IDFModelWriter(this)
}
@Since("1.6.0")
object IDFModel extends MLReadable[IDFModel] {
private[IDFModel] class IDFModelWriter(instance: IDFModel) extends MLWriter {
private case class Data(idf: Vector)
override protected def saveImpl(path: String): Unit = {
DefaultParamsWriter.saveMetadata(instance, path, sc)
val data = Data(instance.idf)
val dataPath = new Path(path, "data").toString
sqlContext.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class IDFModelReader extends MLReader[IDFModel] {
private val className = classOf[IDFModel].getName
override def load(path: String): IDFModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sqlContext.read.parquet(dataPath)
.select("idf")
.head()
val idf = data.getAs[Vector](0)
val model = new IDFModel(metadata.uid, new feature.IDFModel(idf))
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
@Since("1.6.0")
override def read: MLReader[IDFModel] = new IDFModelReader
@Since("1.6.0")
override def load(path: String): IDFModel = super.load(path)
}
| xieguobin/Spark_2.0.0_cn1 | ml/feature/IDF.scala | Scala | apache-2.0 | 5,583 |
#! /bin/sh
exec scala "$0" "$@"
!#
// This is how you create an array in Scala. Note several differences
// between Scala and Java. First, the left-hand side does not have any
// type specification. Second, the construction of the array on the
// right-hand side is very different. In particular, we use the
// designated type (Array) rather than String[]. Also, the type
// parameterization (generic) uses [] brackets rather than <> as is
// the case in Java.
val greetStrings = new Array[String](3)
// Now, we can assign values to the array. Again, note the differences
// here between Java and Scala. What are they?
greetStrings(0) = "Hello"
greetStrings(1) = ", "
greetStrings(2) = "world!\\n"
// Here is a for loop iterating over the elements in the array:
for (i <- 0 to 2)
print(greetStrings(i))
// The above for loop is a "for expression", which is quite different from
// Java's for loop. The first thing to note is that the integers are true
// objects. This allows us to invoke methods on them. In this case,
// we are invoking the `to` method on 0 and passing it the integer
// 2 - this returns a Range object which can be used with this special
// for expression form.
//
// Also, you should see that the array is indexed like a function. It
// turns out that this is a short-hand for a method invocation on an
// object. For example, assigning a new value to a location in an
// array is:
greetStrings(2) = "world\\n"
// Is translated into:
greetStrings.update(2, "world!\\n")
// Likewise, access:
greetStrings(2)
// Is translated into:
greetStrings.apply(2)
// Thus, the above code is semantically equivalent to:
var greetStrings2 = new Array[String](3)
greetStrings2.update(0, "Hello")
greetStrings2.update(1, ", ")
greetStrings2.update(2, "world!\\n")
for (i <- 0.to(2))
print(greetStrings2.apply(i))
// Scala also provides a short-hand for creating arrays:
val numNames = Array("zero", "one", "two")
// Which is short-hand for:
val numNames2 = Array.apply("zero", "one", "two")
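// (Illustrative addition, not part of the original script.) The same desugaring applies
// when reading from the shorthand-created array:
println(numNames2(1))        // prints "one"
println(numNames2.apply(1))  // the equivalent explicit method call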
| umass-cs-220/week-02-principles-of-good-programming | code/scala-script/01-array.scala | Scala | apache-2.0 | 1,990 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml.{PredictionModel, Predictor, PredictorParams}
import org.apache.spark.ml.ann.{FeedForwardTopology, FeedForwardTrainer}
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared.{HasMaxIter, HasSeed, HasStepSize, HasTol}
import org.apache.spark.ml.util._
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.sql.Dataset
/** Params for Multilayer Perceptron. */
private[ml] trait MultilayerPerceptronParams extends PredictorParams
with HasSeed with HasMaxIter with HasTol with HasStepSize {
/**
* Layer sizes including input size and output size.
* Default: Array(1, 1)
*
* @group param
*/
final val layers: IntArrayParam = new IntArrayParam(this, "layers",
"Sizes of layers from input layer to output layer" +
" E.g., Array(780, 100, 10) means 780 inputs, " +
"one hidden layer with 100 neurons and output layer of 10 neurons.",
(t: Array[Int]) => t.forall(ParamValidators.gt(0)) && t.length > 1
)
/** @group getParam */
final def getLayers: Array[Int] = $(layers)
/**
* Block size for stacking input data in matrices to speed up the computation.
* Data is stacked within partitions. If block size is more than remaining data in
* a partition then it is adjusted to the size of this data.
* Recommended size is between 10 and 1000.
* Default: 128
*
* @group expertParam
*/
final val blockSize: IntParam = new IntParam(this, "blockSize",
"Block size for stacking input data in matrices. Data is stacked within partitions." +
" If block size is more than remaining data in a partition then " +
"it is adjusted to the size of this data. Recommended size is between 10 and 1000",
ParamValidators.gt(0))
/** @group getParam */
final def getBlockSize: Int = $(blockSize)
/**
* Allows setting the solver: minibatch gradient descent (gd) or l-bfgs.
* l-bfgs is the default one.
*
* @group expertParam
*/
final val solver: Param[String] = new Param[String](this, "solver",
" Allows setting the solver: minibatch gradient descent (gd) or l-bfgs. " +
" l-bfgs is the default one.",
ParamValidators.inArray[String](Array("gd", "l-bfgs")))
/** @group getParam */
final def getOptimizer: String = $(solver)
/**
* Model weights. Can be returned either after training or after explicit setting
*
* @group expertParam
*/
final val weights: Param[Vector] = new Param[Vector](this, "weights",
" Sets the weights of the model ")
/** @group getParam */
final def getWeights: Vector = $(weights)
setDefault(maxIter -> 100, tol -> 1e-4, blockSize -> 128, solver -> "l-bfgs", stepSize -> 0.03)
}
/** Label to vector converter. */
private object LabelConverter {
// TODO: Use OneHotEncoder instead
/**
* Encodes a label as a vector.
* Returns a vector of given length with zeroes at all positions
* and value 1.0 at the position that corresponds to the label.
*
* @param labeledPoint labeled point
* @param labelCount total number of labels
* @return pair of features and vector encoding of a label
*/
def encodeLabeledPoint(labeledPoint: LabeledPoint, labelCount: Int): (Vector, Vector) = {
val output = Array.fill(labelCount)(0.0)
output(labeledPoint.label.toInt) = 1.0
(labeledPoint.features, Vectors.dense(output))
}
/**
* Converts a vector to a label.
* Returns the position of the maximal element of a vector.
*
* @param output label encoded with a vector
* @return label
*/
def decodeLabel(output: Vector): Double = {
output.argmax.toDouble
}
}
/**
* :: Experimental ::
* Classifier trainer based on the Multilayer Perceptron.
* Each layer has sigmoid activation function, output layer has softmax.
* Number of inputs has to be equal to the size of feature vectors.
* Number of outputs has to be equal to the total number of labels.
*
*/
@Since("1.5.0")
@Experimental
class MultilayerPerceptronClassifier @Since("1.5.0") (
@Since("1.5.0") override val uid: String)
extends Predictor[Vector, MultilayerPerceptronClassifier, MultilayerPerceptronClassificationModel]
with MultilayerPerceptronParams with DefaultParamsWritable {
@Since("1.5.0")
def this() = this(Identifiable.randomUID("mlpc"))
/** @group setParam */
@Since("1.5.0")
def setLayers(value: Array[Int]): this.type = set(layers, value)
/** @group setParam */
@Since("1.5.0")
def setBlockSize(value: Int): this.type = set(blockSize, value)
/**
* Set the maximum number of iterations.
* Default is 100.
*
* @group setParam
*/
@Since("1.5.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
/**
* Set the convergence tolerance of iterations.
* Smaller value will lead to higher accuracy with the cost of more iterations.
* Default is 1E-4.
*
* @group setParam
*/
@Since("1.5.0")
def setTol(value: Double): this.type = set(tol, value)
/**
* Set the seed for weights initialization if weights are not set
*
* @group setParam
*/
@Since("1.5.0")
def setSeed(value: Long): this.type = set(seed, value)
/**
* Sets the model weights.
*
* @group expertParam
*/
@Since("2.0.0")
def setWeights(value: Vector): this.type = set(weights, value)
@Since("1.5.0")
override def copy(extra: ParamMap): MultilayerPerceptronClassifier = defaultCopy(extra)
/**
* Train a model using the given dataset and parameters.
* Developers can implement this instead of [[fit()]] to avoid dealing with schema validation
* and copying parameters into the model.
*
* @param dataset Training dataset
* @return Fitted model
*/
override protected def train(dataset: Dataset[_]): MultilayerPerceptronClassificationModel = {
val myLayers = $(layers)
val labels = myLayers.last
val lpData = extractLabeledPoints(dataset)
val data = lpData.map(lp => LabelConverter.encodeLabeledPoint(lp, labels))
val topology = FeedForwardTopology.multiLayerPerceptron(myLayers, true)
val trainer = new FeedForwardTrainer(topology, myLayers(0), myLayers.last)
if (isDefined(weights)) {
trainer.setWeights($(weights))
} else {
trainer.setSeed($(seed))
}
trainer.LBFGSOptimizer
.setConvergenceTol($(tol))
.setNumIterations($(maxIter))
trainer.setStackSize($(blockSize))
val mlpModel = trainer.train(data)
new MultilayerPerceptronClassificationModel(uid, myLayers, mlpModel.weights)
}
}
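// Illustrative usage sketch (not part of the original file). Layer sizes, seed and
// iteration count are arbitrary placeholders; the input DataFrame is assumed to have
// the default "label" and "features" columns.
private object MultilayerPerceptronUsageSketch {
  def example(train: org.apache.spark.sql.DataFrame): MultilayerPerceptronClassificationModel = {
    val trainer = new MultilayerPerceptronClassifier()
      .setLayers(Array(4, 5, 3)) // 4 inputs, one hidden layer of 5 neurons, 3 classes
      .setBlockSize(128)
      .setSeed(1234L)
      .setMaxIter(100)
    trainer.fit(train)
  }
}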
@Since("2.0.0")
object MultilayerPerceptronClassifier
extends DefaultParamsReadable[MultilayerPerceptronClassifier] {
@Since("2.0.0")
override def load(path: String): MultilayerPerceptronClassifier = super.load(path)
}
/**
* :: Experimental ::
* Classification model based on the Multilayer Perceptron.
* Each layer has sigmoid activation function, output layer has softmax.
*
* @param uid uid
* @param layers array of layer sizes including input and output layers
* @param weights vector of initial weights for the model that consists of the weights of layers
* @return prediction model
*/
@Since("1.5.0")
@Experimental
class MultilayerPerceptronClassificationModel private[ml] (
@Since("1.5.0") override val uid: String,
@Since("1.5.0") val layers: Array[Int],
@Since("1.5.0") val weights: Vector)
extends PredictionModel[Vector, MultilayerPerceptronClassificationModel]
with Serializable with MLWritable {
@Since("1.6.0")
override val numFeatures: Int = layers.head
private val mlpModel = FeedForwardTopology.multiLayerPerceptron(layers, true).model(weights)
/**
* Returns layers in a Java List.
*/
private[ml] def javaLayers: java.util.List[Int] = {
layers.toList.asJava
}
/**
* Predict label for the given features.
* This internal method is used to implement [[transform()]] and output [[predictionCol]].
*/
override protected def predict(features: Vector): Double = {
LabelConverter.decodeLabel(mlpModel.predict(features))
}
@Since("1.5.0")
override def copy(extra: ParamMap): MultilayerPerceptronClassificationModel = {
copyValues(new MultilayerPerceptronClassificationModel(uid, layers, weights), extra)
}
@Since("2.0.0")
override def write: MLWriter =
new MultilayerPerceptronClassificationModel.MultilayerPerceptronClassificationModelWriter(this)
}
@Since("2.0.0")
object MultilayerPerceptronClassificationModel
extends MLReadable[MultilayerPerceptronClassificationModel] {
@Since("2.0.0")
override def read: MLReader[MultilayerPerceptronClassificationModel] =
new MultilayerPerceptronClassificationModelReader
@Since("2.0.0")
override def load(path: String): MultilayerPerceptronClassificationModel = super.load(path)
/** [[MLWriter]] instance for [[MultilayerPerceptronClassificationModel]] */
private[MultilayerPerceptronClassificationModel]
class MultilayerPerceptronClassificationModelWriter(
instance: MultilayerPerceptronClassificationModel) extends MLWriter {
private case class Data(layers: Array[Int], weights: Vector)
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: layers, weights
val data = Data(instance.layers, instance.weights)
val dataPath = new Path(path, "data").toString
sqlContext.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class MultilayerPerceptronClassificationModelReader
extends MLReader[MultilayerPerceptronClassificationModel] {
/** Checked against metadata when loading model */
private val className = classOf[MultilayerPerceptronClassificationModel].getName
override def load(path: String): MultilayerPerceptronClassificationModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sqlContext.read.parquet(dataPath).select("layers", "weights").head()
val layers = data.getAs[Seq[Int]](0).toArray
val weights = data.getAs[Vector](1)
val model = new MultilayerPerceptronClassificationModel(metadata.uid, layers, weights)
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
}
| xieguobin/Spark_2.0.0_cn1 | ml/classification/MultilayerPerceptronClassifier.scala | Scala | apache-2.0 | 11,363 |
package chromex.notifications
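/**
 * Thin wrapper around the notifications onClicked event: the registered callback is
 * invoked with the id of the notification that was clicked.
 */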
object onClicked {
def addListener(callback: String => _): Unit = {
chrome.notifications.onClicked.addListener(callback)
}
}
| erdavila/auto-steamgifts | src/main/scala/chromex/notifications/onClicked.scala | Scala | mit | 164 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import java.util.HashMap
import org.apache.spark.SparkConf
import org.apache.spark.memory.{StaticMemoryManager, TaskMemoryManager}
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.execution.joins.LongToUnsafeRowMap
import org.apache.spark.sql.execution.vectorized.AggregateHashMap
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{LongType, StructType}
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.hash.Murmur3_x86_32
import org.apache.spark.unsafe.map.BytesToBytesMap
import org.apache.spark.util.Benchmark
/**
* Benchmark to measure performance for aggregate primitives.
* To run this:
* build/sbt "sql/test-only *benchmark.AggregateBenchmark"
*
* Benchmarks in this file are skipped in normal builds.
*/
class AggregateBenchmark extends BenchmarkBase {
ignore("aggregate without grouping") {
val N = 500L << 22
val benchmark = new Benchmark("agg without grouping", N)
runBenchmark("agg w/o group", N) {
sparkSession.range(N).selectExpr("sum(id)").collect()
}
/*
agg w/o group: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
agg w/o group wholestage off 30136 / 31885 69.6 14.4 1.0X
agg w/o group wholestage on 1851 / 1860 1132.9 0.9 16.3X
*/
}
ignore("stat functions") {
val N = 100L << 20
runBenchmark("stddev", N) {
sparkSession.range(N).groupBy().agg("id" -> "stddev").collect()
}
runBenchmark("kurtosis", N) {
sparkSession.range(N).groupBy().agg("id" -> "kurtosis").collect()
}
/*
Using ImperativeAggregate (as implemented in Spark 1.6):
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
stddev: Avg Time(ms) Avg Rate(M/s) Relative Rate
-------------------------------------------------------------------------------
stddev w/o codegen 2019.04 10.39 1.00 X
stddev w codegen 2097.29 10.00 0.96 X
kurtosis w/o codegen 2108.99 9.94 0.96 X
kurtosis w codegen 2090.69 10.03 0.97 X
Using DeclarativeAggregate:
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
stddev: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
stddev codegen=false 5630 / 5776 18.0 55.6 1.0X
stddev codegen=true 1259 / 1314 83.0 12.0 4.5X
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
kurtosis: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
kurtosis codegen=false 14847 / 15084 7.0 142.9 1.0X
kurtosis codegen=true 1652 / 2124 63.0 15.9 9.0X
*/
}
ignore("aggregate with linear keys") {
val N = 20 << 22
val benchmark = new Benchmark("Aggregate w keys", N)
def f(): Unit = {
sparkSession.range(N).selectExpr("(id & 65535) as k").groupBy("k").sum().collect()
}
benchmark.addCase(s"codegen = F", numIters = 2) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
f()
}
benchmark.run()
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_60-b27 on Mac OS X 10.11
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
Aggregate w keys: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
codegen = F 6619 / 6780 12.7 78.9 1.0X
codegen = T hashmap = F 3935 / 4059 21.3 46.9 1.7X
codegen = T hashmap = T 897 / 971 93.5 10.7 7.4X
*/
}
ignore("aggregate with randomized keys") {
val N = 20 << 22
val benchmark = new Benchmark("Aggregate w keys", N)
sparkSession.range(N).selectExpr("id", "floor(rand() * 10000) as k")
.createOrReplaceTempView("test")
def f(): Unit = sparkSession.sql("select k, k, sum(id) from test group by k, k").collect()
benchmark.addCase(s"codegen = F", numIters = 2) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", value = false)
f()
}
benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true)
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", value = true)
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
f()
}
benchmark.run()
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_60-b27 on Mac OS X 10.11
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
Aggregate w keys: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
codegen = F 7445 / 7517 11.3 88.7 1.0X
codegen = T hashmap = F 4672 / 4703 18.0 55.7 1.6X
codegen = T hashmap = T 1764 / 1958 47.6 21.0 4.2X
*/
}
ignore("aggregate with string key") {
val N = 20 << 20
val benchmark = new Benchmark("Aggregate w string key", N)
def f(): Unit = sparkSession.range(N).selectExpr("id", "cast(id & 1023 as string) as k")
.groupBy("k").count().collect()
benchmark.addCase(s"codegen = F", numIters = 2) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = F", numIters = 3) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = T", numIters = 5) { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
f()
}
benchmark.run()
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_73-b02 on Mac OS X 10.11.4
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
Aggregate w string key: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
codegen = F 3307 / 3376 6.3 157.7 1.0X
codegen = T hashmap = F 2364 / 2471 8.9 112.7 1.4X
codegen = T hashmap = T 1740 / 1841 12.0 83.0 1.9X
*/
}
ignore("aggregate with decimal key") {
val N = 20 << 20
val benchmark = new Benchmark("Aggregate w decimal key", N)
def f(): Unit = sparkSession.range(N).selectExpr("id", "cast(id & 65535 as decimal) as k")
.groupBy("k").count().collect()
benchmark.addCase(s"codegen = F") { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = F") { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = T") { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
f()
}
benchmark.run()
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_73-b02 on Mac OS X 10.11.4
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
Aggregate w decimal key: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
codegen = F 2756 / 2817 7.6 131.4 1.0X
codegen = T hashmap = F 1580 / 1647 13.3 75.4 1.7X
codegen = T hashmap = T 641 / 662 32.7 30.6 4.3X
*/
}
ignore("aggregate with multiple key types") {
val N = 20 << 20
val benchmark = new Benchmark("Aggregate w multiple keys", N)
def f(): Unit = sparkSession.range(N)
.selectExpr(
"id",
"(id & 1023) as k1",
"cast(id & 1023 as string) as k2",
"cast(id & 1023 as int) as k3",
"cast(id & 1023 as double) as k4",
"cast(id & 1023 as float) as k5",
"id > 1023 as k6")
.groupBy("k1", "k2", "k3", "k4", "k5", "k6")
.sum()
.collect()
benchmark.addCase(s"codegen = F") { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = F") { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "false")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "false")
f()
}
benchmark.addCase(s"codegen = T hashmap = T") { iter =>
sparkSession.conf.set("spark.sql.codegen.wholeStage", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.twolevel.enabled", "true")
sparkSession.conf.set("spark.sql.codegen.aggregate.map.vectorized.enable", "true")
f()
}
benchmark.run()
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_73-b02 on Mac OS X 10.11.4
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
Aggregate w decimal key: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
codegen = F 5885 / 6091 3.6 280.6 1.0X
codegen = T hashmap = F 3625 / 4009 5.8 172.8 1.6X
codegen = T hashmap = T 3204 / 3271 6.5 152.8 1.8X
*/
}
ignore("max function bytecode size of wholestagecodegen") {
val N = 20 << 15
val benchmark = new Benchmark("max function bytecode size", N)
def f(): Unit = sparkSession.range(N)
.selectExpr(
"id",
"(id & 1023) as k1",
"cast(id & 1023 as double) as k2",
"cast(id & 1023 as int) as k3",
"case when id > 100 and id <= 200 then 1 else 0 end as v1",
"case when id > 200 and id <= 300 then 1 else 0 end as v2",
"case when id > 300 and id <= 400 then 1 else 0 end as v3",
"case when id > 400 and id <= 500 then 1 else 0 end as v4",
"case when id > 500 and id <= 600 then 1 else 0 end as v5",
"case when id > 600 and id <= 700 then 1 else 0 end as v6",
"case when id > 700 and id <= 800 then 1 else 0 end as v7",
"case when id > 800 and id <= 900 then 1 else 0 end as v8",
"case when id > 900 and id <= 1000 then 1 else 0 end as v9",
"case when id > 1000 and id <= 1100 then 1 else 0 end as v10",
"case when id > 1100 and id <= 1200 then 1 else 0 end as v11",
"case when id > 1200 and id <= 1300 then 1 else 0 end as v12",
"case when id > 1300 and id <= 1400 then 1 else 0 end as v13",
"case when id > 1400 and id <= 1500 then 1 else 0 end as v14",
"case when id > 1500 and id <= 1600 then 1 else 0 end as v15",
"case when id > 1600 and id <= 1700 then 1 else 0 end as v16",
"case when id > 1700 and id <= 1800 then 1 else 0 end as v17",
"case when id > 1800 and id <= 1900 then 1 else 0 end as v18")
.groupBy("k1", "k2", "k3")
.sum()
.collect()
benchmark.addCase("codegen = F") { iter =>
sparkSession.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "false")
f()
}
benchmark.addCase("codegen = T hugeMethodLimit = 10000") { iter =>
sparkSession.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "true")
sparkSession.conf.set(SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key, "10000")
f()
}
benchmark.addCase("codegen = T hugeMethodLimit = 1500") { iter =>
sparkSession.conf.set(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key, "true")
sparkSession.conf.set(SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key, "1500")
f()
}
benchmark.run()
/*
Java HotSpot(TM) 64-Bit Server VM 1.8.0_31-b13 on Mac OS X 10.10.2
Intel(R) Core(TM) i7-4578U CPU @ 3.00GHz
max function bytecode size: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
------------------------------------------------------------------------------------------------
codegen = F 709 / 803 0.9 1082.1 1.0X
codegen = T hugeMethodLimit = 10000 3485 / 3548 0.2 5317.7 0.2X
codegen = T hugeMethodLimit = 1500 636 / 701 1.0 969.9 1.1X
*/
}
ignore("cube") {
val N = 5 << 20
runBenchmark("cube", N) {
sparkSession.range(N).selectExpr("id", "id % 1000 as k1", "id & 256 as k2")
.cube("k1", "k2").sum("id").collect()
}
    /*
Intel(R) Core(TM) i7-4558U CPU @ 2.80GHz
cube: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
cube codegen=false 3188 / 3392 1.6 608.2 1.0X
cube codegen=true 1239 / 1394 4.2 236.3 2.6X
*/
}
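  // Compares the hashing strategies and hash-map implementations that back aggregation:
  // raw hash functions, java.util.HashMap variants, LongToUnsafeRowMap, BytesToBytesMap
  // (on- and off-heap) and the vectorized AggregateHashMap.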
ignore("hash and BytesToBytesMap") {
val N = 20 << 20
val benchmark = new Benchmark("BytesToBytesMap", N)
benchmark.addCase("UnsafeRowhash") { iter =>
var i = 0
val keyBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var s = 0
while (i < N) {
key.setInt(0, i % 1000)
val h = Murmur3_x86_32.hashUnsafeWords(
key.getBaseObject, key.getBaseOffset, key.getSizeInBytes, 42)
s += h
i += 1
}
}
benchmark.addCase("murmur3 hash") { iter =>
var i = 0
val keyBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var p = 524283
var s = 0
while (i < N) {
var h = Murmur3_x86_32.hashLong(i, 42)
key.setInt(0, h)
s += h
i += 1
}
}
benchmark.addCase("fast hash") { iter =>
var i = 0
val keyBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var p = 524283
var s = 0
while (i < N) {
var h = i % p
if (h < 0) {
h += p
}
key.setInt(0, h)
s += h
i += 1
}
}
benchmark.addCase("arrayEqual") { iter =>
var i = 0
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
var s = 0
while (i < N) {
key.setInt(0, i % 1000)
if (key.equals(value)) {
s += 1
}
i += 1
}
}
benchmark.addCase("Java HashMap (Long)") { iter =>
var i = 0
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val map = new HashMap[Long, UnsafeRow]()
while (i < 65536) {
value.setInt(0, i)
map.put(i.toLong, value)
i += 1
}
var s = 0
i = 0
while (i < N) {
if (map.get(i % 100000) != null) {
s += 1
}
i += 1
}
}
benchmark.addCase("Java HashMap (two ints) ") { iter =>
var i = 0
val valueBytes = new Array[Byte](16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val map = new HashMap[Long, UnsafeRow]()
while (i < 65536) {
value.setInt(0, i)
val key = (i.toLong << 32) + Integer.rotateRight(i, 15)
map.put(key, value)
i += 1
}
var s = 0
i = 0
while (i < N) {
val key = ((i & 100000).toLong << 32) + Integer.rotateRight(i & 100000, 15)
if (map.get(key) != null) {
s += 1
}
i += 1
}
}
benchmark.addCase("Java HashMap (UnsafeRow)") { iter =>
var i = 0
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val map = new HashMap[UnsafeRow, UnsafeRow]()
while (i < 65536) {
key.setInt(0, i)
value.setInt(0, i)
map.put(key, value.copy())
i += 1
}
var s = 0
i = 0
while (i < N) {
key.setInt(0, i % 100000)
if (map.get(key) != null) {
s += 1
}
i += 1
}
}
Seq(false, true).foreach { optimized =>
benchmark.addCase(s"LongToUnsafeRowMap (opt=$optimized)") { iter =>
var i = 0
val valueBytes = new Array[Byte](16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val taskMemoryManager = new TaskMemoryManager(
new StaticMemoryManager(
new SparkConf().set("spark.memory.offHeap.enabled", "false"),
Long.MaxValue,
Long.MaxValue,
1),
0)
val map = new LongToUnsafeRowMap(taskMemoryManager, 64)
while (i < 65536) {
value.setInt(0, i)
val key = i % 100000
map.append(key, value)
i += 1
}
if (optimized) {
map.optimize()
}
var s = 0
i = 0
while (i < N) {
val key = i % 100000
if (map.getValue(key, value) != null) {
s += 1
}
i += 1
}
}
}
Seq("off", "on").foreach { heap =>
benchmark.addCase(s"BytesToBytesMap ($heap Heap)") { iter =>
val taskMemoryManager = new TaskMemoryManager(
new StaticMemoryManager(
new SparkConf().set("spark.memory.offHeap.enabled", s"${heap == "off"}")
.set("spark.memory.offHeap.size", "102400000"),
Long.MaxValue,
Long.MaxValue,
1),
0)
val map = new BytesToBytesMap(taskMemoryManager, 1024, 64L<<20)
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var i = 0
val numKeys = 65536
while (i < numKeys) {
key.setInt(0, i % 65536)
val loc = map.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
Murmur3_x86_32.hashLong(i % 65536, 42))
if (!loc.isDefined) {
loc.append(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
value.getBaseObject, value.getBaseOffset, value.getSizeInBytes)
}
i += 1
}
i = 0
var s = 0
while (i < N) {
key.setInt(0, i % 100000)
val loc = map.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
Murmur3_x86_32.hashLong(i % 100000, 42))
if (loc.isDefined) {
s += 1
}
i += 1
}
}
}
benchmark.addCase("Aggregate HashMap") { iter =>
var i = 0
val numKeys = 65536
val schema = new StructType()
.add("key", LongType)
.add("value", LongType)
val map = new AggregateHashMap(schema)
while (i < numKeys) {
val row = map.findOrInsert(i.toLong)
row.setLong(1, row.getLong(1) + 1)
i += 1
}
var s = 0
i = 0
while (i < N) {
if (map.find(i % 100000) != -1) {
s += 1
}
i += 1
}
}
/*
Intel(R) Core(TM) i7-4960HQ CPU @ 2.60GHz
BytesToBytesMap: Best/Avg Time(ms) Rate(M/s) Per Row(ns) Relative
-------------------------------------------------------------------------------------------
UnsafeRow hash 267 / 284 78.4 12.8 1.0X
murmur3 hash 102 / 129 205.5 4.9 2.6X
fast hash 79 / 96 263.8 3.8 3.4X
arrayEqual 164 / 172 128.2 7.8 1.6X
Java HashMap (Long) 321 / 399 65.4 15.3 0.8X
Java HashMap (two ints) 328 / 363 63.9 15.7 0.8X
Java HashMap (UnsafeRow) 1140 / 1200 18.4 54.3 0.2X
LongToUnsafeRowMap (opt=false) 378 / 400 55.5 18.0 0.7X
LongToUnsafeRowMap (opt=true) 144 / 152 145.2 6.9 1.9X
BytesToBytesMap (off Heap) 1300 / 1616 16.1 62.0 0.2X
BytesToBytesMap (on Heap) 1165 / 1202 18.0 55.5 0.2X
Aggregate HashMap 121 / 131 173.3 5.8 2.2X
*/
benchmark.run()
}
}
| aray/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala | Scala | apache-2.0 | 25,122 |
package org.abhijitsarkar.moviedb
/**
* @author Abhijit Sarkar
*/
object TestHelper {
val movie = {
Movie(
"test",
-1,
Nil,
"",
"",
Nil,
Nil,
Nil,
Nil,
"",
"",
-1,
-1.0,
"1"
)
}
val movies = {
Seq(movie)
}
}
| asarkar/akka | movie-db/src/test/scala/org/abhijitsarkar/moviedb/TestHelper.scala | Scala | gpl-3.0 | 318 |
/*
* This file is part of AckCord, licensed under the MIT License (MIT).
*
* Copyright (c) 2019 Katrix
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package ackcord.commands
import ackcord.CacheSnapshot
import ackcord.data.Message
/**
* Top trait for all command messages.
*/
sealed trait AllCmdMessages
/**
* Trait for all command errors.
*/
sealed trait CmdError extends AllCmdMessages
/**
* Trait for commands that have not been parsed into a specific command.
*/
sealed trait RawCmdMessage extends AllCmdMessages
/**
* Trait for all unparsed command messages.
*/
sealed trait CmdMessage extends AllCmdMessages
/**
* Trait for all parsed command messages.
*/
sealed trait ParsedCmdMessage[+A] extends AllCmdMessages
/**
* A raw unparsed command.
* @param msg The message of this command.
* @param prefix The prefix for this command.
* @param cmd The name of this command.
* @param args The arguments of this command.
* @param c The cache for this command.
*/
case class RawCmd(msg: Message, prefix: String, cmd: String, args: List[String], c: CacheSnapshot) extends RawCmdMessage
/**
* Bot was mentioned, but no command was used.
*/
case class NoCmd(msg: Message, c: CacheSnapshot) extends RawCmdMessage with CmdError
/**
* An unknown prefix was used.
*/
case class NoCmdPrefix(msg: Message, command: String, args: List[String], c: CacheSnapshot)
extends RawCmdMessage
with CmdError
/**
* An unparsed specific command.
* @param msg The message of this command.
* @param args The args for this command.
* @param cache The cache for this command.
*/
case class Cmd(msg: Message, args: List[String], cache: CacheSnapshot) extends CmdMessage
/**
* A parsed specific command.
* @param msg The message of this command.
* @param args The args for this command.
 * @param remaining The arguments remaining after the parser has consumed its input.
* @param cache The cache for this command.
*/
case class ParsedCmd[A](msg: Message, args: A, remaining: List[String], cache: CacheSnapshot)
extends ParsedCmdMessage[A]
/**
* A parse error for a parsed command.
* @param msg The message of this command.
* @param error The error message.
* @param cache The cache for this command.
*/
case class CmdParseError(msg: Message, error: String, cache: CacheSnapshot)
extends ParsedCmdMessage[Nothing]
with CmdError
/**
* A command that did not make it through some filters.
* @param failedFilters The filters the command failed.
* @param cmd The raw command object.
*/
case class FilteredCmd(failedFilters: Seq[CmdFilter], cmd: RawCmd)
extends CmdMessage
with ParsedCmdMessage[Nothing]
with CmdError
/**
* A generic command error.
* @param error The error message.
* @param cmd The raw command object.
*/
case class GenericCmdError(error: String, cmd: RawCmd) extends CmdMessage with ParsedCmdMessage[Nothing] with CmdError
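// Illustrative sketch (not part of the original file): how downstream code might
// discriminate between the message types defined above.
private object CmdMessageSketch {
  def describe(msg: AllCmdMessages): String = msg match {
    case RawCmd(_, prefix, cmd, args, _) => s"raw command $prefix$cmd with ${args.size} args"
    case NoCmd(_, _)                     => "bot mentioned without a command"
    case CmdParseError(_, error, _)      => s"parse error: $error"
    case FilteredCmd(failed, _)          => s"command failed ${failed.size} filter(s)"
    case _                               => "other command message"
  }
}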
| Katrix-/AckCord | commands/src/main/scala/ackcord/commands/cmdObjs.scala | Scala | mit | 3,970 |
object Castles {
trait Monoid[M] {
def op(x: M, y: M): M
def id: M
}
implicit object SumInt extends Monoid[Int] {
def op(x: Int, y: Int) = x+y
def id = 0
}
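  /**
   * Disjoint-set (union-find) structure that also folds a monoid value over each
   * component: `op` accumulates a value into an element's set, `union` merges two sets
   * and combines their accumulated values, and `roots` maps every representative to the
   * combined value of its set.
   */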
class UnionFind[Node, M: Monoid] {
private var map = Map[Node, Node]()
private var m = Map[Node, M]()
def find(x: Node): Node = map.get(x) match {
case Some(y) if x != y => find(y)
case Some(y) /* x == y */ => y
case None =>
map += ((x, x))
m += ((x, implicitly[Monoid[M]].id))
map(x)
}
def union(x: Node, y: Node) = {
val (x0, y0) = (find(x), find(y))
if (x0 != y0) {
map += ((x0, y0))
m += ((y0, implicitly[Monoid[M]].op(m(x0), m(y0))))
m -= x0
}
()
}
def op(x: Node, v: M) = {
val x0 = find(x)
m += ((x0, implicitly[Monoid[M]].op(m(x0), v)))
}
def nodes = map.keys
def roots = m
}
type Module = (Int, Int)
type Castle = UnionFind[Module, Int]
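  /**
   * Reads the castle layout from stdin: the height, the width, then one wall bitmask per
   * module. Bit value 4 marks an east wall and 8 a south wall; whenever one of those
   * walls is absent the two adjacent modules are unioned, each module contributing 1 to
   * its room's size.
   */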
def read(): Castle = {
val res = new Castle()
val in = io.Source.stdin.getLines
val (h, w) = (in.next.toInt, in.next.toInt)
for (y <- 0 until h) {
val ln = in.next.split(" ")
for (x <- 0 until w) {
res.op((x, y), 1)
val walls = ln(x).toInt
if ((walls & 4) == 0) res.union((x, y), (x+1, y))
if ((walls & 8) == 0) res.union((x, y), (x, y+1))
}
}
res
}
def main(args: Array[String]) = {
val castle = read()
println(castle.roots.size)
println(castle.roots.map(_._2).max)
println(
(for {
(x, y) <- castle.nodes
val root = castle.find((x, y))
dir <- 0 until 2
val other = castle.find((x+dir, y+(1-dir)))
if (root != other)
} yield castle.roots(root) + castle.roots(other)).max
)
}
} | tailcalled/GCCodeCompetition | GCCC/Samples/Castles.scala | Scala | agpl-3.0 | 1,648 |
package velocity
import java.io.{BufferedReader,IOException,UnsupportedEncodingException}
import java.security.Principal
import java.util.{Enumeration,Locale,Map}
import javax.servlet.{RequestDispatcher,ServletInputStream}
import javax.servlet.http.{Cookie,HttpServletRequest,HttpSession}
/**
* @author bryanjswift
*/
trait DummyRequest extends HttpServletRequest {
private[this] class UnsupportedMethodException extends RuntimeException
def getAuthType():String = throw new UnsupportedMethodException()
def getContextPath():String = throw new UnsupportedMethodException()
def getCookies():Array[Cookie] = throw new UnsupportedMethodException()
def getDateHeader(name:String):Long = throw new UnsupportedMethodException()
def getHeader(name:String):String = throw new UnsupportedMethodException()
def getHeaderNames():Enumeration[_] = throw new UnsupportedMethodException()
def getHeaders(name:String):Enumeration[_] = throw new UnsupportedMethodException()
def getIntHeader(name:String):Int = throw new UnsupportedMethodException()
def getLocalAddr():String = throw new UnsupportedMethodException()
def getLocalName():String = throw new UnsupportedMethodException()
def getLocalPort():Int = throw new UnsupportedMethodException()
def getMethod():String = throw new UnsupportedMethodException()
def getPathInfo():String = throw new UnsupportedMethodException()
def getPathTranslated():String = throw new UnsupportedMethodException()
def getQueryString():String = throw new UnsupportedMethodException()
def getRemotePort():Int = throw new UnsupportedMethodException()
def getRemoteUser():String = throw new UnsupportedMethodException()
def getRequestURI():String = throw new UnsupportedMethodException()
def getRequestURL():StringBuffer = throw new UnsupportedMethodException()
def getRequestedSessionId():String = throw new UnsupportedMethodException()
def getServletPath():String = throw new UnsupportedMethodException()
def getSession():HttpSession = throw new UnsupportedMethodException()
def getSession(create:Boolean):HttpSession = throw new UnsupportedMethodException()
def getUserPrincipal():Principal = throw new UnsupportedMethodException()
def isRequestedSessionIdFromCookie():Boolean = throw new UnsupportedMethodException()
def isRequestedSessionIdFromURL():Boolean = throw new UnsupportedMethodException()
def isRequestedSessionIdFromUrl():Boolean = throw new UnsupportedMethodException()
def isRequestedSessionIdValid():Boolean = throw new UnsupportedMethodException()
def isUserInRole(role:String):Boolean = throw new UnsupportedMethodException()
def getAttribute(name:String):Object = throw new UnsupportedMethodException()
def getAttributeNames():Enumeration[_] = throw new UnsupportedMethodException()
def getCharacterEncoding():String = throw new UnsupportedMethodException()
def getContentLength():Int = throw new UnsupportedMethodException()
def getContentType():String = throw new UnsupportedMethodException()
def getInputStream():ServletInputStream = throw new UnsupportedMethodException()
def getLocale():Locale = throw new UnsupportedMethodException()
def getLocales():Enumeration[_] = throw new UnsupportedMethodException()
def getParameter(name:String):String = throw new UnsupportedMethodException()
def getParameterMap():Map[_,_] = throw new UnsupportedMethodException()
def getParameterNames():Enumeration[_] = throw new UnsupportedMethodException()
def getParameterValues(name:String):Array[String] = throw new UnsupportedMethodException()
def getProtocol():String = throw new UnsupportedMethodException()
def getReader():BufferedReader = throw new UnsupportedMethodException()
def getRealPath(path:String):String = throw new UnsupportedMethodException()
def getRemoteAddr():String = throw new UnsupportedMethodException()
def getRemoteHost():String = throw new UnsupportedMethodException()
def getRequestDispatcher(path:String):RequestDispatcher = throw new UnsupportedMethodException()
def getScheme():String = throw new UnsupportedMethodException()
def getServerName():String = throw new UnsupportedMethodException()
def getServerPort():Int = throw new UnsupportedMethodException()
def isSecure():Boolean = throw new UnsupportedMethodException()
def removeAttribute(name:String):Unit = throw new UnsupportedMethodException()
def setAttribute(name:String, o:Any):Unit = throw new UnsupportedMethodException()
def setCharacterEncoding(arg0:String):Unit = throw new UnsupportedMethodException()
}
| bryanjswift/quotidian | src/test/scala/velocity/DummyRequest.scala | Scala | mit | 4,488 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.index.z2
import org.locationtech.jts.geom.{Geometry, Point}
import org.geotools.util.factory.Hints
import org.locationtech.geomesa.curve.Z2SFC
import org.locationtech.geomesa.filter.{FilterHelper, FilterValues}
import org.locationtech.geomesa.index.api.IndexKeySpace.IndexKeySpaceFactory
import org.locationtech.geomesa.index.api.ShardStrategy.{NoShardStrategy, ZShardStrategy}
import org.locationtech.geomesa.index.api._
import org.locationtech.geomesa.index.conf.QueryHints.LOOSE_BBOX
import org.locationtech.geomesa.index.conf.QueryProperties
import org.locationtech.geomesa.index.geotools.GeoMesaDataStoreFactory.GeoMesaDataStoreConfig
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.utils.geotools.{GeometryUtils, WholeWorldPolygon}
import org.locationtech.geomesa.utils.index.ByteArrays
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
import scala.util.control.NonFatal
class Z2IndexKeySpace(val sft: SimpleFeatureType, val sharding: ShardStrategy, geomField: String)
extends IndexKeySpace[Z2IndexValues, Long] {
require(classOf[Point].isAssignableFrom(sft.getDescriptor(geomField).getType.getBinding),
s"Expected field $geomField to have a point binding, but instead it has: " +
sft.getDescriptor(geomField).getType.getBinding.getSimpleName)
protected val sfc: Z2SFC = Z2SFC
protected val geomIndex: Int = sft.indexOf(geomField)
override val attributes: Seq[String] = Seq(geomField)
override val indexKeyByteLength: Right[(Array[Byte], Int, Int) => Int, Int] = Right(8 + sharding.length)
override val sharing: Array[Byte] = Array.empty
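  // row layout produced below: [ optional single shard byte ][ 8-byte z-value ][ feature id ]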
override def toIndexKey(writable: WritableFeature,
tier: Array[Byte],
id: Array[Byte],
lenient: Boolean): RowKeyValue[Long] = {
val geom = writable.getAttribute[Point](geomIndex)
if (geom == null) {
throw new IllegalArgumentException(s"Null geometry in feature ${writable.feature.getID}")
}
val z = try { sfc.index(geom.getX, geom.getY, lenient) } catch {
case NonFatal(e) => throw new IllegalArgumentException(s"Invalid z value from geometry: $geom", e)
}
val shard = sharding(writable)
// create the byte array - allocate a single array up front to contain everything
// ignore tier, not used here
val bytes = Array.ofDim[Byte](shard.length + 8 + id.length)
if (shard.isEmpty) {
ByteArrays.writeLong(z, bytes, 0)
System.arraycopy(id, 0, bytes, 8, id.length)
} else {
bytes(0) = shard.head // shard is only a single byte
ByteArrays.writeLong(z, bytes, 1)
System.arraycopy(id, 0, bytes, 9, id.length)
}
SingleRowKeyValue(bytes, sharing, shard, z, tier, id, writable.values)
}
override def getIndexValues(filter: Filter, explain: Explainer): Z2IndexValues = {
val geometries: FilterValues[Geometry] = {
val extracted = FilterHelper.extractGeometries(filter, geomField, intersect = true) // intersect since we have points
if (extracted.nonEmpty) { extracted } else { FilterValues(Seq(WholeWorldPolygon)) }
}
explain(s"Geometries: $geometries")
if (geometries.disjoint) {
explain("Non-intersecting geometries extracted, short-circuiting to empty query")
return Z2IndexValues(sfc, geometries, Seq.empty)
}
// compute our ranges based on the coarse bounds for our query
val xy: Seq[(Double, Double, Double, Double)] = {
val multiplier = QueryProperties.PolygonDecompMultiplier.toInt.get
val bits = QueryProperties.PolygonDecompBits.toInt.get
geometries.values.flatMap(GeometryUtils.bounds(_, multiplier, bits))
}
Z2IndexValues(sfc, geometries, xy)
}
override def getRanges(values: Z2IndexValues, multiplier: Int): Iterator[ScanRange[Long]] = {
val Z2IndexValues(_, _, xy) = values
if (xy.isEmpty) { Iterator.empty } else {
// note: `target` will always be Some, as ScanRangesTarget has a default value
val target = QueryProperties.ScanRangesTarget.option.map(t => math.max(1, t.toInt / multiplier))
sfc.ranges(xy, 64, target).iterator.map(r => BoundedRange(r.lower, r.upper))
}
}
override def getRangeBytes(ranges: Iterator[ScanRange[Long]], tier: Boolean): Iterator[ByteRange] = {
if (sharding.length == 0) {
ranges.map {
case BoundedRange(lo, hi) => BoundedByteRange(ByteArrays.toBytes(lo), ByteArrays.toBytesFollowingPrefix(hi))
case r => throw new IllegalArgumentException(s"Unexpected range type $r")
}
} else {
ranges.flatMap {
case BoundedRange(lo, hi) =>
val lower = ByteArrays.toBytes(lo)
val upper = ByteArrays.toBytesFollowingPrefix(hi)
sharding.shards.map(p => BoundedByteRange(ByteArrays.concat(p, lower), ByteArrays.concat(p, upper)))
case r => throw new IllegalArgumentException(s"Unexpected range type $r")
}
}
}
override def useFullFilter(values: Option[Z2IndexValues],
config: Option[GeoMesaDataStoreConfig],
hints: Hints): Boolean = {
// if the user has requested strict bounding boxes, we apply the full filter
// if the spatial predicate is rectangular (e.g. a bbox), the index is fine enough that we
// don't need to apply the filter on top of it. this may cause some minor errors at extremely
// fine resolutions, but the performance is worth it
// if we have a complicated geometry predicate, we need to pass it through to be evaluated
val looseBBox = Option(hints.get(LOOSE_BBOX)).map(Boolean.unbox).getOrElse(config.forall(_.queries.looseBBox))
lazy val simpleGeoms = values.toSeq.flatMap(_.geometries.values).forall(GeometryUtils.isRectangular)
!looseBBox || !simpleGeoms
}
}
object Z2IndexKeySpace extends IndexKeySpaceFactory[Z2IndexValues, Long] {
override def supports(sft: SimpleFeatureType, attributes: Seq[String]): Boolean =
attributes.lengthCompare(1) == 0 && sft.indexOf(attributes.head) != -1 &&
classOf[Point].isAssignableFrom(sft.getDescriptor(attributes.head).getType.getBinding)
override def apply(sft: SimpleFeatureType, attributes: Seq[String], tier: Boolean): Z2IndexKeySpace = {
val shards = if (tier) { NoShardStrategy } else { ZShardStrategy(sft) }
new Z2IndexKeySpace(sft, shards, attributes.head)
}
}
| locationtech/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/index/z2/Z2IndexKeySpace.scala | Scala | apache-2.0 | 6,937 |
/*
* Copyright 2009-2017. DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package org.mrgeo.mapalgebra
import java.awt.image.DataBuffer
import java.io.{Externalizable, IOException, ObjectInput, ObjectOutput}
import org.apache.spark.{SparkConf, SparkContext}
import org.mrgeo.data.raster.{MrGeoRaster, RasterWritable}
import org.mrgeo.data.rdd.RasterRDD
import org.mrgeo.job.JobArguments
import org.mrgeo.mapalgebra.parser._
import org.mrgeo.mapalgebra.raster.RasterMapOp
import org.mrgeo.utils.SparkUtils
object LogMapOp extends MapOpRegistrar {
override def register:Array[String] = {
Array[String]("log")
}
def create(raster:RasterMapOp, base:Double = 1.0):MapOp =
new LogMapOp(Some(raster), Some(base))
override def apply(node:ParserNode, variables:String => Option[ParserNode]):MapOp =
new LogMapOp(node, variables)
}
class LogMapOp extends RasterMapOp with Externalizable {
private var inputMapOp:Option[RasterMapOp] = None
private var base:Option[Double] = None
private var rasterRDD:Option[RasterRDD] = None
override def rdd():Option[RasterRDD] = rasterRDD
override def getZoomLevel(): Int = {
inputMapOp.getOrElse(throw new IOException("No raster input specified")).getZoomLevel()
}
override def execute(context:SparkContext):Boolean = {
val input:RasterMapOp = inputMapOp getOrElse (throw new IOException("Input MapOp not valid!"))
val meta = input.metadata() getOrElse
(throw new IOException("Can't load metadata! Ouch! " + input.getClass.getName))
val rdd = input.rdd() getOrElse (throw new IOException("Can't load RDD! Ouch! " + inputMapOp.getClass.getName))
// precompute the denominator for the calculation
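    // (with no explicit base the denominator is 1, so Math.log gives the natural logarithm)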
val baseVal =
if (base.isDefined) {
Math.log(base.get)
}
else {
1
}
val nodata = meta.getDefaultValues
val outputnodata = Array.fill[Double](meta.getBands)(Float.NaN)
rasterRDD = Some(RasterRDD(rdd.map(tile => {
val raster = RasterWritable.toMrGeoRaster(tile._2)
val output = MrGeoRaster.createEmptyRaster(raster.width(), raster.height(), raster.bands(), DataBuffer.TYPE_FLOAT)
var y:Int = 0
while (y < raster.height()) {
var x:Int = 0
while (x < raster.width()) {
var b:Int = 0
while (b < raster.bands()) {
val v = raster.getPixelDouble(x, y, b)
if (RasterMapOp.isNotNodata(v, nodata(b))) {
output.setPixel(x, y, b, Math.log(v) / baseVal)
}
else {
output.setPixel(x, y, b, outputnodata(b))
}
b += 1
}
x += 1
}
y += 1
}
(tile._1, RasterWritable.toWritable(output))
})))
metadata(SparkUtils.calculateMetadata(rasterRDD.get, meta.getMaxZoomLevel, outputnodata,
bounds = meta.getBounds, calcStats = false))
true
}
override def setup(job:JobArguments, conf:SparkConf):Boolean = true
override def teardown(job:JobArguments, conf:SparkConf):Boolean = true
override def readExternal(in:ObjectInput):Unit = {
base = in.readObject().asInstanceOf[Option[Double]]
}
override def writeExternal(out:ObjectOutput):Unit = {
out.writeObject(base)
}
private[mapalgebra] def this(raster:Option[RasterMapOp], base:Option[Double]) = {
this()
inputMapOp = raster
this.base = base
}
private[mapalgebra] def this(node:ParserNode, variables:String => Option[ParserNode]) = {
this()
if (node.getNumChildren < 1) {
throw new ParserException(node.getName + " requires at least one argument")
}
else if (node.getNumChildren > 2) {
throw new ParserException(node.getName + " requires only one or two arguments")
}
inputMapOp = RasterMapOp.decodeToRaster(node.getChild(0), variables)
if (node.getNumChildren == 2) {
base = MapOp.decodeDouble(node.getChild(1))
}
}
}
| ngageoint/mrgeo | mrgeo-mapalgebra/mrgeo-mapalgebra-rastermath/src/main/scala/org/mrgeo/mapalgebra/LogMapOp.scala | Scala | apache-2.0 | 4,464 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import java.io.File
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import net.razorvine.pickle.{Pickler, Unpickler}
import org.apache.spark.{SparkEnv, TaskContext}
import org.apache.spark.api.python.{ChainedPythonFunctions, PythonRunner}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.types.{DataType, StructField, StructType}
import org.apache.spark.util.Utils
/**
* A physical plan that evaluates a [[PythonUDF]], one partition of tuples at a time.
*
* Python evaluation works by sending the necessary (projected) input data via a socket to an
* external Python process, and combine the result from the Python process with the original row.
*
* For each row we send to Python, we also put it in a queue first. For each output row from Python,
* we drain the queue to find the original input row. Note that if the Python process is way too
 * slow, this could lead to the queue growing unbounded and spilling to disk when memory runs out.
*
* Here is a diagram to show how this works:
*
* Downstream (for parent)
* / \\
* / socket (output of UDF)
* / \\
* RowQueue Python
* \\ /
* \\ socket (input of UDF)
* \\ /
* upstream (from child)
*
* The rows sent to and received from Python are packed into batches (100 rows) and serialized,
 * so there should always be some rows buffered in the socket or Python process, and the pull from
 * RowQueue ALWAYS happens after the corresponding push into it.
*/
case class BatchEvalPythonExec(udfs: Seq[PythonUDF], output: Seq[Attribute], child: SparkPlan)
extends SparkPlan {
def children: Seq[SparkPlan] = child :: Nil
override def producedAttributes: AttributeSet = AttributeSet(output.drop(child.output.length))
private def collectFunctions(udf: PythonUDF): (ChainedPythonFunctions, Seq[Expression]) = {
udf.children match {
case Seq(u: PythonUDF) =>
val (chained, children) = collectFunctions(u)
(ChainedPythonFunctions(chained.funcs ++ Seq(udf.func)), children)
case children =>
// There should not be any other UDFs, or the children can't be evaluated directly.
assert(children.forall(_.find(_.isInstanceOf[PythonUDF]).isEmpty))
(ChainedPythonFunctions(Seq(udf.func)), udf.children)
}
}
protected override def doExecute(): RDD[InternalRow] = {
val inputRDD = child.execute().map(_.copy())
val bufferSize = inputRDD.conf.getInt("spark.buffer.size", 65536)
val reuseWorker = inputRDD.conf.getBoolean("spark.python.worker.reuse", defaultValue = true)
inputRDD.mapPartitions { iter =>
EvaluatePython.registerPicklers() // register pickler for Row
// The queue used to buffer input rows so we can drain it to
// combine input with output from Python.
val queue = HybridRowQueue(TaskContext.get().taskMemoryManager(),
new File(Utils.getLocalDir(SparkEnv.get.conf)), child.output.length)
TaskContext.get().addTaskCompletionListener({ ctx =>
queue.close()
})
val (pyFuncs, inputs) = udfs.map(collectFunctions).unzip
// flatten all the arguments
val allInputs = new ArrayBuffer[Expression]
val dataTypes = new ArrayBuffer[DataType]
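      // deduplicate UDF arguments; argOffsets records, per UDF, the positions of its arguments in the shared projection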
val argOffsets = inputs.map { input =>
input.map { e =>
if (allInputs.exists(_.semanticEquals(e))) {
allInputs.indexWhere(_.semanticEquals(e))
} else {
allInputs += e
dataTypes += e.dataType
allInputs.length - 1
}
}.toArray
}.toArray
val projection = newMutableProjection(allInputs, child.output)
val schema = StructType(dataTypes.map(dt => StructField("", dt)))
val needConversion = dataTypes.exists(EvaluatePython.needConversionInPython)
      // enable memo iff we serialize the row with schema (schema and class should be memoized)
val pickle = new Pickler(needConversion)
// Input iterator to Python: input rows are grouped so we send them in batches to Python.
// For each row, add it to the queue.
val inputIterator = iter.map { inputRow =>
queue.add(inputRow.asInstanceOf[UnsafeRow])
val row = projection(inputRow)
if (needConversion) {
EvaluatePython.toJava(row, schema)
} else {
// fast path for these types that does not need conversion in Python
val fields = new Array[Any](row.numFields)
var i = 0
while (i < row.numFields) {
val dt = dataTypes(i)
fields(i) = EvaluatePython.toJava(row.get(i, dt), dt)
i += 1
}
fields
}
}.grouped(100).map(x => pickle.dumps(x.toArray))
val context = TaskContext.get()
// Output iterator for results from Python.
val outputIterator = new PythonRunner(pyFuncs, bufferSize, reuseWorker, true, argOffsets)
.compute(inputIterator, context.partitionId(), context)
val unpickle = new Unpickler
val mutableRow = new GenericInternalRow(1)
val joined = new JoinedRow
val resultType = if (udfs.length == 1) {
udfs.head.dataType
} else {
StructType(udfs.map(u => StructField("", u.dataType, u.nullable)))
}
val resultProj = UnsafeProjection.create(output, output)
outputIterator.flatMap { pickedResult =>
val unpickledBatch = unpickle.loads(pickedResult)
unpickledBatch.asInstanceOf[java.util.ArrayList[Any]].asScala
}.map { result =>
val row = if (udfs.length == 1) {
// fast path for single UDF
mutableRow(0) = EvaluatePython.fromJava(result, resultType)
mutableRow
} else {
EvaluatePython.fromJava(result, resultType).asInstanceOf[InternalRow]
}
resultProj(joined(queue.remove(), row))
}
}
}
}
| aokolnychyi/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/python/BatchEvalPythonExec.scala | Scala | apache-2.0 | 6,947 |
package controllers.cms.organization
import play.api.mvc._
import play.api.i18n._
import play.api.data.Form
import play.api.data.Forms._
import play.api.data.format.Formats._
import play.api.data.validation.Constraints._
import extensions.Formatters._
import controllers.OrganizationController
import extensions.{ MissingLibs, JJson }
import models._
import cms.{ MenuEntry, CMSPage }
import com.mongodb.casbah.Imports._
import plugins.CMSPlugin
import scala.collection.JavaConverters._
import core.storage.{ FileUploadResponse, FileStorage }
import controllers.dos.FileUpload
import com.escalatesoft.subcut.inject.BindingModule
/**
*
* @author Manuel Bernhardt <[email protected]>
*/
class CMS(implicit val bindingModule: BindingModule) extends OrganizationController {
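  /** Restricts the wrapped action to organization admins and to members of a group holding the CMS admin role. */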
def CMSAction[A](action: Action[A]): Action[A] = {
OrganizationMember {
MultitenantAction(action.parser) {
implicit request =>
{
if (organizationServiceLocator.byDomain.isAdmin(configuration.orgId, connectedUser) || Group.dao.count(MongoDBObject("users" -> connectedUser, "grantType" -> CMSPlugin.ROLE_CMS_ADMIN.key)) > 0) {
action(request)
} else {
Forbidden(Messages("hub.YouDoNotHaveAccess"))
}
}
}
}
}
def list(language: Option[String], menu: Option[String]) = CMSAction {
MultitenantAction {
implicit request =>
val lang = if (language.isDefined) Lang.get(language.get).getOrElse(getLang) else getLang
val entries = CMSPage.dao.entryList(lang, menu)
Ok(Template(
'data -> JJson.generate(Map("entries" -> entries)),
'languages -> getLanguages,
'currentLanguage -> lang.code,
'menuKey -> menu.getOrElse("")
))
}
}
def upload = CMSAction {
MultitenantAction {
implicit request =>
val files = FileStorage.listFiles(configuration.orgId).map(f => FileUploadResponse(f))
Ok(Template('uid -> MissingLibs.UUID, 'files -> JJson.generate(files)))
}
}
def uploadSubmit(uid: String) = CMSAction {
MultitenantAction {
implicit request =>
FileUpload.markFilesAttached(uid, configuration.orgId)
Redirect(routes.CMS.upload())
}
}
def listImages = CMSAction {
MultitenantAction {
implicit request =>
val images = FileStorage.listFiles(configuration.orgId).filter(_.contentType.contains("image"))
        // tinyMCE expects a global JavaScript array listing the available images
val javascript = "var tinyMCEImageList = new Array(" + images.map(i => """["%s","%s"]""".format(i.name, "/file/image/%s".format(i.id))).mkString(", ") + ");"
Ok(javascript).as("text/html")
}
}
def page(language: String, page: Option[String], menu: String): Action[AnyContent] = CMSAction {
MultitenantAction {
implicit request =>
def menuEntries = MenuEntry.dao.findEntries(menu)
val (viewModel: Option[CMSPageViewModel], versions: List[CMSPageViewModel]) = page match {
case None =>
(Some(CMSPageViewModel(System.currentTimeMillis(), "", language, "", connectedUser, "", false, false, menuEntries.length + 1, menu)), List.empty)
case Some(key) =>
val versions = CMSPage.dao.findByKeyAndLanguage(key, language)
if (versions.isEmpty) {
(None, Seq.empty)
} else {
(Some(CMSPageViewModel(versions.head, menu)), versions.map(CMSPageViewModel(_, menu)))
}
}
val menuDefinitions: Seq[java.util.Map[String, String]] = CMSPlugin.getConfiguration.map { config =>
config.menuDefinitions.map { definition =>
Map(
"key" -> definition.key,
"value" -> definition.title.get(getLang.language).getOrElse(definition.key)
).asJava
}
}.getOrElse {
Seq.empty
}
val activeMenuKey = if (viewModel.isDefined) {
viewModel.get.menu
} else {
menu
}
if (page.isDefined && versions.isEmpty) {
NotFound(page.get)
} else {
Ok(
Template(
'page -> JJson.generate(viewModel),
'versions -> JJson.generate(Map("versions" -> versions)),
'languages -> getLanguages,
'currentLanguage -> language,
'isNew -> (versions.isEmpty),
'menuKey -> activeMenuKey,
'menuDefinitions -> menuDefinitions
)
)
}
}
}
def pageSubmit = CMSAction {
MultitenantAction {
implicit request =>
CMSPageViewModel.pageForm.bind(request.body.asJson.get).fold(
formWithErrors => handleValidationError(formWithErrors),
pageModel => {
// create / update the entry before we create / update the page since in the implicit conversion above we'll query for that page's position.
MenuEntry.dao.savePage(pageModel.menu, pageModel.key, pageModel.position, pageModel.title, pageModel.lang, pageModel.published)
val page: CMSPageViewModel = CMSPageViewModel(CMSPage.dao.create(pageModel.key, pageModel.lang, connectedUser, pageModel.title, pageModel.content, pageModel.published), pageModel.menu)
CMSPage.dao.removeOldVersions(pageModel.key, pageModel.lang)
Json(page)
}
)
}
}
def pageDelete(key: String, language: String) = CMSAction {
MultitenantAction {
implicit request =>
CMSPage.dao.delete(key, language)
// also delete menu entries that refer to that page
log.info(s"[$connectedUser@${configuration.orgId}] Removed CMS page '$key' in '$language")
MenuEntry.dao.removePage(key, language)
Ok
}
}
def pagePreview(key: String, language: String) = CMSAction {
MultitenantAction {
implicit request =>
CMSPage.dao.find(MongoDBObject("key" -> key, "lang" -> language)).$orderby(MongoDBObject("_id" -> -1)).limit(1).toList.headOption match {
case None => NotFound(key)
case Some(pagePreview) => Ok(Template('page -> pagePreview))
}
}
}
}
case class CMSPageViewModel(dateCreated: Long,
key: String, // the key of this page (unique across all version sets of pages)
lang: String, // 2-letters ISO code of the page language
title: String, // title of the page in this language
userName: String, // creator / editor
content: String, // actual page content (text)
isSnippet: Boolean = false, // is this a snippet in the welcome page or not
published: Boolean,
position: Int,
menu: String)
object CMSPageViewModel {
def apply(cmsPage: CMSPage, menu: String)(implicit configuration: OrganizationConfiguration): CMSPageViewModel = {
// we only allow linking once to a CMSPage so we can be sure that we will only ever find at most one MenuEntry for it
val (menuEntryPosition, menuKey) = MenuEntry.dao.findOneByTargetPageKey(cmsPage.key).map { e =>
(e.position, e.menuKey)
}.getOrElse {
(MenuEntry.dao.findEntries(menu).length + 1, CMSPlugin.MAIN_MENU)
}
CMSPageViewModel(cmsPage._id.getTime, cmsPage.key, cmsPage.lang, cmsPage.title, cmsPage.userName, cmsPage.content, cmsPage.isSnippet, cmsPage.published, menuEntryPosition, menuKey)
}
val pageForm = Form(
mapping(
"dateCreated" -> of[Long],
"key" -> text.verifying(pattern("^[-a-z0-9]{3,35}$".r, error = Messages("cms.InvalidKeyValue"))),
"lang" -> nonEmptyText,
"title" -> nonEmptyText,
"userName" -> text,
"content" -> text,
"isSnippet" -> boolean,
"published" -> boolean,
"position" -> number,
"menu" -> text
)(CMSPageViewModel.apply)(CMSPageViewModel.unapply)
)
} | delving/culture-hub | modules/cms/app/controllers/cms/organization/CMS.scala | Scala | apache-2.0 | 7,806 |
package lila.i18n
import scalatags.Text.all._
import play.api.i18n.Lang
import lila.common.String.html.escapeHtml
object Translator {
object frag {
def literal(key: MessageKey, args: Seq[Any], lang: Lang): RawFrag =
translate(key, lang, I18nQuantity.Other /* grmbl */, args)
def plural(key: MessageKey, count: Count, args: Seq[Any], lang: Lang): RawFrag =
translate(key, lang, I18nQuantity(lang, count), args)
private def translate(
key: MessageKey,
lang: Lang,
quantity: I18nQuantity,
args: Seq[Any]
): RawFrag =
findTranslation(key, lang) flatMap { translation =>
val htmlArgs = escapeArgs(args)
try {
translation match {
case literal: Simple => Some(literal.format(htmlArgs))
case literal: Escaped => Some(literal.format(htmlArgs))
case plurals: Plurals => plurals.format(quantity, htmlArgs)
}
} catch {
case e: Exception =>
logger.warn(s"Failed to format html $lang/$key -> $translation (${args.toList})", e)
Some(RawFrag(key))
}
} getOrElse RawFrag(key)
private def escapeArgs(args: Seq[Any]): Seq[RawFrag] =
args.map {
case s: String => escapeHtml(s)
case r: RawFrag => r
case f: StringFrag => RawFrag(f.render)
case a => RawFrag(a.toString)
}
}
object txt {
def literal(key: MessageKey, args: Seq[Any], lang: Lang): String =
translate(key, lang, I18nQuantity.Other /* grmbl */, args)
def plural(key: MessageKey, count: Count, args: Seq[Any], lang: Lang): String =
translate(key, lang, I18nQuantity(lang, count), args)
private def translate(
key: MessageKey,
lang: Lang,
quantity: I18nQuantity,
args: Seq[Any]
): String =
findTranslation(key, lang) flatMap { translation =>
try {
translation match {
case literal: Simple => Some(literal.formatTxt(args))
case literal: Escaped => Some(literal.formatTxt(args))
case plurals: Plurals => plurals.formatTxt(quantity, args)
}
} catch {
case e: Exception =>
logger.warn(s"Failed to format txt $lang/$key -> $translation (${args.toList})", e)
Some(key)
}
} getOrElse key
}
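  // look up the translation for the given key and language, falling back to the default bundle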
private[i18n] def findTranslation(key: MessageKey, lang: Lang): Option[Translation] =
Registry.all.get(lang).flatMap(t => Option(t get key)) orElse
Option(Registry.default.get(key))
}
| luanlv/lila | modules/i18n/src/main/Translator.scala | Scala | mit | 2,586 |
/**
* Original work: SecureSocial (https://github.com/jaliss/securesocial)
* Copyright 2013 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Derivative work: Silhouette (https://github.com/mohiva/play-silhouette)
* Modifications Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.api
/**
* This trait represents an authenticated user.
*/
trait Identity
| mohiva/play-silhouette | silhouette/app/com/mohiva/play/silhouette/api/Identity.scala | Scala | apache-2.0 | 982 |
/* This file implements a trinarized convolution summation
*/
package binconcifar
import chisel3._
import chisel3.util._
import scala.collection.mutable.ArrayBuffer
object TriConvSum {
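  // splits the window wires into those carrying weight +1 and those carrying weight -1; zero-weight wires are dropped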
def mapToWires[ T <: Bits ](
conv : Seq[Seq[Seq[Int]]],
currData : Seq[Seq[Seq[T]]]
) : (Seq[T], Seq[T]) = {
val posNums = ArrayBuffer[T]()
val negNums = ArrayBuffer[T]()
for ( lyr1 <- conv.zip( currData ) ) {
for ( lyr2 <- lyr1._1.zip( lyr1._2 ) ) {
for ( lyr3 <- lyr2._1.zip( lyr2._2 ) ) {
if ( lyr3._1 == 1 )
posNums += lyr3._2
if ( lyr3._1 == -1 )
negNums += lyr3._2
}
}
}
( posNums.toSeq, negNums.toSeq )
}
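  // fans the input out through noReg register stages, growing the number of copies geometrically until noOut are available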
def pipelineFanout[ T <: Bits ](
nbl : List[Vec[Vec[Vec[T]]]],
noReg : Int,
noOut : Int ) : List[Vec[Vec[Vec[T]]]] = {
val noInLyr = ( 1 to noReg ).map( n => {
math.round( math.exp( math.log( noOut ) * n / noReg ) ).toInt
}).toList
val nblLyrs = ArrayBuffer[List[Vec[Vec[Vec[T]]]]]()
nblLyrs += nbl
for ( n <- noInLyr ) {
val lastLyr = nblLyrs.last
val fanout = ( n / lastLyr.size ).toInt
val fanouts = ( 0 until lastLyr.size ).map( i => {
if ( i >= n % lastLyr.size )
fanout
else
fanout + 1
}).toList
val newLyr = fanouts.zipWithIndex.map( f => {
List.fill( f._1 ) { RegNext( lastLyr( f._2 ) ) }
}).toList.reduce( _ ++ _ )
nblLyrs += newLyr
}
nblLyrs.last
}
}
private class ParrallelTriConvSum (
dtype : SInt,
weights : Seq[Seq[Seq[Seq[Int]]]],
fanoutReg : Int
) extends Module {
val io = IO( new Bundle {
val dataIn = Input(Vec( weights(0).size, Vec( weights(0)(0).size, Vec( weights(0)(0)(0).size, dtype.cloneType ))))
val dataOut = Output(Vec( weights.size, dtype.cloneType ))
})
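  // reduces the +1 and -1 wire lists with a pipelined tree of up-to-three-input adders, folding the negatives in by subtraction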
def computeSum( posNums : Seq[SInt], negNums : Seq[SInt] ) : (SInt, Int, Int) = {
var plusList = posNums.toList
var minusList = negNums.toList
val zero = 0.U.asTypeOf( dtype )
var opsTotal = 0
var stages = 0
while ( plusList.size > 1 || minusList.size > 0 ) {
      // group in threes and split off any leftover single element, which needs no adder of its own
val plusOps : (List[List[SInt]], List[List[SInt]]) = plusList.grouped(3).toList.partition( _.size > 1 )
plusList = plusOps._1.map( x => RegNext( x.reduce( ( a, b ) => a + b ) ) ).toList
opsTotal += plusOps._1.size
val negOps = minusList.grouped(3).toList.partition( _.size > 1 )
opsTotal += negOps._1.size
minusList = negOps._1.map( x => RegNext( x.reduce( _ + _ ) ) ).toList
      // a single element may be left over from the plus list, the minus list, both, or neither
if ( plusOps._2.size == 1 && negOps._2.size == 1 ) {
// both
plusList = RegNext( plusOps._2.head.head - negOps._2.head.head ) :: plusList
opsTotal += 1
} else if ( plusOps._2.size == 1 ) {
// just plus
plusList = RegNext( plusOps._2.head.head ) :: plusList
opsTotal += 1
} else if ( negOps._2.size == 1 ) {
// just minus
          // either negate it into the plus list or keep it in the minus list, whichever keeps the groups of three full
if ( minusList.size % 3 == 0 )
plusList = RegNext( zero - negOps._2.head.head ) :: plusList
else
minusList = RegNext( negOps._2.head.head ) :: minusList
opsTotal += 1
} // else neither so ignore
stages += 1
}
if ( plusList.size == 0 )
return ( zero, 0, 0 )
return ( plusList.head, stages, opsTotal )
}
val currData = ShiftRegister( io.dataIn, fanoutReg )
val outSums = weights.map( conv => {
val numsOut = TriConvSum.mapToWires( conv, currData )
val res = computeSum( numsOut._1, numsOut._2 )
( res._1, res._2 )
})
val outSumLat = outSums.map( _._2 ).max
io.dataOut := Vec( outSums.map( r => ShiftRegister( r._1, outSumLat - r._2 )) )
val latency = outSumLat + fanoutReg
}
private class SerialPipelinedAdderTree(
dtype : UInt,
addLen : Int,
subLen : Int,
bitWidth : Int
) extends Module {
val io = IO( new Bundle {
val start = Input( Bool() )
val posNums = Input( Vec( addLen, dtype ) )
val negNums = Input( Vec( subLen, dtype ) )
val sumOut = Output( dtype )
val startOut = Output( Bool() )
})
var plusList = io.posNums.toList.map( x => { ( x, io.start ) } )
var minusList = io.negNums.toList.map( x => { ( x, io.start ) } )
var stages = 0
while ( plusList.size > 1 || minusList.size > 0 ) {
val plusOps = plusList.grouped( 2 ).toList.partition( _.size > 1 )
plusList = plusOps._1.map( x => SerialAdder.add( x(0)._1, x(1)._1, x(0)._2, bitWidth ) )
val minusOps = minusList.grouped( 2 ).toList.partition( _.size > 1 )
minusList = minusOps._1.map( x => SerialAdder.add( x(0)._1, x(1)._1, x(0)._2, bitWidth ) )
if ( plusOps._2.size == 1 && minusOps._2.size == 1 ) {
plusList = SerialAdder.sub(
plusOps._2.head.head._1,
minusOps._2.head.head._1,
plusOps._2.head.head._2,
bitWidth
) :: plusList
} else if ( plusOps._2.size == 1 )
plusList = (
RegNext( plusOps._2.head.head._1 ),
RegNext( plusOps._2.head.head._2 ) ) :: plusList
else if ( minusOps._2.size == 1 ) {
plusList = SerialAdder.sub(
0.U( bitWidth.W ),
minusOps._2.head.head._1,
minusOps._2.head.head._2,
bitWidth
) :: plusList
}
stages += 1
}
val latency = stages
if ( plusList.size == 0 ) {
io.sumOut := 0.U( bitWidth.W )
io.startOut := true.B
} else {
io.sumOut := plusList.head._1
io.startOut := plusList.head._2
}
}
private class SerialTriConvSum (
dtype : SInt,
weights : Seq[Seq[Seq[Seq[Int]]]],
bitWidth : Int,
fanoutReg : Int
) extends Module {
val io = IO( new Bundle {
val start = Input( Bool() )
val dataIn = Input( Vec( weights(0).size, Vec( weights(0)(0).size, Vec( weights(0)(0)(0).size, dtype.cloneType ))))
val dataOut = Output(Vec( weights.size, dtype.cloneType ))
})
val inWidth = dtype.cloneType.getWidth
val nIter = inWidth / bitWidth
val log2BW = log2Ceil( bitWidth )
val log2Iter = log2Ceil( nIter )
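  // the input is consumed one bitWidth-wide slice per cycle, least-significant slice first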
val nibbleCntr = RegInit( 0.U( log2Iter.W ) )
when ( nibbleCntr > 0.U || io.start ) {
nibbleCntr := nibbleCntr + 1.U
}
val nibReg = RegNext( RegNext( nibbleCntr ) )
val dataNibble = Wire( Vec(
weights(0).size,
Vec( weights(0)(0).size,
Vec( weights(0)(0)(0).size,
0.U( bitWidth.W ).cloneType ))))
for ( x <- 0 until nIter ) {
for ( i <- 0 until weights(0).size ) {
for ( j <- 0 until weights(0)(0).size ) {
for ( k <- 0 until weights(0)(0)(0).size ) {
val thisNibble = RegNext( RegNext( io.dataIn( i )( j )( k )( bitWidth * ( x + 1 ) - 1, bitWidth*x ) ) )
if ( x > 0 ) {
when ( nibReg === x.U ) {
dataNibble( i )( j )( k ) := thisNibble
}
} else
dataNibble( i )( j )( k ) := thisNibble
}
}
}
}
val startReg = ShiftRegister( io.start, fanoutReg + 1 )
def computeSum( posNums : Seq[UInt], negNums : Seq[UInt], startReg : Bool ) : (UInt, Bool, Int) = {
var plusList = posNums.toList.map( x => { ( x, startReg ) } )
var minusList = negNums.toList.map( x => { ( x, startReg ) } )
var stages = 0
while ( plusList.size > 1 || minusList.size > 0 ) {
val plusOps = plusList.grouped( 2 ).toList.partition( _.size > 1 )
plusList = plusOps._1.map( x => SerialAdder.add( x(0)._1, x(1)._1, x(0)._2, bitWidth ) )
val minusOps = minusList.grouped( 2 ).toList.partition( _.size > 1 )
minusList = minusOps._1.map( x => SerialAdder.add( x(0)._1, x(1)._1, x(0)._2, bitWidth ) )
if ( plusOps._2.size == 1 && minusOps._2.size == 1 ) {
plusList = SerialAdder.sub(
plusOps._2.head.head._1,
minusOps._2.head.head._1,
plusOps._2.head.head._2,
bitWidth
) :: plusList
} else if ( plusOps._2.size == 1 )
plusList = (
RegNext( plusOps._2.head.head._1 ),
RegNext( plusOps._2.head.head._2 ) ) :: plusList
else if ( minusOps._2.size == 1 ) {
plusList = SerialAdder.sub(
0.U( bitWidth.W ),
minusOps._2.head.head._1,
minusOps._2.head.head._2,
bitWidth
) :: plusList
}
stages += 1
}
if ( plusList.size == 0 )
return ( 0.U( bitWidth.W ), true.B, 0 )
return ( plusList.head._1, plusList.head._2, stages )
}
/*
val dataFanout = pipelineFanout( List(dataNibble), fanoutReg, weights.size )
val outSums = weights.zip( dataFanout ).map( conv => {
val numsOut = TriConvSum.mapToWires( conv._1, conv._2 )
computeSum( numsOut._1, numsOut._2, startReg )
})
*/
val dataFanout = ShiftRegister( dataNibble, fanoutReg )
val outSums = weights.map( conv => {
val numsOut = TriConvSum.mapToWires( conv, dataFanout )
computeSum( numsOut._1, numsOut._2, startReg )
/*
val sPipeAdder = Module( new SerialPipelinedAdderTree(
numsOut._1.head.cloneType,
numsOut._1.size,
numsOut._2.size,
bitWidth
))
sPipeAdder.io.start := startReg
sPipeAdder.io.posNums := Vec( numsOut._1 )
sPipeAdder.io.negNums := Vec( numsOut._2 )
( sPipeAdder.io.sumOut, sPipeAdder.io.startOut, sPipeAdder.latency )
*/
})
val nibbleLat = outSums.map( _._3 ).max
val nibbleOut = outSums.map( r => ShiftRegister( r._1, nibbleLat - r._3 ))
val nibbleStarts = outSums.map( r => ShiftRegister( r._2, nibbleLat - r._3 ))
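  // reassemble the bitWidth-wide result slices back into full-width outputs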
val unnibble = nibbleOut.zip( nibbleStarts ).map( x => {
val nibCntr = RegInit( 0.U( log2Iter.W ) )
when ( RegNext( x._2 ) || nibCntr > 0.U ) {
nibCntr := nibCntr + 1.U
}
val outReg = Reg( Vec( nIter, 0.U( bitWidth.W ).cloneType ) )
for ( i <- 0 until nIter ) {
when ( nibCntr === i.U ) {
outReg( i ) := x._1
}
}
outReg.reverse.reduce( _ ## _ ).asTypeOf( dtype )
})
val latency = nibbleLat + 3 + fanoutReg
io.dataOut := Vec( unnibble )
}
/* take in 1, 0, -1 weights
* perform the convolution on them
*/
class TriConvSum (
val dtype : SInt,
val weights : Seq[Seq[Seq[Seq[Int]]]],
tput : Double,
fanoutReg :Int = 2
) extends NNLayer(
dtype,
math.ceil( tput ),
weights(0)(0)(0).size * weights(0)(0).size * weights(0).size,
weights.size,
math.ceil( tput ).toInt
) {
io.dataIn.ready := true.B // always ready with tput = 1
val tPutRounded = math.ceil( throughput ).toInt
val inWidth = dtype.cloneType.getWidth
val bitWidth = math.ceil( inWidth * tput ).toInt
val log2Iter = math.max( log2Ceil( math.ceil( inWidth.toFloat / bitWidth ).toInt ), 1 ).toInt
val rdyCnt = RegInit( 0.U( log2Iter.W ) ) // NOTE: this is broken for not powers of 2
if ( bitWidth < inWidth ) {
// set ready signals
when ( io.dataIn.valid || rdyCnt > 0.U ) {
rdyCnt := rdyCnt + 1.U
}
io.dataIn.ready := ( rdyCnt === (( 1 << log2Iter) - 1).U || ( !io.dataIn.valid && rdyCnt === 0.U ) )
}
val noDelayReg = 1
val startReg = ShiftRegister( io.dataIn.valid && rdyCnt === 0.U, noDelayReg )
val dataVec = ShiftRegister( inIOToVVV( weights(0)(0).size, weights(0)(0)(0).size ), noDelayReg )
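  // one convolution unit per window processed each cycle; bit-serial when bitWidth < inWidth, fully parallel otherwise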
val convRes = ( 0 until tPutRounded ).map( idx => {
if ( bitWidth < inWidth ) {
val pConv = Module( new SerialTriConvSum( dtype, weights, bitWidth, fanoutReg ) )
pConv.io.start := startReg
pConv.io.dataIn := Vec( ( 0 until weights(0).size ).map( wIdx => {
dataVec( wIdx + weights(0).size * idx )
}))
( pConv.io.dataOut, pConv.latency )
} else {
val pConv = Module( new ParrallelTriConvSum( dtype, weights, 0 ) )
pConv.io.dataIn := Vec( ( 0 until weights(0).size ).map( wIdx => {
dataVec( wIdx + weights(0).size * idx )
}))
( pConv.io.dataOut, pConv.latency )
}
})
val latency = convRes.map( _._2 ).max + noDelayReg
val convOut = convRes.map( o => {
ShiftRegister( o._1, latency - noDelayReg - o._2 )
}).reduce( (a, b) => Vec( a ++ b ) )
val convValid = ShiftRegister( io.dataIn.valid && io.dataIn.ready, latency, false.B, true.B )
io.dataOut.bits := convOut
io.dataOut.valid := convValid
}
| da-steve101/binary_connect_cifar | src/main/scala/binconcifar/TriConvSum.scala | Scala | gpl-3.0 | 12,311 |
package org.jetbrains.plugins.scala
package annotator
package modifiers
import com.intellij.psi.PsiElement
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.annotations.Nls
import org.jetbrains.plugins.scala.annotator.quickfix.ModifierQuickFix
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaModifier
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScAccessModifier, ScModifierList}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScBlock
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter}
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScDeclaration, ScExtensionBody, ScPatternDefinition, ScTypeAlias, ScValueDeclaration}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.{ScEarlyDefinitions, ScModifierListOwner, ScPackaging}
import scala.collection.mutable
/**
* @author Aleksander Podkhalyuzin
* @since 25.03.2009
*/
private[annotator] object ModifierChecker {
import ScalaModifier._
private val IllegalCombinations = {
val modifiers = Seq(
(Final, Sealed),
(Private, Protected)
)
modifiers ++ modifiers.map {
case (left, right) => (right, left)
}
}
def checkModifiers(modifierList: ScModifierList)
(implicit holder: ScalaAnnotationHolder): Unit = modifierList.getParent match {
case owner: ScModifierListOwner =>
val modifiers = mutable.HashSet.empty[ScalaModifier]
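      // records the modifier and reports an error if it repeats or clashes with an already-present one; returns true when the combination is still legal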
def checkDuplicates(element: PsiElement, modifier: ScalaModifier): Boolean = {
val maybeIllegalModifier = IllegalCombinations.collectFirst {
case (`modifier`, illegalModifier) if owner.hasModifierPropertyScala(illegalModifier.text()) => illegalModifier
}.orElse {
if (modifiers.add(modifier)) None else Some(modifier)
}
for {
illegalModifier <- maybeIllegalModifier
message = ScalaBundle.message("illegal.modifiers.combination", modifier.text(), illegalModifier.text())
} createErrorWithQuickFix(message, element, owner, modifier)
maybeIllegalModifier.isEmpty
}
for (modifier <- modifierList.getNode.getChildren(null)) {
modifier.getPsi match {
case accessModifier: ScAccessModifier => // todo: check private with final or sealed combination.
val maybeModifier = if (accessModifier.isPrivate) Some(Private) else if (accessModifier.isProtected) Some(Protected) else None
maybeModifier.foreach { modifier =>
checkDuplicates(accessModifier, modifier)
if (owner.getContext.is[ScBlock]) {
createErrorWithQuickFix(
ScalaBundle.message("access.modifier.is.not.allowed.here", modifier.text()),
accessModifier,
owner,
modifier
)
}
}
case modifierPsi =>
modifier.getText match {
case LAZY =>
owner match {
case _: ScPatternDefinition => checkDuplicates(modifierPsi, Lazy)
case _: ScParameter =>
createErrorWithQuickFix(
ScalaBundle.message("lazy.modifier.is.not.allowed.with.param"),
modifierPsi,
owner,
Lazy
)
case _: ScValueDeclaration =>
if (!modifierList.isInScala3File) {
createErrorWithQuickFix(
ScalaBundle.message("lazy.values.may.not.be.abstract"),
modifierPsi,
owner,
Lazy
)
}
case _ =>
createErrorWithQuickFix(
ScalaBundle.message("lazy.modifier.is.not.allowed.here"),
modifierPsi,
owner,
Lazy
)
}
case FINAL =>
owner match {
case d: ScDeclaration if !d.hasAnnotation("scala.native") =>
createErrorWithQuickFix(
ScalaBundle.message("final.modifier.not.with.declarations"),
modifierPsi,
owner,
Final
)
case _: ScTrait =>
createErrorWithQuickFix(
ScalaBundle.message("final.modifier.not.with.trait"),
modifierPsi,
owner,
Final
)
case _: ScClass => checkDuplicates(modifierPsi, Final)
case _: ScObject => checkDuplicates(modifierPsi, Final)
case e: ScMember if e.getParent.is[ScTemplateBody, ScEarlyDefinitions] =>
val redundant = (e.containingClass, e) match {
case (_, valMember: ScPatternDefinition) if valMember.typeElement.isEmpty &&
valMember.pList.simplePatterns => false // constant value definition, see SCL-11500
case (cls, _) if cls.hasFinalModifier => true
case _ => false
}
if (redundant) {
if (checkDuplicates(modifierPsi, Final)) {
createWarningWithQuickFix(
ScalaBundle.message("final.modifier.is.redundant.with.final.parents"),
modifierPsi,
owner,
Final
)
}
} else {
checkDuplicates(modifierPsi, Final)
}
case e: ScMember if e.getParent.is[ScalaFile] =>
checkDuplicates(modifierPsi, Final)
case e: ScClassParameter =>
if (PsiTreeUtil.getParentOfType(e, classOf[ScTypeDefinition]).hasFinalModifier) {
if (checkDuplicates(modifierPsi, Final)) {
createWarningWithQuickFix(
ScalaBundle.message("final.modifier.is.redundant.with.final.parents"),
modifierPsi,
owner,
Final
)
}
} else {
checkDuplicates(modifierPsi, Final)
}
case _ =>
createErrorWithQuickFix(
ScalaBundle.message("final.modifier.is.not.allowed.here"),
modifierPsi,
owner,
Final
)
}
case SEALED =>
owner match {
case _: ScClass | _: ScTrait | _: ScClassParameter => checkDuplicates(modifierPsi, Sealed)
case _ =>
createErrorWithQuickFix(
ScalaBundle.message("sealed.modifier.is.not.allowed.here"),
modifierPsi,
owner,
Sealed
)
}
case ABSTRACT =>
owner match {
case _: ScClass => checkDuplicates(modifierPsi, Abstract)
case _: ScTrait => if (checkDuplicates(modifierPsi, Abstract)) {
createWarningWithQuickFix(
ScalaBundle.message("abstract.modifier.redundant.fot.traits"),
modifierPsi,
owner,
Abstract
)
}
case member: ScMember if !member.isInstanceOf[ScTemplateBody] &&
member.getParent.is[ScTemplateBody] && owner.hasModifierPropertyScala(OVERRIDE) =>
// 'abstract override' modifier only allowed for members of traits
if (!member.containingClass.is[ScTrait]) {
createErrorWithQuickFix(
ScalaBundle.message("abstract.override.modifier.is.not.allowed"),
modifierPsi,
owner,
Abstract
)
} else {
checkDuplicates(modifierPsi, Abstract)
}
case _ =>
createErrorWithQuickFix(
ScalaBundle.message("abstract.modifier.is.not.allowed"),
modifierPsi,
owner,
Abstract
)
}
case "override" =>
owner match {
case o: ScObject if o.containingClass != null => //allowed
case _: ScTypeDefinition =>
createErrorWithQuickFix(
ScalaBundle.message("override.modifier.is.not.allowed.for.classes"),
modifierPsi,
owner,
Override
)
case member: ScMember if member.getParent.is[ScTemplateBody, ScEarlyDefinitions, ScExtensionBody] =>
checkDuplicates(modifierPsi, Override)
case _: ScClassParameter => checkDuplicates(modifierPsi, Override)
case _ =>
createErrorWithQuickFix(
ScalaBundle.message("override.modifier.is.not.allowed"),
modifierPsi,
owner,
Override
)
}
case IMPLICIT =>
owner match {
case c@(_: ScClass | _: ScObject) =>
val onTopLevel = c.getContext match {
case file: ScalaFile if !file.isScriptFile && !file.isWorksheetFile => true
case _: ScPackaging => true
case _ => false
}
if (onTopLevel && !owner.isInScala3Module) {
createErrorWithQuickFix(
ScalaBundle.message("implicit.modifier.cannot.be.used.for.top.level.objects"),
modifierPsi,
owner,
Implicit
)
} else
c match {
case clazz: ScClass =>
def errorResult(): Unit = createErrorWithQuickFix(
ScalaBundle.message("implicit.class.must.have.a.primary.constructor.with.one.argument"),
modifierPsi,
owner,
Implicit
)
clazz.constructor match {
case Some(constr) =>
val clauses = constr.parameterList.clauses
if (clauses.isEmpty) errorResult()
else {
val parameters = clauses.head.parameters
if (parameters.length != 1) errorResult()
else if (parameters.head.isRepeatedParameter) errorResult()
else if (clauses.length > 2 || (clauses.length == 2 && !clauses(1).isImplicit)) errorResult()
}
case _ => errorResult()
}
case _ =>
}
case _: ScTrait | _: ScTypeAlias =>
createErrorWithQuickFix(
ScalaBundle.message("implicit.modifier.can.be.used.only.for"),
modifierPsi,
owner,
Implicit
)
case _ => checkDuplicates(modifierPsi, Implicit)
}
case _ =>
}
}
}
case _ =>
}
private def createWarningWithQuickFix(@Nls message: String, element: PsiElement,
owner: ScModifierListOwner, modifier: ScalaModifier)
(implicit holder: ScalaAnnotationHolder): Unit = {
holder.createWarningAnnotation(element, message, new ModifierQuickFix.Remove(owner, null, modifier))
}
private def createErrorWithQuickFix(@Nls message: String, element: PsiElement,
owner: ScModifierListOwner, modifier: ScalaModifier)
(implicit holder: ScalaAnnotationHolder): Unit = {
holder.createErrorAnnotation(element, message, new ModifierQuickFix.Remove(owner, null, modifier))
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/modifiers/ModifierChecker.scala | Scala | apache-2.0 | 13,225 |
package inloopio.util.io
import java.io._
class DataOutputStream(out: OutputStream) extends FilterOutputStream(out) with DataOutput {
/**
* The number of bytes written to the data output stream so far.
* If this counter overflows, it will be wrapped to Integer.MAX_VALUE.
*/
var written = 0
/**
* byteArr is initialized on demand by writeUTF
*/
private var byteArr: Array[Byte] = null
private val any = new AnyRef
/**
* Increases the written counter by the specified value
* until it reaches Integer.MAX_VALUE.
*/
private def incCount(value: Int) {
val temp = written + value
if (temp < 0) written = Integer.MAX_VALUE
else written = temp
}
/**
* Writes the specified byte (the low eight bits of the argument
* <code>b</code>) to the underlying output stream. If no exception
* is thrown, the counter <code>written</code> is incremented by
* <code>1</code>.
* <p>
* Implements the <code>write</code> method of <code>OutputStream</code>.
*
* @param b the <code>byte</code> to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
override def write(b: Int): Unit = any synchronized {
out.write(b)
incCount(1)
}
/**
* Writes <code>len</code> bytes from the specified byte array
* starting at offset <code>off</code> to the underlying output stream.
* If no exception is thrown, the counter <code>written</code> is
* incremented by <code>len</code>.
*
* @param b the data.
* @param off the start offset in the data.
* @param len the number of bytes to write.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
override def write(b: Array[Byte], off: Int, len: Int): Unit = any synchronized {
out.write(b, off, len)
incCount(len)
}
/**
* Flushes this data output stream. This forces any buffered output
* bytes to be written out to the stream.
* <p>
* The <code>flush</code> method of <code>DataOutputStream</code>
* calls the <code>flush</code> method of its underlying output stream.
*
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
* @see java.io.OutputStream#flush
*/
@throws(classOf[IOException])
override def flush = out.flush
/**
* Writes a <code>boolean</code> to the underlying output stream as
* a 1-byte value. The value <code>true</code> is written out as the
* value <code>(byte)1</code> the value <code>false</code> is
* written out as the value <code>(byte)0</code>. If no exception is
* thrown, the counter <code>written</code> is incremented by
* <code>1</code>.
*
* @param v a <code>boolean</code> value to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeBoolean(v: Boolean) {
out.write(if (v) 1 else 0)
incCount(1)
}
/**
* Writes out a <code>byte</code> to the underlying output stream as
* a 1-byte value. If no exception is thrown, the counter
* <code>written</code> is incremented by <code>1</code>.
*
* @param v a <code>byte</code> value to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeByte(v: Int) {
out.write(v)
incCount(1)
}
/**
* Writes a <code>short</code> to the underlying output stream as two
* bytes, high byte first. If no exception is thrown, the counter
* <code>written</code> is incremented by <code>2</code>.
*
* @param v a <code>short</code> to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeShort(v: Int) {
out.write((v >>> 8) & 0xFF)
out.write((v >>> 0) & 0xFF)
incCount(2)
}
/**
* Writes a <code>char</code> to the underlying output stream as a
* 2-byte value, high byte first. If no exception is thrown, the
* counter <code>written</code> is incremented by <code>2</code>.
*
* @param v a <code>char</code> value to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeChar(v: Int) {
out.write((v >>> 8) & 0xFF)
out.write((v >>> 0) & 0xFF)
incCount(2)
}
/**
   * Writes an <code>int</code> to the underlying output stream as four
   * bytes, low byte first (note that this differs from <code>java.io.DataOutputStream</code>,
   * which writes the high byte first). If no exception is thrown, the counter
   * <code>written</code> is incremented by <code>4</code>.
*
* @param v an <code>int</code> to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeInt(v: Int) {
out.write((v >>> 0) & 0xFF)
out.write((v >>> 8) & 0xFF)
out.write((v >>> 16) & 0xFF)
out.write((v >>> 24) & 0xFF)
incCount(4)
}
private val writeBuffer = new Array[Byte](8)
/**
* Writes a <code>long</code> to the underlying output stream as eight
* bytes, high byte first. In no exception is thrown, the counter
* <code>written</code> is incremented by <code>8</code>.
*
* @param v a <code>long</code> to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeLong(v: Long) {
for (i <- 0 to 7) {
writeBuffer(i) = (v >>> (56 - i * 8)).asInstanceOf[Byte]
}
out.write(writeBuffer, 0, 8)
incCount(8)
}
/**
* Converts the float argument to an <code>int</code> using the
* <code>floatToIntBits</code> method in class <code>Float</code>,
* and then writes that <code>int</code> value to the underlying
* output stream as a 4-byte quantity, high byte first. If no
* exception is thrown, the counter <code>written</code> is
* incremented by <code>4</code>.
*
* @param v a <code>float</code> value to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
* @see java.lang.Float#floatToIntBits(float)
*/
@throws(classOf[IOException])
def writeFloat(v: Float) {
writeInt(java.lang.Float.floatToIntBits(v))
}
/**
* Converts the double argument to a <code>long</code> using the
* <code>doubleToLongBits</code> method in class <code>Double</code>,
* and then writes that <code>long</code> value to the underlying
* output stream as an 8-byte quantity, high byte first. If no
* exception is thrown, the counter <code>written</code> is
* incremented by <code>8</code>.
*
* @param v a <code>double</code> value to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
* @see java.lang.Double#doubleToLongBits(double)
*/
@throws(classOf[IOException])
def writeDouble(v: Double) {
writeLong(java.lang.Double.doubleToLongBits(v))
}
/**
* Writes out the string to the underlying output stream as a
* sequence of bytes. Each character in the string is written out, in
* sequence, by discarding its high eight bits. If no exception is
* thrown, the counter <code>written</code> is incremented by the
* length of <code>s</code>.
*
* @param s a string of bytes to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeBytes(s: String) {
for (c <- s) out.write(c.asInstanceOf[Byte])
incCount(s.length)
}
/**
* Writes a string to the underlying output stream as a sequence of
* characters. Each character is written to the data output stream as
* if by the <code>writeChar</code> method. If no exception is
* thrown, the counter <code>written</code> is incremented by twice
* the length of <code>s</code>.
*
* @param s a <code>String</code> value to be written.
* @exception IOException if an I/O error occurs.
* @see java.io.DataOutputStream#writeChar(int)
* @see java.io.FilterOutputStream#out
*/
@throws(classOf[IOException])
def writeChars(s: String) {
for (v <- s) {
out.write((v >>> 8) & 0xFF)
out.write((v >>> 0) & 0xFF)
}
incCount(s.length * 2)
}
/**
* Writes a string to the underlying output stream using
* <a href="DataInput.html#modified-utf-8">modified UTF-8</a>
* encoding in a machine-independent manner.
* <p>
* First, two bytes are written to the output stream as if by the
* <code>writeShort</code> method giving the number of bytes to
* follow. This value is the number of bytes actually written out,
* not the length of the string. Following the length, each character
* of the string is output, in sequence, using the modified UTF-8 encoding
* for the character. If no exception is thrown, the counter
* <code>written</code> is incremented by the total number of
* bytes written to the output stream. This will be at least two
* plus the length of <code>str</code>, and at most two plus
* thrice the length of <code>str</code>.
*
* @param str a string to be written.
* @exception IOException if an I/O error occurs.
*/
@throws(classOf[IOException])
def writeUTF(str: String) {
DataOutputStream.writeUTF(str, this)
}
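  // Worked example (illustrative): modified UTF-8 uses 1 byte for U+0001..U+007F, 2 bytes up to
  // U+07FF and 3 bytes above that, so writeUTF("aé") writes the 2-byte length 0x00 0x03 followed
  // by 3 data bytes ('a' -> 1 byte, 'é' -> 2 bytes), incrementing `written` by 5.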
/**
* Returns the current value of the counter <code>written</code>,
* the number of bytes written to this data output stream so far.
* If the counter overflows, it will be wrapped to Integer.MAX_VALUE.
*
* @return the value of the <code>written</code> field.
* @see java.io.DataOutputStream#written
*/
def size = written
}
object DataOutputStream {
/**
* Writes a string to the specified DataOutput using
* <a href="DataInput.html#modified-utf-8">modified UTF-8</a>
* encoding in a machine-independent manner.
* <p>
* First, two bytes are written to out as if by the <code>writeShort</code>
* method giving the number of bytes to follow. This value is the number of
* bytes actually written out, not the length of the string. Following the
* length, each character of the string is output, in sequence, using the
* modified UTF-8 encoding for the character. If no exception is thrown, the
* counter <code>written</code> is incremented by the total number of
* bytes written to the output stream. This will be at least two
* plus the length of <code>str</code>, and at most two plus
* thrice the length of <code>str</code>.
*
* @param str a string to be written.
* @param out destination to write to
* @return The number of bytes written out.
* @exception IOException if an I/O error occurs.
*/
@throws(classOf[IOException])
def writeUTF(str: String, out: DataOutput) = {
var utflen = 0
var count = 0
/* use charAt instead of copying String to char array */
for (c <- str) {
if ((c >= 0x0001) && (c <= 0x007F)) {
utflen += 1
} else if (c > 0x07FF) {
utflen += 3
} else {
utflen += 2
}
}
if (utflen > 65535)
throw new UTFDataFormatException(
"encoded string too long: " + utflen + " bytes")
var byteArr: Array[Byte] = null
if (out.isInstanceOf[DataOutputStream]) {
val dos = out.asInstanceOf[DataOutputStream]
if (dos.byteArr == null || (dos.byteArr.length < (utflen + 2)))
dos.byteArr = new Array[Byte]((utflen * 2) + 2)
byteArr = dos.byteArr
} else {
byteArr = new Array[Byte](utflen + 2)
}
byteArr(count) = ((utflen >>> 8) & 0xFF).asInstanceOf[Byte]
count += 1
byteArr(count) = ((utflen >>> 0) & 0xFF).asInstanceOf[Byte]
count += 1
var break = false
for (c <- str) {
if (!((c >= 0x0001) && (c <= 0x007F))) break = true
if (!break) {
byteArr(count) = c.asInstanceOf[Byte]
count += 1
} else {
if ((c >= 0x0001) && (c <= 0x007F)) {
byteArr(count) = c.asInstanceOf[Byte]
count += 1
} else if (c > 0x07FF) {
byteArr(count) = (0xE0 | ((c >> 12) & 0x0F)).asInstanceOf[Byte]
count += 1
byteArr(count) = (0x80 | ((c >> 6) & 0x3F)).asInstanceOf[Byte]
count += 1
byteArr(count) = (0x80 | ((c >> 0) & 0x3F)).asInstanceOf[Byte]
count += 1
} else {
byteArr(count) = (0xC0 | ((c >> 6) & 0x1F)).asInstanceOf[Byte]
count += 1
byteArr(count) = (0x80 | ((c >> 0) & 0x3F)).asInstanceOf[Byte]
count += 1
}
}
}
out.write(byteArr, 0, utflen + 2)
utflen + 2
}
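  // Usage sketch (illustrative): for a pure-ASCII string the byte count equals the character
  // count, e.g. DataOutputStream.writeUTF("abc", out) writes 0x00 0x03 'a' 'b' 'c' and returns 5.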
}
| dcaoyuan/inloopio-libs | inloopio-util/src/main/scala/inloopio/util/io/DataOutputStream.scala | Scala | bsd-3-clause | 13,053 |
package com.azavea.gtfs
case class RouteRecord(
id: String,
shortName: String,
longName: String,
routeType: RouteType,
agencyId: Option[String] = None,
description: Option[String] = None,
url: Option[String] = None,
color: Option[String] = None,
textColor: Option[String] = None
)
| WorldBank-Transport/open-transit-indicators | scala/gtfs/src/main/scala/com/azavea/gtfs/RouteRecord.scala | Scala | gpl-3.0 | 300 |
package com.avsystem.scex
package compiler.presentation.ast
sealed trait Name {
val name: String
def isTerm = false
def isType = false
}
case class TermName(name: String) extends Name {
override def isTerm = true
}
object TermName {
final val WILDCARD = TermName("_")
final val ERROR = TermName("<error>")
final val EMPTY = TermName("<empty>")
final val PACKAGE = TermName("<package>")
final val CONSTRUCTOR = TermName("<init>")
final val ROOTPKG = TermName("_root_")
}
case class TypeName(name: String) extends Name {
override def isType = true
}
object TypeName {
final val WILDCARD = TypeName("_")
final val ERROR = TypeName("<error>")
final val EMPTY = TypeName("<empty>")
final val PACKAGE = TypeName("<package>")
final val WILDCARD_STAR = TypeName("_*")
}
| pnf/scex | scex-core/src/main/scala/com/avsystem/scex/compiler/presentation/ast/Name.scala | Scala | apache-2.0 | 801 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app
package object nlp {
  /** Mapping from an annotation class (usually stored in an attr) to the DocumentAnnotator from which it can be obtained. */
type DocumentAnnotatorMap = collection.Map[Class[_], () => DocumentAnnotator]
/** Given a sequence of strings describing labels in IOB format, such as O I-PER I-LOC B-LOC I-LOC O I-ORG,
(where I, B prefixes are separated by a dash from the type suffix)
return a sequence of tuples indicating span start, length and label suffix, such as (3, 2, "LOC"). */
def iobBoundaries(labels:Seq[String]): Seq[(Int,Int,String)] = {
val result = new scala.collection.mutable.ArrayBuffer[(Int,Int,String)]
val strings = labels.map(_.split('-'))
val iobs = strings.map(_.apply(0))
val types = strings.map(a => if (a.length > 1) a(1) else "")
var start = -1; var prevType = ""
for (i <- 0 until labels.length) {
val atBoundary = types(i) != prevType || iobs(i) == "B"
if (start >= 0 && atBoundary) { result.+=((start, i-start, types(i-1))); start = -1 }
if (types(i) != "" && atBoundary){
start = i
if (i == labels.length-1)
result.+=((start, 1, types(i)))
}
prevType = types(i)
}
result
}
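  // Usage sketch (traced from the code above, for illustration): for the labels in the scaladoc
  // example,
  //   iobBoundaries(Seq("O", "I-PER", "I-LOC", "B-LOC", "I-LOC", "O", "I-ORG"))
  // yields Seq((1,1,"PER"), (2,1,"LOC"), (3,2,"LOC"), (6,1,"ORG")), i.e. span start, length and
  // type suffix for each chunk.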
def bilouBoundaries(labels:Seq[String]): Seq[(Int,Int,String)] = {
val result = new scala.collection.mutable.ArrayBuffer[(Int,Int,String)]
val strings = labels.map(_.split('-'))
val bilous = strings.map(_.apply(0))
val types = strings.map(a => if (a.length > 1) a(1) else "")
var start = -1; var prevType = ""
for (i <- 0 until labels.length) {
val atBoundary = types(i) != prevType || bilous(i) == "B" || bilous(i) == "U"
if (bilous(i) == "U") result.+=((i, 1, types(i)))
else if (start >= 0 && atBoundary) { result.+=((start, i-start, types(i-1))); start = -1 }
if (types(i) != "" && atBoundary){
start = i
if (i == labels.length-1)
result.+=((start, 1, types(i)))
}
prevType = types(i)
}
result
}
/** Convenience alias for @see cc.factorie.app.nlp.iobBoundaries */
def bioBoundaries(labels:Seq[String]): Seq[(Int,Int,String)] = iobBoundaries(labels)
/** Command-line options available on all NLP model trainers.
@author David Belanger */
trait SharedNLPCmdOptions extends cc.factorie.util.CmdOptions {
val targetAccuracy = new CmdOption("target-accuracy", "", "FLOAT", "target accuracy for this NLP model. It will throw an exception if you don't hit this")
val trainPortion = new CmdOption("train-portion", 1.0, "FLOAT", "portion of train to load")
val testPortion = new CmdOption("test-portion", 1.0, "FLOAT", "portion of test to load")
}
}
| Craigacp/factorie | src/main/scala/cc/factorie/app/nlp/package.scala | Scala | apache-2.0 | 3,480 |
/*
* ******************************************************************************
* * Copyright (C) 2013 Christopher Harris (Itszuvalex)
* * [email protected]
* *
* * This program is free software; you can redistribute it and/or
* * modify it under the terms of the GNU General Public License
* * as published by the Free Software Foundation; either version 2
* * of the License, or (at your option) any later version.
* *
* * This program is distributed in the hope that it will be useful,
* * but WITHOUT ANY WARRANTY; without even the implied warranty of
* * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* * GNU General Public License for more details.
* *
* * You should have received a copy of the GNU General Public License
* * along with this program; if not, write to the Free Software
* * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
* *****************************************************************************
*/
package com.itszuvalex.femtocraft.utils
import com.itszuvalex.femtocraft.api.core.ISaveable
import net.minecraft.nbt.NBTTagCompound
import net.minecraft.world.World
import net.minecraftforge.common.DimensionManager
/**
* Created by Christopher Harris (Itszuvalex) on 5/9/14.
*/
class WorldLocation(var world: World, var x: Int, var y: Int, var z: Int) extends ISaveable with Comparable[WorldLocation] {
def this() = this(null, 0, 0, 0)
override def equals(o: scala.Any): Boolean = {
if (o == this) return true
if (!o.isInstanceOf[WorldLocation]) return false
val that: WorldLocation = o.asInstanceOf[WorldLocation]
if (x != that.x) return false
if (y != that.y) return false
if (z != that.z) return false
if (world == null && that.world != null) return false
if (that.world == null && world != null) return false
if (world == null) return true
if (world.provider.dimensionId != that.world.provider.dimensionId) return false
true
}
override def hashCode: Int = {
var result: Int = if (world == null) 0 else world.provider.dimensionId
result = 31 * result + x
result = 31 * result + y
result = 31 * result + z
result
}
def saveToNBT(compound: NBTTagCompound) {
compound.setInteger("x", x)
compound.setInteger("y", y)
compound.setInteger("z", z)
if (world != null && !world.isRemote) compound.setInteger("dim", world.provider.dimensionId)
}
def loadFromNBT(compound: NBTTagCompound) {
x = compound.getInteger("x")
y = compound.getInteger("y")
z = compound.getInteger("z")
world = DimensionManager.getWorld(compound.getInteger("dim"))
}
def getTileEntity = if (world == null) null else world.getTileEntity(x, y, z)
def getBlock = if (world == null) null else world.getBlock(x, y, z)
def compareTo(o: WorldLocation): Int = {
if (x < o.x) return -1
if (x > o.x) return 1
if (y < o.y) return -1
if (y > o.y) return 1
if (z < o.z) return -1
if (z > o.z) return 1
if (world == null && o.world != null) return -1
if (world != null && o.world == null) return 1
if (world == null) return 0
if (world.provider.dimensionId < o.world.provider.dimensionId) return -1
if (world.provider.dimensionId > o.world.provider.dimensionId) return 1
0
}
}
| Itszuvalex/Femtocraft-alpha-1 | src/main/java/com/itszuvalex/femtocraft/utils/WorldLocation.scala | Scala | gpl-2.0 | 3,331 |
package models.submission
import
java.{ io, nio, util },
io.{ ByteArrayInputStream, ByteArrayOutputStream, FileInputStream, InputStream },
nio.charset.Charset,
util.zip.{ ZipEntry, ZipOutputStream }
import
org.joda.time.{ DateTime, format },
format.DateTimeFormat
object WorkZipper {
private val CharSet = Charset.forName("ISO-8859-1")
def apply(works: UserWork*): Array[Byte] = {
val runBundles = works groupBy (_.runID) mapValues (_ groupBy (_.periodID) mapValues (_ groupBy (_.userID)))
val baos = new ByteArrayOutputStream
val out = new ZipOutputStream(baos, CharSet)
runBundles foreach {
case (runID, periodBundles) =>
out.putNextEntry(new ZipEntry(s"$runID/"))
periodBundles foreach {
case (periodID, userBundles) =>
out.putNextEntry(new ZipEntry(s"$runID/$periodID/"))
userBundles foreach {
case (userID, workBundles) =>
val prefix = s"$runID/$periodID/$userID/"
out.putNextEntry(new ZipEntry(prefix))
workBundles foreach zipWork(prefix, out)
}
}
}
out.close()
baos.toByteArray
}
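  // Usage sketch (illustrative; workA/workB are hypothetical values): WorkZipper(workA, workB)
  // returns the raw bytes of a zip whose entries are laid out as
  // <runID>/<periodID>/<userID>/<workID>/{work.<ext>, metadata.txt, comments.txt, supplement-*},
  // grouped by the works' run, period and user ids.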
private def zipWork(prefix: String, out: ZipOutputStream)(work: UserWork): ZipOutputStream = {
val newPrefix = s"$prefix${work.id.get}/"
out.putNextEntry(new ZipEntry(newPrefix))
implicit val p = Prefix(newPrefix)
(writeData(work) _ andThen writeMetadata(work) andThen writeComments(work.comments) andThen writeSupplements(work.supplements))(out)
}
private def writeData(work: UserWork)(out: ZipOutputStream)(implicit prefix: Prefix): ZipOutputStream = {
val fileExt = TypeBundleCache.byName(work.typ).fileExtension
val fis = new FileInputStream(s"./public/${work.data}")
writeEntry(fis, s"work.$fileExt")(out)
}
private def writeMetadata(work: UserWork)(out: ZipOutputStream)(implicit prefix: Prefix): ZipOutputStream = {
val unJson: PartialFunction[String, String] = {
case s if (s.startsWith("\\"") && s.endsWith("\\"")) => s drop 1 dropRight 1
case s => s
}
val predefKeys = Seq("id", "type", "time", "description")
val predefVals = Seq(work.id.get.toString, work.typ, work.timestamp.toTimeString, work.description)
val metaPairs = new JsonMetadata(work.metadata).toMap
val pairs = ((predefKeys zip predefVals) ++ metaPairs).toMap mapValues unJson
    val metadataStr = pairs map { case (key, value) => s"$key: $value" } mkString "\n"
writeEntry(metadataStr, "metadata.txt")(out)
}
private def writeComments(comments: Seq[UserWorkComment])(out: ZipOutputStream)(implicit prefix: Prefix): ZipOutputStream = {
val commentsStr =
comments map {
case UserWorkComment(_, _, timestamp, userID, comment) => s"[${timestamp.toTimeString}] $userID: $comment"
} mkString (
"\\n"
)
writeEntry(commentsStr, "comments.txt")(out)
}
private def writeSupplements(supplements: Seq[UserWorkSupplement])(out: ZipOutputStream)(implicit prefix: Prefix): ZipOutputStream = {
supplements foreach {
supp =>
val fileExt = TypeBundleCache.byName(supp.typ).fileExtension
val fis = new FileInputStream(s"./public/${supp.data}")
writeEntry(fis, s"supplement-${supp.typ}-${supp.id.get}.$fileExt")(out)
}
out
}
private def writeEntry[T <% PimpyStream](convertible: T, name: String)(out: ZipOutputStream)(implicit prefix: Prefix): ZipOutputStream = {
out.putNextEntry(new ZipEntry(s"${prefix.value}$name"))
val b = new Array[Byte](1024)
val in = convertible.toInputStream
var count = in.read(b)
while (count > 0) {
out.write(b, 0, count)
count = in.read(b)
}
in.close()
out.closeEntry()
out
}
private implicit class LongAndPimpy(timestamp: Long) {
def toTimeString = DateTimeFormat.forPattern("MM/dd/yy @ hh:mma").print(new DateTime(timestamp))
}
private trait PimpyStream {
def toInputStream: InputStream
}
private implicit class PimpyIS(is: InputStream) extends PimpyStream {
override def toInputStream = is
}
private implicit class PimpyString(str: String) extends PimpyStream {
override def toInputStream = new ByteArrayInputStream(str.getBytes(CharSet))
}
private case class Prefix(value: String) extends AnyVal
}
| NetLogo/SimServer | app/models/submission/WorkZipper.scala | Scala | gpl-2.0 | 4,434 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import java.sql.Timestamp
import org.apache.calcite.runtime.SqlFunctions
import org.apache.flink.api.common.functions.RichMapFunction
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.ResultTypeQueryable
import org.apache.flink.configuration.Configuration
import org.apache.flink.streaming.api.windowing.windows.TimeWindow
import org.apache.flink.table.codegen.{Compiler, GeneratedAggregationsFunction}
import org.apache.flink.table.util.Logging
import org.apache.flink.types.Row
/**
* This map function only works for windows on batch tables.
* It appends an (aligned) rowtime field to the end of the output row.
*
* @param genAggregations Code-generated [[GeneratedAggregations]]
* @param timeFieldPos Time field position in input row
* @param tumbleTimeWindowSize The size of tumble time window
*/
class DataSetWindowAggMapFunction(
private val genAggregations: GeneratedAggregationsFunction,
private val timeFieldPos: Int,
private val tumbleTimeWindowSize: Option[Long],
@transient private val returnType: TypeInformation[Row])
extends RichMapFunction[Row, Row]
with ResultTypeQueryable[Row]
with Compiler[GeneratedAggregations]
with Logging {
private var accs: Row = _
private var output: Row = _
private var function: GeneratedAggregations = _
override def open(config: Configuration) {
LOG.debug(s"Compiling AggregateHelper: $genAggregations.name \n\n " +
s"Code:\n$genAggregations.code")
val clazz = compile(
getRuntimeContext.getUserCodeClassLoader,
genAggregations.name,
genAggregations.code)
LOG.debug("Instantiating AggregateHelper.")
function = clazz.newInstance()
accs = function.createAccumulators()
output = function.createOutputRow()
}
override def map(input: Row): Row = {
function.resetAccumulator(accs)
function.accumulate(accs, input)
function.setAggregationResults(accs, output)
function.setForwardedFields(input, output)
val timeField = input.getField(timeFieldPos)
val rowtime = getTimestamp(timeField)
val rowtimeIndex = output.getArity - 1
if (tumbleTimeWindowSize.isDefined) {
// in case of tumble time window, align rowtime to window start to represent the window
output.setField(
rowtimeIndex,
TimeWindow.getWindowStartWithOffset(rowtime, 0L, tumbleTimeWindowSize.get))
} else {
// for session window and slide window
output.setField(rowtimeIndex, rowtime)
}
output
}
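  // Illustrative example: with a tumbling window of 5000 ms and no offset, a record whose time
  // field is 17000 gets the aligned window-start rowtime 15000 via
  // TimeWindow.getWindowStartWithOffset(17000L, 0L, 5000L); session and slide windows keep the
  // raw rowtime instead.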
private def getTimestamp(timeField: Any): Long = {
timeField match {
case b: Byte => b.toLong
case t: Character => t.toLong
case s: Short => s.toLong
case i: Int => i.toLong
case l: Long => l
case f: Float => f.toLong
case d: Double => d.toLong
case s: String => s.toLong
case t: Timestamp => SqlFunctions.toLong(t)
case _ =>
throw new RuntimeException(
s"Window time field doesn't support ${timeField.getClass} type currently")
}
}
override def getProducedType: TypeInformation[Row] = {
returnType
}
}
| hequn8128/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/runtime/aggregate/DataSetWindowAggMapFunction.scala | Scala | apache-2.0 | 4,036 |
import sbt._
object Dependencies {
lazy val testKit = Seq(
"com.typesafe.akka" %% "akka-testkit" % "2.+",
"org.scalatest" %% "scalatest" % "2.+" % "test",
"org.scalactic" %% "scalactic" % "2.+" % "test",
"org.scalacheck" %% "scalacheck" % "1.12.+" % "test",
"net.databinder.dispatch" %% "dispatch-core" % "+" % "test"
)
lazy val serialization = Seq(
"com.google.guava" % "guava" % "18.+",
"com.typesafe.play" %% "play-json" % "2.4.+"
)
lazy val akka = Seq(
"com.typesafe.akka" %% "akka-actor" % "2.+"
)
lazy val p2p = Seq(
"org.bitlet" % "weupnp" % "0.1.+"
)
lazy val db = Seq(
"com.h2database" % "h2-mvstore" % "1.+",
"org.mapdb" % "mapdb" % "2.+"
)
lazy val logging = Seq(
"ch.qos.logback" % "logback-classic" % "1.+",
"ch.qos.logback" % "logback-core" % "1.+"
)
lazy val http = Seq(
"com.typesafe.akka" %% "akka-http-experimental" % "2.+",
"com.chuusai" %% "shapeless" % "2.+",
"io.swagger" %% "swagger-scala-module" % "1.+",
"io.swagger" % "swagger-core" % "1.+",
"io.swagger" % "swagger-annotations" % "1.+",
"io.swagger" % "swagger-models" % "1.+",
"io.swagger" % "swagger-jaxrs" % "1.+",
"com.github.swagger-akka-http" %% "swagger-akka-http" % "0.+"
)
}
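// Usage sketch (illustrative; the project/module wiring is an assumption, not part of this file):
// these sequences are meant to be referenced from the sbt build definition, e.g. in build.sbt:
//   lazy val root = (project in file("."))
//     .settings(libraryDependencies ++= Dependencies.akka ++ Dependencies.http ++ Dependencies.testKit)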
| ScorexProject/Scorex-Lagonaki | project/Dependencies.scala | Scala | cc0-1.0 | 1,281 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.mv.plans.modular
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.carbondata.mv.plans
import org.apache.carbondata.mv.plans._
import org.apache.carbondata.mv.plans.modular.Flags._
abstract class Harmonizer(conf: SQLConf)
extends RuleExecutor[ModularPlan] {
// protected val fixedPoint = FixedPoint(conf.getConfString("spark.mv.harmonizer
// .maxIterations").toInt)
protected val fixedPoint = FixedPoint(conf.optimizerMaxIterations)
def batches: Seq[Batch] = {
Batch(
"Data Harmonizations", fixedPoint,
Seq( HarmonizeDimensionTable) ++
extendedOperatorHarmonizationRules: _*) :: Nil
}
/**
* Override to provide additional rules for the modular operator harmonization batch.
*/
def extendedOperatorHarmonizationRules: Seq[Rule[ModularPlan]] = Nil
}
/**
* A full Harmonizer - harmonize both fact and dimension tables
*/
object FullHarmonizer extends FullHarmonizer
class FullHarmonizer extends Harmonizer(new SQLConf()) {
override def extendedOperatorHarmonizationRules: Seq[Rule[ModularPlan]] =
super.extendedOperatorHarmonizationRules ++ (HarmonizeFactTable :: Nil)
}
/**
* A semi Harmonizer - it harmonizes dimension tables only
*/
object SemiHarmonizer extends SemiHarmonizer
class SemiHarmonizer extends Harmonizer(new SQLConf())
object HarmonizeDimensionTable extends Rule[ModularPlan] with PredicateHelper {
def apply(plan: ModularPlan): ModularPlan = {
plan transform {
case s@Select(_, _, _, _, jedges, fact :: dims, _, _, _, _) if
jedges.forall(e => e.joinType == LeftOuter || e.joinType == Inner) &&
fact.isInstanceOf[ModularRelation] &&
dims.filterNot(_.isInstanceOf[modular.LeafNode]).nonEmpty &&
dims.forall(d => (d.isInstanceOf[ModularRelation] || HarmonizedRelation.canHarmonize(d))) => {
var tPullUpPredicates = Seq.empty[Expression]
val tChildren = fact :: dims.map {
case m: ModularRelation => m
case h@GroupBy(
_,
_,
_,
_,
s1@Select(_, _, _, _, _, dim :: Nil, NoFlags, Nil, Nil, _),
NoFlags,
Nil, _) if (dim.isInstanceOf[ModularRelation]) => {
val rAliasMap = AttributeMap(h.outputList.collect {
case a: Alias if a.child.isInstanceOf[Attribute] =>
(a.child.asInstanceOf[Attribute], a.toAttribute) })
val pullUpPredicates = s1.predicateList
.map(replaceAlias(_, rAliasMap.asInstanceOf[AttributeMap[Expression]]))
if (pullUpPredicates.forall(cond => canEvaluate(cond, h))) {
tPullUpPredicates = tPullUpPredicates ++ pullUpPredicates
plans.modular.HarmonizedRelation(h.copy(child = s1.copy(predicateList = Nil)))
} else {
h
}
}
// case _ =>
}
if (tChildren.forall(_.isInstanceOf[modular.LeafNode])) {
s.copy(predicateList = s.predicateList ++ tPullUpPredicates, children = tChildren)
} else {
s
}
}
// s.withNewChildren(fact :: dims.map { case m: modular.ModularRelation => m; case h
// => HarmonizedRelation(h) })}
// s.copy(predicateList = predicateList ++ moveUpPredicates, children = tChildren)}
// fact :: dims.map { case m: modular.ModularRelation => m; case h => HarmonizedRelation(h)
// })}
}
}
}
object HarmonizeFactTable extends Rule[ModularPlan] with PredicateHelper with AggregatePushDown {
def apply(plan: ModularPlan): ModularPlan = {
plan transform {
case g@GroupBy(_, _, _, _,
s@Select(_, _, _, aliasm, jedges, fact :: dims, _, _, _, _), _, _, _)
if s.adjacencyList.keySet.size <= 1 &&
jedges.forall(e => e.joinType == Inner) && // !s.flags.hasFlag(DISTINCT) &&
fact.isInstanceOf[ModularRelation] &&
(fact :: dims).forall(_.isInstanceOf[modular.LeafNode]) &&
dims.nonEmpty => {
val selAliasMap = AttributeMap(s.outputList.collect {
case a: Alias if (a.child.isInstanceOf[Attribute]) => (a.toAttribute, a.child
.asInstanceOf[Attribute])
})
val aggTransMap = findPushThroughAggregates(
g.outputList,
selAliasMap,
fact.asInstanceOf[ModularRelation])
val constraintsAttributeSet = dims.flatMap(s.extractEvaluableConditions(_))
.map(_.references)
.foldLeft(AttributeSet.empty)(_ ++ _)
val groupingAttributeSet = g.predicateList.map(_.references)
.foldLeft(AttributeSet.empty)(_ ++ _)
if (aggTransMap.isEmpty ||
// TODO: the following condition is too pessimistic, more work needed using methods
// similar to those in trait
// QueryPlanConstraints
!constraintsAttributeSet.subsetOf(groupingAttributeSet)) {
g
} else {
val starJExprs = dims.flatMap(dim => s.extractJoinConditions(fact, dim)).toSeq
val gJAttributes = starJExprs.map(expr => expr.references)
.foldLeft(AttributeSet.empty)(_ ++ _).filter(fact.outputSet.contains(_))
val fExprs = s.extractEvaluableConditions(fact)
val gFAttributes = fExprs.map(expr => expr.references)
.foldLeft(AttributeSet.empty)(_ ++ _)
.filter(fact.outputSet.contains(_))
val gGAttributes = g.predicateList.map(expr => expr.references)
.foldLeft(AttributeSet.empty)(_ ++ _).filter(fact.outputSet.contains(_))
val gAttributes = (gJAttributes ++ gFAttributes ++ gGAttributes).toSeq
val oAggregates = aggTransMap.map(_._2).flatMap(_._2).toSeq
val tAliasMap = (aliasm.get(0) match {
case Some(name) => Seq((0, name));
case _ => Seq.empty
}).toMap
val sOutput = (oAggregates.map(_.references).foldLeft(AttributeSet.empty)(_ ++ _) ++
AttributeSet(gAttributes)).toSeq
val hFactSel = plans.modular
.Select(
sOutput,
fact.output,
Seq.empty,
tAliasMap,
Seq.empty,
fact :: Nil,
NoFlags,
Seq.empty,
Seq.empty)
val hFact = plans.modular
.GroupBy(
gAttributes ++ oAggregates,
sOutput,
gAttributes,
None,
hFactSel,
NoFlags,
Seq.empty)
val hFactName = s"gen_harmonized_${
fact.asInstanceOf[ModularRelation]
.databaseName
}_${ fact.asInstanceOf[ModularRelation].tableName }"
val hAliasMap = (aliasm - 0) + (0 -> hFactName)
val hInputList = gAttributes ++ oAggregates.map(_.toAttribute) ++
dims.flatMap(_.asInstanceOf[modular.LeafNode].output).toSeq
// val hPredicateList = s.predicateList
val attrOutputList = s.outputList.filter(expr => (expr.isInstanceOf[Attribute]) ||
(expr.isInstanceOf[Alias] &&
expr.asInstanceOf[Alias].child
.isInstanceOf[Attribute]))
val aggOutputList = aggTransMap.values.flatMap(t => t._2)
.map { ref =>
AttributeReference(ref.name, ref.dataType)(
exprId = ref.exprId,
qualifier = Some(hFactName))
}
val hFactOutputSet = hFact.outputSet
// Update the outputlist qualifier
val hOutputList = (attrOutputList ++ aggOutputList).map {attr =>
attr.transform {
case ref: Attribute if hFactOutputSet.contains(ref) =>
AttributeReference(ref.name, ref.dataType)(
exprId = ref.exprId,
qualifier = Some(hFactName))
}
}.asInstanceOf[Seq[NamedExpression]]
// Update the predicate qualifier
val hPredList = s.predicateList.map{ pred =>
pred.transform {
case ref: Attribute if hFactOutputSet.contains(ref) =>
AttributeReference(ref.name, ref.dataType)(
exprId = ref.exprId,
qualifier = Some(hFactName))
}
}
val hSel = s.copy(
outputList = hOutputList,
inputList = hInputList,
aliasMap = hAliasMap,
predicateList = hPredList,
children = hFact :: dims)
val gOutputList = g.outputList.zipWithIndex
.map { case (expr, index) =>
if (aggTransMap.keySet.contains(index)) {
aggTransMap(index)
._1
} else {
expr
}
}
val wip = g.copy(outputList = gOutputList, inputList = hInputList, child = hSel)
wip.transformExpressions {
case ref: Attribute if hFactOutputSet.contains(ref) =>
AttributeReference(ref.name, ref.dataType)(
exprId = ref.exprId,
qualifier = Some(hFactName))
}
}
}
}
}
}
| jatin9896/incubator-carbondata | datamap/mv/plan/src/main/scala/org/apache/carbondata/mv/plans/modular/Harmonizer.scala | Scala | apache-2.0 | 10,286 |
package com.tam.cobol_interpreter.test.tools
import com.tam.cobol_interpreter.tools.{ByteArrayTool, Comp3Tool}
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.Matchers._
import org.scalatest.junit.JUnitRunner
/**
* Created by tamu on 1/2/15.
*/
@RunWith(classOf[JUnitRunner])
class TestComp3Tool extends FlatSpec {
"A Comp3Tool" should "expand packed bytes" in {
Comp3Tool.unpack(Array(0x1D:Byte)) should equal ("-1".toCharArray)
Comp3Tool.unpack(Array(0x1C:Byte)) should equal ("1".toCharArray)
Comp3Tool.unpack(Array(0x02:Byte, 0x1C: Byte)) should equal ("21".toCharArray)
Comp3Tool.unpack(Array(0x42:Byte, 0x1C: Byte)) should equal ("421".toCharArray)
Comp3Tool.unpack(Array(0x42:Byte, 0x1D: Byte)) should equal ("-421".toCharArray)
}
"A Comp3Tool" should "expand large amounts of bytes" in {
ByteArrayTool.makeString(Comp3Tool.unpack(Array(0x42:Byte, 0x42:Byte, 0x42:Byte, 0x42:Byte,
0x42:Byte, 0x42:Byte, 0x42:Byte, 0x42:Byte,
0x42:Byte, 0x42:Byte, 0x1C: Byte))) should equal ("424242424242424242421")
}
"A Comp3Tool" should "pack integers" in {
Comp3Tool.pack("-123") should equal (Array(0x12, 0x3D))
Comp3Tool.pack("123") should equal (Array(0x12, 0x3C))
Comp3Tool.pack("1323") should equal (Array(0x01, 0x32, 0x3C))
Comp3Tool.pack("01323") should equal (Array(0x01, 0x32, 0x3C))
Comp3Tool.pack("-01323") should equal (Array(0x01, 0x32, 0x3D))
Comp3Tool.pack("11323") should equal (Array(0x11, 0x32, 0x3C))
Comp3Tool.pack(11323) should equal (Array(0x11, 0x32, 0x3C))
Comp3Tool.pack(-1323) should equal (Array(0x01, 0x32, 0x3D))
}
"A Comp3Tool" should "pack integers into requested sizes" in {
Comp3Tool.pack(11323, 4) should equal (Array(0x00, 0x11, 0x32, 0x3C))
Comp3Tool.pack(-1323, 10).length should equal (10)
Comp3Tool.pack(11323, 3) should equal (Array(0x11, 0x32, 0x3C))
intercept[AssertionError]{Comp3Tool.pack(11323, 2)}.getClass should equal (new AssertionError().getClass)
}
}
| tamsanh/scala-cobol-interpreter | src/test/scala/com/tam/cobol_interpreter/tools/TestComp3Tool.scala | Scala | apache-2.0 | 2,080 |