code (string, lengths 5-1M) | repo_name (string, lengths 5-109) | path (string, lengths 6-208) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
---|---|---|---|---|---|
package korolev.web
import korolev.web.PathAndQuery._
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import scala.language.implicitConversions
class PathAndQuerySpec extends AnyFlatSpec with Matchers {
".fromString" should "parse path with Root" in {
val path = PathAndQuery.fromString("/page/1")
path shouldBe Root / "page" / "1"
}
".fromString" should "parse path with empty parameters" in {
val path = PathAndQuery.fromString("/page/1?")
path shouldBe Root / "page" / "1"
}
".fromString" should "parse path with parameters" in {
val path = PathAndQuery.fromString("/page/1?k1=v1")
path shouldBe Root / "page" / "1" :? "k1" -> "v1"
}
".fromString" should "parse path with parameter without value" in {
val path = PathAndQuery.fromString("/page/1?k1")
path shouldBe Root / "page" / "1" :? "k1" -> ""
}
".fromString" should "parse path with many parameters" in {
val path = PathAndQuery.fromString("/page/1?k1=v1&k2=v2&k3=v3&k4=v4")
path shouldBe Root / "page" / "1" :? "k1" -> "v1" :& "k2" -> "v2" :& "k3" -> "v3" :& "k4" -> "v4"
}
".mkString" should "correct render query parameter without value" in {
val result = PathAndQuery.fromString("/test/tset?k1=v1&k2&k3=v3").mkString
result shouldBe "/test/tset?k1=v1&k2&k3=v3"
}
".mkString" should "make from Root" in {
val path = Root
path.mkString shouldBe "/"
}
".mkString" should "make from path with Root" in {
val path = Root / "page"
path.mkString shouldBe "/page"
}
".mkString" should "make from path with complex path" in {
val path: Path = Root / "api" / "v2" / "users"
path.mkString shouldBe "/api/v2/users"
}
".mkString" should "make from path with parameters" in {
val path = Root / "page" / "1" :? "k1" -> "v1"
path.mkString shouldBe "/page/1?k1=v1"
}
".mkString" should "make from path with many parameters" in {
val path = Root / "page" / "1" :? "k1" -> "v1" :& "k2" -> "v2" :& "k3" -> "v3" :& "k4" -> "v4" :& "k5" -> "v5"
path.mkString shouldBe "/page/1?k1=v1&k2=v2&k3=v3&k4=v4&k5=v5"
}
".fromString" should "be equal to .mkString" in {
val result = PathAndQuery.fromString("/page/1?k1=v1&k2=v2&k3=v3&k4=v4&k5=v5")
result.mkString shouldBe "/page/1?k1=v1&k2=v2&k3=v3&k4=v4&k5=v5"
}
".withParams" should "encode query parameters when parse from string" in {
val result = Root.withParams(Some("k%22%7DX%22%5D6%27%3F=pPdqII6%25tS")).mkString
result shouldBe "?k%22%7DX%22%5D6%27%3F=pPdqII6%25tS"
}
".endsWith" should "correct work without parameters" in {
val path = Root / "page"
path.endsWith("page") shouldBe true
}
".endsWith" should "correct not work without parameters" in {
val path = Root / "page"
path.endsWith("size") shouldBe false
}
".endsWith" should "correct work with parameters" in {
val path = Root / "page" / "1" :? "k1" -> "v1"
path.endsWith("1") shouldBe true
}
".endsWith" should "correct not work with parameters" in {
val path = Root / "page" :? "k1" -> "v1"
path.endsWith("size") shouldBe false
}
".startsWith" should "correct work without parameters" in {
val path = Root / "page" / "1"
path.startsWith("page") shouldBe true
}
".startsWith" should "correct not work without parameters" in {
val path = Root / "page" / "1"
path.startsWith("size") shouldBe false
}
".startsWith" should "correct work with parameters" in {
val path = Root / "page" / "1" :? "k1" -> "v1"
path.startsWith("page") shouldBe true
}
".startsWith" should "correct not work with parameters" in {
val path = Root / "page" / "1" :? "k1" -> "v1"
path.startsWith("size") shouldBe false
}
".++" should "correct concatenate complex path" in {
val head = Root / "api" / "v1" / "system"
val tail = Root / "admin" / "parameters" / "edit"
head ++ tail shouldBe Root / "api" / "v1" / "system" / "admin" / "parameters" / "edit"
}
".reverse" should "correct reverse Root path" in {
val path = Root / "api" / "v1" / "system"
path.reverse shouldBe Root / "system" / "v1" / "api"
}
"path matching" should "correct extract parameters as a Map[String, String]" in {
val path = Root / "test" :? "k1" -> "v1"
val pf: PartialFunction[PathAndQuery, Boolean] = {
case Root / "test" :?* params =>
params == Map("k1" -> "v1")
}
pf(path) shouldBe true
}
"path matching" should "correct exact match ignore parameters" in {
val path = Root / "test" :? "k1" -> "v1"
val pf: PartialFunction[PathAndQuery, Boolean] = {
case Root / "test" =>
true
case _ =>
false
}
pf(path) shouldBe true
}
"path matching" should "correct match parameter by name" in {
object K1 extends QP("k1")
val path = Root / "test" :? "k1" -> "v1"
val pf: PartialFunction[PathAndQuery, String] = {
case Root / "test" :?* K1(value) =>
value
}
pf(path) shouldBe "v1"
}
"path matching" should "correct match two parameter by name" in {
case object K1 extends QP("k1")
case object K2 extends QP("k2")
val path: PathAndQuery = Root :? "k1" -> "v1" :& "k2" -> "v2"
val result = path match {
case Root :?* K1(_) *& K2(_) =>
true
case _ =>
false
}
result shouldBe true
}
"path matching" should "fail if mandatory parameter not found" in {
object K1 extends QP("k1")
val path = Root / "test" :? "k2" -> "v2"
val pf: PartialFunction[PathAndQuery, String] = {
case Root / "test" :?* K1(value) =>
value
}
pf.isDefinedAt(path) shouldBe false
}
"path matching" should "not fail if optional not found" in {
object K1 extends OQP("k1")
val path = Root / "test" :? "k2" -> "v2"
val pf: PartialFunction[PathAndQuery, Boolean] = {
case Root / "test" :?* K1(_) =>
true
}
pf(path) shouldBe true
}
"path matching" should "correct match optional parameter" in {
object K1 extends OQP("k1")
val path = Root / "test" :? "k2" -> "v2"
val pf: PartialFunction[PathAndQuery, Option[String]] = {
case Root / "test" :?* K1(value) =>
value
}
pf(path) shouldBe None
}
"path matching" should "correct match mixed parameter requirement" in {
object K1 extends OQP("k1")
object K2 extends QP("k2")
object K3 extends QP("k3")
val path = Root / "test" :? "k1" -> "v1" :& "k2" -> "v2" :& "k3" -> "v3"
val pf: PartialFunction[PathAndQuery, (Option[String], String, String)] = {
case Root / "test" :?* K1(v1) *& K2(v2) *& K3(v3) =>
(v1, v2, v3)
}
pf(path) shouldBe (Some("v1"), "v2", "v3")
}
"path matching" should "correct match mixed parameter requirement with one undefined" in {
object K1 extends OQP("k1")
object K2 extends QP("k2")
val path = Root / "test" :? "k2" -> "v2"
val pf: PartialFunction[PathAndQuery, (Option[String], String)] = {
case Root / "test" :?* K1(v1) *& K2(v2) =>
(v1, v2)
}
pf(path) shouldBe (None, "v2")
}
}
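// A small usage sketch (not part of the test suite above): a routing-style match built
// from the same DSL the spec exercises. The route names and the Tab parameter are
// illustrative assumptions only.
object PathAndQueryRoutingSketch {
  import korolev.web.PathAndQuery._
  object Tab extends OQP("tab")
  def route(pq: PathAndQuery): String = pq match {
    case Root                       => "home"
    case Root / "page" / "1"        => "first page" // exact match ignores query parameters
    case Root / "test" :?* Tab(tab) => s"test, tab=${tab.getOrElse("default")}"
    case other                      => s"not found: ${other.mkString}"
  }
  def main(args: Array[String]): Unit = {
    println(route(PathAndQuery.fromString("/page/1?k1=v1"))) // first page
    println(route(PathAndQuery.fromString("/test?tab=a")))   // test, tab=a
  }
}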
|
fomkin/korolev
|
modules/web/src/test/scala/korolev/web/PathAndQuerySpec.scala
|
Scala
|
apache-2.0
| 7,144 |
package controllers
import java.util.UUID
import models.AssetSupport._
import play.api.libs.concurrent.Akka
import play.api.libs.json.Json
import play.api.mvc.{Controller, Action}
import services.ConfigSupport._
import services.mailer.OrderMailer
import services.{MailRequest, OrderUpdateMailRequest, OrderCreateMailRequest, runtime}
import play.api.Play.current
import scala.concurrent.Future
import services.storage.S3Bucket
object Mails extends Controller with S3Bucket {
lazy val orderPrintService = runtime orderPrintService
lazy val crudService = runtime crudService
lazy val mongo = runtime mongo
val mailAttachmentFolder = configKey("aws.s3.mailattachment.folder", "attachments")
lazy val mailer = Akka.system.actorOf(OrderMailer.props(mongo, crudService, orderPrintService), name = "mailer")
def sendOrderCreateMail(orderId: IdType) = Action.async {
send(orderId, mailAttachmentFolder) {
(uid, oid, folder) => OrderCreateMailRequest(uid, oid, folder)
}
}
def sendOrderUpdateMail(orderId: IdType) = Action.async {
send(orderId, mailAttachmentFolder) {
(uid, oid, folder) => OrderUpdateMailRequest(uid, oid, folder)
}
}
private def send(orderId: IdType, folder: String)(reqFactory: (UUID, String, String) => MailRequest) = {
val uid = UUID.randomUUID()
val req = reqFactory(uid, orderId, folder)
mailer ! req
Future.successful(Ok(Json.obj("resultUrl" -> req.resultUrl(bucketName))))
}
}
|
tsechov/shoehorn
|
app/controllers/Mails.scala
|
Scala
|
apache-2.0
| 1,473 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.ejson
import slamdata.Predef._
import quasar.contrib.matryoshka.{envT => cenvT}
import quasar.contrib.iota.copkTraverse
import matryoshka._
import matryoshka.implicits._
import matryoshka.patterns.EnvT
import scalaz._
import scalaz.syntax.applicative._
import scalaz.syntax.plus._
import simulacrum.typeclass
/** Typeclass for higher-kinded types that can be decoded from EJson. */
@typeclass
trait DecodeEJsonK[F[_]] {
def decodeK[J](implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): CoalgebraM[Decoded, F, J]
def mapK[G[_]](f: F ~> G): DecodeEJsonK[G] = {
val orig = this
new DecodeEJsonK[G] {
def decodeK[J](implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): CoalgebraM[Decoded, G, J] =
j => orig.decodeK[J].apply(j) map f
}
}
}
object DecodeEJsonK extends DecodeEJsonKInstances {
def apply[F[_]](implicit ev: DecodeEJsonK[F]): DecodeEJsonK[F] = ev
def envT[E: DecodeEJson, F[_]: DecodeEJsonK](
askLabel: String,
lowerLabel: String
): DecodeEJsonK[EnvT[E, F, ?]] =
new DecodeEJsonK[EnvT[E, F, ?]] {
def decodeK[J](implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): CoalgebraM[Decoded, EnvT[E, F, ?], J] = {
val J = Fixed[J]
val F = DecodeEJsonK[F].decodeK[J]
_ match {
case J.map((J.str(`askLabel`), ask) :: (J.str(`lowerLabel`), lower) :: Nil) =>
(DecodeEJson[E].decode[J](ask) |@| F(lower))(cenvT)
case J.map((J.str(`lowerLabel`), lower) :: (J.str(`askLabel`), ask) :: Nil) =>
(DecodeEJson[E].decode[J](ask) |@| F(lower))(cenvT)
case j =>
Decoded.failureFor[EnvT[E, F, J]](j, s"EnvT($askLabel, $lowerLabel).")
}
}
}
}
sealed abstract class DecodeEJsonKInstances {
implicit val ejsonDecodeEJsonK: DecodeEJsonK[EJson] =
new DecodeEJsonK[EJson] {
def decodeK[J](implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): CoalgebraM[Decoded, EJson, J] =
_.project.point[Decoded]
}
implicit def coproductDecodeEJsonK[F[_], G[_]](
implicit
F: DecodeEJsonK[F],
G: DecodeEJsonK[G]
): DecodeEJsonK[Coproduct[F, G, ?]] =
new DecodeEJsonK[Coproduct[F, G, ?]] {
def decodeK[J](implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): CoalgebraM[Decoded, Coproduct[F, G, ?], J] =
j => F.decodeK[J].apply(j).map(Coproduct.left[G](_)) <+>
G.decodeK[J].apply(j).map(Coproduct.right[F](_))
}
}
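// Hedged sketch: deriving a decoder through mapK with a natural transformation.
// A DecodeEJsonK[List] instance is assumed to exist purely for illustration; only
// mapK and scalaz's ~> come from the code above.
object DecodeEJsonKSketch {
  val listToVector: List ~> Vector = new (List ~> Vector) {
    def apply[A](fa: List[A]): Vector[A] = fa.toVector
  }
  // Given a (hypothetical) List decoder, a Vector decoder follows mechanically:
  def vectorDecoder(implicit L: DecodeEJsonK[List]): DecodeEJsonK[Vector] =
    L.mapK(listToVector)
}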
|
quasar-analytics/quasar
|
ejson/src/main/scala/quasar/ejson/DecodeEJsonK.scala
|
Scala
|
apache-2.0
| 3,124 |
package com.learn.spark.akka
import akka.actor.{Actor, ActorSystem, Props}
import com.typesafe.config.ConfigFactory
import org.apache.spark.{SparkConf, SparkContext}
/**
* Created by xiaojie on 17/7/13.
*/
class Master_old extends Actor {
println("constructor invoked")
override def preStart(): Unit = {
println("preStart invoked")
}
// Used to receive messages; sender is a proxy for the message sender
def receive: Actor.Receive = {
case "connect" => {
println("a client connected")
sender ! "reply"
}
case "hello" => {
println("hello")
}
case n: Long => {
squareSum(n)
}
}
// Run a local Spark job to compute the result
private def squareSum(n: Long): Long = {
val conf = new SparkConf().setMaster("local[4]").setAppName("Simple Application")
val sc = new SparkContext(conf)
val squareSum = sc.parallelize(1L until n).map { i =>
i * i
}.reduce(_ + _)
println(s"============== The square sum of $n is $squareSum. ==============")
squareSum
}
}
object Master_old {
def main(args: Array[String]): Unit = {
val host = "127.0.0.1"
val port = 8888
// Prepare the remoting configuration
val configStr =
s"""
|akka.actor.provider = "akka.remote.RemoteActorRefProvider"
|akka.remote.netty.tcp.hostname = "$host"
|akka.remote.netty.tcp.port = "$port"
""".stripMargin
val config = ConfigFactory.parseString(configStr)
// The ActorSystem is the top-level supervisor: it creates and monitors the actors below, and it is a singleton
val actorSystem = ActorSystem("MasterSystem", config)
val master = actorSystem.actorOf(Props(new Master_old), "Master") // the Master_old primary constructor runs here
master ! "hello" // send a message
actorSystem.awaitTermination() // keep the process alive; don't exit yet
}
}
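// Hedged sketch (not from the original project): a minimal remote client for the master
// above. The system and actor names mirror Master_old.main; the client port 8889 is an
// arbitrary assumption.
object MasterClientSketch {
  def main(args: Array[String]): Unit = {
    val clientConfig = ConfigFactory.parseString(
      """
        |akka.actor.provider = "akka.remote.RemoteActorRefProvider"
        |akka.remote.netty.tcp.hostname = "127.0.0.1"
        |akka.remote.netty.tcp.port = "8889"
      """.stripMargin)
    val system = ActorSystem("ClientSystem", clientConfig)
    // Look up the remote master by its full actor path and exercise the receive cases above.
    val master = system.actorSelection("akka.tcp://MasterSystem@127.0.0.1:8888/user/Master")
    master ! "connect" // the master replies with "reply" (to deadLetters here, since we are outside an actor)
    master ! 100L      // triggers the local Spark job that prints the square sum
  }
}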
|
xiaoJacky/sparkLearning
|
sparkLearning/src/main/scala/com/learn/spark/akka/Master_old.scala
|
Scala
|
apache-2.0
| 2,000 |
package odfi.h2dl.indesign.h2dl.interpreter
import tcl.TclInterpreter
import tcl.ExtendedTclList
import odfi.h2dl.indesign.h2dl.H2DLObject
import nx.NXObject
class H2DLInterpreter extends TclInterpreter {
def getRootH2DLInstances: List[H2DLObject] = {
var objects = this.eval("""
## Get Instances
set nsfBaseObjects [lsort -unique [info commands ::nsf::*]]
set nsfObjects [odfi::flist::MutableList fromList $nsfBaseObjects]
set list [$nsfObjects filter {
if {[::odfi::common::isClass $it odfi::dev::hw::h2dl::Instance] && [$it isRoot]==true} {
return true
} else {
return false
}
}]
$list toTCLList
""")
objects match {
case lst: ExtendedTclList => lst.toList.map(v => H2DLObject.NXObjectToH2DLObject(NXObject.fromTclValue(v)))
case obj => List(NXObject.fromTclValue(obj))
}
}
}
|
richnou/h2dl-indesign
|
src/main/scala/odfi/h2dl/indesign/h2dl/interpreter/H2DLInterpreter.scala
|
Scala
|
gpl-2.0
| 844 |
/*
* Copyright 2014-2022 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.core.stacklang
class SetSuite extends BaseWordSuite {
def interpreter: Interpreter = Interpreter(StandardVocabulary.allWords)
def word: Word = StandardVocabulary.Set
def shouldMatch: List[(String, List[Any])] = List(
"a,b" -> List.empty[Any],
"a,b,c" -> List("a")
)
def shouldNotMatch: List[String] = List("", "a")
}
|
Netflix/atlas
|
atlas-core/src/test/scala/com/netflix/atlas/core/stacklang/SetSuite.scala
|
Scala
|
apache-2.0
| 970 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.testingUtils
import akka.actor.{ActorRef, Cancellable, Terminated}
import akka.pattern.{ask, pipe}
import org.apache.flink.api.common.JobID
import org.apache.flink.runtime.FlinkActor
import org.apache.flink.runtime.checkpoint.savepoint.SavepointStore
import org.apache.flink.runtime.execution.ExecutionState
import org.apache.flink.runtime.jobgraph.JobStatus
import org.apache.flink.runtime.jobmanager.JobManager
import org.apache.flink.runtime.jobmanager.slots.ActorTaskManagerGateway
import org.apache.flink.runtime.messages.Acknowledge
import org.apache.flink.runtime.messages.ExecutionGraphMessages.JobStatusChanged
import org.apache.flink.runtime.messages.JobManagerMessages.{GrantLeadership, RegisterJobClient, RequestClassloadingProps}
import org.apache.flink.runtime.messages.Messages.Disconnect
import org.apache.flink.runtime.messages.RegistrationMessages.RegisterTaskManager
import org.apache.flink.runtime.messages.TaskManagerMessages.Heartbeat
import org.apache.flink.runtime.testingUtils.TestingJobManagerMessages._
import org.apache.flink.runtime.testingUtils.TestingMessages._
import org.apache.flink.runtime.testingUtils.TestingTaskManagerMessages.AccumulatorsChanged
import scala.collection.mutable
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.language.postfixOps
/** This mixin can be used to decorate a JobManager with messages for testing purpose. */
trait TestingJobManagerLike extends FlinkActor {
that: JobManager =>
import context._
import scala.collection.JavaConverters._
val waitForAllVerticesToBeRunning = scala.collection.mutable.HashMap[JobID, Set[ActorRef]]()
val waitForTaskManagerToBeTerminated = scala.collection.mutable.HashMap[String, Set[ActorRef]]()
val waitForAllVerticesToBeRunningOrFinished =
scala.collection.mutable.HashMap[JobID, Set[ActorRef]]()
var periodicCheck: Option[Cancellable] = None
val waitForJobStatus = scala.collection.mutable.HashMap[JobID,
collection.mutable.HashMap[JobStatus, Set[ActorRef]]]()
val waitForAccumulatorUpdate = scala.collection.mutable.HashMap[JobID, (Boolean, Set[ActorRef])]()
val waitForLeader = scala.collection.mutable.HashSet[ActorRef]()
val waitForNumRegisteredTaskManagers = mutable.PriorityQueue.newBuilder(
new Ordering[(Int, ActorRef)] {
override def compare(x: (Int, ActorRef), y: (Int, ActorRef)): Int = y._1 - x._1
})
val waitForClient = scala.collection.mutable.HashSet[ActorRef]()
val waitForShutdown = scala.collection.mutable.HashSet[ActorRef]()
var disconnectDisabled = false
var postStopEnabled = true
abstract override def postStop(): Unit = {
if (postStopEnabled) {
super.postStop()
} else {
// only stop leader election service to revoke the leadership of this JM so that a new JM
// can be elected leader
leaderElectionService.stop()
}
}
abstract override def handleMessage: Receive = {
handleTestingMessage orElse super.handleMessage
}
def handleTestingMessage: Receive = {
case Alive => sender() ! Acknowledge.get()
case RequestExecutionGraph(jobID) =>
currentJobs.get(jobID) match {
case Some((executionGraph, jobInfo)) => sender() ! decorateMessage(
ExecutionGraphFound(
jobID,
executionGraph)
)
case None => archive.tell(decorateMessage(RequestExecutionGraph(jobID)), sender())
}
case WaitForAllVerticesToBeRunning(jobID) =>
if(checkIfAllVerticesRunning(jobID)){
sender() ! decorateMessage(AllVerticesRunning(jobID))
}else{
val waiting = waitForAllVerticesToBeRunning.getOrElse(jobID, Set[ActorRef]())
waitForAllVerticesToBeRunning += jobID -> (waiting + sender())
if(periodicCheck.isEmpty){
periodicCheck =
Some(
context.system.scheduler.schedule(
0 seconds,
200 millis,
self,
decorateMessage(NotifyListeners)
)
)
}
}
case WaitForAllVerticesToBeRunningOrFinished(jobID) =>
if(checkIfAllVerticesRunningOrFinished(jobID)){
sender() ! decorateMessage(AllVerticesRunning(jobID))
}else{
val waiting = waitForAllVerticesToBeRunningOrFinished.getOrElse(jobID, Set[ActorRef]())
waitForAllVerticesToBeRunningOrFinished += jobID -> (waiting + sender())
if(periodicCheck.isEmpty){
periodicCheck =
Some(
context.system.scheduler.schedule(
0 seconds,
200 millis,
self,
decorateMessage(NotifyListeners)
)
)
}
}
case NotifyListeners =>
for(jobID <- currentJobs.keySet){
notifyListeners(jobID)
}
if(waitForAllVerticesToBeRunning.isEmpty && waitForAllVerticesToBeRunningOrFinished.isEmpty) {
periodicCheck foreach { _.cancel() }
periodicCheck = None
}
case NotifyWhenJobRemoved(jobID) =>
val gateways = instanceManager.getAllRegisteredInstances.asScala.map(_.getTaskManagerGateway)
val responses = gateways.map{
gateway => gateway match {
case actorGateway: ActorTaskManagerGateway =>
actorGateway.getActorGateway.ask(NotifyWhenJobRemoved(jobID), timeout).mapTo[Boolean]
case _ =>
throw new IllegalStateException("The task manager gateway is not of type " +
s"${classOf[ActorTaskManagerGateway].getSimpleName}")
}
}
val jobRemovedOnJobManager = (self ? CheckIfJobRemoved(jobID))(timeout).mapTo[Boolean]
val allFutures = responses ++ Seq(jobRemovedOnJobManager)
import context.dispatcher
Future.fold(allFutures)(true)(_ & _) map(decorateMessage(_)) pipeTo sender()
case CheckIfJobRemoved(jobID) =>
if(currentJobs.contains(jobID)) {
context.system.scheduler.scheduleOnce(
200 milliseconds,
self,
decorateMessage(CheckIfJobRemoved(jobID))
)(context.dispatcher, sender())
} else {
sender() ! decorateMessage(true)
}
case NotifyWhenTaskManagerTerminated(taskManager) =>
val waiting = waitForTaskManagerToBeTerminated.getOrElse(taskManager.path.name, Set())
waitForTaskManagerToBeTerminated += taskManager.path.name -> (waiting + sender)
case msg@Terminated(taskManager) =>
super.handleMessage(msg)
waitForTaskManagerToBeTerminated.remove(taskManager.path.name) foreach {
_ foreach {
listener =>
listener ! decorateMessage(TaskManagerTerminated(taskManager))
}
}
// see shutdown method for reply
case NotifyOfComponentShutdown =>
waitForShutdown += sender()
case NotifyWhenAccumulatorChange(jobID) =>
val (updated, registered) = waitForAccumulatorUpdate.
getOrElse(jobID, (false, Set[ActorRef]()))
waitForAccumulatorUpdate += jobID -> (updated, registered + sender)
sender ! true
/**
* Notification from the task manager that changed accumulators are transferred on the next
* Heartbeat. We need to keep this state to notify the listeners on the next Heartbeat report.
*/
case AccumulatorsChanged(jobID: JobID) =>
waitForAccumulatorUpdate.get(jobID) match {
case Some((updated, registered)) =>
waitForAccumulatorUpdate.put(jobID, (true, registered))
case None =>
}
/**
* Disables async processing of accumulator values and sends accumulators to the listeners if
* we previously received an [[AccumulatorsChanged]] message.
*/
case msg : Heartbeat =>
super.handleMessage(msg)
waitForAccumulatorUpdate foreach {
case (jobID, (updated, actors)) if updated =>
currentJobs.get(jobID) match {
case Some((graph, jobInfo)) =>
val userAccumulators = graph.aggregateUserAccumulators
actors foreach {
actor => actor ! UpdatedAccumulators(jobID, userAccumulators)
}
case None =>
}
waitForAccumulatorUpdate.put(jobID, (false, actors))
case _ =>
}
case RequestWorkingTaskManager(jobID) =>
currentJobs.get(jobID) match {
case Some((eg, _)) =>
if(eg.getAllExecutionVertices.asScala.isEmpty){
sender ! decorateMessage(WorkingTaskManager(None))
} else {
val resource = eg.getAllExecutionVertices.asScala.head.getCurrentAssignedResource
if(resource == null){
sender ! decorateMessage(WorkingTaskManager(None))
} else {
sender ! decorateMessage(
WorkingTaskManager(
Some(
resource.getTaskManagerGateway() match {
case actorTaskManagerGateway: ActorTaskManagerGateway =>
actorTaskManagerGateway.getActorGateway
case _ => throw new IllegalStateException(
"The task manager gateway is not of type " +
s"${classOf[ActorTaskManagerGateway].getSimpleName}")
}
)
)
)
}
}
case None => sender ! decorateMessage(WorkingTaskManager(None))
}
case NotifyWhenJobStatus(jobID, state) =>
val jobStatusListener = waitForJobStatus.getOrElseUpdate(jobID,
scala.collection.mutable.HashMap[JobStatus, Set[ActorRef]]())
val listener = jobStatusListener.getOrElse(state, Set[ActorRef]())
jobStatusListener += state -> (listener + sender)
case msg@JobStatusChanged(jobID, newJobStatus, _, _) =>
super.handleMessage(msg)
val cleanup = waitForJobStatus.get(jobID) match {
case Some(stateListener) =>
stateListener.remove(newJobStatus) match {
case Some(listeners) =>
listeners foreach {
_ ! decorateMessage(JobStatusIs(jobID, newJobStatus))
}
case _ =>
}
stateListener.isEmpty
case _ => false
}
if (cleanup) {
waitForJobStatus.remove(jobID)
}
case DisableDisconnect =>
disconnectDisabled = true
case DisablePostStop =>
postStopEnabled = false
case RequestSavepoint(savepointPath) =>
try {
//TODO user class loader ?
val savepoint = SavepointStore.loadSavepoint(
savepointPath,
Thread.currentThread().getContextClassLoader)
sender ! ResponseSavepoint(savepoint)
}
catch {
case e: Exception =>
sender ! ResponseSavepoint(null)
}
case msg: Disconnect =>
if (!disconnectDisabled) {
super.handleMessage(msg)
val taskManager = sender()
waitForTaskManagerToBeTerminated.remove(taskManager.path.name) foreach {
_ foreach {
listener =>
listener ! decorateMessage(TaskManagerTerminated(taskManager))
}
}
}
case NotifyWhenLeader =>
if (leaderElectionService.hasLeadership) {
sender() ! true
} else {
waitForLeader += sender()
}
case msg: GrantLeadership =>
super.handleMessage(msg)
waitForLeader.foreach(_ ! true)
waitForLeader.clear()
case NotifyWhenClientConnects =>
waitForClient += sender()
sender() ! true
case msg: RegisterJobClient =>
super.handleMessage(msg)
waitForClient.foreach(_ ! ClientConnected)
case msg: RequestClassloadingProps =>
super.handleMessage(msg)
waitForClient.foreach(_ ! ClassLoadingPropsDelivered)
case NotifyWhenAtLeastNumTaskManagerAreRegistered(numRegisteredTaskManager) =>
if (that.instanceManager.getNumberOfRegisteredTaskManagers >= numRegisteredTaskManager) {
// there are already at least numRegisteredTaskManager registered --> send Acknowledge
sender() ! Acknowledge.get()
} else {
// wait until we see at least numRegisteredTaskManager being registered at the JobManager
waitForNumRegisteredTaskManagers += ((numRegisteredTaskManager, sender()))
}
// TaskManager may be registered on these two messages
case msg @ (_: RegisterTaskManager) =>
super.handleMessage(msg)
// dequeue all senders which wait for instanceManager.getNumberOfStartedTaskManagers or
// fewer registered TaskManagers
while (waitForNumRegisteredTaskManagers.nonEmpty &&
waitForNumRegisteredTaskManagers.head._1 <=
instanceManager.getNumberOfRegisteredTaskManagers) {
val receiver = waitForNumRegisteredTaskManagers.dequeue()._2
receiver ! Acknowledge.get()
}
}
def checkIfAllVerticesRunning(jobID: JobID): Boolean = {
currentJobs.get(jobID) match {
case Some((eg, _)) =>
eg.getAllExecutionVertices.asScala.forall( _.getExecutionState == ExecutionState.RUNNING)
case None => false
}
}
def checkIfAllVerticesRunningOrFinished(jobID: JobID): Boolean = {
currentJobs.get(jobID) match {
case Some((eg, _)) =>
eg.getAllExecutionVertices.asScala.forall {
case vertex =>
(vertex.getExecutionState == ExecutionState.RUNNING
|| vertex.getExecutionState == ExecutionState.FINISHED)
}
case None => false
}
}
def notifyListeners(jobID: JobID): Unit = {
if(checkIfAllVerticesRunning(jobID)) {
waitForAllVerticesToBeRunning.remove(jobID) match {
case Some(listeners) =>
for (listener <- listeners) {
listener ! decorateMessage(AllVerticesRunning(jobID))
}
case _ =>
}
}
if(checkIfAllVerticesRunningOrFinished(jobID)) {
waitForAllVerticesToBeRunningOrFinished.remove(jobID) match {
case Some(listeners) =>
for (listener <- listeners) {
listener ! decorateMessage(AllVerticesRunning(jobID))
}
case _ =>
}
}
}
/**
* No killing of the VM for testing.
*/
override protected def shutdown(): Unit = {
log.info("Shutting down TestingJobManager.")
waitForShutdown.foreach(_ ! ComponentShutdown(self))
waitForShutdown.clear()
}
}
|
WangTaoTheTonic/flink
|
flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingJobManagerLike.scala
|
Scala
|
apache-2.0
| 15,263 |
package io.reactors.transport
import io.reactors._
/** Delivers locally, to reactors within the current reactor system.
*
* This is usually the default transport in the reactor system, but it is only
* used with newly created channels when the configuration option
* `system.channels.create-as-local` is set to `"false"`.
* Otherwise, local channels use a fast, direct delivery mode, which bypasses
* the transport layer.
*/
class LocalTransport(val system: ReactorSystem) extends Remote.Transport {
def schema = "local"
def port = -1
def newChannel[@spec(Int, Long, Double) T: Arrayable](url: ChannelUrl): Channel[T] = {
new LocalTransport.LocalChannel(this, url)
}
override def shutdown(): Unit = {
}
}
object LocalTransport {
private[transport] class LocalChannel[@spec(Int, Long, Double) T](
val transport: LocalTransport, val url: ChannelUrl
) extends Channel[T] {
def send(x: T): Unit = {
val isoName = url.reactorUrl.name
val chName = url.anchor
transport.system.channels.getLocal[T](isoName, chName) match {
case Some(ch) => ch ! x
case None =>
}
}
}
}
|
storm-enroute/reactors
|
reactors-core/shared/src/main/scala/io/reactors/transport/LocalTransport.scala
|
Scala
|
bsd-3-clause
| 1,158 |
/*
* Copyright (c) 2010 by Alexander Grünewald
*
* This file is part of gruenewa-grid, a grid computing runtime.
*
* gruenewa-grid is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package gruenewa.grid
class Discovery(val group: String, val port: Int) {
import gruenewa.prelude._
import gruenewa.multicast.{ Sender, Receiver }
private val packetSize = 4
private def isMessage(message: Array[Byte]) = message.length == 4
private def createMessage(i: Int): Array[Byte] = Array(((i & 0xFF000000) >> 24).toByte, ((i & 0x00FF0000) >> 16).toByte, ((i & 0x0000FF00) >> 8).toByte, (i & 0x000000FF).toByte)
private def parseMessage(b: Array[Byte]): Int = b(3) & 0xFF | (b(2) & 0xFF) << 8 | (b(1) & 0xFF) << 16 | (b(0) & 0xFF) << 24
private val sendMessage = Sender.send(group, port)(_)
def startHeartbeat(servicePort: Int, sleep: Long = 2000l): Closable = {
import scala.concurrent.ops.spawn
@volatile var shutdown = false
spawn {
while(!shutdown) {
sendMessage(createMessage(servicePort))
Thread.sleep(sleep)
}
}
return new {
def close() { shutdown = true }
}
}
def startListener(callback: (String, Int) => Unit): Closable = {
Receiver.start(group, port, packetSize) { (message, address) =>
if (isMessage(message)) {
callback(address.getHostName, parseMessage(message))
}
}
}
}
object Discovery extends Discovery("230.0.0.1", 4321)
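// Usage sketch for the default group/port above ("230.0.0.1", 4321); the service port
// 9090 and the sleep are arbitrary example values.
object DiscoveryExample {
  def main(args: Array[String]): Unit = {
    // Print every (host, advertised service port) pair seen on the multicast group.
    val listener = Discovery.startListener { (host, port) =>
      println(s"discovered service at $host:$port")
    }
    // Advertise this node's service port every 2 seconds (the default sleep).
    val heartbeat = Discovery.startHeartbeat(servicePort = 9090)
    Thread.sleep(10000)
    heartbeat.close()
    listener.close()
  }
}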
|
gruenewa/gruenewa-grid
|
src/main/scala/gruenewa/grid/Discovery.scala
|
Scala
|
gpl-3.0
| 2,024 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.query.adjusters
import com.twitter.zipkin.Constants
import com.twitter.zipkin.common.{Annotation, Endpoint, Span}
import com.twitter.zipkin.query.Trace
import org.scalatest.FunSuite
import scala.collection._
class TimeSkewAdjusterTest extends FunSuite {
val endpoint1 = Some(Endpoint(123, 123, "service"))
val endpoint2 = Some(Endpoint(321, 321, "service"))
val endpoint3 = Some(Endpoint(456, 456, "service"))
/*
* The trace looks as follows
* endpoint1 calls method1 on endpoint2
* endpoint2 calls method2 on endpoint3
*
* endpoint2 has a clock that is 10 ms before the other endpoints
*
* Timings from a constant perspective (with skew in parenthesis)
* e1 send e2: 100
* e2 rcvd : 105 (-10ms e2 skew = 95)
* e2 send e3: 110 (-10ms e2 skew = 100)
* e3 rcvd : 115
* e3 repl e2: 120
* e2 rcvd : 125 (-10ms e2 skew = 115)
* e2 repl e1: 130 (-10ms e2 skew = 120)
* e1 rcvd : 135
*/
val skewAnn1 = Annotation(100, Constants.ClientSend, endpoint1)
val skewAnn2 = Annotation(95, Constants.ServerRecv, endpoint2) // skewed
val skewAnn3 = Annotation(120, Constants.ServerSend, endpoint2) // skewed
val skewAnn4 = Annotation(135, Constants.ClientRecv, endpoint1)
val skewSpan1 = Span(1, "method1", 666, None,
List(skewAnn1, skewAnn2, skewAnn3, skewAnn4), Nil)
val skewAnn5 = Annotation(100, Constants.ClientSend, endpoint2) // skewed
val skewAnn6 = Annotation(115, Constants.ServerRecv, endpoint3)
val skewAnn7 = Annotation(120, Constants.ServerSend, endpoint3)
val skewAnn8 = Annotation(115, Constants.ClientRecv, endpoint2) // skewed
val skewSpan2 = Span(1, "method2", 777, Some(666),
List(skewAnn5, skewAnn6, skewAnn7, skewAnn8), Nil)
val inputTrace = new Trace(List[Span](skewSpan1, skewSpan2))
/*
* Adjusted timings from a constant perspective
*
* Timings from a constant perspective (with skew in parenthesis)
* e1 send e2: 100
* e2 rcvd : 105 (-10ms e2 skew = 95)
* e2 send e3: 110 (-10ms e2 skew = 100)
* e3 rcvd : 115
* e3 repl e2: 120
* e2 rcvd : 125 (-10ms e2 skew = 115)
* e2 repl e1: 130 (-10ms e2 skew = 120)
* e1 rcvd : 135
*/
val expectedAnn1 = Annotation(100, Constants.ClientSend, endpoint1)
val expectedAnn2 = Annotation(105, Constants.ServerRecv, endpoint2)
val expectedAnn3 = Annotation(130, Constants.ServerSend, endpoint2)
val expectedAnn4 = Annotation(135, Constants.ClientRecv, endpoint1)
val expectedSpan1 = Span(1, "method1", 666, None,
List(expectedAnn1, expectedAnn2, expectedAnn3, expectedAnn4), Nil)
val expectedAnn5 = Annotation(110, Constants.ClientSend, endpoint2)
val expectedAnn6 = Annotation(115, Constants.ServerRecv, endpoint3)
val expectedAnn7 = Annotation(120, Constants.ServerSend, endpoint3)
val expectedAnn8 = Annotation(125, Constants.ClientRecv, endpoint2)
val expectedSpan2 = Span(1, "method2", 777, Some(666),
List(expectedAnn5, expectedAnn6, expectedAnn7, expectedAnn8), Nil)
val expectedTrace = new Trace(List[Span](expectedSpan1, expectedSpan2))
/*
* This represents an RPC call where e2 and e3 was not trace enabled.
*
* Timings from a constant perspective (with skew in parenthesis)
* e1 send e2: 100
* e2 rcvd : 105 (missing)
* e2 send e3: 110 (missing)
* e3 rcvd : 115 (missing)
* e3 repl e2: 120 (missing)
* e2 rcvd : 125 (missing)
* e2 repl e1: 130 (missing)
* e1 rcvd : 135
*/
val incompleteAnn1 = Annotation(100, Constants.ClientSend, endpoint1)
val incompleteAnn4 = Annotation(135, Constants.ClientRecv, endpoint1)
val incompleteSpan1 = Span(1, "method1", 666, None,
List(incompleteAnn1, incompleteAnn4), Nil)
val incompleteTrace = new Trace(List[Span](expectedSpan1))
val epKoalabird = Some(Endpoint(123, 123, "koalabird-cuckoo"))
val epCuckoo = Some(Endpoint(321, 321, "cuckoo.thrift"))
val epCassie = Some(Endpoint(456, 456, "cassie"))
// This is real trace data that currently is not handled well by the adjuster
val ann1 = Annotation(0, Constants.ServerRecv, epCuckoo) // the server recv is reported as before client send
val ann2 = Annotation(1, Constants.ClientSend, epKoalabird)
val ann3 = Annotation(1, Constants.ClientSend, epCassie)
val ann3F = Annotation(0, Constants.ClientSend, epCassie)
val ann4 = Annotation(85, Constants.ServerSend, epCuckoo) // reported at the same time, ok
val ann5 = Annotation(85, Constants.ClientRecv, epKoalabird)
val ann6 = Annotation(87, Constants.ClientRecv, epCassie)
val ann6F = Annotation(86, Constants.ClientRecv, epCassie)
val span1a = Span(1, "ValuesFromSource", 2209720933601260005L, None,
List(ann3, ann6), Nil)
val span1aFixed = Span(1, "ValuesFromSource", 2209720933601260005L, None,
List(ann3F, ann6F), Nil)
val span1b = Span(1, "ValuesFromSource", 2209720933601260005L, None,
List(ann1, ann4), Nil)
// the above two spans are part of the same actual span
val span2 = Span(1, "multiget_slice", -855543208864892776L, Some(2209720933601260005L),
List(ann2, ann5), Nil)
val realTrace = new Trace(List(span1a, span1b, span2))
val expectedRealTrace = new Trace(List(span1aFixed, span1b, span2))
val adjuster = new TimeSkewAdjuster
test("adjust span time from machine with incorrect clock") {
assert(adjuster.adjust(inputTrace) === expectedTrace)
}
test("not adjust when there is no clock skew") {
assert(adjuster.adjust(expectedTrace) === expectedTrace)
}
// this happens if the server in an rpc is not trace enabled
test("not adjust when there are no server spans") {
assert(adjuster.adjust(incompleteTrace) === incompleteTrace)
}
test("not adjust when core annotations are fine") {
val epTfe = Some(Endpoint(123, 123, "tfe"))
val epMonorail = Some(Endpoint(456, 456, "monorail"))
val unicornCs = Annotation(1L, Constants.ClientSend, epTfe)
val monorailSr = Annotation(2L, Constants.ServerRecv, epMonorail)
val monorailSs = Annotation(3L, Constants.ServerSend, epMonorail)
val unicornCr = Annotation(4L, Constants.ClientRecv, epTfe)
val goodSpan = Span(1, "friendships/create", 12345L, None, List(unicornCs, monorailSr, monorailSs, unicornCr), Nil)
val goodTrace = new Trace(Seq(goodSpan))
assert(adjuster.adjust(goodTrace) === goodTrace)
}
test("adjust live case") {
val epTfe = Some(Endpoint(123, 123, "tfe"))
val epMonorail = Some(Endpoint(456, 456, "monorail"))
val rootSr = Annotation(1330539326400951L, Constants.ServerRecv, epTfe)
val rootSs = Annotation(1330539327264251L, Constants.ServerSend, epTfe)
val spanTfe = Span(1, "POST", 7264365917420400007L, None, List(rootSr, rootSs), Nil)
val unicornCs = Annotation(1330539326401999L, Constants.ClientSend, epTfe)
val monorailSr = Annotation(1330539325900366L, Constants.ServerRecv, epMonorail)
val monorailSs = Annotation(1330539326524407L, Constants.ServerSend, epMonorail)
val unicornCr = Annotation(1330539327263984L, Constants.ClientRecv, epTfe)
val spanMonorailUnicorn = Span(1, "friendships/create", 6379677665629798877L, Some(7264365917420400007L), List(unicornCs, monorailSr, monorailSs, unicornCr), Nil)
val adjustedMonorailSr = Annotation(1330539326520971L, Constants.ServerRecv, epMonorail)
val adjustedMonorailSs = Annotation(1330539327145012L, Constants.ServerSend, epMonorail)
val spanAdjustedMonorail = Span(1, "friendships/create", 6379677665629798877L, Some(7264365917420400007L), List(unicornCs, adjustedMonorailSr, adjustedMonorailSs, unicornCr), Nil)
val realTrace = new Trace(Seq(spanTfe, spanMonorailUnicorn))
val expectedAdjustedTrace = new Trace(Seq(spanTfe, spanAdjustedMonorail))
val adjusted = adjuster.adjust(realTrace)
val adjustedSpans = adjusted.spans
val expectedSpans = expectedAdjustedTrace.spans
assert(expectedSpans.length === adjustedSpans.length)
assert(adjustedSpans.length === adjustedSpans.intersect(expectedSpans).length)
}
test("adjust trace with depth 3") {
val epTfe = Some(Endpoint(123, 123, "tfe"))
val epPassbird = Some(Endpoint(456, 456, "passbird"))
val epGizmoduck = Some(Endpoint(789, 789, "gizmoduck"))
val tfeSr = Annotation(1330647964054410L, Constants.ServerRecv, epTfe)
val tfeSs = Annotation(1330647964057394L, Constants.ServerSend, epTfe)
val spanTfe = Span(1, "GET", 583798990668970003L, None, List(tfeSr, tfeSs), Nil)
val tfeCs = Annotation(1330647964054881L, Constants.ClientSend, epTfe)
val passbirdSr = Annotation(1330647964055250L, Constants.ServerRecv, epPassbird)
val passbirdSs = Annotation(1330647964057394L, Constants.ServerSend, epPassbird)
val tfeCr = Annotation(1330647964057764L, Constants.ClientRecv, epTfe)
val spanPassbird = Span(1, "get_user_by_auth_token", 7625434200987291951L, Some(583798990668970003L), List(tfeCs, passbirdSr, passbirdSs, tfeCr), Nil)
// Gizmoduck server entries are missing
val passbirdCs = Annotation(1330647964055324L, Constants.ClientSend, epPassbird)
val passbirdCr = Annotation(1330647964057127L, Constants.ClientRecv, epPassbird)
val spanGizmoduck = Span(1, "get_by_auth_token", 119310086840195752L, Some(7625434200987291951L), List(passbirdCs, passbirdCr), Nil)
val gizmoduckCs = Annotation(1330647963542175L, Constants.ClientSend, epGizmoduck)
val gizmoduckCr = Annotation(1330647963542565L, Constants.ClientRecv, epGizmoduck)
val spanMemcache = Span(1, "Get", 3983355768376203472L, Some(119310086840195752L), List(gizmoduckCs, gizmoduckCr), Nil)
// Adjusted/created annotations
val createdGizmoduckSr = Annotation(1330647964055324L, Constants.ServerRecv, epGizmoduck)
val createdGizmoduckSs = Annotation(1330647964057127L, Constants.ServerSend, epGizmoduck)
val adjustedGizmoduckCs = Annotation(1330647964056030L, Constants.ClientSend, epGizmoduck)
val adjustedGizmoduckCr = Annotation(1330647964056420L, Constants.ClientRecv, epGizmoduck)
val spanAdjustedGizmoduck = Span(1, "get_by_auth_token", 119310086840195752L, Some(7625434200987291951L), List(passbirdCs, passbirdCr, createdGizmoduckSr, createdGizmoduckSs), Nil)
val spanAdjustedMemcache = Span(1, "Get", 3983355768376203472L, Some(119310086840195752L), List(adjustedGizmoduckCs, adjustedGizmoduckCr), Nil)
val realTrace = new Trace(Seq(spanTfe, spanPassbird, spanGizmoduck, spanMemcache))
val adjustedTrace = new Trace(Seq(spanTfe, spanPassbird, spanAdjustedGizmoduck, spanAdjustedMemcache))
assert(adjustedTrace === adjuster.adjust(realTrace))
}
val ep1 = Some(Endpoint(1, 1, "ep1"))
val ep2 = Some(Endpoint(2, 2, "ep2"))
test("not adjust trace if invalid span") {
val cs = Annotation(1L, Constants.ClientSend, ep1)
val sr = Annotation(10L, Constants.ServerRecv, ep2)
val ss = Annotation(11L, Constants.ServerSend, ep2)
val cr = Annotation(4L, Constants.ClientRecv, ep1)
val cr2 = Annotation(5L, Constants.ClientRecv, ep1)
val spanBad = Span(1, "method", 123L, None, List(cs, sr, ss, cr, cr2), Nil)
val spanGood = Span(1, "method", 123L, None, List(cs, sr, ss, cr), Nil)
val trace1 = new Trace(Seq(spanGood))
assert(trace1 != adjuster.adjust(trace1))
val trace2 = new Trace(Seq(spanBad))
assert(trace2 != adjuster.adjust(trace2))
}
test("not adjust trace if child longer than parent") {
val cs = Annotation(1L, Constants.ClientSend, ep1)
val sr = Annotation(2L, Constants.ServerRecv, ep2)
val ss = Annotation(11L, Constants.ServerSend, ep2)
val cr = Annotation(4L, Constants.ClientRecv, ep1)
val span = Span(1, "method", 123L, None, List(cs, sr, ss, cr), Nil)
val trace1 = new Trace(Seq(span))
assert(trace1 === adjuster.adjust(trace1))
}
test("adjust even if we only have client send") {
val tfeService = Endpoint(123, 9455, "api.twitter.com-ssl")
val tfe = Span(142224153997690008L, "GET", 142224153997690008L, None, List(
Annotation(60498165L, Constants.ServerRecv, Some(tfeService)),
Annotation(61031100L, Constants.ServerSend, Some(tfeService))
), Nil)
val monorailService = Endpoint(456, 8000, "monorail")
val clusterTwitterweb = Endpoint(123, -13145, "cluster_twitterweb_unicorn")
val monorail = Span(142224153997690008L, "following/index", 7899774722699781565L, Some(142224153997690008L), List(
Annotation(59501663L, Constants.ServerRecv, Some(monorailService)),
Annotation(59934508L, Constants.ServerSend, Some(monorailService)),
Annotation(60499730L, Constants.ClientSend, Some(clusterTwitterweb)),
Annotation(61030844L, Constants.ClientRecv, Some(clusterTwitterweb))
), Nil)
val tflockService = Endpoint(456, -14238, "tflock")
val flockdbEdgesService = Endpoint(789, 6915, "flockdb_edges")
val tflock = Span(142224153997690008L, "select", 6924056367845423617L, Some(7899774722699781565L), List(
Annotation(59541848L, Constants.ClientSend, Some(tflockService)),
Annotation(59544889L, Constants.ClientRecv, Some(tflockService)),
Annotation(59541031L, Constants.ServerRecv, Some(flockdbEdgesService)),
Annotation(59542894L, Constants.ServerSend, Some(flockdbEdgesService))
), Nil)
val flockService = Endpoint(2130706433, 0, "flock")
val flock = Span(142224153997690008L, "select", 7330066031642813936L, Some(6924056367845423617L), List(
Annotation(59541299L, Constants.ClientSend, Some(flockService)),
Annotation(59542778L, Constants.ClientRecv, Some(flockService))
), Nil)
val trace = new Trace(Seq(monorail, tflock, tfe, flock))
val adjusted = adjuster.adjust(trace)
// let's see how we did
val adjustedFlock = adjusted.getSpanById(7330066031642813936L).get
val adjustedTflock = adjusted.getSpanById(6924056367845423617L).get
val flockCs = adjustedFlock.getAnnotation(Constants.ClientSend).get
val tflockSr = adjustedTflock.getAnnotation(Constants.ServerRecv).get
// tflock must receive the request before it send a request to flock
assert(flockCs.timestamp > tflockSr.timestamp)
}
}
|
Yelp/zipkin
|
zipkin-query/src/test/scala/com/twitter/zipkin/query/adjusters/TimeSkewAdjusterSpec.scala
|
Scala
|
apache-2.0
| 14,818 |
package scribe.output
sealed trait LogOutput extends Any {
def plainText: String
def length: Int = plainText.length
def map(f: String => String): LogOutput
def splitAt(index: Int): (LogOutput, LogOutput)
}
object LogOutput {
lazy val NewLine: LogOutput = new TextOutput("\n")
}
object EmptyOutput extends LogOutput {
override val plainText: String = ""
override def map(f: String => String): LogOutput = f(plainText) match {
case "" => EmptyOutput
case s => new TextOutput(s)
}
override def splitAt(index: Int): (LogOutput, LogOutput) = (EmptyOutput, EmptyOutput)
override def toString: String = "empty"
}
class TextOutput(val plainText: String) extends AnyVal with LogOutput {
override def map(f: String => String): LogOutput = new TextOutput(f(plainText))
override def splitAt(index: Int): (LogOutput, LogOutput) =
(new TextOutput(plainText.substring(0, index)), new TextOutput(plainText.substring(index)))
override def toString: String = s"text($plainText)"
}
class CompositeOutput(val entries: List[LogOutput]) extends LogOutput {
override lazy val plainText: String = entries.map(_.plainText).mkString
override def map(f: String => String): LogOutput = new CompositeOutput(entries.map(_.map(f)))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
def recurse(left: List[LogOutput], right: List[LogOutput], chars: Int): (LogOutput, LogOutput) = {
if (right.isEmpty) {
(new CompositeOutput(left), EmptyOutput)
} else {
val head = right.head
val length = head.length
chars + length match {
case l if l == index => (new CompositeOutput(left ::: List(head)), new CompositeOutput(right.tail))
case l if l > index =>
val (left1, left2) = head.splitAt(index - chars)
(new CompositeOutput(left ::: List(left1)), new CompositeOutput(left2 :: right.tail))
case l => recurse(left ::: List(head), right.tail, l)
}
}
}
recurse(Nil, entries, 0)
}
override def toString: String = s"composite(${entries.mkString(", ")})"
}
class ColoredOutput(val color: Color, val output: LogOutput) extends LogOutput {
override lazy val plainText: String = output.plainText
override def map(f: String => String): LogOutput = new ColoredOutput(color, output.map(f))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
val (left, right) = output.splitAt(index)
(new ColoredOutput(color, left), new ColoredOutput(color, right))
}
}
class BackgroundColoredOutput(val color: Color, val output: LogOutput) extends LogOutput {
override lazy val plainText: String = output.plainText
override def map(f: String => String): LogOutput = new BackgroundColoredOutput(color, output.map(f))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
val (left, right) = output.splitAt(index)
(new BackgroundColoredOutput(color, left), new BackgroundColoredOutput(color, right))
}
}
class URLOutput(val url: String, val output: LogOutput) extends LogOutput {
override def plainText: String = output.plainText
override def map(f: String => String): LogOutput = new URLOutput(url, output.map(f))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
val (left, right) = output.splitAt(index)
(new URLOutput(url, left), new URLOutput(url, right))
}
}
class BoldOutput(val output: LogOutput) extends AnyVal with LogOutput {
override def plainText: String = output.plainText
override def map(f: String => String): LogOutput = new BoldOutput(output.map(f))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
val (left, right) = output.splitAt(index)
(new BoldOutput(left), new BoldOutput(right))
}
}
class ItalicOutput(val output: LogOutput) extends AnyVal with LogOutput {
override def plainText: String = output.plainText
override def map(f: String => String): LogOutput = new ItalicOutput(output.map(f))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
val (left, right) = output.splitAt(index)
(new ItalicOutput(left), new ItalicOutput(right))
}
}
class UnderlineOutput(val output: LogOutput) extends AnyVal with LogOutput {
override def plainText: String = output.plainText
override def map(f: String => String): LogOutput = new UnderlineOutput(output.map(f))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
val (left, right) = output.splitAt(index)
(new UnderlineOutput(left), new UnderlineOutput(right))
}
}
class StrikethroughOutput(val output: LogOutput) extends AnyVal with LogOutput {
override def plainText: String = output.plainText
override def map(f: String => String): LogOutput = new StrikethroughOutput(output.map(f))
override def splitAt(index: Int): (LogOutput, LogOutput) = {
val (left, right) = output.splitAt(index)
(new StrikethroughOutput(left), new StrikethroughOutput(right))
}
}
sealed trait Color {
lazy val name: String = getClass.getSimpleName.replace("$", "").toLowerCase
}
object Color {
case object Black extends Color
case object Blue extends Color
case object Cyan extends Color
case object Green extends Color
case object Magenta extends Color
case object Red extends Color
case object White extends Color
case object Yellow extends Color
case object Gray extends Color
case object BrightBlue extends Color
case object BrightCyan extends Color
case object BrightGreen extends Color
case object BrightMagenta extends Color
case object BrightRed extends Color
case object BrightWhite extends Color
case object BrightYellow extends Color
}
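// Small demonstration of the splitAt and map behaviour defined above; the strings are
// arbitrary example values.
object LogOutputDemo {
  def main(args: Array[String]): Unit = {
    val out = new CompositeOutput(List(
      new TextOutput("hello "),
      new ColoredOutput(Color.Red, new TextOutput("world"))))
    println(out.plainText)                    // hello world
    val (left, right) = out.splitAt(5)
    println(left.plainText)                   // hello
    println(right.plainText)                  //  world (the "world" part stays colored)
    println(out.map(_.toUpperCase).plainText) // HELLO WORLD
  }
}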
|
outr/scribe
|
core/shared/src/main/scala/scribe/output/LogOutput.scala
|
Scala
|
mit
| 5,643 |
package de.tu_berlin.formic.datastructure.json.persistence
import java.io.File
import akka.actor._
import akka.testkit.{ImplicitSender, TestKit}
import com.typesafe.config._
import org.apache.commons.io.FileUtils
import org.scalatest._
import scala.util._
abstract class PersistenceSpec(system: ActorSystem) extends TestKit(system)
with ImplicitSender
with WordSpecLike
with Matchers
with BeforeAndAfterAll
with PersistenceCleanup {
def this(name: String, config: Config) = this(ActorSystem(name, config))
override protected def beforeAll() = deleteStorageLocations()
override protected def afterAll() = {
deleteStorageLocations()
TestKit.shutdownActorSystem(system)
}
def killActors(actors: ActorRef*) = {
actors.foreach { actor =>
watch(actor)
system.stop(actor)
expectTerminated(actor)
Thread.sleep(1000) // Intermittently on Travis the actor name is still registered when the actor is created again after killActors; this sleep is duct tape.
}
}
}
trait PersistenceCleanup {
def system: ActorSystem
val storageLocations = List(
"akka.persistence.journal.leveldb.dir",
"akka.persistence.journal.leveldb-shared.store.dir",
"akka.persistence.snapshot-store.local.dir").map { s =>
new File(system.settings.config.getString(s))
}
def deleteStorageLocations(): Unit = {
storageLocations.foreach(dir => Try(FileUtils.deleteDirectory(dir)))
}
}
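// Hedged sketch of a concrete spec built on the abstract class above. The system name and
// config resource are assumptions; any config that defines the three journal/snapshot
// directories listed in PersistenceCleanup will do.
class SampleEventLogSpec extends PersistenceSpec(
  ActorSystem("SampleEventLogSpec", ConfigFactory.load("test-persistence"))) {
  "A persistent actor" should {
    "recover its state after being killed" in {
      // Create the actor, send it commands, killActors(actor),
      // then recreate it and assert the recovered state here.
    }
  }
}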
|
rbraeunlich/formic
|
json/jvm/src/test/scala/de/tu_berlin/formic/datastructure/json/persistence/PersistenceSpec.scala
|
Scala
|
apache-2.0
| 1,421 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.external.kudu
import org.apache.gearpump.Message
import org.apache.gearpump.cluster.UserConfig
import org.apache.gearpump.external.kudu.KuduSink.KuduWriterFactory
import org.apache.gearpump.streaming.sink.DataSink
import org.apache.gearpump.streaming.task.TaskContext
import org.apache.kudu.Type._
import org.apache.kudu.client._
class KuduSink private[kudu](userConfig: UserConfig, tableName: String, factory: KuduWriterFactory)
extends DataSink {
private lazy val kuduWriter = factory.getKuduWriter(userConfig, tableName)
def this(userConfig: UserConfig, tableName: String) = {
this(userConfig, tableName, new KuduWriterFactory)
}
override def open(context: TaskContext): Unit = {}
override def write(message: Message): Unit = {
kuduWriter.put(message.value)
}
override def close(): Unit = {
kuduWriter.close()
}
}
object KuduSink {
val KUDUSINK = "kudusink"
val TABLE_NAME = "kudu.table.name"
val KUDU_MASTERS = "kudu.masters"
val KUDU_USER = "kudu.user"
def apply[T](userConfig: UserConfig, tableName: String): KuduSink = {
new KuduSink(userConfig, tableName)
}
class KuduWriterFactory extends java.io.Serializable {
def getKuduWriter(userConfig: UserConfig, tableName: String): KuduWriter = {
new KuduWriter(userConfig, tableName)
}
}
class KuduWriter(kuduClient: KuduClient, tableName: String) {
private val table: KuduTable = kuduClient.openTable(tableName)
private lazy val session = kuduClient.newSession()
def this(userConfig: UserConfig, tableName: String) = {
this(new KuduClient.KuduClientBuilder(userConfig.getString(KUDU_MASTERS).get).build(),
tableName)
}
def put(msg: Any): Unit = {
val insert = table.newUpsert()
var partialRow = insert.getRow
msg match {
case tuple: Product =>
for (column <- tuple.productIterator) {
column match {
case (_, _) =>
val columnName: String = column.asInstanceOf[(_, _)]._1.toString
val colValue: String = column.asInstanceOf[(_, _)]._2.toString
val col = table.getSchema.getColumn (columnName)
col.getType match {
case INT8 => partialRow.addByte(columnName, colValue.toByte)
case INT16 => partialRow.addShort(columnName, colValue.toShort)
case INT32 => partialRow.addInt(columnName, colValue.toInt)
case INT64 => partialRow.addLong(columnName, colValue.toLong)
case STRING => partialRow.addString(columnName, colValue)
case BOOL => partialRow.addBoolean(columnName, colValue.toBoolean)
case FLOAT => partialRow.addFloat(columnName, colValue.toFloat)
case DOUBLE => partialRow.addDouble(columnName, colValue.toDouble)
case BINARY => partialRow.addByte(columnName, colValue.toByte)
case _ => throw new UnsupportedOperationException(s"Unknown type ${col.getType}")
}
case _ => throw new UnsupportedOperationException(s"Unknown input format")
}
}
session.apply(insert)
case _ => throw new UnsupportedOperationException(s"Unknown input format")
}
}
def close(): Unit = {
session.close()
kuduClient.close()
}
}
}
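// Hedged sketch of how a message payload is shaped for the sink above, inferred from the
// pattern match in KuduWriter.put: a Product whose elements are (columnName, value) pairs.
// The column names and types are illustrative assumptions.
object KuduSinkPayloadSketch {
  // For a table with an INT32 "id" and a STRING "name" column, this tuple of pairs would
  // be upserted as one row: each pair is matched by `case (_, _)` above and converted
  // according to the column's Kudu type.
  val exampleRow: Product = (("id", "42"), ("name", "alice"))
}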
|
manuzhang/incubator-gearpump
|
external/kudu/src/main/scala/org/apache/gearpump/external/kudu/KuduSink.scala
|
Scala
|
apache-2.0
| 4,217 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package controllers.declaration
import javax.inject.Inject
import connectors.DataCacheConnector
import controllers.{AmlsBaseController, CommonPlayDependencies}
import forms._
import models.declaration.{RenewRegistration, RenewRegistrationNo, RenewRegistrationYes}
import services.{ProgressService, RenewalService, StatusService}
import uk.gov.hmrc.http.HeaderCarrier
import utils.{AuthAction, DeclarationHelper}
import play.api.mvc.MessagesControllerComponents
import views.html.declaration.renew_registration
import scala.concurrent.Future
class RenewRegistrationController @Inject()(val dataCacheConnector: DataCacheConnector,
val authAction: AuthAction,
val progressService: ProgressService,
implicit val statusService: StatusService,
implicit val renewalService: RenewalService,
val ds: CommonPlayDependencies,
val cc: MessagesControllerComponents,
renew_registration: renew_registration) extends AmlsBaseController(ds, cc) {
def get() = authAction.async {
implicit request =>
DeclarationHelper.statusEndDate(request.amlsRefNumber, request.accountTypeId, request.credId) flatMap { maybeEndDate =>
dataCacheConnector.fetch[RenewRegistration](request.credId, RenewRegistration.key) map {
renewRegistration =>
val form = (for {
renew <- renewRegistration
} yield Form2[RenewRegistration](renew)).getOrElse(EmptyForm)
Ok(renew_registration(form, maybeEndDate))
}
}
}
def post() = authAction.async {
implicit request => {
Form2[RenewRegistration](request.body) match {
case f: InvalidForm =>
DeclarationHelper.statusEndDate(request.amlsRefNumber, request.accountTypeId, request.credId) flatMap { maybeEndDate =>
Future.successful(BadRequest(renew_registration(f, maybeEndDate)))
}
case ValidForm(_, data) =>
          dataCacheConnector.save[RenewRegistration](request.credId, RenewRegistration.key, data) flatMap { _ =>
            redirectDependingOnResponse(data, request.amlsRefNumber, request.accountTypeId, request.credId)
          }
}
}
}
private def redirectDependingOnResponse(data: RenewRegistration,
amlsRefNo: Option[String],
accountTypeId: (String, String),
                                          credId: String)(implicit hc: HeaderCarrier) = data match {
case RenewRegistrationYes => Future.successful(Redirect(controllers.renewal.routes.WhatYouNeedController.get))
case RenewRegistrationNo => resolveDeclarationDest(amlsRefNo, accountTypeId, credId)
}
private def resolveDeclarationDest(amlsRefNo: Option[String],
accountTypeId: (String, String),
credId: String)(implicit hc: HeaderCarrier) = {
progressService.getSubmitRedirect(amlsRefNo, accountTypeId, credId) map {
case Some(url) => Redirect(url)
case _ => InternalServerError("Could not get data for redirect")
}
}
}
|
hmrc/amls-frontend
|
app/controllers/declaration/RenewRegistrationController.scala
|
Scala
|
apache-2.0
| 3,945 |
package jp.que.ti.sv.validator.immutable.pram1
import jp.que.ti.sv.NotRequiredBase
import jp.que.ti.sv.ParameterInfo
/**
 * Validator that checks that the string length of a parameter does not exceed the specified maximum.
 */
class Maxlength(val max: Int) extends NotRequiredBase("maxlength") {
def isValidInputed(paramValue: String): Boolean = { max >= trim(paramValue).length() }
override def messageArgs(parameterInfo: ParameterInfo): Seq[String] = parameterInfo.nam4Msg :: max + "" :: Nil
}
object Maxlength {
def apply(max: Int) = if (max <= 24 && max >= 1) {
valids(max)
} else {
new Maxlength(max)
}
private val valids = new Array[Maxlength](25)
(1 to 24).foreach { idx =>
valids(idx) = new Maxlength(idx)
}
}
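// Hypothetical usage sketch (not part of the original file): Maxlength(3) validates that the
// trimmed parameter is at most 3 characters long; instances for lengths 1 to 24 are served
// from the cache built above.
object MaxlengthExample {
  def main(args: Array[String]): Unit = {
    val validator = Maxlength(3)
    println(validator.isValidInputed("abc"))  // true
    println(validator.isValidInputed("abcd")) // false
  }
}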
|
yangiYA/simple-validator
|
simple-valid/src/main/scala/jp/que/ti/sv/validator/immutable/pram1/Maxlength.scala
|
Scala
|
mit
| 759 |
/*
* HashMultiSet.scala
* A hashed MultiSet
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.util
import scala.collection.mutable.HashMap
/**
* An implementation of a MultiSet backed by a HashMap
*/
class HashMultiSet[T] extends MultiSet[T] {
private val map = HashMap[T, Int]()
def apply(t: T): Int = map.getOrElse(t, 0)
/* All modification operations are in place. */
def addOne(t: T): HashMultiSet[T] = {
map += t -> (apply(t) + 1)
this
}
def addMany(t: T, count: Int): HashMultiSet[T] = {
require(count > 0)
map += t -> (apply(t) + count)
this
}
def removeOne(t: T): HashMultiSet[T] = {
apply(t) match {
case 0 => () // Attempting to remove an element not present does not do anything - no failure
case 1 => map -= t
case n => map += t -> (n - 1)
}
this
}
def removeAll(t: T): HashMultiSet[T] = {
map -= t
this
}
def union(that: MultiSet[T]): MultiSet[T] = {
val result = new HashMultiSet[T]()
// This will catch all keys in this map, whether or not they are contained in the other
for { (key, value) <- map } { result.addMany(key, value + that(key)) }
// This will catch all keys in the other map, whether or not they are contained in this
for { (key, value) <- that.counts; if apply(key) == 0 } { result.addMany(key, value) }
result
}
def counts: Set[(T, Int)] = map.toSet
def elements: List[T] = map.toList.map(pair => List.fill(pair._2)(pair._1)).flatten
def map[U](fn: T => U): HashMultiSet[U] = {
val result = new HashMultiSet[U]()
// Different Ts might map to the same U; this is correctly handled by addMany below.
for { (key, value) <- map } { result.addMany(fn(key), value) }
result
}
def foreach[U](fn: T => U): Unit = {
for { (key, value) <- map; i <- 1 to value } { fn(key) }
}
}
object HashMultiSet {
/**
   * Creates a new HashMultiSet and adds the given values.
*/
def apply[T](elems: T*): HashMultiSet[T] = {
val result = new HashMultiSet[T]()
elems.foreach(result.addOne(_))
result
}
}
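// Hypothetical usage sketch (not part of the original file), showing the in-place update
// operations and the counts view defined above.
object HashMultiSetExample {
  def main(args: Array[String]): Unit = {
    val ms = HashMultiSet("a", "b", "a") // a -> 2, b -> 1
    ms.addMany("c", 3)                   // a -> 2, b -> 1, c -> 3
    ms.removeOne("a")                    // a -> 1, b -> 1, c -> 3
    println(ms.counts)                   // Set((a,1), (b,1), (c,3)), in some order
  }
}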
|
bruttenberg/figaro
|
Figaro/src/main/scala/com/cra/figaro/util/HashMultiSet.scala
|
Scala
|
bsd-3-clause
| 2,372 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.time.{Duration, Period, ZoneId}
import java.util.Comparator
import scala.collection.mutable
import scala.reflect.ClassTag
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.{TypeCheckResult, TypeCoercion, UnresolvedAttribute, UnresolvedSeed}
import org.apache.spark.sql.catalyst.expressions.ArraySortLike.NullOrder
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.trees.BinaryLike
import org.apache.spark.sql.catalyst.trees.TreePattern.{ARRAYS_ZIP, CONCAT, TreePattern}
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.catalyst.util.DateTimeUtils._
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.SQLOpenHashSet
import org.apache.spark.unsafe.UTF8StringBuilder
import org.apache.spark.unsafe.array.ByteArrayMethods
import org.apache.spark.unsafe.array.ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH
import org.apache.spark.unsafe.types.{ByteArray, CalendarInterval, UTF8String}
/**
* Base trait for [[BinaryExpression]]s with two arrays of the same element type and implicit
* casting.
*/
trait BinaryArrayExpressionWithImplicitCast extends BinaryExpression
with ImplicitCastInputTypes {
@transient protected lazy val elementType: DataType =
inputTypes.head.asInstanceOf[ArrayType].elementType
override def inputTypes: Seq[AbstractDataType] = {
(left.dataType, right.dataType) match {
case (ArrayType(e1, hasNull1), ArrayType(e2, hasNull2)) =>
TypeCoercion.findTightestCommonType(e1, e2) match {
case Some(dt) => Seq(ArrayType(dt, hasNull1), ArrayType(dt, hasNull2))
case _ => Seq.empty
}
case _ => Seq.empty
}
}
override def checkInputDataTypes(): TypeCheckResult = {
(left.dataType, right.dataType) match {
case (ArrayType(e1, _), ArrayType(e2, _)) if e1.sameType(e2) =>
TypeCheckResult.TypeCheckSuccess
case _ => TypeCheckResult.TypeCheckFailure(s"input to function $prettyName should have " +
s"been two ${ArrayType.simpleString}s with same element type, but it's " +
s"[${left.dataType.catalogString}, ${right.dataType.catalogString}]")
}
}
}
/**
* Given an array or map, returns total number of elements in it.
*/
@ExpressionDescription(
usage = """
_FUNC_(expr) - Returns the size of an array or a map.
The function returns null for null input if spark.sql.legacy.sizeOfNull is set to false or
spark.sql.ansi.enabled is set to true. Otherwise, the function returns -1 for null input.
With the default settings, the function returns -1 for null input.
""",
examples = """
Examples:
> SELECT _FUNC_(array('b', 'd', 'c', 'a'));
4
> SELECT _FUNC_(map('a', 1, 'b', 2));
2
""",
since = "1.5.0",
group = "collection_funcs")
case class Size(child: Expression, legacySizeOfNull: Boolean)
extends UnaryExpression with ExpectsInputTypes {
def this(child: Expression) = this(child, SQLConf.get.legacySizeOfNull)
override def dataType: DataType = IntegerType
override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(ArrayType, MapType))
override def nullable: Boolean = if (legacySizeOfNull) false else super.nullable
override def eval(input: InternalRow): Any = {
val value = child.eval(input)
if (value == null) {
if (legacySizeOfNull) -1 else null
} else child.dataType match {
case _: ArrayType => value.asInstanceOf[ArrayData].numElements()
case _: MapType => value.asInstanceOf[MapData].numElements()
case other => throw QueryExecutionErrors.unsupportedOperandTypeForSizeFunctionError(other)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
if (legacySizeOfNull) {
val childGen = child.genCode(ctx)
ev.copy(code = code"""
boolean ${ev.isNull} = false;
${childGen.code}
${CodeGenerator.javaType(dataType)} ${ev.value} = ${childGen.isNull} ? -1 :
(${childGen.value}).numElements();""", isNull = FalseLiteral)
} else {
defineCodeGen(ctx, ev, c => s"($c).numElements()")
}
}
override protected def withNewChildInternal(newChild: Expression): Size = copy(child = newChild)
}
object Size {
def apply(child: Expression): Size = new Size(child)
}
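// Illustration (not part of the original source), following the eval logic above:
// with legacySizeOfNull = true, SELECT size(NULL) returns -1; with
// spark.sql.legacy.sizeOfNull = false or spark.sql.ansi.enabled = true, it returns NULL.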
/**
* Returns an unordered array containing the keys of the map.
*/
@ExpressionDescription(
usage = "_FUNC_(map) - Returns an unordered array containing the keys of the map.",
examples = """
Examples:
> SELECT _FUNC_(map(1, 'a', 2, 'b'));
[1,2]
""",
group = "map_funcs",
since = "2.0.0")
case class MapKeys(child: Expression)
extends UnaryExpression with ExpectsInputTypes with NullIntolerant {
override def inputTypes: Seq[AbstractDataType] = Seq(MapType)
override def dataType: DataType = ArrayType(child.dataType.asInstanceOf[MapType].keyType)
override def nullSafeEval(map: Any): Any = {
map.asInstanceOf[MapData].keyArray()
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => s"${ev.value} = ($c).keyArray();")
}
override def prettyName: String = "map_keys"
override protected def withNewChildInternal(newChild: Expression): MapKeys =
copy(child = newChild)
}
/**
 * Returns true if the map contains the key.
*/
@ExpressionDescription(
usage = "_FUNC_(map, key) - Returns true if the map contains the key.",
examples = """
Examples:
> SELECT _FUNC_(map(1, 'a', 2, 'b'), 1);
true
> SELECT _FUNC_(map(1, 'a', 2, 'b'), 3);
false
""",
group = "map_funcs",
since = "3.3.0")
case class MapContainsKey(left: Expression, right: Expression)
extends RuntimeReplaceable with BinaryLike[Expression] with ImplicitCastInputTypes {
override lazy val replacement: Expression = ArrayContains(MapKeys(left), right)
override def inputTypes: Seq[AbstractDataType] = {
(left.dataType, right.dataType) match {
case (_, NullType) => Seq.empty
case (MapType(kt, vt, valueContainsNull), dt) =>
TypeCoercion.findWiderTypeWithoutStringPromotionForTwo(kt, dt) match {
case Some(widerType) => Seq(MapType(widerType, vt, valueContainsNull), widerType)
case _ => Seq.empty
}
case _ => Seq.empty
}
}
override def checkInputDataTypes(): TypeCheckResult = {
(left.dataType, right.dataType) match {
case (_, NullType) =>
TypeCheckResult.TypeCheckFailure("Null typed values cannot be used as arguments")
case (MapType(kt, _, _), dt) if kt.sameType(dt) =>
TypeUtils.checkForOrderingExpr(kt, s"function $prettyName")
case _ => TypeCheckResult.TypeCheckFailure(s"Input to function $prettyName should have " +
s"been ${MapType.simpleString} followed by a value with same key type, but it's " +
s"[${left.dataType.catalogString}, ${right.dataType.catalogString}].")
}
}
override def prettyName: String = "map_contains_key"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): Expression = {
copy(newLeft, newRight)
}
}
@ExpressionDescription(
usage = """
_FUNC_(a1, a2, ...) - Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), array(2, 3, 4));
[{"0":1,"1":2},{"0":2,"1":3},{"0":3,"1":4}]
> SELECT _FUNC_(array(1, 2), array(2, 3), array(3, 4));
[{"0":1,"1":2,"2":3},{"0":2,"1":3,"2":4}]
""",
group = "array_funcs",
since = "2.4.0")
case class ArraysZip(children: Seq[Expression], names: Seq[Expression])
extends Expression with ExpectsInputTypes {
def this(children: Seq[Expression]) = {
this(
children,
children.zipWithIndex.map {
case (u: UnresolvedAttribute, _) => Literal(u.nameParts.last)
case (e: NamedExpression, _) if e.resolved => Literal(e.name)
case (e: NamedExpression, _) => NamePlaceholder
case (_, idx) => Literal(idx.toString)
})
}
if (children.size != names.size) {
throw new IllegalArgumentException(
"The numbers of zipped arrays and field names should be the same")
}
final override val nodePatterns: Seq[TreePattern] = Seq(ARRAYS_ZIP)
override lazy val resolved: Boolean =
childrenResolved && checkInputDataTypes().isSuccess && names.forall(_.resolved)
override def inputTypes: Seq[AbstractDataType] = Seq.fill(children.length)(ArrayType)
@transient override lazy val dataType: DataType = {
val fields = arrayElementTypes.zip(names).map {
case (elementType, Literal(name, StringType)) =>
StructField(name.toString, elementType, nullable = true)
}
ArrayType(StructType(fields), containsNull = false)
}
override def nullable: Boolean = children.exists(_.nullable)
@transient private lazy val arrayElementTypes =
children.map(_.dataType.asInstanceOf[ArrayType].elementType)
private def genericArrayData = classOf[GenericArrayData].getName
def emptyInputGenCode(ev: ExprCode): ExprCode = {
ev.copy(code"""
|${CodeGenerator.javaType(dataType)} ${ev.value} = new $genericArrayData(new Object[0]);
|boolean ${ev.isNull} = false;
""".stripMargin)
}
def nonEmptyInputGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val genericInternalRow = classOf[GenericInternalRow].getName
val arrVals = ctx.freshName("arrVals")
val biggestCardinality = ctx.freshName("biggestCardinality")
val currentRow = ctx.freshName("currentRow")
val j = ctx.freshName("j")
val i = ctx.freshName("i")
val args = ctx.freshName("args")
val evals = children.map(_.genCode(ctx))
val getValuesAndCardinalities = evals.zipWithIndex.map { case (eval, index) =>
s"""
|if ($biggestCardinality != -1) {
| ${eval.code}
| if (!${eval.isNull}) {
| $arrVals[$index] = ${eval.value};
| $biggestCardinality = Math.max($biggestCardinality, ${eval.value}.numElements());
| } else {
| $biggestCardinality = -1;
| }
|}
""".stripMargin
}
val splittedGetValuesAndCardinalities = ctx.splitExpressionsWithCurrentInputs(
expressions = getValuesAndCardinalities,
funcName = "getValuesAndCardinalities",
returnType = "int",
makeSplitFunction = body =>
s"""
|$body
|return $biggestCardinality;
""".stripMargin,
      foldFunctions = _.map(funcCall => s"$biggestCardinality = $funcCall;").mkString("\n"),
extraArguments =
("ArrayData[]", arrVals) ::
("int", biggestCardinality) :: Nil)
val getValueForType = arrayElementTypes.zipWithIndex.map { case (eleType, idx) =>
val g = CodeGenerator.getValue(s"$arrVals[$idx]", eleType, i)
s"""
|if ($i < $arrVals[$idx].numElements() && !$arrVals[$idx].isNullAt($i)) {
| $currentRow[$idx] = $g;
|} else {
| $currentRow[$idx] = null;
|}
""".stripMargin
}
val getValueForTypeSplitted = ctx.splitExpressions(
expressions = getValueForType,
funcName = "extractValue",
arguments =
("int", i) ::
("Object[]", currentRow) ::
("ArrayData[]", arrVals) :: Nil)
val initVariables = s"""
|ArrayData[] $arrVals = new ArrayData[${children.length}];
|int $biggestCardinality = 0;
|${CodeGenerator.javaType(dataType)} ${ev.value} = null;
""".stripMargin
ev.copy(code"""
|$initVariables
|$splittedGetValuesAndCardinalities
|boolean ${ev.isNull} = $biggestCardinality == -1;
|if (!${ev.isNull}) {
| Object[] $args = new Object[$biggestCardinality];
| for (int $i = 0; $i < $biggestCardinality; $i ++) {
| Object[] $currentRow = new Object[${children.length}];
| $getValueForTypeSplitted
| $args[$i] = new $genericInternalRow($currentRow);
| }
| ${ev.value} = new $genericArrayData($args);
|}
""".stripMargin)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
if (children.isEmpty) {
emptyInputGenCode(ev)
} else {
nonEmptyInputGenCode(ctx, ev)
}
}
override def eval(input: InternalRow): Any = {
val inputArrays = children.map(_.eval(input).asInstanceOf[ArrayData])
if (inputArrays.contains(null)) {
null
} else {
val biggestCardinality = if (inputArrays.isEmpty) {
0
} else {
inputArrays.map(_.numElements()).max
}
val result = new Array[InternalRow](biggestCardinality)
val zippedArrs: Seq[(ArrayData, Int)] = inputArrays.zipWithIndex
for (i <- 0 until biggestCardinality) {
val currentLayer: Seq[Object] = zippedArrs.map { case (arr, index) =>
if (i < arr.numElements() && !arr.isNullAt(i)) {
arr.get(i, arrayElementTypes(index))
} else {
null
}
}
result(i) = InternalRow.apply(currentLayer: _*)
}
new GenericArrayData(result)
}
}
override def prettyName: String = "arrays_zip"
override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): ArraysZip =
copy(children = newChildren)
}
object ArraysZip {
def apply(children: Seq[Expression]): ArraysZip = {
new ArraysZip(children)
}
}
/**
* Returns an unordered array containing the values of the map.
*/
@ExpressionDescription(
usage = "_FUNC_(map) - Returns an unordered array containing the values of the map.",
examples = """
Examples:
> SELECT _FUNC_(map(1, 'a', 2, 'b'));
["a","b"]
""",
group = "map_funcs",
since = "2.0.0")
case class MapValues(child: Expression)
extends UnaryExpression with ExpectsInputTypes with NullIntolerant {
override def inputTypes: Seq[AbstractDataType] = Seq(MapType)
override def dataType: DataType = ArrayType(child.dataType.asInstanceOf[MapType].valueType)
override def nullSafeEval(map: Any): Any = {
map.asInstanceOf[MapData].valueArray()
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => s"${ev.value} = ($c).valueArray();")
}
override def prettyName: String = "map_values"
override protected def withNewChildInternal(newChild: Expression): MapValues =
copy(child = newChild)
}
/**
* Returns an unordered array of all entries in the given map.
*/
@ExpressionDescription(
usage = "_FUNC_(map) - Returns an unordered array of all entries in the given map.",
examples = """
Examples:
> SELECT _FUNC_(map(1, 'a', 2, 'b'));
[{"key":1,"value":"a"},{"key":2,"value":"b"}]
""",
group = "map_funcs",
since = "3.0.0")
case class MapEntries(child: Expression)
extends UnaryExpression with ExpectsInputTypes with NullIntolerant {
override def inputTypes: Seq[AbstractDataType] = Seq(MapType)
@transient private lazy val childDataType: MapType = child.dataType.asInstanceOf[MapType]
private lazy val internalDataType: DataType = {
ArrayType(
StructType(
StructField("key", childDataType.keyType, false) ::
StructField("value", childDataType.valueType, childDataType.valueContainsNull) ::
Nil),
false)
}
override def dataType: DataType = internalDataType
override protected def nullSafeEval(input: Any): Any = {
val childMap = input.asInstanceOf[MapData]
val keys = childMap.keyArray()
val values = childMap.valueArray()
val length = childMap.numElements()
val resultData = new Array[AnyRef](length)
var i = 0
while (i < length) {
val key = keys.get(i, childDataType.keyType)
val value = values.get(i, childDataType.valueType)
val row = new GenericInternalRow(Array[Any](key, value))
resultData.update(i, row)
i += 1
}
new GenericArrayData(resultData)
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => {
val arrayData = ctx.freshName("arrayData")
val numElements = ctx.freshName("numElements")
val keys = ctx.freshName("keys")
val values = ctx.freshName("values")
val isKeyPrimitive = CodeGenerator.isPrimitiveType(childDataType.keyType)
val isValuePrimitive = CodeGenerator.isPrimitiveType(childDataType.valueType)
val wordSize = UnsafeRow.WORD_SIZE
val structSize = UnsafeRow.calculateBitSetWidthInBytes(2) + wordSize * 2
val (isPrimitive, elementSize) = if (isKeyPrimitive && isValuePrimitive) {
(true, structSize + wordSize)
} else {
(false, -1)
}
val allocation =
s"""
|ArrayData $arrayData = ArrayData.allocateArrayData(
| $elementSize, $numElements, " $prettyName failed.");
""".stripMargin
val code = if (isPrimitive) {
val genCodeForPrimitive = genCodeForPrimitiveElements(
ctx, arrayData, keys, values, ev.value, numElements, structSize)
s"""
|if ($arrayData instanceof UnsafeArrayData) {
| $genCodeForPrimitive
|} else {
| ${genCodeForAnyElements(ctx, arrayData, keys, values, ev.value, numElements)}
|}
""".stripMargin
} else {
s"${genCodeForAnyElements(ctx, arrayData, keys, values, ev.value, numElements)}"
}
s"""
|final int $numElements = $c.numElements();
|final ArrayData $keys = $c.keyArray();
|final ArrayData $values = $c.valueArray();
|$allocation
|$code
""".stripMargin
})
}
private def getKey(varName: String, index: String) =
CodeGenerator.getValue(varName, childDataType.keyType, index)
private def getValue(varName: String, index: String) =
CodeGenerator.getValue(varName, childDataType.valueType, index)
private def genCodeForPrimitiveElements(
ctx: CodegenContext,
arrayData: String,
keys: String,
values: String,
resultArrayData: String,
numElements: String,
structSize: Int): String = {
val unsafeArrayData = ctx.freshName("unsafeArrayData")
val baseObject = ctx.freshName("baseObject")
val unsafeRow = ctx.freshName("unsafeRow")
val structsOffset = ctx.freshName("structsOffset")
val offset = ctx.freshName("offset")
val z = ctx.freshName("z")
val calculateHeader = "UnsafeArrayData.calculateHeaderPortionInBytes"
val baseOffset = "Platform.BYTE_ARRAY_OFFSET"
val wordSize = UnsafeRow.WORD_SIZE
val structSizeAsLong = s"${structSize}L"
val setKey = CodeGenerator.setColumn(unsafeRow, childDataType.keyType, 0, getKey(keys, z))
val valueAssignmentChecked = CodeGenerator.createArrayAssignment(
unsafeRow, childDataType.valueType, values, "1", z, childDataType.valueContainsNull)
s"""
|UnsafeArrayData $unsafeArrayData = (UnsafeArrayData)$arrayData;
|Object $baseObject = $unsafeArrayData.getBaseObject();
|final int $structsOffset = $calculateHeader($numElements) + $numElements * $wordSize;
|UnsafeRow $unsafeRow = new UnsafeRow(2);
|for (int $z = 0; $z < $numElements; $z++) {
| long $offset = $structsOffset + $z * $structSizeAsLong;
| $unsafeArrayData.setLong($z, ($offset << 32) + $structSizeAsLong);
| $unsafeRow.pointTo($baseObject, $baseOffset + $offset, $structSize);
| $setKey;
| $valueAssignmentChecked
|}
|$resultArrayData = $arrayData;
""".stripMargin
}
private def genCodeForAnyElements(
ctx: CodegenContext,
arrayData: String,
keys: String,
values: String,
resultArrayData: String,
numElements: String): String = {
val z = ctx.freshName("z")
val isValuePrimitive = CodeGenerator.isPrimitiveType(childDataType.valueType)
val getValueWithCheck = if (childDataType.valueContainsNull && isValuePrimitive) {
s"$values.isNullAt($z) ? null : (Object)${getValue(values, z)}"
} else {
getValue(values, z)
}
val rowClass = classOf[GenericInternalRow].getName
val genericArrayDataClass = classOf[GenericArrayData].getName
val genericArrayData = ctx.freshName("genericArrayData")
val rowObject = s"new $rowClass(new Object[]{${getKey(keys, z)}, $getValueWithCheck})"
s"""
|$genericArrayDataClass $genericArrayData = ($genericArrayDataClass)$arrayData;
|for (int $z = 0; $z < $numElements; $z++) {
| $genericArrayData.update($z, $rowObject);
|}
|$resultArrayData = $arrayData;
""".stripMargin
}
override def prettyName: String = "map_entries"
override def withNewChildInternal(newChild: Expression): MapEntries = copy(child = newChild)
}
/**
* Returns the union of all the given maps.
*/
@ExpressionDescription(
usage = "_FUNC_(map, ...) - Returns the union of all the given maps",
examples = """
Examples:
> SELECT _FUNC_(map(1, 'a', 2, 'b'), map(3, 'c'));
{1:"a",2:"b",3:"c"}
""",
group = "map_funcs",
since = "2.4.0")
case class MapConcat(children: Seq[Expression]) extends ComplexTypeMergingExpression {
override def checkInputDataTypes(): TypeCheckResult = {
val funcName = s"function $prettyName"
if (children.exists(!_.dataType.isInstanceOf[MapType])) {
TypeCheckResult.TypeCheckFailure(
s"input to $funcName should all be of type map, but it's " +
children.map(_.dataType.catalogString).mkString("[", ", ", "]"))
} else {
val sameTypeCheck = TypeUtils.checkForSameTypeInputExpr(children.map(_.dataType), funcName)
if (sameTypeCheck.isFailure) {
sameTypeCheck
} else {
TypeUtils.checkForMapKeyType(dataType.keyType)
}
}
}
@transient override lazy val dataType: MapType = {
if (children.isEmpty) {
MapType(StringType, StringType)
} else {
super.dataType.asInstanceOf[MapType]
}
}
override def nullable: Boolean = children.exists(_.nullable)
private lazy val mapBuilder = new ArrayBasedMapBuilder(dataType.keyType, dataType.valueType)
override def eval(input: InternalRow): Any = {
val maps = children.map(_.eval(input).asInstanceOf[MapData])
if (maps.contains(null)) {
return null
}
for (map <- maps) {
mapBuilder.putAll(map.keyArray(), map.valueArray())
}
mapBuilder.build()
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val mapCodes = children.map(_.genCode(ctx))
val argsName = ctx.freshName("args")
val hasNullName = ctx.freshName("hasNull")
val builderTerm = ctx.addReferenceObj("mapBuilder", mapBuilder)
val assignments = mapCodes.zip(children.map(_.nullable)).zipWithIndex.map {
case ((m, true), i) =>
s"""
|if (!$hasNullName) {
| ${m.code}
| if (!${m.isNull}) {
| $argsName[$i] = ${m.value};
| } else {
| $hasNullName = true;
| }
|}
""".stripMargin
case ((m, false), i) =>
s"""
|if (!$hasNullName) {
| ${m.code}
| $argsName[$i] = ${m.value};
|}
""".stripMargin
}
val prepareMaps = ctx.splitExpressionsWithCurrentInputs(
expressions = assignments,
funcName = "getMapConcatInputs",
extraArguments = (s"MapData[]", argsName) :: ("boolean", hasNullName) :: Nil,
returnType = "boolean",
makeSplitFunction = body =>
s"""
|$body
|return $hasNullName;
""".stripMargin,
      foldFunctions = _.map(funcCall => s"$hasNullName = $funcCall;").mkString("\n")
)
val idxName = ctx.freshName("idx")
val mapMerge =
s"""
|for (int $idxName = 0; $idxName < $argsName.length; $idxName++) {
| $builderTerm.putAll($argsName[$idxName].keyArray(), $argsName[$idxName].valueArray());
|}
|${ev.value} = $builderTerm.build();
""".stripMargin
ev.copy(
code = code"""
|MapData[] $argsName = new MapData[${mapCodes.size}];
|boolean $hasNullName = false;
|$prepareMaps
|boolean ${ev.isNull} = $hasNullName;
|MapData ${ev.value} = null;
|if (!$hasNullName) {
| $mapMerge
|}
""".stripMargin)
}
override def prettyName: String = "map_concat"
override def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): MapConcat =
copy(children = newChildren)
}
/**
* Returns a map created from the given array of entries.
*/
@ExpressionDescription(
usage = "_FUNC_(arrayOfEntries) - Returns a map created from the given array of entries.",
examples = """
Examples:
> SELECT _FUNC_(array(struct(1, 'a'), struct(2, 'b')));
{1:"a",2:"b"}
""",
group = "map_funcs",
since = "2.4.0")
case class MapFromEntries(child: Expression) extends UnaryExpression with NullIntolerant {
@transient
private lazy val dataTypeDetails: Option[(MapType, Boolean, Boolean)] = child.dataType match {
case ArrayType(
StructType(Array(
StructField(_, keyType, keyNullable, _),
StructField(_, valueType, valueNullable, _))),
containsNull) => Some((MapType(keyType, valueType, valueNullable), keyNullable, containsNull))
case _ => None
}
@transient private lazy val nullEntries: Boolean = dataTypeDetails.get._3
override def nullable: Boolean = child.nullable || nullEntries
@transient override lazy val dataType: MapType = dataTypeDetails.get._1
override def checkInputDataTypes(): TypeCheckResult = dataTypeDetails match {
case Some((mapType, _, _)) =>
TypeUtils.checkForMapKeyType(mapType.keyType)
case None => TypeCheckResult.TypeCheckFailure(s"'${child.sql}' is of " +
s"${child.dataType.catalogString} type. $prettyName accepts only arrays of pair structs.")
}
private lazy val mapBuilder = new ArrayBasedMapBuilder(dataType.keyType, dataType.valueType)
override protected def nullSafeEval(input: Any): Any = {
val entries = input.asInstanceOf[ArrayData]
val numEntries = entries.numElements()
var i = 0
if (nullEntries) {
while (i < numEntries) {
if (entries.isNullAt(i)) return null
i += 1
}
}
i = 0
while (i < numEntries) {
mapBuilder.put(entries.getStruct(i, 2))
i += 1
}
mapBuilder.build()
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => {
val numEntries = ctx.freshName("numEntries")
val builderTerm = ctx.addReferenceObj("mapBuilder", mapBuilder)
val i = ctx.freshName("idx")
ctx.nullArrayElementsSaveExec(nullEntries, ev.isNull, c) {
s"""
|final int $numEntries = $c.numElements();
|for (int $i = 0; $i < $numEntries; $i++) {
| $builderTerm.put($c.getStruct($i, 2));
|}
|${ev.value} = $builderTerm.build();
""".stripMargin
}
})
}
override def prettyName: String = "map_from_entries"
override protected def withNewChildInternal(newChild: Expression): MapFromEntries =
copy(child = newChild)
}
/**
* Common base class for [[SortArray]] and [[ArraySort]].
*/
trait ArraySortLike extends ExpectsInputTypes {
protected def arrayExpression: Expression
protected def nullOrder: NullOrder
@transient private lazy val lt: Comparator[Any] = {
val ordering = arrayExpression.dataType match {
case _ @ ArrayType(n: AtomicType, _) => n.ordering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(a: ArrayType, _) => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(s: StructType, _) => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
}
(o1: Any, o2: Any) => {
if (o1 == null && o2 == null) {
0
} else if (o1 == null) {
nullOrder
} else if (o2 == null) {
-nullOrder
} else {
ordering.compare(o1, o2)
}
}
}
@transient private lazy val gt: Comparator[Any] = {
val ordering = arrayExpression.dataType match {
case _ @ ArrayType(n: AtomicType, _) => n.ordering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(a: ArrayType, _) => a.interpretedOrdering.asInstanceOf[Ordering[Any]]
case _ @ ArrayType(s: StructType, _) => s.interpretedOrdering.asInstanceOf[Ordering[Any]]
}
(o1: Any, o2: Any) => {
if (o1 == null && o2 == null) {
0
} else if (o1 == null) {
-nullOrder
} else if (o2 == null) {
nullOrder
} else {
ordering.compare(o2, o1)
}
}
}
@transient lazy val elementType: DataType =
arrayExpression.dataType.asInstanceOf[ArrayType].elementType
def containsNull: Boolean = arrayExpression.dataType.asInstanceOf[ArrayType].containsNull
def sortEval(array: Any, ascending: Boolean): Any = {
val data = array.asInstanceOf[ArrayData].toArray[AnyRef](elementType)
if (elementType != NullType) {
java.util.Arrays.sort(data, if (ascending) lt else gt)
}
new GenericArrayData(data.asInstanceOf[Array[Any]])
}
def sortCodegen(ctx: CodegenContext, ev: ExprCode, base: String, order: String): String = {
val genericArrayData = classOf[GenericArrayData].getName
val unsafeArrayData = classOf[UnsafeArrayData].getName
val array = ctx.freshName("array")
val c = ctx.freshName("c")
if (elementType == NullType) {
s"${ev.value} = $base.copy();"
} else {
val elementTypeTerm = ctx.addReferenceObj("elementTypeTerm", elementType)
val sortOrder = ctx.freshName("sortOrder")
val o1 = ctx.freshName("o1")
val o2 = ctx.freshName("o2")
val jt = CodeGenerator.javaType(elementType)
val comp = if (CodeGenerator.isPrimitiveType(elementType)) {
val bt = CodeGenerator.boxedType(elementType)
val v1 = ctx.freshName("v1")
val v2 = ctx.freshName("v2")
s"""
|$jt $v1 = (($bt) $o1).${jt}Value();
|$jt $v2 = (($bt) $o2).${jt}Value();
|int $c = ${ctx.genComp(elementType, v1, v2)};
""".stripMargin
} else {
s"int $c = ${ctx.genComp(elementType, s"(($jt) $o1)", s"(($jt) $o2)")};"
}
val canPerformFastSort =
CodeGenerator.isPrimitiveType(elementType) && elementType != BooleanType && !containsNull
val nonNullPrimitiveAscendingSort = if (canPerformFastSort) {
val javaType = CodeGenerator.javaType(elementType)
val primitiveTypeName = CodeGenerator.primitiveTypeName(elementType)
s"""
|if ($order) {
| $javaType[] $array = $base.to${primitiveTypeName}Array();
| java.util.Arrays.sort($array);
| ${ev.value} = $unsafeArrayData.fromPrimitiveArray($array);
|} else
""".stripMargin
} else {
""
}
s"""
|$nonNullPrimitiveAscendingSort
|{
| Object[] $array = $base.toObjectArray($elementTypeTerm);
| final int $sortOrder = $order ? 1 : -1;
| java.util.Arrays.sort($array, new java.util.Comparator() {
| @Override public int compare(Object $o1, Object $o2) {
| if ($o1 == null && $o2 == null) {
| return 0;
| } else if ($o1 == null) {
| return $sortOrder * $nullOrder;
| } else if ($o2 == null) {
| return -$sortOrder * $nullOrder;
| }
| $comp
| return $sortOrder * $c;
| }
| });
| ${ev.value} = new $genericArrayData($array);
|}
""".stripMargin
}
}
}
object ArraySortLike {
type NullOrder = Int
// Least: place null element at the first of the array for ascending order
// Greatest: place null element at the end of the array for ascending order
object NullOrder {
val Least: NullOrder = -1
val Greatest: NullOrder = 1
}
}
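// Illustration (not part of the original source): with NullOrder.Least, ascending sort_array
// places nulls first; the descending comparator (gt) negates the null order, so the same
// nulls end up last, e.g.
//   sort_array(array('b', null, 'a'), true)  => [null, "a", "b"]
//   sort_array(array('b', null, 'a'), false) => ["b", "a", null]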
/**
* Sorts the input array in ascending / descending order according to the natural ordering of
* the array elements and returns it.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = """
_FUNC_(array[, ascendingOrder]) - Sorts the input array in ascending or descending order
according to the natural ordering of the array elements. NaN is greater than any non-NaN
elements for double/float type. Null elements will be placed at the beginning of the returned
array in ascending order or at the end of the returned array in descending order.
""",
examples = """
Examples:
> SELECT _FUNC_(array('b', 'd', null, 'c', 'a'), true);
[null,"a","b","c","d"]
""",
group = "array_funcs",
since = "1.5.0")
// scalastyle:on line.size.limit
case class SortArray(base: Expression, ascendingOrder: Expression)
extends BinaryExpression with ArraySortLike with NullIntolerant {
def this(e: Expression) = this(e, Literal(true))
override def left: Expression = base
override def right: Expression = ascendingOrder
override def dataType: DataType = base.dataType
override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType, BooleanType)
override def arrayExpression: Expression = base
override def nullOrder: NullOrder = NullOrder.Least
override def checkInputDataTypes(): TypeCheckResult = base.dataType match {
case ArrayType(dt, _) if RowOrdering.isOrderable(dt) =>
ascendingOrder match {
case Literal(_: Boolean, BooleanType) =>
TypeCheckResult.TypeCheckSuccess
case _ =>
TypeCheckResult.TypeCheckFailure(
"Sort order in second argument requires a boolean literal.")
}
case ArrayType(dt, _) =>
val dtSimple = dt.catalogString
TypeCheckResult.TypeCheckFailure(
s"$prettyName does not support sorting array of type $dtSimple which is not orderable")
case _ =>
TypeCheckResult.TypeCheckFailure(s"$prettyName only supports array input.")
}
override def nullSafeEval(array: Any, ascending: Any): Any = {
sortEval(array, ascending.asInstanceOf[Boolean])
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (b, order) => sortCodegen(ctx, ev, b, order))
}
override def prettyName: String = "sort_array"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): SortArray =
copy(base = newLeft, ascendingOrder = newRight)
}
/**
* Returns a random permutation of the given array.
*/
@ExpressionDescription(
usage = "_FUNC_(array) - Returns a random permutation of the given array.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 20, 3, 5));
[3,1,5,20]
> SELECT _FUNC_(array(1, 20, null, 3));
[20,null,3,1]
""",
note = """
The function is non-deterministic.
""",
group = "array_funcs",
since = "2.4.0")
case class Shuffle(child: Expression, randomSeed: Option[Long] = None)
extends UnaryExpression with ExpectsInputTypes with Stateful with ExpressionWithRandomSeed {
def this(child: Expression) = this(child, None)
override def seedExpression: Expression = randomSeed.map(Literal.apply).getOrElse(UnresolvedSeed)
override def withNewSeed(seed: Long): Shuffle = copy(randomSeed = Some(seed))
override lazy val resolved: Boolean =
childrenResolved && checkInputDataTypes().isSuccess && randomSeed.isDefined
override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType)
override def dataType: DataType = child.dataType
@transient lazy val elementType: DataType = dataType.asInstanceOf[ArrayType].elementType
@transient private[this] var random: RandomIndicesGenerator = _
override protected def initializeInternal(partitionIndex: Int): Unit = {
random = RandomIndicesGenerator(randomSeed.get + partitionIndex)
}
override protected def evalInternal(input: InternalRow): Any = {
val value = child.eval(input)
if (value == null) {
null
} else {
val source = value.asInstanceOf[ArrayData]
val numElements = source.numElements()
val indices = random.getNextIndices(numElements)
new GenericArrayData(indices.map(source.get(_, elementType)))
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => shuffleArrayCodeGen(ctx, ev, c))
}
private def shuffleArrayCodeGen(ctx: CodegenContext, ev: ExprCode, childName: String): String = {
val randomClass = classOf[RandomIndicesGenerator].getName
val rand = ctx.addMutableState(randomClass, "rand", forceInline = true)
ctx.addPartitionInitializationStatement(
s"$rand = new $randomClass(${randomSeed.get}L + partitionIndex);")
val numElements = ctx.freshName("numElements")
val arrayData = ctx.freshName("arrayData")
val indices = ctx.freshName("indices")
val i = ctx.freshName("i")
val initialization = CodeGenerator.createArrayData(
arrayData, elementType, numElements, s" $prettyName failed.")
val assignment = CodeGenerator.createArrayAssignment(arrayData, elementType, childName,
i, s"$indices[$i]", dataType.asInstanceOf[ArrayType].containsNull)
s"""
|int $numElements = $childName.numElements();
|int[] $indices = $rand.getNextIndices($numElements);
|$initialization
|for (int $i = 0; $i < $numElements; $i++) {
| $assignment
|}
|${ev.value} = $arrayData;
""".stripMargin
}
override def freshCopy(): Shuffle = Shuffle(child, randomSeed)
override def withNewChildInternal(newChild: Expression): Shuffle = copy(child = newChild)
}
/**
* Returns a reversed string or an array with reverse order of elements.
*/
@ExpressionDescription(
usage = "_FUNC_(array) - Returns a reversed string or an array with reverse order of elements.",
examples = """
Examples:
> SELECT _FUNC_('Spark SQL');
LQS krapS
> SELECT _FUNC_(array(2, 1, 4, 3));
[3,4,1,2]
""",
group = "collection_funcs",
since = "1.5.0",
note = """
Reverse logic for arrays is available since 2.4.0.
"""
)
case class Reverse(child: Expression)
extends UnaryExpression with ImplicitCastInputTypes with NullIntolerant {
// Input types are utilized by type coercion in ImplicitTypeCasts.
override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(StringType, ArrayType))
override def dataType: DataType = child.dataType
override def nullSafeEval(input: Any): Any = doReverse(input)
@transient private lazy val doReverse: Any => Any = dataType match {
case ArrayType(elementType, _) =>
input => {
val arrayData = input.asInstanceOf[ArrayData]
new GenericArrayData(arrayData.toObjectArray(elementType).reverse)
}
case StringType => _.asInstanceOf[UTF8String].reverse()
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => dataType match {
case _: StringType => stringCodeGen(ev, c)
case _: ArrayType => arrayCodeGen(ctx, ev, c)
})
}
private def stringCodeGen(ev: ExprCode, childName: String): String = {
s"${ev.value} = ($childName).reverse();"
}
private def arrayCodeGen(ctx: CodegenContext, ev: ExprCode, childName: String): String = {
val numElements = ctx.freshName("numElements")
val arrayData = ctx.freshName("arrayData")
val i = ctx.freshName("i")
val j = ctx.freshName("j")
val elementType = dataType.asInstanceOf[ArrayType].elementType
val initialization = CodeGenerator.createArrayData(
arrayData, elementType, numElements, s" $prettyName failed.")
val assignment = CodeGenerator.createArrayAssignment(
arrayData, elementType, childName, i, j, dataType.asInstanceOf[ArrayType].containsNull)
s"""
|final int $numElements = $childName.numElements();
|$initialization
|for (int $i = 0; $i < $numElements; $i++) {
| int $j = $numElements - $i - 1;
| $assignment
|}
|${ev.value} = $arrayData;
""".stripMargin
}
override def prettyName: String = "reverse"
override protected def withNewChildInternal(newChild: Expression): Reverse =
copy(child = newChild)
}
/**
* Checks if the array (left) has the element (right)
*/
@ExpressionDescription(
usage = "_FUNC_(array, value) - Returns true if the array contains the value.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), 2);
true
""",
group = "array_funcs",
since = "1.5.0")
case class ArrayContains(left: Expression, right: Expression)
extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant {
override def dataType: DataType = BooleanType
@transient private lazy val ordering: Ordering[Any] =
TypeUtils.getInterpretedOrdering(right.dataType)
override def inputTypes: Seq[AbstractDataType] = {
(left.dataType, right.dataType) match {
case (_, NullType) => Seq.empty
case (ArrayType(e1, hasNull), e2) =>
TypeCoercion.findWiderTypeWithoutStringPromotionForTwo(e1, e2) match {
case Some(dt) => Seq(ArrayType(dt, hasNull), dt)
case _ => Seq.empty
}
case _ => Seq.empty
}
}
override def checkInputDataTypes(): TypeCheckResult = {
(left.dataType, right.dataType) match {
case (_, NullType) =>
TypeCheckResult.TypeCheckFailure("Null typed values cannot be used as arguments")
case (ArrayType(e1, _), e2) if e1.sameType(e2) =>
TypeUtils.checkForOrderingExpr(e2, s"function $prettyName")
case _ => TypeCheckResult.TypeCheckFailure(s"Input to function $prettyName should have " +
s"been ${ArrayType.simpleString} followed by a value with same element type, but it's " +
s"[${left.dataType.catalogString}, ${right.dataType.catalogString}].")
}
}
override def nullable: Boolean = {
left.nullable || right.nullable || left.dataType.asInstanceOf[ArrayType].containsNull
}
override def nullSafeEval(arr: Any, value: Any): Any = {
var hasNull = false
arr.asInstanceOf[ArrayData].foreach(right.dataType, (i, v) =>
if (v == null) {
hasNull = true
} else if (ordering.equiv(v, value)) {
return true
}
)
if (hasNull) {
null
} else {
false
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (arr, value) => {
val i = ctx.freshName("i")
val getValue = CodeGenerator.getValue(arr, right.dataType, i)
val loopBodyCode = if (nullable) {
s"""
|if ($arr.isNullAt($i)) {
| ${ev.isNull} = true;
|} else if (${ctx.genEqual(right.dataType, value, getValue)}) {
| ${ev.isNull} = false;
| ${ev.value} = true;
| break;
|}
""".stripMargin
} else {
s"""
|if (${ctx.genEqual(right.dataType, value, getValue)}) {
| ${ev.value} = true;
| break;
|}
""".stripMargin
}
s"""
|for (int $i = 0; $i < $arr.numElements(); $i ++) {
| $loopBodyCode
|}
""".stripMargin
})
}
override def prettyName: String = "array_contains"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArrayContains =
copy(left = newLeft, right = newRight)
}
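// Illustration (not part of the original source): when the value is not found and the array
// contains a null element, the result is NULL rather than false, e.g.
//   SELECT array_contains(array(1, null, 3), 2)  => NULL
//   SELECT array_contains(array(1, null, 3), 3)  => true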
/**
* Checks if the two arrays contain at least one common element.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
  usage = "_FUNC_(a1, a2) - Returns true if a1 contains at least one non-null element that is also present in a2. If the arrays have no common element, they are both non-empty, and either of them contains a null element, null is returned; otherwise, false is returned.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), array(3, 4, 5));
true
""",
group = "array_funcs",
since = "2.4.0")
// scalastyle:on line.size.limit
case class ArraysOverlap(left: Expression, right: Expression)
extends BinaryArrayExpressionWithImplicitCast with NullIntolerant {
override def checkInputDataTypes(): TypeCheckResult = super.checkInputDataTypes() match {
case TypeCheckResult.TypeCheckSuccess =>
TypeUtils.checkForOrderingExpr(elementType, s"function $prettyName")
case failure => failure
}
@transient private lazy val ordering: Ordering[Any] =
TypeUtils.getInterpretedOrdering(elementType)
@transient private lazy val doEvaluation = if (TypeUtils.typeWithProperEquals(elementType)) {
fastEval _
} else {
bruteForceEval _
}
override def dataType: DataType = BooleanType
override def nullable: Boolean = {
left.nullable || right.nullable || left.dataType.asInstanceOf[ArrayType].containsNull ||
right.dataType.asInstanceOf[ArrayType].containsNull
}
override def nullSafeEval(a1: Any, a2: Any): Any = {
doEvaluation(a1.asInstanceOf[ArrayData], a2.asInstanceOf[ArrayData])
}
/**
* A fast implementation which puts all the elements from the smaller array in a set
* and then performs a lookup on it for each element of the bigger one.
   * This eval mode works only for data types which properly implement the equals method.
*/
private def fastEval(arr1: ArrayData, arr2: ArrayData): Any = {
var hasNull = false
val (bigger, smaller) = if (arr1.numElements() > arr2.numElements()) {
(arr1, arr2)
} else {
(arr2, arr1)
}
if (smaller.numElements() > 0) {
val smallestSet = new java.util.HashSet[Any]()
smaller.foreach(elementType, (_, v) =>
if (v == null) {
hasNull = true
} else {
smallestSet.add(v)
})
bigger.foreach(elementType, (_, v1) =>
if (v1 == null) {
hasNull = true
} else if (smallestSet.contains(v1)) {
return true
}
)
}
if (hasNull) {
null
} else {
false
}
}
/**
* A slower evaluation which performs a nested loop and supports all the data types.
*/
private def bruteForceEval(arr1: ArrayData, arr2: ArrayData): Any = {
var hasNull = false
if (arr1.numElements() > 0 && arr2.numElements() > 0) {
arr1.foreach(elementType, (_, v1) =>
if (v1 == null) {
hasNull = true
} else {
arr2.foreach(elementType, (_, v2) =>
if (v2 == null) {
hasNull = true
} else if (ordering.equiv(v1, v2)) {
return true
}
)
})
}
if (hasNull) {
null
} else {
false
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (a1, a2) => {
val smaller = ctx.freshName("smallerArray")
val bigger = ctx.freshName("biggerArray")
val comparisonCode = if (TypeUtils.typeWithProperEquals(elementType)) {
fastCodegen(ctx, ev, smaller, bigger)
} else {
bruteForceCodegen(ctx, ev, smaller, bigger)
}
s"""
|ArrayData $smaller;
|ArrayData $bigger;
|if ($a1.numElements() > $a2.numElements()) {
| $bigger = $a1;
| $smaller = $a2;
|} else {
| $smaller = $a1;
| $bigger = $a2;
|}
|if ($smaller.numElements() > 0) {
| $comparisonCode
|}
""".stripMargin
})
}
/**
* Code generation for a fast implementation which puts all the elements from the smaller array
* in a set and then performs a lookup on it for each element of the bigger one.
   * It works only for data types which properly implement the equals method.
*/
private def fastCodegen(ctx: CodegenContext, ev: ExprCode, smaller: String, bigger: String): String = {
val i = ctx.freshName("i")
val getFromSmaller = CodeGenerator.getValue(smaller, elementType, i)
val getFromBigger = CodeGenerator.getValue(bigger, elementType, i)
val javaElementClass = CodeGenerator.boxedType(elementType)
val javaSet = classOf[java.util.HashSet[_]].getName
val set = ctx.freshName("set")
val addToSetFromSmallerCode = nullSafeElementCodegen(
smaller, i, s"$set.add($getFromSmaller);", s"${ev.isNull} = true;")
val setIsNullCode = if (nullable) s"${ev.isNull} = false;" else ""
val elementIsInSetCode = nullSafeElementCodegen(
bigger,
i,
s"""
|if ($set.contains($getFromBigger)) {
| $setIsNullCode
| ${ev.value} = true;
| break;
|}
""".stripMargin,
s"${ev.isNull} = true;")
s"""
|$javaSet<$javaElementClass> $set = new $javaSet<$javaElementClass>();
|for (int $i = 0; $i < $smaller.numElements(); $i ++) {
| $addToSetFromSmallerCode
|}
|for (int $i = 0; $i < $bigger.numElements(); $i ++) {
| $elementIsInSetCode
|}
""".stripMargin
}
/**
* Code generation for a slower evaluation which performs a nested loop and supports all the data types.
*/
private def bruteForceCodegen(ctx: CodegenContext, ev: ExprCode, smaller: String, bigger: String): String = {
val i = ctx.freshName("i")
val j = ctx.freshName("j")
val getFromSmaller = CodeGenerator.getValue(smaller, elementType, j)
val getFromBigger = CodeGenerator.getValue(bigger, elementType, i)
val setIsNullCode = if (nullable) s"${ev.isNull} = false;" else ""
val compareValues = nullSafeElementCodegen(
smaller,
j,
s"""
|if (${ctx.genEqual(elementType, getFromSmaller, getFromBigger)}) {
| $setIsNullCode
| ${ev.value} = true;
|}
""".stripMargin,
s"${ev.isNull} = true;")
val isInSmaller = nullSafeElementCodegen(
bigger,
i,
s"""
|for (int $j = 0; $j < $smaller.numElements() && !${ev.value}; $j ++) {
| $compareValues
|}
""".stripMargin,
s"${ev.isNull} = true;")
s"""
|for (int $i = 0; $i < $bigger.numElements() && !${ev.value}; $i ++) {
| $isInSmaller
|}
""".stripMargin
}
def nullSafeElementCodegen(
arrayVar: String,
index: String,
code: String,
isNullCode: String): String = {
if (inputTypes.exists(_.asInstanceOf[ArrayType].containsNull)) {
s"""
|if ($arrayVar.isNullAt($index)) {
| $isNullCode
|} else {
| $code
|}
""".stripMargin
} else {
code
}
}
override def prettyName: String = "arrays_overlap"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArraysOverlap =
copy(left = newLeft, right = newRight)
}
/**
* Slices an array according to the requested start index and length
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "_FUNC_(x, start, length) - Subsets array x starting from index start (array indices start at 1, or starting from the end if start is negative) with the specified length.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3, 4), 2, 2);
[2,3]
> SELECT _FUNC_(array(1, 2, 3, 4), -2, 2);
[3,4]
""",
group = "array_funcs",
since = "2.4.0")
// scalastyle:on line.size.limit
case class Slice(x: Expression, start: Expression, length: Expression)
extends TernaryExpression with ImplicitCastInputTypes with NullIntolerant {
override def dataType: DataType = x.dataType
override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType, IntegerType, IntegerType)
override def first: Expression = x
override def second: Expression = start
override def third: Expression = length
@transient private lazy val elementType: DataType = x.dataType.asInstanceOf[ArrayType].elementType
override def nullSafeEval(xVal: Any, startVal: Any, lengthVal: Any): Any = {
val startInt = startVal.asInstanceOf[Int]
val lengthInt = lengthVal.asInstanceOf[Int]
val arr = xVal.asInstanceOf[ArrayData]
val startIndex = if (startInt == 0) {
throw QueryExecutionErrors.unexpectedValueForStartInFunctionError(prettyName)
} else if (startInt < 0) {
startInt + arr.numElements()
} else {
startInt - 1
}
if (lengthInt < 0) {
throw QueryExecutionErrors.unexpectedValueForLengthInFunctionError(prettyName)
}
// startIndex can be negative if start is negative and its absolute value is greater than the
// number of elements in the array
if (startIndex < 0 || startIndex >= arr.numElements()) {
return new GenericArrayData(Array.empty[AnyRef])
}
val data = arr.toSeq[AnyRef](elementType)
new GenericArrayData(data.slice(startIndex, startIndex + lengthInt))
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (x, start, length) => {
val startIdx = ctx.freshName("startIdx")
val resLength = ctx.freshName("resLength")
val defaultIntValue = CodeGenerator.defaultValue(CodeGenerator.JAVA_INT, false)
s"""
|${CodeGenerator.JAVA_INT} $startIdx = $defaultIntValue;
|${CodeGenerator.JAVA_INT} $resLength = $defaultIntValue;
|if ($start == 0) {
| throw QueryExecutionErrors.unexpectedValueForStartInFunctionError("$prettyName");
|} else if ($start < 0) {
| $startIdx = $start + $x.numElements();
|} else {
| // arrays in SQL are 1-based instead of 0-based
| $startIdx = $start - 1;
|}
|if ($length < 0) {
| throw QueryExecutionErrors.unexpectedValueForLengthInFunctionError("$prettyName");
|} else if ($length > $x.numElements() - $startIdx) {
| $resLength = $x.numElements() - $startIdx;
|} else {
| $resLength = $length;
|}
|${genCodeForResult(ctx, ev, x, startIdx, resLength)}
""".stripMargin
})
}
def genCodeForResult(
ctx: CodegenContext,
ev: ExprCode,
inputArray: String,
startIdx: String,
resLength: String): String = {
val values = ctx.freshName("values")
val i = ctx.freshName("i")
val genericArrayData = classOf[GenericArrayData].getName
val allocation = CodeGenerator.createArrayData(
values, elementType, resLength, s" $prettyName failed.")
val assignment = CodeGenerator.createArrayAssignment(values, elementType, inputArray,
i, s"$i + $startIdx", dataType.asInstanceOf[ArrayType].containsNull)
s"""
|if ($startIdx < 0 || $startIdx >= $inputArray.numElements()) {
| ${ev.value} = new $genericArrayData(new Object[0]);
|} else {
| $allocation
| for (int $i = 0; $i < $resLength; $i ++) {
| $assignment
| }
| ${ev.value} = $values;
|}
""".stripMargin
}
override protected def withNewChildrenInternal(
newFirst: Expression, newSecond: Expression, newThird: Expression): Slice =
copy(x = newFirst, start = newSecond, length = newThird)
}
/**
* Creates a String containing all the elements of the input array separated by the delimiter.
*/
@ExpressionDescription(
usage = """
_FUNC_(array, delimiter[, nullReplacement]) - Concatenates the elements of the given array
using the delimiter and an optional string to replace nulls. If no value is set for
nullReplacement, any null value is filtered.""",
examples = """
Examples:
> SELECT _FUNC_(array('hello', 'world'), ' ');
hello world
> SELECT _FUNC_(array('hello', null ,'world'), ' ');
hello world
> SELECT _FUNC_(array('hello', null ,'world'), ' ', ',');
hello , world
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayJoin(
array: Expression,
delimiter: Expression,
nullReplacement: Option[Expression]) extends Expression with ExpectsInputTypes {
def this(array: Expression, delimiter: Expression) = this(array, delimiter, None)
def this(array: Expression, delimiter: Expression, nullReplacement: Expression) =
this(array, delimiter, Some(nullReplacement))
override def inputTypes: Seq[AbstractDataType] = if (nullReplacement.isDefined) {
Seq(ArrayType(StringType), StringType, StringType)
} else {
Seq(ArrayType(StringType), StringType)
}
override def children: Seq[Expression] = if (nullReplacement.isDefined) {
Seq(array, delimiter, nullReplacement.get)
} else {
Seq(array, delimiter)
}
override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Expression =
if (nullReplacement.isDefined) {
copy(
array = newChildren(0),
delimiter = newChildren(1),
nullReplacement = Some(newChildren(2)))
} else {
copy(array = newChildren(0), delimiter = newChildren(1))
}
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def eval(input: InternalRow): Any = {
val arrayEval = array.eval(input)
if (arrayEval == null) return null
val delimiterEval = delimiter.eval(input)
if (delimiterEval == null) return null
val nullReplacementEval = nullReplacement.map(_.eval(input))
if (nullReplacementEval.contains(null)) return null
val buffer = new UTF8StringBuilder()
var firstItem = true
val nullHandling = nullReplacementEval match {
case Some(rep) => (prependDelimiter: Boolean) => {
if (!prependDelimiter) {
buffer.append(delimiterEval.asInstanceOf[UTF8String])
}
buffer.append(rep.asInstanceOf[UTF8String])
true
}
case None => (_: Boolean) => false
}
arrayEval.asInstanceOf[ArrayData].foreach(StringType, (_, item) => {
if (item == null) {
if (nullHandling(firstItem)) {
firstItem = false
}
} else {
if (!firstItem) {
buffer.append(delimiterEval.asInstanceOf[UTF8String])
}
buffer.append(item.asInstanceOf[UTF8String])
firstItem = false
}
})
buffer.build()
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val code = nullReplacement match {
case Some(replacement) =>
val replacementGen = replacement.genCode(ctx)
val nullHandling = (buffer: String, delimiter: String, firstItem: String) => {
s"""
|if (!$firstItem) {
| $buffer.append($delimiter);
|}
|$buffer.append(${replacementGen.value});
|$firstItem = false;
""".stripMargin
}
val execCode = if (replacement.nullable) {
ctx.nullSafeExec(replacement.nullable, replacementGen.isNull) {
genCodeForArrayAndDelimiter(ctx, ev, nullHandling)
}
} else {
genCodeForArrayAndDelimiter(ctx, ev, nullHandling)
}
s"""
|${replacementGen.code}
|$execCode
""".stripMargin
case None => genCodeForArrayAndDelimiter(ctx, ev,
(_: String, _: String, _: String) => "// nulls are ignored")
}
if (nullable) {
ev.copy(
code"""
|boolean ${ev.isNull} = true;
|UTF8String ${ev.value} = null;
|$code
""".stripMargin)
} else {
ev.copy(
code"""
|UTF8String ${ev.value} = null;
|$code
""".stripMargin, FalseLiteral)
}
}
private def genCodeForArrayAndDelimiter(
ctx: CodegenContext,
ev: ExprCode,
nullEval: (String, String, String) => String): String = {
val arrayGen = array.genCode(ctx)
val delimiterGen = delimiter.genCode(ctx)
val buffer = ctx.freshName("buffer")
val bufferClass = classOf[UTF8StringBuilder].getName
val i = ctx.freshName("i")
val firstItem = ctx.freshName("firstItem")
val resultCode =
s"""
|$bufferClass $buffer = new $bufferClass();
|boolean $firstItem = true;
|for (int $i = 0; $i < ${arrayGen.value}.numElements(); $i ++) {
| if (${arrayGen.value}.isNullAt($i)) {
| ${nullEval(buffer, delimiterGen.value, firstItem)}
| } else {
| if (!$firstItem) {
| $buffer.append(${delimiterGen.value});
| }
| $buffer.append(${CodeGenerator.getValue(arrayGen.value, StringType, i)});
| $firstItem = false;
| }
|}
|${ev.value} = $buffer.build();""".stripMargin
if (array.nullable || delimiter.nullable) {
arrayGen.code + ctx.nullSafeExec(array.nullable, arrayGen.isNull) {
delimiterGen.code + ctx.nullSafeExec(delimiter.nullable, delimiterGen.isNull) {
s"""
|${ev.isNull} = false;
|$resultCode""".stripMargin
}
}
} else {
s"""
|${arrayGen.code}
|${delimiterGen.code}
|$resultCode""".stripMargin
}
}
override def dataType: DataType = StringType
override def prettyName: String = "array_join"
}
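// Illustrative usage sketch (not part of the implementation): `ArrayJoin` backs the
// `array_join` SQL function and the DataFrame helper of the same name. Assuming a
// SparkSession named `spark` is in scope, the examples above can be reproduced with:
//
//   spark.sql("SELECT array_join(array('hello', null, 'world'), ' ', ',')").show()
//   // hello , world
//
//   import org.apache.spark.sql.functions.{array_join, col}
//   import spark.implicits._
//   Seq(Seq("hello", null, "world")).toDF("arr")
//     .select(array_join(col("arr"), " ")).show()   // "hello world" (nulls filtered out)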
/**
* Returns the minimum value in the array.
*/
@ExpressionDescription(
usage = """
_FUNC_(array) - Returns the minimum value in the array. NaN is greater than
any non-NaN element for double/float type. NULL elements are skipped.""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 20, null, 3));
1
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayMin(child: Expression)
extends UnaryExpression with ImplicitCastInputTypes with NullIntolerant {
override def nullable: Boolean = true
override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType)
@transient private lazy val ordering = TypeUtils.getInterpretedOrdering(dataType)
override def checkInputDataTypes(): TypeCheckResult = {
val typeCheckResult = super.checkInputDataTypes()
if (typeCheckResult.isSuccess) {
TypeUtils.checkForOrderingExpr(dataType, s"function $prettyName")
} else {
typeCheckResult
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val childGen = child.genCode(ctx)
val javaType = CodeGenerator.javaType(dataType)
val i = ctx.freshName("i")
val item = ExprCode(EmptyBlock,
isNull = JavaCode.isNullExpression(s"${childGen.value}.isNullAt($i)"),
value = JavaCode.expression(CodeGenerator.getValue(childGen.value, dataType, i), dataType))
ev.copy(code =
code"""
|${childGen.code}
|boolean ${ev.isNull} = true;
|$javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
|if (!${childGen.isNull}) {
| for (int $i = 0; $i < ${childGen.value}.numElements(); $i ++) {
| ${ctx.reassignIfSmaller(dataType, ev, item)}
| }
|}
""".stripMargin)
}
override protected def nullSafeEval(input: Any): Any = {
var min: Any = null
input.asInstanceOf[ArrayData].foreach(dataType, (_, item) =>
if (item != null && (min == null || ordering.lt(item, min))) {
min = item
}
)
min
}
@transient override lazy val dataType: DataType = child.dataType match {
case ArrayType(dt, _) => dt
case _ => throw new IllegalStateException(s"$prettyName accepts only arrays.")
}
override def prettyName: String = "array_min"
override protected def withNewChildInternal(newChild: Expression): ArrayMin =
copy(child = newChild)
}
/**
* Returns the maximum value in the array.
*/
@ExpressionDescription(
usage = """
_FUNC_(array) - Returns the maximum value in the array. NaN is greater than
any non-NaN element for double/float type. NULL elements are skipped.""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 20, null, 3));
20
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayMax(child: Expression)
extends UnaryExpression with ImplicitCastInputTypes with NullIntolerant {
override def nullable: Boolean = true
override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType)
@transient private lazy val ordering = TypeUtils.getInterpretedOrdering(dataType)
override def checkInputDataTypes(): TypeCheckResult = {
val typeCheckResult = super.checkInputDataTypes()
if (typeCheckResult.isSuccess) {
TypeUtils.checkForOrderingExpr(dataType, s"function $prettyName")
} else {
typeCheckResult
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val childGen = child.genCode(ctx)
val javaType = CodeGenerator.javaType(dataType)
val i = ctx.freshName("i")
val item = ExprCode(EmptyBlock,
isNull = JavaCode.isNullExpression(s"${childGen.value}.isNullAt($i)"),
value = JavaCode.expression(CodeGenerator.getValue(childGen.value, dataType, i), dataType))
ev.copy(code =
code"""
|${childGen.code}
|boolean ${ev.isNull} = true;
|$javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
|if (!${childGen.isNull}) {
| for (int $i = 0; $i < ${childGen.value}.numElements(); $i ++) {
| ${ctx.reassignIfGreater(dataType, ev, item)}
| }
|}
""".stripMargin)
}
override protected def nullSafeEval(input: Any): Any = {
var max: Any = null
input.asInstanceOf[ArrayData].foreach(dataType, (_, item) =>
if (item != null && (max == null || ordering.gt(item, max))) {
max = item
}
)
max
}
@transient override lazy val dataType: DataType = child.dataType match {
case ArrayType(dt, _) => dt
case _ => throw new IllegalStateException(s"$prettyName accepts only arrays.")
}
override def prettyName: String = "array_max"
override protected def withNewChildInternal(newChild: Expression): ArrayMax =
copy(child = newChild)
}
/**
* Returns the position of the first occurrence of element in the given array as a long.
* Returns 0 if the given value could not be found in the array. Returns null if either of
* the arguments is null.
*
* NOTE: the index is 1-based, not 0-based; the first element in the array has index 1.
*/
@ExpressionDescription(
usage = """
_FUNC_(array, element) - Returns the (1-based) index of the first occurrence of element
in the array as a long, or 0 if element is not found.
""",
examples = """
Examples:
> SELECT _FUNC_(array(3, 2, 1), 1);
3
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayPosition(left: Expression, right: Expression)
extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant {
@transient private lazy val ordering: Ordering[Any] =
TypeUtils.getInterpretedOrdering(right.dataType)
override def dataType: DataType = LongType
override def inputTypes: Seq[AbstractDataType] = {
(left.dataType, right.dataType) match {
case (ArrayType(e1, hasNull), e2) =>
TypeCoercion.findTightestCommonType(e1, e2) match {
case Some(dt) => Seq(ArrayType(dt, hasNull), dt)
case _ => Seq.empty
}
case _ => Seq.empty
}
}
override def checkInputDataTypes(): TypeCheckResult = {
(left.dataType, right.dataType) match {
case (ArrayType(e1, _), e2) if e1.sameType(e2) =>
TypeUtils.checkForOrderingExpr(e2, s"function $prettyName")
case _ => TypeCheckResult.TypeCheckFailure(s"Input to function $prettyName should have " +
s"been ${ArrayType.simpleString} followed by a value with same element type, but it's " +
s"[${left.dataType.catalogString}, ${right.dataType.catalogString}].")
}
}
override def nullSafeEval(arr: Any, value: Any): Any = {
arr.asInstanceOf[ArrayData].foreach(right.dataType, (i, v) =>
if (v != null && ordering.equiv(v, value)) {
return (i + 1).toLong
}
)
0L
}
override def prettyName: String = "array_position"
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (arr, value) => {
val pos = ctx.freshName("arrayPosition")
val i = ctx.freshName("i")
val getValue = CodeGenerator.getValue(arr, right.dataType, i)
s"""
|int $pos = 0;
|for (int $i = 0; $i < $arr.numElements(); $i ++) {
| if (!$arr.isNullAt($i) && ${ctx.genEqual(right.dataType, value, getValue)}) {
| $pos = $i + 1;
| break;
| }
|}
|${ev.value} = (long) $pos;
""".stripMargin
})
}
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArrayPosition =
copy(left = newLeft, right = newRight)
}
/**
* Returns the value of index `right` in Array `left` or the value for key `right` in Map `left`.
*/
@ExpressionDescription(
usage = """
_FUNC_(array, index) - Returns element of array at given (1-based) index. If index is 0,
Spark will throw an error. If index < 0, accesses elements from the last to the first.
The function returns NULL if the index exceeds the length of the array and
`spark.sql.ansi.enabled` is set to false.
If `spark.sql.ansi.enabled` is set to true, it throws ArrayIndexOutOfBoundsException
for invalid indices.
_FUNC_(map, key) - Returns value for given key. The function returns NULL
if the key is not contained in the map and `spark.sql.ansi.enabled` is set to false.
If `spark.sql.ansi.enabled` is set to true, it throws NoSuchElementException instead.
""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), 2);
2
> SELECT _FUNC_(map(1, 'a', 2, 'b'), 2);
b
""",
since = "2.4.0",
group = "map_funcs")
case class ElementAt(
left: Expression,
right: Expression,
failOnError: Boolean = SQLConf.get.ansiEnabled)
extends GetMapValueUtil with GetArrayItemUtil with NullIntolerant {
def this(left: Expression, right: Expression) = this(left, right, SQLConf.get.ansiEnabled)
@transient private lazy val mapKeyType = left.dataType.asInstanceOf[MapType].keyType
@transient private lazy val mapValueContainsNull =
left.dataType.asInstanceOf[MapType].valueContainsNull
@transient private lazy val arrayContainsNull = left.dataType.asInstanceOf[ArrayType].containsNull
@transient private lazy val ordering: Ordering[Any] = TypeUtils.getInterpretedOrdering(mapKeyType)
@transient override lazy val dataType: DataType = left.dataType match {
case ArrayType(elementType, _) => elementType
case MapType(_, valueType, _) => valueType
}
override val isElementAtFunction: Boolean = true
override def inputTypes: Seq[AbstractDataType] = {
(left.dataType, right.dataType) match {
case (arr: ArrayType, e2: IntegralType) if (e2 != LongType) =>
Seq(arr, IntegerType)
case (MapType(keyType, valueType, hasNull), e2) =>
TypeCoercion.findTightestCommonType(keyType, e2) match {
case Some(dt) => Seq(MapType(dt, valueType, hasNull), dt)
case _ => Seq.empty
}
case _ => Seq.empty
}
}
override def checkInputDataTypes(): TypeCheckResult = {
(left.dataType, right.dataType) match {
case (_: ArrayType, e2) if e2 != IntegerType =>
TypeCheckResult.TypeCheckFailure(s"Input to function $prettyName should have " +
s"been ${ArrayType.simpleString} followed by a ${IntegerType.simpleString}, but it's " +
s"[${left.dataType.catalogString}, ${right.dataType.catalogString}].")
case (MapType(e1, _, _), e2) if (!e2.sameType(e1)) =>
TypeCheckResult.TypeCheckFailure(s"Input to function $prettyName should have " +
s"been ${MapType.simpleString} followed by a value of same key type, but it's " +
s"[${left.dataType.catalogString}, ${right.dataType.catalogString}].")
case (e1, _) if (!e1.isInstanceOf[MapType] && !e1.isInstanceOf[ArrayType]) =>
TypeCheckResult.TypeCheckFailure(s"The first argument to function $prettyName should " +
s"have been ${ArrayType.simpleString} or ${MapType.simpleString} type, but its " +
s"${left.dataType.catalogString} type.")
case _ => TypeCheckResult.TypeCheckSuccess
}
}
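// Computes result nullability for array inputs: index 0 always fails (never produces a
// value), an out-of-range index is nullable only when failOnError is disabled, and an
// in-range index inherits the nullability of the referenced element (negative indices
// count from the end).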
private def nullability(elements: Seq[Expression], ordinal: Int): Boolean = {
if (ordinal == 0) {
false
} else if (elements.length < math.abs(ordinal)) {
!failOnError
} else {
if (ordinal < 0) {
elements(elements.length + ordinal).nullable
} else {
elements(ordinal - 1).nullable
}
}
}
override def nullable: Boolean = left.dataType match {
case _: ArrayType =>
computeNullabilityFromArray(left, right, failOnError, nullability)
case _: MapType => if (failOnError) mapValueContainsNull else true
}
override def nullSafeEval(value: Any, ordinal: Any): Any = doElementAt(value, ordinal)
@transient private lazy val doElementAt: (Any, Any) => Any = left.dataType match {
case _: ArrayType =>
(value, ordinal) => {
val array = value.asInstanceOf[ArrayData]
val index = ordinal.asInstanceOf[Int]
if (array.numElements() < math.abs(index)) {
if (failOnError) {
throw QueryExecutionErrors.invalidElementAtIndexError(index, array.numElements())
} else {
null
}
} else {
val idx = if (index == 0) {
throw QueryExecutionErrors.sqlArrayIndexNotStartAtOneError()
} else if (index > 0) {
index - 1
} else {
array.numElements() + index
}
if (arrayContainsNull && array.isNullAt(idx)) {
null
} else {
array.get(idx, dataType)
}
}
}
case _: MapType =>
(value, ordinal) => getValueEval(value, ordinal, mapKeyType, ordering, failOnError)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
left.dataType match {
case _: ArrayType =>
nullSafeCodeGen(ctx, ev, (eval1, eval2) => {
val index = ctx.freshName("elementAtIndex")
val nullCheck = if (arrayContainsNull) {
s"""
|if ($eval1.isNullAt($index)) {
| ${ev.isNull} = true;
|} else
""".stripMargin
} else {
""
}
val indexOutOfBoundBranch = if (failOnError) {
s"throw QueryExecutionErrors.invalidElementAtIndexError($index, $eval1.numElements());"
} else {
s"${ev.isNull} = true;"
}
s"""
|int $index = (int) $eval2;
|if ($eval1.numElements() < Math.abs($index)) {
| $indexOutOfBoundBranch
|} else {
| if ($index == 0) {
| throw QueryExecutionErrors.sqlArrayIndexNotStartAtOneError();
| } else if ($index > 0) {
| $index--;
| } else {
| $index += $eval1.numElements();
| }
| $nullCheck
| {
| ${ev.value} = ${CodeGenerator.getValue(eval1, dataType, index)};
| }
|}
""".stripMargin
})
case _: MapType =>
doGetValueGenCode(ctx, ev, left.dataType.asInstanceOf[MapType], failOnError)
}
}
override def prettyName: String = "element_at"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ElementAt = copy(left = newLeft, right = newRight)
}
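// Illustrative usage sketch (not part of the implementation): `ElementAt` backs the
// `element_at` SQL function, e.g. with a SparkSession named `spark` in scope:
//
//   spark.sql("SELECT element_at(array(1, 2, 3), 2)").show()        // 2
//   spark.sql("SELECT element_at(map(1, 'a', 2, 'b'), 2)").show()   // b
//
// With `spark.sql.ansi.enabled=false` an out-of-range index (or a missing map key) yields
// NULL; with ANSI mode enabled the error branches generated above are taken instead.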
/**
* Returns the value of index `right` in Array `left` or the value for key `right` in Map `left`.
* The function is identical to `element_at`, except that it returns `NULL` instead of
* throwing an exception when the array index is out of bounds or the map key is not found,
* even when `spark.sql.ansi.enabled` is true.
*/
@ExpressionDescription(
usage = """
_FUNC_(array, index) - Returns element of array at given (1-based) index. If index is 0,
Spark will throw an error. If index < 0, accesses elements from the last to the first.
The function always returns NULL if the index exceeds the length of the array.
_FUNC_(map, key) - Returns value for given key. The function always returns NULL
if the key is not contained in the map.
""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), 2);
2
> SELECT _FUNC_(map(1, 'a', 2, 'b'), 2);
b
""",
since = "3.3.0",
group = "map_funcs")
case class TryElementAt(left: Expression, right: Expression, replacement: Expression)
extends RuntimeReplaceable with InheritAnalysisRules {
def this(left: Expression, right: Expression) =
this(left, right, ElementAt(left, right, failOnError = false))
override def prettyName: String = "try_element_at"
override def parameters: Seq[Expression] = Seq(left, right)
override protected def withNewChildInternal(newChild: Expression): Expression =
this.copy(replacement = newChild)
}
/**
* Concatenates multiple input columns together into a single column.
* The function works with strings, binary and compatible array columns.
*/
@ExpressionDescription(
usage = "_FUNC_(col1, col2, ..., colN) - Returns the concatenation of col1, col2, ..., colN.",
examples = """
Examples:
> SELECT _FUNC_('Spark', 'SQL');
SparkSQL
> SELECT _FUNC_(array(1, 2, 3), array(4, 5), array(6));
[1,2,3,4,5,6]
""",
note = """
Concat logic for arrays is available since 2.4.0.
""",
group = "collection_funcs",
since = "1.5.0")
case class Concat(children: Seq[Expression]) extends ComplexTypeMergingExpression {
private def allowedTypes: Seq[AbstractDataType] = Seq(StringType, BinaryType, ArrayType)
final override val nodePatterns: Seq[TreePattern] = Seq(CONCAT)
override def checkInputDataTypes(): TypeCheckResult = {
if (children.isEmpty) {
TypeCheckResult.TypeCheckSuccess
} else {
val childTypes = children.map(_.dataType)
if (childTypes.exists(tpe => !allowedTypes.exists(_.acceptsType(tpe)))) {
return TypeCheckResult.TypeCheckFailure(
s"input to function $prettyName should have been ${StringType.simpleString}," +
s" ${BinaryType.simpleString} or ${ArrayType.simpleString}, but it's " +
childTypes.map(_.catalogString).mkString("[", ", ", "]"))
}
TypeUtils.checkForSameTypeInputExpr(childTypes, s"function $prettyName")
}
}
@transient override lazy val dataType: DataType = {
if (children.isEmpty) {
StringType
} else {
super.dataType
}
}
private def javaType: String = CodeGenerator.javaType(dataType)
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def eval(input: InternalRow): Any = doConcat(input)
@transient private lazy val doConcat: InternalRow => Any = dataType match {
case BinaryType =>
input => {
val inputs = children.map(_.eval(input).asInstanceOf[Array[Byte]])
ByteArray.concat(inputs: _*)
}
case StringType =>
input => {
val inputs = children.map(_.eval(input).asInstanceOf[UTF8String])
UTF8String.concat(inputs: _*)
}
case ArrayType(elementType, _) =>
input => {
val inputs = children.toStream.map(_.eval(input))
if (inputs.contains(null)) {
null
} else {
val arrayData = inputs.map(_.asInstanceOf[ArrayData])
val numberOfElements = arrayData.foldLeft(0L)((sum, ad) => sum + ad.numElements())
if (numberOfElements > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
throw QueryExecutionErrors.concatArraysWithElementsExceedLimitError(numberOfElements)
}
val finalData = new Array[AnyRef](numberOfElements.toInt)
var position = 0
for (ad <- arrayData) {
val arr = ad.toObjectArray(elementType)
Array.copy(arr, 0, finalData, position, arr.length)
position += arr.length
}
new GenericArrayData(finalData)
}
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val evals = children.map(_.genCode(ctx))
val args = ctx.freshName("args")
val hasNull = ctx.freshName("hasNull")
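// Each child is guarded by `if (!hasNull)` so evaluation stops as soon as one input is
// null; the generated split functions below return the flag so it can be carried across them.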
val inputs = evals.zip(children.map(_.nullable)).zipWithIndex.map {
case ((eval, true), index) =>
s"""
|if (!$hasNull) {
| ${eval.code}
| if (!${eval.isNull}) {
| $args[$index] = ${eval.value};
| } else {
| $hasNull = true;
| }
|}
""".stripMargin
case ((eval, false), index) =>
s"""
|if (!$hasNull) {
| ${eval.code}
| $args[$index] = ${eval.value};
|}
""".stripMargin
}
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = inputs,
funcName = "valueConcat",
extraArguments = (s"$javaType[]", args) :: ("boolean", hasNull) :: Nil,
returnType = "boolean",
makeSplitFunction = body =>
s"""
|$body
|return $hasNull;
""".stripMargin,
foldFunctions = _.map(funcCall => s"$hasNull = $funcCall;").mkString("\n")
)
val (concat, initCode) = dataType match {
case BinaryType =>
(s"${classOf[ByteArray].getName}.concat", s"byte[][] $args = new byte[${evals.length}][];")
case StringType =>
("UTF8String.concat", s"UTF8String[] $args = new UTF8String[${evals.length}];")
case ArrayType(elementType, containsNull) =>
val concat = genCodeForArrays(ctx, elementType, containsNull)
(concat, s"ArrayData[] $args = new ArrayData[${evals.length}];")
}
ev.copy(code =
code"""
|boolean $hasNull = false;
|$initCode
|$codes
|$javaType ${ev.value} = null;
|if (!$hasNull) {
| ${ev.value} = $concat($args);
|}
|boolean ${ev.isNull} = ${ev.value} == null;
""".stripMargin)
}
private def genCodeForNumberOfElements(ctx: CodegenContext) : (String, String) = {
val numElements = ctx.freshName("numElements")
val z = ctx.freshName("z")
val code = s"""
|long $numElements = 0L;
|for (int $z = 0; $z < ${children.length}; $z++) {
| $numElements += args[$z].numElements();
|}
""".stripMargin
(code, numElements)
}
private def genCodeForArrays(
ctx: CodegenContext,
elementType: DataType,
checkForNull: Boolean): String = {
val counter = ctx.freshName("counter")
val arrayData = ctx.freshName("arrayData")
val y = ctx.freshName("y")
val z = ctx.freshName("z")
val (numElemCode, numElemName) = genCodeForNumberOfElements(ctx)
val initialization = CodeGenerator.createArrayData(
arrayData, elementType, numElemName, s" $prettyName failed.")
val assignment = CodeGenerator.createArrayAssignment(
arrayData, elementType, s"args[$y]", counter, z,
dataType.asInstanceOf[ArrayType].containsNull)
val concat = ctx.freshName("concat")
val concatDef =
s"""
|private ArrayData $concat(ArrayData[] args) {
| $numElemCode
| $initialization
| int $counter = 0;
| for (int $y = 0; $y < ${children.length}; $y++) {
| for (int $z = 0; $z < args[$y].numElements(); $z++) {
| $assignment
| $counter++;
| }
| }
| return $arrayData;
|}
""".stripMargin
ctx.addNewFunction(concat, concatDef)
}
override def toString: String = s"concat(${children.mkString(", ")})"
override def sql: String = s"concat(${children.map(_.sql).mkString(", ")})"
override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): Concat =
copy(children = newChildren)
}
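// Illustrative usage sketch (not part of the implementation): `Concat` backs the `concat`
// function for strings, binary and arrays, e.g. with a SparkSession named `spark`:
//
//   spark.sql("SELECT concat('Spark', 'SQL')").show()                        // SparkSQL
//   spark.sql("SELECT concat(array(1, 2, 3), array(4, 5), array(6))").show() // [1,2,3,4,5,6]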
/**
* Transforms an array of arrays into a single array.
*/
@ExpressionDescription(
usage = "_FUNC_(arrayOfArrays) - Transforms an array of arrays into a single array.",
examples = """
Examples:
> SELECT _FUNC_(array(array(1, 2), array(3, 4)));
[1,2,3,4]
""",
group = "array_funcs",
since = "2.4.0")
case class Flatten(child: Expression) extends UnaryExpression with NullIntolerant {
private def childDataType: ArrayType = child.dataType.asInstanceOf[ArrayType]
override def nullable: Boolean = child.nullable || childDataType.containsNull
@transient override lazy val dataType: DataType = childDataType.elementType
@transient private lazy val elementType: DataType = dataType.asInstanceOf[ArrayType].elementType
override def checkInputDataTypes(): TypeCheckResult = child.dataType match {
case ArrayType(_: ArrayType, _) =>
TypeCheckResult.TypeCheckSuccess
case _ =>
TypeCheckResult.TypeCheckFailure(
s"The argument should be an array of arrays, " +
s"but '${child.sql}' is of ${child.dataType.catalogString} type."
)
}
override def nullSafeEval(child: Any): Any = {
val elements = child.asInstanceOf[ArrayData].toObjectArray(dataType)
if (elements.contains(null)) {
null
} else {
val arrayData = elements.map(_.asInstanceOf[ArrayData])
val numberOfElements = arrayData.foldLeft(0L)((sum, e) => sum + e.numElements())
if (numberOfElements > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
throw QueryExecutionErrors.flattenArraysWithElementsExceedLimitError(numberOfElements)
}
val flattenedData = new Array[AnyRef](numberOfElements.toInt)
var position = 0
for (ad <- arrayData) {
val arr = ad.toObjectArray(elementType)
Array.copy(arr, 0, flattenedData, position, arr.length)
position += arr.length
}
new GenericArrayData(flattenedData)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, c => {
val code = genCodeForFlatten(ctx, c, ev.value)
ctx.nullArrayElementsSaveExec(childDataType.containsNull, ev.isNull, c)(code)
})
}
private def genCodeForNumberOfElements(
ctx: CodegenContext,
childVariableName: String) : (String, String) = {
val variableName = ctx.freshName("numElements")
val code = s"""
|long $variableName = 0;
|for (int z = 0; z < $childVariableName.numElements(); z++) {
| $variableName += $childVariableName.getArray(z).numElements();
|}
""".stripMargin
(code, variableName)
}
private def genCodeForFlatten(
ctx: CodegenContext,
childVariableName: String,
arrayDataName: String): String = {
val counter = ctx.freshName("counter")
val tempArrayDataName = ctx.freshName("tempArrayData")
val k = ctx.freshName("k")
val l = ctx.freshName("l")
val arr = ctx.freshName("arr")
val (numElemCode, numElemName) = genCodeForNumberOfElements(ctx, childVariableName)
val allocation = CodeGenerator.createArrayData(
tempArrayDataName, elementType, numElemName, s" $prettyName failed.")
val assignment = CodeGenerator.createArrayAssignment(
tempArrayDataName, elementType, arr, counter, l,
dataType.asInstanceOf[ArrayType].containsNull)
s"""
|$numElemCode
|$allocation
|int $counter = 0;
|for (int $k = 0; $k < $childVariableName.numElements(); $k++) {
| ArrayData $arr = $childVariableName.getArray($k);
| for (int $l = 0; $l < $arr.numElements(); $l++) {
| $assignment
| $counter++;
| }
|}
|$arrayDataName = $tempArrayDataName;
""".stripMargin
}
override def prettyName: String = "flatten"
override protected def withNewChildInternal(newChild: Expression): Flatten =
copy(child = newChild)
}
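// Illustrative usage sketch (not part of the implementation): with a SparkSession named
// `spark` in scope, the example above corresponds to:
//
//   spark.sql("SELECT flatten(array(array(1, 2), array(3, 4)))").show()   // [1,2,3,4]
//
// Note that, as in nullSafeEval above, a null inner array makes the whole result null.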
@ExpressionDescription(
usage = """
_FUNC_(start, stop, step) - Generates an array of elements from start to stop (inclusive),
incrementing by step. The type of the returned elements is the same as the type of argument
expressions.
Supported types are: byte, short, integer, long, date, timestamp.
The start and stop expressions must resolve to the same type.
If start and stop expressions resolve to the 'date' or 'timestamp' type
then the step expression must resolve to the 'interval' or 'year-month interval' or
'day-time interval' type, otherwise to the same type as the start and stop expressions.
""",
arguments = """
Arguments:
* start - an expression. The start of the range.
* stop - an expression. The end of the range (inclusive).
* step - an optional expression. The step of the range.
By default step is 1 if start is less than or equal to stop, otherwise -1.
For temporal sequences it is 1 day and -1 day respectively.
If start is greater than stop then the step must be negative, and vice versa.
""",
examples = """
Examples:
> SELECT _FUNC_(1, 5);
[1,2,3,4,5]
> SELECT _FUNC_(5, 1);
[5,4,3,2,1]
> SELECT _FUNC_(to_date('2018-01-01'), to_date('2018-03-01'), interval 1 month);
[2018-01-01,2018-02-01,2018-03-01]
> SELECT _FUNC_(to_date('2018-01-01'), to_date('2018-03-01'), interval '0-1' year to month);
[2018-01-01,2018-02-01,2018-03-01]
""",
group = "array_funcs",
since = "2.4.0"
)
case class Sequence(
start: Expression,
stop: Expression,
stepOpt: Option[Expression],
timeZoneId: Option[String] = None)
extends Expression
with TimeZoneAwareExpression {
import Sequence._
def this(start: Expression, stop: Expression) =
this(start, stop, None, None)
def this(start: Expression, stop: Expression, step: Expression) =
this(start, stop, Some(step), None)
override def withTimeZone(timeZoneId: String): TimeZoneAwareExpression =
copy(timeZoneId = Some(timeZoneId))
override def children: Seq[Expression] = Seq(start, stop) ++ stepOpt
override def withNewChildrenInternal(
newChildren: IndexedSeq[Expression]): TimeZoneAwareExpression = {
if (stepOpt.isDefined) {
copy(start = newChildren(0), stop = newChildren(1), stepOpt = Some(newChildren(2)))
} else {
copy(start = newChildren(0), stop = newChildren(1))
}
}
override def foldable: Boolean = children.forall(_.foldable)
override def nullable: Boolean = children.exists(_.nullable)
override def dataType: ArrayType = ArrayType(start.dataType, containsNull = false)
override def checkInputDataTypes(): TypeCheckResult = {
val startType = start.dataType
def stepType = stepOpt.get.dataType
val typesCorrect =
startType.sameType(stop.dataType) &&
(startType match {
case TimestampType | TimestampNTZType =>
stepOpt.isEmpty || CalendarIntervalType.acceptsType(stepType) ||
YearMonthIntervalType.acceptsType(stepType) ||
DayTimeIntervalType.acceptsType(stepType)
case DateType =>
stepOpt.isEmpty || CalendarIntervalType.acceptsType(stepType) ||
YearMonthIntervalType.acceptsType(stepType) ||
DayTimeIntervalType.acceptsType(stepType)
case _: IntegralType =>
stepOpt.isEmpty || stepType.sameType(startType)
case _ => false
})
if (typesCorrect) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure(
s"""
|$prettyName uses the wrong parameter type. The parameter type must conform to:
|1. The start and stop expressions must resolve to the same type.
|2. If start and stop expressions resolve to the 'date' or 'timestamp' type
|then the step expression must resolve to the 'interval' or
|'${YearMonthIntervalType.simpleString}' or '${DayTimeIntervalType.simpleString}' type,
|otherwise to the same type as the start and stop expressions.
""".stripMargin)
}
}
private def isNotIntervalType(expr: Expression) = expr.dataType match {
case CalendarIntervalType | _: AnsiIntervalType => false
case _ => true
}
def coercibleChildren: Seq[Expression] = children.filter(isNotIntervalType)
def castChildrenTo(widerType: DataType): Expression = Sequence(
Cast(start, widerType),
Cast(stop, widerType),
stepOpt.map(step => if (isNotIntervalType(step)) Cast(step, widerType) else step),
timeZoneId)
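// Picks the sequence implementation from the element type and the step type: integral
// sequences use IntegralSequenceImpl, while date/timestamp sequences choose between the
// calendar-interval, year-month-interval and day-time-interval based implementations.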
@transient private lazy val impl: InternalSequence = dataType.elementType match {
case iType: IntegralType =>
type T = iType.InternalType
val ct = ClassTag[T](iType.tag.mirror.runtimeClass(iType.tag.tpe))
new IntegralSequenceImpl(iType)(ct, iType.integral)
case TimestampType | TimestampNTZType =>
if (stepOpt.isEmpty || CalendarIntervalType.acceptsType(stepOpt.get.dataType)) {
new TemporalSequenceImpl[Long](LongType, start.dataType, 1, identity, zoneId)
} else if (YearMonthIntervalType.acceptsType(stepOpt.get.dataType)) {
new PeriodSequenceImpl[Long](LongType, start.dataType, 1, identity, zoneId)
} else {
new DurationSequenceImpl[Long](LongType, start.dataType, 1, identity, zoneId)
}
case DateType =>
if (stepOpt.isEmpty || CalendarIntervalType.acceptsType(stepOpt.get.dataType)) {
new TemporalSequenceImpl[Int](IntegerType, start.dataType, MICROS_PER_DAY, _.toInt, zoneId)
} else if (YearMonthIntervalType.acceptsType(stepOpt.get.dataType)) {
new PeriodSequenceImpl[Int](IntegerType, start.dataType, MICROS_PER_DAY, _.toInt, zoneId)
} else {
new DurationSequenceImpl[Int](IntegerType, start.dataType, MICROS_PER_DAY, _.toInt, zoneId)
}
}
override def eval(input: InternalRow): Any = {
val startVal = start.eval(input)
if (startVal == null) return null
val stopVal = stop.eval(input)
if (stopVal == null) return null
val stepVal = stepOpt.map(_.eval(input)).getOrElse(impl.defaultStep(startVal, stopVal))
if (stepVal == null) return null
ArrayData.toArrayData(impl.eval(startVal, stopVal, stepVal))
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val startGen = start.genCode(ctx)
val stopGen = stop.genCode(ctx)
val stepGen = stepOpt.map(_.genCode(ctx)).getOrElse(
impl.defaultStep.genCode(ctx, startGen, stopGen))
val resultType = CodeGenerator.javaType(dataType)
val resultCode = {
val arr = ctx.freshName("arr")
val arrElemType = CodeGenerator.javaType(dataType.elementType)
s"""
|final $arrElemType[] $arr = null;
|${impl.genCode(ctx, startGen.value, stopGen.value, stepGen.value, arr, arrElemType)}
|${ev.value} = UnsafeArrayData.fromPrimitiveArray($arr);
""".stripMargin
}
if (nullable) {
val nullSafeEval =
startGen.code + ctx.nullSafeExec(start.nullable, startGen.isNull) {
stopGen.code + ctx.nullSafeExec(stop.nullable, stopGen.isNull) {
stepGen.code + ctx.nullSafeExec(stepOpt.exists(_.nullable), stepGen.isNull) {
s"""
|${ev.isNull} = false;
|$resultCode
""".stripMargin
}
}
}
ev.copy(code =
code"""
|boolean ${ev.isNull} = true;
|$resultType ${ev.value} = null;
|$nullSafeEval
""".stripMargin)
} else {
ev.copy(code =
code"""
|${startGen.code}
|${stopGen.code}
|${stepGen.code}
|$resultType ${ev.value} = null;
|$resultCode
""".stripMargin,
isNull = FalseLiteral)
}
}
}
object Sequence {
private type LessThanOrEqualFn = (Any, Any) => Boolean
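// The default step used when no step expression is given: `one` if start <= stop,
// otherwise its negation.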
private class DefaultStep(lteq: LessThanOrEqualFn, stepType: DataType, one: Any) {
private val negativeOne = UnaryMinus(Literal(one)).eval()
def apply(start: Any, stop: Any): Any = {
if (lteq(start, stop)) one else negativeOne
}
def genCode(ctx: CodegenContext, startGen: ExprCode, stopGen: ExprCode): ExprCode = {
val Seq(oneVal, negativeOneVal) = Seq(one, negativeOne).map(Literal(_).genCode(ctx).value)
ExprCode.forNonNullValue(JavaCode.expression(
s"${startGen.value} <= ${stopGen.value} ? $oneVal : $negativeOneVal",
stepType))
}
}
private trait InternalSequence {
def eval(start: Any, stop: Any, step: Any): Any
def genCode(
ctx: CodegenContext,
start: String,
stop: String,
step: String,
arr: String,
elemType: String): String
val defaultStep: DefaultStep
}
private class IntegralSequenceImpl[T: ClassTag]
(elemType: IntegralType)(implicit num: Integral[T]) extends InternalSequence {
override val defaultStep: DefaultStep = new DefaultStep(
(elemType.ordering.lteq _).asInstanceOf[LessThanOrEqualFn],
elemType,
num.one)
override def eval(input1: Any, input2: Any, input3: Any): Array[T] = {
import num._
val start = input1.asInstanceOf[T]
val stop = input2.asInstanceOf[T]
val step = input3.asInstanceOf[T]
var i: Int = getSequenceLength(start, stop, step, step)
val arr = new Array[T](i)
while (i > 0) {
i -= 1
arr(i) = start + step * num.fromInt(i)
}
arr
}
override def genCode(
ctx: CodegenContext,
start: String,
stop: String,
step: String,
arr: String,
elemType: String): String = {
val i = ctx.freshName("i")
s"""
|${genSequenceLengthCode(ctx, start, stop, step, step, i)}
|$arr = new $elemType[$i];
|while ($i > 0) {
| $i--;
| $arr[$i] = ($elemType) ($start + $step * $i);
|}
""".stripMargin
}
}
private class PeriodSequenceImpl[T: ClassTag]
(dt: IntegralType, outerDataType: DataType, scale: Long, fromLong: Long => T, zoneId: ZoneId)
(implicit num: Integral[T])
extends InternalSequenceBase(dt, outerDataType, scale, fromLong, zoneId) {
override val defaultStep: DefaultStep = new DefaultStep(
(dt.ordering.lteq _).asInstanceOf[LessThanOrEqualFn],
YearMonthIntervalType(),
Period.of(0, 1, 0))
val intervalType: DataType = YearMonthIntervalType()
def splitStep(input: Any): (Int, Int, Long) = {
(input.asInstanceOf[Int], 0, 0)
}
def stepSplitCode(
stepMonths: String, stepDays: String, stepMicros: String, step: String): String = {
s"""
|final int $stepMonths = $step;
|final int $stepDays = 0;
|final long $stepMicros = 0L;
""".stripMargin
}
}
private class DurationSequenceImpl[T: ClassTag]
(dt: IntegralType, outerDataType: DataType, scale: Long, fromLong: Long => T, zoneId: ZoneId)
(implicit num: Integral[T])
extends InternalSequenceBase(dt, outerDataType, scale, fromLong, zoneId) {
override val defaultStep: DefaultStep = new DefaultStep(
(dt.ordering.lteq _).asInstanceOf[LessThanOrEqualFn],
DayTimeIntervalType(),
Duration.ofDays(1))
val intervalType: DataType = DayTimeIntervalType()
def splitStep(input: Any): (Int, Int, Long) = {
val duration = input.asInstanceOf[Long]
val days = IntervalUtils.getDays(duration)
val micros = duration - days * MICROS_PER_DAY
(0, days, micros)
}
def stepSplitCode(
stepMonths: String, stepDays: String, stepMicros: String, step: String): String = {
s"""
|final int $stepMonths = 0;
|final int $stepDays =
| (int) org.apache.spark.sql.catalyst.util.IntervalUtils.getDays($step);
|final long $stepMicros = $step - $stepDays * ${MICROS_PER_DAY}L;
""".stripMargin
}
}
private class TemporalSequenceImpl[T: ClassTag]
(dt: IntegralType, outerDataType: DataType, scale: Long, fromLong: Long => T, zoneId: ZoneId)
(implicit num: Integral[T])
extends InternalSequenceBase(dt, outerDataType, scale, fromLong, zoneId) {
override val defaultStep: DefaultStep = new DefaultStep(
(dt.ordering.lteq _).asInstanceOf[LessThanOrEqualFn],
CalendarIntervalType,
new CalendarInterval(0, 1, 0))
val intervalType: DataType = CalendarIntervalType
def splitStep(input: Any): (Int, Int, Long) = {
val step = input.asInstanceOf[CalendarInterval]
(step.months, step.days, step.microseconds)
}
def stepSplitCode(
stepMonths: String, stepDays: String, stepMicros: String, step: String): String = {
s"""
|final int $stepMonths = $step.months;
|final int $stepDays = $step.days;
|final long $stepMicros = $step.microseconds;
""".stripMargin
}
}
private abstract class InternalSequenceBase[T: ClassTag]
(dt: IntegralType, outerDataType: DataType, scale: Long, fromLong: Long => T, zoneId: ZoneId)
(implicit num: Integral[T]) extends InternalSequence {
val defaultStep: DefaultStep
private val backedSequenceImpl = new IntegralSequenceImpl[T](dt)
// We assume a minimum of 28 days in one month when calculating `intervalStepInMicros`
// in order to make sure the estimated array length is long enough
private val microsPerMonth = 28 * MICROS_PER_DAY
protected val intervalType: DataType
protected def splitStep(input: Any): (Int, Int, Long)
private val addInterval: (Long, Int, Int, Long, ZoneId) => Long = outerDataType match {
case TimestampType | DateType => timestampAddInterval
case TimestampNTZType => timestampNTZAddInterval
}
override def eval(input1: Any, input2: Any, input3: Any): Array[T] = {
val start = input1.asInstanceOf[T]
val stop = input2.asInstanceOf[T]
val (stepMonths, stepDays, stepMicros) = splitStep(input3)
if (scale == MICROS_PER_DAY && stepMonths == 0 && stepDays == 0) {
throw new IllegalArgumentException(s"sequence step must be an ${intervalType.typeName}" +
" of day granularity if start and end values are dates")
}
if (stepMonths == 0 && stepMicros == 0 && scale == MICROS_PER_DAY) {
// Adding pure days to date start/end
backedSequenceImpl.eval(start, stop, fromLong(stepDays))
} else if (stepMonths == 0 && stepDays == 0 && scale == 1) {
// Adding pure microseconds to timestamp start/end
backedSequenceImpl.eval(start, stop, fromLong(stepMicros))
} else {
// To estimate the resulted array length we need to make assumptions
// about a month length in days and a day length in microseconds
val intervalStepInMicros =
stepMicros + stepMonths * microsPerMonth + stepDays * MICROS_PER_DAY
val startMicros: Long = num.toLong(start) * scale
val stopMicros: Long = num.toLong(stop) * scale
val maxEstimatedArrayLength =
getSequenceLength(startMicros, stopMicros, input3, intervalStepInMicros)
val stepSign = if (intervalStepInMicros > 0) +1 else -1
val exclusiveItem = stopMicros + stepSign
val arr = new Array[T](maxEstimatedArrayLength)
var t = startMicros
var i = 0
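// The XOR with `stepSign < 0` flips the bound check: ascending sequences loop while
// t < exclusiveItem, descending ones while t >= exclusiveItem.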
while (t < exclusiveItem ^ stepSign < 0) {
arr(i) = fromLong(t / scale)
i += 1
t = addInterval(startMicros, i * stepMonths, i * stepDays, i * stepMicros, zoneId)
}
// truncate array to the correct length
if (arr.length == i) arr else arr.slice(0, i)
}
}
protected def stepSplitCode(
stepMonths: String, stepDays: String, stepMicros: String, step: String): String
private val addIntervalCode = outerDataType match {
case TimestampType | DateType =>
"org.apache.spark.sql.catalyst.util.DateTimeUtils.timestampAddInterval"
case TimestampNTZType =>
"org.apache.spark.sql.catalyst.util.DateTimeUtils.timestampNTZAddInterval"
}
override def genCode(
ctx: CodegenContext,
start: String,
stop: String,
step: String,
arr: String,
elemType: String): String = {
val stepMonths = ctx.freshName("stepMonths")
val stepDays = ctx.freshName("stepDays")
val stepMicros = ctx.freshName("stepMicros")
val stepScaled = ctx.freshName("stepScaled")
val intervalInMicros = ctx.freshName("intervalInMicros")
val startMicros = ctx.freshName("startMicros")
val stopMicros = ctx.freshName("stopMicros")
val arrLength = ctx.freshName("arrLength")
val stepSign = ctx.freshName("stepSign")
val exclusiveItem = ctx.freshName("exclusiveItem")
val t = ctx.freshName("t")
val i = ctx.freshName("i")
val zid = ctx.addReferenceObj("zoneId", zoneId, classOf[ZoneId].getName)
val sequenceLengthCode =
s"""
|final long $intervalInMicros =
| $stepMicros + $stepMonths * ${microsPerMonth}L + $stepDays * ${MICROS_PER_DAY}L;
|${genSequenceLengthCode(
ctx, startMicros, stopMicros, step, intervalInMicros, arrLength)}
""".stripMargin
val check = if (scale == MICROS_PER_DAY) {
s"""
|if ($stepMonths == 0 && $stepDays == 0) {
| throw new IllegalArgumentException(
| "sequence step must be an ${intervalType.typeName} " +
| "of day granularity if start and end values are dates");
|}
""".stripMargin
} else {
""
}
val stepSplits = stepSplitCode(stepMonths, stepDays, stepMicros, step)
s"""
|$stepSplits
|
|$check
|
|if ($stepMonths == 0 && $stepMicros == 0 && ${scale}L == ${MICROS_PER_DAY}L) {
| ${backedSequenceImpl.genCode(ctx, start, stop, stepDays, arr, elemType)};
|
|} else if ($stepMonths == 0 && $stepDays == 0 && ${scale}L == 1) {
| ${backedSequenceImpl.genCode(ctx, start, stop, stepMicros, arr, elemType)};
|} else {
| final long $startMicros = $start * ${scale}L;
| final long $stopMicros = $stop * ${scale}L;
|
| $sequenceLengthCode
|
| final int $stepSign = $intervalInMicros > 0 ? +1 : -1;
| final long $exclusiveItem = $stopMicros + $stepSign;
|
| $arr = new $elemType[$arrLength];
| long $t = $startMicros;
| int $i = 0;
|
| while ($t < $exclusiveItem ^ $stepSign < 0) {
| $arr[$i] = ($elemType) ($t / ${scale}L);
| $i += 1;
| $t = $addIntervalCode(
| $startMicros, $i * $stepMonths, $i * $stepDays, $i * $stepMicros, $zid);
| }
|
| if ($arr.length > $i) {
| $arr = java.util.Arrays.copyOf($arr, $i);
| }
|}
""".stripMargin
}
}
private def getSequenceLength[U](start: U, stop: U, step: Any, estimatedStep: U)
(implicit num: Integral[U]): Int = {
import num._
require(
(estimatedStep > num.zero && start <= stop)
|| (estimatedStep < num.zero && start >= stop)
|| (estimatedStep == num.zero && start == stop),
s"Illegal sequence boundaries: $start to $stop by $step")
val len = if (start == stop) 1L else 1L + (stop.toLong - start.toLong) / estimatedStep.toLong
require(
len <= MAX_ROUNDED_ARRAY_LENGTH,
s"Too long sequence: $len. Should be <= $MAX_ROUNDED_ARRAY_LENGTH")
len.toInt
}
private def genSequenceLengthCode(
ctx: CodegenContext,
start: String,
stop: String,
step: String,
estimatedStep: String,
len: String): String = {
val longLen = ctx.freshName("longLen")
s"""
|if (!(($estimatedStep > 0 && $start <= $stop) ||
| ($estimatedStep < 0 && $start >= $stop) ||
| ($estimatedStep == 0 && $start == $stop))) {
| throw new IllegalArgumentException(
| "Illegal sequence boundaries: " + $start + " to " + $stop + " by " + $step);
|}
|long $longLen = $stop == $start ? 1L : 1L + ((long) $stop - $start) / $estimatedStep;
|if ($longLen > $MAX_ROUNDED_ARRAY_LENGTH) {
| throw new IllegalArgumentException(
| "Too long sequence: " + $longLen + ". Should be <= $MAX_ROUNDED_ARRAY_LENGTH");
|}
|int $len = (int) $longLen;
""".stripMargin
}
}
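// Illustrative usage sketch (not part of the implementation): with a SparkSession named
// `spark` in scope, the examples above correspond to:
//
//   spark.sql("SELECT sequence(1, 5)").show()   // [1,2,3,4,5]
//   spark.sql("SELECT sequence(5, 1)").show()   // [5,4,3,2,1]
//   spark.sql(
//     "SELECT sequence(to_date('2018-01-01'), to_date('2018-03-01'), interval 1 month)")
//     .show()                                   // [2018-01-01,2018-02-01,2018-03-01]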
/**
* Returns an array containing the given input value (left) repeated count (right) times.
*/
@ExpressionDescription(
usage = "_FUNC_(element, count) - Returns the array containing element count times.",
examples = """
Examples:
> SELECT _FUNC_('123', 2);
["123","123"]
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayRepeat(left: Expression, right: Expression)
extends BinaryExpression with ExpectsInputTypes {
override def dataType: ArrayType = ArrayType(left.dataType, left.nullable)
override def inputTypes: Seq[AbstractDataType] = Seq(AnyDataType, IntegerType)
override def nullable: Boolean = right.nullable
override def eval(input: InternalRow): Any = {
val count = right.eval(input)
if (count == null) {
null
} else {
if (count.asInstanceOf[Int] > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
throw QueryExecutionErrors.createArrayWithElementsExceedLimitError(count)
}
val element = left.eval(input)
new GenericArrayData(Array.fill(count.asInstanceOf[Int])(element))
}
}
override def prettyName: String = "array_repeat"
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val leftGen = left.genCode(ctx)
val rightGen = right.genCode(ctx)
val element = leftGen.value
val count = rightGen.value
val et = dataType.elementType
val coreLogic = genCodeForElement(ctx, et, element, count, leftGen.isNull, ev.value)
val resultCode = nullElementsProtection(ev, rightGen.isNull, coreLogic)
ev.copy(code =
code"""
|boolean ${ev.isNull} = false;
|${leftGen.code}
|${rightGen.code}
|${CodeGenerator.javaType(dataType)} ${ev.value} =
| ${CodeGenerator.defaultValue(dataType)};
|$resultCode
""".stripMargin)
}
private def nullElementsProtection(
ev: ExprCode,
rightIsNull: String,
coreLogic: String): String = {
if (nullable) {
s"""
|if ($rightIsNull) {
| ${ev.isNull} = true;
|} else {
| ${coreLogic}
|}
""".stripMargin
} else {
coreLogic
}
}
private def genCodeForNumberOfElements(ctx: CodegenContext, count: String): (String, String) = {
val numElements = ctx.freshName("numElements")
val numElementsCode =
s"""
|int $numElements = 0;
|if ($count > 0) {
| $numElements = $count;
|}
""".stripMargin
(numElements, numElementsCode)
}
private def genCodeForElement(
ctx: CodegenContext,
elementType: DataType,
element: String,
count: String,
leftIsNull: String,
arrayDataName: String): String = {
val tempArrayDataName = ctx.freshName("tempArrayData")
val k = ctx.freshName("k")
val (numElemName, numElemCode) = genCodeForNumberOfElements(ctx, count)
val allocation = CodeGenerator.createArrayData(
tempArrayDataName, elementType, numElemName, s" $prettyName failed.")
val assignment =
CodeGenerator.setArrayElement(tempArrayDataName, elementType, k, element)
s"""
|$numElemCode
|$allocation
|if (!$leftIsNull) {
| for (int $k = 0; $k < $tempArrayDataName.numElements(); $k++) {
| $assignment
| }
|} else {
| for (int $k = 0; $k < $tempArrayDataName.numElements(); $k++) {
| $tempArrayDataName.setNullAt($k);
| }
|}
|$arrayDataName = $tempArrayDataName;
""".stripMargin
}
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArrayRepeat = copy(left = newLeft, right = newRight)
}
/**
* Removes all elements equal to the given element from the array.
*/
@ExpressionDescription(
usage = "_FUNC_(array, element) - Remove all elements that equal to element from array.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3, null, 3), 3);
[1,2,null]
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayRemove(left: Expression, right: Expression)
extends BinaryExpression with ImplicitCastInputTypes with NullIntolerant {
override def dataType: DataType = left.dataType
override def inputTypes: Seq[AbstractDataType] = {
(left.dataType, right.dataType) match {
case (ArrayType(e1, hasNull), e2) =>
TypeCoercion.findTightestCommonType(e1, e2) match {
case Some(dt) => Seq(ArrayType(dt, hasNull), dt)
case _ => Seq.empty
}
case _ => Seq.empty
}
}
override def checkInputDataTypes(): TypeCheckResult = {
(left.dataType, right.dataType) match {
case (ArrayType(e1, _), e2) if e1.sameType(e2) =>
TypeUtils.checkForOrderingExpr(e2, s"function $prettyName")
case _ => TypeCheckResult.TypeCheckFailure(s"Input to function $prettyName should have " +
s"been ${ArrayType.simpleString} followed by a value with same element type, but it's " +
s"[${left.dataType.catalogString}, ${right.dataType.catalogString}].")
}
}
private def elementType: DataType = left.dataType.asInstanceOf[ArrayType].elementType
@transient private lazy val ordering: Ordering[Any] =
TypeUtils.getInterpretedOrdering(right.dataType)
override def nullSafeEval(arr: Any, value: Any): Any = {
val newArray = new Array[Any](arr.asInstanceOf[ArrayData].numElements())
var pos = 0
arr.asInstanceOf[ArrayData].foreach(right.dataType, (i, v) =>
if (v == null || !ordering.equiv(v, value)) {
newArray(pos) = v
pos += 1
}
)
new GenericArrayData(newArray.slice(0, pos))
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
nullSafeCodeGen(ctx, ev, (arr, value) => {
val numsToRemove = ctx.freshName("numsToRemove")
val newArraySize = ctx.freshName("newArraySize")
val i = ctx.freshName("i")
val getValue = CodeGenerator.getValue(arr, elementType, i)
val isEqual = ctx.genEqual(elementType, value, getValue)
s"""
|int $numsToRemove = 0;
|for (int $i = 0; $i < $arr.numElements(); $i ++) {
| if (!$arr.isNullAt($i) && $isEqual) {
| $numsToRemove = $numsToRemove + 1;
| }
|}
|int $newArraySize = $arr.numElements() - $numsToRemove;
|${genCodeForResult(ctx, ev, arr, value, newArraySize)}
""".stripMargin
})
}
def genCodeForResult(
ctx: CodegenContext,
ev: ExprCode,
inputArray: String,
value: String,
newArraySize: String): String = {
val values = ctx.freshName("values")
val i = ctx.freshName("i")
val pos = ctx.freshName("pos")
val getValue = CodeGenerator.getValue(inputArray, elementType, i)
val isEqual = ctx.genEqual(elementType, value, getValue)
val allocation = CodeGenerator.createArrayData(
values, elementType, newArraySize, s" $prettyName failed.")
val assignment = CodeGenerator.createArrayAssignment(
values, elementType, inputArray, pos, i, false)
s"""
|$allocation
|int $pos = 0;
|for (int $i = 0; $i < $inputArray.numElements(); $i ++) {
| if ($inputArray.isNullAt($i)) {
| $values.setNullAt($pos);
| $pos = $pos + 1;
| }
| else {
| if (!($isEqual)) {
| $assignment
| $pos = $pos + 1;
| }
| }
|}
|${ev.value} = $values;
""".stripMargin
}
override def prettyName: String = "array_remove"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArrayRemove = copy(left = newLeft, right = newRight)
}
/**
* Common base trait for [[ArrayDistinct]], [[ArrayUnion]], [[ArrayIntersect]],
* and [[ArrayExcept]].
*/
trait ArraySetLike {
protected def dt: DataType
protected def et: DataType
@transient protected lazy val canUseSpecializedHashSet = et match {
case ByteType | ShortType | IntegerType | LongType | FloatType | DoubleType => true
case _ => false
}
@transient protected lazy val ordering: Ordering[Any] =
TypeUtils.getInterpretedOrdering(et)
protected def genGetValue(array: String, i: String): String =
CodeGenerator.getValue(array, et, i)
@transient protected lazy val (hsPostFix, hsTypeName) = {
val ptName = CodeGenerator.primitiveTypeName(et)
et match {
// we cast byte/short to int when writing to the hash set.
case ByteType | ShortType | IntegerType => ("$mcI$sp", "Int")
case LongType => ("$mcJ$sp", ptName)
case FloatType => ("$mcF$sp", ptName)
case DoubleType => ("$mcD$sp", ptName)
}
}
// we cast byte/short to int when writing to the hash set.
@transient protected lazy val hsValueCast = et match {
case ByteType | ShortType => "(int) "
case _ => ""
}
// When we hit a null value, we put a placeholder in the ArrayBuilder. After converting the
// ArrayBuilder to ArrayData we call setNullAt on the slot that holds the placeholder.
@transient protected lazy val nullValueHolder = et match {
case ByteType => "(byte) 0"
case ShortType => "(short) 0"
case LongType => "0L"
case FloatType => "0.0f"
case DoubleType => "0.0"
case _ => "0"
}
protected def withResultArrayNullCheck(
body: String,
value: String,
nullElementIndex: String): String = {
if (dt.asInstanceOf[ArrayType].containsNull) {
s"""
|$body
|if ($nullElementIndex >= 0) {
| // result has null element
| $value.setNullAt($nullElementIndex);
|}
""".stripMargin
} else {
body
}
}
def buildResultArray(
builder: String,
value : String,
size : String,
nullElementIndex : String): String = withResultArrayNullCheck(
s"""
|if ($size > ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}) {
| throw QueryExecutionErrors.createArrayWithElementsExceedLimitError($size);
|}
|
|if (!UnsafeArrayData.shouldUseGenericArrayData(${et.defaultSize}, $size)) {
| $value = UnsafeArrayData.fromPrimitiveArray($builder.result());
|} else {
| $value = new ${classOf[GenericArrayData].getName}($builder.result());
|}
""".stripMargin, value, nullElementIndex)
}
/**
* Removes duplicate values from the array.
*/
@ExpressionDescription(
usage = "_FUNC_(array) - Removes duplicate values from the array.",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3, null, 3));
[1,2,3,null]
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayDistinct(child: Expression)
extends UnaryExpression with ArraySetLike with ExpectsInputTypes with NullIntolerant {
override def inputTypes: Seq[AbstractDataType] = Seq(ArrayType)
override def dataType: DataType = child.dataType
@transient private lazy val elementType: DataType = dataType.asInstanceOf[ArrayType].elementType
override protected def dt: DataType = dataType
override protected def et: DataType = elementType
override def checkInputDataTypes(): TypeCheckResult = {
super.checkInputDataTypes() match {
case f: TypeCheckResult.TypeCheckFailure => f
case TypeCheckResult.TypeCheckSuccess =>
TypeUtils.checkForOrderingExpr(elementType, s"function $prettyName")
}
}
override def nullSafeEval(array: Any): Any = {
val data = array.asInstanceOf[ArrayData]
doEvaluation(data)
}
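// Fast path: element types with well-behaved equality are de-duplicated with an open hash
// set (with dedicated NaN and null handling); other types fall back to a quadratic scan
// that compares elements via the interpreted ordering.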
@transient private lazy val doEvaluation = if (TypeUtils.typeWithProperEquals(elementType)) {
(array: ArrayData) =>
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
val hs = new SQLOpenHashSet[Any]()
val withNaNCheckFunc = SQLOpenHashSet.withNaNCheckFunc(elementType, hs,
(value: Any) =>
if (!hs.contains(value)) {
if (arrayBuffer.size > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
ArrayBinaryLike.throwUnionLengthOverflowException(arrayBuffer.size)
}
arrayBuffer += value
hs.add(value)
},
(valueNaN: Any) => arrayBuffer += valueNaN)
val withNullCheckFunc = SQLOpenHashSet.withNullCheckFunc(elementType, hs,
(value: Any) => withNaNCheckFunc(value),
() => arrayBuffer += null)
var i = 0
while (i < array.numElements()) {
withNullCheckFunc(array, i)
i += 1
}
new GenericArrayData(arrayBuffer.toSeq)
} else {
(data: ArrayData) => {
val array = data.toArray[AnyRef](elementType)
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[AnyRef]
var alreadyStoredNull = false
for (i <- 0 until array.length) {
if (array(i) != null) {
var found = false
var j = 0
while (!found && j < arrayBuffer.size) {
val va = arrayBuffer(j)
found = (va != null) && ordering.equiv(va, array(i))
j += 1
}
if (!found) {
arrayBuffer += array(i)
}
} else {
// De-duplicate the null values.
if (!alreadyStoredNull) {
arrayBuffer += array(i)
alreadyStoredNull = true
}
}
}
new GenericArrayData(arrayBuffer.toSeq)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val i = ctx.freshName("i")
val value = ctx.freshName("value")
val size = ctx.freshName("size")
if (canUseSpecializedHashSet) {
val jt = CodeGenerator.javaType(elementType)
val ptName = CodeGenerator.primitiveTypeName(jt)
nullSafeCodeGen(ctx, ev, (array) => {
val nullElementIndex = ctx.freshName("nullElementIndex")
val builder = ctx.freshName("builder")
val openHashSet = classOf[SQLOpenHashSet[_]].getName
val classTag = s"scala.reflect.ClassTag$$.MODULE$$.$hsTypeName()"
val hashSet = ctx.freshName("hashSet")
val arrayBuilder = classOf[mutable.ArrayBuilder[_]].getName
val arrayBuilderClass = s"$arrayBuilder$$of$ptName"
// Only need to track null element index when array's element is nullable.
val declareNullTrackVariables = if (dataType.asInstanceOf[ArrayType].containsNull) {
s"""
|int $nullElementIndex = -1;
""".stripMargin
} else {
""
}
val body =
s"""
|if (!$hashSet.contains($hsValueCast$value)) {
| if (++$size > ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}) {
| break;
| }
| $hashSet.add$hsPostFix($hsValueCast$value);
| $builder.$$plus$$eq($value);
|}
""".stripMargin
val withNaNCheckCodeGenerator =
(array: String, index: String) =>
s"$jt $value = ${genGetValue(array, index)};" +
SQLOpenHashSet.withNaNCheckCode(elementType, value, hashSet, body,
(valueNaN: String) =>
s"""
|$size++;
|$builder.$$plus$$eq($valueNaN);
""".stripMargin)
val processArray = SQLOpenHashSet.withNullCheckCode(
dataType.asInstanceOf[ArrayType].containsNull,
dataType.asInstanceOf[ArrayType].containsNull,
array, i, hashSet, withNaNCheckCodeGenerator,
s"""
|$nullElementIndex = $size;
|$size++;
|$builder.$$plus$$eq($nullValueHolder);
""".stripMargin)
s"""
|$openHashSet $hashSet = new $openHashSet$hsPostFix($classTag);
|$declareNullTrackVariables
|$arrayBuilderClass $builder = new $arrayBuilderClass();
|int $size = 0;
|for (int $i = 0; $i < $array.numElements(); $i++) {
| $processArray
|}
|${buildResultArray(builder, ev.value, size, nullElementIndex)}
""".stripMargin
})
} else {
nullSafeCodeGen(ctx, ev, (array) => {
val expr = ctx.addReferenceObj("arrayDistinctExpr", this)
s"${ev.value} = (ArrayData)$expr.nullSafeEval($array);"
})
}
}
override def prettyName: String = "array_distinct"
override protected def withNewChildInternal(newChild: Expression): ArrayDistinct =
copy(child = newChild)
}
/**
* Common base trait for [[ArrayUnion]], [[ArrayIntersect]], and [[ArrayExcept]].
*/
trait ArrayBinaryLike
extends BinaryArrayExpressionWithImplicitCast with ArraySetLike with NullIntolerant {
override protected def dt: DataType = dataType
override protected def et: DataType = elementType
override def checkInputDataTypes(): TypeCheckResult = {
val typeCheckResult = super.checkInputDataTypes()
if (typeCheckResult.isSuccess) {
TypeUtils.checkForOrderingExpr(dataType.asInstanceOf[ArrayType].elementType,
s"function $prettyName")
} else {
typeCheckResult
}
}
}
object ArrayBinaryLike {
def throwUnionLengthOverflowException(length: Int): Unit = {
throw QueryExecutionErrors.unionArrayWithElementsExceedLimitError(length)
}
}
/**
* Returns an array of the elements in the union of x and y, without duplicates
*/
@ExpressionDescription(
usage = """
_FUNC_(array1, array2) - Returns an array of the elements in the union of array1 and array2,
without duplicates.
""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), array(1, 3, 5));
[1,2,3,5]
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayUnion(left: Expression, right: Expression) extends ArrayBinaryLike
with ComplexTypeMergingExpression {
@transient lazy val evalUnion: (ArrayData, ArrayData) => ArrayData = {
if (TypeUtils.typeWithProperEquals(elementType)) {
(array1, array2) =>
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
val hs = new SQLOpenHashSet[Any]()
val withNaNCheckFunc = SQLOpenHashSet.withNaNCheckFunc(elementType, hs,
(value: Any) =>
if (!hs.contains(value)) {
if (arrayBuffer.size > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
ArrayBinaryLike.throwUnionLengthOverflowException(arrayBuffer.size)
}
arrayBuffer += value
hs.add(value)
},
(valueNaN: Any) => arrayBuffer += valueNaN)
val withNullCheckFunc = SQLOpenHashSet.withNullCheckFunc(elementType, hs,
(value: Any) => withNaNCheckFunc(value),
() => arrayBuffer += null
)
Seq(array1, array2).foreach { array =>
var i = 0
while (i < array.numElements()) {
withNullCheckFunc(array, i)
i += 1
}
}
new GenericArrayData(arrayBuffer.toSeq)
} else {
(array1, array2) =>
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
var alreadyIncludeNull = false
Seq(array1, array2).foreach(_.foreach(elementType, (_, elem) => {
var found = false
if (elem == null) {
if (alreadyIncludeNull) {
found = true
} else {
alreadyIncludeNull = true
}
} else {
// check elem is already stored in arrayBuffer or not?
var j = 0
while (!found && j < arrayBuffer.size) {
val va = arrayBuffer(j)
if (va != null && ordering.equiv(va, elem)) {
found = true
}
j = j + 1
}
}
if (!found) {
if (arrayBuffer.length > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
ArrayBinaryLike.throwUnionLengthOverflowException(arrayBuffer.length)
}
arrayBuffer += elem
}
}))
new GenericArrayData(arrayBuffer.toSeq)
}
}
override def nullSafeEval(input1: Any, input2: Any): Any = {
val array1 = input1.asInstanceOf[ArrayData]
val array2 = input2.asInstanceOf[ArrayData]
evalUnion(array1, array2)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val i = ctx.freshName("i")
val value = ctx.freshName("value")
val size = ctx.freshName("size")
if (canUseSpecializedHashSet) {
val jt = CodeGenerator.javaType(elementType)
val ptName = CodeGenerator.primitiveTypeName(jt)
nullSafeCodeGen(ctx, ev, (array1, array2) => {
val nullElementIndex = ctx.freshName("nullElementIndex")
val builder = ctx.freshName("builder")
val array = ctx.freshName("array")
val arrays = ctx.freshName("arrays")
val arrayDataIdx = ctx.freshName("arrayDataIdx")
val openHashSet = classOf[SQLOpenHashSet[_]].getName
val classTag = s"scala.reflect.ClassTag$$.MODULE$$.$hsTypeName()"
val hashSet = ctx.freshName("hashSet")
val arrayBuilder = classOf[mutable.ArrayBuilder[_]].getName
val arrayBuilderClass = s"$arrayBuilder$$of$ptName"
val body =
s"""
|if (!$hashSet.contains($hsValueCast$value)) {
| if (++$size > ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}) {
| break;
| }
| $hashSet.add$hsPostFix($hsValueCast$value);
| $builder.$$plus$$eq($value);
|}
""".stripMargin
val withNaNCheckCodeGenerator =
(array: String, index: String) =>
s"$jt $value = ${genGetValue(array, index)};" +
SQLOpenHashSet.withNaNCheckCode(elementType, value, hashSet, body,
(valueNaN: String) =>
s"""
|$size++;
|$builder.$$plus$$eq($valueNaN);
""".stripMargin)
val processArray = SQLOpenHashSet.withNullCheckCode(
dataType.asInstanceOf[ArrayType].containsNull,
dataType.asInstanceOf[ArrayType].containsNull,
array, i, hashSet, withNaNCheckCodeGenerator,
s"""
|$nullElementIndex = $size;
|$size++;
|$builder.$$plus$$eq($nullValueHolder);
""".stripMargin)
// Only need to track null element index when result array's element is nullable.
val declareNullTrackVariables = if (dataType.asInstanceOf[ArrayType].containsNull) {
s"""
|int $nullElementIndex = -1;
""".stripMargin
} else {
""
}
s"""
|$openHashSet $hashSet = new $openHashSet$hsPostFix($classTag);
|$declareNullTrackVariables
|int $size = 0;
|$arrayBuilderClass $builder = new $arrayBuilderClass();
|ArrayData[] $arrays = new ArrayData[]{$array1, $array2};
|for (int $arrayDataIdx = 0; $arrayDataIdx < 2; $arrayDataIdx++) {
| ArrayData $array = $arrays[$arrayDataIdx];
| for (int $i = 0; $i < $array.numElements(); $i++) {
| $processArray
| }
|}
|${buildResultArray(builder, ev.value, size, nullElementIndex)}
""".stripMargin
})
} else {
nullSafeCodeGen(ctx, ev, (array1, array2) => {
val expr = ctx.addReferenceObj("arrayUnionExpr", this)
s"${ev.value} = (ArrayData)$expr.nullSafeEval($array1, $array2);"
})
}
}
override def prettyName: String = "array_union"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArrayUnion = copy(left = newLeft, right = newRight)
}
object ArrayUnion {
def unionOrdering(
array1: ArrayData,
array2: ArrayData,
elementType: DataType,
ordering: Ordering[Any]): ArrayData = {
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
var alreadyIncludeNull = false
Seq(array1, array2).foreach(_.foreach(elementType, (_, elem) => {
var found = false
if (elem == null) {
if (alreadyIncludeNull) {
found = true
} else {
alreadyIncludeNull = true
}
} else {
// check elem is already stored in arrayBuffer or not?
var j = 0
while (!found && j < arrayBuffer.size) {
val va = arrayBuffer(j)
if (va != null && ordering.equiv(va, elem)) {
found = true
}
j = j + 1
}
}
if (!found) {
if (arrayBuffer.length > ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH) {
ArrayBinaryLike.throwUnionLengthOverflowException(arrayBuffer.length)
}
arrayBuffer += elem
}
}))
new GenericArrayData(arrayBuffer.toSeq)
}
}
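// Hedged illustration (not part of Spark): `unionOrdering` keeps first occurrences and at most
// one null. A sketch of calling it directly on in-memory arrays; `getInterpretedOrdering` and
// the literal values are assumptions made for illustration only.
private[expressions] object ArrayUnionSketch {
  def example(): ArrayData = ArrayUnion.unionOrdering(
    new GenericArrayData(Seq(1, 2, null)),
    new GenericArrayData(Seq(2, null, 3)),
    IntegerType,
    TypeUtils.getInterpretedOrdering(IntegerType))
  // The resulting array holds 1, 2, null, 3: first occurrences only, with a single null.
}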
/**
 * Returns an array of the elements in the intersection of x and y, without duplicates.
 */
@ExpressionDescription(
usage = """
_FUNC_(array1, array2) - Returns an array of the elements in the intersection of array1 and
array2, without duplicates.
""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), array(1, 3, 5));
[1,3]
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayIntersect(left: Expression, right: Expression) extends ArrayBinaryLike
with ComplexTypeMergingExpression {
private lazy val internalDataType: DataType = {
dataTypeCheck
ArrayType(elementType,
left.dataType.asInstanceOf[ArrayType].containsNull &&
right.dataType.asInstanceOf[ArrayType].containsNull)
}
override def dataType: DataType = internalDataType
@transient lazy val evalIntersect: (ArrayData, ArrayData) => ArrayData = {
if (TypeUtils.typeWithProperEquals(elementType)) {
(array1, array2) =>
if (array1.numElements() != 0 && array2.numElements() != 0) {
val hs = new SQLOpenHashSet[Any]
val hsResult = new SQLOpenHashSet[Any]
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
val withArray2NaNCheckFunc = SQLOpenHashSet.withNaNCheckFunc(elementType, hs,
(value: Any) => hs.add(value),
(valueNaN: Any) => {} )
val withArray2NullCheckFunc = SQLOpenHashSet.withNullCheckFunc(elementType, hs,
(value: Any) => withArray2NaNCheckFunc(value),
() => {}
)
val withArray1NaNCheckFunc = SQLOpenHashSet.withNaNCheckFunc(elementType, hsResult,
(value: Any) =>
if (hs.contains(value) && !hsResult.contains(value)) {
arrayBuffer += value
hsResult.add(value)
},
(valueNaN: Any) =>
if (hs.containsNaN()) {
arrayBuffer += valueNaN
})
val withArray1NullCheckFunc = SQLOpenHashSet.withNullCheckFunc(elementType, hsResult,
(value: Any) => withArray1NaNCheckFunc(value),
() =>
if (hs.containsNull()) {
arrayBuffer += null
}
)
var i = 0
while (i < array2.numElements()) {
withArray2NullCheckFunc(array2, i)
i += 1
}
i = 0
while (i < array1.numElements()) {
withArray1NullCheckFunc(array1, i)
i += 1
}
new GenericArrayData(arrayBuffer.toSeq)
} else {
new GenericArrayData(Array.emptyObjectArray)
}
} else {
(array1, array2) =>
if (array1.numElements() != 0 && array2.numElements() != 0) {
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
var alreadySeenNull = false
var i = 0
while (i < array1.numElements()) {
var found = false
val elem1 = array1.get(i, elementType)
if (array1.isNullAt(i)) {
if (!alreadySeenNull) {
var j = 0
while (!found && j < array2.numElements()) {
found = array2.isNullAt(j)
j += 1
}
// array2 is scanned only once for null element
alreadySeenNull = true
}
} else {
var j = 0
while (!found && j < array2.numElements()) {
if (!array2.isNullAt(j)) {
val elem2 = array2.get(j, elementType)
if (ordering.equiv(elem1, elem2)) {
// check whether elem1 is already stored in arrayBuffer
var foundArrayBuffer = false
var k = 0
while (!foundArrayBuffer && k < arrayBuffer.size) {
val va = arrayBuffer(k)
foundArrayBuffer = (va != null) && ordering.equiv(va, elem1)
k += 1
}
found = !foundArrayBuffer
}
}
j += 1
}
}
if (found) {
arrayBuffer += elem1
}
i += 1
}
new GenericArrayData(arrayBuffer.toSeq)
} else {
new GenericArrayData(Array.emptyObjectArray)
}
}
}
override def nullSafeEval(input1: Any, input2: Any): Any = {
val array1 = input1.asInstanceOf[ArrayData]
val array2 = input2.asInstanceOf[ArrayData]
evalIntersect(array1, array2)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val i = ctx.freshName("i")
val value = ctx.freshName("value")
val size = ctx.freshName("size")
if (canUseSpecializedHashSet) {
val jt = CodeGenerator.javaType(elementType)
val ptName = CodeGenerator.primitiveTypeName(jt)
nullSafeCodeGen(ctx, ev, (array1, array2) => {
val nullElementIndex = ctx.freshName("nullElementIndex")
val builder = ctx.freshName("builder")
val openHashSet = classOf[SQLOpenHashSet[_]].getName
val classTag = s"scala.reflect.ClassTag$$.MODULE$$.$hsTypeName()"
val hashSet = ctx.freshName("hashSet")
val hashSetResult = ctx.freshName("hashSetResult")
val arrayBuilder = classOf[mutable.ArrayBuilder[_]].getName
val arrayBuilderClass = s"$arrayBuilder$$of$ptName"
val withArray2NaNCheckCodeGenerator =
(array: String, index: String) =>
s"$jt $value = ${genGetValue(array, index)};" +
SQLOpenHashSet.withNaNCheckCode(elementType, value, hashSet,
s"$hashSet.add$hsPostFix($hsValueCast$value);",
(valueNaN: String) => "")
val writeArray2ToHashSet = SQLOpenHashSet.withNullCheckCode(
right.dataType.asInstanceOf[ArrayType].containsNull,
left.dataType.asInstanceOf[ArrayType].containsNull,
array2, i, hashSet, withArray2NaNCheckCodeGenerator, "")
val body =
s"""
|if ($hashSet.contains($hsValueCast$value) &&
| !$hashSetResult.contains($hsValueCast$value)) {
| if (++$size > ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}) {
| break;
| }
| $hashSetResult.add$hsPostFix($hsValueCast$value);
| $builder.$$plus$$eq($value);
|}
""".stripMargin
val withArray1NaNCheckCodeGenerator =
(array: String, index: String) =>
s"$jt $value = ${genGetValue(array, index)};" +
SQLOpenHashSet.withNaNCheckCode(elementType, value, hashSetResult, body,
(valueNaN: Any) =>
s"""
|if ($hashSet.containsNaN()) {
| ++$size;
| $builder.$$plus$$eq($valueNaN);
|}
""".stripMargin)
val processArray1 = SQLOpenHashSet.withNullCheckCode(
left.dataType.asInstanceOf[ArrayType].containsNull,
right.dataType.asInstanceOf[ArrayType].containsNull,
array1, i, hashSetResult, withArray1NaNCheckCodeGenerator,
s"""
|$nullElementIndex = $size;
|$size++;
|$builder.$$plus$$eq($nullValueHolder);
""".stripMargin)
// Only need to track null element index when result array's element is nullable.
val declareNullTrackVariables = if (dataType.asInstanceOf[ArrayType].containsNull) {
s"""
|int $nullElementIndex = -1;
""".stripMargin
} else {
""
}
s"""
|$openHashSet $hashSet = new $openHashSet$hsPostFix($classTag);
|$openHashSet $hashSetResult = new $openHashSet$hsPostFix($classTag);
|$declareNullTrackVariables
|for (int $i = 0; $i < $array2.numElements(); $i++) {
| $writeArray2ToHashSet
|}
|$arrayBuilderClass $builder = new $arrayBuilderClass();
|int $size = 0;
|for (int $i = 0; $i < $array1.numElements(); $i++) {
| $processArray1
|}
|${buildResultArray(builder, ev.value, size, nullElementIndex)}
""".stripMargin
})
} else {
nullSafeCodeGen(ctx, ev, (array1, array2) => {
val expr = ctx.addReferenceObj("arrayIntersectExpr", this)
s"${ev.value} = (ArrayData)$expr.nullSafeEval($array1, $array2);"
})
}
}
override def prettyName: String = "array_intersect"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArrayIntersect =
copy(left = newLeft, right = newRight)
}
/**
 * Returns an array of the elements in x but not in y, without duplicates.
 */
@ExpressionDescription(
usage = """
_FUNC_(array1, array2) - Returns an array of the elements in array1 but not in array2,
without duplicates.
""",
examples = """
Examples:
> SELECT _FUNC_(array(1, 2, 3), array(1, 3, 5));
[2]
""",
group = "array_funcs",
since = "2.4.0")
case class ArrayExcept(left: Expression, right: Expression) extends ArrayBinaryLike
with ComplexTypeMergingExpression {
private lazy val internalDataType: DataType = {
dataTypeCheck
left.dataType
}
override def dataType: DataType = internalDataType
@transient lazy val evalExcept: (ArrayData, ArrayData) => ArrayData = {
if (TypeUtils.typeWithProperEquals(elementType)) {
(array1, array2) =>
val hs = new SQLOpenHashSet[Any]
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
val withArray2NaNCheckFunc = SQLOpenHashSet.withNaNCheckFunc(elementType, hs,
(value: Any) => hs.add(value),
(valueNaN: Any) => {})
val withArray2NullCheckFunc = SQLOpenHashSet.withNullCheckFunc(elementType, hs,
(value: Any) => withArray2NaNCheckFunc(value),
() => {}
)
val withArray1NaNCheckFunc = SQLOpenHashSet.withNaNCheckFunc(elementType, hs,
(value: Any) =>
if (!hs.contains(value)) {
arrayBuffer += value
hs.add(value)
},
(valueNaN: Any) => arrayBuffer += valueNaN)
val withArray1NullCheckFunc = SQLOpenHashSet.withNullCheckFunc(elementType, hs,
(value: Any) => withArray1NaNCheckFunc(value),
() => arrayBuffer += null
)
var i = 0
while (i < array2.numElements()) {
withArray2NullCheckFunc(array2, i)
i += 1
}
i = 0
while (i < array1.numElements()) {
withArray1NullCheckFunc(array1, i)
i += 1
}
new GenericArrayData(arrayBuffer.toSeq)
} else {
(array1, array2) =>
val arrayBuffer = new scala.collection.mutable.ArrayBuffer[Any]
var scannedNullElements = false
var i = 0
while (i < array1.numElements()) {
var found = false
val elem1 = array1.get(i, elementType)
if (elem1 == null) {
if (!scannedNullElements) {
var j = 0
while (!found && j < array2.numElements()) {
found = array2.isNullAt(j)
j += 1
}
// array2 is scanned only once for null element
scannedNullElements = true
} else {
found = true
}
} else {
var j = 0
while (!found && j < array2.numElements()) {
val elem2 = array2.get(j, elementType)
if (elem2 != null) {
found = ordering.equiv(elem1, elem2)
}
j += 1
}
if (!found) {
// check whether elem1 is already stored in arrayBuffer
var k = 0
while (!found && k < arrayBuffer.size) {
val va = arrayBuffer(k)
found = (va != null) && ordering.equiv(va, elem1)
k += 1
}
}
}
if (!found) {
arrayBuffer += elem1
}
i += 1
}
new GenericArrayData(arrayBuffer.toSeq)
}
}
override def nullSafeEval(input1: Any, input2: Any): Any = {
val array1 = input1.asInstanceOf[ArrayData]
val array2 = input2.asInstanceOf[ArrayData]
evalExcept(array1, array2)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val i = ctx.freshName("i")
val value = ctx.freshName("value")
val size = ctx.freshName("size")
if (canUseSpecializedHashSet) {
val jt = CodeGenerator.javaType(elementType)
val ptName = CodeGenerator.primitiveTypeName(jt)
nullSafeCodeGen(ctx, ev, (array1, array2) => {
val nullElementIndex = ctx.freshName("nullElementIndex")
val builder = ctx.freshName("builder")
val openHashSet = classOf[SQLOpenHashSet[_]].getName
val classTag = s"scala.reflect.ClassTag$$.MODULE$$.$hsTypeName()"
val hashSet = ctx.freshName("hashSet")
val arrayBuilder = classOf[mutable.ArrayBuilder[_]].getName
val arrayBuilderClass = s"$arrayBuilder$$of$ptName"
val withArray2NaNCheckCodeGenerator =
(array: String, index: String) =>
s"$jt $value = ${genGetValue(array, i)};" +
SQLOpenHashSet.withNaNCheckCode(elementType, value, hashSet,
s"$hashSet.add$hsPostFix($hsValueCast$value);",
(valueNaN: Any) => "")
val writeArray2ToHashSet = SQLOpenHashSet.withNullCheckCode(
right.dataType.asInstanceOf[ArrayType].containsNull,
left.dataType.asInstanceOf[ArrayType].containsNull,
array2, i, hashSet, withArray2NaNCheckCodeGenerator, "")
val body =
s"""
|if (!$hashSet.contains($hsValueCast$value)) {
| if (++$size > ${ByteArrayMethods.MAX_ROUNDED_ARRAY_LENGTH}) {
| break;
| }
| $hashSet.add$hsPostFix($hsValueCast$value);
| $builder.$$plus$$eq($value);
|}
""".stripMargin
val withArray1NaNCheckCodeGenerator =
(array: String, index: String) =>
s"$jt $value = ${genGetValue(array, index)};" +
SQLOpenHashSet.withNaNCheckCode(elementType, value, hashSet, body,
(valueNaN: String) =>
s"""
|$size++;
|$builder.$$plus$$eq($valueNaN);
""".stripMargin)
val processArray1 = SQLOpenHashSet.withNullCheckCode(
left.dataType.asInstanceOf[ArrayType].containsNull,
left.dataType.asInstanceOf[ArrayType].containsNull,
array1, i, hashSet, withArray1NaNCheckCodeGenerator,
s"""
|$nullElementIndex = $size;
|$size++;
|$builder.$$plus$$eq($nullValueHolder);
""".stripMargin)
// Only need to track null element index when array1's element is nullable.
val declareNullTrackVariables = if (left.dataType.asInstanceOf[ArrayType].containsNull) {
s"""
|int $nullElementIndex = -1;
""".stripMargin
} else {
""
}
s"""
|$openHashSet $hashSet = new $openHashSet$hsPostFix($classTag);
|$declareNullTrackVariables
|for (int $i = 0; $i < $array2.numElements(); $i++) {
| $writeArray2ToHashSet
|}
|$arrayBuilderClass $builder = new $arrayBuilderClass();
|int $size = 0;
|for (int $i = 0; $i < $array1.numElements(); $i++) {
| $processArray1
|}
|${buildResultArray(builder, ev.value, size, nullElementIndex)}
""".stripMargin
})
} else {
nullSafeCodeGen(ctx, ev, (array1, array2) => {
val expr = ctx.addReferenceObj("arrayExceptExpr", this)
s"${ev.value} = (ArrayData)$expr.nullSafeEval($array1, $array2);"
})
}
}
override def prettyName: String = "array_except"
override protected def withNewChildrenInternal(
newLeft: Expression, newRight: Expression): ArrayExcept = copy(left = newLeft, right = newRight)
}
| vinodkc/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala | Scala | apache-2.0 | 151,224 |
package gdg.blaze.ext.hadoop
import gdg.blaze._
import gdg.blaze.codec.{PlainCodec, JSONCodec}
import org.apache.hadoop.io.{LongWritable, Text}
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
/* add_field => ... # hash (optional), default: {}
codec => ... # codec (optional), default: "plain"
discover_interval => ... # number (optional), default: 15
exclude => ... # array (optional)
path => ... # array (required)
sincedb_path => ... # string (optional)
sincedb_write_interval => ... # number (optional), default: 15
start_position => ... # string, one of ["beginning", "end"] (optional), default: "end"
stat_interval => ... # number (optional), default: 1
tags => ... # array (optional)
type => ... # string (optional)
format = (text, or binary, etc)
*/
class HdfsInput(config: HdfsConfig, @transient bc: BlazeContext) extends Input {
  override def apply(): DStream[Message] = {
    if (config.path.isEmpty) {
      throw new IllegalStateException("hdfs plugin requires path field")
    }
    val mstream = config.format match {
      case "text" => text(config.path, newFiles = config.new_files_only)
      // Fail fast on formats that are not implemented yet (the config comment mentions binary).
      case other => throw new IllegalArgumentException(s"hdfs plugin does not support format '$other'")
    }
    mstream.flatMap { str => config.codec.decode(str) }
  }
def text(path: String, newFiles: Boolean): DStream[String] = {
val stream: InputDStream[(LongWritable, Text)] = bc.sc.fileStream[LongWritable, Text, TextInputFormat](path, Function.const(true) _, newFiles)
stream.print()
stream.map(_._2.toString)
}
}
case class HdfsConfig(
  format: String = "text",
  path: String,
  codec: Codec = PlainCodec.single,
  new_files_only: Boolean = false)
object HdfsInput extends PluginFactory[HdfsInput] {
override def apply(config: PluginConfig, bc: BlazeContext) = {
new HdfsInput(config.convert(classOf[HdfsConfig]), bc)
}
}
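// Hedged usage sketch (illustrative only): constructing the config case class directly. The
// path value is a hypothetical placeholder; in practice the framework builds HdfsConfig via
// PluginConfig.convert as shown in the factory above.
object HdfsInputExample {
  val textConfig = HdfsConfig(path = "hdfs:///logs/*.log", new_files_only = true)
}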
| micahrupersburg/blaze-of-glory | src/main/scala/gdg/blaze/ext/hadoop/HdfsInput.scala | Scala | apache-2.0 | 1,941 |
package wandou.math.algebra
import wandou.math.CardinalityException
import wandou.math.IndexException
/**
 * Sparse matrix with general element values whose columns are accessible quickly. Implemented as a column array of
 * SparseVectors.
 * Constructs a matrix of the given cardinality with the given data columns.
 *
 * @param _rows number of rows
 * @param _columns number of columns
 * @param columnVectors a RandomAccessSparseVector array holding the columns
 */
class SparseColumnMatrix private (_rows: Int, _columns: Int, private var columnVectors: Array[RandomAccessSparseVector]) extends AbstractMatrix(_rows, _columns) {
override def clone: Matrix = {
val x = super.clone.asInstanceOf[SparseColumnMatrix]
x.columnVectors = new Array[RandomAccessSparseVector](columnVectors.length)
var i = 0
while (i < columnVectors.length) {
x.columnVectors(i) = columnVectors(i).clone
i += 1
}
x
}
/**
* Abstracted out for the iterator
* @return {@link #numCols()}
*/
override def numSlices: Int = {
numCols
}
override def apply(row: Int, column: Int): Double = {
if (columnVectors(column) == null) 0.0 else columnVectors(column)(row)
}
override def like(): Matrix = {
SparseColumnMatrix(rowSize, columnSize)
}
override def like(rows: Int, columns: Int): Matrix = {
SparseColumnMatrix(rows, columns)
}
override def update(row: Int, column: Int, value: Double) {
if (columnVectors(column) == null) {
columnVectors(column) = RandomAccessSparseVector(rowSize)
}
columnVectors(column)(row) = value
}
override def getNumNondefaultElements: Array[Int] = {
import AbstractMatrix._
val result = new Array[Int](2)
    result(COL) = columnVectors.length
var col = 0
while (col < columnSize) {
result(ROW) = math.max(result(ROW), columnVectors(col).getNumNondefaultElements)
col += 1
}
result
}
override def viewPart(offset: Array[Int], size: Array[Int]): Matrix = {
import AbstractMatrix._
if (offset(ROW) < 0) {
throw new IndexException(offset(ROW), columnVectors(COL).size)
}
if (offset(ROW) + size(ROW) > columnVectors(COL).size) {
throw new IndexException(offset(ROW) + size(ROW), columnVectors(COL).size)
}
if (offset(COL) < 0) {
throw new IndexException(offset(COL), columnVectors.length)
}
if (offset(COL) + size(COL) > columnVectors.length) {
throw new IndexException(offset(COL) + size(COL), columnVectors.length)
}
MatrixView(this, offset, size)
}
override def assignColumn(column: Int, other: Vector): Matrix = {
if (rowSize != other.size) {
throw new CardinalityException(rowSize, other.size)
}
if (column < 0 || column >= columnSize) {
throw new IndexException(column, columnSize)
}
columnVectors(column).assign(other)
this
}
override def assignRow(row: Int, other: Vector): Matrix = {
if (columnSize != other.size) {
throw new CardinalityException(columnSize, other.size)
}
if (row < 0 || row >= rowSize) {
throw new IndexException(row, rowSize)
}
var col = 0
while (col < columnSize) {
columnVectors(col)(row) = other(col)
col += 1
}
this
}
override def viewColumn(column: Int): Vector = {
if (column < 0 || column >= columnSize) {
throw new IndexException(column, columnSize)
}
columnVectors(column)
}
}
object SparseColumnMatrix {
def apply(rows: Int, columns: Int, columnVectors: Array[RandomAccessSparseVector]) = {
val columnVectors1 = columnVectors.clone
var col = 0
while (col < columns) {
columnVectors1(col) = columnVectors(col).clone
col += 1
}
    // Pass the deep copy built above so callers cannot mutate the matrix's backing columns.
    new SparseColumnMatrix(rows, columns, columnVectors1)
}
/**
* Construct a matrix of the given cardinality
*
* @param rows
* @param columns
*/
def apply(rows: Int, columns: Int) = {
val columnVectors = new Array[RandomAccessSparseVector](columns)
var col = 0
while (col < columns) {
columnVectors(col) = RandomAccessSparseVector(rows)
col += 1
}
new SparseColumnMatrix(rows, columns, columnVectors)
}
}
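// Hedged usage sketch (not part of the library): build an empty 3x3 sparse matrix, set a cell
// through update, and read it back through apply; unset cells read as 0.0.
object SparseColumnMatrixExample {
  val m = SparseColumnMatrix(3, 3)
  m(1, 2) = 4.2
  val stored = m(1, 2)   // 4.2
  val default = m(0, 0)  // 0.0
}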
| wandoulabs/wandou-math | wandou-math/src/main/scala/wandou/math/algebra/SparseColumnMatrix.scala | Scala | apache-2.0 | 4,157 |
package scwebapp.servlet.extension
import jakarta.servlet.http.*
import scwebapp.servlet.HttpAttribute
object HttpSessionExtensions {
extension(peer:HttpSession) {
def attribute[T<:AnyRef](name:String):HttpAttribute[T] =
new HttpAttribute[T](
() => peer.getAttribute(name),
(it) => peer.setAttribute(name, it),
() => peer.removeAttribute(name),
)
}
}
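// Hedged usage sketch (not part of the library): obtaining a typed attribute handle for a
// session; the attribute name "user" is a hypothetical placeholder.
object HttpSessionExtensionsExample {
  import HttpSessionExtensions.*
  def userAttr(session: HttpSession): HttpAttribute[String] =
    session.attribute[String]("user")
}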
| ritschwumm/scwebapp | modules/servlet/src/main/scala/scwebapp/servlet/extension/HttpSessionExtensions.scala | Scala | bsd-2-clause | 378 |
// Project: angulate2-examples
// Module: 06 AngelloLite
// Description: Component for rendering of StoryS
package angellolite
import angulate2._
@Component(
selector = "story",
templateUrl = "src/main/resources/html/story.html",
inputs = @@("story")
)
class StoryComponent {
var story: Story = _
}
| jokade/angulate2-examples | archive/06_angelloLite/js/src/main/scala/angellolite/StoryComponent.scala | Scala | mit | 319 |
package org.juitar.util.time
import org.juitar.util.time.TimeSampler._
import org.specs2.mutable.SpecificationWithJUnit
import org.specs2.specification.Scope
import scala.concurrent.ExecutionContext
class BufferedReporterTest extends SpecificationWithJUnit {
private[this] implicit val executionContext = ExecutionContext.global
"report" should {
"report samples only after the buffer is full" in new Context {
val s1 = TimeSample(Series1, 1)
val s2 = TimeSample(Series2, 2)
val s3 = TimeSample(Series1, 3)
val s4 = TimeSample(Series2, 4)
val s5 = TimeSample(Series1, 5)
val s6 = TimeSample(Series1, 6)
bufferedReporter.report(s1)
bufferedReporter.report(s2)
reported must beEmpty
bufferedReporter.report(s3)
reported must (contain(s1, s2, s3) and haveSize(3))
bufferedReporter.report(s4)
bufferedReporter.report(s5)
reported must (contain(s1, s2, s3) and haveSize(3))
bufferedReporter.report(s6)
reported must (contain(s1, s2, s3, s4, s5, s6) and haveSize(6))
}
}
"flush" should {
"report all samples in the buffer immediately" in new Context {
val s1 = TimeSample(Series1, 1)
val s2 = TimeSample(Series2, 2)
bufferedReporter.report(s1)
bufferedReporter.report(s2)
reported must beEmpty
bufferedReporter.flush()
reported must (contain(s1, s2) and haveSize(2))
}
"not fail when buffer is empty" in new Context {
bufferedReporter.flush()
reported must beEmpty
val s1 = TimeSample(Series1, 1)
bufferedReporter.report(s1)
bufferedReporter.flush()
reported must (contain(s1) and haveSize(1))
bufferedReporter.flush()
}
}
trait Context extends Scope {
val Series1 = "S1"
val Series2 = "S2"
var reported: Seq[TimeSample] = Seq()
implicit val report: ReportSample = s => {
this.synchronized {
reported = reported :+ s
}
}
val bufferedReporter = new BufferedReporter(report, 3)
}
}
| sha1n/scala-time | src/test/scala/org/juitar/util/time/BufferedReporterTest.scala | Scala | apache-2.0 | 2,068 |
package com.datawizards.dqm.configuration.loader
import java.io.File
import com.datawizards.dqm.configuration.DataQualityMonitoringConfiguration
import com.typesafe.config.{Config, ConfigFactory}
/**
* Loads multiple tables configuration from file.
* <br/>
* Expected format:
* <pre>
*tablesConfiguration = [
{
location = {type = Hive, table = clients},
rules = {
rowRules = [
{
field = client_id,
rules = [
{type = NotNull},
{type = min, value = 0}
]
},
{
field = client_name,
rules = [
{type = NotNull}
]
}
]
}
},
{
location = {type = Hive, table = companies},
rules = {
rowRules = [
{
field = company_id,
rules = [
{type = NotNull},
{type = max, value = 100}
]
},
{
field = company_name,
rules = [
{type = NotNull}
]
}
]
}
}
]
 * </pre>
*
* @param path configuration file
*/
class FileMultipleTablesConfigurationLoader(path: String) extends ConfigurationLoader {
override def loadConfiguration(): DataQualityMonitoringConfiguration = {
val config = ConfigFactory.parseFile(new File(path))
parseConfig(config)
}
private def parseConfig(config: Config): DataQualityMonitoringConfiguration = {
val tablesConfiguration = config.getList("tablesConfiguration")
DataQualityMonitoringConfiguration(parseTablesConfiguration(tablesConfiguration))
}
}
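// Hedged usage sketch (illustrative only): loading the monitoring configuration from a HOCON
// file on disk; the path is a hypothetical placeholder.
object FileMultipleTablesConfigurationLoaderExample {
  def load(): DataQualityMonitoringConfiguration =
    new FileMultipleTablesConfigurationLoader("/etc/dqm/tables.conf").loadConfiguration()
}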
| piotr-kalanski/data-quality-monitoring | src/main/scala/com/datawizards/dqm/configuration/loader/FileMultipleTablesConfigurationLoader.scala | Scala | apache-2.0 | 1,684 |
package models.machine
import akka.actor._
import models._
import models.connector.Connector.PositionUpdate
/** Keeps track of the current row number. */
private object RowTracker {
case class NextRow(carriage: Carriage)
case class WorkingZone(from: Needle, until: Needle) {
private[RowTracker] def contains(pos: CarriagePosition) = pos match {
case CarriageOverNeedles(n) => n >= from && n <= until
case _ => false
}
private[RowTracker] def relative(pos: CarriagePosition) = pos match {
case CarriageOverNeedles(n) => if (n < from) Left else Right
case CarriageLeft(_) => Left
case CarriageRight(_) => Right
case CarriageRemoved => throw new IllegalStateException("carriage is removed")
}
}
def props(commander: ActorRef) = Props(new RowTracker(commander))
private object Position {
def unapply(msg: Any): Option[(Carriage, CarriagePosition, Direction)] = msg match {
      case PositionUpdate(pos, direction, Some(carriage)) => Some((carriage, pos, direction))
case _ => None
}
}
private class RowTracker(commander: ActorRef) extends Actor {
var workingZone = WorkingZone(Needle.middle - 1, Needle.middle + 1)
var lastPos: CarriagePosition = CarriageLeft(0)
context watch commander
def receive = {
case Position(carriage, pos, dir) if workingZone.contains(pos) =>
lastPos = pos
context become track(carriage, dir)
case Position(carriage, pos, _) =>
lastPos = pos
context become track(carriage, workingZone.relative(pos).direction.reverse)
case w: WorkingZone =>
workingZone = w
}
def track(trackedCarriage: Carriage, direction: Direction): Receive = {
case Position(carriage, pos, _) if carriage == trackedCarriage && !workingZone.contains(pos) && workingZone.relative(pos) == direction.towards =>
commander ! NextRow(carriage)
lastPos = pos
context become track(carriage, direction.reverse)
case Position(carriage, pos, _) if carriage == trackedCarriage =>
lastPos = pos
case Position(carriage, pos, _) => //switched carriage
context become track(carriage, workingZone.relative(pos).direction.reverse)
case w: WorkingZone if workingZone != w =>
workingZone = w
if (!w.contains(lastPos))
context become track(trackedCarriage, w.relative(lastPos).direction.reverse)
}
}
}
| knittery/knittery-ui | app/models/machine/RowTracker.scala | Scala | gpl-2.0 | 2,437 |
package com.datastax.spark.connector
import scala.language.implicitConversions
import com.datastax.spark.connector.cql.TableDef
sealed trait ColumnSelector {
def aliases: Map[String, String]
def selectFrom(table: TableDef): IndexedSeq[ColumnRef]
}
case object AllColumns extends ColumnSelector {
override def aliases: Map[String, String] = Map.empty.withDefault(x => x)
override def selectFrom(table: TableDef) =
table.columns.map(_.ref)
}
case object PartitionKeyColumns extends ColumnSelector {
override def aliases: Map[String, String] = Map.empty.withDefault(x => x)
override def selectFrom(table: TableDef) =
table.partitionKey.map(_.ref).toIndexedSeq
}
case object PrimaryKeyColumns extends ColumnSelector {
override def aliases: Map[String, String] = Map.empty.withDefault(x => x)
override def selectFrom(table: TableDef) =
table.primaryKey.map(_.ref)
}
case class SomeColumns(columns: ColumnRef*) extends ColumnSelector {
override def aliases: Map[String, String] = columns.map {
case ref => (ref.selectedAs, ref.cqlValueName)
}.toMap
override def selectFrom(table: TableDef): IndexedSeq[ColumnRef] = {
val missing = table.missingColumns {
columns flatMap {
case f: FunctionCallRef => f.requiredColumns //Replaces function calls by their required columns
case RowCountRef => Seq.empty //Filters RowCountRef from the column list
case other => Seq(other)
}
}
if (missing.nonEmpty) throw new NoSuchElementException(
s"Columns not found in table ${table.name}: ${missing.mkString(", ")}")
columns.toIndexedSeq
}
}
object SomeColumns {
@deprecated("Use com.datastax.spark.connector.rdd.SomeColumns instead of Seq", "1.0")
implicit def seqToSomeColumns(columns: Seq[String]): SomeColumns =
SomeColumns(columns.map(x => x: ColumnRef): _*)
}
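// Hedged usage sketch (not part of the connector): the common selectors side by side. The
// ColumnName constructor is assumed here to be the connector's usual way to name a column.
object ColumnSelectorExample {
  val everything: ColumnSelector = AllColumns
  val keysOnly: ColumnSelector = PartitionKeyColumns
  val explicit: ColumnSelector = SomeColumns(ColumnName("id"), ColumnName("name"))
}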
| shashwat7/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/ColumnSelector.scala | Scala | apache-2.0 | 1,861 |
/*******************************************************************************
Copyright (c) 2012-2013, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.interpreter.objects
import kr.ac.kaist.jsaf.scala_src.useful.Lists._
import kr.ac.kaist.jsaf.scala_src.useful.Options._
import kr.ac.kaist.jsaf.interpreter.{InterpreterPredefine => IP, _}
class JSBooleanConstructor(_I: Interpreter, _proto: JSObject)
extends JSFunction13(_I, _proto, "Function", true,
propTable, _I.IH.dummyFtn(1), EmptyEnv(), true) {
def init(): Unit = {
/*
* 15.6.3 Properties of the Boolean Constructor
* { [[Writable]]: false, [[Enumerable]]: false, [[Configurable]]: false }
*/
property.put("length", I.IH.numProp(1))
property.put("prototype", I.IH.mkDataProp(I.IS.BooleanPrototype))
}
/*
* 15.6.2 The Boolean Constructor
* 15.6.2.1 new Boolean (value)
*/
def construct(value: Val): JSBoolean = {
new JSBoolean(I, I.IS.BooleanPrototype, "Boolean", true,
I.IH.boolPropTable(I.IH.toBoolean(value)))
}
override def _construct(argsObj: JSObject): JSBoolean = construct(argsObj._get("0"))
/*
* 15.6.1 The Boolean Constructor Called as a Function
* 15.6.1.1 Boolean (value)
*/
override def _call(tb: Val, argsObj: JSObject): Unit = {
I.IS.comp.setReturn(PVal(I.IH.mkIRBool(I.IH.toBoolean(argsObj._get("0")))))
}
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/interpreter/objects/JSBooleanConstructor.scala | Scala | bsd-3-clause | 1,616 |
package com.monsanto.arch.cloudformation.model.resource
import com.monsanto.arch.cloudformation.model._
import spray.json._
/**
* The AWS::ElasticLoadBalancingV2::LoadBalancer resource creates an Elastic Load Balancing Application load balancer
* that distributes incoming application traffic across multiple targets (such as EC2 instances) in multiple
* Availability Zones. For more information, see the
* [Application Load Balancers Guide](http://docs.aws.amazon.com/elasticloadbalancing/latest/application/).
*
* @param name CloudFormation logical name
* @param Subnets Specifies a list of at least two IDs of the subnets to associate with the load balancer. The subnets
* must be in different Availability Zones.
* @param LoadBalancerAttributes Specifies the load balancer configuration.
* @param Name Specifies a name for the load balancer. This name must be unique within your AWS account and can have a
* maximum of 32 alphanumeric characters and hyphens. A name can't begin or end with a hyphen.
* @param Scheme Specifies whether the load balancer is internal or Internet-facing. An internal load balancer routes
* requests to targets using private IP addresses. An Internet-facing load balancer routes requests from
* clients over the Internet to targets in your public subnets.
* @param SecurityGroups Specifies a list of the IDs of the security groups to assign to the load balancer.
* @param Tags Specifies an arbitrary set of tags (key–value pairs) to associate with this load balancer. Use tags to
* manage your resources.
* @param Condition Define conditions by using the intrinsic condition functions. These conditions determine when AWS
* CloudFormation creates the associated resources.
* @param DependsOn Declare dependencies for resources that must be created or deleted in a specific order.
*/
case class `AWS::ElasticLoadBalancingV2::LoadBalancer`(
name: String,
Subnets: Seq[Token[ResourceRef[`AWS::EC2::Subnet`]]],
LoadBalancerAttributes: Option[Seq[LoadBalancerAttribute]] = None,
Name: Option[Token[String]] = None,
Scheme: Option[ELBScheme] = None,
SecurityGroups: Option[Seq[Token[ResourceRef[`AWS::EC2::SecurityGroup`]]]] = None,
Tags: Option[Seq[AmazonTag]] = None,
override val Condition: Option[ConditionRef] = None,
override val DependsOn: Option[Seq[String]] = None
) extends Resource[`AWS::ElasticLoadBalancingV2::LoadBalancer`] with HasArn {
def when(newCondition: Option[ConditionRef] = Condition): `AWS::ElasticLoadBalancingV2::LoadBalancer` = copy(Condition = newCondition)
def arn: Token[String] = ResourceRef(this)
def dnsName: Token[String] = `Fn::GetAtt`(Seq(name, "DNSName"))
def canonicalHostedZoneID: Token[String] = `Fn::GetAtt`(Seq(name, "CanonicalHostedZoneID"))
def loadBalancerFullName: Token[String] = `Fn::GetAtt`(Seq(name, "LoadBalancerFullName"))
def loadBalancerName: Token[String] = `Fn::GetAtt`(Seq(name, "LoadBalancerName"))
def securityGroups: Token[String] = `Fn::GetAtt`(Seq(name, "SecurityGroups"))
}
object `AWS::ElasticLoadBalancingV2::LoadBalancer` extends DefaultJsonProtocol {
implicit val format: JsonFormat[`AWS::ElasticLoadBalancingV2::LoadBalancer`] = jsonFormat9(`AWS::ElasticLoadBalancingV2::LoadBalancer`.apply)
}
/**
* The AWS::ElasticLoadBalancingV2::Listener resource creates a listener for an Elastic Load Balancing Application load
* balancer. The listener checks for connection requests and forwards them to one or more target groups. For more
* information, see the [Listeners for Your Application Load Balancers](http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html)
* in the Application Load Balancers Guide.
*
* @param name CloudFormation logical name
* @param DefaultActions The default actions that the listener takes when handling incoming requests.
* @param LoadBalancerArn The Amazon Resource Name (ARN) of the load balancer to associate with the listener.
* @param Port The port on which the listener listens for requests.
* @param Protocol The protocol that clients must use to send requests to the listener.
* @param Certificates The SSL server certificate for the listener. With a certificate, you can encrypt traffic between
* the load balancer and the clients that initiate HTTPS sessions, and traffic between the load
* balancer and your targets.
* @param SslPolicy The security policy that defines the ciphers and protocols that the load balancer supports.
* @param Condition Define conditions by using the intrinsic condition functions. These conditions determine when AWS
* CloudFormation creates the associated resources.
*/
case class `AWS::ElasticLoadBalancingV2::Listener`(
name: String,
DefaultActions: Seq[ListenerAction],
LoadBalancerArn: Token[String],
Port: Token[Int],
Protocol: ALBProtocol,
Certificates: Option[Seq[Certificate]] = None,
SslPolicy: Option[ELBSecurityPolicy] = None,
override val Condition: Option[ConditionRef] = None,
override val DependsOn: Option[Seq[String]] = None
) extends Resource[`AWS::ElasticLoadBalancingV2::Listener`] with HasArn {
if (Protocol == ALBProtocol.HTTPS && !Certificates.exists(_.nonEmpty))
throw new IllegalArgumentException("Certificates is required for an HTTPS listener")
def when(newCondition: Option[ConditionRef] = Condition): `AWS::ElasticLoadBalancingV2::Listener` = copy(Condition = newCondition)
def arn: Token[String] = ResourceRef(this)
}
object `AWS::ElasticLoadBalancingV2::Listener` extends DefaultJsonProtocol {
def forHttp(name: String,
DefaultActions: Seq[ListenerAction],
LoadBalancerArn: Token[String],
Port: Token[Int] = 80,
Condition: Option[ConditionRef] = None,
DependsOn: Option[Seq[String]] = None): `AWS::ElasticLoadBalancingV2::Listener` =
`AWS::ElasticLoadBalancingV2::Listener`(
name = name,
Protocol = ALBProtocol.HTTP,
DefaultActions = DefaultActions,
LoadBalancerArn = LoadBalancerArn,
Port = Port,
Condition = Condition,
DependsOn = DependsOn
)
def forHttps(name: String,
DefaultActions: Seq[ListenerAction],
LoadBalancerArn: Token[String],
Certificates: Seq[Certificate],
Port: Token[Int] = 443,
SslPolicy: Option[ELBSecurityPolicy] = None,
Condition: Option[ConditionRef] = None,
DependsOn: Option[Seq[String]] = None): `AWS::ElasticLoadBalancingV2::Listener` =
`AWS::ElasticLoadBalancingV2::Listener`(
name = name,
Protocol = ALBProtocol.HTTPS,
Certificates = Some(Certificates),
SslPolicy = SslPolicy,
DefaultActions = DefaultActions,
LoadBalancerArn = LoadBalancerArn,
Port = Port,
Condition = Condition,
DependsOn = DependsOn
)
implicit val format: JsonFormat[`AWS::ElasticLoadBalancingV2::Listener`] = jsonFormat9(`AWS::ElasticLoadBalancingV2::Listener`.apply)
}
/**
* The AWS::ElasticLoadBalancingV2::ListenerRule resource defines which requests an Elastic Load Balancing listener
* takes action on and the action that it takes. For more information, see the [Listeners for Your Application Load
* Balancers](http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-listeners.html) in the
* Application Load Balancers Guide.
*
* @param name CloudFormation logical name
* @param Actions The action that the listener takes when a request meets the specified condition.
* @param Conditions The conditions under which a rule takes effect.
* @param ListenerArn The Amazon Resource Name (ARN) of the listener that the rule applies to.
* @param Priority The priority for the rule. Elastic Load Balancing evaluates rules in priority order, from the lowest
* value to the highest value. If a request satisfies a rule, Elastic Load Balancing ignores all
* subsequent rules.
* @param Condition Define conditions by using the intrinsic condition functions. These conditions determine when AWS
* CloudFormation creates the associated resources.
*/
case class `AWS::ElasticLoadBalancingV2::ListenerRule`(
name: String,
Actions: Seq[ListenerAction],
Conditions: Seq[RuleCondition],
ListenerArn: Token[String],
Priority: Token[Int],
override val Condition: Option[ConditionRef] = None,
override val DependsOn: Option[Seq[String]] = None
) extends Resource[`AWS::ElasticLoadBalancingV2::ListenerRule`] with HasArn {
def when(newCondition: Option[ConditionRef] = Condition): `AWS::ElasticLoadBalancingV2::ListenerRule` = copy(Condition = newCondition)
def arn: Token[String] = ResourceRef(this)
}
object `AWS::ElasticLoadBalancingV2::ListenerRule` extends DefaultJsonProtocol {
implicit val format: JsonFormat[`AWS::ElasticLoadBalancingV2::ListenerRule`] = jsonFormat7(`AWS::ElasticLoadBalancingV2::ListenerRule`.apply)
}
/**
* The AWS::ElasticLoadBalancingV2::TargetGroup resource creates an Elastic Load Balancing target group that routes
* requests to one or more registered targets, such as EC2 instances. For more information, see the [Target Groups
* for Your Application Load Balancers](http://docs.aws.amazon.com/elasticloadbalancing/latest/application/load-balancer-target-groups.html)
* in the Application Load Balancers Guide.
*
* @param name CloudFormation logical name
* @param Protocol The protocol to use for routing traffic to the targets.
* @param Port The port on which the targets receive traffic. This port is used unless you specify a port override when
* registering the target.
* @param VpcId The identifier of the virtual private cloud (VPC).
* @param HealthCheckIntervalSeconds The approximate number of seconds between health checks for an individual target.
* The default is 30 seconds.
* @param HealthCheckPath The ping path destination where Elastic Load Balancing sends health check requests. The
* default is /.
* @param HealthCheckPort The port that the load balancer uses when performing health checks on the targets. The
* default is `traffic-port`, which indicates the port on which each target receives traffic
* from the load balancer.
* @param HealthCheckProtocol The protocol the load balancer uses when performing health checks on targets. The default
* is the HTTP protocol.
* @param HealthCheckTimeoutSeconds The amount of time, in seconds, during which no response from a target means a
* failed health check. The default is 5 seconds.
* @param HealthyThresholdCount The number of consecutive health checks successes required before considering an
* unhealthy target healthy. The default is 5.
* @param UnhealthyThresholdCount The number of consecutive health check failures required before considering a target
* unhealthy. The default is 2.
* @param Matcher The HTTP codes to use when checking for a successful response from a target. The default is 200.
* @param Name The name of the target group. This name must be unique per region per account, can have a maximum of 32
* characters, must contain only alphanumeric characters or hyphens, and must not begin or end with a
* hyphen.
* @param Tags An arbitrary set of tags (key–value pairs) for the target group. Use tags to help manage resources.
* @param TargetGroupAttributes Target group configurations.
* @param Targets The targets to add to this target group.
* @param Condition Define conditions by using the intrinsic condition functions. These conditions determine when AWS
* CloudFormation creates the associated resources.
* @param DependsOn Declare dependencies for resources that must be created or deleted in a specific order.
*/
case class `AWS::ElasticLoadBalancingV2::TargetGroup`(
name: String,
Protocol: ALBProtocol,
Port: Token[Int],
VpcId: VpcId,
Matcher: Option[Matcher],
HealthCheckIntervalSeconds: Option[Token[Int]] = None,
HealthCheckPath: Option[Token[String]] = None,
HealthCheckPort: Option[Token[String]] = None,
HealthCheckProtocol: Option[ALBProtocol] = None,
HealthCheckTimeoutSeconds: Option[Token[Int]] = None,
HealthyThresholdCount: Option[Token[Int]] = None,
UnhealthyThresholdCount: Option[Token[Int]] = None,
Name: Option[Token[String]] = None,
TargetGroupAttributes: Option[Seq[TargetGroupAttribute]] = None,
Targets: Option[Seq[TargetDescription]] = None,
Tags: Option[Seq[AmazonTag]] = None,
override val Condition: Option[ConditionRef] = None,
override val DependsOn: Option[Seq[String]] = None
) extends Resource[`AWS::ElasticLoadBalancingV2::TargetGroup`] with HasArn {
def when(newCondition: Option[ConditionRef] = Condition): `AWS::ElasticLoadBalancingV2::TargetGroup` = copy(Condition = newCondition)
def arn: Token[String] = ResourceRef(this)
def loadBalancerArns: Token[String] = `Fn::GetAtt`(Seq(name, "LoadBalancerArns"))
def targetGroupFullName: Token[String] = `Fn::GetAtt`(Seq(name, "TargetGroupFullName"))
}
object `AWS::ElasticLoadBalancingV2::TargetGroup` extends DefaultJsonProtocol {
implicit val format: JsonFormat[`AWS::ElasticLoadBalancingV2::TargetGroup`] = jsonFormat18(`AWS::ElasticLoadBalancingV2::TargetGroup`.apply)
}
sealed trait ALBProtocol
object ALBProtocol extends DefaultJsonProtocol {
case object HTTP extends ALBProtocol
case object HTTPS extends ALBProtocol
  val values = Seq(HTTP, HTTPS)
implicit val format: JsonFormat[ALBProtocol] = new EnumFormat[ALBProtocol](values)
}
/**
* @see http://docs.aws.amazon.com/elasticloadbalancing/latest/application/create-https-listener.html
*/
sealed trait ELBSecurityPolicy
object ELBSecurityPolicy extends DefaultJsonProtocol {
case object `ELBSecurityPolicy-2016-08` extends ELBSecurityPolicy
case object `ELBSecurityPolicy-TLS-1-2-2017-01` extends ELBSecurityPolicy
case object `ELBSecurityPolicy-TLS-1-1-2017-01` extends ELBSecurityPolicy
case object `ELBSecurityPolicy-2015-05` extends ELBSecurityPolicy
case class Custom(name: String) extends ELBSecurityPolicy
val values = Seq(`ELBSecurityPolicy-2016-08`, `ELBSecurityPolicy-TLS-1-2-2017-01`, `ELBSecurityPolicy-TLS-1-1-2017-01`, `ELBSecurityPolicy-2015-05`)
implicit val format: JsonFormat[ELBSecurityPolicy] = new EnumFormat[ELBSecurityPolicy](values, stringifier = {
case Custom(name) => name
case other => other.toString
}) {
override def read(json: JsValue): ELBSecurityPolicy = try super.read(json) catch {
case e: DeserializationException => json match {
case JsString(value) => Custom(value)
case _ => throw e
}
}
}
}
/**
* @param HttpCode The HTTP codes that a healthy target must use when responding to a health check, such as 200,202 or
* 200-399.
*/
case class Matcher(HttpCode: Token[String])
object Matcher extends DefaultJsonProtocol {
implicit val format: JsonFormat[Matcher] = jsonFormat1(Matcher.apply)
}
sealed trait TargetGroupStickinessType
object TargetGroupStickinessType extends DefaultJsonProtocol {
case object lb_cookie extends TargetGroupStickinessType
val values = Seq(lb_cookie)
implicit val format: JsonFormat[TargetGroupStickinessType] = new EnumFormat[TargetGroupStickinessType](values)
}
case class TargetGroupAttribute private (Key: Option[Token[String]], Value: Option[Token[String]])
object TargetGroupAttribute extends DefaultJsonProtocol {
/**
* @param seconds The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering
* target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds.
* @return
*/
def `deregistration_delay.timeout_seconds`(seconds: Token[String]): TargetGroupAttribute = TargetGroupAttribute(Some("deregistration_delay.timeout_seconds"), seconds)
/**
* @param enabled Indicates whether sticky sessions are enabled.
* @return
*/
def `stickiness.enabled`(enabled: Token[String]): TargetGroupAttribute = TargetGroupAttribute(Some("stickiness.enabled"), enabled)
/**
* @param seconds The cookie expiration period, in seconds. After this period, the cookie is considered stale. The
* minimum value is 1 second and the maximum value is 7 days (604800 seconds). The default value is
* 1 day (86400 seconds).
* @return
*/
def `stickiness.lb_cookie.duration_seconds`(seconds: Token[String]): TargetGroupAttribute = TargetGroupAttribute(Some("stickiness.lb_cookie.duration_seconds"), seconds)
/**
* @param stickyType The type of stickiness.
* @return
*/
def `stickiness.type`(stickyType: TargetGroupStickinessType): TargetGroupAttribute = TargetGroupAttribute(Some("stickiness.type"), Some(stickyType.toString))
implicit val format: JsonFormat[TargetGroupAttribute] = jsonFormat2(TargetGroupAttribute.apply)
}
/**
* @param Id The ID of the target, such as an EC2 instance ID.
* @param Port The port number on which the target is listening for traffic.
*/
case class TargetDescription(Id: Token[String], Port: Option[Token[Int]] = None)
object TargetDescription extends DefaultJsonProtocol {
implicit val format: JsonFormat[TargetDescription] = jsonFormat2(TargetDescription.apply)
}
/**
* @param Key The name of an attribute that you want to configure.
* @param Value A value for the attribute.
*/
case class LoadBalancerAttribute private (Key: Option[Token[String]] = None, Value: Option[Token[String]] = None)
object LoadBalancerAttribute extends DefaultJsonProtocol {
/**
* @param enabled Indicates whether access logs stored in Amazon S3 are enabled. The value is true or false.
* @return
*/
def `access_logs.s3.enabled`(enabled: Token[String]): LoadBalancerAttribute = LoadBalancerAttribute(Some("access_logs.s3.enabled"), enabled)
/**
* @param bucket The name of the S3 bucket for the access logs. This attribute is required if access logs in Amazon
* S3 are enabled. The bucket must exist in the same region as the load balancer and have a bucket
* policy that grants Elastic Load Balancing permission to write to the bucket.
* @return
*/
def `access_logs.s3.bucket`(bucket: Token[String]): LoadBalancerAttribute = LoadBalancerAttribute(Some("access_logs.s3.bucket"), bucket)
/**
*
* @param prefix The prefix for the location in the S3 bucket. If you don't specify a prefix, the access logs are
* stored in the root of the bucket.
* @return
*/
def `access_logs.s3.prefix`(prefix: Token[String]): LoadBalancerAttribute = LoadBalancerAttribute(Some("access_logs.s3.prefix"), prefix)
/**
*
* @param enabled Indicates whether deletion protection is enabled.
* @return
*/
def `deletion_protection.enabled`(enabled: Token[String]): LoadBalancerAttribute = LoadBalancerAttribute(Some("deletion_protection.enabled"), enabled)
/**
*
* @param seconds The idle timeout value, in seconds.
* @return
*/
def `idle_timeout.timeout_seconds`(seconds: Token[String]): LoadBalancerAttribute = LoadBalancerAttribute(Some("idle_timeout.timeout_seconds"), seconds)
implicit val format: JsonFormat[LoadBalancerAttribute] = jsonFormat2(LoadBalancerAttribute.apply)
}
/**
* @param CertificateArn The Amazon Resource Name (ARN) of the certificate to associate with the listener.
*/
case class Certificate(CertificateArn: Token[String])
object Certificate extends DefaultJsonProtocol {
implicit val format: JsonFormat[Certificate] = jsonFormat1(Certificate.apply)
}
/**
* @param TargetGroupArn The Amazon Resource Name (ARN) of the target group to which Elastic Load Balancing routes the
* traffic.
* @param Type The type of action.
*/
case class ListenerAction private (TargetGroupArn: Token[String], Type: Token[String])
object ListenerAction extends DefaultJsonProtocol {
/**
* @param TargetGroupArn The Amazon Resource Name (ARN) of the target group to which Elastic Load Balancing forwards
* the traffic.
* @return
*/
def forward(TargetGroupArn: Token[String]): ListenerAction = ListenerAction(TargetGroupArn, "forward")
implicit val format: JsonFormat[ListenerAction] = jsonFormat2(ListenerAction.apply)
}
/**
* @param Field The name of the condition that you want to define, such as path-pattern (which forwards requests based
* on the URL of the request).
* @param Values The value for the field that you specified in the Field property.
*/
case class RuleCondition private (Field: Option[Token[String]] = None, Values: Option[Seq[Token[String]]] = None)
object RuleCondition extends DefaultJsonProtocol {
/**
* @param Values The value for the host-header.
* @return
*/
def `host-header`(Values: Seq[Token[String]]): RuleCondition = RuleCondition(Some("host-header"), Some(Values))
/**
* @param Values The value for the path-pattern.
* @return
*/
def `path-pattern`(Values: Seq[Token[String]]): RuleCondition = RuleCondition(Some("path-pattern"), Some(Values))
implicit val format: JsonFormat[RuleCondition] = jsonFormat2(RuleCondition.apply)
}
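// Hedged usage sketch (illustrative only): forwarding HTTP traffic on port 80 to a target
// group. The resources passed in are placeholders supplied by the surrounding template.
object ElasticLoadBalancingV2Example {
  def httpListener(
    loadBalancer: `AWS::ElasticLoadBalancingV2::LoadBalancer`,
    targetGroup: `AWS::ElasticLoadBalancingV2::TargetGroup`
  ): `AWS::ElasticLoadBalancingV2::Listener` =
    `AWS::ElasticLoadBalancingV2::Listener`.forHttp(
      name = "AppHttpListener",
      DefaultActions = Seq(ListenerAction.forward(targetGroup.arn)),
      LoadBalancerArn = loadBalancer.arn
    )
}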
| cibotech/cloudformation-template-generator | src/main/scala/com/monsanto/arch/cloudformation/model/resource/ElasticLoadBalancingV2.scala | Scala | bsd-3-clause | 22,319 |
/* _____ _
* | ___| __ __ _ _ __ ___ (_) __ _ _ __
 * | |_ | '__/ _` | '_ ` _ \| |/ _` | '_ \
 * | _|| | | (_| | | | | | | | (_| | | | |
 * |_| |_| \__,_|_| |_| |_|_|\__,_|_| |_|
*
* Copyright 2014 Pellucid Analytics
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package framian
import scala.language.experimental.macros
import scala.{specialized => sp }
import scala.annotation.unspecialized
import spire.algebra.{ Semigroup, Monoid }
import framian.column._
sealed trait Column[+A] { // TODO: Can't specialize in 2.10, but can in 2.11.
// @unspecialized -- See TODO above.
def foldRow[B](row: Int)(na: B, nm: B, f: A => B): B = macro ColumnMacros.foldRowImpl[A, B]
/**
* Equivalent to calling `foreach(from, until, rows, true)(f)`.
*/
def foreach[U](from: Int, until: Int, rows: Int => Int)(f: (Int, A) => U): Boolean = macro ColumnMacros.foreachImpl[A, U]
/**
* Iterates from `from` until `until`, and for each value `i` in this range,
* it retreives a row via `rows(i)`. If this row is `NM` and `abortOnNM` is
* `true`, then iteration stops immediately and `false` is returned.
* Otherwise, if the row is a value, it calls `f` with `i` and the value of
* the row. If iteration terminates normally (ie. no [[NM]]s), then `true` is
* returned.
*
* This is implemented as a macro and desugars into a while loop that access
* the column using `apply` if it is a [[BoxedColumn]] and
* `isValueAt`/`valueAt`/`nonValueAt` if it is an [[UnboxedColumn]]. It will
* also inline `rows` and `f` if they are function literals.
*
* @param from the value to start iterating at (inclusive)
* @param until the value to stop iterating at (exclusive)
* @param rows the function used to retrieve the row for an iteration
* @param f the function to call at each value
* @param abortOnNM terminate early if an `NM` is found
   * @return true if no NMs were found (or `abortOnNM` is false) and iteration completed successfully, false otherwise
*/
def foreach[U](from: Int, until: Int, rows: Int => Int, abortOnNM: Boolean)(f: (Int, A) => U): Boolean = macro ColumnMacros.foreachExtraImpl[A, U]
/**
* Returns the [[Cell]] at row `row`.
*/
def apply(row: Int): Cell[A]
/**
* Map all values of this `Column` using `f`. All [[NA]] and [[NM]] values
* remain as they were.
*/
def map[@sp(Int,Long,Double) B](f: A => B): Column[B]
/**
* Map the values of this `Column` to a new [[Cell]]. All [[NA]] and [[NM]]
* values remain the same.
*
* @param f function use to transform this column's values
*/
def flatMap[B](f: A => Cell[B]): Column[B]
/**
* Filters the values of this `Column` so that any value for which `p` is
* true remains a value and all other values are turned into [[NA]]s.
*
* @param p predicate to filter this column's values with
*/
def filter(p: A => Boolean): Column[A]
/**
   * Returns a column that will fall back to `that` for any row that is [[NA]],
   * or, if the row is [[NM]] and the corresponding row in `that` is a [[Value]],
   * then that value is returned, otherwise [[NM]] is returned. That is, row `i`
   * is defined as `this(i) orElse that(i)`, though it may be more efficient.
   *
   * To put the definition in more concrete terms:
*
* {{{
* Value(a) orElse Value(b) == Value(a)
* Value(a) orElse NA == Value(a)
* Value(a) orElse NM == Value(a)
* NA orElse Value(b) == Value(b)
* NA orElse NA == NA
* NA orElse NM == NM
* NM orElse Value(b) == Value(b)
* NM orElse NM == NM
* NM orElse NA == NM
* }}}
*
* @param that the column to fallback on for NA values
*/
def orElse[A0 >: A](that: Column[A0]): Column[A0]
/**
* Returns a column whose `i`-th row maps to row `index(i)` in this column.
* If `i < 0` or `i >= index.length` then the returned column returns
* [[NA]]. This always forces all rows in `index` and the returned column is
* *dense* and unboxed.
*/
def reindex(index: Array[Int]): Column[A]
/**
* Returns a column which has had all rows between `0` and `len` (exclusive)
* forced (evaluated) and stored in memory, while all rows outside of `0` and
* `len` are set to [[NA]]. The returned column is *dense* and unboxed.
*
* @param len the upper bound of the range of values to force
*/
def force(len: Int): Column[A]
/**
* Returns a column with rows contained in `na` masked to [[NA]]s.
*
* @param na the rows to mask in the column
*/
def mask(na: Mask): Column[A]
/**
* Returns a column with a single row forced to [[NA]] and all others
* remaining the same. This is equivalent to, but possibly more efficient
* than `col.mask(Mask(row))`.
*
* @param row the row that will be forced to [[NA]]
*/
def setNA(row: Int): Column[A]
/**
* Returns a copy of this column whose values will be memoized if they are
* evaluated. That is, if `this` column is an *eval* column, then memoizing
* it will ensure that, for each row, the value is only computed once,
* regardless of the number of times it is accessed.
*
* By default, the memoization is always pessimistic (guaranteed at-most-once
* evaluation). If `optimistic` is `true`, then the memoizing may use an
* optimistic update strategy, which means a value *may* be evaluated more
   * than once if it is accessed concurrently.
*
* For dense, empty, and previously-memoized columns, this just returns the
* column itself.
*
* @param optimistic if true, memoized column may use optimistic updates
*/
def memoize(optimistic: Boolean = false): Column[A]
/**
* Shifts all values in the column up by `rows` rows. So,
* `col.shift(n).apply(row) == col(row - n)`. If this is a dense column,
* then it will only remain dense if `rows` is non-negative.
*/
def shift(rows: Int): Column[A]
/**
* For each `row` in the resulting column, this will return
* `this(row).zipMap(that(row))`. Specifically, if `this(row)` or `that(row)`
* is [[NA]], then the row is [[NA]], if both sides are values, then the row
* is the result of applying `f`, otherwise the row is [[NM]].
*
* @param that the column to zip this column with
* @param f the function to use to combine 2 values to a single value
*/
def zipMap[B, C](that: Column[B])(f: (A, B) => C): Column[C]
override def toString: String =
(0 to 5).map(apply(_).toString).mkString("Column(", ", ", ", ...)")
}
trait BoxedColumn[A] extends Column[A] {
/**
* Maps the cells of this [[Column]] using `f`. This method will always force
* the column into an eval column and should be used with caution.
*/
def cellMap[B](f: Cell[A] => Cell[B]): Column[B]
def map[@sp(Int,Long,Double) B](f: A => B): Column[B] = cellMap {
case Value(a) => Value(f(a))
case (nonValue: NonValue) => nonValue
}
def flatMap[B](f: A => Cell[B]): Column[B] = cellMap {
case Value(a) => f(a)
case (nonValue: NonValue) => nonValue
}
def filter(p: A => Boolean): Column[A] = cellMap {
case Value(a) if p(a) => Value(a)
case Value(_) => NA
case nonValue => nonValue
}
}
trait UnboxedColumn[@sp(Int,Long,Double) A] extends Column[A] {
def isValueAt(row: Int): Boolean
def nonValueAt(row: Int): NonValue
def valueAt(row: Int): A
def apply(row: Int): Cell[A] =
if (isValueAt(row)) Value(valueAt(row))
else nonValueAt(row)
}
object Column {
final def newBuilder[A: GenColumnBuilder](): ColumnBuilder[A] = ColumnBuilder[A]()
/**
* Construct a column whose `i`-th row is the `i`-th element in `cells`. All
* other rows are [[NA]].
*/
def apply[A: GenColumnBuilder](cells: Cell[A]*): Column[A] = {
val bldr = newBuilder[A]()
cells.foreach(bldr += _)
bldr.result()
}
/**
* Returns a column which returns `Value(a)` for all rows.
*
* @note The `value` argument is strict.
*/
def value[A](value: A): Column[A] = {
val cell = Value(value)
EvalColumn(_ => cell)
}
/**
* Returns a column whose values are obtained using `get`. Each time a row is
* accessed, `get` will be re-evaluated. To ensure values are evaluated
   * only once, you can [[memoize]] the column or use one of the *forcing*
* methods, such as [[reindex]] or [[force]].
*/
def eval[A](get: Int => Cell[A]): Column[A] = EvalColumn(get)
/**
* Create a dense column from an array of values. A dense column can still
* have empty values, [[NA]] and [[NM]], as specified with the `na` and `nm`
* masks respectively. Dense columns are unboxed and only values that aren't
* masked by `na` and `nm` will ever be returned (so they can be `null`,
* `NaN`, etc.)
*
* The [[NM]] mask (`nm`) always takes precedence over the [[NA]] mask
* (`na`). If a row is outside of the range 0 until `values.length`, then if
* `nm(row)` is true, [[NM]] will be returned, otherwise [[NA]] is returned.
*
* @param values the values of the column, rows correspond to indices
* @param na masked rows that will return [[NA]]
* @param nm masked rows that will return [[NM]]
*/
def dense[A](values: Array[A], na: Mask = Mask.empty, nm: Mask = Mask.empty): Column[A] = values match {
case (values: Array[Double]) => DoubleColumn(values, na, nm)
case (values: Array[Int]) => IntColumn(values, na, nm)
case (values: Array[Long]) => LongColumn(values, na, nm)
case _ => GenericColumn[A](values, na, nm)
}
def values[A](values: Seq[A]): Column[A] =
AnyColumn[A]((values: Seq[Any]).toArray, Mask.empty, Mask.empty)
/**
* Returns a column that returns [[NM]] for any row in `nmValues` and [[NA]]
* for all others. If all you need is a column that always returns [[NA]],
* then use [[Empty]].
*/
def empty[A](nmValues: Mask = Mask.empty): Column[A] =
AnyColumn[A](new Array[Any](0), Mask.empty, nmValues)
implicit def columnMonoid[A]: Monoid[Column[A]] =
new Monoid[Column[A]] {
def id: Column[A] = empty[A]()
def op(lhs: Column[A], rhs: Column[A]): Column[A] =
lhs orElse rhs
}
}
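// Added sketch (not part of the original source): exercises only the constructors and
// combinators documented above. `Value` and `NA` come from this package, as elsewhere
// in the file.
object ColumnUsageSketch {
  def sketch: Column[Int] = {
    val a = Column(Value(1), NA, Value(3))   // sparse: 1, NA, 3, NA, ...
    val b = Column.dense(Array(10, 20, 30))  // dense, unboxed: 10, 20, 30, NA, ...
    val c = a orElse b                       // row-wise fallback: 1, 20, 3, NA, ...
    c.map(_ * 2).force(3)                    // dense column holding 2, 40, 6
  }
}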
|
longcao/framian
|
framian/src/main/scala/framian/Column.scala
|
Scala
|
apache-2.0
| 10,753 |
/*
* Copyright 2015-2020 Noel Welsh
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package doodle
package java2d
package effect
import cats.effect.IO
import doodle.core.{Point,Transform}
// import doodle.java2d.algebra.Algebra
import java.awt.event._
import java.util.concurrent.atomic.AtomicReference
import javax.swing.{JFrame, Timer, WindowConstants}
import monix.reactive.subjects.PublishSubject
/**
* A [[Canvas]] is an area on the screen to which Pictures can be drawn.
*/
final class Canvas(frame: Frame) extends JFrame(frame.title) {
val panel = new Java2DPanel(frame)
/**
* The current global transform from logical to screen coordinates
*/
private val currentInverseTx: AtomicReference[Transform] =
new AtomicReference(Transform.identity)
/**
* Draw the given Picture to this [[Canvas]].
*/
def render[A](picture: Picture[A]): IO[A] = {
// Possible race condition here setting the currentInverseTx
def register(cb: Either[Throwable, Java2DPanel.RenderResult[A]] => Unit): Unit = {
// val drawing = picture(algebra)
// val (bb, rdr) = drawing.runA(List.empty).value
// val (w, h) = Java2d.size(bb, frame.size)
// val rr = Java2DPanel.RenderRequest(bb, w, h, rdr, cb)
panel.render(Java2DPanel.RenderRequest(picture, frame, cb))
}
IO.async(register).map{result =>
val inverseTx = Java2d.inverseTransform(result.boundingBox, result.width, result.height, frame.center)
currentInverseTx.set(inverseTx)
result.value
}
}
val redraw = PublishSubject[Int]()
  /** Delay between frames when rendering at 60fps */
  val frameRateMs = (1000.0 * (1 / 60.0)).toInt
  val frameEvent = {
    var firstFrame = true
var lastFrameTime = 0L
new ActionListener {
def actionPerformed(e: ActionEvent): Unit = {
val now = e.getWhen()
if (firstFrame) {
firstFrame = false
lastFrameTime = now
redraw.onNext(0)
()
} else {
redraw.onNext((now - lastFrameTime).toInt)
lastFrameTime = now
}
}
}
}
val timer = new Timer(frameRateMs, frameEvent)
val mouseMove = PublishSubject[Point]()
this.addMouseMotionListener(
new MouseMotionListener {
import scala.concurrent.duration.Duration
import scala.concurrent.Await
def mouseDragged(e: MouseEvent): Unit = ()
def mouseMoved(e: MouseEvent): Unit = {
val pt = e.getPoint()
val inverseTx = currentInverseTx.get()
val ack = mouseMove.onNext(inverseTx(Point(pt.getX(), pt.getY())))
Await.ready(ack, Duration.Inf)
()
}
}
)
this.addWindowListener(
new WindowAdapter {
override def windowClosed(evt: WindowEvent): Unit =
timer.stop()
}
)
getContentPane().add(panel)
pack()
setVisible(true)
setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE)
repaint()
timer.start()
}
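// Added sketch (not part of the original source): illustrates the coordinate mapping
// performed by the mouseMotionListener above. The cached inverse transform takes screen
// coordinates back to Doodle's logical coordinates; with Transform.identity the point is
// returned unchanged.
object CanvasCoordinateSketch {
  import doodle.core.{Point, Transform}
  def toLogical(inverseTx: Transform, screenX: Double, screenY: Double): Point =
    inverseTx(Point(screenX, screenY))
}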
|
underscoreio/doodle
|
java2d/src/main/scala/doodle/java2d/effect/Canvas.scala
|
Scala
|
apache-2.0
| 3,467 |
/*
* Copyright 2015 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.feature
import java.util
import java.util.{Collection => JCollection, List => JList}
import com.vividsolutions.jts.geom.Geometry
import org.geotools.feature.`type`.{AttributeDescriptorImpl, Types}
import org.geotools.feature.{AttributeImpl, GeometryAttributeImpl}
import org.geotools.filter.identity.FeatureIdImpl
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.util.Converters
import org.opengis.feature.`type`.{AttributeDescriptor, Name}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.feature.{GeometryAttribute, Property}
import org.opengis.geometry.BoundingBox
import scala.collection.JavaConversions._
/**
* Simple feature implementation optimized to instantiate from serialization
*
 * @param initialId the initial feature id
 * @param sft the simple feature type of this feature
 * @param initialValues if provided, must already be converted into the appropriate types
*/
class ScalaSimpleFeature(initialId: String, sft: SimpleFeatureType, initialValues: Array[AnyRef] = null)
extends SimpleFeature {
val featureId = new FeatureIdImpl(initialId)
val values = if (initialValues == null) Array.ofDim[AnyRef](sft.getAttributeCount) else initialValues
lazy private[this] val userData = collection.mutable.HashMap.empty[AnyRef, AnyRef]
lazy private[this] val geomDesc = sft.getGeometryDescriptor
lazy private[this] val geomIndex = if (geomDesc == null) -1 else sft.indexOf(geomDesc.getLocalName)
override def getFeatureType = sft
override def getType = sft
override def getIdentifier = featureId
override def getID = featureId.getID // this needs to reference the featureId, as it can be updated
override def getName: Name = sft.getName
override def getUserData = userData
override def getAttribute(name: Name) = getAttribute(name.getLocalPart)
override def getAttribute(name: String) = {
val index = sft.indexOf(name)
if (index == -1) null else getAttribute(index)
}
override def getAttribute(index: Int) = values(index)
override def setAttribute(name: Name, value: Object) = setAttribute(name.getLocalPart, value)
override def setAttribute(name: String, value: Object) = {
val index = sft.indexOf(name)
if (index == -1) {
throw new IllegalArgumentException(s"Attribute $name does not exist in type $sft")
}
setAttribute(index, value)
}
override def setAttribute(index: Int, value: Object) = {
val binding = sft.getDescriptor(index).getType.getBinding
values(index) = Converters.convert(value, binding).asInstanceOf[AnyRef]
}
// following methods delegate to setAttribute to get type conversion
override def setAttributes(vals: JList[Object]) = {
var i = 0
while (i < vals.size) {
setAttribute(i, vals.get(i))
i += 1
}
}
override def setAttributes(vals: Array[Object]) = {
var i = 0
while (i < vals.length) {
setAttribute(i, vals(i))
i += 1
}
}
override def getAttributeCount = values.length
override def getAttributes: JList[Object] = values.toList
override def getDefaultGeometry: Object = if (geomIndex == -1) null else getAttribute(geomIndex)
override def setDefaultGeometry(geo: Object) = setAttribute(geomIndex, geo)
override def getBounds: BoundingBox = getDefaultGeometry match {
case g: Geometry => new ReferencedEnvelope(g.getEnvelopeInternal, sft.getCoordinateReferenceSystem)
case _ => new ReferencedEnvelope(sft.getCoordinateReferenceSystem)
}
override def getDefaultGeometryProperty =
if (geomDesc == null) null else new GeometryAttributeImpl(getDefaultGeometry, geomDesc, null)
override def setDefaultGeometryProperty(geoAttr: GeometryAttribute) =
if (geoAttr == null) setDefaultGeometry(null) else setDefaultGeometry(geoAttr.getValue)
override def getProperties: JCollection[Property] = {
val attributes = getAttributes
val descriptors = sft.getAttributeDescriptors
assert(attributes.size == descriptors.size)
val properties = new util.ArrayList[Property](attributes.size)
var i = 0
while (i < attributes.size) {
properties.add(new AttributeImpl(attributes.get(i), descriptors.get(i), featureId))
i += 1
}
properties
}
override def getProperties(name: Name) = getProperties(name.getLocalPart)
override def getProperties(name: String) = getProperties.filter(_.getName.toString == name)
override def getProperty(name: Name) = getProperty(name.getLocalPart)
override def getProperty(name: String) = {
val descriptor = sft.getDescriptor(name)
if (descriptor == null) null else new AttributeImpl(getAttribute(name), descriptor, featureId)
}
override def getValue = getProperties
override def setValue(newValue: Object) = setValue(newValue.asInstanceOf[JCollection[Property]])
override def setValue(values: JCollection[Property]) = {
var i = 0
values.foreach { p =>
setAttribute(i, p.getValue)
i += 1
}
}
override def getDescriptor: AttributeDescriptor =
new AttributeDescriptorImpl(sft, sft.getName, 0, Int.MaxValue, true, null)
override def isNillable = true
override def validate() = {
var i = 0
while (i < values.length) {
Types.validate(sft.getDescriptor(i), values(i))
i += 1
}
}
}
object ScalaSimpleFeature {
implicit class RichSimpleFeature(val sf: ScalaSimpleFeature) extends AnyVal {
def getAttribute[T](name: String) = sf.getAttribute(name).asInstanceOf[T]
def getAttribute[T](index: Int) = sf.getAttribute(index).asInstanceOf[T]
def getGeometry() = sf.getDefaultGeometry.asInstanceOf[Geometry]
}
}
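// Added sketch (not part of the original source): shows one way to construct and populate
// a ScalaSimpleFeature. It assumes GeoTools' SimpleFeatureTypeBuilder (already a dependency
// of this module) to build a small feature type; setAttribute goes through Converters, so
// the string "42" is converted to an Integer.
object ScalaSimpleFeatureSketch {
  import org.geotools.feature.simple.SimpleFeatureTypeBuilder
  def sketch(): ScalaSimpleFeature = {
    val b = new SimpleFeatureTypeBuilder()
    b.setName("example")
    b.add("name", classOf[String])
    b.add("age", classOf[java.lang.Integer])
    val sft = b.buildFeatureType()
    val sf = new ScalaSimpleFeature("fid-1", sft)
    sf.setAttribute("name", "alice")
    sf.setAttribute("age", "42")
    sf
  }
}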
|
kevinwheeler/geomesa
|
geomesa-feature/src/main/scala/org/locationtech/geomesa/feature/ScalaSimpleFeature.scala
|
Scala
|
apache-2.0
| 6,217 |
package io.vertx.ext.asyncsql.impl
import io.vertx.core.Vertx
import io.vertx.core.json.JsonObject
import io.vertx.ext.asyncsql.impl.pool.MysqlAsyncConnectionPool
/**
* @author <a href="http://www.campudus.com">Joern Bernhardt</a>.
*/
class MySQLClient(val vertx: Vertx, val config: JsonObject) extends BaseSQLClient {
override protected val poolFactory = MysqlAsyncConnectionPool.apply _
override protected val defaultHost: String = "localhost"
override protected val defaultPort: Int = 3306
override protected val defaultDatabase: Option[String] = Some("testdb")
override protected val defaultUser: String = "vertx"
override protected val defaultPassword: Option[String] = Some("password")
}
|
InfoSec812/vertx-mysql-postgresql-service
|
src/main/scala/io/vertx/ext/asyncsql/impl/MySQLClient.scala
|
Scala
|
apache-2.0
| 717 |
package org.gbougeard.api
import com.typesafe.config.ConfigFactory
import scala.concurrent.Future
import scala.util.{Success, Failure}
import akka.actor.ActorSystem
import akka.event.Logging
import play.api.libs.json._
import play.api.libs.functional.syntax._
import spray.http._
import spray.client.pipelining._
import org.gbougeard.model.changes.ChangeInfo
/**
* Created with IntelliJ IDEA.
* User: gbougeard
* Date: 15/07/13
* Time: 23:09
* To change this template use File | Settings | File Templates.
*/
object Common {
val config = ConfigFactory.load()
val host = config.getString("gerrit.url")
val PREFIX = ")]}'"
def GetChanges(path: String) {
implicit val system = ActorSystem("changes-client")
import system.dispatcher
// execution context for futures below
val log = Logging(system, getClass)
val pipeline: HttpRequest => Future[HttpResponse] = (
addHeader("Content-Type", "application/json")
~> sendReceive
~> logResponse(log)
// ~> unmarshal[ChangeInfo]
)
log.info("Get " + host + path)
val responseFuture: Future[HttpResponse] = pipeline(Get(host + path))
responseFuture onComplete {
case Success(response) =>
// log.info("response is: {} ", response.entity)
val body = response.entity.toOption
body.map {
b =>
val msg = b.asString
// log.info(s"${msg.indexOf(PREFIX) + PREFIX.length}")
val json = msg.substring(msg.indexOf(PREFIX) + PREFIX.length)
log.info(s"json ${json.substring(0, 50)}")
          // TODO : if there is more than one query, the result is a List[List[ChangeInfo]]
val changeInfo = Json.parse(json).as[List[ChangeInfo]]
log.info("changeInfo {}", changeInfo)
}
shutdown()
case Failure(error) =>
log.error(error, "Couldn't get changes")
shutdown()
}
def shutdown(): Unit = {
log.warning("shutdown!")
// IO(Http).ask(Http.CloseAll)(1.second).await
system.shutdown()
}
}
def url(path: String): String = {
host + path
}
}
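// Added sketch (not part of the original source): isolates the XSSI-prefix handling used in
// GetChanges above. Gerrit prepends ")]}'" to JSON responses, and the substring call strips
// everything up to and including that prefix before parsing.
object PrefixStripSketch {
  private val Prefix = ")]}'"
  def strip(msg: String): String =
    msg.substring(msg.indexOf(Prefix) + Prefix.length)
  // strip(")]}'\n[{\"_number\": 42}]") == "\n[{\"_number\": 42}]"
}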
|
gbougeard/gas
|
src/main/scala/org/gbougeard/api/Common.scala
|
Scala
|
apache-2.0
| 2,156 |
package epic.parser.models
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.linalg._
import epic.lexicon.TagScorer
/**
*
* @author dlwh
*
*/
class FeaturizedLexicon[L, L2, W](val weights: DenseVector[Double],
val featureIndexer: IndexedFeaturizer[L, L2, W]) extends TagScorer[L2, W] {
def anchor(w: IndexedSeq[W]): Anchoring = new Anchoring {
val fi = featureIndexer.anchor(w)
def words: IndexedSeq[W] = w
def scoreTag(pos: Int, l: L2): Double = {
fi.computeWeight(pos, featureIndexer.labelIndex(l), weights)
}
}
}
|
maxim-rabinovich/epic
|
src/main/scala/epic/parser/models/FeaturizedLexicon.scala
|
Scala
|
apache-2.0
| 1,112 |
/*
* Copyright (c) 2012, 2013 Roberto Tyley
*
* This file is part of 'BFG Repo-Cleaner' - a tool for removing large
* or troublesome blobs from Git repositories.
*
* BFG Repo-Cleaner is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BFG Repo-Cleaner is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see http://www.gnu.org/licenses/ .
*/
package com.madgag.git.bfg.cli
import org.specs2.mutable.Specification
class TextReplacementConfigSpec extends Specification {
"text replacement config" should {
"default to using ***REMOVED*** for the replacement text" in {
TextReplacementConfig("1234").apply("password:1234") mustEqual("password:***REMOVED***")
}
"use empty string as replacement text if specified" in {
TextReplacementConfig("1234==>").apply("password:1234") mustEqual("password:")
}
"use literal replacement text if specified" in {
TextReplacementConfig("1234==>mypass").apply("password:1234") mustEqual("password:mypass")
}
"support sub-group references in replacement text" in {
TextReplacementConfig("""regex:Copyright \\w+ (\\d{4})==>Copyright Yutan $1""").apply("Copyright Roberto 2012") mustEqual("Copyright Yutan 2012")
}
"treat dollars and slashes in replacement text as literal if the matcher text was literal" in {
TextReplacementConfig("""Copyright 1999==>Copyright 2013 : Price $1""").apply("Totally Copyright 1999. Boom.") mustEqual("Totally Copyright 2013 : Price $1. Boom.")
}
"apply transforms in the order they occur" in {
TextReplacementConfig(Seq("awesome","some")).get.apply("Totally awesome") mustEqual("Totally ***REMOVED***")
TextReplacementConfig(Seq("some","awesome")).get.apply("Totally awesome") mustEqual("Totally awe***REMOVED***")
}
}
}
|
NeilBryant/bfg-repo-cleaner
|
bfg/src/test/scala/com/madgag/git/bfg/cli/TextReplacementConfigSpec.scala
|
Scala
|
gpl-3.0
| 2,264 |
/**
* (c) Copyright 2013 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.extra.tools;
import java.util.{List => JList}
import java.util.Locale
import scala.collection.JavaConverters.asScalaIteratorConverter
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.KeyValue
import org.apache.hadoop.hbase.client.HTableInterface
import org.apache.hadoop.hbase.client.Put
import org.apache.hadoop.hbase.client.Scan
import org.apache.hadoop.hbase.io.hfile.CacheConfig
import org.apache.hadoop.hbase.io.hfile.HFile
import org.apache.hadoop.hbase.io.hfile.HFileScanner
import org.kiji.common.flags.Flag
import org.kiji.common.flags.FlagParser
import org.kiji.schema.Kiji
import org.kiji.schema.KijiURI
import org.kiji.schema.hbase.HBaseFactory
import org.kiji.schema.platform.SchemaPlatformBridge
import org.kiji.schema.tools.BaseTool
import org.slf4j.LoggerFactory
import org.kiji.schema.impl.hbase.HBaseKijiTable
/** HFile testing utilities. */
class HFileTool extends BaseTool {
  private final val Log = LoggerFactory.getLogger(classOf[HFileTool])
@Flag(name="path",
usage="Path of the file to read from/write to.")
var pathFlag: String = null
@Flag(name="do",
usage="Either 'import' or 'export'.")
var doFlag: String = null
@Flag(name="table",
usage="""URI of a Kiji table to read from/write to.
        Only one of --table and --hbase may be specified at a time.""")
var tableFlag: String = null
@Flag(name="hbase",
usage="""URI of a Kiji instance to read from/write to.
Only one of --table and --hbase may be specified at a time.""")
var hbaseFlag: String = null
@Flag(name="htable",
usage="""Name of the HTable to read from/write to.
Requires specifying an HBase instance with --hbase.""")
var htableFlag: String = null
@Flag(name="hfile-compression",
usage="HFile compression algorithm: one of 'none', 'gz', 'lz4', 'lzo', 'snappy'.")
var hfileCompressionFlag: String = "NONE"
@Flag(name="hfile-block-size",
usage="HFile block size, in bytes. Defaults to 64M.")
var hfileBlockSizeFlag: Int = 64 * 1024 * 1024
override def getName(): String = {
return "hfile"
}
override def getCategory(): String = {
return "extra"
}
override def getDescription(): String = {
return "Exports/imports HBase/Kiji tables to/from HFile."
}
override def getUsageString(): String = {
return """Usage:
| kiji hfile [--do=](import|export) \\
| --path=<hfile-path> \\
| ( --table=<kiji-table-uri>
| | --hbase=<hbase-instance-uri> --htable=<hbase-table-name> )
|
|Examples:
| Export Kiji table kiji://zkhost:port/default/table to /path/to/hfile:
| kiji hfile export --path=/path/to/hfile --table=kiji://zkhost:port/default/table
|
| Import /path/to/hfile into HBase instance kiji://zkhost:port and table named 'table':
| kiji hfile import --path=/path/to/hfile --hbase=kiji://zkhost:port --htable=table
|"""
.stripMargin
}
/**
* Dumps an HBase table to an HFile.
*
* @param table is the HBase table to dump.
* @param path is the path of the HFile to write to.
* @param compression is the algorithm to use to compress the HFile content.
* @param blockSize is the block size, in bytes.
*/
def writeToHFile(
table: HTableInterface,
path: Path,
compression: String = "none",
blockSize: Int = 64 * 1024 * 1024
): Unit = {
val conf = HBaseConfiguration.create()
val cacheConf = new CacheConfig(conf)
val fs = FileSystem.get(conf)
val writer: HFile.Writer = SchemaPlatformBridge.get().createHFileWriter(
conf, fs, path, blockSize, compression)
try {
val scanner = table.getScanner(new Scan().setMaxVersions())
try {
for (result <- scanner.iterator.asScala) {
val rowKey = result.getRow
for (fentry <- result.getMap.entrySet.iterator.asScala) {
val (family, qmap) = (fentry.getKey, fentry.getValue)
for (qentry <- qmap.entrySet.iterator.asScala) {
val (qualifier, series) = (qentry.getKey, qentry.getValue)
for (tentry <- series.descendingMap.entrySet.iterator.asScala) {
val (timestamp, value) = (tentry.getKey, tentry.getValue)
val keyValue = new KeyValue(rowKey, family, qualifier, timestamp, value)
writer.append(keyValue)
}
}
}
}
} finally {
scanner.close()
}
} finally {
writer.close()
}
}
/**
* Populates an HBase table from an HFile.
*
* @param table is the HBase table to populate.
* @param path is the path of the HFile to read from.
*/
def readFromHFile(table: HTableInterface, path: Path): Unit = {
val conf = HBaseConfiguration.create()
val cacheConf = new CacheConfig(conf)
val fs = FileSystem.get(conf)
val reader: HFile.Reader = HFile.createReader(fs, path, cacheConf)
try {
val cacheBlocks = false
val positionalRead = false
/** HFileScanner has no close() method. */
val scanner: HFileScanner = reader.getScanner(cacheBlocks, positionalRead)
var hasNext = scanner.seekTo()
while (hasNext) {
val keyValue = scanner.getKeyValue
val rowKey = keyValue.getRow
val family = keyValue.getFamily
val qualifier = keyValue.getQualifier
val timestamp = keyValue.getTimestamp
val value = keyValue.getValue
table.put(new Put(rowKey).add(family, qualifier, timestamp, value))
hasNext = scanner.next()
}
} finally {
reader.close()
}
}
/**
* Program entry point.
*
* @param unparsed is the array of command-line arguments.
*/
override def run(unparsed: JList[String]): Int = {
// Requires either --do=(import|export) or a single unnamed argument (exclusive OR):
if (!((unparsed.size == 1) ^ ((doFlag != null) && unparsed.isEmpty))) {
FlagParser.printUsage(this, Console.out)
return BaseTool.FAILURE
}
val action = if (doFlag != null) doFlag else unparsed.get(0)
if (!Set("import", "export").contains(action)) {
print("Unknown action '%s': specify either 'import' or 'export'.".format(action))
FlagParser.printUsage(this, Console.out)
return BaseTool.FAILURE
}
require(pathFlag != null, "Specify the file to read from/write to with --path=...")
val filePath = new Path(pathFlag)
val hfileCompression = hfileCompressionFlag.toUpperCase(Locale.ROOT)
def runAction(htable: HTableInterface, path: Path) {
action match {
case "import" => readFromHFile(htable, path)
case "export" => writeToHFile(htable, path, hfileCompression, hfileBlockSizeFlag)
case _ => sys.error("Unknown action: %s".format(action))
}
}
require((tableFlag != null) ^ (hbaseFlag != null), "Specify exactly one of --table and --hbase")
require((htableFlag != null) == (hbaseFlag != null),
"--htable must be specified along with --hbase, and may not specified if --table is.")
if (tableFlag != null) {
val tableURI = KijiURI.newBuilder(tableFlag).build()
val kiji = Kiji.Factory.open(tableURI)
try {
val table = kiji.openTable(tableURI.getTable)
try {
val htable = table.asInstanceOf[HBaseKijiTable].openHTableConnection()
try {
runAction(htable, filePath)
} finally {
htable.close()
}
} finally {
table.release()
}
} finally {
kiji.release()
}
} else if ((hbaseFlag != null) && (htableFlag != null)) {
val hbaseURI = KijiURI.newBuilder(hbaseFlag).build()
val htableFactory = HBaseFactory.Provider.get().getHTableInterfaceFactory(hbaseURI)
val htable = htableFactory.create(getConf, htableFlag)
try {
runAction(htable, filePath)
} finally {
htable.close()
}
} else {
sys.error("No table specified")
}
return BaseTool.SUCCESS
}
}
|
rpinzon/kiji-schema
|
kiji-schema-extras/src/main/scala/org/kiji/schema/extra/tools/HFileTool.scala
|
Scala
|
apache-2.0
| 8,944 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.fluentd
import java.io.BufferedInputStream
import java.net.ServerSocket
import java.util.concurrent.atomic.AtomicBoolean
import javax.annotation.{PostConstruct, PreDestroy}
import wvlet.airframe.codec.PrimitiveCodec.ValueCodec
import wvlet.airframe._
import wvlet.airspec.AirSpec
import wvlet.log.LogSupport
import wvlet.log.io.IOUtil
case class MockFluentdConfig(port: Int)
trait MockFluentd extends LogSupport {
lazy val socket = bind { config: MockFluentdConfig => new ServerSocket(config.port) }
val shutdown = new AtomicBoolean(false)
val t = new Thread(new Runnable {
override def run(): Unit = {
val clientSocket = socket.accept()
val out = clientSocket.getOutputStream
val in = new BufferedInputStream(clientSocket.getInputStream)
while (!shutdown.get()) {
var b = new Array[Byte](8192)
var totalReadLen = 0
var readLen = in.read(b)
while (readLen != -1) {
val nextReadLen = in.read(b, totalReadLen, readLen)
totalReadLen += readLen
readLen = nextReadLen
}
if (totalReadLen > 0) {
val v = ValueCodec.unpackMsgPack(b, 0, totalReadLen)
logger.debug(s"Received event: ${v}")
}
}
}
})
@PostConstruct
def start: Unit = {
debug(s"starting MockFluentd")
t.start()
}
@PreDestroy
def stop: Unit = {
debug(s"stopping MockFluentd")
shutdown.set(true)
socket.close()
t.interrupt()
}
}
case class FluencyMetric(id: Int, name: String) extends TaggedMetric {
def metricTag = "fluency_metric"
}
/**
*/
class FluencyTest extends AirSpec {
private val fluentdPort = IOUtil.randomPort
protected override val design: Design = {
newDesign
.bind[MockFluentdConfig].toInstance(new MockFluentdConfig(fluentdPort))
.bind[MockFluentd].toEagerSingleton
.add(
fluentd
.withFluentdLogger(
port = fluentdPort,
// Do not send ack for simplicity
ackResponseMode = false
)
)
}
test("should send metrics to fluentd through Fluency") { (f: MetricLoggerFactory) =>
// Use a regular emit method
f.getLogger.emit("mytag", Map("data" -> "hello"))
// Use object metric logger
val l = f.getTypedLogger[FluencyMetric]
l.emit(FluencyMetric(1, "leo"))
f.getLoggerWithTagPrefix("system").emit("mytag", Map("data" -> "metric value"))
}
test(
"test extended time",
design = fluentd.withFluentdLogger(port = fluentdPort, ackResponseMode = false, useExtendedEventTime = true)
) { f: MetricLoggerFactory =>
val l = f.getLogger
l.emit("mytag", Map("data" -> "hello"))
l.emitMsgPack("tag", Array(0xc6.toByte))
}
}
|
wvlet/airframe
|
airframe-fluentd/src/test/scala/wvlet/airframe/fluentd/FluencyTest.scala
|
Scala
|
apache-2.0
| 3,347 |
// Exercise code paths for different types of cached term refs.
// Specifically, `NonNullTermRef`s are cached separately from regular `TermRefs`.
// If the two kinds of trefs weren't cached separately, then the code below would
// error out, because every time `x` is accessed the nullable or non-null denotation
// would replace the other one, causing errors during -Ychecks.
class Test {
def foo(): Unit = {
val x: String|Null = ??? // regular tref `x`
if (x != null) {
val y = x.length // non-null tref `x`
x.length // 2nd access to non-null tref `x`
val z = x.length // 3rd access to non-null tref `x`
} else {
val y = x // regular tref `x`
}
val x2 = x // regular tref `x`
}
}
|
dotty-staging/dotty
|
tests/explicit-nulls/pos/flow-tref-caching.scala
|
Scala
|
apache-2.0
| 753 |
package test
import org.scalatest.FunSuite
import offheap._
@data class Inner(var v: Int)
@data class Outer(@embed var inner: Inner)
@data class Inner2(v: Long)
@data class Outer2 { @embed var inner: Inner2 = _ }
class EmbedSuite extends FunSuite {
implicit val alloc = malloc
test("inner pointer") {
val inner = Inner(42)
val outer = Outer(inner)
assert(outer.addr == outer.inner.addr)
}
test("modify after copy") {
val outer = Outer(Inner(42))
assert(outer.inner.v == 42)
val outer2 = outer.copy()
assert(outer2.inner.v == 42)
outer2.inner.v = 43
assert(outer.inner.v == 42)
assert(outer2.inner.v == 43)
}
test("assign embedded") {
val outer = Outer(Inner(42))
assert(outer.inner.v == 42)
val inner = Inner(43)
outer.inner = inner
assert(inner.v == 43)
assert(outer.inner.v == 43)
inner.v = 44
assert(inner.v == 44)
assert(outer.inner.v == 43)
}
test("assign embeded null") {
val outer = Outer(Inner(42))
intercept[NullPointerException] {
outer.inner = Inner.empty
}
}
test("default init in-body var") {
assert(Outer2().inner.v == 0L)
}
}
|
ignasi35/scala-offheap
|
tests/src/test/scala/EmbedSuite.scala
|
Scala
|
bsd-3-clause
| 1,170 |
package io.kaitai.struct
import io.kaitai.struct.datatype.DataType.{CalcIntType, KaitaiStreamType, UserTypeInstream}
import io.kaitai.struct.datatype.{BigEndian, CalcEndian, Endianness, FixedEndian, InheritedEndian, LittleEndian}
import io.kaitai.struct.exprlang.Ast
import io.kaitai.struct.format._
import io.kaitai.struct.languages.GoCompiler
import io.kaitai.struct.languages.components.ExtraAttrs
class GoClassCompiler(
classSpecs: ClassSpecs,
override val topClass: ClassSpec,
config: RuntimeConfig
) extends ClassCompiler(classSpecs, topClass, config, GoCompiler) {
override def compileClass(curClass: ClassSpec): Unit = {
provider.nowClass = curClass
val extraAttrs = List(
AttrSpec(List(), IoIdentifier, KaitaiStreamType),
AttrSpec(List(), RootIdentifier, UserTypeInstream(topClassName, None)),
AttrSpec(List(), ParentIdentifier, curClass.parentType)
) ++ ExtraAttrs.forClassSpec(curClass, lang)
if (!curClass.doc.isEmpty)
lang.classDoc(curClass.name, curClass.doc)
// Enums declaration defines types, so they need to go first
compileEnums(curClass)
// Basic struct declaration
lang.classHeader(curClass.name)
compileAttrDeclarations(curClass.seq ++ curClass.params ++ extraAttrs)
curClass.instances.foreach { case (instName, instSpec) =>
compileInstanceDeclaration(instName, instSpec)
}
lang.classFooter(curClass.name)
// Constructor = Read() function
compileReadFunction(curClass)
compileInstances(curClass)
compileAttrReaders(curClass.seq ++ extraAttrs)
// Recursive types
compileSubclasses(curClass)
}
def compileReadFunction(curClass: ClassSpec) = {
lang.classConstructorHeader(
curClass.name,
curClass.parentType,
topClassName,
curClass.meta.endian.contains(InheritedEndian),
curClass.params
)
compileEagerRead(curClass.seq, curClass.meta.endian)
lang.classConstructorFooter
}
override def compileInstance(className: List[String], instName: InstanceIdentifier, instSpec: InstanceSpec, endian: Option[Endianness]): Unit = {
// Determine datatype
val dataType = instSpec.dataTypeComposite
if (!instSpec.doc.isEmpty)
lang.attributeDoc(instName, instSpec.doc)
lang.instanceHeader(className, instName, dataType, instSpec.isNullable)
lang.instanceCheckCacheAndReturn(instName, dataType)
instSpec match {
case vi: ValueInstanceSpec =>
lang.attrParseIfHeader(instName, vi.ifExpr)
lang.instanceCalculate(instName, dataType, vi.value)
lang.attrParseIfFooter(vi.ifExpr)
case i: ParseInstanceSpec =>
lang.attrParse(i, instName, endian)
}
lang.instanceSetCalculated(instName)
lang.instanceReturn(instName, dataType)
lang.instanceFooter
}
override def compileCalcEndian(ce: CalcEndian): Unit = {
def renderProc(result: FixedEndian): Unit = {
val v = result match {
case LittleEndian => Ast.expr.IntNum(1)
case BigEndian => Ast.expr.IntNum(0)
}
lang.instanceCalculate(IS_LE_ID, CalcIntType, v)
}
lang.switchCases[FixedEndian](IS_LE_ID, ce.on, ce.cases, renderProc, renderProc)
}
}
|
kaitai-io/kaitai_struct_compiler
|
shared/src/main/scala/io/kaitai/struct/GoClassCompiler.scala
|
Scala
|
gpl-3.0
| 3,203 |
package kmeans
import scala.annotation.tailrec
import scala.collection._
import scala.util.Random
import org.scalameter._
import common._
class KMeans {
def generatePoints(k: Int, num: Int): Seq[Point] = {
val randx = new Random(1)
val randy = new Random(3)
val randz = new Random(5)
(0 until num)
.map({ i =>
val x = ((i + 1) % k) * 1.0 / k + randx.nextDouble() * 0.5
val y = ((i + 5) % k) * 1.0 / k + randy.nextDouble() * 0.5
val z = ((i + 7) % k) * 1.0 / k + randz.nextDouble() * 0.5
new Point(x, y, z)
}).to[mutable.ArrayBuffer]
}
def initializeMeans(k: Int, points: Seq[Point]): Seq[Point] = {
val rand = new Random(7)
(0 until k).map(_ => points(rand.nextInt(points.length))).to[mutable.ArrayBuffer]
}
def findClosest(p: Point, means: GenSeq[Point]): Point = {
assert(means.size > 0)
var minDistance = p.squareDistance(means(0))
var closest = means(0)
var i = 1
while (i < means.length) {
val distance = p.squareDistance(means(i))
if (distance < minDistance) {
minDistance = distance
closest = means(i)
}
i += 1
}
closest
}
def classify(points: GenSeq[Point], means: GenSeq[Point]): GenMap[Point, GenSeq[Point]] = {
if (points.length == 0) {
means.map(p => (p, List())).toMap
} else {
points.map(p => (findClosest(p, means), p))
.groupBy(_._1)
.map(pair => (pair._1, pair._2.map(_._2)))
}
}
def findAverage(oldMean: Point, points: GenSeq[Point]): Point = if (points.length == 0) oldMean else {
var x = 0.0
var y = 0.0
var z = 0.0
points.seq.foreach { p =>
x += p.x
y += p.y
z += p.z
}
new Point(x / points.length, y / points.length, z / points.length)
}
def update(classified: GenMap[Point, GenSeq[Point]], oldMeans: GenSeq[Point]): GenSeq[Point] = {
oldMeans.map {p => findAverage(p, classified(p))}
}
def converged(eta: Double)(oldMeans: GenSeq[Point], newMeans: GenSeq[Point]): Boolean = {
val distance = oldMeans.zip(newMeans)
.map {case (p1, p2) => p1.squareDistance(p2)}
.sum
distance <= eta
}
@tailrec
final def kMeans(points: GenSeq[Point], means: GenSeq[Point], eta: Double): GenSeq[Point] = {
val newMeans = update(classify(points, means), means)
if (!converged(eta)(means, newMeans)) kMeans(points, newMeans, eta)
else newMeans
}
}
/** Describes one point in three-dimensional space.
*
* Note: deliberately uses reference equality.
*/
class Point(val x: Double, val y: Double, val z: Double) {
private def square(v: Double): Double = v * v
def squareDistance(that: Point): Double = {
square(that.x - x) + square(that.y - y) + square(that.z - z)
}
private def round(v: Double): Double = (v * 100).toInt / 100.0
override def toString = s"(${round(x)}, ${round(y)}, ${round(z)})"
}
object KMeansRunner {
val standardConfig = config(
Key.exec.minWarmupRuns -> 20,
Key.exec.maxWarmupRuns -> 40,
Key.exec.benchRuns -> 25,
Key.verbose -> true
) withWarmer(new Warmer.Default)
def main(args: Array[String]) {
val kMeans = new KMeans()
val numPoints = 500000
val eta = 0.01
val k = 32
val points = kMeans.generatePoints(k, numPoints)
val means = kMeans.initializeMeans(k, points)
val seqtime = standardConfig measure {
kMeans.kMeans(points, means, eta)
}
println(s"sequential time: $seqtime ms")
val partime = standardConfig measure {
val parPoints = points.par
val parMeans = means.par
kMeans.kMeans(parPoints, parMeans, eta)
}
println(s"parallel time: $partime ms")
println(s"speedup: ${seqtime / partime}")
}
}
|
huajianmao/learning
|
coursera/parprog1/week3/kmeans/src/main/scala/kmeans/KMeans.scala
|
Scala
|
mit
| 3,806 |
package io.buoyant.http
import com.twitter.finagle.{Status => _, _}
import com.twitter.finagle.http._
import io.buoyant.test.FunSuite
class StatusTest extends FunSuite {
def lookup(path: Path) =
await(Namer.global.lookup(path).values.toFuture).get
test("status") {
val client = Http.newService("/$/io.buoyant.http.status/401/foo/bar.a.b/bah")
val rsp = await(client(Request()))
assert(rsp.status == Status.Unauthorized)
}
test("status: invalid") {
val path = Path.read("/$/io.buoyant.http.status/foo/bar.a.b/bah")
assert(lookup(path) == NameTree.Neg)
}
test("status: no code") {
val path = Path.read("/$/io.buoyant.http.status")
assert(lookup(path) == NameTree.Neg)
}
}
|
linkerd/linkerd
|
router/http/src/test/scala/io/buoyant/http/StatusTest.scala
|
Scala
|
apache-2.0
| 723 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical.statsEstimation
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Statistics}
import org.apache.spark.sql.internal.SQLConf
object AggregateEstimation {
import EstimationUtils._
/**
* Estimate the number of output rows based on column stats of group-by columns, and propagate
* column stats for aggregate expressions.
*/
def estimate(conf: SQLConf, agg: Aggregate): Option[Statistics] = {
val childStats = agg.child.stats(conf)
// Check if we have column stats for all group-by columns.
val colStatsExist = agg.groupingExpressions.forall { e =>
e.isInstanceOf[Attribute] && childStats.attributeStats.contains(e.asInstanceOf[Attribute])
}
if (rowCountsExist(conf, agg.child) && colStatsExist) {
// Multiply distinct counts of group-by columns. This is an upper bound, which assumes
// the data contains all combinations of distinct values of group-by columns.
var outputRows: BigInt = agg.groupingExpressions.foldLeft(BigInt(1))(
(res, expr) => res * childStats.attributeStats(expr.asInstanceOf[Attribute]).distinctCount)
outputRows = if (agg.groupingExpressions.isEmpty) {
// If there's no group-by columns, the output is a single row containing values of aggregate
// functions: aggregated results for non-empty input or initial values for empty input.
1
} else {
// Here we set another upper bound for the number of output rows: it must not be larger than
// child's number of rows.
outputRows.min(childStats.rowCount.get)
}
val outputAttrStats = getOutputMap(childStats.attributeStats, agg.output)
Some(Statistics(
sizeInBytes = getOutputSize(agg.output, outputRows, outputAttrStats),
rowCount = Some(outputRows),
attributeStats = outputAttrStats,
isBroadcastable = childStats.isBroadcastable))
} else {
None
}
}
}
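// Added illustration (not part of the original source). Arithmetic only, no Spark API:
// with two group-by columns whose distinct counts are 10 and 20 and a child row count of
// 150, the estimate above is the product of distinct counts capped by the child's row
// count, i.e. min(10 * 20, 150) = 150 output rows.
object AggregateEstimationArithmetic {
  def cappedEstimate(distinctCounts: Seq[BigInt], childRowCount: BigInt): BigInt =
    distinctCounts.foldLeft(BigInt(1))(_ * _).min(childRowCount)
  // cappedEstimate(Seq(BigInt(10), BigInt(20)), BigInt(150)) == BigInt(150)
}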
|
MLnick/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/statsEstimation/AggregateEstimation.scala
|
Scala
|
apache-2.0
| 2,842 |
package cvx
import breeze.linalg.{DenseMatrix, DenseVector, diag, sum}
import breeze.numerics.{exp, log}
/**
* Created by oar on 10.10.17.
*
* Let $P=(p_j)_{j=1}^n$ and $Q=(q_j)_{j=1}^n$ denote discrete probability
* distributions, i.e. $p_j>=0$ and $\\sum p_j=1$ and likewise for $Q$.
*
* Given any matrices $H$, $A$ with n columns (i.e. such that the products
* $HQ$ and $AQ$ are defined when $Q$ is viewed as a column vector) and vectors
* u and r of appropriate size this class implements the problem
*
 * ? = argmin_Q dist_KL(Q,P) subject to HQ <= u and AQ=r.
 *
 * Here the inequality HQ <= u is to be interpreted coordinatewise
 * ($(HQ)_i<=u_i$, for all $i$) and
 * [ dist_KL(Q,P)= E_Q[log(dQ/dP)] = \\sum_jq_j(log(q_j)-log(p_j)) ]
* denotes the Kullback-Leibler distance of Q from P, where $E_Q$ denotes
* the expectation with respect to the probability Q.
* This is also known as the negentropy of P with respect to Q.
*
 * Here we average the difference of the log-probabilities of
 * atomic events $E=\\{j\\}$ under P and Q, where the average is taken
* under Q since we consider Q to be the "true" probability.
*
* The matrix-vector product HQ has the following probabilistic interpretation:
* the discrete probabilities P,Q are viewed as probabilities on the set
* $\\Omega = \\{1,2,\\dots,n\\}$. Now consider the random vector $X:\\Omega\\mapsto R_n$
* given by
* [ X(j) = col_j(H). ]
* Then we have
* [ HQ = \\sum_jq_jcol_j(H) = \\sum_jq_jX(j) = E_Q(X) ].
* Thus the constraints are expectation constraints
* [ E_Q(X)\\leq u] and [ E_Q(Y)=r ],
* where the random vector Y is defined analogeously.
*
* Each row of the matrix H defines and expectation constraint for a scalar
* random variable. Indeed $row_i(H)$ defines the constraint
* [ row_i(H)\\cdot Q = \\sum_jq_jH_{ij} \\leq u_i. ]
*
* In other words $row_i(H)$ defines the constraint $E_Q(X_i)\\leq u_i$
* on the scalar random variable $X_i$ with values $X_i(j)=H_{ij}$, i.e. $X_i$
* can be identified with $row_i(H)$.
*
 * Similarly $row_i(A)$ defines the equality constraint $E_Q(Y_i)=r_i$ on the
* scalar random variable $Y_i:j\\in\\Omega\\mapsto A_{ij}$ which can be identified
* with $row_i(A)$.
*
* The Kullback-Leibler distance dist_KL(Q,P) is convex as a function of Q
* and the minimization problem is implemented as an OptimizationProblem with
* Duality so that the solution can either be computed directly or via a solution
* of the dual problem.
*
* When passing to the dual problem the constraints q_i>=0 can be dropped, since
* they will be satisfied automatically from the way the q_i are computed from the
* dual variables.
* Thus the dimension of the dual problem (H.rows = number of inequality constraints
* plus A.rows = number of equality constraints plus 1 (for the constraint sum(Q)=1))
* is typically much smaller than the dimension n of the variable Q in the primal
* problem. Consequently the preferred approach is the solution via the dual problem.
*/
class Dist_KL(
override val id:String,
val n:Int,
val H:Option[DenseMatrix[Double]],
val u:Option[DenseVector[Double]],
val A:Option[DenseMatrix[Double]],
val r:Option[DenseVector[Double]],
override val solver:Solver,
override val logger:Logger
)
extends OptimizationProblem(id,Dist_KL.objectiveFunction(n),solver,logger)
with Duality { self =>
require(n>0,s"\\nn=${n} is not positive.\\n")
require(if(H.nonEmpty) H.get.cols==n else true,
s"\\nH.cols=${n} required, but H.cols=${H.get.cols}.\\n"
)
require(if(A.nonEmpty) A.get.cols==n else true,
s"\\nA.cols=${n} required, but H.cols=${A.get.cols}.\\n"
)
// if H is given, the so must be u, if A is given we need r
require(if(H.nonEmpty) u.nonEmpty else true,"\\nH is given but u is missing.\\n")
require(if(A.nonEmpty) r.nonEmpty else true,"\\nA is given but r is missing.\\n")
require(if (r.nonEmpty) A.nonEmpty else true, "\\nr is given but A is missing.\\n")
require(H.nonEmpty || A.nonEmpty,"\\nMust have some inequality or equality constraints")
////------------------- dual problem --------------------////
//
// Note that not both H and A can be empty since we require either equality
// or inequality constraints. This we have either
//
// (a) both H,u and A,r or
// (b) A,r but not H,u or
// (c) H,u but not A,r
//
// and we will deal with all these cases simultaneously.
//
// Note that we must always add the constraint E_Q[1]=1 (i.e. Q is a probability).
// Thus when we set up the problem we will always have a matrix A and corresponding
// equality constraints.
// summand 1 is for the constraint E_Q[1]=1
val dualDim:Int = if(A.isEmpty) 1+H.get.rows else
if(H.isEmpty) 1+A.get.rows else 1+H.get.rows+A.get.rows
val numInequalities:Int = if(H.isEmpty) 0 else H.get.rows
  val e = 2.718281828459045 // exp(1.0)
/** See docs/maxent.pdf, before eq.(20).*/
val vec_R:DenseVector[Double] = DenseVector.fill[Double](n)(1.0/(n*e))
/** The vector w=(u,r), see docs/maxent.pdf, after eq.(18).
* Note that we have to add the right hand side of the equality constraint
* E_Q[1]=sum(Q)=1 to the vector r (as first coordinate), even if we have no
* matrix A and vector r.
*/
val vec_w:DenseVector[Double] = {
// extend r with new first coordinate 1.0 from the constraint E_Q[1]=sum(Q)=1.
val r_extended = Dist_KL.r_with_probEQ(n,r)
if (H.isEmpty) r_extended else DenseVector.vertcat(u.get,r_extended)
}
/** The vertically stacked matrix B=(H',A')' with A stacked below H, see
* docs/maxent.pdf, after eq.(18). Note that A has to be augmented by the
* constraint E_Q[1]=sum(Q)=1 (as first row).
*/
val mat_B:DenseMatrix[Double] = {
// extend A with new first row of 1s from the constraint E_Q[1]=sum(Q)=1.
val A_extended = Dist_KL.A_with_probEQ(n,A)
if (H.isEmpty) A_extended else DenseMatrix.vertcat(H.get,A_extended)
}
/** The dual objective function $L_*(z)$, where $z=\\theta=(\\lambda,\\nu)$,
* see docs/maxent.pdf, eq.(20).
*/
def dualObjFAt(z:DenseVector[Double]):Double =
-((vec_w dot z) + (vec_R dot exp(-mat_B.t*z)))
/** The gradient of the dual objective function $-L_*(z)$, where $z=\\theta=(\\lambda,\\nu)$,
* see docs/maxent.pdf, eq.(21).
*/
def gradientDualObjFAt(z:DenseVector[Double]):DenseVector[Double] =
-vec_w + mat_B*(vec_R:*exp(-mat_B.t*z))
/** The Hessian of the dual objective function $-L_*(z)$, where $z=\\theta=(\\lambda,\\nu)$,
* see docs/maxent.pdf, eq.(22).
*/
def hessianDualObjFAt(z:DenseVector[Double]):DenseMatrix[Double] = {
val y:DenseVector[Double] = vec_R:*exp(-mat_B.t*z)
// B*diag(y): multiply col_j(B) with y(j):
val Bdy = DenseMatrix.zeros[Double](mat_B.rows,mat_B.cols)
for(j <- 0 until Bdy.cols; i <- 0 until Bdy.rows) Bdy(i,j) = mat_B(i,j)*y(j)
-Bdy*mat_B.t
}
/** The function $Q=Q(z)=Q(\\lambda,\\nu)$ which computes the optimal primal
* solution $Q_*$ from the optimal dual solution $z_*=(\\lambda_*,\\nu_*)$.
* See docs/maxent.pdf
*/
def primalOptimum(z:DenseVector[Double]):DenseVector[Double] = vec_R:*exp(-mat_B.t*z)
/** Add the known (unique) solution to the minimization problem.
* For testing purposes.
*/
def addKnownMinimizer(optSol:KnownMinimizer):
OptimizationProblem with Duality with KnownMinimizer =
new Dist_KL(id,n,H,u,A,r,solver,logger) with KnownMinimizer {
override def theMinimizer: DenseVector[Double] = optSol.theMinimizer
def isMinimizer(x:DenseVector[Double],tol:Double):Boolean = optSol.isMinimizer(x,tol)
def minimumValue:Double = optSol.minimumValue
}
}
object Dist_KL {
/** Given optional equality constraints AQ=r
* extend A with new first row of 1s from the equality constraint
* E_Q[1]=sum(Q)=1.
* @param n dimension dim(Q) of KL-problem.
*/
def A_with_probEQ(n:Int,A:Option[DenseMatrix[Double]]):DenseMatrix[Double] = {
val EQ1:DenseMatrix[Double] = DenseMatrix.fill(1,n)(1.0)
if(A.isEmpty) EQ1 else DenseMatrix.vertcat(EQ1,A.get)
}
/** Given optional equality constraints AQ=r
* extend r with new first coordinate 1.0 from the equality constraint
* E_Q[1]=sum(Q)=1.
* @param n dimension dim(Q) of KL-problem.
*/
def r_with_probEQ(n:Int,r:Option[DenseVector[Double]]):DenseVector[Double] = {
val r1 = DenseVector.fill(1)(1.0)
if(r.isEmpty) r1 else DenseVector.vertcat(r1,r.get)
}
  /** Kullback-Leibler distance
    *
    *     d_KL(x,p) = sum_j x_j(log(x_j)-log(p_j)) = sum_j x_j(log(x_j)+log(n))
    *               = log(n) + sum_j x_j log(x_j)     (when sum_j x_j = 1)
    *
    * of x from the discrete uniform distribution p on Omega={1,2,...,n}, p_j=1/n; j=1,2,...,n.
    * The constant log(n) is irrelevant for minimization, but we do not neglect it since
    * the KL-distance has an information theoretic interpretation.
    */
def objectiveFunction(n:Int) = new ObjectiveFunction(n) {
override def valueAt(x: DenseVector[Double]): Double = {
assert(x.length==n,"\\nDimension mismatch x.length = "+x.length+"dim(d_KL) = "+n+"\\n")
x dot log(x*n.toDouble)
}
override def gradientAt(x: DenseVector[Double]): DenseVector[Double] =
DenseVector.tabulate[Double](n)(j => 1+log(x(j))+log(n))
override def hessianAt(x: DenseVector[Double]): DenseMatrix[Double] = {
// diagonal
val d = DenseVector.tabulate[Double](n)(j => 1.0/x(j))
diag(d)
}
}
def setWhereDefined(n:Int):ConvexSet = ConvexSets.firstQuadrant(n)
/** The equality constraint Ax=r combined with the constraint sum(x)=1.
*/
def equalityConstraint(
n:Int, A:Option[DenseMatrix[Double]],r:Option[DenseVector[Double]]
):EqualityConstraint = {
val probEq:EqualityConstraint = Constraints.sumToOne(n)
if(A.isEmpty) probEq else {
assert(r.nonEmpty)
val eqsAr = EqualityConstraint(A.get,r.get)
eqsAr.addEqualities(probEq)
}
}
/** The problem of minimizing the Kullback-Leibler dist_KL(Q,P) distance from the
* discrete uniform distribution P=(p_j) on Omega = {1,2,...,n} (all p_j=1/n) subject
* to the constraints E_Q[H]<=u and E_Q[A]=r.
*
* Here the matrix H is identified with the random vector H on Omega given by
* H: j\\in Omega --> col_j(H)
* (thus with Q=(q_j) we have E_Q[H] = \\sum_jq_j*col_j(H) = HQ) and similarly for the
* matrix A.
*
* @param solverType: "BR" (barrier solver) or "PD" (primal dual solver).
* @return the optimization problem with Duality.
*/
def apply(
id:String, n:Int,
H:Option[DenseMatrix[Double]], u:Option[DenseVector[Double]],
A:Option[DenseMatrix[Double]], r:Option[DenseVector[Double]],
solverType:String, pars:SolverParams, logger:Logger, debugLevel:Int
):Dist_KL = {
require(n > 0, s"\\nn=${n} is not positive.\\n")
require(if (H.nonEmpty) H.get.cols == n else true,
s"\\nH.cols=${n} required, but H.cols=${H.get.cols}.\\n"
)
require(if (A.nonEmpty) A.get.cols == n else true,
s"\\nA.cols=${n} required, but H.cols=${A.get.cols}.\\n"
)
// if H is given, the so must be u, if A is given we need r
require(if (H.nonEmpty) u.nonEmpty else true, "\\nH is given but u is missing.\\n")
require(if (A.nonEmpty) r.nonEmpty else true, "\\nA is given but r is missing.\\n")
require(if (r.nonEmpty) A.nonEmpty else true, "\\nr is given but A is missing.\\n")
require(H.nonEmpty || A.nonEmpty, "\\nMust have some inequality or equality constraints")
require(solverType == "BR" || solverType == "PD", s"\\nUnknown solver: ${solverType}\\n")
// set where the constraints are defined
val C: ConvexSet = ConvexSets.wholeSpace(n)
val pointWhereDefined = DenseVector.fill[Double](n)(1.0 / n)
// note: equalityConstraint already contains the constraint E_Q[1] = sum(Q) = 1
val eqs = equalityConstraint(n,A,r)
val positivityCnts:List[Constraint] = Constraints.allCoordinatesPositive(n)
// funnily dist_KL_2A only works if we don't add in the constraint E_Q[1]=1
// val eqs = equalityConstraint(n,A,r)
val objF = objectiveFunction(n)
val ineqs = if (H.isEmpty) ConstraintSet(n,positivityCnts,C,pointWhereDefined)
else ConstraintSet(H.get,u.get,C).addConstraints(positivityCnts)
val ineqsWithFeasiblePoint = ineqs.withFeasiblePoint(Some(eqs),pars,debugLevel) // phase I
val solver: Solver = if (solverType == "BR")
BarrierSolver(objF,ineqsWithFeasiblePoint,Some(eqs),pars,logger)
else
PrimalDualSolver(C,objF,ineqsWithFeasiblePoint,Some(eqs),pars,logger)
new Dist_KL(id,n,H,u,A,r,solver,logger)
}
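/** Added illustration (a hedged sketch, not part of the original API): with the
 * identification H: j --> col_j(H) used in the comment above, the expectation of the
 * random vector H under Q=(q_j) is simply the matrix-vector product HQ.
 */
def expectationUnderQ(H:DenseMatrix[Double], Q:DenseVector[Double]):DenseVector[Double] = H*Q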
}
|
spyqqqdia/cvx
|
src/main/scala/cvx/Dist_KL.scala
|
Scala
|
mit
| 12,728 |
package com.kozlowst.oms.orderbot.cluster
import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import com.kozlowst.oms.common.commands.{Command, CommandResponse}
import com.kozlowst.oms.common.models.Order
import com.kozlowst.oms.orderbot.config.OrderBotConfig
/**
* Created by tomek on 4/12/17.
*/
object ClientCheckAsk {
def props(clientBotProxy: ActorRef, exchangeProxy: ActorRef): Props = Props(new ClientCheckAsk(clientBotProxy, exchangeProxy))
}
class ClientCheckAsk(clientBotProxy: ActorRef, exchangeProxy: ActorRef) extends Actor with ActorLogging with OrderBotConfig {
override def receive: Receive = {
case command: Command[Order] => {
log.info("Send Command ORDER to ClientBot for verification. ORDER: {}", command.obj)
clientBotProxy ! command
context.become(waitForResponse)
}
}
def waitForResponse: Receive = {
case response: CommandResponse[Order] => {
log.info("Received response from ClientBot for ORDER: {}", response.command.obj)
val command = response.command.copy(topic = exchangeTopic)
exchangeProxy ! command
log.info("Response forwarded to OrderBot: {}", command)
context.stop(self)
}
}
}
|
rysiekblah/oms-akka-poc
|
order-bot/src/main/scala/com/kozlowst/oms/orderbot/cluster/ClientCheckAsk.scala
|
Scala
|
mit
| 1,204 |
package sparxles
package engine
sealed trait Observable[A] {
def observe(observer: Observer[A])
def map[B](f: A => B): Observable[B]
def scan[B](seed: B)(f: (B,A) => B): Observable[B]
}
sealed trait Observer[A] {
  def onNext(a: A): Unit
  def onComplete(): Unit
}
object Observable {
def run[A](stream: EventStream[A]): A = {
var translations: Map[EventStream[_], Observable[_]] = Map.empty
def getTranslation[A](stream: EventStream[A]): Option[Observable[A]] =
translations.get(stream).map(_.asInstanceOf[Observable[A]])
def addTranslation[A](stream: EventStream[A], obs: Observable[A]): Unit =
translations = translations + (stream -> obs)
def toObservable[A](stream: EventStream[A]): Observable[A] = {
import EventStream._
val obs: Observable[A] =
stream match {
case map: Map[a,b] =>
val parent: EventStream[a] = map.source
getTranslation(parent) match {
case Some(p) =>
p.map(map.f)
case None =>
val p = toObservable(parent)
p.map(map.f)
}
case scan: Scan[a,b] =>
val parent: EventStream[a] = scan.source
getTranslation(parent) match {
case Some(p) =>
p.scan(scan.seed)(scan.f)
case None =>
val p = toObservable(parent)
p.scan(scan.seed)(scan.f)
}
case join: Join[a, b] =>
???
case Emit(f) =>
???
}
addTranslation(stream, obs)
obs
}
toObservable(stream)
}
}
|
underscoreio/sparxles
|
src/main/scala/sparxles/engine/Observable.scala
|
Scala
|
apache-2.0
| 1,643 |
package eu.gruchala.other
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable.ListBuffer
import scala.annotation.tailrec
import scala.util.control.NoStackTrace
object CookBookApplication {
def main(args: Array[String]) {}
}
object DifficultyLevel extends Enumeration {
val HARD, MEDIUM, EASY = Value
}
case class Product(name: String, cost: Int = 1) {
  require(cost > 0, "cost must be positive")
}
object Product {
def apply(name: String) = new Product(name)
}
case class Recipe(id: Long, name: String, products: List[Product], difficultyLevel: DifficultyLevel.Value) {
def getSummaryCost: Int = products.foldLeft(0) {
(count, prod) => count + prod.cost
}
}
object Recipe {
  // shared counter so that every recipe gets a unique id
  private val idGenerator = new AtomicInteger()
  def apply(name: String, products: List[Product], difficultyLevel: DifficultyLevel.Value) =
    new Recipe(idGenerator.incrementAndGet(), name, products, difficultyLevel)
  def apply(name: String, products: Map[String, Int], difficultyLevel: DifficultyLevel.Value): Recipe = {
    val l = for {s <- products} yield Product(s._1, s._2)
    new Recipe(idGenerator.incrementAndGet(), name, l.toList, difficultyLevel)
  }
}
object CookBook {
private val prods: ListBuffer[Recipe] = new ListBuffer[Recipe]
def addRecipe(r: Recipe) {
prods += r
}
def clear() {
prods.clear()
}
def allRecipes: List[Recipe] = {
prods.toList
}
def findById(id: Long): Recipe = {
  @tailrec
  def loop(restList: List[Recipe]): Recipe = restList match {
    case head :: tail =>
      if (head.id == id) head else loop(tail)
    case Nil => throw NoSuchRecipeException
  }
  loop(prods.toList)
}
def findByDifficultLevel(lvl: DifficultyLevel.Value): List[Recipe] = for {
r <- prods.toList
if r.difficultyLevel == lvl
} yield r
def removeById(id: Long): Unit = {
prods.find(_.id==id) match {
case Some(x) => prods -= x
case None => throw NoSuchRecipeException
}
}
}
object NoSuchRecipeException extends Exception with NoStackTrace
|
leszekgruchala/scala-exercises
|
src/main/scala/eu/gruchala/other/CookBookApplication.scala
|
Scala
|
apache-2.0
| 2,148 |
package debop4s.rediscala.serializer
/**
* Serializes Redis data.
* @author Sunghyouk Bae
*/
trait RedisSerializer[@miniboxed T] {
val EMPTY_BYTES = Array[Byte]()
/**
* Serializes an object.
* @param graph the object to be serialized
* @return the serialized data
*/
def serialize(graph: T): Array[Byte]
/**
* Deserializes an object.
* @param bytes the serialized data
* @return the original object
*/
def deserialize(bytes: Array[Byte]): T
}
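/** A minimal example implementation (added sketch, not part of the original file):
 * a UTF-8 based serializer for plain strings, mainly to illustrate the contract above.
 */
object Utf8StringSerializer extends RedisSerializer[String] {
  override def serialize(graph: String): Array[Byte] =
    if (graph == null) EMPTY_BYTES else graph.getBytes("UTF-8")
  override def deserialize(bytes: Array[Byte]): String =
    if (bytes == null || bytes.isEmpty) "" else new String(bytes, "UTF-8")
}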
|
debop/debop4s
|
debop4s-rediscala/src/main/scala/debop4s/rediscala/serializer/RedisSerializer.scala
|
Scala
|
apache-2.0
| 520 |
package blended.security
import java.util
import blended.container.context.api.ContainerContext
import blended.security.boot.{GroupPrincipal, UserPrincipal}
import blended.security.internal.BlendedConfiguration
import blended.util.logging.Logger
import com.typesafe.config.{Config, ConfigFactory}
import javax.security.auth.Subject
import javax.security.auth.callback.{CallbackHandler, NameCallback, PasswordCallback}
import javax.security.auth.login.LoginException
import javax.security.auth.spi.LoginModule
import scala.reflect.ClassTag
abstract class AbstractLoginModule extends LoginModule {
private[this] val log = Logger[AbstractLoginModule]
protected var subject : Option[Subject] = None
protected var cbHandler : Option[CallbackHandler] = None
protected var loginConfig : Config = ConfigFactory.empty()
protected var loggedInUser : Option[String] = None
protected var ctCtxt : Option[ContainerContext] = None
protected val moduleName : String
override def initialize(
subject : Subject,
callbackHandler : CallbackHandler,
sharedState : util.Map[String, _],
options : util.Map[String, _]
) : Unit = {
def getOption[T](name : String)(implicit classTag : ClassTag[T]) : Option[T] =
Option(options.get(name)) match {
case Some(v) if classTag.runtimeClass.isAssignableFrom(v.getClass) =>
Some(v.asInstanceOf[T])
case Some(v) =>
log.warn(s"Expected configuration object [$name] of type [${classOf[Config].getName()}], got [${v.getClass().getName()}]")
None
case None => None
}
log.info(s"Initialising Login module ...[$moduleName]")
loginConfig = getOption[Config](BlendedConfiguration.configProp).getOrElse(ConfigFactory.empty())
ctCtxt = getOption[ContainerContext](BlendedConfiguration.ctCtxtProp)
// This is the subject which needs to be enriched with the user and group information
this.subject = Option(subject)
// This is the callback handler passed in to determine the username and password
this.cbHandler = Option(callbackHandler)
}
@throws[LoginException]
protected def extractCredentials() : (String, String) = {
cbHandler match {
case None => throw new LoginException(s"No Callback Handler defined for module [$moduleName]")
case Some(cbh) =>
val nameCallback = new NameCallback("User: ")
val passwordCallback = new PasswordCallback("Password: ", false)
cbh.handle(Array(nameCallback, passwordCallback))
val user = nameCallback.getName()
val pwd = new String(passwordCallback.getPassword())
log.info(s"Authenticating user [$user]")
(user, pwd)
}
}
@throws[LoginException]
final def login() : Boolean = {
try {
doLogin()
} finally {
postLogin()
}
}
@throws[LoginException]
protected def doLogin() : Boolean
@throws[LoginException]
override def commit() : Boolean = {
loggedInUser match {
case None => false
case Some(u) =>
log.debug(s"User [$u] logged in successfully")
subject.foreach { s =>
s.getPrincipals().add(new UserPrincipal(u))
val groups = getGroups(u)
log.debug(s"Found groups [$groups] for [$u]")
groups.foreach { g =>
s.getPrincipals().add(new GroupPrincipal(g))
}
}
postCommit()
true
}
}
@throws[LoginException]
override def abort() : Boolean = {
loggedInUser = None
postAbort()
true
}
@throws[LoginException]
override def logout() : Boolean = {
loggedInUser = None
postLogout()
true
}
protected def getGroups(user : String) : List[String]
protected def postLogin() : Unit = {}
protected def postAbort() : Unit = {}
protected def postLogout() : Unit = {}
protected def postCommit() : Unit = {}
}
|
woq-blended/blended
|
blended.security/jvm/src/main/scala/blended/security/AbstractLoginModule.scala
|
Scala
|
apache-2.0
| 3,872 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
import com.intellij.psi.stubs.StubElement
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScPackaging
/**
* @author ilyas
*/
trait ScPackagingStub extends StubElement[ScPackaging] {
def parentPackageName: String
def packageName: String
def isExplicit: Boolean
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/stubs/ScPackagingStub.scala
|
Scala
|
apache-2.0
| 359 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah
*/
package sbt.internal.util
import sbt.util._
// note that setting the logging level on this logger has no effect on its behavior, only
// on the behavior of the delegates.
class MultiLogger(delegates: List[AbstractLogger]) extends BasicLogger {
override lazy val ansiCodesSupported = delegates exists supported
private[this] lazy val allSupportCodes = delegates forall supported
private[this] def supported = (_: AbstractLogger).ansiCodesSupported
override def setLevel(newLevel: Level.Value): Unit = {
super.setLevel(newLevel)
dispatch(new SetLevel(newLevel))
}
override def setTrace(level: Int): Unit = {
super.setTrace(level)
dispatch(new SetTrace(level))
}
override def setSuccessEnabled(flag: Boolean): Unit = {
super.setSuccessEnabled(flag)
dispatch(new SetSuccess(flag))
}
def trace(t: => Throwable): Unit = dispatch(new Trace(t))
def log(level: Level.Value, message: => String): Unit = dispatch(new Log(level, message))
def success(message: => String): Unit = dispatch(new Success(message))
def logAll(events: Seq[LogEvent]): Unit = delegates.foreach(_.logAll(events))
def control(event: ControlEvent.Value, message: => String): Unit = delegates.foreach(_.control(event, message))
private[this] def dispatch(event: LogEvent): Unit = {
val plainEvent = if (allSupportCodes) event else removeEscapes(event)
for (d <- delegates)
if (d.ansiCodesSupported)
d.log(event)
else
d.log(plainEvent)
}
private[this] def removeEscapes(event: LogEvent): LogEvent =
{
import ConsoleLogger.{ removeEscapeSequences => rm }
event match {
case s: Success => new Success(rm(s.msg))
case l: Log => new Log(l.level, rm(l.msg))
case ce: ControlEvent => new ControlEvent(ce.event, rm(ce.msg))
case _: Trace | _: SetLevel | _: SetTrace | _: SetSuccess => event
}
}
}
|
Duhemm/util
|
internal/util-logging/src/main/scala/sbt/internal/util/MultiLogger.scala
|
Scala
|
bsd-3-clause
| 1,982 |
package org.template.recommendation
import io.prediction.controller.PPreparator
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
class Preparator
extends PPreparator[TrainingData, PreparedData] {
def prepare(sc: SparkContext, trainingData: TrainingData): PreparedData = {
new PreparedData(ratings = trainingData.ratings)
}
}
case class PreparedData(
ratings: RDD[Rating]
)
|
wangmiao1981/PredictionIO
|
examples/scala-parallel-recommendation/custom-serving/src/main/scala/Preparator.scala
|
Scala
|
apache-2.0
| 449 |
import javax.servlet.ServletContext
import org.scalatra.LifeCycle
import de.tu_berlin.impro3.frontend._
class ScalatraBootstrap extends LifeCycle {
override def init(context: ServletContext) {
// Mount servlets.
context.mount(new LocationController, "/api/location/*")
context.mount(new HashtagController, "/api/hashtag/*")
context.mount(new MainServlet, "/*")
}
}
|
joroKr21/spatio-temporal-dynamics
|
impro3-ws14-frontend/src/main/scala/ScalatraBootstrap.scala
|
Scala
|
apache-2.0
| 387 |
package com.twitter.finagle.redis.integration
import com.twitter.finagle.redis.ClientError
import com.twitter.finagle.redis.naggati.RedisClientServerIntegrationTest
import com.twitter.finagle.redis.protocol._
import com.twitter.finagle.redis.tags.{ClientServerTest, RedisTest}
import com.twitter.finagle.redis.util.{BytesToString, StringToBuf, StringToChannelBuffer}
import com.twitter.io.Buf
import com.twitter.util.Await
import org.jboss.netty.buffer.ChannelBuffer
import org.junit.Ignore
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import scala.language.implicitConversions
@Ignore
@RunWith(classOf[JUnitRunner])
final class StringClientServerIntegrationSuite extends RedisClientServerIntegrationTest {
implicit def convertToChannelBuffer(s: String): ChannelBuffer = StringToChannelBuffer(s)
implicit def convertToChannelBuf(s: String): Buf = StringToBuf(s)
test("APPEND should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(Append("append1", "Hello"))) == IntegerReply(5))
assert(Await.result(client(Append("append1", " World"))) == IntegerReply(11))
assertBulkReply(client(Get("append1")), "Hello World")
}
}
test("BITCOUNT should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(BitCount("bitcount"))) == IntegerReply(0L))
assert(Await.result(client(Set("bitcount", "bar"))) == StatusReply("OK"))
assert(Await.result(client(BitCount("bitcount"))) == IntegerReply(10L))
assert(Await.result(client(BitCount("bitcount", Some(2), Some(4)))) == IntegerReply(4L))
}
}
test("BITOP should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(SetBit("bitop1", 0, 1))) == IntegerReply(0L))
assert(Await.result(client(SetBit("bitop1", 3, 1))) == IntegerReply(0L))
assert(Await.result(client(SetBit("bitop2", 2, 1))) == IntegerReply(0L))
assert(Await.result(client(SetBit("bitop2", 3, 1))) == IntegerReply(0L))
assert(Await.result(client(BitOp(BitOp.And, "bitop3", Seq("bitop1", "bitop2")))) ==
IntegerReply(1L))
assert(Await.result(client(GetBit("bitop3", 0))) == IntegerReply(0L))
assert(Await.result(client(GetBit("bitop3", 3))) == IntegerReply(1L))
assert(Await.result(client(BitOp(BitOp.Or, "bitop3", Seq("bitop1", "bitop2")))) ==
IntegerReply(1L))
assert(Await.result(client(GetBit("bitop3", 0))) == IntegerReply(1L))
assert(Await.result(client(GetBit("bitop3", 1))) == IntegerReply(0L))
assert(Await.result(client(BitOp(BitOp.Xor, "bitop3", Seq("bitop1", "bitop2")))) ==
IntegerReply(1L))
assert(Await.result(client(GetBit("bitop3", 0))) == IntegerReply(1L))
assert(Await.result(client(GetBit("bitop3", 1))) == IntegerReply(0L))
assert(Await.result(client(BitOp(BitOp.Not, "bitop3", Seq("bitop1")))) == IntegerReply(1L))
assert(Await.result(client(GetBit("bitop3", 0))) == IntegerReply(0L))
assert(Await.result(client(GetBit("bitop3", 1))) == IntegerReply(1L))
assert(Await.result(client(GetBit("bitop3", 4))) == IntegerReply(1L))
}
}
test("DECR should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(Set(bufFoo, bufBar))) == StatusReply("OK"))
assert(Await.result(client(Decr("decr1"))) == IntegerReply(-1))
assert(Await.result(client(Decr("decr1"))) == IntegerReply(-2))
assert(Await.result(client(Decr(bufFoo))).isInstanceOf[ErrorReply])
}
}
test("DECRBY should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(Set(bufFoo, bufBar))) == StatusReply("OK"))
assert(Await.result(client(DecrBy("decrby1", 1))) == IntegerReply(-1))
assert(Await.result(client(DecrBy("decrby1", 10))) == IntegerReply(-11))
assert(Await.result(client(DecrBy(bufFoo, 1))).isInstanceOf[ErrorReply])
}
}
test("GET should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(Set(bufFoo, bufBar))) == StatusReply("OK"))
assert(Await.result(client(Get("thing"))).isInstanceOf[EmptyBulkReply])
assertBulkReply(client(Get(bufFoo)), "bar")
intercept[ClientError] {
Await.result(client(Get(null: Buf)))
}
intercept[ClientError] {
Await.result(client(Get(null: List[Array[Byte]])))
}
}
}
test("GETBIT should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(SetBit("getbit", 7, 1))) == IntegerReply(0))
assert(Await.result(client(GetBit("getbit", 0))) == IntegerReply(0))
assert(Await.result(client(GetBit("getbit", 7))) == IntegerReply(1))
assert(Await.result(client(GetBit("getbit", 100))) == IntegerReply(0))
}
}
test("GETRANGE should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val key = Buf.Utf8("getrange")
val value = "This is a string"
assert(Await.result(client(Set(key, value))) == StatusReply("OK"))
assertBulkReply(client(GetRange(key, 0, 3)), "This")
assertBulkReply(client(GetRange(key, -3, -1)), "ing")
assertBulkReply(client(GetRange(key, 0, -1)), value)
assertBulkReply(client(GetRange(key, 10, 100)), "string")
}
}
test("GETSET should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val key = Buf.Utf8("getset")
assert(Await.result(client(Incr(key))) == IntegerReply(1))
assertBulkReply(client(GetSet(key, "0")), "1")
assertBulkReply(client(Get(key)), "0")
assert(Await.result(client(GetSet("brandnewkey", "foo"))) == EmptyBulkReply())
}
}
test("INCR should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(Set(bufFoo, bufBar))) == StatusReply("OK"))
assert(Await.result(client(Incr("incr1"))) == IntegerReply(1))
assert(Await.result(client(Incr("incr1"))) == IntegerReply(2))
assert(Await.result(client(Incr(bufFoo))).isInstanceOf[ErrorReply])
}
}
test("INCRBY should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(Set(bufFoo, bufBar))) == StatusReply("OK"))
assert(Await.result(client(IncrBy("incrby1", 1))) == IntegerReply(1))
assert(Await.result(client(IncrBy("incrby1", 10))) == IntegerReply(11))
assert(Await.result(client(IncrBy(bufFoo, 1))).isInstanceOf[ErrorReply])
}
}
test("MGET should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(Set(bufFoo, bufBar))) == StatusReply("OK"))
val expects = List(
BytesToString(RedisCodec.NIL_VALUE_BA.array),
BytesToString(bar.array)
)
val req = client(MGet(List(Buf.Utf8("thing"), bufFoo)))
assertMBulkReply(req, expects)
intercept[ClientError] {
Await.result(client(MGet(List.empty[Buf])))
}
}
}
test("MSET should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val input = Map(
Buf.Utf8("thing") -> Buf.Utf8("thang"),
bufFoo -> bufBar,
Buf.Utf8("stuff") -> Buf.Utf8("bleh")
)
assert(Await.result(client(MSet(input))) == StatusReply("OK"))
val req = client(MGet(List(Buf.Utf8("thing"), bufFoo,
Buf.Utf8("noexists"),
Buf.Utf8("stuff"))))
val expects = List("thang", "bar", BytesToString(RedisCodec.NIL_VALUE_BA.array), "bleh")
assertMBulkReply(req, expects)
}
}
test("MSETNX should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val input1 = Map(
Buf.Utf8("msnx.key1") -> Buf.Utf8("Hello"),
Buf.Utf8("msnx.key2") -> Buf.Utf8("there")
)
assert(Await.result(client(MSetNx(input1))) == IntegerReply(1))
val input2 = Map(
Buf.Utf8("msnx.key2") -> Buf.Utf8("there"),
Buf.Utf8("msnx.key3") -> Buf.Utf8("world")
)
assert(Await.result(client(MSetNx(input2))) == IntegerReply(0))
val expects = List("Hello", "there", BytesToString(RedisCodec.NIL_VALUE_BA.array))
assertMBulkReply(client(MGet(List(Buf.Utf8("msnx.key1"),
Buf.Utf8("msnx.key2"),
Buf.Utf8("msnx.key3")))), expects)
}
}
test("PSETEX should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
intercept[ClientError] {
Await.result(client(PSetEx(null, 300000L, "value")))
}
intercept[ClientError] {
Await.result(client(PSetEx("psetex1", 300000L, null)))
}
intercept[ClientError] {
Await.result(client(PSetEx("psetex1", 0L, "value")))
}
assert(Await.result(client(PSetEx("psetex1", 300000L, "value"))) == StatusReply("OK"))
}
}
test("SET should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
intercept[ClientError] {
Await.result(client(Set(null, null)))
}
intercept[ClientError] {
Await.result(client(Set("key1", null)))
}
intercept[ClientError] {
Await.result(client(Set(null, "value1")))
}
}
}
test("SETBIT should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
assert(Await.result(client(SetBit("setbit", 7, 1))) == IntegerReply(0))
assert(Await.result(client(SetBit("setbit", 7, 0))) == IntegerReply(1))
assertBulkReply(client(Get("setbit")), BytesToString(Array[Byte](0)))
}
}
test("SETEX should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val key = StringToBuf("setex")
assert(Await.result(client(SetEx(key, 10, "Hello"))) == StatusReply("OK"))
Await.result(client(Ttl(key))) match {
//TODO: match must beCloseTo(10, 2)
case IntegerReply(seconds) => assert(seconds.toInt - 10 < 2)
case _ => fail("Expected IntegerReply")
}
assertBulkReply(client(Get(key)), "Hello")
}
}
test("SETNX should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val key = "setnx"
val value1 = "Hello"
val value2 = "World"
assert(Await.result(client(SetNx(key, value1))) == IntegerReply(1))
assert(Await.result(client(SetNx(key, value2))) == IntegerReply(0))
assertBulkReply(client(Get(key)), value1)
}
}
test("SETRANGE should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val key = "setrange"
val value = "Hello World"
assert(Await.result(client(Set(key, value))) == StatusReply("OK"))
assert(Await.result(client(SetRange(key, 6, "Redis"))) == IntegerReply(11))
assertBulkReply(client(Get(key)), "Hello Redis")
}
}
test("STRLEN should work correctly", ClientServerTest, RedisTest) {
withRedisClient { client =>
val key = "strlen"
val value = "Hello World"
assert(Await.result(client(Set(key, value))) == StatusReply("OK"))
assert(Await.result(client(Strlen(key))) == IntegerReply(11))
assert(Await.result(client(Strlen("nosuchkey"))) == IntegerReply(0))
}
}
}
|
sveinnfannar/finagle
|
finagle-redis/src/test/scala/com/twitter/finagle/redis/commands/string/StringClientServerIntegrationSuite.scala
|
Scala
|
apache-2.0
| 11,444 |
class C[T] extends D[T] {
private def c1 = 0
private[this] def c2 = 0
}
trait D[T] {
self: C[T] =>
private def d1 = 0
private[this] def d2 = 0
c1 // a member, but inaccessible.
c2 // a member, but inaccessible.
d1 // okay
d2 // okay
class C {
d1
d2
}
def x(other: D[Any]): Unit = {
other.d1
other.d2 // not a member
}
}
|
folone/dotty
|
tests/untried/neg/t7475f.scala
|
Scala
|
bsd-3-clause
| 369 |
package fpinscala.answers
/**
* See [[fpinscala.gettingstarted.MyModule]]
*/
object Exercise3 {
// Exercise 3: Implement `curry`.
// Note that `=>` associates to the right, so we could
// write the return type as `A => B => C`
def curry[A,B,C](f: (A, B) => C): A => (B => C) =
a => b => f(a, b)
// NB: The `Function2` trait has a `curried` method already
// Exercise 4: Implement `uncurry`
def uncurry[A,B,C](f: A => B => C): (A, B) => C =
(a, b) => f(a)(b)
/*
NB: There is a method on the `Function` object in the standard library,
`Function.uncurried` that you can use for uncurrying.
Note that we can go back and forth between the two forms. We can curry
and uncurry and the two forms are in some sense "the same". In FP jargon,
we say that they are _isomorphic_ ("iso" = same; "morphe" = shape, form),
a term we inherit from category theory.
*/
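// Added round-trip sketch (not part of the original exercise): composing the two
// conversions gives back a function that behaves like the original, which is the
// isomorphism mentioned above.
def curryRoundTrip: Boolean = {
  val add: (Int, Int) => Int = _ + _
  val add2 = uncurry(curry(add))
  add2(2, 3) == add(2, 3) && curry(add)(2)(3) == 5
}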
}
|
tobyweston/fpinscala
|
answers/src/main/scala/fpinscala/answers/Exercise3.scala
|
Scala
|
mit
| 899 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd
import org.apache.hadoop.fs.{ FileSystem, Path }
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.util.ADAMFunSuite
import org.seqdoop.hadoop_bam.CRAMInputFormat
class ParallelFileMergerSuite extends ADAMFunSuite {
sparkTest("cannot write both empty gzip block and cram eof") {
intercept[IllegalArgumentException] {
// we don't need to pass real paths here
ParallelFileMerger.mergeFiles(sc,
new Path("output"),
new Path("head"),
writeEmptyGzipBlock = true,
writeCramEOF = true)
}
}
sparkTest("buffer size must be non-negative") {
intercept[IllegalArgumentException] {
// we don't need to pass real paths here
ParallelFileMerger.mergeFiles(sc,
new Path("output"),
new Path("head"),
optBufferSize = Some(0))
}
}
sparkTest("get the size of several files") {
val files = Seq(testFile("unmapped.sam"),
testFile("small.sam"))
.map(new Path(_))
val fileSizes = Seq(29408, 3093)
val filesWithSizes = files.zip(fileSizes)
val fs = FileSystem.get(sc.hadoopConfiguration)
val (size, sizes) = ParallelFileMerger.getFullSize(fs, files)
assert(size === fileSizes.sum.toLong)
sizes.map(_._2)
.zip(fileSizes)
.foreach(p => assert(p._1 === p._2))
}
sparkTest("block size must be positive and non-zero when trying to merge files") {
intercept[AssertionError] {
ParallelFileMerger.generateMerges(0, Seq((new Path(testFile("small.sam")), 3093)))
}
}
sparkTest("must provide files to merge") {
intercept[AssertionError] {
ParallelFileMerger.generateMerges(1024, Seq.empty)
}
}
sparkTest("if two files are both below the block size, they should merge into one shard") {
val files = Seq(testFile("unmapped.sam"),
testFile("small.sam"))
.map(new Path(_))
val fs = FileSystem.get(sc.hadoopConfiguration)
val fileSizesMap = files.map(f => (f, fs.getFileStatus(f).getLen().toInt))
.toMap
val (_, filesWithSizes) = ParallelFileMerger.getFullSize(fs, files)
val merges = ParallelFileMerger.generateMerges(Int.MaxValue,
filesWithSizes)
assert(merges.size === 1)
val (index, paths) = merges.head
assert(index === 0)
assert(paths.size === 2)
paths.foreach(t => {
val (file, start, end) = t
val path = new Path(file)
assert(start === 0)
assert(fileSizesMap.contains(path))
val fileSize = fileSizesMap(path)
assert(end === fileSize - 1)
})
}
sparkTest("merge two files where one is greater than the block size") {
// unmapped.sam -> slightly under 29k
// small.sam -> 3k
val files = Seq(testFile("unmapped.sam"),
testFile("small.sam"))
.map(new Path(_))
val fs = FileSystem.get(sc.hadoopConfiguration)
val fileSizesMap = files.map(f => (f, fs.getFileStatus(f).getLen().toInt))
.toMap
val (_, filesWithSizes) = ParallelFileMerger.getFullSize(fs, files)
val merges = ParallelFileMerger.generateMerges(16 * 1024, // 16KB
filesWithSizes)
assert(merges.size === 2)
val optFirstMerge = merges.filter(_._1 == 0)
.headOption
assert(optFirstMerge.isDefined)
optFirstMerge.foreach(firstMerge => {
val (_, merges) = firstMerge
assert(merges.size === 1)
val (file, start, end) = merges.head
val path = new Path(file)
assert(path.getName === "unmapped.sam")
assert(start === 0)
assert(end === 16 * 1024 - 1)
})
val optSecondMerge = merges.filter(_._1 == 1)
.headOption
assert(optSecondMerge.isDefined)
optSecondMerge.foreach(firstMerge => {
val (_, merges) = firstMerge
assert(merges.size === 2)
val (file0, start0, end0) = merges.head
val path0 = new Path(file0)
assert(path0.getName === "unmapped.sam")
assert(start0 === 16 * 1024)
assert(end0 === (fs.getFileStatus(path0).getLen().toInt - 1))
val (file1, start1, end1) = merges.tail.head
val path1 = new Path(file1)
assert(path1.getName === "small.sam")
assert(start1 === 0)
assert(end1 === (fs.getFileStatus(path1).getLen().toInt - 1))
})
}
sparkTest("merge a sharded sam file") {
val reads = sc.loadAlignments(testFile("unmapped.sam"))
val outPath = tmpFile("out.sam")
reads.transform(_.repartition(4))
.saveAsSam(outPath, asSingleFile = true, deferMerging = true)
val fs = FileSystem.get(sc.hadoopConfiguration)
val filesToMerge = (Seq(outPath + "_head") ++ (0 until 4).map(i => {
outPath + "_tail/part-r-0000%d".format(i)
})).map(new Path(_))
.map(p => (p.toString, 0L, fs.getFileStatus(p).getLen().toLong - 1L))
ParallelFileMerger.mergePaths(outPath,
filesToMerge,
sc.broadcast(sc.hadoopConfiguration),
false,
false)
val mergedReads = sc.loadAlignments(outPath)
assert(mergedReads.rdd.count === reads.rdd.count)
}
sparkTest("merge a sharded bam file") {
val reads = sc.loadAlignments(testFile("unmapped.sam"))
val outPath = tmpFile("out.bam")
reads.transform(_.repartition(4))
.saveAsSam(outPath, asSingleFile = true, deferMerging = true)
val fs = FileSystem.get(sc.hadoopConfiguration)
val filesToMerge = (Seq(outPath + "_head") ++ (0 until 4).map(i => {
outPath + "_tail/part-r-0000%d".format(i)
})).map(new Path(_))
.map(p => (p.toString, 0L, fs.getFileStatus(p).getLen().toLong - 1L))
ParallelFileMerger.mergePaths(outPath,
filesToMerge,
sc.broadcast(sc.hadoopConfiguration),
true,
false)
val mergedReads = sc.loadAlignments(outPath)
assert(mergedReads.rdd.count === reads.rdd.count)
}
sparkTest("merge a sharded cram file") {
val referencePath = resourceUrl("artificial.fa").toString
sc.hadoopConfiguration.set(CRAMInputFormat.REFERENCE_SOURCE_PATH_PROPERTY,
referencePath)
val reads = sc.loadAlignments(testFile("artificial.cram"))
val outPath = tmpFile("out.cram")
reads.transform(_.repartition(4))
.saveAsSam(outPath, isSorted = true, asSingleFile = true, deferMerging = true)
val fs = FileSystem.get(sc.hadoopConfiguration)
val filesToMerge = (Seq(outPath + "_head") ++ (0 until 4).map(i => {
outPath + "_tail/part-r-0000%d".format(i)
})).map(new Path(_))
.map(p => (p.toString, 0L, fs.getFileStatus(p).getLen().toLong - 1L))
ParallelFileMerger.mergePaths(outPath,
filesToMerge,
sc.broadcast(sc.hadoopConfiguration),
false,
true)
val mergedReads = sc.loadAlignments(outPath)
assert(mergedReads.rdd.count === reads.rdd.count)
}
test("can't turn a negative index into a path") {
intercept[AssertionError] {
ParallelFileMerger.indexToPath(-1, "nonsense")
}
}
test("generate a path from an index") {
val path = ParallelFileMerger.indexToPath(2, "nonsense")
assert(path.toString === "nonsense_part-r-00002")
}
}
|
massie/adam
|
adam-core/src/test/scala/org/bdgenomics/adam/rdd/ParallelFileMergerSuite.scala
|
Scala
|
apache-2.0
| 7,825 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.metastore
import slamdata.Predef._
import quasar.Variables
import quasar.fs.FileSystemType
import quasar.fs.mount.{ConnectionUri, MountConfig, MountType}
import quasar.fs.mount.cache.ViewCache
import quasar.sql._
import java.time.Instant
import doobie.util.transactor.Transactor
import eu.timepit.refined.numeric.NonNegative
import eu.timepit.refined.refineMV
import org.specs2.mutable._
import pathy.Path
import Path._
import doobie.specs2.analysisspec.TaskChecker
import scalaz.concurrent.Task
abstract class MetaStoreAccessSpec extends Specification with TaskChecker with MetaStoreFixture {
val schema = Schema.schema
def rawTransactor: Transactor[Task]
def refineNonNeg = refineMV[NonNegative]
"static query checks" >> {
val f = rootDir </> file("α")
val instant = Instant.ofEpochSecond(0)
val viewCache = ViewCache(
MountConfig.ViewConfig(sqlB"α", Variables.empty), None, None, 0, None, None,
0, instant, ViewCache.Status.Pending, None, f, None)
val pathedMountConfig = PathedMountConfig(
rootDir </> file("mimir"),
MountType.fileSystemMount(FileSystemType("local")),
ConnectionUri("/tmp/local"))
// NB: these tests do not execute the queries or validate results, but only
// type-check them against the schema available via the transactor.
check(Queries.fsMounts)
check(Queries.mounts)
check(Queries.insertPathedMountConfig(pathedMountConfig))
check(Queries.mountsHavingPrefix(rootDir))
check(Queries.lookupMountType(rootDir))
check(Queries.lookupMountConfig(rootDir))
check(Queries.insertMount(rootDir, MountConfig.fileSystemConfig(FileSystemType(""), ConnectionUri(""))))
check(Queries.deleteMount(rootDir))
check(Queries.viewCachePaths)
check(Queries.lookupViewCache(f))
check(Queries.insertViewCache(f, viewCache))
check(Queries.updateViewCache(f, viewCache))
check(Queries.updateViewCacheErrorMsg(f, "err"))
check(Queries.deleteViewCache(f))
check(Queries.staleCachedViews(instant))
check(Queries.cacheRefreshAssigneStart(f, "α", instant, f))
check(Queries.updatePerSuccesfulCacheRefresh(f, instant, 0, instant))
}
"fsMounts" should {
"not attempt to load views and modules" >> {
interpretIO(for {
_ <- MetaStoreAccess.insertMount(rootDir </> file("view"), MountConfig.viewConfig0(sqlB"1"))
_ <- MetaStoreAccess.insertMount(rootDir </> dir("module"), MountConfig.moduleConfig(sqlM"IMPORT `/f/`"))
fsMounts <- MetaStoreAccess.fsMounts
} yield {
fsMounts must_=== Map.empty
}).unsafePerformSync
}
}
}
class EphemeralH2AccessSpec extends MetaStoreAccessSpec with H2MetaStoreFixture
|
jedesah/Quasar
|
core/src/test/scala/quasar/metastore/MetaStoreAccessSpec.scala
|
Scala
|
apache-2.0
| 3,335 |
package commands
import codeoptimus.sgir.comm.IRCPacket
/**
*
* Created with IntelliJ IDEA.
* User: Aaron Allred
*/
object Command {
def recvMsg(ircPacket: IRCPacket) {
ircPacket.msg
}
// Parse out the message sent for any possible commands.
def parseCommand(msg: String): (String, String) = {
val command = msg.split(" ").head.toLowerCase
val argument = msg.replace(command + " ", "")
(command, argument)
}
// Take first part as command and rest as Arguments sent.
def parseArguments(msg: String): (String, List[String]) = {
val command = msg.split(" ").head.toLowerCase
val arguments = msg.split(" ").drop(1).toList
(command, arguments)
}
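// Added usage sketch (hypothetical message, not part of the original file):
// shows how a raw message splits into a command and its argument(s).
def parseExample(): Boolean = {
  val (cmd, arg) = parseCommand("!roll 2d6")      // -> ("!roll", "2d6")
  val (cmd2, args) = parseArguments("!roll 2 d6") // -> ("!roll", List("2", "d6"))
  cmd == "!roll" && arg == "2d6" && cmd2 == "!roll" && args == List("2", "d6")
}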
}
|
digicyc/SGir
|
src/main/scala/commands/Command.scala
|
Scala
|
mit
| 698 |
package com.github.gdefacci.briscola.web.util
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.server.ServerConnector
import org.eclipse.jetty.util.component.LifeCycle
import org.eclipse.jetty.webapp.WebAppContext
import org.eclipse.jetty.websocket.jsr356.server.deploy.WebSocketServerContainerInitializer
import com.github.gdefacci.briscola.web.util.ServletContextPlanAdder.toServletPlanAdder
import javax.servlet.ServletContext
import javax.websocket.server.ServerContainer
case class JettyWebAppConfig(port: Int, context: WebAppContext, configurator: Containerconfigurator)
object JettyWebAppConfig {
def defaultWebAppContext() = {
val basePath = "src/main/webapp"
val context = new WebAppContext();
context.setResourceBase(basePath);
context.setInitParameter("org.eclipse.jetty.servlet.Default.dirAllowed", "true");
context.setInitParameter("org.eclipse.jetty.servlet.Default.useFileMappedBuffer", "false");
context
}
def apply(port:Int, contextPath:String, plan:ServletPlan, plans:ServletPlan*):JettyWebAppConfig = {
val allPlans = plan +: plans
apply(port, contextPath, allPlans)
}
def apply(port:Int, contextPath:String, allPlans:Seq[ServletPlan]):JettyWebAppConfig = {
val context = JettyWebAppConfig.defaultWebAppContext()
context.setContextPath((if (contextPath.startsWith("/")) "" else "/") + contextPath);
JettyWebAppConfig(port, context, new Containerconfigurator {
def configureWerbSockets(container: ServerContainer) = {}
def configureWeb(context: ServletContext) = {
allPlans.foreach(context.addPlan(_))
}
})
}
}
object JettyServerFactory {
def createServers(jettyConfig: => JettyWebAppConfig): (Server, org.eclipse.jetty.websocket.jsr356.server.ServerContainer) = {
val server = new Server();
val connector = new ServerConnector(server);
connector.setPort(jettyConfig.port);
server.addConnector(connector);
server.setStopAtShutdown(true)
val context = jettyConfig.context
server.setHandler(context);
val wscontainer = WebSocketServerContainerInitializer.configureContext(context);
wscontainer.setDefaultMaxSessionIdleTimeout(0)
context.addLifeCycleListener(new LifeCycle.Listener {
def lifeCycleFailure(l: org.eclipse.jetty.util.component.LifeCycle, err: Throwable): Unit = {}
def lifeCycleStarted(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {}
def lifeCycleStarting(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {
jettyConfig.configurator.configureWeb(context.getServletContext)
jettyConfig.configurator.configureWerbSockets(wscontainer)
}
def lifeCycleStopped(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {
}
def lifeCycleStopping(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {}
})
server -> wscontainer
}
def createWebServer(jettyConfig: => JettyWebAppConfig): Server = {
val server = new Server();
val connector = new ServerConnector(server);
connector.setPort(jettyConfig.port);
server.addConnector(connector);
server.setStopAtShutdown(true)
val context = jettyConfig.context
server.setHandler(context);
context.addLifeCycleListener(new LifeCycle.Listener {
def lifeCycleFailure(l: org.eclipse.jetty.util.component.LifeCycle, err: Throwable): Unit = {}
def lifeCycleStarted(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {}
def lifeCycleStarting(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {
jettyConfig.configurator.configureWeb(context.getServletContext)
}
def lifeCycleStopped(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {
}
def lifeCycleStopping(l: org.eclipse.jetty.util.component.LifeCycle): Unit = {}
})
server
}
}
|
gdefacci/briscola
|
ddd-briscola-web/src/main/scala/com/github/gdefacci/briscola/web/util/jettyUtil.scala
|
Scala
|
bsd-3-clause
| 3,869 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jawa.flow.summary.model
import org.argus.jawa.flow.pta._
import org.argus.jawa.flow.pta.rfa.RFAFact
import org.argus.jawa.core.elements.JawaType
/**
* Created by fgwei on 6/15/17.
*/
class SetSuTest extends SuTestBase("Set.safsu") {
"Ljava/util/Set;.add:(Ljava/lang/Object;)Z" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2)),
RFAFact(VarSlot("v1"), PTAConcreteStringInstance("String", defContext3))
) produce (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext3)),
RFAFact(VarSlot("v1"), PTAConcreteStringInstance("String", defContext3))
)
"Ljava/util/Set;.clear:()V" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
) produce RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext))
"Ljava/util/Set;.clone:()Ljava/lang/Object;" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
) produce (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2)),
RFAFact(VarSlot("temp"), PTAInstance(new JawaType("java.util.Set"), defContext))
)
"Ljava/util/Set;.contains:(Ljava/lang/Object;)Z" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
) produce (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
)
"Ljava/util/Set;.isEmpty:()Z" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
) produce (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
)
"Ljava/util/Set;.iterator:()Ljava/util/Iterator;" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
) produce (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2)),
RFAFact(VarSlot("temp"), PTAInstance(new JawaType("java.util.Iterator").toUnknown, currentContext))
)
"Ljava/util/Set;.remove:(Ljava/lang/Object;)Z" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext3)),
RFAFact(VarSlot("v1"), PTAConcreteStringInstance("String", defContext3))
) produce (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2)),
RFAFact(VarSlot("v1"), PTAConcreteStringInstance("String", defContext3))
)
"Ljava/util/Set;.size:()I" with_input (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
) produce (
RFAFact(VarSlot("v0"), PTAInstance(new JawaType("java.util.Set"), defContext)),
RFAFact(FieldSlot(PTAInstance(new JawaType("java.util.Set"), defContext), "items"), PTAConcreteStringInstance("String", defContext2))
)
}
|
arguslab/Argus-SAF
|
jawa/src/test/scala/org/argus/jawa/flow/summary/model/SetSuTest.scala
|
Scala
|
apache-2.0
| 5,390 |
package io.github.ptitjes.scott.api
import io.github.ptitjes.scott.utils.Trie
/**
* @author Didier Villevalois
*/
case class TagSet(tags: IndexedSeq[String]) {
private val stringToCode = Trie[Int]() ++ tags.zipWithIndex
private val maxLength = tags.map(_.length).reduce(math.max)
def size: Int = tags.size
def apply(i: Int): String = tags(i)
def apply(s: String): Int = stringToCode(s).get
def padded(i: Int) = {
val tag = tags(i)
tag + " " * (maxLength - tag.length)
}
}
|
ptitjes/scott
|
scott-core/src/main/scala/io/github/ptitjes/scott/api/TagSet.scala
|
Scala
|
gpl-3.0
| 492 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.parsers.FileParsers.CSV
import java.io.{ BufferedWriter, File, FileWriter, StringReader }
import com.wegtam.tensei.adt.ElementReference
import com.wegtam.tensei.agent.DataTreeDocument.DataTreeDocumentMessages
import com.wegtam.tensei.agent.XmlActorSpec
import org.xml.sax.InputSource
class ComplexCSVTest extends XmlActorSpec {
describe("FileParser") {
describe("CSV") {
describe("when given a simple csv with a basic complex description") {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01.csv"
val dfasdlFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01.xml"
it("should create the correct source structure") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
}
}
describe("when given a simple csv with a basic complex description using a fixseq") {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01.csv"
val dfasdlFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01-with-fixseq.xml"
it("should create the correct source structure") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01-with-fixseq-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-01-with-fixseq-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
compareSequenceData("firstname", expectedDataTree, dataTree)
compareSequenceData("lastname", expectedDataTree, dataTree)
compareSequenceData("email", expectedDataTree, dataTree)
compareSequenceData("birthday", expectedDataTree, dataTree)
compareSequenceData("phone", expectedDataTree, dataTree)
compareSequenceData("division", expectedDataTree, dataTree)
}
}
describe(
"when given a simple csv including blank lines with a basic complex description using a seq"
) {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-02.csv"
val dfasdlFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-02-with-seq.xml"
it("should create the correct source structure") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-02-with-seq-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-02-with-seq-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
withClue("It should parse all data including blank lines.") {
dataTree ! DataTreeDocumentMessages.GetSequenceRowCount(
ElementReference("MY-DFASDL", "account_list")
)
val parsedRows = expectMsgType[DataTreeDocumentMessages.SequenceRowCount]
parsedRows.rows.getOrElse(0L) should be(10L)
}
compareSimpleDataNodes(expectedDataTree, dataTree)
compareChoiceInSequence("account_list", expectedDataTree, dataTree)
}
}
describe(
"when given a simple csv including spaces with a basic complex description using a seq and the trim attribute"
) {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-03.csv"
val dfasdlFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-03-with-seq.xml"
it("should create the correct source structure") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-03-with-seq-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/complex-03-with-seq-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
compareSequenceData("firstname", expectedDataTree, dataTree)
compareSequenceData("lastname", expectedDataTree, dataTree)
compareSequenceData("email", expectedDataTree, dataTree)
compareSequenceData("birthday", expectedDataTree, dataTree)
compareSequenceData("phone", expectedDataTree, dataTree)
compareSequenceData("division", expectedDataTree, dataTree)
}
}
describe("when given a simple csv with a simple choice") {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-01.csv"
val dfasdlFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-01.xml"
it("should create the correct source structure") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-01-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-01-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
}
}
describe("when given a simple csv with a simple choice within a sequence") {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-02.csv"
val dfasdlFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-02.xml"
it("should create the correct source structure") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-02-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/choice-02-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
}
}
describe("when given a simple csv with a stacked sequence using sequence stop signs") {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/sequence-stop-sign-01.csv"
val dfasdlFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/sequence-stop-sign-01.xml"
it("should create the correct source structure with the last column empty") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/sequence-stop-sign-01-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/sequence-stop-sign-01-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
dataTree ! DataTreeDocumentMessages.GetSequenceRowCount(
ElementReference("MY-DFASDL", "columns")
)
val dataColumnCounter = expectMsgType[DataTreeDocumentMessages.SequenceRowCount]
dataColumnCounter.rows.getOrElse(0L) should be(9)
compareStackedSequenceData("data", "columns", expectedDataTree, dataTree)
}
}
describe("when given a CSV with empty columns") {
describe("with one empty column at the end of a line") {
val dataFile = "/com/wegtam/tensei/agent/parsers/FileParsers/CSV/empty-column-at-end.csv"
val dfasdlFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/empty-column-at-end.xml"
it("should create the correct source structure with the last column empty") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/empty-column-at-end-expected-structure.xml"
val r = prepareFileParserStructureComparison(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/empty-column-at-end-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparison(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
compareSequenceData("entry-0", expectedDataTree, dataTree)
compareSequenceData("entry-1", expectedDataTree, dataTree)
compareSequenceData("entry-2", expectedDataTree, dataTree)
compareSequenceData("entry-3", expectedDataTree, dataTree)
compareSequenceData("entry-4", expectedDataTree, dataTree)
compareSequenceData("entry-5", expectedDataTree, dataTree)
compareSequenceData("entry-6", expectedDataTree, dataTree)
compareSequenceData("entry-7", expectedDataTree, dataTree)
compareSequenceData("entry-8", expectedDataTree, dataTree)
}
}
describe("with one empty column at the end of a line and in DOS mode") {
val tempFile = File.createTempFile("tensei-agent", "test")
val bw = new BufferedWriter(new FileWriter(tempFile))
bw.write("0,211,Ozkan,Douglas,,647,EGZKSobTeknHCbLuHczvWmhTmCSGXD,OFFICE7152,")
      bw.write("\r\n")
bw.write(
"1,413,Suer,Candice,,314,OfOBVvpzNvHCebxyuxXFwsMju JRU,OFFICE8586,(344) 999-2652"
)
      bw.write("\r\n")
bw.write("2,246,Somisetty,Jami,P,534,rAHWYkktOXAyPAYHlncZPG,,(984) 538-5366")
      bw.write("\r\n")
bw.write(
"3,248,Mazurek,Rosalinda,J,364,TJQqsUQQGqWG QleLheUoYlgRNVT,OFFICE8487,(860) 037-6897"
)
      bw.write("\r\n")
bw.close()
      val dataFile = tempFile.getAbsolutePath.replace("\\", "/")
val dfasdlFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/empty-column-at-end.xml"
it("should create the correct source structure with the last column empty") {
val expectedFile =
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/empty-column-at-end-expected-structure.xml"
val r =
prepareFileParserStructureComparisonForTempFile(dataFile, dfasdlFile, expectedFile)
val expectedNodes = r._1
val actualNodes = r._2
actualNodes.size should be(expectedNodes.size)
compareXmlStructureNodes(expectedNodes, actualNodes)
}
it("should extract the correct data") {
val expectedDataXml = scala.io.Source
.fromInputStream(
getClass.getResourceAsStream(
"/com/wegtam/tensei/agent/parsers/FileParsers/CSV/empty-column-at-end-expected-data.xml"
)
)
.mkString
val expectedDataTree =
createTestDocumentBuilder().parse(new InputSource(new StringReader(expectedDataXml)))
val dataTree = prepareFileParserDataComparisonForTempFile(dataFile, dfasdlFile)
compareSimpleDataNodes(expectedDataTree, dataTree)
compareSequenceData("entry-0", expectedDataTree, dataTree)
compareSequenceData("entry-1", expectedDataTree, dataTree)
compareSequenceData("entry-2", expectedDataTree, dataTree)
compareSequenceData("entry-3", expectedDataTree, dataTree)
compareSequenceData("entry-4", expectedDataTree, dataTree)
compareSequenceData("entry-5", expectedDataTree, dataTree)
compareSequenceData("entry-6", expectedDataTree, dataTree)
compareSequenceData("entry-7", expectedDataTree, dataTree)
compareSequenceData("entry-8", expectedDataTree, dataTree)
}
}
}
}
}
}
| Tensei-Data/tensei-agent | src/test/scala/com/wegtam/tensei/agent/parsers/FileParsers/CSV/ComplexCSVTest.scala | Scala | agpl-3.0 | 17,576 |
/** Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gravity.hbase.mapreduce
import com.gravity.hbase.schema._
import org.apache.hadoop.conf.Configuration
import java.lang.Iterable
import com.gravity.hbase.schema.HbaseTable
import com.gravity.hadoop.GravityTableOutputFormat
import org.apache.hadoop.hbase.io.ImmutableBytesWritable
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.mapreduce.lib.input.{SequenceFileInputFormat, FileInputFormat}
import org.apache.hadoop.mapreduce.lib.output.{SequenceFileOutputFormat, FileOutputFormat}
import scala.collection.JavaConversions._
import org.apache.hadoop.hbase.client.{Scan, Result}
import org.apache.hadoop.hbase.filter.{FilterList, Filter}
import org.apache.hadoop.hbase.util.Base64
import com.gravity.hbase.schema._
import scala.collection.mutable.Buffer
import org.apache.hadoop.io._
import java.io.{DataInputStream, ByteArrayInputStream, ByteArrayOutputStream}
import org.apache.hadoop.hbase.mapreduce.{MultiTableOutputFormat, TableInputFormat}
import org.joda.time.DateTime
import scala.collection._
import org.apache.hadoop.mapreduce.{Job, Partitioner, Reducer, Mapper}
import org.apache.hadoop.mapred.JobConf
/* )\\._.,--....,'``.
.b--. /; _.. \\ _\\ (`._ ,.
`=,-,-'~~~ `----(,_..'--(,_..'`-.;.' */
object Settings {
object None extends NoSettings
}
class NoSettings extends SettingsBase {
def hithere() = "Hi"
}
class SettingsBase {
def fromSettings(conf: Configuration) {
}
def toSettings(conf: Configuration) {
}
def jobNameQualifier = ""
}
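// Hedged usage sketch (not part of the original source): a hypothetical settings class
// that round-trips one value through the Hadoop Configuration via the fromSettings /
// toSettings hooks above. The class name and configuration key are illustrative only.
//
// class ExportSettings extends SettingsBase {
//   var outputDir: String = ""
//   override def fromSettings(conf: Configuration) { outputDir = conf.get("example.output.dir", "") }
//   override def toSettings(conf: Configuration) { conf.set("example.output.dir", outputDir) }
//   override def jobNameQualifier = outputDir
// }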
///*
//Experimental support for a non declarative constructor for hjobs.
// */
//class HJobND[S <: SettingsBase](name: String) {
// val tasks = Buffer[HTask[_, _, _, _, S]]()
//
// def addMR(name:String) {
// HMapReduceTask(
// HTaskID(name),
// HTaskConfigs(),
// HIO(),
//
// )
//
// tasks += task
// }
//
//}
abstract class HTaskConfigsBase {
def configs: Seq[HConfigLet]
def init(settings: SettingsBase) {
}
}
case class HTaskSettingsConfigs[S <: SettingsBase](configMaker: (S) => Seq[HConfigLet]) extends HTaskConfigsBase {
var configs: Seq[HConfigLet] = _
override def init(settings: SettingsBase) {
configs = configMaker(settings.asInstanceOf[S])
}
}
/*
Holds a list of configuration objects. Each object should encapsulate a particular set of configuration options (for example, whether or not to reuse the JVM)
*/
case class HTaskConfigs(configs: HConfigLet*) extends HTaskConfigsBase {
}
/*
The base class for a single configuration object.
*/
abstract class HConfigLet() {
def configure(job: Job) {
}
}
case class ReducerCountConf(reducers: Int = 1) extends HConfigLet {
override def configure(job: Job) {
job.setNumReduceTasks(reducers)
}
}
case class SpeculativeExecutionConf(on: Boolean = false) extends HConfigLet {
override def configure(job: Job) {
if (!on) {
job.getConfiguration.set("mapred.map.tasks.speculative.execution", "false")
job.getConfiguration.set("mapred.reduce.tasks.speculative.execution", "false")
}
}
}
case class ReuseJVMConf(reuse: Boolean = true) extends HConfigLet {
override def configure(job: Job) {
if (reuse) {
job.getConfiguration.setInt("mapred.job.reuse.jvm.num.tasks", -1)
}
}
}
case class BigMemoryConf(mapMemoryMB: Int, reduceMemoryMB: Int, mapBufferMB: Int = 800, reduceBufferMB: Int = 800) extends HConfigLet {
override def configure(job: Job) {
val memory = mapMemoryMB
val reducememory = reduceMemoryMB
job.getConfiguration.set("mapred.map.child.java.opts", "-Xmx" + memory + "m" + " -Xms" + memory + "m")
// conf.set("mapred.map.child.java.opts", "-Xmx" + memory + "m")
job.getConfiguration.set("mapred.reduce.child.java.opts", "-Xmx" + reducememory + "m")
job.getConfiguration.setInt("mapred.job.map.memory.mb", memory + mapBufferMB)
job.getConfiguration.setInt("mapred.job.reduce.memory.mb", reducememory + reduceBufferMB)
}
}
case class LongRunningJobConf(timeoutInSeconds: Int) extends HConfigLet {
override def configure(job: Job) {
job.getConfiguration.setInt("mapred.task.timeout", timeoutInSeconds)
}
}
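// Hedged sketch: composing the configlets above into an HTaskConfigs value as it would be
// handed to a task. The concrete numbers are placeholders.
//
// val exampleConfigs = HTaskConfigs(
//   ReducerCountConf(reducers = 8),
//   SpeculativeExecutionConf(on = false),
//   ReuseJVMConf(reuse = true),
//   BigMemoryConf(mapMemoryMB = 2048, reduceMemoryMB = 3072),
//   LongRunningJobConf(timeoutInSeconds = 1800)
// )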
object HJob {
def job(name: String) = new HJobBuilder(name)
}
class HJobBuilder(name: String) {
private val tasks = Buffer[HTask[_, _, _, _]]()
def withTask(task: HTask[_, _, _, _]) = {
tasks += task
this
}
def build[S <: SettingsBase] = new HJob[S](name, tasks: _*)
}
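// Hedged sketch of the builder flow above, assuming `someTask` is an HTask constructed
// elsewhere (for example an HMapReduceTask); the job name is a placeholder.
//
// val exampleJob = HJob.job("example-job").withTask(someTask).build[NoSettings]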
/*
object HTask {
def task(name:String) = new HTaskBuilder(name)
}
class HTaskBuilder(name: String) {
var previousTaskName : String = _
var previousTask : HTask[_,_,_,_] = _
var mapper: HMapper[_,_,_,_] = _
var reducer : HReducer[_,_,_,_] = _
var input :HInput = _
var output : HOutput = _
var configlets = Buffer[HConfigLet]()
def mapFromTable[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table:HbaseTable[T,R,RR])(families: FamilyExtractor[T, _, _, _, _]*)(tmapper:FromTableBinaryMapper[T,R,RR]) = {
input = HTableInput(table.asInstanceOf[T],Families[T](families:_*))
mapper = tmapper
this
}
def withConfigs(configs:HConfigLet*) = {
configs.foreach{config=> configlets += config}
this
}
def build = {
val taskId = if(previousTaskName != null) HTaskID(name,previousTaskName) else if(previousTask != null) HTaskID(name,requiredTask=previousTask) else HTaskID(name)
val hio = if(input != null && output != null) HIO(input,output) else if(input != null && output == null) HIO(input) else HIO()
val finalConfigs = HTaskConfigs(configlets:_*)
if(reducer != null && mapper != null) {
HMapReduceTask(
taskId,
finalConfigs,
hio,
mapper,
reducer
)
}else if(mapper != null && reducer == null) {
HMapTask(
taskId,
finalConfigs,
hio,
mapper
)
}else {
throw new RuntimeException("Must specify at least a mapper function")
}
}
}
*/
case class JobPriority(name: String)
object JobPriorities {
val VERY_LOW = JobPriority("VERY_LOW")
val LOW = JobPriority("LOW")
val NORMAL = JobPriority("NORMAL")
val HIGH = JobPriority("HIGH")
val VERY_HIGH = JobPriority("VERY_HIGH")
}
/**
* A job encompasses a series of tasks that cooperate to build output. Each task is usually an individual map or map/reduce operation.
*
* To use the job, create a class with a parameterless constructor that inherits HJob, and pass the tasks into the constructor as a sequence.
*/
class HJob[S <: SettingsBase](val name: String, tasks: HTask[_, _, _, _]*) {
type RunResult = (Boolean, Seq[(HTask[_, _, _, _], Job)], mutable.Map[String, DateTime], mutable.Map[String, DateTime])
def run(settings: S, conf: Configuration, dryRun: Boolean = false, skipToTask: String = null, priority: JobPriority = JobPriorities.NORMAL): RunResult = {
require(tasks.size > 0, "HJob requires at least one task to be defined")
conf.setStrings("hpaste.jobchain.jobclass", getClass.getName)
var previousTask: HTask[_, _, _, _] = null
def taskByName(name: String) = tasks.find(_.taskId.name == name)
def getPreviousTask(task: HTask[_, _, _, _]) = {
if (task.taskId.previousTaskName != null) {
try {
taskByName(task.taskId.previousTaskName).get
} catch {
case ex: Exception => {
println("WARNING: Task " + task.taskId.name + " specifies previous task " + task.taskId.previousTaskName + " which was not submitted to the job. Make sure you did this intentionally")
null
// throw new RuntimeException("Task " + task.taskId.name + " requires task " + task.taskId.previousTaskName + " which was not submitted to the job")
}
}
} else {
if (!tasks.exists(_.taskId.name == task.taskId.requiredTask.taskId.name)) {
println("WARNING: Task " + task.taskId.name + " specifies previous task " + task.taskId.requiredTask.taskId.name + " which was not submitted to the job. Make sure you did this intentionally")
null
// throw new RuntimeException("Task " + task.taskId.name + " requires task " + task.taskId.requiredTask.taskId.name + " which has not been submitted to the job")
} else {
task.taskId.requiredTask
}
}
}
for (task <- tasks) {
if (task.taskId.previousTaskName != null || task.taskId.requiredTask != null) {
previousTask = getPreviousTask(task)
task.previousTask = previousTask
if (task.previousTask != null) {
previousTask.nextTasks += task
//If there is a previous HTask, then initialize the input of this task as the output of that task.
if (previousTask.hio.output.isInstanceOf[HRandomSequenceOutput[_, _]] && task.hio.input.isInstanceOf[HRandomSequenceInput[_, _]]) {
task.hio.input.asInstanceOf[HRandomSequenceInput[_, _]].previousPath = previousTask.hio.output.asInstanceOf[HRandomSequenceOutput[_, _]].path
}
}
// task.hio.input = previousTask.hio.output
}
}
var idx = 0
def makeJob(task: HTask[_, _, _, _]) = {
val taskConf = new Configuration(conf)
taskConf.set("mapred.job.priority", priority.name)
taskConf.setInt("hpaste.jobchain.mapper.idx", idx)
taskConf.setInt("hpaste.jobchain.reducer.idx", idx)
settings.toSettings(taskConf)
taskConf.set("hpaste.settingsclass", settings.getClass.getName)
task.configure(taskConf, previousTask)
val job = task.makeJob(previousTask, settings)
job.setJarByClass(getClass)
if (settings.jobNameQualifier.length > 0) {
job.setJobName(name + " : " + task.taskId.name + " (" + (idx + 1) + " of " + tasks.size + ")" + " [" + settings.jobNameQualifier + "]")
}
else {
job.setJobName(name + " : " + task.taskId.name + " (" + (idx + 1) + " of " + tasks.size + ")")
}
previousTask = task
idx = idx + 1
job
}
    def declare(tasks: Seq[HTask[_, _, _, _]], level: String = "\t") {
      tasks.map {
        task =>
          println(level + "Task: " + task.taskId.name)
          println(level + "\twill run after " + (if (task.previousTask == null) "nothing" else task.previousTask.taskId.name))
          println(level + "Input: " + task.hio.input)
          println(level + "Output: " + task.hio.output)
          declare(task.nextTasks, level + "\t")
}
}
val taskJobBuffer = Buffer[(HTask[_, _, _, _], Job)]()
val taskStartTimes = mutable.Map[String, DateTime]()
val taskEndTimes = mutable.Map[String, DateTime]()
def runrecursively(tasks: Seq[HTask[_, _, _, _]]): RunResult = {
val jobs = tasks.map {
task =>
if (skipToTask != null && task.taskId.name != skipToTask) {
println("Skipping task: " + task.taskId.name + " because we're skipping to : " + skipToTask)
None
} else {
val job = makeJob(task)
taskJobBuffer.add((task, job))
Some(job)
}
}.flatten
jobs.foreach {
job =>
taskStartTimes(job.getJobName) = new DateTime()
          val result = job.waitForCompletion(true)
          taskEndTimes(job.getJobName) = new DateTime()
          if (!result) {
            return (false, taskJobBuffer, taskStartTimes, taskEndTimes)
          }
}
if (jobs.exists(_.isSuccessful == false)) {
(false, taskJobBuffer, taskStartTimes, taskEndTimes)
} else {
val nextTasks = tasks.flatMap(_.nextTasks)
if (nextTasks.size == 0) {
(true, taskJobBuffer, taskStartTimes, taskEndTimes)
} else {
runrecursively(nextTasks)
}
}
}
val firstTasks = tasks.filter(_.previousTask == null)
println("Job: " + name + " has " + tasks.size + " tasks")
declare(firstTasks)
if (!dryRun) {
runrecursively(firstTasks)
} else {
(true, taskJobBuffer, taskStartTimes, taskEndTimes)
}
}
def getMapperFunc[MK, MV, MOK, MOV](idx: Int) = {
val task = tasks(idx)
if (task.isInstanceOf[HMapReduceTask[MK, MV, MOK, MOV, _, _]]) {
val tk = task.asInstanceOf[HMapReduceTask[MK, MV, MOK, MOV, _, _]]
tk.mapper
}
else if (task.isInstanceOf[HMapTask[MK, MV, MOK, MOV]]) {
val tk = task.asInstanceOf[HMapTask[MK, MV, MOK, MOV]]
tk.mapper
} else {
throw new RuntimeException("Unable to find mapper for index " + idx)
}
}
def getReducerFunc[MOK, MOV, ROK, ROV](idx: Int) = {
val task = tasks(idx)
if (task.isInstanceOf[HMapReduceTask[_, _, MOK, MOV, ROK, ROV]]) {
val tk = task.asInstanceOf[HMapReduceTask[_, _, MOK, MOV, ROK, ROV]]
tk.reducer
} else {
throw new RuntimeException("Unable to find reducer for index " + idx)
}
}
}
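// Hedged sketch of the pattern described in the HJob scaladoc above: a job class with a
// parameterless constructor that passes its tasks to the HJob constructor. `taskA` and
// `taskB` are hypothetical HTask values defined elsewhere.
//
// class ExampleChainJob extends HJob[NoSettings]("example chain", taskA, taskB)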
/**
* Base class for initializing the input to an HJob
*/
abstract class HInput {
def init(job: Job, settings: SettingsBase)
}
/**
* Base class for initializing the output from an HJob
*/
abstract class HOutput {
def init(job: Job, settings: SettingsBase)
}
case class Columns[T <: HbaseTable[T, _, _]](columns: ColumnExtractor[T, _, _, _, _]*)
case class Families[T <: HbaseTable[T, _, _]](families: FamilyExtractor[T, _, _, _, _]*)
case class Filters[T <: HbaseTable[T, _, _]](filters: Filter*)
case class HTableQuery[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R], S <: SettingsBase](query: Query2[T, R, RR], cacheBlocks: Boolean = false, maxVersions: Int = 1, cacheSize: Int = 100) extends HInput {
override def toString = "Input: From table query"
override def init(job: Job, settings: SettingsBase) {
val thisQuery = query
val scanner = thisQuery.makeScanner(maxVersions, cacheBlocks, cacheSize)
job.getConfiguration.set("mapred.map.tasks.speculative.execution", "false")
val bas = new ByteArrayOutputStream()
val dos = new PrimitiveOutputStream(bas)
scanner.write(dos)
job.getConfiguration.set(TableInputFormat.SCAN, Base64.encodeBytes(bas.toByteArray))
job.getConfiguration.set(TableInputFormat.INPUT_TABLE, thisQuery.table.tableName)
job.getConfiguration.setInt(TableInputFormat.SCAN_CACHEDROWS, cacheSize)
job.setInputFormatClass(classOf[TableInputFormat])
}
}
case class HTableSettingsQuery[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R], S <: SettingsBase](query: (S) => Query2[T, R, RR], cacheBlocks: Boolean = false, maxVersions: Int = 1, cacheSize: Int = 100) extends HInput {
override def toString = "Input: From table query"
override def init(job: Job, settings: SettingsBase) {
val thisQuery = query(settings.asInstanceOf[S])
val scanner = thisQuery.makeScanner(maxVersions, cacheBlocks, cacheSize)
job.getConfiguration.set("mapred.map.tasks.speculative.execution", "false")
val bas = new ByteArrayOutputStream()
val dos = new PrimitiveOutputStream(bas)
scanner.write(dos)
job.getConfiguration.set(TableInputFormat.SCAN, Base64.encodeBytes(bas.toByteArray))
job.getConfiguration.set(TableInputFormat.INPUT_TABLE, thisQuery.table.tableName)
job.getConfiguration.setInt(TableInputFormat.SCAN_CACHEDROWS, cacheSize)
job.setInputFormatClass(classOf[TableInputFormat])
}
}
/**
* Initializes input from an HPaste Table
*/
case class HTableInput[T <: HbaseTable[T, _, _]](table: T, families: Families[T] = Families[T](), columns: Columns[T] = Columns[T](), filters: Seq[Filter] = Seq(), scan: Scan = new Scan(), scanCache: Int = 100) extends HInput {
  override def toString = "Input: From table: \"" + table.tableName + "\""
override def init(job: Job, settings: SettingsBase) {
println("Setting input table to: " + table.tableName)
//Disabling speculative execution because it is never useful for a table input.
job.getConfiguration.set("mapred.map.tasks.speculative.execution", "false")
val scanner = scan
scanner.setCacheBlocks(false)
scanner.setCaching(scanCache)
scanner.setMaxVersions(1)
columns.columns.foreach {
col =>
val column = col(table)
scanner.addColumn(column.familyBytes, column.columnBytes)
}
families.families.foreach {
fam =>
val family = fam(table)
scanner.addFamily(family.familyBytes)
}
if (filters.size > 0) {
val filterList = new FilterList(FilterList.Operator.MUST_PASS_ALL)
filters.foreach {filter => filterList.addFilter(filter)}
scanner.setFilter(filterList)
}
val bas = new ByteArrayOutputStream()
val dos = new PrimitiveOutputStream(bas)
scanner.write(dos)
job.getConfiguration.set(TableInputFormat.SCAN, Base64.encodeBytes(bas.toByteArray))
job.getConfiguration.set(TableInputFormat.INPUT_TABLE, table.tableName)
job.getConfiguration.setInt(TableInputFormat.SCAN_CACHEDROWS, scanCache)
job.setInputFormatClass(classOf[TableInputFormat])
}
}
/**
* Initializes input from a series of paths.
*/
case class HPathInput(paths: Seq[String]) extends HInput {
override def toString = "Input: Paths: " + paths.mkString("{", ",", "}")
override def init(job: Job, settings: SettingsBase) {
paths.foreach(path => {
FileInputFormat.addInputPath(job, new Path(path))
})
}
}
/** Allows the output to be written to multiple tables. Currently the list of tables passed in is
* purely for documentation. There is no check in the output that will keep you from writing to other tables.
*/
case class HMultiTableOutput(writeToTransactionLog: Boolean, tables: HbaseTable[_, _, _]*) extends HOutput {
override def toString = "Output: The following tables: " + tables.map(_.tableName).mkString("{", ",", "}")
override def init(job: Job, settings: SettingsBase) {
if (!writeToTransactionLog) {
job.getConfiguration.setBoolean(MultiTableOutputFormat.WAL_PROPERTY, MultiTableOutputFormat.WAL_OFF)
}
job.getConfiguration.set("mapred.reduce.tasks.speculative.execution", "false")
job.setOutputFormatClass(classOf[MultiTableOutputFormat])
}
}
/**
* Outputs to an HPaste Table
*/
case class HTableOutput[T <: HbaseTable[T, _, _]](table: T) extends HOutput {
override def toString = "Output: Table: " + table.tableName
override def init(job: Job, settings: SettingsBase) {
println("Initializing output table to: " + table.tableName)
job.getConfiguration.set("mapred.reduce.tasks.speculative.execution", "false")
job.getConfiguration.set(GravityTableOutputFormat.OUTPUT_TABLE, table.tableName)
job.setOutputFormatClass(classOf[GravityTableOutputFormat[ImmutableBytesWritable]])
}
}
/**
* Outputs to an HDFS directory
*/
case class HPathOutput(path: String) extends HOutput {
override def toString = "Output: File: " + path
override def init(job: Job, settings: SettingsBase) {
FileSystem.get(job.getConfiguration).delete(new Path(path), true)
FileOutputFormat.setOutputPath(job, new Path(path))
}
}
/**
* This is the input to a task that is in the middle of a job.
* It reads from the output of the previous task.
*/
case class HRandomSequenceInput[K, V]() extends HInput {
var previousPath: Path = _
override def toString = "Input: Random Sequence File at " + previousPath.toUri.toString
override def init(job: Job, settings: SettingsBase) {
FileInputFormat.addInputPath(job, previousPath)
job.setInputFormatClass(classOf[SequenceFileInputFormat[K, V]])
}
}
/**
* Specifies the input to be a sequence file or files given the specified paths.
* @param paths Paths for files
* @tparam K Key class of input
* @tparam V Value class of input
*/
case class HSequenceInput[K,V](paths:Seq[String]) extends HInput {
override def toString = "Input: Sequence Files at: " + paths.mkString("{",",","}")
override def init(job: Job, settings: SettingsBase) {
paths.foreach(path => {
FileInputFormat.addInputPath(job, new Path(path))
})
job.setInputFormatClass(classOf[SequenceFileInputFormat[K, V]])
}
}
/**
 * Output will be to a sequence file or files at the specified path.
 * @param seqPath Function that derives the output path from the job settings
 * @tparam K Key class of output
 * @tparam V Value class of output
 * @tparam S Settings class used to resolve the output path
 */
case class HSequenceSettingsOutput[K : Manifest,V : Manifest, S <: SettingsBase](seqPath:(S)=>String) extends HOutput {
override def toString = "Output: Sequence File"
override def init(job: Job, settings: SettingsBase) {
val path = new Path(seqPath(settings.asInstanceOf[S]))
FileSystem.get(job.getConfiguration).delete(path, true)
job.setOutputFormatClass(classOf[SequenceFileOutputFormat[K, V]])
FileOutputFormat.setOutputPath(job, path)
}
}
/**
 * Output will be to a sequence file or files at the specified path.
 * @param seqPath Path for the output sequence file(s)
 * @tparam K Key class of output
 * @tparam V Value class of output
 */
case class HSequenceOutput[K : Manifest,V : Manifest](seqPath:String) extends HOutput {
override def toString = "Output: Sequence File at " + path.toUri.toString
var path = new Path(seqPath)
override def init(job: Job, settings: SettingsBase) {
FileSystem.get(job.getConfiguration).delete(path, true)
job.setOutputFormatClass(classOf[SequenceFileOutputFormat[K, V]])
FileOutputFormat.setOutputPath(job, path)
}
}
/**
* This is the output from a task in the middle of a job. It writes to a sequence temp file
*/
case class HRandomSequenceOutput[K, V]() extends HOutput {
override def toString = "Output: Random Sequence File at " + path.toUri.toString
var path = new Path(genTmpFile)
override def init(job: Job, settings: SettingsBase) {
job.setOutputFormatClass(classOf[SequenceFileOutputFormat[K, V]])
FileOutputFormat.setOutputPath(job, path)
}
}
case class HIO[IK, IV, OK, OV](var input: HInput = HRandomSequenceInput[IK, IV](), var output: HOutput = HRandomSequenceOutput[OK, OV]())
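// Hedged sketch: an explicit HIO pairing a sequence-file input with a table output.
// The path and `someTable` are placeholders, not part of this library.
//
// val exampleIO = HIO(
//   HSequenceInput[BytesWritable, BytesWritable](Seq("/tmp/stage1")),
//   HTableOutput(someTable)
// )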
/**
* This is a single task in an HJob. It is usually a single hadoop job (an HJob being composed of several).
*/
abstract class HTask[IK, IV, OK, OV](val taskId: HTaskID, val configLets: HTaskConfigsBase = HTaskConfigs(), val hio: HIO[IK, IV, OK, OV] = HIO()) {
var configuration: Configuration = _
var previousTask: HTask[_, _, _, _] = _
val nextTasks = Buffer[HTask[_, _, _, _]]()
def configure(conf: Configuration, previousTask: HTask[_, _, _, _]) {
configuration = conf
}
def decorateJob(job: Job)
def makeJob(previousTask: HTask[_, _, _, _], settings: SettingsBase) = {
val job = new Job(configuration)
hio.input.init(job, settings)
hio.output.init(job, settings)
decorateJob(job)
configLets.init(settings)
for (config <- configLets.configs) {
config.configure(job)
}
job
}
}
/** This trait recognizes that mappers and reducers both write key-value pairs, so it allows code to be abstracted across mapper and reducer implementations. It is implemented
  * by HMapper and HReducer, so there should be no need to implement it again. Instead, write convenience functions that use this trait as a self-type.
*
*/
trait MRWritable[OK, OV] {
def write(key: OK, value: OV)
}
trait BinaryWritable {
this: MRWritable[BytesWritable, BytesWritable] =>
def write(keyWriter: (PrimitiveOutputStream) => Unit, valueWriter: (PrimitiveOutputStream) => Unit) {
write(makeWritable(keyWriter), makeWritable(valueWriter))
}
}
/** Can read reducer input composed of BytesWritable
*
*/
trait BinaryReadable {
this: HReducer[BytesWritable, BytesWritable, _, _] =>
def readKey[T](reader: (PrimitiveInputStream) => T) = readWritable(key) {reader}
def perValue(reader: (PrimitiveInputStream) => Unit) {values.foreach {value => readWritable(value)(reader)}}
def makePerValue[T](reader: (PrimitiveInputStream) => T) = values.map {value => readWritable(value)(reader)}
}
/** Can write to a specific table
*
*/
trait ToTableWritable[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]] {
this: MRWritable[NullWritable, Writable] =>
def write(operation: OpBase[T, R]) {
operation.getOperations.foreach {op => write(NullWritable.get(), op)}
}
}
/** Can write to multiple tables
*
*/
trait MultiTableWritable {
this: MRWritable[ImmutableBytesWritable, Writable] =>
val validTableNames: Set[String]
/** Perform a buffered write to one of the tables specified. If the table is not in the specified list, will throw an exception saying so.
*/
def write[T <: HbaseTable[T, R, _], R](operation: OpBase[T, R]) {
if (validTableNames.contains(operation.table.tableName)) {
val tableName = new ImmutableBytesWritable(operation.table.tableName.getBytes("UTF-8"))
operation.getOperations.foreach {op => write(tableName, op)}
} else {
throw new RuntimeException("Attempted to write to table: " + operation.table.tableName + ", when allowed tables are : " + validTableNames.mkString("{", ",", "}"))
}
}
}
abstract class FromTableMapper[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R], MOK, MOV](table: HbaseTable[T, R, RR], outputKey: Class[MOK], outputValue: Class[MOV])
extends HMapper[ImmutableBytesWritable, Result, MOK, MOV] {
def row = table.buildRow(context.getCurrentValue)
}
/** In a map-only job, this covers a table that will write to itself */
abstract class TableSelfMapper[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR]) extends FromTableToTableMapper(table, table)
abstract class FromTableToTableMapper[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R], TT <: HbaseTable[TT, RT, TTRR], RT, TTRR <: HRow[TT, RT]](fromTable: HbaseTable[T, R, RR], toTable: HbaseTable[TT, RT, TTRR])
extends FromTableMapper[T, R, RR, NullWritable, Writable](fromTable, classOf[NullWritable], classOf[Writable]) with ToTableWritable[TT, RT, TTRR] {
}
abstract class FromTableBinaryMapper[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR])
extends FromTableMapper[T, R, RR, BytesWritable, BytesWritable](table, classOf[BytesWritable], classOf[BytesWritable]) with BinaryWritable
abstract class FromTableBinaryMapperFx[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR])
extends FromTableMapper[T, R, RR, BytesWritable, BytesWritable](table, classOf[BytesWritable], classOf[BytesWritable]) with BinaryWritable with DelayedInit {
private var initCode: () => Unit = _
override def delayedInit(body: => Unit) {
initCode = (() => body)
}
def map() {
initCode()
}
}
abstract class GroupByRow[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR])(grouper: (RR, PrimitiveOutputStream) => Unit) extends FromTableBinaryMapper[T, R, RR](table) {
def groupBy(row: RR, extractor: PrimitiveOutputStream) {
grouper(row, extractor)
}
final def map() {
val rr = row
val bos = new ByteArrayOutputStream()
val dataOutput = new PrimitiveOutputStream(bos)
groupBy(rr, dataOutput)
write(new BytesWritable(bos.toByteArray), makeWritable {vw => vw.writeRow(table, rr)})
}
}
abstract class GroupingRowMapper[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR]) extends FromTableBinaryMapper[T, R, RR](table) {
def groupBy(row: RR, extractor: PrimitiveOutputStream)
final def map() {
val rr = row
val bos = new ByteArrayOutputStream()
val dataOutput = new PrimitiveOutputStream(bos)
groupBy(rr, dataOutput)
write(new BytesWritable(bos.toByteArray), makeWritable {vw => vw.writeRow(table, rr)})
}
}
object MRFx {
// def groupBy[T <: HbaseTable[T,R,RR],R,RR <: HRow[T,R]](table:HbaseTable[T,R,RR])(grouper:(RR,PrimitiveOutputStream)=>Unit) =
// new GroupingRowMapperFx[T,R,RR](table,grouper){}
}
//
//object MRFx {
//
// def ftb[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](fromTable: HbaseTable[T, R, RR], mapFx: (RR, FromTableBinaryMapper[T, R, RR]) => Unit) = new FromTableBinaryMapperFx(fromTable, mapFx) {
// }
//
// def fromToTableMR[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R], T2 <: HbaseTable[T2, R2, RR2], R2, RR2 <: HRow[T2, R2]](name: String, prev: String, fromTable: HbaseTable[T, R, RR], toTable: HbaseTable[T2, R2, RR2])(mapFx: (RR, FromTableBinaryMapper[T, R, RR]) => Unit)(reduceFx: (BytesWritable, Iterable[BytesWritable], ToTableBinaryReducer[T2, R2, RR2]) => Unit) = {
// val mrt = new HMapReduceTask(
// HTaskID(name, prev),
// HTaskConfigs(),
// HIO(HTableInput(fromTable.asInstanceOf[T]), HTableOutput(toTable.asInstanceOf[T2])),
// new FromTableBinaryMapper(fromTable) {
// def map() {
// mapFx(row, this)
// }
// },
// new ToTableBinaryReducer(toTable) {
// def reduce() {
// reduceFx(key, values, this)
// }
// }
// ) {}
// mrt
// }
//}
abstract class BinaryToTableReducer[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR])
extends ToTableReducer[T, R, RR, BytesWritable, BytesWritable](table) with BinaryReadable
abstract class ToTableReducer[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R], MOK, MOV](table: HbaseTable[T, R, RR])
extends HReducer[MOK, MOV, NullWritable, Writable] with ToTableWritable[T, R, RR]
abstract class BinaryToMultiTableReducer(tables: HbaseTable[_, _, _]*) extends ToMultiTableReducer[BytesWritable, BytesWritable](tables: _*)
abstract class ToMultiTableReducer[MOK, MOV](tables: HbaseTable[_, _, _]*) extends HReducer[MOK, MOV, ImmutableBytesWritable, Writable] with MultiTableWritable {
val validTableNames = tables.map(_.tableName).toSet
}
abstract class ToTableBinaryReducer[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR])
extends HReducer[BytesWritable, BytesWritable, NullWritable, Writable] with ToTableWritable[T, R, RR] with BinaryReadable
abstract class ToTableBinaryReducerFx[T <: HbaseTable[T, R, RR], R, RR <: HRow[T, R]](table: HbaseTable[T, R, RR])
extends HReducer[BytesWritable, BytesWritable, NullWritable, Writable] with ToTableWritable[T, R, RR] with BinaryReadable with DelayedInit {
private var initCode: () => Unit = _
override def delayedInit(body: => Unit) {
initCode = (() => body)
}
def reduce() {
initCode()
}
}
abstract class TextToBinaryMapper extends HMapper[LongWritable, Text, BytesWritable, BytesWritable] with BinaryWritable {
}
abstract class BinaryMapper extends HMapper[BytesWritable, BytesWritable, BytesWritable, BytesWritable] with BinaryWritable
abstract class BinaryReducer extends HReducer[BytesWritable, BytesWritable, BytesWritable, BytesWritable] with BinaryWritable with BinaryReadable
abstract class BinaryReducerFx extends BinaryReducer with DelayedInit {
private var initCode: () => Unit = _
override def delayedInit(body: => Unit) {
initCode = (() => body)
}
def reduce() {
initCode()
}
}
abstract class BinaryToTextReducerFx extends BinaryToTextReducer with DelayedInit {
private var initCode: () => Unit = _
override def delayedInit(body: => Unit) {
initCode = (() => body)
}
def reduce() {
initCode()
}
}
abstract class BinaryToTextReducer extends HReducer[BytesWritable, BytesWritable, NullWritable, Text] with BinaryReadable {
def writeln(line: String) {write(NullWritable.get(), new Text(line))}
def writetabs(items: Any*) {
val sb = new StringBuilder()
for (item <- items) {
sb.append(item)
      sb.append("\t")
}
write(NullWritable.get(), new Text(sb.toString))
}
}
//case class FromTableMapper[T <: HbaseTable[T, R], R, MOK, MOV, S <: SettingsBase](table: T, tableMapper: (QueryResult[T, R], HMapContext[ImmutableBytesWritable, Result, MOK, MOV, S]) => Unit)
// extends MapperFx[ImmutableBytesWritable, Result, MOK, MOV, S]((ctx: HMapContext[ImmutableBytesWritable, Result, MOK, MOV, S]) => {
// tableMapper(new QueryResult[T, R](ctx.value, table, table.tableName), ctx)
// })
abstract class HMapper[MK, MV, MOK, MOV] extends Mapper[MK, MV, MOK, MOV] with MRWritable[MOK, MOV] {
type SettingsClass <: SettingsBase
var context: Mapper[MK, MV, MOK, MOV]#Context = null
var settings: SettingsClass = _
def onStart() {
}
final override def setup(context: Mapper[MK, MV, MOK, MOV]#Context) {
this.context = context
settings = Class.forName(context.getConfiguration.get("hpaste.settingsclass")).newInstance().asInstanceOf[SettingsClass]
settings.fromSettings(context.getConfiguration)
onStart()
}
override def cleanup(context: Mapper[MK, MV, MOK, MOV]#Context) {
cleanup()
}
def ctr(message: String, count: Long) {counter(message, count)}
def ctr(message: String) {counter(message, 1l)}
def counter(message: String, count: Long) {
context.getCounter("Custom", message).increment(count)
}
def write(key: MOK, value: MOV) {context.write(key, value)}
def key = context.getCurrentKey
def value = context.getCurrentValue
def map()
def cleanup() {}
override def map(key: MK, value: MV, context: Mapper[MK, MV, MOK, MOV]#Context) {
map()
}
}
abstract class HReducer[MOK, MOV, ROK, ROV] extends Reducer[MOK, MOV, ROK, ROV] with MRWritable[ROK, ROV] {
type SettingsClass <: SettingsBase
var context: Reducer[MOK, MOV, ROK, ROV]#Context = null
var settings: SettingsClass = _
def counter(message: String, count: Long) {
context.getCounter("Custom", message).increment(count)
}
def ctr(message: String, count: Long) {counter(message, count)}
def ctr(message: String) {ctr(message, 1l)}
def write(key: ROK, value: ROV) {context.write(key, value)}
def key = context.getCurrentKey
def values = context.getValues
override def setup(context: Reducer[MOK, MOV, ROK, ROV]#Context) {
this.context = context
settings = Class.forName(context.getConfiguration.get("hpaste.settingsclass")).newInstance().asInstanceOf[SettingsClass]
settings.fromSettings(context.getConfiguration)
}
override def reduce(key: MOK, values: Iterable[MOV], context: Reducer[MOK, MOV, ROK, ROV]#Context) {
reduce()
}
def reduce()
}
object HMapReduceTask {
def apply[MK, MV, MOK: Manifest, MOV: Manifest, ROK: Manifest, ROV: Manifest](name: String, mapper: HMapper[MK, MV, MOK, MOV], reducer: HReducer[MOK, MOV, ROK, ROV]): HMapReduceTask[MK, MV, MOK, MOV, ROK, ROV] = {
HMapReduceTask(
HTaskID(name),
HTaskConfigs(),
HIO(),
mapper,
reducer
)
}
}
case class HGroupingTask[MK, MV, MOK: Manifest, MOV: Manifest, ROK: Manifest, ROV: Manifest](
id: HTaskID,
configs: HTaskConfigsBase = HTaskConfigs(),
io: HIO[MK, MV, ROK, ROV] = HIO(),
mapper: HMapper[MK, MV, MOK, MOV],
reducer: HReducer[MOK, MOV, ROK, ROV],
partitioner: HPartitioner[MOK, MOV],
groupingComparator: HBinaryComparator,
sortComparator: HBinaryComparator
) extends HTask[MK,MV,ROK,ROV](id,configs,io) {
def decorateJob(job: Job) {
job.setMapperClass(mapper.getClass)
job.setMapOutputKeyClass(classManifest[MOK].erasure)
job.setMapOutputValueClass(classManifest[MOV].erasure)
job.setOutputKeyClass(classManifest[ROK].erasure)
job.setOutputValueClass(classManifest[ROV].erasure)
job.setReducerClass(reducer.getClass)
job.setPartitionerClass(partitioner.getClass)
job.setGroupingComparatorClass(groupingComparator.getClass)
job.setSortComparatorClass(sortComparator.getClass)
}
}
/**
* An HTask that wraps a standard mapper and reducer function.
*/
case class HMapReduceTask[MK, MV, MOK: Manifest, MOV: Manifest, ROK: Manifest, ROV: Manifest](
id: HTaskID,
configs: HTaskConfigsBase = HTaskConfigs(),
io: HIO[MK, MV, ROK, ROV] = HIO(),
mapper: HMapper[MK, MV, MOK, MOV],
reducer: HReducer[MOK, MOV, ROK, ROV],
combiner: HReducer[MOK, MOV, MOK, MOV] = null)
extends HTask[MK, MV, ROK, ROV](id, configs, io) {
def decorateJob(job: Job) {
job.setMapperClass(mapper.getClass)
job.setMapOutputKeyClass(classManifest[MOK].erasure)
job.setMapOutputValueClass(classManifest[MOV].erasure)
job.setOutputKeyClass(classManifest[ROK].erasure)
job.setOutputValueClass(classManifest[ROV].erasure)
job.setReducerClass(reducer.getClass)
if (combiner != null) {
job.setCombinerClass(combiner.getClass)
}
}
}
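// Hedged sketch (placeholder names and paths): assembling an HMapReduceTask from the
// binary convenience base classes defined further down in this file. The mapper copies
// its input through unchanged and the reducer re-emits every value for its key.
//
// val exampleTask = HMapReduceTask(
//   HTaskID("example-mr"),
//   HTaskConfigs(ReducerCountConf(4)),
//   HIO(HPathInput(Seq("/tmp/example-in")), HPathOutput("/tmp/example-out")),
//   new BinaryMapper { def map() { write(key, value) } },
//   new BinaryReducer { def reduce() { values.foreach(v => write(key, v)) } }
// )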
abstract class HBinaryComparator extends RawComparator[BytesWritable] {
// /**Override this for the cheapest comparison */
override def compare(theseBytes: Array[Byte], thisOffset: Int, thisLength: Int, thoseBytes: Array[Byte], thatOffset: Int, thatLength: Int) : Int = {
val thisInput = new ByteArrayInputStream(theseBytes, thisOffset, thisLength)
thisInput.skip(4)
val thatInput = new ByteArrayInputStream(thoseBytes, thatOffset, thatLength)
thatInput.skip(4)
compareBytes(new PrimitiveInputStream(thisInput), new PrimitiveInputStream(thatInput))
}
override def compare(bw:BytesWritable,bw2:BytesWritable) = {
println("Compare called")
0
}
/** Override for a less cheap comparison */
def compareBytes(thisReader: PrimitiveInputStream, thatReader: PrimitiveInputStream) = {
println("Compare bytes called")
0
}
}
//abstract class HBinaryComparator extends WritableComparator(classOf[BytesWritable], true) {
// override def compare(a: WritableComparable[_], b: WritableComparable[_]) = {
// val ab = a.asInstanceOf[BytesWritable]
// val bb = b.asInstanceOf[BytesWritable]
// compareBytes(ab, bb)
// }
//
// def compareBytes(a: BytesWritable, b: BytesWritable): Int = 0
//}
case class HTaskID(name: String, previousTaskName: String = null, requiredTask: HTask[_, _, _, _] = null)
/**
* A Task for a mapper-only job
*/
case class HMapTask[MK, MV, MOK: Manifest, MOV: Manifest](id: HTaskID, configs: HTaskConfigs = HTaskConfigs(), io: HIO[MK, MV, MOK, MOV] = HIO(), mapper: HMapper[MK, MV, MOK, MOV]) extends HTask[MK, MV, MOK, MOV](id, configs, io) {
def decorateJob(job: Job) {
job.setMapperClass(mapper.getClass)
job.setMapOutputKeyClass(classManifest[MOK].erasure)
job.setMapOutputValueClass(classManifest[MOV].erasure)
job.setOutputKeyClass(classManifest[MOK].erasure)
job.setOutputValueClass(classManifest[MOV].erasure)
job.setNumReduceTasks(0)
}
}
/**
* A task for a Mapper / Combiner / Reducer combo
*/
case class HMapCombineReduceTask[MK, MV, MOK: Manifest, MOV: Manifest, ROK, ROV](id: HTaskID, configs: HTaskConfigs = HTaskConfigs(), io: HIO[MK, MV, ROK, ROV] = HIO(), mapper: HMapper[MK, MV, MOK, MOV], combiner: HReducer[MOK, MOV, ROK, ROV], reducer: HReducer[MOK, MOV, ROK, ROV]) extends HTask[MK, MV, ROK, ROV](id, configs, io) {
def decorateJob(job: Job) {
job.setMapperClass(mapper.getClass)
job.setMapOutputKeyClass(classManifest[MOK].erasure)
job.setMapOutputValueClass(classManifest[MOV].erasure)
job.setReducerClass(reducer.getClass)
job.setCombinerClass(combiner.getClass)
}
}
abstract class HPartitioner[MOK, MOV]() extends Partitioner[MOK, MOV] {
override def getPartition(key: MOK, value: MOV, numPartitions: Int): Int = {
0
}
}
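// Hedged sketch: a partitioner that spreads map output keys by hash code instead of
// sending everything to partition 0 as the default above does.
//
// class HashingPartitioner[MOK, MOV] extends HPartitioner[MOK, MOV] {
//   override def getPartition(key: MOK, value: MOV, numPartitions: Int): Int =
//     (key.hashCode & Integer.MAX_VALUE) % numPartitions
// }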
/**
* This is the class that gets loaded by Hadoop as a mapper. It delegates the actual mapper functionality back
* to the HTask that it represents.
*/
//class HMapper[MK, MV, MOK, MOV, S <: SettingsBase] extends Mapper[MK, MV, MOK, MOV] {
//
//
// var mapperFx: MapperFxBase[MK, MV, MOK, MOV, S] = _
//
// var hcontext: HMapContext[MK, MV, MOK, MOV, S] = _
// var context: Mapper[MK, MV, MOK, MOV]#Context = _
//
// var job: HJob[S] = _
//
// def counter(message: String, count: Long) {
// context.getCounter("Custom", message).increment(count)
// }
//
// override def setup(ctx: Mapper[MK, MV, MOK, MOV]#Context) {
// context = ctx
//
// job = Class.forName(context.getConfiguration.get("hpaste.jobchain.jobclass")).newInstance.asInstanceOf[HJob[S]]
// HJobRegistry.job = job
// mapperFx = job.getMapperFunc(context.getConfiguration.getInt("hpaste.jobchain.mapper.idx", -1))
//
// hcontext = new HMapContext[MK, MV, MOK, MOV, S](context.getConfiguration, counter, context)
// }
//
// override def map(key: MK, value: MV, context: Mapper[MK, MV, MOK, MOV]#Context) {
// mapperFx.map(hcontext)
// }
//}
/**
* This is the actual Reducer that gets loaded by Hadoop. It delegates the actual reduce functionality back to the
* HTask that it represents.
*/
//class HReducer[MOK, MOV, ROK, ROV, S <: SettingsBase] extends Reducer[MOK, MOV, ROK, ROV] {
// var hcontext: HReduceContext[MOK, MOV, ROK, ROV, S] = _
// var context: Reducer[MOK, MOV, ROK, ROV]#Context = _
// var reducerFx: ReducerFxBase[MOK, MOV, ROK, ROV, S] = _
//
// var job: HJob[S] = _
//
// def counter(message: String, count: Long) {
// context.getCounter("Custom", message).increment(count)
// }
//
// override def setup(ctx: Reducer[MOK, MOV, ROK, ROV]#Context) {
// context = ctx
//
// job = Class.forName(context.getConfiguration.get("hpaste.jobchain.jobclass")).newInstance().asInstanceOf[HJob[S]]
// HJobRegistry.job = job
//
// reducerFx = job.getReducerFunc(context.getConfiguration.getInt("hpaste.jobchain.reducer.idx", -1))
//
// hcontext = new HReduceContext[MOK, MOV, ROK, ROV, S](context.getConfiguration, counter, context)
// }
//
// override def reduce(key: MOK, values: java.lang.Iterable[MOV], context: Reducer[MOK, MOV, ROK, ROV]#Context) {
// reducerFx.reduce(hcontext)
// }
//}
class TableToBinaryMapContext[T <: HbaseTable[T, R, _], R, S <: SettingsBase](table: T, conf: Configuration, counter: (String, Long) => Unit, context: Mapper[ImmutableBytesWritable, Result, BytesWritable, BytesWritable]#Context)
extends HMapContext[ImmutableBytesWritable, Result, BytesWritable, BytesWritable, S](conf, counter, context) {
def row = new QueryResult[T, R](table.convertResult(context.getCurrentValue), table, table.tableName)
}
/**
* This is the context object for a Map function. It gets passed into the mapper function defined in an HTask.
* It contains simplified functions for writing values and incrementing counters.
*/
class HMapContext[MK, MV, MOK, MOV, S <: SettingsBase](conf: Configuration, counter: (String, Long) => Unit, val context: Mapper[MK, MV, MOK, MOV]#Context) extends HContext[S](conf, counter) {
def key = context.getCurrentKey
def value = context.getCurrentValue
def write(key: MOK, value: MOV) {context.write(key, value)}
}
/**
* This is the context object for a Reduce function. It gets passed into the reducer defined in an HTask.
*/
class HReduceContext[MOK, MOV, ROK, ROV, S <: SettingsBase](conf: Configuration, counter: (String, Long) => Unit, val context: Reducer[MOK, MOV, ROK, ROV]#Context) extends HContext[S](conf, counter) {
def key = context.getCurrentKey
def values = context.getValues
def write(key: ROK, value: ROV) {context.write(key, value)}
}
class ToTableReduceContext[MOK, MOV, T <: HbaseTable[T, R, _], R, S <: SettingsBase](conf: Configuration, counter: (String, Long) => Unit, context: Reducer[MOK, MOV, NullWritable, Writable]#Context) extends HReduceContext[MOK, MOV, NullWritable, Writable, S](conf, counter, context) {
def write(operation: OpBase[T, R]) {
operation.getOperations.foreach {op => write(NullWritable.get(), op)}
}
}
/**
* Base class for contextual objects. It handles the business for initializing a context properly.
*/
class HContext[S <: SettingsBase](val conf: Configuration, val counter: (String, Long) => Unit) {
def apply(message: String, count: Long) {counter(message, count)}
val settings = Class.forName(conf.get("hpaste.settingsclass")).newInstance().asInstanceOf[S]
settings.fromSettings(conf)
}
| bikash/HPaste | src/main/scala/com/gravity/hbase/mapreduce/mapreduce2.scala | Scala | apache-2.0 | 45,969 |
/*******************************************************************************
* Copyright 2010 Maxime Lévesque
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
***************************************************************************** */
package org.squeryl.dsl
import ast._
import boilerplate._
import fsm._
import org.squeryl.internals._
import org.squeryl._
import java.sql.{SQLException, ResultSet}
import collection.mutable.ArrayBuffer
import scala.runtime.NonLocalReturnControl
trait QueryDsl
extends DslFactory
with WhereState[Unconditioned]
with ComputeMeasuresSignaturesFromStartOrWhereState
with StartState
with QueryElements[Unconditioned]
with JoinSignatures
with FromSignatures {
outerQueryDsl =>
def using[A](session: Session)(a: =>A): A =
_using(session, a _)
private def _using[A](session: Session, a: ()=>A): A = {
val s = Session.currentSessionOption
try {
if(s != None) s.get.unbindFromCurrentThread
try {
session.bindToCurrentThread
val r = a()
r
}
finally {
session.unbindFromCurrentThread
session.cleanup
}
}
finally {
if(s != None) s.get.bindToCurrentThread
}
}
def transaction[A](s: Session)(a: =>A) =
_executeTransactionWithin(s, a _)
  /**
   * 'transaction' causes a new transaction to begin and commit after the block's execution, or roll back
   * if an exception occurs. Invoking 'transaction' always causes a new transaction to
   * be created, even if called in the context of an existing transaction.
   */
def transaction[A](a: =>A): A =
if(! Session.hasCurrentSession)
_executeTransactionWithin(SessionFactory.newSession, a _)
else {
val s = Session.currentSession
val res =
try {
s.unbindFromCurrentThread
_executeTransactionWithin(SessionFactory.newSession, a _)
}
finally {
s.bindToCurrentThread
}
res
}
  /**
   * 'inTransaction' will create a new transaction if none is in progress, committing it upon
   * completion or rolling it back on exceptions. If a transaction already exists, it has no
   * effect; the block will execute in the context of the existing transaction, and the
   * commit/rollback is then handled by the parent transaction block.
   */
def inTransaction[A](a: =>A): A =
if(! Session.hasCurrentSession)
_executeTransactionWithin(SessionFactory.newSession, a _)
else {
a
}
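  // Hedged usage sketch (illustrative only) of the two wrappers above: 'transaction'
  // always opens and commits its own transaction, even when nested, while 'inTransaction'
  // joins an enclosing transaction when one exists.
  //
  // inTransaction {
  //   // joins the surrounding transaction if there is one, otherwise opens its own
  //   transaction {
  //     // always runs in a fresh transaction, committed (or rolled back) independently
  //   }
  // }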
private def _executeTransactionWithin[A](s: Session, a: ()=>A) = {
val c = s.connection
if(c.getAutoCommit)
c.setAutoCommit(false)
var txOk = false
try {
val res = _using(s, a)
txOk = true
res
}
catch {
case e:NonLocalReturnControl[_] =>
{
txOk = true
throw e
}
}
finally {
try {
if(txOk)
c.commit
else
c.rollback
}
catch {
case e:SQLException => {
Utils.close(c)
          if(txOk) throw e // if an exception occurred before the commit/rollback we don't want to obscure the original exception
}
}
try{c.close}
catch {
case e:SQLException => {
          if(txOk) throw e // if an exception occurred before the close we don't want to obscure the original exception
}
}
}
}
implicit def __thisDsl:QueryDsl = this
private class QueryElementsImpl[Cond](override val whereClause: Option[()=>LogicalBoolean])
extends QueryElements[Cond]
def where(b: =>LogicalBoolean): WhereState[Conditioned] =
new QueryElementsImpl[Conditioned](Some(b _))
def &[A](i: =>TypedExpressionNode[A]): A =
FieldReferenceLinker.pushExpressionOrCollectValue[A](i _)
implicit def singleColumnQuery2RightHandSideOfIn[A](q: Query[A]) =
new RightHandSideOfIn[A](q.copy(false).ast)
implicit def measureSingleColumnQuery2RightHandSideOfIn[A](q: Query[Measures[A]]) =
new RightHandSideOfIn[A](q.copy(false).ast)
implicit def measureOptionSingleColumnQuery2RightHandSideOfIn[A](q: Query[Measures[Option[A]]]) =
new RightHandSideOfIn[A](q.copy(false).ast)
implicit def groupSingleColumnQuery2RightHandSideOfIn[A](q: Query[Group[A]]) =
new RightHandSideOfIn[A](q.copy(false).ast)
implicit def groupOptionSingleColumnQuery2RightHandSideOfIn[A](q: Query[Group[Option[A]]]) =
new RightHandSideOfIn[A](q.copy(false).ast)
trait SingleRowQuery[R] {
self: Query[R] =>
}
trait SingleColumnQuery[T] {
self: Query[T] =>
}
trait ScalarQuery[T] extends Query[T] with SingleColumnQuery[T] with SingleRowQuery[T]
implicit def scalarQuery2Scalar[T](sq: ScalarQuery[T]) = sq.head
implicit def countQueryableToIntTypeQuery[R](q: Queryable[R]) = new CountSubQueryableQuery(q)
private def _countFunc = count
class CountSubQueryableQuery(q: Queryable[_]) extends Query[LongType] with ScalarQuery[LongType] {
private val _inner:Query[Measures[LongType]] =
from(q)(r => compute(_countFunc))
def iterator = _inner.map(m => m.measures).iterator
def Count: ScalarQuery[LongType] = this
def statement: String = _inner.statement
    // Paginating a Count query makes no sense; perhaps an org.squeryl.internals.Utils.throwError() would be more appropriate here:
def page(offset:Int, length:Int) = this
def distinct = this
def forUpdate = _inner.forUpdate
def dumpAst = _inner.dumpAst
def ast = _inner.ast
protected[squeryl] def invokeYield(rsm: ResultSetMapper, rs: ResultSet) =
_inner.invokeYield(rsm, rs).measures
override private[squeryl] def copy(asRoot:Boolean) = new CountSubQueryableQuery(q)
def name = _inner.name
private[squeryl] def give(rsm: ResultSetMapper, rs: ResultSet) =
q.invokeYield(rsm, rs)
}
implicit def singleColComputeQuery2ScalarQuery[T](cq: Query[Measures[T]]): ScalarQuery[T] = new ScalarMeasureQuery[T](cq)
implicit def singleColComputeQuery2Scalar[T](cq: Query[Measures[T]]) = new ScalarMeasureQuery[T](cq).head
class ScalarMeasureQuery[T](q: Query[Measures[T]]) extends Query[T] with ScalarQuery[T] {
def iterator = q.map(m => m.measures).iterator
def distinct = this
def forUpdate = q.forUpdate
def dumpAst = q.dumpAst
    // TODO: think about this: paginating a Count query makes no sense; perhaps an org.squeryl.internals.Utils.throwError() would be more appropriate here.
def page(offset:Int, length:Int) = this
def statement: String = q.statement
def ast = q.ast
protected[squeryl] def invokeYield(rsm: ResultSetMapper, rs: ResultSet) =
q.invokeYield(rsm, rs).measures
override private[squeryl] def copy(asRoot:Boolean) = new ScalarMeasureQuery(q)
def name = q.name
private[squeryl] def give(rsm: ResultSetMapper, rs: ResultSet) =
q.invokeYield(rsm, rs).measures
}
implicit def queryable2OptionalQueryable[A](q: Queryable[A]) = new OptionalQueryable[A](q)
implicit def view2QueryAll[A](v: View[A]) = from(v)(a=> select(a))
def update[A](t: Table[A])(s: A =>UpdateStatement):Int = t.update(s)
def manyToManyRelation[L <: KeyedEntity[_],R <: KeyedEntity[_],A <: KeyedEntity[_]](l: Table[L], r: Table[R]) = new ManyToManyRelationBuilder(l,r,None)
def manyToManyRelation[L <: KeyedEntity[_],R <: KeyedEntity[_],A <: KeyedEntity[_]](l: Table[L], r: Table[R], nameOfMiddleTable: String) = new ManyToManyRelationBuilder(l,r,Some(nameOfMiddleTable))
class ManyToManyRelationBuilder[L <: KeyedEntity[_], R <: KeyedEntity[_]](l: Table[L], r: Table[R], nameOverride: Option[String]) {
def via[A <: KeyedEntity[_]](f: (L,R,A)=>Pair[EqualityExpression,EqualityExpression])(implicit manifestA: Manifest[A], schema: Schema) = {
val m2m = new ManyToManyRelationImpl(l,r,manifestA.erasure.asInstanceOf[Class[A]], f, schema, nameOverride)
schema._addTable(m2m)
m2m
}
}
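  // Hedged sketch (hypothetical schema, tables and fields, not defined in this file):
  // declaring a many-to-many relation with the builder above, using Squeryl's === operator
  // to produce the pair of equality expressions expected by 'via'.
  //
  // object ExampleSchema extends Schema {
  //   val courses  = table[Course]
  //   val students = table[Student]
  //   val courseSubscriptions =
  //     manyToManyRelation(courses, students).
  //       via[CourseSubscription]((c, s, cs) => (cs.courseId === c.id, cs.studentId === s.id))
  // }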
class ManyToManyRelationImpl[L <: KeyedEntity[_], R <: KeyedEntity[_], A <: KeyedEntity[_]](val leftTable: Table[L], val rightTable: Table[R], aClass: Class[A], f: (L,R,A)=>Pair[EqualityExpression,EqualityExpression], schema: Schema, nameOverride: Option[String])
extends Table[A](nameOverride.getOrElse(schema.tableNameFromClass(aClass)), aClass, schema, None) with ManyToManyRelation[L,R,A] {
thisTableOfA =>
def thisTable = thisTableOfA
schema._addRelation(this)
private val (_leftEqualityExpr, _rightEqualityExpr) = {
var e2: Option[Pair[EqualityExpression,EqualityExpression]] = None
from(leftTable, rightTable, thisTableOfA)((l,r,a) => {
e2 = Some(f(l,r,a))
select(None)
})
val e2_ = e2.get
//invert Pair[EqualityExpression,EqualityExpression] if it has been declared in reverse :
if(_viewReferedInExpression(leftTable, e2_._1)) {
assert(_viewReferedInExpression(rightTable, e2_._2))
e2_
}
else {
assert(_viewReferedInExpression(leftTable, e2_._2))
assert(_viewReferedInExpression(rightTable, e2_._1))
(e2_._2, e2_._1)
}
}
private def _viewReferedInExpression(v: View[_], ee: EqualityExpression) =
ee.filterDescendantsOfType[SelectElementReference[Any]].filter(
_.selectElement.origin.asInstanceOf[ViewExpressionNode[_]].view == v
).headOption != None
private val (leftPkFmd, leftFkFmd) = _splitEquality(_leftEqualityExpr, thisTable, false)
private val (rightPkFmd, rightFkFmd) = _splitEquality(_rightEqualityExpr, thisTable, false)
val leftForeignKeyDeclaration =
schema._createForeignKeyDeclaration(leftFkFmd.columnName, leftPkFmd.columnName)
val rightForeignKeyDeclaration =
schema._createForeignKeyDeclaration(rightFkFmd.columnName, rightPkFmd.columnName)
private def _associate[T <: KeyedEntity[_]](o: T, m2m: ManyToMany[T,A]): A = {
val aInst = m2m.assign(o)
try {
thisTableOfA.insertOrUpdate(aInst)
}
catch {
case e:SQLException =>
if(Session.currentSession.databaseAdapter.isNotNullConstraintViolation(e))
throw new SquerylException(
"the " + 'associate + " method created and inserted association object of type " +
              posoMetaData.clasz.getName + " that has NOT NULL columns, please use the other signature of " + 'ManyToMany +
              " that takes the association object as an argument: associate(o,a) for association objects that have NOT NULL columns", e)
else
throw e
}
}
def left(leftSideMember: L): Query[R] with ManyToMany[R,A] = {
val q =
from(thisTableOfA, rightTable)((a,r) => {
val matchClause = f(leftSideMember, r, a)
outerQueryDsl.where(matchClause._1 and matchClause._2).select(r)
})
new DelegateQuery(q) with ManyToMany[R,A] {
private def _assignKeys(r: R, a: AnyRef): Unit = {
val leftPk = leftPkFmd.get(leftSideMember.asInstanceOf[AnyRef])
val rightPk = rightPkFmd.get(r.asInstanceOf[AnyRef])
leftFkFmd.set(a, leftPk)
rightFkFmd.set(a, rightPk)
}
def associationMap =
from(thisTableOfA, rightTable)((a,r) => {
val matchClause = f(leftSideMember, r, a)
outerQueryDsl.where(matchClause._1 and matchClause._2).select((r,a))
})
def assign(o: R, a: A) = {
_assignKeys(o, a.asInstanceOf[AnyRef])
a
}
def associate(o: R, a: A): A = {
assign(o, a)
thisTableOfA.insertOrUpdate(a)
a
}
def assign(o: R): A = {
val aInstAny = thisTableOfA._createInstanceOfRowObject
val aInst = aInstAny.asInstanceOf[A]
_assignKeys(o, aInstAny)
aInst
}
def associate(o: R): A =
_associate(o,this)
def dissociate(o: R) =
thisTableOfA.deleteWhere(a0 => _whereClauseForAssociations(a0) and _equalityForRightSide(a0, o)) > 0
def _whereClauseForAssociations(a0: A) = {
val leftPk = leftPkFmd.get(leftSideMember.asInstanceOf[AnyRef])
leftFkFmd.get(a0.asInstanceOf[AnyRef])
FieldReferenceLinker.createEqualityExpressionWithLastAccessedFieldReferenceAndConstant(leftPk)
}
def _equalityForRightSide(a0: A, r: R) = {
val rightPk = rightPkFmd.get(r.asInstanceOf[AnyRef])
rightFkFmd.get(a0.asInstanceOf[AnyRef])
FieldReferenceLinker.createEqualityExpressionWithLastAccessedFieldReferenceAndConstant(rightPk)
}
def dissociateAll =
thisTableOfA.deleteWhere(a0 => _whereClauseForAssociations(a0))
def associations =
thisTableOfA.where(a0 => _whereClauseForAssociations(a0))
}
}
def right(rightSideMember: R): Query[L] with ManyToMany[L,A] = {
val q =
from(thisTableOfA, leftTable)((a,l) => {
val matchClause = f(l, rightSideMember, a)
outerQueryDsl.where(matchClause._1 and matchClause._2).select(l)
})
new DelegateQuery(q) with ManyToMany[L,A] {
private def _assignKeys(l: L, a: AnyRef): Unit = {
val rightPk = rightPkFmd.get(rightSideMember.asInstanceOf[AnyRef])
val leftPk = leftPkFmd.get(l.asInstanceOf[AnyRef])
rightFkFmd.set(a, rightPk)
leftFkFmd.set(a, leftPk)
}
def associationMap =
from(thisTableOfA, leftTable)((a,l) => {
val matchClause = f(l, rightSideMember, a)
outerQueryDsl.where(matchClause._1 and matchClause._2).select((l, a))
})
def assign(o: L, a: A) = {
_assignKeys(o, a.asInstanceOf[AnyRef])
a
}
def associate(o: L, a: A): A = {
assign(o, a)
thisTableOfA.insertOrUpdate(a)
a
}
def assign(o: L): A = {
val aInstAny = thisTableOfA._createInstanceOfRowObject
val aInst = aInstAny.asInstanceOf[A]
_assignKeys(o, aInstAny)
aInst
}
def associate(o: L): A =
_associate(o,this)
def dissociate(o: L) =
thisTableOfA.deleteWhere(a0 => _whereClauseForAssociations(a0) and _leftEquality(o, a0)) > 0
def _leftEquality(l: L, a0: A) = {
val leftPk = leftPkFmd.get(l.asInstanceOf[AnyRef])
leftFkFmd.get(a0.asInstanceOf[AnyRef])
FieldReferenceLinker.createEqualityExpressionWithLastAccessedFieldReferenceAndConstant(leftPk)
}
def _whereClauseForAssociations(a0: A) = {
val rightPk = rightPkFmd.get(rightSideMember.asInstanceOf[AnyRef])
rightFkFmd.get(a0.asInstanceOf[AnyRef])
FieldReferenceLinker.createEqualityExpressionWithLastAccessedFieldReferenceAndConstant(rightPk)
}
def dissociateAll =
thisTableOfA.deleteWhere(a0 => _whereClauseForAssociations(a0))
def associations =
thisTableOfA.where(a0 => _whereClauseForAssociations(a0))
}
}
}
def oneToManyRelation[O <: KeyedEntity[_],M](ot: Table[O], mt: Table[M]) = new OneToManyRelationBuilder(ot,mt)
class OneToManyRelationBuilder[O <: KeyedEntity[_],M](ot: Table[O], mt: Table[M]) {
def via(f: (O,M)=>EqualityExpression)(implicit schema: Schema) =
new OneToManyRelationImpl(ot,mt,f, schema)
}
class OneToManyRelationImpl[O <: KeyedEntity[_],M](val leftTable: Table[O], val rightTable: Table[M], f: (O,M)=>EqualityExpression, schema: Schema)
extends OneToManyRelation[O,M] {
schema._addRelation(this)
private def _isSelfReference =
leftTable == rightTable
//we obtain the FieldMetaData instances from the 'via' function by creating an EqualityExpression AST and then extracting them from it;
// they will serve to set the fields (primary and foreign keys) on the objects in the relation
private val (_leftPkFmd, _rightFkFmd) = {
var ee: Option[EqualityExpression] = None
//we create a query for the sole purpose of extracting the equality (inside the relation's 'via' clause)
from(leftTable,rightTable)((o,m) => {
ee = Some(f(o,m))
select(None)
})
val ee_ = ee.get //here we have the equality AST: it contains a left and a right node (SelectElementReference)
//that refer to a FieldSelectElement, which in turn refers to the FieldMetaData
// now build the tuple with the left and right FieldMetaData
_splitEquality(ee_, rightTable, _isSelfReference)
}
val foreignKeyDeclaration =
schema._createForeignKeyDeclaration(_rightFkFmd.columnName, _leftPkFmd.columnName)
def left(leftSide: O): OneToMany[M] = {
val q = from(rightTable)(m => where(f(leftSide, m)) select(m))
new DelegateQuery(q) with OneToMany[M] {
def deleteAll =
rightTable.deleteWhere(m => f(leftSide, m))
def assign(m: M) = {
val m0 = m.asInstanceOf[AnyRef]
val l0 = leftSide.asInstanceOf[AnyRef]
val v = _leftPkFmd.get(l0)
_rightFkFmd.set(m0, v)
m
}
def associate(m: M)(implicit ev: M <:< KeyedEntity[_]) = {
assign(m)
rightTable.insertOrUpdate(m)
}
}
}
def right(rightSide: M): ManyToOne[O] = {
val q = from(leftTable)(o => where(f(o,rightSide)) select(o))
new DelegateQuery(q) with ManyToOne[O] {
def assign(one: O) = {
val o = one.asInstanceOf[AnyRef]
val r = rightSide.asInstanceOf[AnyRef]
val v = _rightFkFmd.get(r)
_leftPkFmd.set(o, v)
one
}
def delete =
leftTable.deleteWhere(o => f(o, rightSide)) > 0
}
}
}
/**
* returns a (FieldMetaData, FieldMetaData) where ._1 is the id of the KeyedEntity on the left or right side,
* and where ._2 is the foreign key of the association object/table
*/
private def _splitEquality(ee: EqualityExpression, rightTable: Table[_], isSelfReference: Boolean) = {
if(isSelfReference)
assert(ee.right._fieldMetaData.isIdFieldOfKeyedEntity || ee.left._fieldMetaData.isIdFieldOfKeyedEntity)
if(ee.left._fieldMetaData.parentMetaData.clasz == rightTable.classOfT &&
(!isSelfReference || (isSelfReference && ee.right._fieldMetaData.isIdFieldOfKeyedEntity)) ) {
assert(ee.right._fieldMetaData.isIdFieldOfKeyedEntity)
(ee.right._fieldMetaData, ee.left._fieldMetaData)
}
else {
assert(ee.left._fieldMetaData.isIdFieldOfKeyedEntity)
(ee.left._fieldMetaData, ee.right._fieldMetaData)
}
}
// Composite key syntactic sugar :
def compositeKey[A1,A2](a1: A1, a2: A2) =
new CompositeKey2(a1, a2)
def compositeKey[A1,A2,A3](a1: A1, a2: A2, a3: A3) =
new CompositeKey3(a1, a2, a3)
def compositeKey[A1,A2,A3,A4](a1: A1, a2: A2, a3: A3, a4: A4) =
new CompositeKey4(a1, a2, a3, a4)
def compositeKey[A1,A2,A3,A4,A5](a1: A1, a2: A2, a3: A3, a4: A4, a5: A5) =
new CompositeKey5(a1, a2, a3, a4, a5)
def compositeKey[A1,A2,A3,A4,A5,A6](a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6) =
new CompositeKey6(a1, a2, a3, a4, a5, a6)
def compositeKey[A1,A2,A3,A4,A5,A6,A7](a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6, a7: A7) =
new CompositeKey7(a1, a2, a3, a4, a5, a6, a7)
def compositeKey[A1,A2,A3,A4,A5,A6,A7,A8](a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6, a7: A7, a8: A8) =
new CompositeKey8(a1, a2, a3, a4, a5, a6, a7, a8)
def compositeKey[A1,A2,A3,A4,A5,A6,A7,A8,A9](a1: A1, a2: A2, a3: A3, a4: A4, a5: A5, a6: A6, a7: A7, a8: A8, a9: A9) =
new CompositeKey9(a1, a2, a3, a4, a5, a6, a7, a8, a9)
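// A usage sketch (an assumption, not part of the original file): an entity keyed by two
// columns would typically declare, e.g.,
//   def id = compositeKey(userId, roleId)
// and the tuple-to-composite-key conversions below let such keys be compared against plain tuples.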
// Tuple to composite key conversions :
implicit def t2te[A1,A2](t: (A1,A2)) = new CompositeKey2[A1,A2](t._1, t._2)
implicit def t3te[A1,A2,A3](t: (A1,A2,A3)) = new CompositeKey3[A1,A2,A3](t._1, t._2, t._3)
implicit def t4te[A1,A2,A3,A4](t: (A1,A2,A3,A4)) = new CompositeKey4[A1,A2,A3,A4](t._1, t._2, t._3, t._4)
implicit def t5te[A1,A2,A3,A4,A5](t: (A1,A2,A3,A4,A5)) = new CompositeKey5[A1,A2,A3,A4,A5](t._1, t._2, t._3, t._4, t._5)
implicit def t6te[A1,A2,A3,A4,A5,A6](t: (A1,A2,A3,A4,A5,A6)) = new CompositeKey6[A1,A2,A3,A4,A5,A6](t._1, t._2, t._3, t._4, t._5, t._6)
implicit def t7te[A1,A2,A3,A4,A5,A6,A7](t: (A1,A2,A3,A4,A5,A6,A7)) = new CompositeKey7[A1,A2,A3,A4,A5,A6,A7](t._1, t._2, t._3, t._4, t._5, t._6, t._7)
implicit def t8te[A1,A2,A3,A4,A5,A6,A7,A8](t: (A1,A2,A3,A4,A5,A6,A7,A8)) = new CompositeKey8[A1,A2,A3,A4,A5,A6,A7,A8](t._1, t._2, t._3, t._4, t._5, t._6, t._7, t._8)
implicit def t9te[A1,A2,A3,A4,A5,A6,A7,A8,A9](t: (A1,A2,A3,A4,A5,A6,A7,A8,A9)) = new CompositeKey9[A1,A2,A3,A4,A5,A6,A7,A8,A9](t._1, t._2, t._3, t._4, t._5, t._6, t._7, t._8, t._9)
// Case statements :
def caseOf[A](expr: NumericalExpression[A]) = new CaseOfNumericalExpressionMatchStart(expr)
def caseOf[A](expr: NonNumericalExpression[A]) = new CaseOfNonNumericalExpressionMatchStart(expr)
def caseOf = new CaseOfConditionChainStart
}
|
takezoux2/squeryl-experimental
|
src/main/scala/org/squeryl/dsl/QueryDsl.scala
|
Scala
|
apache-2.0
| 21,723 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.columnar
import java.nio.{ByteBuffer, ByteOrder}
import org.apache.spark.sql.catalyst.InternalRow
private[columnar] trait NullableColumnAccessor extends ColumnAccessor {
private var nullsBuffer: ByteBuffer = _
private var nullCount: Int = _
private var seenNulls: Int = 0
private var nextNullIndex: Int = _
private var pos: Int = 0
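// The accessor expects the buffer, at its current position, to start with a 4-byte null count
// followed by `nullCount` 4-byte row positions of the null values; the column data follows,
// which is why initialize() below advances the underlying buffer by 4 + nullCount * 4 bytes.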
abstract override protected def initialize(): Unit = {
nullsBuffer = underlyingBuffer.duplicate().order(ByteOrder.nativeOrder())
nullCount = ByteBufferHelper.getInt(nullsBuffer)
nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
pos = 0
underlyingBuffer.position(underlyingBuffer.position + 4 + nullCount * 4)
super.initialize()
}
abstract override def extractTo(row: InternalRow, ordinal: Int): Unit = {
if (pos == nextNullIndex) {
seenNulls += 1
if (seenNulls < nullCount) {
nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
}
row.setNullAt(ordinal)
} else {
super.extractTo(row, ordinal)
}
pos += 1
}
abstract override def hasNext: Boolean = seenNulls < nullCount || super.hasNext
}
|
mike0sv/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/NullableColumnAccessor.scala
|
Scala
|
apache-2.0
| 2,000 |
/*
Copyright (c) 2016, Elliot Stirling
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package evolve.core
import scala.concurrent.ExecutionContext
object Evolver {
case class EvolverStrategy( children: Int, factor: Double, optimiseForPipeline: Boolean )
/**
* Given a program, test cases and a scoring function, this will attempt to evolve the passed program
*
* @param program the program to evolve
* @param score the scoring function to check progress
* @param optimise whether we use the execution cost in the evolution
* @param functions list of functions to map to the program opcodes
* @tparam A the data type we work against
* @return A new program that is not worse than the parent
*/
def apply[A]( program: Program, score: Program => Double, optimise: Boolean )( implicit strategy: EvolverStrategy, functions: Seq[Function[A]], ec: ExecutionContext ): Option[Program] = {
apply(program, _ => true, score, optimise)
}
/**
* Given a program, test cases and a scoring function, this will attempt to evolve the passed program
*
* @param program the program to evolve
* @param instFilter be specific about which instructions to evolve
* @param score the scoring function to check progress
* @param optimise whether we use the execution cost in the evolution
* @param functions list of functions to map to the program opcodes
* @tparam A the data type we work against
* @return A new program that is not worse than the parent
*/
def apply[A]( program: Program, instFilter: Instruction => Boolean, score: Program => Double, optimise: Boolean )( implicit strategy: EvolverStrategy, functions: Seq[Function[A]], ec: ExecutionContext ): Option[Program] = {
import scala.concurrent._
import scala.concurrent.duration.Duration._
import scala.language.postfixOps
val optimiseForPipeline = strategy.optimiseForPipeline
// score the parent
val programScoreF: Future[Double] = Future {
if( optimiseForPipeline )
score(program) + score(program.pipeline)
else
score(program)
}
// create mutant children
val popF: Future[Seq[(Program, Double)]] = Future.sequence( programScoreF.map( s => (program, s) ) +: Seq.fill(strategy.children)( Future {
val child = Generator.repair( Mutator( program, strategy.factor ) )
if( optimiseForPipeline )
(child, score(child) + score(child.pipeline))
else
(child, score(child))
} ) )
// get children not worse than the parent
val results: Seq[(Program, Double)] = blocking {
Await.result(popF, Inf)
}
val programScore = results.head._2
val childResults: Seq[(Program, Double)] = results.tail.filter(_._2 <= programScore)
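// scores behave like costs here: lower is better, so only children scoring no higher than the parent are kept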
// returns the best child not worse than the parent
if(optimise) {
(( program, programScore ) +: childResults)
.map( a => a.copy( _2 = (a._2 + a._1.cost) * ( if( optimiseForPipeline ) a._1.maxPipelineLength.toDouble else 1.0 ) ) )
.reduceOption[(Program, Double)] {
case (a, b) => if( a._2 < b._2 ) a else b
}.map( _._1 ).filterNot( _ == program )
} else if(optimiseForPipeline) {
childResults
.map( a => a.copy( _2 = a._2 * a._1.maxPipelineLength ) )
.reduceOption[(Program, Double)] {
case (a, b) => if( a._2 < b._2 ) a else b
}.map( _._1 )
} else {
childResults
.reduceOption[(Program, Double)] {
case (a, b) => if( a._2 < b._2 ) a else b
}.map( _._1 )
}
}
}
|
Trugath/Evolve
|
src/main/scala/evolve/core/Evolver.scala
|
Scala
|
bsd-3-clause
| 4,980 |
/**********************************************************************************************\\
* Rapture ORM Library *
* Version 0.9.0 *
* *
* The primary distribution site is *
* *
* http://rapture.io/ *
* *
* Copyright 2010-2014 Jon Pretty, Propensive Ltd. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file *
* except in compliance with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the *
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, *
* either express or implied. See the License for the specific language governing permissions *
* and limitations under the License. *
\\**********************************************************************************************/
package rapture.orm
import scala.collection.mutable.Queue
// Probably doesn't need changing much.
/** Implements a dynamic pool of some resource, e.g. database connections. */
abstract class Pool[Resource]
{
/** Implement to make new resources. */
protected def make(): Resource
/** Implement to dispose of surplus resources. */
protected def dispose(x: Resource): Unit
/** Implement to check resource is still usable. */
protected def check(x: Resource): Boolean
/** Number of resources to always keep in reserve, if we have them. */
protected def spare = 5
/** How long to leave surplus resources unused before discarding them. */
protected def timeout = 10*60000L
private val pool = new Queue[Resource]
private var poolCount = 0
private var lastLow = 0L
/** Acquire a resource for the duration of the body. */
def acquireFor[A](body: Resource => A): A = {
val res = acquireDirect()
try body(res) finally releaseDirect(res)
}
/** Acquire a resource without any nesting guarantees. Avoid this method. */
def acquireDirect(): Resource = pool.synchronized {
if(poolCount == 0) make()
else {
val r = pool.dequeue
poolCount = poolCount - 1
if(check(r)) r else {
dispose(r)
make()
}
}
}
/** Release a directly-acquired resource. */
def releaseDirect(r: Resource): Unit = pool.synchronized {
val now = System.currentTimeMillis()
if(poolCount < spare) lastLow = now
if(lastLow > now - timeout) {
pool.enqueue(r)
poolCount = poolCount + 1
} else dispose(r)
}
/** Dispose of all resources not currently in use. */
def disposeAll() = pool.synchronized {
while(poolCount > 0) {
dispose(pool.dequeue)
poolCount = poolCount - 1
}
}
}
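// A minimal usage sketch, not part of the original file: a hypothetical pool of JDBC
// connections built on the Pool API above. The in-memory H2 URL is a placeholder and
// assumes an H2 driver on the classpath.
object ExampleConnectionPool extends Pool[java.sql.Connection] {
  protected def make(): java.sql.Connection =
    java.sql.DriverManager.getConnection("jdbc:h2:mem:example")
  protected def dispose(c: java.sql.Connection): Unit = c.close()
  protected def check(c: java.sql.Connection): Boolean = !c.isClosed
}
// e.g. ExampleConnectionPool.acquireFor(conn => conn.createStatement().execute("SELECT 1"))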
|
propensive/rapture-orm
|
src/pool.scala
|
Scala
|
apache-2.0
| 3,731 |
package ammonite.integration
import ammonite.integration.TestUtils._
import ammonite.ops.ImplicitWd._
import ammonite.ops._
import ammonite.util.Util
import utest._
/**
* Make sure that when we run Scala scripts using the Ammonite executable and
* they fail with "expected" failure modes, we don't show useless stack traces
* and just show what the user did wrong
*/
object ErrorTruncationTests extends TestSuite{
override def utestTruncateLength = 60000
def checkErrorMessage(file: RelPath, expected: String) = {
val e = fansi.Str(
Util.normalizeNewlines(
intercept[ShelloutException]{ exec(file) }
.result
.err
.string
)
).plainText
//This string gets extra content included on Windows due to an additionally set environment variable
assert(e.contains(expected))
}
def scriptFile(name: String): Path =
replStandaloneResources/'errorTruncation/name
val tests = TestSuite {
println("ErrorTruncationTests")
'compileError - checkErrorMessage(
file = 'errorTruncation/"compileError.sc",
expected = Util.normalizeNewlines(
s"""${scriptFile("compileError.sc")}:1: not found: value doesntexist
|val res = doesntexist
| ^
|Compilation Failed
|""".stripMargin
)
)
'multiExpressionError - checkErrorMessage(
file = 'errorTruncation/"compileErrorMultiExpr.sc",
expected = Util.normalizeNewlines(
s"""${scriptFile("compileErrorMultiExpr.sc")}:11: not found: value doesntexist
|val res_4 = doesntexist
| ^
|Compilation Failed
|""".stripMargin
)
)
'parseError - {
if(!Util.windowsPlatform){
checkErrorMessage(
file = 'errorTruncation/"parseError.sc",
expected = Util.normalizeNewlines(
"""Syntax Error: End:1:1 ..."}\\n"
|}
|^
|""".stripMargin
)
)
}
}
val tab = '\\t'
val runtimeErrorResourcePackage =
"$file.integration.src.test.resources.ammonite.integration.errorTruncation"
val runtimeErrorSc = scriptFile("runtimeError.sc")
'runtimeError - checkErrorMessage(
file = 'errorTruncation/"runtimeError.sc",
expected = Util.normalizeNewlines(
s"""Exception in thread "main" java.lang.ArithmeticException: / by zero
|${tab}at $runtimeErrorResourcePackage.runtimeError$$.<init>($runtimeErrorSc:1)
|${tab}at $runtimeErrorResourcePackage.runtimeError$$.<clinit>($runtimeErrorSc)
|${tab}at $runtimeErrorResourcePackage.runtimeError.$$main($runtimeErrorSc)
|""".stripMargin
)
)
}
}
|
alexarchambault/ammonium
|
integration/src/test/scala/ammonite/integration/ErrorTruncationTests.scala
|
Scala
|
mit
| 2,719 |
/*
* Copyright (c) 2017 Magomed Abdurakhmanov, Hypertino
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*
*/
package com.hypertino.hyperbus.util
import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}
import scala.collection.concurrent.TrieMap
trait CanFuzzyMatchable[A] {
def indexProperties(bloomFilter: TrieMap[Any, AtomicLong], a: A): Seq[Any]
}
case class FuzzyIndexItemMetaInfo(indexValue: Any, bloomFilterValue: Any)
trait FuzzyMatcher {
def indexProperties: Seq[FuzzyIndexItemMetaInfo]
def matches(other: Any): Boolean
}
class CanFuzzyIndex[A] extends CanComplexElement[Vector[A]] {
def upsert(existing: Vector[A], upsert: Vector[A]): Vector[A] = existing ++ upsert
def remove[A2](existing: Vector[A], remove: A2): Vector[A] = existing.filterNot(_ == remove)
def isEmpty(existing: Vector[A]): Boolean = existing.isEmpty
}
class FuzzyIndex[A <: FuzzyMatcher](seq: A*) {
private implicit val evidence = new CanFuzzyIndex[A]
private val index = new ComplexTrieMap[Any, Vector[A]]()
private val bloomFilter = TrieMap[Any, AtomicLong]()
seq.foreach(add)
def add(a: A): Unit = {
val v = Vector(a)
a.indexProperties.foreach { meta ⇒
index.upsert(meta.indexValue, v)
}
index.upsert(All, v)
synchronized {
a.indexProperties.foreach { meta ⇒
bloomFilter
.putIfAbsent(meta.bloomFilterValue, new AtomicLong())
.foreach(_.incrementAndGet())
}
}
}
def remove(a: A): Unit = {
synchronized {
a.indexProperties.foreach { meta ⇒
bloomFilter
.get(meta.bloomFilterValue)
.foreach { counter ⇒
if (counter.decrementAndGet() <= 0) {
bloomFilter.remove(meta.bloomFilterValue)
}
}
}
}
a.indexProperties.foreach { key ⇒
index.remove(key, a)
}
index.remove(All, a)
}
def clear(): Unit = {
synchronized {
index.clear()
bloomFilter.clear()
}
}
private def candidates[B](b: B)(implicit evidence: CanFuzzyMatchable[B]): Seq[A] = {
evidence
.indexProperties(bloomFilter, b)
.map(index.getOrElse(_, Vector.empty[A]))
.sortBy(_.size)
.:+(index.getOrElse(All, Vector.empty[A]))
.flatten
}
def lookupAll[B](b: B)(implicit evidence: CanFuzzyMatchable[B]): Seq[A] = {
candidates(b).distinct.filter(_.matches(b))
}
private object All
def toSeq: Seq[A] = index.map(_._2).flatten.toSeq.distinct
}
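// A minimal usage sketch (an assumption, not from this repository): a matcher whose index
// and bloom-filter keys are the wrapped string itself, matched by plain equality.
case class ExactMatcher(s: String) extends FuzzyMatcher {
  def indexProperties: Seq[FuzzyIndexItemMetaInfo] = Seq(FuzzyIndexItemMetaInfo(s, s))
  def matches(other: Any): Boolean = other == s
}
// e.g. new FuzzyIndex[ExactMatcher](ExactMatcher("a"), ExactMatcher("b")).toSeq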
|
hypertino/hyperbus
|
hyperbus/src/main/scala/com/hypertino/hyperbus/util/FuzzyIndex.scala
|
Scala
|
mpl-2.0
| 2,641 |
package akka.persistence.snapshot.local
import com.typesafe.config.ConfigFactory
import akka.persistence.PluginCleanup
import akka.persistence.snapshot.SnapshotStoreSpec
class LocalSnapshotStoreSpec extends SnapshotStoreSpec with PluginCleanup {
lazy val config = ConfigFactory.parseString(
"""
|akka.persistence.snapshot-store.plugin = "akka.persistence.snapshot-store.local"
|akka.persistence.snapshot-store.local.dir = "target/snapshots"
""".stripMargin)
}
|
krasserm/akka-persistence-testkit
|
src/test/scala/akka/persistence/snapshot/local/LocalSnapshotStoreSpec.scala
|
Scala
|
apache-2.0
| 486 |
package de.frosner.metaviz.util
import org.scalajs.dom
import org.scalajs.dom.document
object Util {
def generateElement(parent: dom.Node, id: String, tagName: String): dom.Element = {
val element = document.createElement(tagName)
element.id = id
parent.appendChild(element)
element
}
def generateDiv(parent: dom.Node, id: String): dom.Element = {
generateElement(parent, id, "div")
}
def generateSpan(parent: dom.Node, id: String): dom.Element = {
generateElement(parent, id, "span")
}
def generateTextInput(parent: dom.Node, id: String): dom.Element = {
val input = generateElement(parent, id, "input")
input.setAttribute("type", "text")
input
}
}
|
FRosner/metaviz-js
|
src/main/scala/de/frosner/metaviz/util/Utils.scala
|
Scala
|
apache-2.0
| 709 |
/*
* StringFieldViewImpl.scala
* (LucreSwing)
*
* Copyright (c) 2014-2021 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.lucre.swing.impl
import de.sciss.lucre.edit.UndoManager
import de.sciss.lucre.expr.CellView
import de.sciss.lucre.swing.LucreSwing.deferTx
import de.sciss.lucre.swing.StringFieldView
import de.sciss.lucre.{Cursor, Disposable, Txn}
import java.awt.event.KeyEvent
import javax.swing.KeyStroke
import scala.swing.event.EditDone
import scala.swing.{Action, TextField}
object StringFieldViewImpl extends CellViewFactory[String] {
def apply[T <: Txn[T]](cell: CellView[T, String], name: String, columns: Int)
(implicit tx: T, cursor: Cursor[T],
undoManager: UndoManager[T]): StringFieldView[T] = {
val res: Impl[T] = new Impl[T](editName = name, columns0 = columns) {
impl =>
protected var (value, committer) = CellViewFactory.mkCommitter(cell, name)(tx)
protected val observer: Disposable[T] = CellViewFactory.mkObserver (cell, impl)
}
deferTx(res.guiInit())
res
}
private abstract class Impl[T <: Txn[T]](editName: String, columns0: Int)
(implicit cursor: Cursor[T], undoManager: UndoManager[T])
extends StringFieldView[T] with CellViewEditor[T, String, TextField] {
override type C = scala.swing.TextField
protected def observer: Disposable[T]
protected def committer: Option[CellViewFactory.Committer[T, String]]
protected def valueToComponent(): Unit = if (component.text != value) component.text = value
protected def createComponent(): TextField = {
val txt = new TextField(value, columns0)
val db = DirtyBorder(txt)
dirty = Some(db)
val aMap = txt.peer.getActionMap
val iMap = txt.peer.getInputMap
val keyAbort = "de.sciss.Abort"
aMap.put(keyAbort, Action("Cancel Editing") {
if (db.visible) {
txt.text = value
clearDirty()
}
} .peer)
iMap.put(KeyStroke.getKeyStroke(KeyEvent.VK_ESCAPE, 0), keyAbort)
committer.foreach { com =>
txt.listenTo(txt)
txt.reactions += {
case EditDone(_) =>
val newValue = txt.text
if (value != newValue) {
cursor.step { implicit tx =>
com.commit(newValue)
}
value = newValue
}
clearDirty()
}
observeDirty(txt)
}
txt
}
}
}
|
Sciss/LucreSwing
|
jvm/src/main/scala/de/sciss/lucre/swing/impl/StringFieldViewImpl.scala
|
Scala
|
agpl-3.0
| 2,731 |
/*
* Copyright (C) 2011-2017 Interfaculty Department of Geoinformatics, University of
* Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science)
* in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand
* Ministry of Business, Innovation and Employment (MBIE)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import models.owc._
/**
* Test Spec for [[OwcCreatorApplicationDAO]] with [[info.smart.models.owc100.OwcCreatorApplication]]
*/
class OwcCreatorApplicationDAOSpec extends WithDefaultTestFullAppAndDatabase {
"OwcCreatorApplicationDAO" can {
val demodata = new DemoData
val creatorApp1 = demodata.creatorApp1
val creatorApp2 = demodata.creatorApp2
val creatorApp3 = demodata.creatorApp3
val creatorApp3_1 = demodata.creatorApp3_1
"create OwcCreatorApplication with DB" in {
withTestDatabase { database =>
database.withConnection { implicit connection =>
OwcCreatorApplicationDAO.getAllOwcCreatorApplications.size mustEqual 0
OwcCreatorApplicationDAO.createOwcCreatorApplication(creatorApp1) must contain(creatorApp1)
OwcCreatorApplicationDAO.findOwcCreatorApplicationByUuid(creatorApp1.uuid) must contain(creatorApp1)
OwcCreatorApplicationDAO.createOwcCreatorApplication(creatorApp2) must contain(creatorApp2)
OwcCreatorApplicationDAO.findOwcCreatorApplicationByUuid(creatorApp2.uuid) must contain(creatorApp2)
OwcCreatorApplicationDAO.createOwcCreatorApplication(creatorApp3) must contain(creatorApp3)
OwcCreatorApplicationDAO.findOwcCreatorApplicationByUuid(creatorApp3.uuid) must contain(creatorApp3)
val thrown = the[java.sql.SQLException] thrownBy OwcCreatorApplicationDAO.createOwcCreatorApplication(creatorApp3)
thrown.getErrorCode mustEqual 23505
}
}
}
"update OwcCreatorApplication with DB" in {
withTestDatabase { database =>
database.withConnection { implicit connection =>
OwcCreatorApplicationDAO.createOwcCreatorApplication(creatorApp3) must contain(creatorApp3)
OwcCreatorApplicationDAO.findOwcCreatorApplicationByUuid(creatorApp3.uuid) must contain(creatorApp3)
}
database.withTransaction { implicit connection =>
OwcCreatorApplicationDAO.updateOwcCreatorApplication(creatorApp3_1).get mustEqual creatorApp3_1
OwcCreatorApplicationDAO.findOwcCreatorApplicationByUuid(creatorApp3_1.uuid) must contain(creatorApp3_1)
}
}
}
"delete OwcCreatorApplication with DB" in {
withTestDatabase { database =>
database.withConnection { implicit connection =>
OwcCreatorApplicationDAO.createOwcCreatorApplication(creatorApp2) must contain(creatorApp2)
OwcCreatorApplicationDAO.findOwcCreatorApplicationByUuid(creatorApp2.uuid) must contain(creatorApp2)
}
database.withTransaction { implicit connection =>
OwcCreatorApplicationDAO.deleteOwcCreatorApplication(creatorApp2) mustEqual true
OwcCreatorApplicationDAO.getAllOwcCreatorApplications.size mustEqual 0
}
}
}
}
}
|
ZGIS/smart-portal-backend
|
test/OwcCreatorApplicationDAOSpec.scala
|
Scala
|
apache-2.0
| 3,694 |
/**
*
* @author Richard Li
*/
object puzzle4 extends App {
def howManyElements(collections: Iterable[Iterable[_]]): Int = {
collections.map(_.size).sum
}
// we get the right answers here
println(howManyElements(List(List(1, 2, 3), List(4, 5, 6))))
println(howManyElements(Set(List(1, 2, 3), List(4, 5, 6, 7))))
// wrong answer now!
// unlike Java collections, the input collection type is preserved in Scala: mapping a Set yields a Set, so duplicate sizes collapse
println(howManyElements(Set(List(1, 2, 3), List(4, 5, 6))))
// the following is equivalent to what happens inside the method
// x == Set(3, 3) == Set(3)
val x = Set(List(1, 2, 3), Set(4, 5, 6)).map(_.size)
println(x)
def howManyElementsFix(collections: Iterable[Iterable[_]]): Int = {
collections.toSeq.map(_.size).sum
}
println(howManyElementsFix(List(List(1, 2, 3), List(4, 5, 6))))
println(howManyElementsFix(Set(List(1, 2, 3), List(4, 5, 6, 7))))
println(howManyElementsFix(Set(List(1, 2, 3), List(4, 5, 6))))
}
|
precompiler/scala-101
|
puzzles/src/main/scala/puzzle4.scala
|
Scala
|
apache-2.0
| 973 |
package blub
trait SomeFunctionality {
class P(val s: String)
}
object External extends SomeFunctionality
|
martinring/clide2
|
modules/clide-core/src/main/scala/shouldntbethere/package.scala
|
Scala
|
lgpl-3.0
| 111 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.File
import java.nio.charset.StandardCharsets
import java.sql.{Date, Timestamp}
import java.util.{Locale, Set}
import com.google.common.io.Files
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.TestUtils
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.{EliminateSubqueryAliases, FunctionRegistry}
import org.apache.spark.sql.catalyst.catalog.{CatalogTableType, CatalogUtils, HiveTableRelation}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias}
import org.apache.spark.sql.execution.datasources.{HadoopFsRelation, LogicalRelation}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.hive.{HiveExternalCatalog, HiveUtils}
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.CalendarInterval
case class Nested1(f1: Nested2)
case class Nested2(f2: Nested3)
case class Nested3(f3: Int)
case class NestedArray2(b: Seq[Int])
case class NestedArray1(a: NestedArray2)
case class Order(
id: Int,
make: String,
`type`: String,
price: Int,
pdate: String,
customer: String,
city: String,
state: String,
month: Int)
/**
* A collection of hive query tests where we generate the answers ourselves instead of depending on
* Hive to generate them (in contrast to HiveQuerySuite). Often this is because the query is
* valid, but Hive currently cannot execute it.
*/
class SQLQuerySuite extends QueryTest with SQLTestUtils with TestHiveSingleton {
import hiveContext._
import spark.implicits._
test("query global temp view") {
val df = Seq(1).toDF("i1")
df.createGlobalTempView("tbl1")
val global_temp_db = spark.conf.get("spark.sql.globalTempDatabase")
checkAnswer(spark.sql(s"select * from ${global_temp_db}.tbl1"), Row(1))
spark.sql(s"drop view ${global_temp_db}.tbl1")
}
test("non-existent global temp view") {
val global_temp_db = spark.conf.get("spark.sql.globalTempDatabase")
val message = intercept[AnalysisException] {
spark.sql(s"select * from ${global_temp_db}.nonexistentview")
}.getMessage
assert(message.contains("Table or view not found"))
}
test("script") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
assume(TestUtils.testCommandAvailable("echo | sed"))
val scriptFilePath = getTestResourcePath("test_script.sh")
val df = Seq(("x1", "y1", "z1"), ("x2", "y2", "z2")).toDF("c1", "c2", "c3")
df.createOrReplaceTempView("script_table")
val query1 = sql(
s"""
|SELECT col1 FROM (from(SELECT c1, c2, c3 FROM script_table) tempt_table
|REDUCE c1, c2, c3 USING 'bash $scriptFilePath' AS
|(col1 STRING, col2 STRING)) script_test_table""".stripMargin)
checkAnswer(query1, Row("x1_y1") :: Row("x2_y2") :: Nil)
}
test("SPARK-6835: udtf in lateral view") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.createOrReplaceTempView("table1")
val query = sql("SELECT c1, v FROM table1 LATERAL VIEW stack(3, 1, c1 + 1, c1 + 2) d AS v")
checkAnswer(query, Row(1, 1) :: Row(1, 2) :: Row(1, 3) :: Nil)
}
test("SPARK-13651: generator outputs shouldn't be resolved from its child's output") {
withTempView("src") {
Seq(("id1", "value1")).toDF("key", "value").createOrReplaceTempView("src")
val query =
sql("SELECT genoutput.* FROM src " +
"LATERAL VIEW explode(map('key1', 100, 'key2', 200)) genoutput AS key, value")
checkAnswer(query, Row("key1", 100) :: Row("key2", 200) :: Nil)
}
}
test("SPARK-6851: Self-joined converted parquet tables") {
val orders = Seq(
Order(1, "Atlas", "MTB", 234, "2015-01-07", "John D", "Pacifica", "CA", 20151),
Order(3, "Swift", "MTB", 285, "2015-01-17", "John S", "Redwood City", "CA", 20151),
Order(4, "Atlas", "Hybrid", 303, "2015-01-23", "Jones S", "San Mateo", "CA", 20151),
Order(7, "Next", "MTB", 356, "2015-01-04", "Jane D", "Daly City", "CA", 20151),
Order(10, "Next", "YFlikr", 187, "2015-01-09", "John D", "Fremont", "CA", 20151),
Order(11, "Swift", "YFlikr", 187, "2015-01-23", "John D", "Hayward", "CA", 20151),
Order(2, "Next", "Hybrid", 324, "2015-02-03", "Jane D", "Daly City", "CA", 20152),
Order(5, "Next", "Street", 187, "2015-02-08", "John D", "Fremont", "CA", 20152),
Order(6, "Atlas", "Street", 154, "2015-02-09", "John D", "Pacifica", "CA", 20152),
Order(8, "Swift", "Hybrid", 485, "2015-02-19", "John S", "Redwood City", "CA", 20152),
Order(9, "Atlas", "Split", 303, "2015-02-28", "Jones S", "San Mateo", "CA", 20152))
val orderUpdates = Seq(
Order(1, "Atlas", "MTB", 434, "2015-01-07", "John D", "Pacifica", "CA", 20151),
Order(11, "Swift", "YFlikr", 137, "2015-01-23", "John D", "Hayward", "CA", 20151))
orders.toDF.createOrReplaceTempView("orders1")
orderUpdates.toDF.createOrReplaceTempView("orderupdates1")
withTable("orders", "orderupdates") {
sql(
"""CREATE TABLE orders(
| id INT,
| make String,
| type String,
| price INT,
| pdate String,
| customer String,
| city String)
|PARTITIONED BY (state STRING, month INT)
|STORED AS PARQUET
""".stripMargin)
sql(
"""CREATE TABLE orderupdates(
| id INT,
| make String,
| type String,
| price INT,
| pdate String,
| customer String,
| city String)
|PARTITIONED BY (state STRING, month INT)
|STORED AS PARQUET
""".stripMargin)
sql("set hive.exec.dynamic.partition.mode=nonstrict")
sql("INSERT INTO TABLE orders PARTITION(state, month) SELECT * FROM orders1")
sql("INSERT INTO TABLE orderupdates PARTITION(state, month) SELECT * FROM orderupdates1")
checkAnswer(
sql(
"""
|select orders.state, orders.month
|from orders
|join (
| select distinct orders.state,orders.month
| from orders
| join orderupdates
| on orderupdates.id = orders.id) ao
| on ao.state = orders.state and ao.month = orders.month
""".stripMargin),
(1 to 6).map(_ => Row("CA", 20151)))
}
}
test("show functions") {
val allBuiltinFunctions = FunctionRegistry.builtin.listFunction().map(_.unquotedString)
val allFunctions = sql("SHOW functions").collect().map(r => r(0))
allBuiltinFunctions.foreach { f =>
assert(allFunctions.contains(f))
}
withTempDatabase { db =>
def createFunction(names: Seq[String]): Unit = {
names.foreach { name =>
sql(
s"""
|CREATE TEMPORARY FUNCTION $name
|AS '${classOf[PairUDF].getName}'
""".stripMargin)
}
}
def dropFunction(names: Seq[String]): Unit = {
names.foreach { name =>
sql(s"DROP TEMPORARY FUNCTION $name")
}
}
createFunction(Seq("temp_abs", "temp_weekofyear", "temp_sha", "temp_sha1", "temp_sha2"))
checkAnswer(sql("SHOW functions temp_abs"), Row("temp_abs"))
checkAnswer(sql("SHOW functions 'temp_abs'"), Row("temp_abs"))
checkAnswer(sql(s"SHOW functions $db.temp_abs"), Row("temp_abs"))
checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs"))
checkAnswer(sql(s"SHOW functions `$db`.`temp_abs`"), Row("temp_abs"))
checkAnswer(sql("SHOW functions `a function doens't exist`"), Nil)
checkAnswer(sql("SHOW functions `temp_weekofyea*`"), Row("temp_weekofyear"))
// this will probably fail if we add more functions with the `sha` prefix.
checkAnswer(
sql("SHOW functions `temp_sha*`"),
List(Row("temp_sha"), Row("temp_sha1"), Row("temp_sha2")))
// Test '|' for alternation.
checkAnswer(
sql("SHOW functions 'temp_sha*|temp_weekofyea*'"),
List(Row("temp_sha"), Row("temp_sha1"), Row("temp_sha2"), Row("temp_weekofyear")))
dropFunction(Seq("temp_abs", "temp_weekofyear", "temp_sha", "temp_sha1", "temp_sha2"))
}
}
test("describe functions - built-in functions") {
checkKeywordsExist(sql("describe function extended upper"),
"Function: upper",
"Class: org.apache.spark.sql.catalyst.expressions.Upper",
"Usage: upper(str) - Returns `str` with all characters changed to uppercase",
"Extended Usage:",
"Examples:",
"> SELECT upper('SparkSql');",
"SPARKSQL")
checkKeywordsExist(sql("describe functioN Upper"),
"Function: upper",
"Class: org.apache.spark.sql.catalyst.expressions.Upper",
"Usage: upper(str) - Returns `str` with all characters changed to uppercase")
checkKeywordsNotExist(sql("describe functioN Upper"),
"Extended Usage")
checkKeywordsExist(sql("describe functioN abcadf"),
"Function: abcadf not found.")
checkKeywordsExist(sql("describe functioN `~`"),
"Function: ~",
"Class: org.apache.spark.sql.catalyst.expressions.BitwiseNot",
"Usage: ~ expr - Returns the result of bitwise NOT of `expr`.")
// Hard coded describe functions
checkKeywordsExist(sql("describe function `<>`"),
"Function: <>",
"Usage: expr1 <> expr2 - Returns true if `expr1` is not equal to `expr2`")
checkKeywordsExist(sql("describe function `!=`"),
"Function: !=",
"Usage: expr1 != expr2 - Returns true if `expr1` is not equal to `expr2`")
checkKeywordsExist(sql("describe function `between`"),
"Function: between",
"Usage: expr1 [NOT] BETWEEN expr2 AND expr3 - " +
"evaluate if `expr1` is [not] in between `expr2` and `expr3`")
checkKeywordsExist(sql("describe function `case`"),
"Function: case",
"Usage: CASE expr1 WHEN expr2 THEN expr3 " +
"[WHEN expr4 THEN expr5]* [ELSE expr6] END - " +
"When `expr1` = `expr2`, returns `expr3`; " +
"when `expr1` = `expr4`, return `expr5`; else return `expr6`")
}
test("describe functions - user defined functions") {
withUserDefinedFunction("udtf_count" -> false) {
sql(
s"""
|CREATE FUNCTION udtf_count
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
|USING JAR '${hiveContext.getHiveFile("TestUDTF.jar").toURI}'
""".stripMargin)
checkKeywordsExist(sql("describe function udtf_count"),
"Function: default.udtf_count",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
checkAnswer(
sql("SELECT udtf_count(a) FROM (SELECT 1 AS a FROM src LIMIT 3) t"),
Row(3) :: Row(3) :: Nil)
checkKeywordsExist(sql("describe function udtf_count"),
"Function: default.udtf_count",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
}
}
test("describe functions - temporary user defined functions") {
withUserDefinedFunction("udtf_count_temp" -> true) {
sql(
s"""
|CREATE TEMPORARY FUNCTION udtf_count_temp
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
|USING JAR '${hiveContext.getHiveFile("TestUDTF.jar").toURI}'
""".stripMargin)
checkKeywordsExist(sql("describe function udtf_count_temp"),
"Function: udtf_count_temp",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
checkAnswer(
sql("SELECT udtf_count_temp(a) FROM (SELECT 1 AS a FROM src LIMIT 3) t"),
Row(3) :: Row(3) :: Nil)
checkKeywordsExist(sql("describe function udtf_count_temp"),
"Function: udtf_count_temp",
"Class: org.apache.spark.sql.hive.execution.GenericUDTFCount2",
"Usage: N/A")
}
}
test("SPARK-5371: union with null and sum") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.createOrReplaceTempView("table1")
val query = sql(
"""
|SELECT
| MIN(c1),
| MIN(c2)
|FROM (
| SELECT
| SUM(c1) c1,
| NULL c2
| FROM table1
| UNION ALL
| SELECT
| NULL c1,
| SUM(c2) c2
| FROM table1
|) a
""".stripMargin)
checkAnswer(query, Row(1, 1) :: Nil)
}
test("CTAS with WITH clause") {
val df = Seq((1, 1)).toDF("c1", "c2")
df.createOrReplaceTempView("table1")
withTable("with_table1") {
sql(
"""
|CREATE TABLE with_table1 AS
|WITH T AS (
| SELECT *
| FROM table1
|)
|SELECT *
|FROM T
""".stripMargin)
val query = sql("SELECT * FROM with_table1")
checkAnswer(query, Row(1, 1) :: Nil)
}
}
test("explode nested Field") {
Seq(NestedArray1(NestedArray2(Seq(1, 2, 3)))).toDF.createOrReplaceTempView("nestedArray")
checkAnswer(
sql("SELECT ints FROM nestedArray LATERAL VIEW explode(a.b) a AS ints"),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
sql("SELECT `ints` FROM nestedArray LATERAL VIEW explode(a.b) `a` AS `ints`"),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
sql("SELECT `a`.`ints` FROM nestedArray LATERAL VIEW explode(a.b) `a` AS `ints`"),
Row(1) :: Row(2) :: Row(3) :: Nil)
checkAnswer(
sql(
"""
|SELECT `weird``tab`.`weird``col`
|FROM nestedArray
|LATERAL VIEW explode(a.b) `weird``tab` AS `weird``col`
""".stripMargin),
Row(1) :: Row(2) :: Row(3) :: Nil)
}
test("SPARK-4512 Fix attribute reference resolution error when using SORT BY") {
checkAnswer(
sql("SELECT * FROM (SELECT key + key AS a FROM src SORT BY value) t ORDER BY t.a"),
sql("SELECT key + key as a FROM src ORDER BY a").collect().toSeq
)
}
def checkRelation(
tableName: String,
isDataSourceTable: Boolean,
format: String,
userSpecifiedLocation: Option[String] = None): Unit = {
var relation: LogicalPlan = null
withSQLConf(
HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false",
HiveUtils.CONVERT_METASTORE_ORC.key -> "false") {
relation = EliminateSubqueryAliases(spark.table(tableName).queryExecution.analyzed)
}
val catalogTable =
sessionState.catalog.getTableMetadata(TableIdentifier(tableName))
relation match {
case LogicalRelation(r: HadoopFsRelation, _, _, _) =>
if (!isDataSourceTable) {
fail(
s"${classOf[HiveTableRelation].getCanonicalName} is expected, but found " +
s"${HadoopFsRelation.getClass.getCanonicalName}.")
}
userSpecifiedLocation match {
case Some(location) =>
assert(r.options("path") === location)
case None => // OK.
}
assert(catalogTable.provider.get === format)
case r: HiveTableRelation =>
if (isDataSourceTable) {
fail(
s"${HadoopFsRelation.getClass.getCanonicalName} is expected, but found " +
s"${classOf[HiveTableRelation].getCanonicalName}.")
}
userSpecifiedLocation match {
case Some(location) =>
assert(r.tableMeta.location === CatalogUtils.stringToURI(location))
case None => // OK.
}
// Also make sure that the format and serde are as desired.
assert(catalogTable.storage.inputFormat.get.toLowerCase(Locale.ROOT).contains(format))
assert(catalogTable.storage.outputFormat.get.toLowerCase(Locale.ROOT).contains(format))
val serde = catalogTable.storage.serde.get
format match {
case "sequence" | "text" => assert(serde.contains("LazySimpleSerDe"))
case "rcfile" => assert(serde.contains("LazyBinaryColumnarSerDe"))
case _ => assert(serde.toLowerCase(Locale.ROOT).contains(format))
}
}
// When a user-specified location is defined, the table type needs to be EXTERNAL.
val actualTableType = catalogTable.tableType
userSpecifiedLocation match {
case Some(location) =>
assert(actualTableType === CatalogTableType.EXTERNAL)
case None =>
assert(actualTableType === CatalogTableType.MANAGED)
}
}
test("CTAS without serde without location") {
withSQLConf(SQLConf.CONVERT_CTAS.key -> "true") {
val defaultDataSource = sessionState.conf.defaultDataSourceName
withTable("ctas1") {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
sql("CREATE TABLE IF NOT EXISTS ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
val message = intercept[AnalysisException] {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
}.getMessage
assert(message.contains("already exists"))
checkRelation("ctas1", isDataSourceTable = true, defaultDataSource)
}
// Specifying a database name for a query that can be converted to the data source write path
// is not allowed right now.
withTable("ctas1") {
sql("CREATE TABLE default.ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = true, defaultDataSource)
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as textfile" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "text")
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as sequencefile" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "sequence")
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as rcfile AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "rcfile")
}
withTable("ctas1") {
sql("CREATE TABLE ctas1 stored as orc AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation("ctas1", isDataSourceTable = false, "orc")
}
withTable("ctas1") {
sql(
"""
|CREATE TABLE ctas1 stored as parquet
|AS SELECT key k, value FROM src ORDER BY k, value
""".stripMargin)
checkRelation("ctas1", isDataSourceTable = false, "parquet")
}
}
}
test("CTAS with default fileformat") {
val table = "ctas1"
val ctas = s"CREATE TABLE IF NOT EXISTS $table SELECT key k, value FROM src"
Seq("orc", "parquet").foreach { dataSourceFormat =>
withSQLConf(
SQLConf.CONVERT_CTAS.key -> "true",
SQLConf.DEFAULT_DATA_SOURCE_NAME.key -> dataSourceFormat,
"hive.default.fileformat" -> "textfile") {
withTable(table) {
sql(ctas)
// The default datasource file format is controlled by `spark.sql.sources.default`.
// This testcase verifies that setting `hive.default.fileformat` has no impact on
// the target table's fileformat in case of CTAS.
checkRelation(tableName = table, isDataSourceTable = true, format = dataSourceFormat)
}
}
}
}
test("CTAS without serde with location") {
withSQLConf(SQLConf.CONVERT_CTAS.key -> "true") {
withTempDir { dir =>
val defaultDataSource = sessionState.conf.defaultDataSourceName
val tempLocation = dir.toURI.getPath.stripSuffix("/")
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 LOCATION 'file:$tempLocation/c1'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = true, defaultDataSource, Some(s"file:$tempLocation/c1"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 LOCATION 'file:$tempLocation/c2'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = true, defaultDataSource, Some(s"file:$tempLocation/c2"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 stored as textfile LOCATION 'file:$tempLocation/c3'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = false, "text", Some(s"file:$tempLocation/c3"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 stored as sequenceFile LOCATION 'file:$tempLocation/c4'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = false, "sequence", Some(s"file:$tempLocation/c4"))
}
withTable("ctas1") {
sql(s"CREATE TABLE ctas1 stored as rcfile LOCATION 'file:$tempLocation/c5'" +
" AS SELECT key k, value FROM src ORDER BY k, value")
checkRelation(
"ctas1", isDataSourceTable = false, "rcfile", Some(s"file:$tempLocation/c5"))
}
}
}
}
test("CTAS with serde") {
withTable("ctas1", "ctas2", "ctas3", "ctas4", "ctas5") {
sql("CREATE TABLE ctas1 AS SELECT key k, value FROM src ORDER BY k, value")
sql(
"""CREATE TABLE ctas2
| ROW FORMAT SERDE "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"
| WITH SERDEPROPERTIES("serde_p1"="p1","serde_p2"="p2")
| STORED AS RCFile
| TBLPROPERTIES("tbl_p1"="p11", "tbl_p2"="p22")
| AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin)
val storageCtas2 = spark.sessionState.catalog.
getTableMetadata(TableIdentifier("ctas2")).storage
assert(storageCtas2.inputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileInputFormat"))
assert(storageCtas2.outputFormat == Some("org.apache.hadoop.hive.ql.io.RCFileOutputFormat"))
assert(storageCtas2.serde == Some("org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"))
sql(
"""CREATE TABLE ctas3
| ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LINES TERMINATED BY '\012'
| STORED AS textfile AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin)
// the table schema may look like (key: integer, value: string)
sql(
"""CREATE TABLE IF NOT EXISTS ctas4 AS
| SELECT 1 AS key, value FROM src LIMIT 1""".stripMargin)
// do nothing because the table ctas4 already exists.
sql(
"""CREATE TABLE IF NOT EXISTS ctas4 AS
| SELECT key, value FROM src ORDER BY key, value""".stripMargin)
checkAnswer(
sql("SELECT k, value FROM ctas1 ORDER BY k, value"),
sql("SELECT key, value FROM src ORDER BY key, value"))
checkAnswer(
sql("SELECT key, value FROM ctas2 ORDER BY key, value"),
sql(
"""
SELECT key, value
FROM src
ORDER BY key, value"""))
checkAnswer(
sql("SELECT key, value FROM ctas3 ORDER BY key, value"),
sql(
"""
SELECT key, value
FROM src
ORDER BY key, value"""))
intercept[AnalysisException] {
sql(
"""CREATE TABLE ctas4 AS
| SELECT key, value FROM src ORDER BY key, value""".stripMargin)
}
checkAnswer(
sql("SELECT key, value FROM ctas4 ORDER BY key, value"),
sql("SELECT key, value FROM ctas4 LIMIT 1").collect().toSeq)
sql(
"""CREATE TABLE ctas5
| STORED AS parquet AS
| SELECT key, value
| FROM src
| ORDER BY key, value""".stripMargin)
val storageCtas5 = spark.sessionState.catalog.
getTableMetadata(TableIdentifier("ctas5")).storage
assert(storageCtas5.inputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetInputFormat"))
assert(storageCtas5.outputFormat ==
Some("org.apache.hadoop.hive.ql.io.parquet.MapredParquetOutputFormat"))
assert(storageCtas5.serde ==
Some("org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe"))
// use the Hive SerDe for parquet tables
withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false") {
checkAnswer(
sql("SELECT key, value FROM ctas5 ORDER BY key, value"),
sql("SELECT key, value FROM src ORDER BY key, value"))
}
}
}
test("specifying the column list for CTAS") {
withTempView("mytable1") {
Seq((1, "111111"), (2, "222222")).toDF("key", "value").createOrReplaceTempView("mytable1")
withTable("gen__tmp") {
sql("create table gen__tmp as select key as a, value as b from mytable1")
checkAnswer(
sql("SELECT a, b from gen__tmp"),
sql("select key, value from mytable1").collect())
}
withTable("gen__tmp") {
val e = intercept[AnalysisException] {
sql("create table gen__tmp(a int, b string) as select key, value from mytable1")
}.getMessage
assert(e.contains("Schema may not be specified in a Create Table As Select (CTAS)"))
}
withTable("gen__tmp") {
val e = intercept[AnalysisException] {
sql(
"""
|CREATE TABLE gen__tmp
|PARTITIONED BY (key string)
|AS SELECT key, value FROM mytable1
""".stripMargin)
}.getMessage
assert(e.contains("A Create Table As Select (CTAS) statement is not allowed to " +
"create a partitioned table using Hive's file formats"))
}
}
}
test("command substitution") {
sql("set tbl=src")
checkAnswer(
sql("SELECT key FROM ${hiveconf:tbl} ORDER BY key, value limit 1"),
sql("SELECT key FROM src ORDER BY key, value limit 1").collect().toSeq)
sql("set spark.sql.variable.substitute=false") // disable the substitution
sql("set tbl2=src")
intercept[Exception] {
sql("SELECT key FROM ${hiveconf:tbl2} ORDER BY key, value limit 1").collect()
}
sql("set spark.sql.variable.substitute=true") // enable the substitution
checkAnswer(
sql("SELECT key FROM ${hiveconf:tbl2} ORDER BY key, value limit 1"),
sql("SELECT key FROM src ORDER BY key, value limit 1").collect().toSeq)
}
test("ordering not in select") {
checkAnswer(
sql("SELECT key FROM src ORDER BY value"),
sql("SELECT key FROM (SELECT key, value FROM src ORDER BY value) a").collect().toSeq)
}
test("ordering not in agg") {
checkAnswer(
sql("SELECT key FROM src GROUP BY key, value ORDER BY value"),
sql("""
SELECT key
FROM (
SELECT key, value
FROM src
GROUP BY key, value
ORDER BY value) a""").collect().toSeq)
}
test("double nested data") {
withTable("test_ctas_1234") {
sparkContext.parallelize(Nested1(Nested2(Nested3(1))) :: Nil)
.toDF().createOrReplaceTempView("nested")
checkAnswer(
sql("SELECT f1.f2.f3 FROM nested"),
Row(1))
sql("CREATE TABLE test_ctas_1234 AS SELECT * from nested")
checkAnswer(
sql("SELECT * FROM test_ctas_1234"),
sql("SELECT * FROM nested").collect().toSeq)
intercept[AnalysisException] {
sql("CREATE TABLE test_ctas_1234 AS SELECT * from notexists").collect()
}
}
}
test("test CTAS") {
withTable("test_ctas_1234") {
sql("CREATE TABLE test_ctas_123 AS SELECT key, value FROM src")
checkAnswer(
sql("SELECT key, value FROM test_ctas_123 ORDER BY key"),
sql("SELECT key, value FROM src ORDER BY key").collect().toSeq)
}
}
test("SPARK-4825 save join to table") {
withTable("test1", "test2", "test") {
val testData = sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString)).toDF()
sql("CREATE TABLE test1 (key INT, value STRING)")
testData.write.mode(SaveMode.Append).insertInto("test1")
sql("CREATE TABLE test2 (key INT, value STRING)")
testData.write.mode(SaveMode.Append).insertInto("test2")
testData.write.mode(SaveMode.Append).insertInto("test2")
sql("CREATE TABLE test AS SELECT COUNT(a.value) FROM test1 a JOIN test2 b ON a.key = b.key")
checkAnswer(
table("test"),
sql("SELECT COUNT(a.value) FROM test1 a JOIN test2 b ON a.key = b.key").collect().toSeq)
}
}
test("SPARK-3708 Backticks aren't handled correctly is aliases") {
checkAnswer(
sql("SELECT k FROM (SELECT `key` AS `k` FROM src) a"),
sql("SELECT `key` FROM src").collect().toSeq)
}
test("SPARK-3834 Backticks not correctly handled in subquery aliases") {
checkAnswer(
sql("SELECT a.key FROM (SELECT key FROM src) `a`"),
sql("SELECT `key` FROM src").collect().toSeq)
}
test("SPARK-3814 Support Bitwise & operator") {
checkAnswer(
sql("SELECT case when 1&1=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
test("SPARK-3814 Support Bitwise | operator") {
checkAnswer(
sql("SELECT case when 1|0=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
test("SPARK-3814 Support Bitwise ^ operator") {
checkAnswer(
sql("SELECT case when 1^0=1 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
test("SPARK-3814 Support Bitwise ~ operator") {
checkAnswer(
sql("SELECT case when ~1=-2 then 1 else 0 end FROM src"),
sql("SELECT 1 FROM src").collect().toSeq)
}
test("SPARK-4154 Query does not work if it has 'not between' in Spark SQL and HQL") {
checkAnswer(sql("SELECT key FROM src WHERE key not between 0 and 10 order by key"),
sql("SELECT key FROM src WHERE key between 11 and 500 order by key").collect().toSeq)
}
test("SPARK-2554 SumDistinct partial aggregation") {
checkAnswer(sql("SELECT sum( distinct key) FROM src group by key order by key"),
sql("SELECT distinct key FROM src order by key").collect().toSeq)
}
test("SPARK-4963 DataFrame sample on mutable row return wrong result") {
sql("SELECT * FROM src WHERE key % 2 = 0")
.sample(withReplacement = false, fraction = 0.3)
.createOrReplaceTempView("sampled")
(1 to 10).foreach { i =>
checkAnswer(
sql("SELECT * FROM sampled WHERE key % 2 = 1"),
Seq.empty[Row])
}
}
test("SPARK-4699 SparkSession with Hive Support should be case insensitive by default") {
checkAnswer(
sql("SELECT KEY FROM Src ORDER BY value"),
sql("SELECT key FROM src ORDER BY value").collect().toSeq)
}
test("SPARK-5284 Insert into Hive throws NPE when a inner complex type field has a null value") {
val schema = StructType(
StructField("s",
StructType(
StructField("innerStruct", StructType(StructField("s1", StringType, true) :: Nil)) ::
StructField("innerArray", ArrayType(IntegerType), true) ::
StructField("innerMap", MapType(StringType, IntegerType)) :: Nil), true) :: Nil)
val row = Row(Row(null, null, null))
val rowRdd = sparkContext.parallelize(row :: Nil)
spark.createDataFrame(rowRdd, schema).createOrReplaceTempView("testTable")
sql(
"""CREATE TABLE nullValuesInInnerComplexTypes
| (s struct<innerStruct: struct<s1:string>,
| innerArray:array<int>,
| innerMap: map<string, int>>)
""".stripMargin).collect()
sql(
"""
|INSERT OVERWRITE TABLE nullValuesInInnerComplexTypes
|SELECT * FROM testTable
""".stripMargin)
checkAnswer(
sql("SELECT * FROM nullValuesInInnerComplexTypes"),
Row(Row(null, null, null))
)
sql("DROP TABLE nullValuesInInnerComplexTypes")
dropTempTable("testTable")
}
test("SPARK-4296 Grouping field with Hive UDF as sub expression") {
val ds = Seq("""{"a": "str", "b":"1", "c":"1970-01-01 00:00:00"}""").toDS()
read.json(ds).createOrReplaceTempView("data")
checkAnswer(
sql("SELECT concat(a, '-', b), year(c) FROM data GROUP BY concat(a, '-', b), year(c)"),
Row("str-1", 1970))
dropTempTable("data")
read.json(ds).createOrReplaceTempView("data")
checkAnswer(sql("SELECT year(c) + 1 FROM data GROUP BY year(c) + 1"), Row(1971))
dropTempTable("data")
}
test("resolve udtf in projection #1") {
val ds = (1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}""").toDS()
read.json(ds).createOrReplaceTempView("data")
val df = sql("SELECT explode(a) AS val FROM data")
val col = df("val")
}
test("resolve udtf in projection #2") {
val ds = (1 to 2).map(i => s"""{"a":[$i, ${i + 1}]}""").toDS()
read.json(ds).createOrReplaceTempView("data")
checkAnswer(sql("SELECT explode(map(1, 1)) FROM data LIMIT 1"), Row(1, 1) :: Nil)
checkAnswer(sql("SELECT explode(map(1, 1)) as (k1, k2) FROM data LIMIT 1"), Row(1, 1) :: Nil)
intercept[AnalysisException] {
sql("SELECT explode(map(1, 1)) as k1 FROM data LIMIT 1")
}
intercept[AnalysisException] {
sql("SELECT explode(map(1, 1)) as (k1, k2, k3) FROM data LIMIT 1")
}
}
// TGF with non-TGF in project is allowed in Spark SQL, but not in Hive
test("TGF with non-TGF in projection") {
val ds = Seq("""{"a": "1", "b":"1"}""").toDS()
read.json(ds).createOrReplaceTempView("data")
checkAnswer(
sql("SELECT explode(map(a, b)) as (k1, k2), a, b FROM data"),
Row("1", "1", "1", "1") :: Nil)
}
test("logical.Project should not be resolved if it contains aggregates or generators") {
// This test is used to test the fix of SPARK-5875.
    // The original issue was that a Project's `resolved` flag would be true even when it
    // contained AggregateExpressions or Generators, although such a Project is not in a
    // valid state (it cannot be executed). Because of this bug, the analysis rule
    // PreInsertionCasts would start to work before ImplicitGenerate and then generate
    // an invalid query plan.
val ds = (1 to 5).map(i => s"""{"a":[$i, ${i + 1}]}""").toDS()
read.json(ds).createOrReplaceTempView("data")
withSQLConf(SQLConf.CONVERT_CTAS.key -> "false") {
sql("CREATE TABLE explodeTest (key bigInt)")
table("explodeTest").queryExecution.analyzed match {
case SubqueryAlias(_, r: HiveTableRelation) => // OK
case _ =>
fail("To correctly test the fix of SPARK-5875, explodeTest should be a MetastoreRelation")
}
sql(s"INSERT OVERWRITE TABLE explodeTest SELECT explode(a) AS val FROM data")
checkAnswer(
sql("SELECT key from explodeTest"),
(1 to 5).flatMap(i => Row(i) :: Row(i + 1) :: Nil)
)
sql("DROP TABLE explodeTest")
dropTempTable("data")
}
}
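
  // Editor's illustrative sketch (not part of the original suite): the comment in the
  // test above reasons about when a Project that contains a generator is considered
  // resolved. One way to observe the analyzer's rewrite is to inspect the analyzed plan
  // of a query that uses `explode` in the projection; the string check below is an
  // assumption about how the plan is rendered, not something asserted by the suite.
  def generatorInProjectionSketch(): Unit = {
    val analyzed = sql("SELECT explode(array(1, 2)) AS v").queryExecution.analyzed
    assert(analyzed.toString.contains("Generate"),
      "explode in a projection is expected to be planned via a Generate node")
  }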
test("sanity test for SPARK-6618") {
val threads: Seq[Thread] = (1 to 10).map { i =>
new Thread("test-thread-" + i) {
override def run(): Unit = {
val tableName = s"SPARK_6618_table_$i"
sql(s"CREATE TABLE $tableName (col1 string)")
sessionState.catalog.lookupRelation(TableIdentifier(tableName))
table(tableName)
tables()
sql(s"DROP TABLE $tableName")
}
}
}
threads.foreach(_.start())
threads.foreach(_.join(10000))
}
test("SPARK-5203 union with different decimal precision") {
Seq.empty[(java.math.BigDecimal, java.math.BigDecimal)]
.toDF("d1", "d2")
.select($"d1".cast(DecimalType(10, 5)).as("d"))
.createOrReplaceTempView("dn")
sql("select d from dn union all select d * 2 from dn")
.queryExecution.analyzed
}
test("Star Expansion - script transform") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 100000).map { i => (i, i, i) }
data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(100000 === sql("SELECT TRANSFORM (*) USING 'cat' FROM script_trans").count())
}
test("test script transform for stdout") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 100000).map { i => (i, i, i) }
data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(100000 ===
sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat' AS (a,b,c) FROM script_trans").count())
}
test("test script transform for stderr") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 100000).map { i => (i, i, i) }
data.toDF("d1", "d2", "d3").createOrReplaceTempView("script_trans")
assert(0 ===
sql("SELECT TRANSFORM (d1, d2, d3) USING 'cat 1>&2' AS (a,b,c) FROM script_trans").count())
}
test("test script transform data type") {
assume(TestUtils.testCommandAvailable("/bin/bash"))
val data = (1 to 5).map { i => (i, i) }
data.toDF("key", "value").createOrReplaceTempView("test")
checkAnswer(
sql("""FROM
|(FROM test SELECT TRANSFORM(key, value) USING 'cat' AS (`thing1` int, thing2 string)) t
|SELECT thing1 + 1
""".stripMargin), (2 to 6).map(i => Row(i)))
}
test("Sorting columns are not in Generate") {
withTempView("data") {
spark.range(1, 5)
.select(array($"id", $"id" + 1).as("a"), $"id".as("b"), (lit(10) - $"id").as("c"))
.createOrReplaceTempView("data")
// case 1: missing sort columns are resolvable if join is true
checkAnswer(
sql("SELECT explode(a) AS val, b FROM data WHERE b < 2 order by val, c"),
Row(1, 1) :: Row(2, 1) :: Nil)
// case 2: missing sort columns are resolvable if join is false
checkAnswer(
sql("SELECT explode(a) AS val FROM data order by val, c"),
Seq(1, 2, 2, 3, 3, 4, 4, 5).map(i => Row(i)))
// case 3: missing sort columns are resolvable if join is true and outer is true
checkAnswer(
sql(
"""
|SELECT C.val, b FROM data LATERAL VIEW OUTER explode(a) C as val
|where b < 2 order by c, val, b
""".stripMargin),
Row(1, 1) :: Row(2, 1) :: Nil)
}
}
test("test case key when") {
(1 to 5).map(i => (i, i.toString)).toDF("k", "v").createOrReplaceTempView("t")
checkAnswer(
sql("SELECT CASE k WHEN 2 THEN 22 WHEN 4 THEN 44 ELSE 0 END, v FROM t"),
Row(0, "1") :: Row(22, "2") :: Row(0, "3") :: Row(44, "4") :: Row(0, "5") :: Nil)
}
test("SPARK-7269 Check analysis failed in case in-sensitive") {
Seq(1, 2, 3).map { i =>
(i.toString, i.toString)
}.toDF("key", "value").createOrReplaceTempView("df_analysis")
sql("SELECT kEy from df_analysis group by key").collect()
sql("SELECT kEy+3 from df_analysis group by key+3").collect()
sql("SELECT kEy+3, a.kEy, A.kEy from df_analysis A group by key").collect()
sql("SELECT cast(kEy+1 as Int) from df_analysis A group by cast(key+1 as int)").collect()
sql("SELECT cast(kEy+1 as Int) from df_analysis A group by key+1").collect()
sql("SELECT 2 from df_analysis A group by key+1").collect()
intercept[AnalysisException] {
sql("SELECT kEy+1 from df_analysis group by key+3")
}
intercept[AnalysisException] {
sql("SELECT cast(key+2 as Int) from df_analysis A group by cast(key+1 as int)")
}
}
test("Cast STRING to BIGINT") {
checkAnswer(sql("SELECT CAST('775983671874188101' as BIGINT)"), Row(775983671874188101L))
}
test("dynamic partition value test") {
try {
sql("set hive.exec.dynamic.partition.mode=nonstrict")
// date
sql("drop table if exists dynparttest1")
sql("create table dynparttest1 (value int) partitioned by (pdate date)")
sql(
"""
|insert into table dynparttest1 partition(pdate)
| select count(*), cast('2015-05-21' as date) as pdate from src
""".stripMargin)
checkAnswer(
sql("select * from dynparttest1"),
Seq(Row(500, java.sql.Date.valueOf("2015-05-21"))))
// decimal
sql("drop table if exists dynparttest2")
sql("create table dynparttest2 (value int) partitioned by (pdec decimal(5, 1))")
sql(
"""
|insert into table dynparttest2 partition(pdec)
| select count(*), cast('100.12' as decimal(5, 1)) as pdec from src
""".stripMargin)
checkAnswer(
sql("select * from dynparttest2"),
Seq(Row(500, new java.math.BigDecimal("100.1"))))
} finally {
sql("drop table if exists dynparttest1")
sql("drop table if exists dynparttest2")
sql("set hive.exec.dynamic.partition.mode=strict")
}
}
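
  // Editor's note with an illustrative sketch (not part of the original suite): the
  // decimal partition value above comes back as 100.1 because the partition column is
  // declared as decimal(5, 1), so casting '100.12' keeps only one fractional digit.
  def decimalPartitionValueSketch(): Unit = {
    assert(sql("select cast('100.12' as decimal(5, 1))").head().getDecimal(0).toPlainString === "100.1")
  }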
test("Call add jar in a different thread (SPARK-8306)") {
@volatile var error: Option[Throwable] = None
val thread = new Thread {
      override def run(): Unit = {
// To make sure this test works, this jar should not be loaded in another place.
sql(
s"ADD JAR ${hiveContext.getHiveFile("hive-contrib-0.13.1.jar").getCanonicalPath()}")
try {
sql(
"""
|CREATE TEMPORARY FUNCTION example_max
|AS 'org.apache.hadoop.hive.contrib.udaf.example.UDAFExampleMax'
""".stripMargin)
} catch {
case throwable: Throwable =>
error = Some(throwable)
}
}
}
thread.start()
thread.join()
error match {
case Some(throwable) =>
fail("CREATE TEMPORARY FUNCTION should not fail.", throwable)
case None => // OK
}
}
test("SPARK-6785: HiveQuerySuite - Date comparison test 2") {
checkAnswer(
sql("SELECT CAST(CAST(0 AS timestamp) AS date) > CAST(0 AS timestamp) FROM src LIMIT 1"),
Row(false))
}
test("SPARK-6785: HiveQuerySuite - Date cast") {
// new Date(0) == 1970-01-01 00:00:00.0 GMT == 1969-12-31 16:00:00.0 PST
checkAnswer(
sql(
"""
| SELECT
| CAST(CAST(0 AS timestamp) AS date),
| CAST(CAST(CAST(0 AS timestamp) AS date) AS string),
| CAST(0 AS timestamp),
| CAST(CAST(0 AS timestamp) AS string),
| CAST(CAST(CAST('1970-01-01 23:00:00' AS timestamp) AS date) AS timestamp)
| FROM src LIMIT 1
""".stripMargin),
Row(
Date.valueOf("1969-12-31"),
String.valueOf("1969-12-31"),
Timestamp.valueOf("1969-12-31 16:00:00"),
String.valueOf("1969-12-31 16:00:00"),
Timestamp.valueOf("1970-01-01 00:00:00")))
}
test("SPARK-8588 HiveTypeCoercion.inConversion fires too early") {
val df =
createDataFrame(Seq((1, "2014-01-01"), (2, "2015-01-01"), (3, "2016-01-01")))
df.toDF("id", "datef").createOrReplaceTempView("test_SPARK8588")
checkAnswer(
sql(
"""
|select id, concat(year(datef))
|from test_SPARK8588 where concat(year(datef), ' year') in ('2015 year', '2014 year')
""".stripMargin),
Row(1, "2014") :: Row(2, "2015") :: Nil
)
dropTempTable("test_SPARK8588")
}
test("SPARK-9371: fix the support for special chars in column names for hive context") {
val ds = Seq("""{"a": {"c.b": 1}, "b.$q": [{"a@!.q": 1}], "q.w": {"w.i&": [1]}}""").toDS()
read.json(ds).createOrReplaceTempView("t")
checkAnswer(sql("SELECT a.`c.b`, `b.$q`[0].`a@!.q`, `q.w`.`w.i&`[0] FROM t"), Row(1, 1, 1))
}
test("Convert hive interval term into Literal of CalendarIntervalType") {
checkAnswer(sql("select interval '10-9' year to month"),
Row(CalendarInterval.fromString("interval 10 years 9 months")))
checkAnswer(sql("select interval '20 15:40:32.99899999' day to second"),
Row(CalendarInterval.fromString("interval 2 weeks 6 days 15 hours 40 minutes " +
"32 seconds 99 milliseconds 899 microseconds")))
checkAnswer(sql("select interval '30' year"),
Row(CalendarInterval.fromString("interval 30 years")))
checkAnswer(sql("select interval '25' month"),
Row(CalendarInterval.fromString("interval 25 months")))
checkAnswer(sql("select interval '-100' day"),
Row(CalendarInterval.fromString("interval -14 weeks -2 days")))
checkAnswer(sql("select interval '40' hour"),
Row(CalendarInterval.fromString("interval 1 days 16 hours")))
checkAnswer(sql("select interval '80' minute"),
Row(CalendarInterval.fromString("interval 1 hour 20 minutes")))
checkAnswer(sql("select interval '299.889987299' second"),
Row(CalendarInterval.fromString(
"interval 4 minutes 59 seconds 889 milliseconds 987 microseconds")))
}
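
  // Editor's illustrative sketch (not part of the original suite): a year-to-month
  // interval is carried as a month count, so the '10-9' literal above corresponds to
  // 10 * 12 + 9 = 129 months. Treating the two spellings as equal is an assumption
  // about CalendarInterval equality, not something asserted by the original tests.
  def yearToMonthIntervalSketch(): Unit = {
    checkAnswer(sql("select interval '10-9' year to month"),
      Row(CalendarInterval.fromString("interval 129 months")))
  }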
test("specifying database name for a temporary view is not allowed") {
withTempPath { dir =>
withTempView("db.t") {
val path = dir.toURI.toString
val df = sparkContext.parallelize(1 to 10).map(i => (i, i.toString)).toDF("num", "str")
df
.write
.format("parquet")
.save(path)
// We don't support creating a temporary table while specifying a database
intercept[AnalysisException] {
spark.sql(
s"""
|CREATE TEMPORARY VIEW db.t
|USING parquet
|OPTIONS (
| path '$path'
|)
""".stripMargin)
}
// If you use backticks to quote the name then it's OK.
spark.sql(
s"""
|CREATE TEMPORARY VIEW `db.t`
|USING parquet
|OPTIONS (
| path '$path'
|)
""".stripMargin)
checkAnswer(spark.table("`db.t`"), df)
}
}
}
test("SPARK-10593 same column names in lateral view") {
val df = spark.sql(
"""
|select
|insideLayer2.json as a2
|from (select '{"layer1": {"layer2": "text inside layer 2"}}' json) test
|lateral view json_tuple(json, 'layer1') insideLayer1 as json
|lateral view json_tuple(insideLayer1.json, 'layer2') insideLayer2 as json
""".stripMargin
)
checkAnswer(df, Row("text inside layer 2") :: Nil)
}
ignore("SPARK-10310: " +
"script transformation using default input/output SerDe and record reader/writer") {
spark
.range(5)
.selectExpr("id AS a", "id AS b")
.createOrReplaceTempView("test")
val scriptFilePath = getTestResourcePath("data")
checkAnswer(
sql(
s"""FROM(
| FROM test SELECT TRANSFORM(a, b)
| USING 'python $scriptFilePath/scripts/test_transform.py "\t"'
| AS (c STRING, d STRING)
|) t
|SELECT c
""".stripMargin),
(0 until 5).map(i => Row(i + "#")))
}
ignore("SPARK-10310: script transformation using LazySimpleSerDe") {
spark
.range(5)
.selectExpr("id AS a", "id AS b")
.createOrReplaceTempView("test")
val scriptFilePath = getTestResourcePath("data")
val df = sql(
s"""FROM test
|SELECT TRANSFORM(a, b)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES('field.delim' = '|')
|USING 'python $scriptFilePath/scripts/test_transform.py "|"'
|AS (c STRING, d STRING)
|ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe'
|WITH SERDEPROPERTIES('field.delim' = '|')
""".stripMargin)
checkAnswer(df, (0 until 5).map(i => Row(i + "#", i + "#")))
}
test("SPARK-10741: Sort on Aggregate using parquet") {
withTable("test10741") {
withTempView("src") {
Seq("a" -> 5, "a" -> 9, "b" -> 6).toDF("c1", "c2").createOrReplaceTempView("src")
sql("CREATE TABLE test10741 STORED AS PARQUET AS SELECT * FROM src")
}
checkAnswer(sql(
"""
|SELECT c1, AVG(c2) AS c_avg
|FROM test10741
|GROUP BY c1
|HAVING (AVG(c2) > 5) ORDER BY c1
""".stripMargin), Row("a", 7.0) :: Row("b", 6.0) :: Nil)
checkAnswer(sql(
"""
|SELECT c1, AVG(c2) AS c_avg
|FROM test10741
|GROUP BY c1
|ORDER BY AVG(c2)
""".stripMargin), Row("b", 6.0) :: Row("a", 7.0) :: Nil)
}
}
test("run sql directly on files - parquet") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.parquet(f.getCanonicalPath)
// data source type is case insensitive
checkAnswer(sql(s"select id from Parquet.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.parquet`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select a.id from parquet.`${f.getCanonicalPath}` as a"),
df)
})
}
test("run sql directly on files - orc") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.orc(f.getCanonicalPath)
// data source type is case insensitive
checkAnswer(sql(s"select id from ORC.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.hive.orc`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select a.id from orc.`${f.getCanonicalPath}` as a"),
df)
})
}
test("run sql directly on files - csv") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.csv(f.getCanonicalPath)
// data source type is case insensitive
checkAnswer(sql(s"select cast(_c0 as int) id from CSV.`${f.getCanonicalPath}`"),
df)
checkAnswer(
sql(s"select cast(_c0 as int) id from `com.databricks.spark.csv`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select cast(a._c0 as int) id from csv.`${f.getCanonicalPath}` as a"),
df)
})
}
test("run sql directly on files - json") {
val df = spark.range(100).toDF()
withTempPath(f => {
df.write.json(f.getCanonicalPath)
// data source type is case insensitive
checkAnswer(sql(s"select id from jsoN.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select id from `org.apache.spark.sql.json`.`${f.getCanonicalPath}`"),
df)
checkAnswer(sql(s"select a.id from json.`${f.getCanonicalPath}` as a"),
df)
})
}
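
  // Editor's illustrative sketch (not part of the original suite): the four
  // "run sql directly on files" tests above all exercise the same `format`.`path`
  // syntax; here it is condensed into a single check for one format, reusing the
  // `withTempPath`, `sql` and `checkAnswer` helpers already used in this file.
  def directFileQuerySketch(): Unit = {
    val df = spark.range(10).toDF()
    withTempPath { f =>
      df.write.json(f.getCanonicalPath)
      checkAnswer(sql(s"select id from json.`${f.getCanonicalPath}`"), df)
    }
  }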
test("run sql directly on files - hive") {
withTempPath(f => {
spark.range(100).toDF.write.parquet(f.getCanonicalPath)
var e = intercept[AnalysisException] {
sql(s"select id from hive.`${f.getCanonicalPath}`")
}
assert(e.message.contains("Unsupported data source type for direct query on files: hive"))
// data source type is case insensitive
e = intercept[AnalysisException] {
sql(s"select id from HIVE.`${f.getCanonicalPath}`")
}
assert(e.message.contains("Unsupported data source type for direct query on files: HIVE"))
})
}
test("SPARK-8976 Wrong Result for Rollup #1") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"SELECT count(*) AS cnt, key % 5, $gid FROM src GROUP BY key%5 WITH ROLLUP"),
Seq(
(113, 3, 0),
(91, 0, 0),
(500, null, 1),
(84, 1, 0),
(105, 2, 0),
(107, 4, 0)
).map(i => Row(i._1, i._2, i._3)))
}
}
test("SPARK-8976 Wrong Result for Rollup #2") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM src GROUP BY key%5, key-5
|WITH ROLLUP ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, 0, 5, 0),
(1, 0, 15, 0),
(1, 0, 25, 0),
(1, 0, 60, 0),
(1, 0, 75, 0),
(1, 0, 80, 0),
(1, 0, 100, 0),
(1, 0, 140, 0),
(1, 0, 145, 0),
(1, 0, 150, 0)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
test("SPARK-8976 Wrong Result for Rollup #3") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM (SELECT key, key%2, key - 5 FROM src) t GROUP BY key%5, key-5
|WITH ROLLUP ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, 0, 5, 0),
(1, 0, 15, 0),
(1, 0, 25, 0),
(1, 0, 60, 0),
(1, 0, 75, 0),
(1, 0, 80, 0),
(1, 0, 100, 0),
(1, 0, 140, 0),
(1, 0, 145, 0),
(1, 0, 150, 0)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
test("SPARK-8976 Wrong Result for CUBE #1") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"SELECT count(*) AS cnt, key % 5, $gid FROM src GROUP BY key%5 WITH CUBE"),
Seq(
(113, 3, 0),
(91, 0, 0),
(500, null, 1),
(84, 1, 0),
(105, 2, 0),
(107, 4, 0)
).map(i => Row(i._1, i._2, i._3)))
}
}
test("SPARK-8976 Wrong Result for CUBE #2") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM (SELECT key, key%2, key - 5 FROM src) t GROUP BY key%5, key-5
|WITH CUBE ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, null, -3, 2),
(1, null, -1, 2),
(1, null, 3, 2),
(1, null, 4, 2),
(1, null, 5, 2),
(1, null, 6, 2),
(1, null, 12, 2),
(1, null, 14, 2),
(1, null, 15, 2),
(1, null, 22, 2)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
test("SPARK-8976 Wrong Result for GroupingSet") {
Seq("grouping_id()", "grouping__id").foreach { gid =>
checkAnswer(sql(
s"""
|SELECT count(*) AS cnt, key % 5 AS k1, key-5 AS k2, $gid AS k3
|FROM (SELECT key, key%2, key - 5 FROM src) t GROUP BY key%5, key-5
|GROUPING SETS (key%5, key-5) ORDER BY cnt, k1, k2, k3 LIMIT 10
""".stripMargin),
Seq(
(1, null, -3, 2),
(1, null, -1, 2),
(1, null, 3, 2),
(1, null, 4, 2),
(1, null, 5, 2),
(1, null, 6, 2),
(1, null, 12, 2),
(1, null, 14, 2),
(1, null, 15, 2),
(1, null, 22, 2)
).map(i => Row(i._1, i._2, i._3, i._4)))
}
}
ignore("SPARK-10562: partition by column with mixed case name") {
withTable("tbl10562") {
val df = Seq(2012 -> "a").toDF("Year", "val")
df.write.partitionBy("Year").saveAsTable("tbl10562")
checkAnswer(sql("SELECT year FROM tbl10562"), Row(2012))
checkAnswer(sql("SELECT Year FROM tbl10562"), Row(2012))
checkAnswer(sql("SELECT yEAr FROM tbl10562"), Row(2012))
// TODO(ekl) this is causing test flakes [SPARK-18167], but we think the issue is derby specific
// checkAnswer(sql("SELECT val FROM tbl10562 WHERE Year > 2015"), Nil)
checkAnswer(sql("SELECT val FROM tbl10562 WHERE Year == 2012"), Row("a"))
}
}
test("SPARK-11453: append data to partitioned table") {
withTable("tbl11453") {
Seq("1" -> "10", "2" -> "20").toDF("i", "j")
.write.partitionBy("i").saveAsTable("tbl11453")
Seq("3" -> "30").toDF("i", "j")
.write.mode(SaveMode.Append).partitionBy("i").saveAsTable("tbl11453")
checkAnswer(
spark.read.table("tbl11453").select("i", "j").orderBy("i"),
Row("1", "10") :: Row("2", "20") :: Row("3", "30") :: Nil)
// make sure case sensitivity is correct.
Seq("4" -> "40").toDF("i", "j")
.write.mode(SaveMode.Append).partitionBy("I").saveAsTable("tbl11453")
checkAnswer(
spark.read.table("tbl11453").select("i", "j").orderBy("i"),
Row("1", "10") :: Row("2", "20") :: Row("3", "30") :: Row("4", "40") :: Nil)
}
}
test("SPARK-11590: use native json_tuple in lateral view") {
checkAnswer(sql(
"""
|SELECT a, b
|FROM (SELECT '{"f1": "value1", "f2": 12}' json) test
|LATERAL VIEW json_tuple(json, 'f1', 'f2') jt AS a, b
""".stripMargin), Row("value1", "12"))
// we should use `c0`, `c1`... as the name of fields if no alias is provided, to follow hive.
checkAnswer(sql(
"""
|SELECT c0, c1
|FROM (SELECT '{"f1": "value1", "f2": 12}' json) test
|LATERAL VIEW json_tuple(json, 'f1', 'f2') jt
""".stripMargin), Row("value1", "12"))
// we can also use `json_tuple` in project list.
checkAnswer(sql(
"""
|SELECT json_tuple(json, 'f1', 'f2')
|FROM (SELECT '{"f1": "value1", "f2": 12}' json) test
""".stripMargin), Row("value1", "12"))
// we can also mix `json_tuple` with other project expressions.
checkAnswer(sql(
"""
|SELECT json_tuple(json, 'f1', 'f2'), 3.14, str
|FROM (SELECT '{"f1": "value1", "f2": 12}' json, 'hello' as str) test
""".stripMargin), Row("value1", "12", BigDecimal("3.14"), "hello"))
}
test("multi-insert with lateral view") {
withTempView("source") {
spark.range(10)
.select(array($"id", $"id" + 1).as("arr"), $"id")
.createOrReplaceTempView("source")
withTable("dest1", "dest2") {
sql("CREATE TABLE dest1 (i INT)")
sql("CREATE TABLE dest2 (i INT)")
sql(
"""
|FROM source
|INSERT OVERWRITE TABLE dest1
|SELECT id
|WHERE id > 3
|INSERT OVERWRITE TABLE dest2
|select col LATERAL VIEW EXPLODE(arr) exp AS col
|WHERE col > 3
""".stripMargin)
checkAnswer(
spark.table("dest1"),
sql("SELECT id FROM source WHERE id > 3"))
checkAnswer(
spark.table("dest2"),
sql("SELECT col FROM source LATERAL VIEW EXPLODE(arr) exp AS col WHERE col > 3"))
}
}
}
test("derived from Hive query file: drop_database_removes_partition_dirs.q") {
    // This test verifies that if a partition's directory lies outside the table's current
    // location, that directory is also removed when the database is dropped.
sql("DROP database if exists test_database CASCADE")
sql("CREATE DATABASE test_database")
val previousCurrentDB = sessionState.catalog.getCurrentDatabase
sql("USE test_database")
sql("drop table if exists test_table")
val tempDir = System.getProperty("test.tmp.dir")
assert(tempDir != null, "TestHive should set test.tmp.dir.")
sql(
"""
|CREATE TABLE test_table (key int, value STRING)
|PARTITIONED BY (part STRING)
|STORED AS RCFILE
|LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table'
""".stripMargin)
sql(
"""
|ALTER TABLE test_table ADD PARTITION (part = '1')
|LOCATION 'file:${system:test.tmp.dir}/drop_database_removes_partition_dirs_table2/part=1'
""".stripMargin)
sql(
"""
|INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
|SELECT * FROM default.src
""".stripMargin)
checkAnswer(
sql("select part, key, value from test_table"),
sql("select '1' as part, key, value from default.src")
)
val path = new Path(
new Path(s"file:$tempDir"),
"drop_database_removes_partition_dirs_table2")
val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
// The partition dir is not empty.
assert(fs.listStatus(new Path(path, "part=1")).nonEmpty)
sql(s"USE $previousCurrentDB")
sql("DROP DATABASE test_database CASCADE")
    // This table dir should not exist after we drop the entire database with the mode
    // of CASCADE. This probably indicates a Hive bug, which returns the wrong table
    // root location, so the table's directory is still there. We should change the
    // condition to !fs.exists(path) once Spark SQL handles the filesystem operations.
assert(
fs.exists(path),
"Thank you for making the changes of letting Spark SQL handle filesystem operations " +
"for DDL commands. Originally, Hive metastore does not delete the table root directory " +
"for this case. Now, please change this condition to !fs.exists(path).")
}
test("derived from Hive query file: drop_table_removes_partition_dirs.q") {
    // This test verifies that if a partition's directory lies outside the table's current
    // location, that directory is also removed when the table is dropped.
sql("drop table if exists test_table")
val tempDir = System.getProperty("test.tmp.dir")
assert(tempDir != null, "TestHive should set test.tmp.dir.")
sql(
"""
|CREATE TABLE test_table (key int, value STRING)
|PARTITIONED BY (part STRING)
|STORED AS RCFILE
|LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2'
""".stripMargin)
sql(
"""
|ALTER TABLE test_table ADD PARTITION (part = '1')
|LOCATION 'file:${system:test.tmp.dir}/drop_table_removes_partition_dirs_table2/part=1'
""".stripMargin)
sql(
"""
|INSERT OVERWRITE TABLE test_table PARTITION (part = '1')
|SELECT * FROM default.src
""".stripMargin)
checkAnswer(
sql("select part, key, value from test_table"),
sql("select '1' as part, key, value from src")
)
val path = new Path(new Path(s"file:$tempDir"), "drop_table_removes_partition_dirs_table2")
val fs = path.getFileSystem(sparkContext.hadoopConfiguration)
// The partition dir is not empty.
assert(fs.listStatus(new Path(path, "part=1")).nonEmpty)
sql("drop table test_table")
assert(fs.exists(path), "This is an external table, so the data should not have been dropped")
}
test("select partitioned table") {
val table = "table_with_partition"
withTable(table) {
sql(
s"""
|CREATE TABLE $table(c1 string)
|PARTITIONED BY (p1 string,p2 string,p3 string,p4 string,p5 string)
""".stripMargin)
sql(
s"""
|INSERT OVERWRITE TABLE $table
|PARTITION (p1='a',p2='b',p3='c',p4='d',p5='e')
|SELECT 'blarr'
""".stripMargin)
      // the project list is in the same order as the partitioning columns in the table definition
checkAnswer(
sql(s"SELECT p1, p2, p3, p4, p5, c1 FROM $table"),
Row("a", "b", "c", "d", "e", "blarr") :: Nil)
      // the project list is not in the same order as the partitioning columns in the table definition
checkAnswer(
sql(s"SELECT p2, p3, p4, p1, p5, c1 FROM $table"),
Row("b", "c", "d", "a", "e", "blarr") :: Nil)
      // the project list contains only some of the partitioning columns in the table definition
checkAnswer(
sql(s"SELECT p2, p1, p5, c1 FROM $table"),
Row("b", "a", "e", "blarr") :: Nil)
}
}
test("SPARK-14981: DESC not supported for sorting columns") {
withTable("t") {
val cause = intercept[ParseException] {
sql(
"""CREATE TABLE t USING PARQUET
|OPTIONS (PATH '/path/to/file')
|CLUSTERED BY (a) SORTED BY (b DESC) INTO 2 BUCKETS
|AS SELECT 1 AS a, 2 AS b
""".stripMargin
)
}
assert(cause.getMessage.contains("Column ordering must be ASC, was 'DESC'"))
}
}
test("insert into datasource table") {
withTable("tbl") {
sql("CREATE TABLE tbl(i INT, j STRING) USING parquet")
Seq(1 -> "a").toDF("i", "j").write.mode("overwrite").insertInto("tbl")
checkAnswer(sql("SELECT * FROM tbl"), Row(1, "a"))
}
}
test("spark-15557 promote string test") {
withTable("tbl") {
sql("CREATE TABLE tbl(c1 string, c2 string)")
sql("insert into tbl values ('3', '2.3')")
checkAnswer(
sql("select (cast (99 as decimal(19,6)) + cast('3' as decimal)) * cast('2.3' as decimal)"),
Row(204.0)
)
checkAnswer(
sql("select (cast(99 as decimal(19,6)) + '3') *'2.3' from tbl"),
Row(234.6)
)
checkAnswer(
sql("select (cast(99 as decimal(19,6)) + c1) * c2 from tbl"),
Row(234.6)
)
}
}
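
  // Editor's note with an illustrative sketch (not part of the original suite): the
  // 204.0 vs 234.6 results above come from decimal defaulting. `cast('2.3' as decimal)`
  // uses the default decimal(10, 0) type, so '2.3' becomes 2 and (99 + 3) * 2 = 204.0,
  // while the bare string literals are promoted to a fractional type, giving
  // (99 + 3) * 2.3 = 234.6.
  def defaultDecimalSketch(): Unit = {
    assert(sql("select cast('2.3' as decimal)").head().getDecimal(0).intValue() === 2)
  }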
test("SPARK-15752 optimize metadata only query for hive table") {
withSQLConf(SQLConf.OPTIMIZER_METADATA_ONLY.key -> "true") {
withTable("data_15752", "srcpart_15752", "srctext_15752") {
val df = Seq((1, "2"), (3, "4")).toDF("key", "value")
df.createOrReplaceTempView("data_15752")
sql(
"""
|CREATE TABLE srcpart_15752 (col1 INT, col2 STRING)
|PARTITIONED BY (partcol1 INT, partcol2 STRING) STORED AS parquet
""".stripMargin)
for (partcol1 <- Seq(0, 1); partcol2 <- Seq("a", "b")) {
sql(
s"""
|INSERT OVERWRITE TABLE srcpart_15752
|PARTITION (partcol1='$partcol1', partcol2='$partcol2')
|select key, value from data_15752
""".stripMargin)
}
checkAnswer(
sql("select partcol1 from srcpart_15752 group by partcol1"),
Row(0) :: Row(1) :: Nil)
checkAnswer(
sql("select partcol1 from srcpart_15752 where partcol1 = 1 group by partcol1"),
Row(1))
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srcpart_15752 group by partcol1"),
Row(0, 2) :: Row(1, 2) :: Nil)
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srcpart_15752 where partcol1 = 1 " +
"group by partcol1"),
Row(1, 2) :: Nil)
checkAnswer(sql("select distinct partcol1 from srcpart_15752"), Row(0) :: Row(1) :: Nil)
checkAnswer(sql("select distinct partcol1 from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(
sql("select distinct col from (select partcol1 + 1 as col from srcpart_15752 " +
"where partcol1 = 1) t"),
Row(2))
checkAnswer(sql("select distinct partcol1 from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(sql("select max(partcol1) from srcpart_15752"), Row(1))
checkAnswer(sql("select max(partcol1) from srcpart_15752 where partcol1 = 1"), Row(1))
checkAnswer(sql("select max(partcol1) from (select partcol1 from srcpart_15752) t"), Row(1))
checkAnswer(
sql("select max(col) from (select partcol1 + 1 as col from srcpart_15752 " +
"where partcol1 = 1) t"),
Row(2))
sql(
"""
|CREATE TABLE srctext_15752 (col1 INT, col2 STRING)
|PARTITIONED BY (partcol1 INT, partcol2 STRING) STORED AS textfile
""".stripMargin)
for (partcol1 <- Seq(0, 1); partcol2 <- Seq("a", "b")) {
sql(
s"""
|INSERT OVERWRITE TABLE srctext_15752
|PARTITION (partcol1='$partcol1', partcol2='$partcol2')
|select key, value from data_15752
""".stripMargin)
}
checkAnswer(
sql("select partcol1 from srctext_15752 group by partcol1"),
Row(0) :: Row(1) :: Nil)
checkAnswer(
sql("select partcol1 from srctext_15752 where partcol1 = 1 group by partcol1"),
Row(1))
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srctext_15752 group by partcol1"),
Row(0, 2) :: Row(1, 2) :: Nil)
checkAnswer(
sql("select partcol1, count(distinct partcol2) from srctext_15752 where partcol1 = 1 " +
"group by partcol1"),
Row(1, 2) :: Nil)
checkAnswer(sql("select distinct partcol1 from srctext_15752"), Row(0) :: Row(1) :: Nil)
checkAnswer(sql("select distinct partcol1 from srctext_15752 where partcol1 = 1"), Row(1))
checkAnswer(
sql("select distinct col from (select partcol1 + 1 as col from srctext_15752 " +
"where partcol1 = 1) t"),
Row(2))
checkAnswer(sql("select max(partcol1) from srctext_15752"), Row(1))
checkAnswer(sql("select max(partcol1) from srctext_15752 where partcol1 = 1"), Row(1))
checkAnswer(sql("select max(partcol1) from (select partcol1 from srctext_15752) t"), Row(1))
checkAnswer(
sql("select max(col) from (select partcol1 + 1 as col from srctext_15752 " +
"where partcol1 = 1) t"),
Row(2))
}
}
}
test("SPARK-17354: Partitioning by dates/timestamps works with Parquet vectorized reader") {
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true") {
sql(
"""CREATE TABLE order(id INT)
|PARTITIONED BY (pd DATE, pt TIMESTAMP)
|STORED AS PARQUET
""".stripMargin)
sql("set hive.exec.dynamic.partition.mode=nonstrict")
sql(
"""INSERT INTO TABLE order PARTITION(pd, pt)
|SELECT 1 AS id, CAST('1990-02-24' AS DATE) AS pd, CAST('1990-02-24' AS TIMESTAMP) AS pt
""".stripMargin)
val actual = sql("SELECT * FROM order")
val expected = sql(
"SELECT 1 AS id, CAST('1990-02-24' AS DATE) AS pd, CAST('1990-02-24' AS TIMESTAMP) AS pt")
checkAnswer(actual, expected)
sql("DROP TABLE order")
}
}
test("SPARK-17108: Fix BIGINT and INT comparison failure in spark sql") {
withTable("t1", "t2", "t3") {
sql("create table t1(a map<bigint, array<string>>)")
sql("select * from t1 where a[1] is not null")
sql("create table t2(a map<int, array<string>>)")
sql("select * from t2 where a[1] is not null")
sql("create table t3(a map<bigint, array<string>>)")
sql("select * from t3 where a[1L] is not null")
}
}
test("SPARK-17796 Support wildcard character in filename for LOAD DATA LOCAL INPATH") {
withTempDir { dir =>
val path = dir.toURI.toString.stripSuffix("/")
val dirPath = dir.getAbsoluteFile
for (i <- 1 to 3) {
Files.write(s"$i", new File(dirPath, s"part-r-0000$i"), StandardCharsets.UTF_8)
}
for (i <- 5 to 7) {
Files.write(s"$i", new File(dirPath, s"part-s-0000$i"), StandardCharsets.UTF_8)
}
withTable("load_t") {
sql("CREATE TABLE load_t (a STRING)")
sql(s"LOAD DATA LOCAL INPATH '$path/*part-r*' INTO TABLE load_t")
checkAnswer(sql("SELECT * FROM load_t"), Seq(Row("1"), Row("2"), Row("3")))
val m = intercept[AnalysisException] {
sql("LOAD DATA LOCAL INPATH '/non-exist-folder/*part*' INTO TABLE load_t")
}.getMessage
assert(m.contains("LOAD DATA input path does not exist"))
val m2 = intercept[AnalysisException] {
sql(s"LOAD DATA LOCAL INPATH '$path*/*part*' INTO TABLE load_t")
}.getMessage
assert(m2.contains("LOAD DATA input path allows only filename wildcard"))
}
}
}
test("Insert overwrite with partition") {
withTable("tableWithPartition") {
sql(
"""
|CREATE TABLE tableWithPartition (key int, value STRING)
|PARTITIONED BY (part STRING)
""".stripMargin)
sql(
"""
|INSERT OVERWRITE TABLE tableWithPartition PARTITION (part = '1')
|SELECT * FROM default.src
""".stripMargin)
checkAnswer(
sql("SELECT part, key, value FROM tableWithPartition"),
sql("SELECT '1' AS part, key, value FROM default.src")
)
sql(
"""
|INSERT OVERWRITE TABLE tableWithPartition PARTITION (part = '1')
|SELECT * FROM VALUES (1, "one"), (2, "two"), (3, null) AS data(key, value)
""".stripMargin)
checkAnswer(
sql("SELECT part, key, value FROM tableWithPartition"),
sql(
"""
|SELECT '1' AS part, key, value FROM VALUES
|(1, "one"), (2, "two"), (3, null) AS data(key, value)
""".stripMargin)
)
}
}
test("SPARK-19292: filter with partition columns should be case-insensitive on Hive tables") {
withTable("tbl") {
withSQLConf(SQLConf.CASE_SENSITIVE.key -> "false") {
sql("CREATE TABLE tbl(i int, j int) USING hive PARTITIONED BY (j)")
sql("INSERT INTO tbl PARTITION(j=10) SELECT 1")
checkAnswer(spark.table("tbl"), Row(1, 10))
checkAnswer(sql("SELECT i, j FROM tbl WHERE J=10"), Row(1, 10))
checkAnswer(spark.table("tbl").filter($"J" === 10), Row(1, 10))
}
}
}
test("SPARK-17409: Do Not Optimize Query in CTAS (Hive Serde Table) More Than Once") {
withTable("bar") {
withTempView("foo") {
sql("select 0 as id").createOrReplaceTempView("foo")
// If we optimize the query in CTAS more than once, the following saveAsTable will fail
// with the error: `GROUP BY position 0 is not in select list (valid range is [1, 1])`
sql("SELECT * FROM foo group by id").toDF().write.format("hive").saveAsTable("bar")
checkAnswer(spark.table("bar"), Row(0) :: Nil)
val tableMetadata = spark.sessionState.catalog.getTableMetadata(TableIdentifier("bar"))
assert(tableMetadata.provider == Some("hive"), "the expected table is a Hive serde table")
}
}
}
test("Auto alias construction of get_json_object") {
val df = Seq(("1", """{"f1": "value1", "f5": 5.23}""")).toDF("key", "jstring")
val expectedMsg = "Cannot create a table having a column whose name contains commas " +
"in Hive metastore. Table: `default`.`t`; Column: get_json_object(jstring, $.f1)"
withTable("t") {
val e = intercept[AnalysisException] {
df.select($"key", functions.get_json_object($"jstring", "$.f1"))
.write.format("hive").saveAsTable("t")
}.getMessage
assert(e.contains(expectedMsg))
}
withTempView("tempView") {
withTable("t") {
df.createTempView("tempView")
val e = intercept[AnalysisException] {
sql("CREATE TABLE t AS SELECT key, get_json_object(jstring, '$.f1') FROM tempView")
}.getMessage
assert(e.contains(expectedMsg))
}
}
}
test("SPARK-19912 String literals should be escaped for Hive metastore partition pruning") {
withTable("spark_19912") {
Seq(
(1, "p1", "q1"),
(2, "'", "q2"),
(3, "\"", "q3"),
(4, "p1\" and q=\"q1", "q4")
).toDF("a", "p", "q").write.partitionBy("p", "q").saveAsTable("spark_19912")
val table = spark.table("spark_19912")
checkAnswer(table.filter($"p" === "'").select($"a"), Row(2))
checkAnswer(table.filter($"p" === "\"").select($"a"), Row(3))
checkAnswer(table.filter($"p" === "p1\" and q=\"q1").select($"a"), Row(4))
}
}
test("SPARK-21101 UDTF should override initialize(ObjectInspector[] args)") {
withUserDefinedFunction("udtf_stack1" -> true, "udtf_stack2" -> true) {
sql(
s"""
|CREATE TEMPORARY FUNCTION udtf_stack1
|AS 'org.apache.spark.sql.hive.execution.UDTFStack'
|USING JAR '${hiveContext.getHiveFile("SPARK-21101-1.0.jar").toURI}'
""".stripMargin)
val cnt =
sql("SELECT udtf_stack1(2, 'A', 10, date '2015-01-01', 'B', 20, date '2016-01-01')").count()
assert(cnt === 2)
sql(
s"""
|CREATE TEMPORARY FUNCTION udtf_stack2
|AS 'org.apache.spark.sql.hive.execution.UDTFStack2'
|USING JAR '${hiveContext.getHiveFile("SPARK-21101-1.0.jar").toURI}'
""".stripMargin)
val e = intercept[org.apache.spark.sql.AnalysisException] {
sql("SELECT udtf_stack2(2, 'A', 10, date '2015-01-01', 'B', 20, date '2016-01-01')")
}
assert(
e.getMessage.contains("public StructObjectInspector initialize(ObjectInspector[] args)"))
}
}
test("SPARK-21721: Clear FileSystem deleterOnExit cache if path is successfully removed") {
val table = "test21721"
withTable(table) {
val deleteOnExitField = classOf[FileSystem].getDeclaredField("deleteOnExit")
deleteOnExitField.setAccessible(true)
val fs = FileSystem.get(spark.sparkContext.hadoopConfiguration)
val setOfPath = deleteOnExitField.get(fs).asInstanceOf[Set[Path]]
val testData = sparkContext.parallelize(1 to 10).map(i => TestData(i, i.toString)).toDF()
sql(s"CREATE TABLE $table (key INT, value STRING)")
val pathSizeToDeleteOnExit = setOfPath.size()
(0 to 10).foreach(_ => testData.write.mode(SaveMode.Append).insertInto(table))
assert(setOfPath.size() == pathSizeToDeleteOnExit)
}
}
test("SPARK-21912 ORC/Parquet table should not create invalid column names") {
Seq(" ", ",", ";", "{", "}", "(", ")", "\n", "\t", "=").foreach { name =>
Seq("ORC", "PARQUET").foreach { source =>
withTable("t21912") {
val m = intercept[AnalysisException] {
sql(s"CREATE TABLE t21912(`col$name` INT) USING $source")
}.getMessage
assert(m.contains(s"contains invalid character(s)"))
val m2 = intercept[AnalysisException] {
sql(s"CREATE TABLE t21912 USING $source AS SELECT 1 `col$name`")
}.getMessage
assert(m2.contains(s"contains invalid character(s)"))
withSQLConf(HiveUtils.CONVERT_METASTORE_PARQUET.key -> "false") {
val m3 = intercept[AnalysisException] {
sql(s"CREATE TABLE t21912(`col$name` INT) USING hive OPTIONS (fileFormat '$source')")
}.getMessage
assert(m3.contains(s"contains invalid character(s)"))
}
sql(s"CREATE TABLE t21912(`col` INT) USING $source")
val m4 = intercept[AnalysisException] {
sql(s"ALTER TABLE t21912 ADD COLUMNS(`col$name` INT)")
}.getMessage
assert(m4.contains(s"contains invalid character(s)"))
}
}
}
}
Seq("orc", "parquet").foreach { format =>
test(s"SPARK-18355 Read data from a hive table with a new column - $format") {
val client =
spark.sharedState.externalCatalog.unwrapped.asInstanceOf[HiveExternalCatalog].client
Seq("true", "false").foreach { value =>
withSQLConf(
HiveUtils.CONVERT_METASTORE_ORC.key -> value,
HiveUtils.CONVERT_METASTORE_PARQUET.key -> value) {
withTempDatabase { db =>
client.runSqlHive(
s"""
|CREATE TABLE $db.t(
| click_id string,
| search_id string,
| uid bigint)
|PARTITIONED BY (
| ts string,
| hour string)
|STORED AS $format
""".stripMargin)
client.runSqlHive(
s"""
|INSERT INTO TABLE $db.t
|PARTITION (ts = '98765', hour = '01')
|VALUES (12, 2, 12345)
""".stripMargin
)
checkAnswer(
sql(s"SELECT click_id, search_id, uid, ts, hour FROM $db.t"),
Row("12", "2", 12345, "98765", "01"))
client.runSqlHive(s"ALTER TABLE $db.t ADD COLUMNS (dummy string)")
checkAnswer(
sql(s"SELECT click_id, search_id FROM $db.t"),
Row("12", "2"))
checkAnswer(
sql(s"SELECT search_id, click_id FROM $db.t"),
Row("2", "12"))
checkAnswer(
sql(s"SELECT search_id FROM $db.t"),
Row("2"))
checkAnswer(
sql(s"SELECT dummy, click_id FROM $db.t"),
Row(null, "12"))
checkAnswer(
sql(s"SELECT click_id, search_id, uid, dummy, ts, hour FROM $db.t"),
Row("12", "2", 12345, null, "98765", "01"))
}
}
}
}
}
test("SPARK-24085 scalar subquery in partitioning expression") {
Seq("orc", "parquet").foreach { format =>
Seq(true, false).foreach { isConverted =>
withSQLConf(
HiveUtils.CONVERT_METASTORE_ORC.key -> s"$isConverted",
HiveUtils.CONVERT_METASTORE_PARQUET.key -> s"$isConverted",
"hive.exec.dynamic.partition.mode" -> "nonstrict") {
withTable(format) {
withTempPath { tempDir =>
sql(
s"""
|CREATE TABLE ${format} (id_value string)
|PARTITIONED BY (id_type string)
|LOCATION '${tempDir.toURI}'
|STORED AS ${format}
""".stripMargin)
sql(s"insert into $format values ('1','a')")
sql(s"insert into $format values ('2','a')")
sql(s"insert into $format values ('3','b')")
sql(s"insert into $format values ('4','b')")
checkAnswer(
sql(s"SELECT * FROM $format WHERE id_type = (SELECT 'b')"),
Row("3", "b") :: Row("4", "b") :: Nil)
}
}
}
}
}
}
}
|
bravo-zhang/spark
|
sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
|
Scala
|
apache-2.0
| 81,994 |
package components
import javax.inject._
import com.sksamuel.elastic4s.ElasticsearchClientUri
import com.sksamuel.elastic4s.http.HttpClient
import org.biosys.pubmed.search.PubmedSearchApi
import play.api.Configuration
import play.api.inject.ApplicationLifecycle
import scala.concurrent.Future
@Singleton
class PubmedSearchComponent @Inject() (lifecycle: ApplicationLifecycle, configuration: Configuration) extends PubmedSearchApi {
  // Elasticsearch connection settings come from the Play configuration.
  val elasticsearchClientHost = configuration.get[String]("elastic.client.host")
  val elasticsearchClientPort = configuration.get[Int]("elastic.client.port")
  val elasticSearchClient: HttpClient = HttpClient(ElasticsearchClientUri(elasticsearchClientHost, elasticsearchClientPort))
  // Close the client's underlying HTTP connections when the application shuts down.
  lifecycle.addStopHook { () =>
    Future.successful(elasticSearchClient.close())
  }
}
|
sdor/biosys
|
labnotes/app/components/PubmedSearchComponent.scala
|
Scala
|
gpl-2.0
| 809 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package swave.core.util
import org.scalacheck.Gen
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import org.scalatest.{FreeSpec, Matchers}
import swave.core.impl.util.ResizableRingBuffer
class ResizableRingBufferSpec extends FreeSpec with Matchers with GeneratorDrivenPropertyChecks {
"A ResizableRingBuffer should" - {
val bufferGen = for {
bit ← Gen.choose(0, 8)
} yield new ResizableRingBuffer[String](initialCap = 1, maxCap = 1 << bit)
"take in exactly `maxAvailable` elements" in {
forAll(bufferGen) { buf ⇒
Stream.continually("x").takeWhile(buf.write).toArray.length shouldEqual buf.maxCapacity
}
}
"read back exactly the number of elems previously written" in {
val gen = for {
buf ← bufferGen
count ← Gen.choose(0, buf.maxCapacity)
} yield (buf, count)
forAll(gen) {
case (buf, count) ⇒
val values = List.tabulate(count)(_.toString)
values.foreach(s ⇒ buf.write(s) shouldBe true)
List.fill(count)(buf.read()) shouldEqual values
buf.isEmpty shouldBe true
a[NoSuchElementException] should be thrownBy buf.read()
}
}
"pass a simple stress-test" in {
val gen = for {
buf ← bufferGen
opCount ← Gen.choose(5, 50)
ops ← Gen.listOfN(opCount, Gen.choose(-20, 50))
} yield (buf, ops)
forAll(gen) {
case (buf, ops) ⇒
val queue = collection.mutable.Queue[String]()
val ints = Iterator.from(0)
ops foreach {
case readCount if readCount < 0 ⇒
-readCount times {
buf.isEmpty shouldEqual queue.isEmpty
if (queue.nonEmpty) queue.dequeue() shouldEqual buf.read()
else a[NoSuchElementException] should be thrownBy buf.read()
}
case writeCount if writeCount > 0 ⇒
writeCount times {
val next = ints.next().toString
if (buf.write(next)) queue.enqueue(next)
}
case 0 ⇒ // ignore
}
}
}
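
    // Editor's illustrative sketch (not part of the original spec): a tiny deterministic
    // walk-through of the write/read/isEmpty API exercised by the property tests above,
    // using only the constructor and members those tests already rely on.
    def deterministicSketch(): Unit = {
      val buf = new ResizableRingBuffer[String](initialCap = 1, maxCap = 4)
      List("a", "b", "c").foreach(s ⇒ buf.write(s) shouldBe true)
      buf.read() shouldEqual "a"
      buf.read() shouldEqual "b"
      buf.read() shouldEqual "c"
      buf.isEmpty shouldBe true
    }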
}
}
|
sirthias/swave
|
core/src/test/scala/swave/core/util/ResizableRingBufferSpec.scala
|
Scala
|
mpl-2.0
| 2,368 |
package com.scalableQuality.quick.mantle.parsing
import org.scalatest.{FlatSpec, Matchers}
class DelimitedRowIdentifierTest extends FlatSpec with Matchers {
"DelimitedRowIdentifier.apply(columnIdentifiers)" should
"return Left[ErrorMessage] when no columnIdentifiers are supplied" in {
val delimiterEither = LiteralDelimiter(",")
delimiterEither match {
case Right(delimiter) =>
val delimiterRowIdentifier = DelimitedRowIdentifier(Nil, delimiter)
delimiterRowIdentifier shouldBe a[Left[_, _]]
case _ => fail()
}
}
"DelimitedRowIdentifier.canIdentify" should "identify a row when the row is right" in {
val track2dataRow = RawRow(";,1234567890123445,=,99011200XXXX00000000?*", 1)
val firstColumnIdentifierElem =
<ColumnIdentifier matchAgainst=";" label="Start sentinel" position="1" />
val secondColumnIdentifierElem =
<ColumnIdentifier matchAgainst="[0-9]{16}" label="Card Number" position="2"/>
val thirdColumnIdentifierElem =
<ColumnIdentifier matchAgainst="=" label="Field separator" position="3"/>
val firstColumnIdentifierEither =
DelimitedColumnIdentifier(firstColumnIdentifierElem.attributes)
val secondColumnIdentifierEither =
DelimitedColumnIdentifier(secondColumnIdentifierElem.attributes)
val thirdColumnIdentifierEither =
DelimitedColumnIdentifier(thirdColumnIdentifierElem.attributes)
val delimiterEither = LiteralDelimiter(",")
(firstColumnIdentifierEither,
secondColumnIdentifierEither,
thirdColumnIdentifierEither,
delimiterEither) match {
case (Right((_, firstColumnIdentifier)),
Right((_, secondColumnIdentifier)),
Right((_, thirdColumnIdentifier)),
Right(delimiter)) =>
val identifiersList = List(firstColumnIdentifier,
secondColumnIdentifier,
thirdColumnIdentifier)
val rowIdentifierEither =
DelimitedRowIdentifier(identifiersList, delimiter)
rowIdentifierEither match {
case Right(rowIdentifier) =>
rowIdentifier.canIdentify(track2dataRow) shouldBe true
case _ => fail()
}
      case _ => fail()
}
}
it should "not identify a row when the row is not right" in {
val track2dataRow = RawRow("not track 2", 1)
val firstColumnIdentifierElem =
<ColumnIdentifier matchAgainst=";" label="Start sentinel" position="1" />
val secondColumnIdentifierElem =
<ColumnIdentifier matchAgainst="[0-9]{16}" label="Card Number" position="2"/>
val thirdColumnIdentifierElem =
<ColumnIdentifier matchAgainst="=" label="Field separator" position="3"/>
val firstColumnIdentifierEither =
DelimitedColumnIdentifier(firstColumnIdentifierElem.attributes)
val secondColumnIdentifierEither =
DelimitedColumnIdentifier(secondColumnIdentifierElem.attributes)
val thirdColumnIdentifierEither =
DelimitedColumnIdentifier(thirdColumnIdentifierElem.attributes)
val delimiterEither = LiteralDelimiter(",")
(firstColumnIdentifierEither,
secondColumnIdentifierEither,
thirdColumnIdentifierEither,
delimiterEither) match {
case (Right((_, firstColumnIdentifier)),
Right((_, secondColumnIdentifier)),
Right((_, thirdColumnIdentifier)),
Right(delimiter)) =>
val identifiersList = List(firstColumnIdentifier,
secondColumnIdentifier,
thirdColumnIdentifier)
val rowIdentifierEither =
DelimitedRowIdentifier(identifiersList, delimiter)
rowIdentifierEither match {
case Right(rowIdentifier) =>
rowIdentifier.canIdentify(track2dataRow) shouldBe false
case _ => fail()
}
      case _ => fail()
}
}
it should "not identify an empty row" in {
val track2dataRow = RawRow("", 1)
val firstColumnIdentifierElem =
<ColumnIdentifier matchAgainst=";" label="Start sentinel" position="1" />
val secondColumnIdentifierElem =
<ColumnIdentifier matchAgainst="[0-9]{16}" label="Card Number" position="2"/>
val thirdColumnIdentifierElem =
<ColumnIdentifier matchAgainst="=" label="Field separator" position="3"/>
val firstColumnIdentifierEither =
DelimitedColumnIdentifier(firstColumnIdentifierElem.attributes)
val secondColumnIdentifierEither =
DelimitedColumnIdentifier(secondColumnIdentifierElem.attributes)
val thirdColumnIdentifierEither =
DelimitedColumnIdentifier(thirdColumnIdentifierElem.attributes)
val delimiterEither = LiteralDelimiter(",")
(firstColumnIdentifierEither,
secondColumnIdentifierEither,
thirdColumnIdentifierEither,
delimiterEither) match {
case (Right((_, firstColumnIdentifier)),
Right((_, secondColumnIdentifier)),
Right((_, thirdColumnIdentifier)),
Right(delimiter)) =>
val identifiersList = List(firstColumnIdentifier,
secondColumnIdentifier,
thirdColumnIdentifier)
val rowIdentifierEither =
DelimitedRowIdentifier(identifiersList, delimiter)
rowIdentifierEither match {
case Right(rowIdentifier) =>
rowIdentifier.canIdentify(track2dataRow) shouldBe false
case _ => fail()
}
      case _ => fail()
}
}
}
|
MouslihAbdelhakim/Quick
|
src/test/scala/com/scalableQuality/quick/mantle/parsing/DelimitedRowIdentifierTest.scala
|
Scala
|
apache-2.0
| 5,490 |
package net.flaviusb.atomish
import scala.collection.mutable.{Map => MMap, MutableList => MList}
class PreUniverse { self =>
var gensyms: MMap[Int, AtomishThing] = MMap[Int, AtomishThing]()
var currgs: Int = 1
var scopes: MList[MMap[String, AtomishThing]] = MList() // New scopes go on the front of the list
class AtomishFn(code: AtomishThing, args: AtomishArray, activatable: Boolean = true, docstring: Option[String] = None) extends
AtomishFnPre(code, args, activatable, docstring) {
override def activate(received_args: AtomishArgs): AtomishThing = {
val expected_args = cells("args") match {
case (x: AtomishArray) => x
        case _ => AtomishArray(Array()) // Should possibly signal a condition here - malformed arglist condition?
}
val the_code = cells("code")
if(expected_args.value.length == 0) {
if(received_args.args.length == 0) {
self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(the_code))))
} else {
println("Received too many arguments: received " + received_args.args.length.toString() + " but expected 0.")
AtomishUnset
}
} else {
// Trim leading newlines in each argument
var trimmed_args = expected_args.value.map(_ match {
case AtomishForm(arg_chain) => {
AtomishForm(arg_chain.dropWhile(_ == AtomishNL))
}
case x => x
})
// Separate arguments into kinds
var (slurpy: Option[(String, Option[AtomishCode])], kwslurpy: Option[(String, Option[AtomishCode])]) = (None, None)
val (finargs_a: Array[(String, (String, Option[AtomishCode]))], kwargs_a: Array[(String, (String, Option[AtomishCode]))]) = trimmed_args.flatMap(_ match {
case AtomishMessage(name) => {
if(name.startsWith("+:")) {
kwslurpy = Some((name.substring(2, name.length), None))
Array[(String, (String, Option[AtomishCode]))]()
} else if(name.startsWith("+")) {
slurpy = Some((name.substring(1, name.length), None))
Array[(String, (String, Option[AtomishCode]))]()
} else {
Array[(String, (String, Option[AtomishCode]))](((if(name.endsWith(":")) { "kw" } else {"positional" }),
((if(name.endsWith(":")) { name.substring(0, name.length - 1) } else { name }), None)))
}
}
case AtomishForm(List(AtomishMessage(name), rest @ _*)) => {
if(name.startsWith("+:")) {
kwslurpy = Some((name.substring(2, name.length), (if(rest.length == 0) { None } else { Some(AtomishForm(rest.toList)) } )))
Array[(String, (String, Option[AtomishCode]))]()
} else if(name.startsWith("+")) {
slurpy = Some((name.substring(1, name.length), (if(rest.length == 0) { None } else { Some(AtomishForm(rest.toList)) } )))
Array[(String, (String, Option[AtomishCode]))]()
} else {
Array[(String, (String, Option[AtomishCode]))](((if(name.endsWith(":")) { "kw" } else {"positional" }),
((if(name.endsWith(":")) { name.substring(0, name.length - 1) } else { name }),
(if(rest.length == 0) { None } else { Some(AtomishForm(rest.toList)) } ))))
}
}
case _ => Array[(String, (String, Option[AtomishCode]))]()
}).partition(x => x._1 != "kw")
val (finargs: Array[(String, Option[AtomishCode])], kwargs: Array[(String, Option[AtomishCode])]) = (finargs_a.map(x => x._2),
kwargs_a.map(x => x._2))
val needed_positional_args = finargs.count(x => x._2 == None)
val needed_keyword_args = kwargs.count(x => x._2 == None)
//println(finargs.toList)
//println(x.args)
val (fpositional_a: Array[Either[AtomishThing, (String, AtomishThing)]], fkeyword_a: Array[Either[AtomishThing, (String,
AtomishThing)]]) = received_args.args.toArray.partition(_ match {
case _: Left[AtomishThing, (String, AtomishThing)] => true
case _: Right[AtomishThing, (String, AtomishThing)] => false
})
val (fpositional: Array[AtomishThing], fkeyword: Array[(String, AtomishThing)]) = (fpositional_a.map(_.left.get),
fkeyword_a.map(_.right.get))
if((fpositional.length < needed_positional_args) || (fkeyword.length < needed_keyword_args)) {
println("Too few args.")
println("Got "+fpositional.length.toString()+" positional, needed "+needed_positional_args.toString())
println("Got "+fkeyword.length.toString()+" keyword, needed "+needed_keyword_args.toString())
println(finargs.toList.toString())
println(kwargs.toList.toString())
null // Should raise a condition - too few arguments
} else {
val slurped_positional_args: Array[AtomishThing] = (if(slurpy != None) { fpositional.drop(finargs.length) } else { Array() })
val slurped_keyword_args: Array[(AtomishThing, AtomishThing)] = (if(kwslurpy != None) { fkeyword.drop(kwargs.length) } else {
Array() }).map(a => (AtomishString(a._1), a._2))
val letified_args: Array[(String, AtomishThing)] = (fpositional.dropRight(slurped_positional_args.length).zip(finargs).map(a
=> (a._2._1, a._1)) ++ finargs.drop(fpositional.length).map(a => (a._1, a._2.get)) ++ fkeyword.dropRight(slurped_keyword_args.length).zip(kwargs).map(a
=> (a._2._1, a._1._2)) ++ kwargs.drop(fkeyword.length).map(a => (a._1, a._2.get)) ++
slurpy.map(a => (if(slurped_positional_args.length != 0) { (a._1, AtomishArray(slurped_positional_args)) } else if (a._2
!= None) { (a._1,
a._2.get) } else { (a._1, AtomishArray(Array())) })) ++
kwslurpy.map(a => (if(slurped_keyword_args.length != 0) { (a._1, AtomishMap(MMap() ++ slurped_keyword_args)) } else if (a._2 !=
None) { (a._1,
a._2.get) } else { (a._1, AtomishMap(MMap[AtomishThing, AtomishThing]())) }))
).map(a => (a._1,
self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(a._2 /*.asInstanceOf[AtomishThing]*/))))))
scopes = (MMap() ++ letified_args) +: scopes;
//println(code.toString())
//println("Array(" + letified_args.map(ar => "(\\"" + ar._1 + "\\", " + ar._2.toString() + ")").mkString(", ") + ")")
//println(scopes)
var result = self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(code))));
var sco = scopes.tail;
scopes = sco;
//println(result.toString())
//AtomishUnset
result
}
}
}
}
def fn(activatable: Boolean) = {
QAlienProxy(ctd => {
if(ctd.args.length == 0) {
new AtomishFn(AtomishUnset, AtomishArray(Array()), activatable)
} else if(ctd.args.length == 1) {
//AlienProxy(_ => self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(ctd.args(0))))))
new AtomishFn(ctd.args(0), AtomishArray(Array()), activatable)
} else {
// We have at least one arg and a body; that arg may be a docstring though
var (docstring: Option[String], args: Array[AtomishCode], code: AtomishCode) = (ctd.args(0) match {
case AtomishInterpolatedString(chunks) => {
var docstring: String = chunks.map(_ match {
case x: AtomishString => x
case x: AtomishCode => (self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(x)))) match {
case y: AtomishString => y
case AtomishInt(y) => y.toString()
case AtomishDecimal(y) => y.toString()
case z => z.toString()
})}).mkString;
var args = ctd.args.drop(1).dropRight(1)
//println(args.toList)
var code = ctd.args.last
(Some(docstring), args, code)
}
case AtomishString(docstring) => {
var args = ctd.args.drop(1).dropRight(1)
//println(args.toList)
var code = ctd.args.last
(Some(docstring), args, code)
}
case _ => {
var args = ctd.args.dropRight(1)
//println(args.toList)
var code = ctd.args.last
(None, args, code)
}
})
// Trim leading newlines in each argument
var trimmed_args: Array[AtomishThing] = args.map(_ match {
case AtomishForm(arg_chain) => {
AtomishForm(arg_chain.dropWhile(_ == AtomishNL))
}
case x => x
})
new AtomishFn(code, AtomishArray(trimmed_args), activatable, docstring)
}
})
}
var roots: MMap[String, AtomishThing] = MMap[String, AtomishThing](
"version" -> AtomishDecimal(0.1),
"say" -> AlienProxy(_.args match {
case List(Left(AtomishString(x))) => {
println(x)
AtomishUnset
}
case x => {
println(x.toString())
AtomishUnset
}
}),
"setCell" -> AlienProxy(_.args match {
case List(Left(AtomishString(name)), Left(value: AtomishThing)) => {
self(AtomishPlace(AtomishMessage(name))) = Option(value)
value
}
}),
"cell" -> AlienProxy(_.args match {
case List(Left(AtomishString(name))) => {
self(AtomishPlace(AtomishMessage(name))).get
}
}),
"hasCell" -> AlienProxy(_.args match {
case List(Left(AtomishString(name))) => {
val thing = self(AtomishPlace(AtomishMessage(name)))
AtomishBoolean((thing != None) && (thing != Some(AtomishUnset)))
}
}),
"let" -> QAlienProxy(ctd => {
var args = (ctd.args grouped(2) filter(_.length == 2) map((x: Array[AtomishCode]) => ((x(0) match {
case AtomishForm(things) => things.filter(_ != AtomishNL)(0).asInstanceOf[AtomishMessage].name
case AtomishMessage(name) => name
}), self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(x(1)))))))).toMap;
var arrcode: Array[AtomishCode] = ((ctd.args grouped(2) filter(_.length == 1) flatMap((x: Array[AtomishCode]) => x)).toArray);
var code = arrcode(0)
scopes = (MMap() ++ args) +: scopes;
//println(code.toString())
var result = self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(code))));
var sco = scopes.tail;
scopes = sco;
//println(result.toString())
//AtomishUnset
result
}),
"true" -> AtomishBoolean(true),
"false" -> AtomishBoolean(false),
"if" -> QAlienProxy(ctd => {
if(ctd.args.length == 0) {
AtomishUnset
} else {
var real_arg_zero = ctd.args(0)
var it_name: Option[String] = (ctd.args(0) match {
case AtomishForm(List(AtomishMessage(maybe_name), x @_*)) => {
if(maybe_name.endsWith(":")) {
real_arg_zero = AtomishForm(x.toList)
Some(maybe_name.substring(0, maybe_name.length - 1))
} else {
None
}
}
case _ => None
})
var test = self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(real_arg_zero))))
if (
(test == AtomishBoolean(true)) ||
(test.cells.isDefinedAt("isTruthy") &&
(self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(test.cells("isTruthy"))))) == AtomishBoolean(true))) ||
(test.cells.isDefinedAt("asBool") &&
(self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(test.cells("asBool"))))) == AtomishBoolean(true)))
) {
if(ctd.args.length >= 2) {
for(name <- it_name) {
scopes = MMap(name -> test) +: scopes;
}
var result = self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(ctd.args(1)))));
if (it_name != None) {
var sco = scopes.tail;
scopes = sco;
}
result
} else {
AtomishUnset
}
} else if((ctd.args.length >= 3) && (
(test == AtomishBoolean(false)) ||
(test.cells.isDefinedAt("isFalsy") &&
(self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(test.cells("isFalsy"))))) == AtomishBoolean(true))) ||
(test.cells.isDefinedAt("asBool") &&
(self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(test.cells("asBool"))))) == AtomishBoolean(false)))
)) {
for(name <- it_name) {
scopes = MMap(name -> test) +: scopes;
}
var result = self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(ctd.args(2)))));
if (it_name != None) {
var sco = scopes.tail;
scopes = sco;
}
result
} else {
AtomishUnset
}
}
}),
"fn" -> fn(true),
"fnx" -> fn(false),
"'" -> QAlienProxy(ctd => {
var quoted = AtomishCall("'", ctd.args)
quoted.cells("asArray") = AlienProxy(_ => AtomishArray(ctd.args.asInstanceOf[Array[net.flaviusb.atomish.AtomishThing]]))
quoted
}),
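// unquote strips one level of quoting; a quote holding several elements comes back wrapped in AtomishCommated.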
"unquote" -> AlienProxy(_.args match {
case List(Left(AtomishCall("'", x))) => {
if(x.length != 1) {
AtomishCommated(x)
} else {
x(0)
}
}
}),
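// '' (quasiquote): walks the quoted tree, expanding unquote (`) and unquote-splicing (`*) calls via eval
// while leaving everything else quoted.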
"''" -> QAlienProxy(ctd => {
def unqq(code: AtomishCode): AtomishCode = code match {
case AtomishCall("'", x) => AtomishCall("'", x)
case AtomishCall("`", x) => { // Unquote
var qq_bits = x.flatMap(arg =>
self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(arg)))) match {
case a: AtomishCode => Array[AtomishCode](a)
case _ => Array[AtomishCode]()
})
if(qq_bits.length != 1) {
AtomishCommated(qq_bits)
} else {
qq_bits(0)
}
}
case AtomishCall("`*", x) => { // Unquote-splicing
var qq_bits = x.flatMap(arg =>
self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(arg)))) match {
case AtomishArray(a) => a.flatMap(_ match {
case actual_code: AtomishCode => Array[AtomishCode](actual_code)
case _ => Array[AtomishCode]()
}) //TODO: this means that passing in eg functions will cause explosions
case AtomishMap(a) => a.toArray.map(a => AtomishForm(List(AtomishMessage(a._1.asInstanceOf[AtomishString].value + ":"),
a._2.asInstanceOf[AtomishCode]))).asInstanceOf[Array[AtomishCode]] //TODO: this means that non-code types can't be spliced into kwargs, which is bad.
case a: AtomishCode => Array[AtomishCode](a)
case _ => Array[AtomishCode]()
})
if(qq_bits.length != 1) {
AtomishCommated(qq_bits)
} else {
qq_bits(0)
}
}
case AtomishCall(call, args) => AtomishCall(call, args.flatMap(x => {
var retpre = unqq(x)
retpre match {
case AtomishCommated(preargs) => preargs
case _ => Array(retpre)
}
}))
case AtomishForm(forms) => {
// If forms is all '(.) except for the last elem, which is '(`) or '(`*), treat it as bare '(`) or '(`*) respectively
if((forms.length >= 1) && (forms.dropRight(1).forall(x => (x == AtomishNL)))) {
forms.last match {
case AtomishCall("`", x) => unqq(forms.last)
case AtomishCall("`*", x) => unqq(forms.last)
case _ => AtomishForm(forms.map(form => unqq(form)))
}
} else {
AtomishForm(forms.map(form => unqq(form)))
}
}
/*y.flatMap(x => {
//println("Forms: "+PreScalaPrinter.print(AtomishForm(y)))
var retpre = unqq(x)
retpre match {
case AtomishCommated(preargs) => preargs
case _ => Array(retpre)
}
})*/
case AtomishCommated(commated) => AtomishCommated(commated.map(bit => unqq(bit)))
case x: AtomishCode => x
}
AtomishCall("'",
ctd.args.map(x => unqq(x)))
}),
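// macro: builds a proxy that exposes its unevaluated call arguments as an 'arguments' array in a fresh scope
// and evaluates the stored code cell.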
"macro" -> QAlienProxy(ctd => {
var mac: QAlienProxy = QAlienProxy(null)
mac.call = (macargs => {
var arg_scope = MMap[String, AtomishThing](
"arguments" -> AtomishArray(macargs.args.map(_ match { // We have to unwrap unneeded AtomishForms
case AtomishForm(List(x)) => x
case x => x
}).asInstanceOf[Array[AtomishThing]])
)
scopes = arg_scope +: scopes;
var result: AtomishThing = AtomishUnset
if(mac.cells.isDefinedAt("code")) {
result = self.roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(mac.cells("code")))))
}
var sco = scopes.tail;
scopes = sco;
result
})
mac.cells("code") = if(ctd.args.length > 0) { ctd.args(0) } else { AtomishNL }
mac
}),
"Array" -> AlienProxy(arg_blob => AtomishArray(arg_blob.args.flatMap(_ match {
case Left(x) => Array[AtomishThing](x)
case _ => Array[AtomishThing]()
}).toArray)),
"Map" -> AlienProxy(arg_blob => AtomishMap(arg_blob.args.map(_ match {
case Right((x, y)) => MMap[AtomishThing, AtomishThing](AtomishString(x) -> y)
case Left(x: AtomishOrigin) => {
if(x.cells.isDefinedAt("pair?") && x.cells("pair?") == AtomishBoolean(true)) {
MMap[AtomishThing, AtomishThing](x.cells("key") -> x.cells("value"))
} else {
MMap[AtomishThing, AtomishThing]()
}
}
case _ => MMap[AtomishThing, AtomishThing]()
}).foldLeft(MMap[AtomishThing, AtomishThing]())(_ ++ _))),
"Origin" -> AtomishOrigin(),
"Mirror" -> AtomishMap(MMap[AtomishThing, AtomishThing](
AtomishString("pre_scala_mirror") -> PreScalaMirror.mirror
)),
"nil" -> AtomishUnset,
"primfn" -> AlienProxy(_.args match {
case List(Left(AtomishString(str))) => {
new AtomishMacro(this,
roots("read").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(AtomishString(str))))).asInstanceOf[AtomishCode])
}
case _ => null //boom
}),
"⇒" -> AlienProxy(_.args match {
case List(Left(key), Left(value)) => {
AtomishOrigin(MMap[String, AtomishThing](
"key" -> key,
"value" -> value,
"pair?" -> AtomishBoolean(true)
))
}
}),
":" -> QAlienProxy(_.args match {
case Array(AtomishMessage(name)) => {
AtomishSymbol(name)
}
}),
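// =: evaluates the right-hand side, assigns to the innermost scope that already defines the cell,
// and falls back to roots otherwise.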
"=" -> QAlienProxy(ctd => ctd.args match {
case Array(AtomishMessage(cell_name), x) => {
var ret = roots("eval").asInstanceOf[AlienProxy].activate(AtomishArgs(List(Left(x))))
var found = false
scopes.foreach(scope => {
if(!found) {
if(scope.isDefinedAt(cell_name) && (scope(cell_name) != AtomishUnset)) {
found = true
scope(cell_name) = ret
}
}
})
if(!found) {
roots(cell_name) = ret
}
ret
}
//case _ => {
// println(PreScalaPrinter.print_with_forms(ctd))
// null
//}
})/*,
"gensym" -> AlienProxy(_.args match {
case _ => {
AtomishGenSym(currgs++)
}
})*/
)
// Aliases
roots("[]") = roots("Array")
roots("{}") = roots("Map")
def recapply(base: AtomishThing, path: Seq[AtomishMessage]): Option[AtomishThing] = path match {
case Seq(AtomishMessage(first), rest @ _*) => {
base match {
case thing: AtomishThing => {
thing.cells.get(first) match {
case Some(AtomishUnset) => None
case Some(cell) => {
if(rest.isEmpty) {
Some(cell)
} else {
recapply(cell, rest)
}
}
case None => {
AtomishThing.bootstrap_cells.get(first) match {
case Some(cell) => {
if(rest.isEmpty) {
Some(cell(base))
} else {
recapply(cell(base), rest)
}
}
case None => None
}
}
}
}
case _ => None
}
}
}
def apply(key: AtomishPlace): Option[AtomishThing] = {
// Try from each scope backwards, or from root if all scopes fail
for (base <- scopes) {
var foo = apply_by_parts(key, base)
if (foo != None) {
return foo;
}
}
return apply_by_parts(key, roots)
}
def apply_by_parts(key: AtomishPlace, base: MMap[String, AtomishThing]): Option[AtomishThing] = {
key.form match {
case AtomishMessage(name) => {
return base.get(name)
}
case AtomishCall(name, _) => {
return base.get(name)
}
//case AtomishForm(head :: rest) => {
// var root = roots.get(head)
//}
case MessageChain(Array(AtomishMessage(first), messages @ _*)) => {
var root = base.get(first)
root match {
case Some(actual) => {
if (messages.isEmpty) {
return Some(actual)
}
return recapply(actual, messages)
}
case None => return None
}
}
case _ => None
}
}
def update(key: AtomishPlace, value: Option[AtomishThing]) {
// Try from each scope backwards, or from root if all scopes fail
for (base <- scopes) {
var foo = apply_by_parts(key, base)
if (foo != None) {
update_internal(key, value, base)
return;
}
}
update_internal(key, value, roots)
}
def update_internal(key: AtomishPlace, value: Option[AtomishThing], base: MMap[String, AtomishThing]) {
// For the moment, just deal with 'stringlike' keys
var true_val = value match {
case Some(x) => x
case _ => AtomishUnset
}
key.form match {
case AtomishMessage(name) => {
base(name) = true_val
}
case AtomishCall(name, _) => {
base(name) = true_val
}
}
}
}
|
flaviusb/Atomish
|
bootstrap/PreUniverse.scala
|
Scala
|
gpl-3.0
| 23,238 |
package org.bowlerframework.model
import collection.mutable.HashMap
/**
* Initializes itself and its validators with a bean.
*/
trait ModelValidatorBuilder[T] {
/**
* Should initialize all Validators with the use of the bean.
*/
def initialize(bean: T): ModelValidator
}
/**
* Holds default validations for beans that have registered ModelValidatorBuilders with the registry.
*/
object ModelValidatorBuilder {
private val registry = new HashMap[Class[_], ModelValidatorBuilder[_]]
def registerValidatorBuilder(cls: Class[_], validator: ModelValidatorBuilder[_]) = registry.put(cls, validator)
def apply(cls: Class[_]): Option[ModelValidatorBuilder[_]] = {
try {
return Some(registry(cls))
} catch {
case e: NoSuchElementException => return None
}
}
}
|
rkpandey/Bowler
|
core/src/main/scala/org/bowlerframework/model/ModelValidatorBuilder.scala
|
Scala
|
bsd-3-clause
| 802 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.examples
import org.apache.hadoop.conf.Configuration
import org.apache.carbondata.examples.util.ExampleUtils
import org.apache.carbondata.hadoop.{CarbonInputFormat, CarbonProjection}
// scalastyle:off println
object HadoopFileExample {
def main(args: Array[String]): Unit = {
val cc = ExampleUtils.createCarbonContext("HadoopFileExample")
ExampleUtils.writeSampleCarbonFile(cc, "carbon1")
// read two columns
val projection = new CarbonProjection
projection.addColumn("c1") // column c1
projection.addColumn("c3") // column c3
val conf = new Configuration()
CarbonInputFormat.setColumnProjection(conf, projection)
val sc = cc.sparkContext
val input = sc.newAPIHadoopFile(s"${cc.storePath}/default/carbon1",
classOf[CarbonInputFormat[Array[Object]]],
classOf[Void],
classOf[Array[Object]],
conf)
val result = input.map(x => x._2.toList).collect
result.foreach(x => println(x.mkString(", ")))
// delete carbondata file
ExampleUtils.cleanSampleCarbonFile(cc, "carbon1")
}
}
// scalastyle:on println
|
ksimar/incubator-carbondata
|
examples/spark/src/main/scala/org/apache/carbondata/examples/HadoopFileExample.scala
|
Scala
|
apache-2.0
| 1,922 |
package models
import io.circe.{Decoder, Encoder}
import scala.collection.immutable
/**
* Created by alex on 16/02/16.
*/
case class EntryTemplate[D](user: Option[String], seasons: Set[SeasonTemplate[D]], links: Links[EntryRel] = Links[EntryRel]())
sealed trait EntryRel extends Rel
object EntryRel extends RelEnum[EntryRel] {
val values: immutable.IndexedSeq[EntryRel] = findValues
object LOGIN extends Rel_("login") with EntryRel
object LOGOUT extends Rel_("logout") with EntryRel
}
object EntryTemplate {
implicit def entryEncoder[D](implicit ev: Encoder[D]): Encoder[EntryTemplate[D]] =
Encoder.forProduct3("user", "seasons", "links")(entry => (entry.user, entry.seasons, entry.links))
implicit def entryDecoder[D](implicit ev: Decoder[D]): Decoder[EntryTemplate[D]] =
Decoder.forProduct3("user", "seasons", "links")(EntryTemplate.apply[D])
}
|
unclealex72/west-ham-calendar
|
shared/src/main/scala/models/EntryTemplate.scala
|
Scala
|
apache-2.0
| 875 |
package skinny.mailer.implicits
import scala.language.implicitConversions
import javax.mail._
import javax.mail.internet._
import skinny.mailer._
/**
* Implicit conversions for SkinnyMailer.
*/
trait SkinnyMailerImplicits {
implicit def convertMimeMessageToRichMimeMessage[T >: SkinnyMessage <: Message](m: T): RichMimeMessage = m match {
case m: MimeMessage => new RichMimeMessage {
override def underlying = m
}
}
implicit def convertMimeBodyPartToRichMimeBodyPart[T >: MimeBodyPart <: BodyPart](b: T): RichMimeBodyPart = b match {
case b: MimeBodyPart => new RichMimeBodyPart(b)
}
}
|
Kuchitama/skinny-framework
|
mailer/src/main/scala/skinny/mailer/implicits/SkinnyMailerImplicits.scala
|
Scala
|
mit
| 620 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fs.mount
import slamdata.Predef.List
import quasar.Variables
import quasar.contrib.pathy.{ADir, AFile, APath}
import quasar.fs.FileSystemType
import quasar.sql.{ScopedExpr, Sql, Statement}
import matryoshka.data.Fix
import monocle.Prism
sealed abstract class MountRequest {
import MountRequest._
def path: APath =
this match {
case MountView(f, _, _) => f
case MountFileSystem(d, _, _) => d
case MountModule(d, _) => d
}
def toConfig: MountConfig =
this match {
case MountView(_, q, vs) => MountConfig.viewConfig(q, vs)
case MountFileSystem(_, t, u) => MountConfig.fileSystemConfig(t, u)
case MountModule(_, s) => MountConfig.moduleConfig(s)
}
}
object MountRequest {
final case class MountView private[mount] (
file: AFile,
scopedExpr: ScopedExpr[Fix[Sql]],
vars: Variables
) extends MountRequest
final case class MountFileSystem private[mount] (
dir: ADir,
typ: FileSystemType,
uri: ConnectionUri
) extends MountRequest
final case class MountModule private[mount] (
dir: ADir,
statements: List[Statement[Fix[Sql]]]
) extends MountRequest
val mountView = Prism.partial[MountRequest, (AFile, ScopedExpr[Fix[Sql]], Variables)] {
case MountView(f, q, vs) => (f, q, vs)
} ((MountView(_, _, _)).tupled)
val mountFileSystem = Prism.partial[MountRequest, (ADir, FileSystemType, ConnectionUri)] {
case MountFileSystem(d, t, u) => (d, t, u)
} ((MountFileSystem(_, _, _)).tupled)
val mountModule = Prism.partial[MountRequest, (ADir, List[Statement[Fix[Sql]]])] {
case MountModule(d, s) => (d, s)
} ((MountModule(_, _)).tupled)
}
|
jedesah/Quasar
|
core/src/main/scala/quasar/fs/mount/MountRequest.scala
|
Scala
|
apache-2.0
| 2,302 |
import sbt._
object Common {
import Keys._
val servletApiDep = "javax.servlet" % "javax.servlet-api" % "3.0.1" % "provided"
val jettyVersion = "8.1.13.v20130916"
def specs2Dep(sv: String) =
sv.split("[.-]").toList match {
case "2" :: "9" :: _ => "org.specs2" %% "specs2" % "1.12.4.1"
case _ => "org.specs2" %% "specs2" % "2.3.11"
}
val dispatchVersion = "0.8.10"
def dispatchDeps =
"net.databinder" %% "dispatch-mime" % dispatchVersion ::
"net.databinder" %% "dispatch-http" % dispatchVersion :: Nil
def dispatchOAuthDep =
"net.databinder" %% "dispatch-oauth" % dispatchVersion
def integrationTestDeps(sv: String) = (specs2Dep(sv) :: dispatchDeps) map { _ % "test" }
val settings: Seq[Setting[_]] = Seq(
organization := "net.databinder",
version := "0.8.1",
crossScalaVersions := Seq("2.11.2", "2.10.4"),
scalaVersion := crossScalaVersions.value.head,
scalacOptions ++=
Seq("-Xcheckinit", "-encoding", "utf8", "-deprecation", "-unchecked", "-feature"),
javacOptions in Compile ++= Seq("-source", "1.6", "-target", "1.6"),
incOptions := incOptions.value.withNameHashing(true),
parallelExecution in Test := false, // :( test servers collide on same port
homepage := Some(new java.net.URL("http://unfiltered.databinder.net/")),
publishMavenStyle := true,
publishTo := Some("releases" at
"https://oss.sonatype.org/service/local/staging/deploy/maven2"),
publishArtifact in Test := false,
licenses := Seq("MIT" -> url("http://www.opensource.org/licenses/MIT")),
pomExtra := (
<scm>
<url>[email protected]:unfiltered/unfiltered.git</url>
<connection>scm:git:[email protected]:unfiltered/unfiltered.git</connection>
</scm>
<developers>
<developer>
<id>n8han</id>
<name>Nathan Hamblen</name>
<url>http://twitter.com/n8han</url>
</developer>
<developer>
<id>softprops</id>
<name>Doug Tangren</name>
<url>http://twitter.com/softprops</url>
</developer>
</developers>
),
// this should resolve artifacts recently published to sonatype oss not yet mirrored to maven central
resolvers += "sonatype releases" at "https://oss.sonatype.org/content/repositories/releases"
)
}
|
benhutchison/unfiltered
|
project/common.scala
|
Scala
|
mit
| 2,346 |
package collins.provisioning
/**
* Part of the provisioning profile, describes the role requirements as well as default roles for an
* asset being provisioned
*/
case class ProvisionerRoleData(
primary_role: Option[String],
pool: Option[String],
secondary_role: Option[String],
contact: Option[String],
contact_notes: Option[String],
allowed_classes: Option[Set[String]],
attributes: Map[String, String],
clear_attributes: Set[String],
requires_primary_role: Boolean,
requires_pool: Boolean,
requires_secondary_role: Boolean) {
def this() = this(None, None, None, None, None, None, Map(), Set(), false, false, false)
}
|
discordianfish/collins
|
app/collins/provisioning/ProvisionerRoleData.scala
|
Scala
|
apache-2.0
| 671 |
package me.gregd.cineworld.domain.repository
import java.time.LocalDate
import cats.effect.Async
import com.typesafe.scalalogging.LazyLogging
import io.circe.generic.auto._
import io.circe.parser._
import io.circe.syntax._
import me.gregd.cineworld.config.ListingsTableName
import me.gregd.cineworld.domain.model.{Movie, Performance}
import me.gregd.cineworld.domain.repository.SlickListingsRepository._
import cats.syntax.functor._
import slick.jdbc.PostgresProfile
import slick.jdbc.PostgresProfile.api._
import scala.concurrent.{ExecutionContext, Future}
class SlickListingsRepository[F[_]: Async](db: PostgresProfile.backend.DatabaseDef, tableName: ListingsTableName) extends ListingsRepository[F] with LazyLogging {
private val table = tableName.value
override def fetch(cinemaId: String, date: LocalDate): F[Seq[(Movie, Seq[Performance])]] = {
db.run(select(table, cinemaId, date)).toAsync.map(deserialize)
}
override def persist(cinemaId: String, date: LocalDate)(listings: Seq[(Movie, Seq[Performance])]): F[Unit] = {
val json = listings.asJson.noSpaces
db.run(insertOrUpdate(table, cinemaId, date, json)).toAsync.map(_ => ())
}
implicit class FutureToAsync[T](f: => Future[T]) {
def toAsync: F[T] = {
Async[F].async(cb => f.onComplete(t => cb(t.toEither))(ExecutionContext.global))
}
}
}
object SlickListingsRepository {
def select(table: String, cinemaId: String, date: LocalDate) =
sql"select listings from #$table where cinema_id = $cinemaId and date = ${date.toEpochDay.toString}"
.as[String]
.head
def insertOrUpdate(table: String, cinemaId: String, date: LocalDate, json: String) =
sqlu"insert into #$table values ($cinemaId, ${date.toEpochDay}, $json) on conflict (cinema_id, date) do update set listings = $json"
def deserialize(json: String) = decode[Seq[(Movie, Seq[Performance])]](json).toTry.get
}
|
Grogs/cinema-service
|
domain/src/main/scala/me/gregd/cineworld/domain/repository/SlickListingsRepository.scala
|
Scala
|
gpl-3.0
| 1,896 |
/**
* Copyright 2015 Ram Sriharsha
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package magellan
import magellan.TestingUtils._
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._
import org.apache.spark.sql.magellan.MagellanContext
import org.apache.spark.sql.magellan.dsl.expressions._
import org.apache.spark.sql.types.StringType
import org.scalatest.FunSuite
case class UberRecord(tripId: String, timestamp: String, point: Point)
class ShapefileSuite extends FunSuite with TestSparkContext {
test("shapefile-relation: points") {
val sqlCtx = new MagellanContext(sc)
val path = this.getClass.getClassLoader.getResource("testpoint/").getPath
val df = sqlCtx.read.format("magellan").load(path)
import sqlCtx.implicits._
assert(df.count() === 1)
val point = df.select($"point").map {case Row(x: Point) => x}.first()
assert(point.x ~== -99.796 absTol 0.2)
}
test("shapefile-relation: polygons") {
val sqlCtx = new MagellanContext(sc)
val path = this.getClass.getClassLoader.getResource("testpolygon/").getPath
val df = sqlCtx.read.format("magellan").load(path)
import sqlCtx.implicits._
assert(df.count() === 1)
val polygon = df.select($"polygon").map {case Row(x: Polygon) => x}.first()
assert(polygon.indices.size === 1)
assert(polygon.points.size === 6)
}
test("shapefile-relation: Zillow Neighborhoods") {
val sqlCtx = new MagellanContext(sc)
val path = this.getClass.getClassLoader.getResource("testzillow/").getPath
val df = sqlCtx.read.format("magellan").load(path)
import sqlCtx.implicits._
assert(df.count() === 1932) // 34 + 948 + 689 + 261
// CA should have some metadata attached to it
val extractValue: (Map[String, String], String) => String =
(map: Map[String, String], key: String) => {
map.getOrElse(key, null)
}
val stateUdf = callUDF(extractValue, StringType, col("metadata"), lit("STATE"))
val dfwithmeta = df.withColumn("STATE", stateUdf)
assert(dfwithmeta.filter($"STATE" === "CA").count() === 948)
assert(df.select($"metadata"("STATE").as("state")).filter($"state" === "CA").count() === 948)
assert(df.select($"metadata"("STATE").as("state")).filter($"state" isNull).count() === 723)
}
test("shapefile-relation: polylines") {
val sqlCtx = new MagellanContext(sc)
val path = this.getClass.getClassLoader.getResource("testpolyline/").getPath
val df = sqlCtx.read.format("magellan").load(path)
import sqlCtx.implicits._
assert(df.count() === 14959)
// 5979762.107174277,2085850.5510566086,6024890.0635061115,2130875.5735391825
val start = new Point(5989880.123683602, 2107393.125753522)
val end = new Point(5988698.112268105, 2107728.9863022715)
assert(df.filter($"polyline" intersects new Line(start, end)).count() > 0)
}
test("shapefile-relation: points and polygons") {
val sqlCtx = new MagellanContext(sc)
val path = this.getClass.getClassLoader.getResource("testcomposite/").getPath
val df = sqlCtx.read.format("magellan").load(path)
assert(df.count() === 2)
// each row should either contain a point or a polygon but not both
import sqlCtx.implicits._
assert(df.filter($"point" isNull).count() === 1)
assert(df.filter($"polygon" isNull).count() === 1)
}
test("shapefile-relation: valid") {
val sqlCtx = new MagellanContext(sc)
val path = this.getClass.getClassLoader.getResource("testpolyline/").getPath
val df = sqlCtx.read.format("magellan").load(path)
import sqlCtx.implicits._
assert(df.filter($"valid").count() == 14959)
}
}
|
YanjieGao/magellan
|
src/test/scala/magellan/ShapefileSuite.scala
|
Scala
|
apache-2.0
| 4,142 |
package com.eevolution.context.dictionary.domain.api.repository
import com.eevolution.context.dictionary._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 01/11/17.
*/
trait LdapAccessRepository [LdapAccess , Int] extends api.Repostory [LdapAccess , Int] {
}
|
adempiere/ADReactiveSystem
|
dictionary-api/src/main/scala/com/eevolution/context/dictionary/domain/api/repository/LdapAccessRepository.scala
|
Scala
|
gpl-3.0
| 1,133 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy
import java.io._
import java.net.URL
import java.nio.charset.StandardCharsets
import java.util.concurrent.TimeoutException
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Future, Promise}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.sys.process._
import org.json4s._
import org.json4s.jackson.JsonMethods
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.deploy.master.RecoveryState
import org.apache.spark.internal.Logging
import org.apache.spark.util.{ThreadUtils, Utils}
/**
* This suite tests the fault tolerance of the Spark standalone scheduler, mainly the Master.
* In order to mimic a real distributed cluster more closely, Docker is used.
* Execute using
* ./bin/spark-class org.apache.spark.deploy.FaultToleranceTest
*
 * Make sure that the environment includes the following properties in SPARK_DAEMON_JAVA_OPTS
* *and* SPARK_JAVA_OPTS:
* - spark.deploy.recoveryMode=ZOOKEEPER
* - spark.deploy.zookeeper.url=172.17.42.1:2181
* Note that 172.17.42.1 is the default docker ip for the host and 2181 is the default ZK port.
*
* In case of failure, make sure to kill off prior docker containers before restarting:
* docker kill $(docker ps -q)
*
* Unfortunately, due to the Docker dependency this suite cannot be run automatically without a
* working installation of Docker. In addition to having Docker, the following are assumed:
* - Docker can run without sudo (see http://docs.docker.io/en/latest/use/basics/)
* - The docker images tagged spark-test-master and spark-test-worker are built from the
* docker/ directory. Run 'docker/spark-test/build' to generate these.
*/
private object FaultToleranceTest extends App with Logging {
private val conf = new SparkConf()
private val ZK_DIR = conf.get("spark.deploy.zookeeper.dir", "/spark")
private val masters = ListBuffer[TestMasterInfo]()
private val workers = ListBuffer[TestWorkerInfo]()
private var sc: SparkContext = _
private val zk = SparkCuratorUtil.newClient(conf)
private var numPassed = 0
private var numFailed = 0
private val sparkHome = System.getenv("SPARK_HOME")
assertTrue(sparkHome != null, "Run with a valid SPARK_HOME")
private val containerSparkHome = "/opt/spark"
private val dockerMountDir = "%s:%s".format(sparkHome, containerSparkHome)
System.setProperty("spark.driver.host", "172.17.42.1") // default docker host ip
private def afterEach() {
if (sc != null) {
sc.stop()
sc = null
}
terminateCluster()
// Clear ZK directories in between tests (for speed purposes)
SparkCuratorUtil.deleteRecursive(zk, ZK_DIR + "/spark_leader")
SparkCuratorUtil.deleteRecursive(zk, ZK_DIR + "/master_status")
}
test("sanity-basic") {
addMasters(1)
addWorkers(1)
createClient()
assertValidClusterState()
}
test("sanity-many-masters") {
addMasters(3)
addWorkers(3)
createClient()
assertValidClusterState()
}
test("single-master-halt") {
addMasters(3)
addWorkers(2)
createClient()
assertValidClusterState()
killLeader()
delay(30 seconds)
assertValidClusterState()
createClient()
assertValidClusterState()
}
test("single-master-restart") {
addMasters(1)
addWorkers(2)
createClient()
assertValidClusterState()
killLeader()
addMasters(1)
delay(30 seconds)
assertValidClusterState()
killLeader()
addMasters(1)
delay(30 seconds)
assertValidClusterState()
}
test("cluster-failure") {
addMasters(2)
addWorkers(2)
createClient()
assertValidClusterState()
terminateCluster()
addMasters(2)
addWorkers(2)
assertValidClusterState()
}
test("all-but-standby-failure") {
addMasters(2)
addWorkers(2)
createClient()
assertValidClusterState()
killLeader()
workers.foreach(_.kill())
workers.clear()
delay(30 seconds)
addWorkers(2)
assertValidClusterState()
}
test("rolling-outage") {
addMasters(1)
delay()
addMasters(1)
delay()
addMasters(1)
addWorkers(2)
createClient()
assertValidClusterState()
assertTrue(getLeader == masters.head)
(1 to 3).foreach { _ =>
killLeader()
delay(30 seconds)
assertValidClusterState()
assertTrue(getLeader == masters.head)
addMasters(1)
}
}
private def test(name: String)(fn: => Unit) {
try {
fn
numPassed += 1
logInfo("==============================================")
logInfo("Passed: " + name)
logInfo("==============================================")
} catch {
case e: Exception =>
numFailed += 1
logInfo("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
logError("FAILED: " + name, e)
logInfo("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
sys.exit(1)
}
afterEach()
}
private def addMasters(num: Int) {
logInfo(s">>>>> ADD MASTERS $num <<<<<")
(1 to num).foreach { _ => masters += SparkDocker.startMaster(dockerMountDir) }
}
private def addWorkers(num: Int) {
logInfo(s">>>>> ADD WORKERS $num <<<<<")
val masterUrls = getMasterUrls(masters)
(1 to num).foreach { _ => workers += SparkDocker.startWorker(dockerMountDir, masterUrls) }
}
/** Creates a SparkContext, which constructs a Client to interact with our cluster. */
private def createClient() = {
logInfo(">>>>> CREATE CLIENT <<<<<")
if (sc != null) { sc.stop() }
// Counter-hack: Because of a hack in SparkEnv#create() that changes this
// property, we need to reset it.
System.setProperty("spark.driver.port", "0")
sc = new SparkContext(getMasterUrls(masters), "fault-tolerance", containerSparkHome)
}
private def getMasterUrls(masters: Seq[TestMasterInfo]): String = {
"spark://" + masters.map(master => master.ip + ":7077").mkString(",")
}
private def getLeader: TestMasterInfo = {
val leaders = masters.filter(_.state == RecoveryState.ALIVE)
assertTrue(leaders.size == 1)
leaders(0)
}
private def killLeader(): Unit = {
logInfo(">>>>> KILL LEADER <<<<<")
masters.foreach(_.readState())
val leader = getLeader
masters -= leader
leader.kill()
}
private def delay(secs: Duration = 5.seconds) = Thread.sleep(secs.toMillis)
private def terminateCluster() {
logInfo(">>>>> TERMINATE CLUSTER <<<<<")
masters.foreach(_.kill())
workers.foreach(_.kill())
masters.clear()
workers.clear()
}
/** This includes Client retry logic, so it may take a while if the cluster is recovering. */
private def assertUsable() = {
val f = Future {
try {
val res = sc.parallelize(0 until 10).collect()
assertTrue(res.toList == (0 until 10).toList)
true
} catch {
case e: Exception =>
logError("assertUsable() had exception", e)
e.printStackTrace()
false
}
}
// Avoid waiting indefinitely (e.g., we could register but get no executors).
assertTrue(ThreadUtils.awaitResult(f, 120 seconds))
}
/**
* Asserts that the cluster is usable and that the expected masters and workers
* are all alive in a proper configuration (e.g., only one leader).
*/
private def assertValidClusterState() = {
logInfo(">>>>> ASSERT VALID CLUSTER STATE <<<<<")
assertUsable()
var numAlive = 0
var numStandby = 0
var numLiveApps = 0
var liveWorkerIPs: Seq[String] = List()
def stateValid(): Boolean = {
(workers.map(_.ip) -- liveWorkerIPs).isEmpty &&
numAlive == 1 && numStandby == masters.size - 1 && numLiveApps >= 1
}
val f = Future {
try {
while (!stateValid()) {
Thread.sleep(1000)
numAlive = 0
numStandby = 0
numLiveApps = 0
masters.foreach(_.readState())
for (master <- masters) {
master.state match {
case RecoveryState.ALIVE =>
numAlive += 1
liveWorkerIPs = master.liveWorkerIPs
case RecoveryState.STANDBY =>
numStandby += 1
case _ => // ignore
}
numLiveApps += master.numLiveApps
}
}
true
} catch {
case e: Exception =>
logError("assertValidClusterState() had exception", e)
false
}
}
try {
assertTrue(ThreadUtils.awaitResult(f, 120 seconds))
} catch {
case e: TimeoutException =>
logError("Master states: " + masters.map(_.state))
logError("Num apps: " + numLiveApps)
logError("IPs expected: " + workers.map(_.ip) + " / found: " + liveWorkerIPs)
throw new RuntimeException("Failed to get into acceptable cluster state after 2 min.", e)
}
}
private def assertTrue(bool: Boolean, message: String = "") {
if (!bool) {
throw new IllegalStateException("Assertion failed: " + message)
}
}
logInfo("Ran %s tests, %s passed and %s failed".format(numPassed + numFailed, numPassed,
numFailed))
}
private class TestMasterInfo(val ip: String, val dockerId: DockerId, val logFile: File)
extends Logging {
implicit val formats = org.json4s.DefaultFormats
var state: RecoveryState.Value = _
var liveWorkerIPs: List[String] = _
var numLiveApps = 0
logDebug("Created master: " + this)
def readState() {
try {
val masterStream = new InputStreamReader(
new URL("http://%s:8080/json".format(ip)).openStream, StandardCharsets.UTF_8)
val json = JsonMethods.parse(masterStream)
val workers = json \ "workers"
val liveWorkers = workers.children.filter(w => (w \ "state").extract[String] == "ALIVE")
// Extract the worker IP from "webuiaddress" (rather than "host") because the host name
// on containers is a weird hash instead of the actual IP address.
liveWorkerIPs = liveWorkers.map {
  w => (w \ "webuiaddress").extract[String].stripPrefix("http://").stripSuffix(":8081")
}
numLiveApps = (json \ "activeapps").children.size
val status = json \\ "status"
val stateString = status.extract[String]
state = RecoveryState.values.filter(state => state.toString == stateString).head
} catch {
case e: Exception =>
// ignore, no state update
logWarning("Exception", e)
}
}
def kill() { Docker.kill(dockerId) }
override def toString: String =
"[ip=%s, id=%s, logFile=%s, state=%s]".
format(ip, dockerId.id, logFile.getAbsolutePath, state)
}
private class TestWorkerInfo(val ip: String, val dockerId: DockerId, val logFile: File)
extends Logging {
implicit val formats = org.json4s.DefaultFormats
logDebug("Created worker: " + this)
def kill() { Docker.kill(dockerId) }
override def toString: String =
"[ip=%s, id=%s, logFile=%s]".format(ip, dockerId, logFile.getAbsolutePath)
}
private object SparkDocker {
def startMaster(mountDir: String): TestMasterInfo = {
val cmd = Docker.makeRunCmd("spark-test-master", mountDir = mountDir)
val (ip, id, outFile) = startNode(cmd)
new TestMasterInfo(ip, id, outFile)
}
def startWorker(mountDir: String, masters: String): TestWorkerInfo = {
val cmd = Docker.makeRunCmd("spark-test-worker", args = masters, mountDir = mountDir)
val (ip, id, outFile) = startNode(cmd)
new TestWorkerInfo(ip, id, outFile)
}
private def startNode(dockerCmd: ProcessBuilder) : (String, DockerId, File) = {
val ipPromise = Promise[String]()
val outFile = File.createTempFile("fault-tolerance-test", "", Utils.createTempDir())
val outStream: FileWriter = new FileWriter(outFile)
def findIpAndLog(line: String): Unit = {
if (line.startsWith("CONTAINER_IP=")) {
val ip = line.split("=")(1)
ipPromise.success(ip)
}
outStream.write(line + "\n")
outStream.flush()
}
dockerCmd.run(ProcessLogger(findIpAndLog _))
val ip = ThreadUtils.awaitResult(ipPromise.future, 30 seconds)
val dockerId = Docker.getLastProcessId
(ip, dockerId, outFile)
}
}
private class DockerId(val id: String) {
override def toString: String = id
}
private object Docker extends Logging {
def makeRunCmd(imageTag: String, args: String = "", mountDir: String = ""): ProcessBuilder = {
val mountCmd = if (mountDir != "") { " -v " + mountDir } else ""
val cmd = "docker run -privileged %s %s %s".format(mountCmd, imageTag, args)
logDebug("Run command: " + cmd)
cmd
}
def kill(dockerId: DockerId) : Unit = {
"docker kill %s".format(dockerId.id).!
}
def getLastProcessId: DockerId = {
var id: String = null
"docker ps -l -q".!(ProcessLogger(line => id = line))
new DockerId(id)
}
}
|
sh-cho/cshSpark
|
deploy/FaultToleranceTest.scala
|
Scala
|
apache-2.0
| 13,729 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.test
import org.scalatest.wordspec.AnyWordSpec
/**
* The purpose of this suite is to make sure that generic WordSpec-based scala
* tests work with a shared spark session
*/
class GenericWordSpecSuite extends AnyWordSpec with SharedSparkSessionBase {
import testImplicits._
private def ds = Seq((1, 1), (2, 1), (3, 2), (4, 2), (5, 3), (6, 3), (7, 4), (8, 4)).toDS
"A Simple Dataset" when {
"looked at as complete rows" should {
"have the specified number of elements" in {
assert(8 === ds.count)
}
"have the specified number of unique elements" in {
assert(8 === ds.distinct.count)
}
}
"refined to specific columns" should {
"have the specified number of elements in each column" in {
assert(8 === ds.select("_1").count)
assert(8 === ds.select("_2").count)
}
"have the correct number of distinct elements in each column" in {
assert(8 === ds.select("_1").distinct.count)
assert(4 === ds.select("_2").distinct.count)
}
}
}
}
|
ueshin/apache-spark
|
sql/core/src/test/scala/org/apache/spark/sql/test/GenericWordSpecSuite.scala
|
Scala
|
apache-2.0
| 1,881 |
package controllers
import java.io.File
import controllers.Utils._
import scala.xml._
object XmlTestResultParser {
// def parse(el: Elem) = TestEntry(Some((el \\ "testsuite").text).get,"",false)
def metadata(el: Elem) = getAttributes(el,List("name","time","tests","errors","skipped","failures"))
def getAttributes(el: Elem, atts: List[String]) = atts.map(a => a + ": " +el.attribute(a).get.text) mkString ", "
def getJunitTestsMetadataAsString():String = {//Junit metadata
getJunitTestsMetadata() mkString "\n"
}
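// Summarizes each report file found in ./dataStore as one line of <testsuite> attributes.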
def getJunitTestsMetadata():List[String] = {
val files: List[File] = getListOfFiles("./dataStore");
files.map(f => s"[${f.getName}]: "+metadata(XML.loadFile(f)))
}
}
|
olka/test-reports-agregator
|
app/controllers/XmlTestResultParser.scala
|
Scala
|
mit
| 713 |
package org.vaadin.addons.rinne.mixins
import com.vaadin.server.Resource
import com.vaadin.ui.{Component, UI}
import java.util.Locale
import scala.collection.mutable
trait ComponentMixin extends SizeableMixin {
this: Component =>
lazy val styleNames = new mutable.Set[String] with Serializable {
def contains(key: String) = getStyleName.split(" ").iterator.contains(key)
def iterator: Iterator[String] = getStyleName.split(" ").iterator
def +=(elem: String) = { elem.split(" ").foreach(addStyleName); this }
def -=(elem: String) = { removeStyleName(elem); this }
}
def styleName: Option[String] = Option(getStyleName)
def styleName_=(styleName: Option[String]) {
setStyleName(styleName.orNull)
}
def styleName_=(styleName: String) {
setStyleName(styleName)
}
def enabled: Boolean = isEnabled
def enabled_=(enabled: Boolean) { setEnabled(enabled) }
def visible: Boolean = isVisible
def visible_=(visible: Boolean) { setVisible(visible) }
def readOnly: Boolean = isReadOnly
def readOnly_=(readOnly: Boolean) { setReadOnly(readOnly) }
def caption: Option[String] = Option(getCaption)
def caption_=(caption: Option[String]) {
setCaption(caption.orNull)
}
def caption_=(caption: String) { setCaption(caption) }
def icon: Option[Resource] = Option(getIcon)
def icon_=(icon: Option[Resource]) {
setIcon(icon.orNull)
}
def icon_=(icon: Resource) { setIcon(icon) }
def ui: UI = getUI
def locale: Option[Locale] = Option(getLocale)
def id: Option[String] = Option(getId)
def id_=(id: Option[String]) {
setId(id.orNull)
}
def id_=(id: String) {
setId(id)
}
}
|
LukaszByczynski/rinne
|
src/main/scala/org/vaadin/addons/rinne/mixins/ComponentMixin.scala
|
Scala
|
apache-2.0
| 1,671 |
trait Foo { def i: Int }
trait Bar
case class Spam(i: Int) extends Foo with Bar
object Test {
def matchParent(p:Any) = p match {
case f:Foo if f.i == 1 => 1
case _:Bar => 2
case _:Foo => 3
}
def main(args: Array[String]): Unit = {
println(matchParent(Spam(3)))
}
}
|
yusuke2255/dotty
|
tests/run/t4482.scala
|
Scala
|
bsd-3-clause
| 291 |
package ch.epfl.bluebrain.nexus.iam.client.types
import ch.epfl.bluebrain.nexus.commons.circe.syntax._
import ch.epfl.bluebrain.nexus.iam.client.config.Contexts._
import ch.epfl.bluebrain.nexus.iam.client.config.IamClientConfig
import ch.epfl.bluebrain.nexus.iam.client.config.Vocabulary._
import ch.epfl.bluebrain.nexus.rdf.Iri.Path
import ch.epfl.bluebrain.nexus.rdf.syntax._
import io.circe._
import io.circe.syntax._
import scala.collection.immutable.ListMap
/**
* Type definition representing a mapping of Paths to AccessControlList for a specific resource.
*
* @param value a map of path and AccessControlList
*/
final case class AccessControlLists(value: Map[Path, ResourceAccessControlList]) {
/**
* Adds a key pair of Path and [[ResourceAccessControlList]] to the current ''value'' and returns a new [[AccessControlLists]] with the added acl.
*
* @param entry the key pair of Path and ACL to be added
*/
def +(entry: (Path, ResourceAccessControlList)): AccessControlLists = {
val (path, aclResource) = entry
val toAdd = aclResource.copy(value = value.get(path).map(_.value ++ aclResource.value).getOrElse(aclResource.value))
AccessControlLists(value + (path -> toAdd))
}
/**
* @return new [[AccessControlLists]] with the same elements as the current one but sorted by [[Path]] (alphabetically)
*/
def sorted: AccessControlLists =
AccessControlLists(ListMap(value.toSeq.sortBy { case (path, _) => path.asString }: _*))
/**
* Generates a new [[AccessControlLists]] only containing the provided ''identities''.
*
* @param identities the identities to be filtered
*/
def filter(identities: Set[Identity]): AccessControlLists =
value.foldLeft(AccessControlLists.empty) {
case (acc, (p, aclResource)) =>
val list = aclResource.copy(value = aclResource.value.filter(identities))
acc + (p -> list)
}
}
object AccessControlLists {
/**
* An empty [[AccessControlLists]].
*/
val empty: AccessControlLists = AccessControlLists(Map.empty[Path, ResourceAccessControlList])
/**
* Convenience factory method to build an ACLs from var args of ''Path'' to ''AccessControlList'' tuples.
*/
final def apply(tuple: (Path, ResourceAccessControlList)*): AccessControlLists = AccessControlLists(tuple.toMap)
implicit def aclsEncoder(implicit http: IamClientConfig): Encoder[AccessControlLists] = Encoder.encodeJson.contramap {
case AccessControlLists(value) =>
val arr = value.map {
case (path, acl) =>
Json.obj("_path" -> Json.fromString(path.asString)) deepMerge acl.asJson.removeKeys("@context")
}
Json
.obj(nxv.total.prefix -> Json.fromInt(arr.size), nxv.results.prefix -> Json.arr(arr.toSeq: _*))
.addContext(resourceCtxUri)
.addContext(iamCtxUri)
.addContext(searchCtxUri)
}
implicit def aclsDecoder: Decoder[AccessControlLists] = {
import cats.implicits._
import ch.epfl.bluebrain.nexus.rdf.instances._
def jsonToPathedAcl(hc: HCursor): Either[DecodingFailure, (Path, ResourceAccessControlList)] =
for {
path <- hc.get[Path]("_path")
acl <- hc.value.as[ResourceAccessControlList]
} yield path -> acl
Decoder.instance { hc =>
hc.downField(nxv.results.prefix)
.focus
.flatMap(_.asArray)
.toRight(DecodingFailure(s"'${nxv.results.prefix}' field not found", hc.history))
.flatMap { results =>
results
.foldM(Map.empty[Path, ResourceAccessControlList]) { (acc, json) =>
jsonToPathedAcl(json.hcursor).map(acc + _)
}
.map(AccessControlLists(_))
}
}
}
}
|
hygt/nexus-iam
|
client/src/main/scala/ch/epfl/bluebrain/nexus/iam/client/types/AccessControlLists.scala
|
Scala
|
apache-2.0
| 3,733 |
package com.vorlov
package object commands {
def registerCommands: Unit = {
Main.registerCommand(FetchData, "fetch-data", "fetch")
Main.registerCommand(Normalize, "normalize")
Main.registerCommand("naive-bayes", NaiveBayes)
}
}
|
VolodymyrOrlov/tweets-opinion-mining
|
src/main/scala/com/vorlov/commands/package.scala
|
Scala
|
apache-2.0
| 249 |
package com.aurelpaulovic.scala_kata.s_99
/*
* P10 (*) Run-length encoding of a list.
* Use the result of problem P09 to implement the so-called run-length
* encoding data compression method. Consecutive duplicates of elements are
* encoded as tuples (N, E) where N is the number of duplicates of the
* element E.
*
* Example:
* scala> encode(List('a, 'a, 'a, 'a, 'b, 'c, 'c, 'a, 'a, 'd, 'e, 'e, 'e, 'e))
* res0: List[(Int, Symbol)] = List((4,'a), (1,'b), (2,'c), (2,'a), (1,'d), (4,'e))
*
* (from: http://aperiodic.net/phil/scala/s-99/)
*/
package object p10 {
import p09._
def encode[A](ls: List[A]) =
pack03(ls) map { x => (x.head, x.length) }
}
|
AurelPaulovic/scala-kata
|
src/main/scala/com/aurelpaulovic/scala_kata/s_99/p10/package.scala
|
Scala
|
apache-2.0
| 674 |
/**
* Copyright (C) 2012 Inria, University Lille 1.
*
* This file is part of PowerAPI.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI. If not, see <http://www.gnu.org/licenses/>.
*
* Contact: [email protected].
*/
package fr.inria.powerapi.example.adamdemo.full
import fr.inria.powerapi.formula.cpu.api.CpuFormulaMessage
import fr.inria.powerapi.formula.disk.api.DiskFormulaMessage
import fr.inria.powerapi.core.Listener
import javax.swing.SwingUtilities
object DemoListener {
val pidNames = collection.mutable.Map[Int, String](-1 -> "all processes")
private var justTotalRequest = true
private var clearRequest = false
def isNamed(pid: Int) = synchronized {
pidNames.contains(pid)
}
def pidName(pid: Int, name: String) = synchronized {
pidNames += pid -> name
}
def justTotal() = synchronized {
justTotalRequest = true
}
def unJustTotal() = synchronized {
justTotalRequest = false
}
def hasToJustTotal = synchronized {
justTotalRequest
}
def clear() = synchronized {
clearRequest = true
}
def unClear() = synchronized {
clearRequest = false
}
def hasToClear = synchronized {
clearRequest
}
}
class DemoListener extends Listener {
val cache = collection.mutable.Map[Long, Map[Int, Map[String, Double]]]()
def messagesToListen = Array(classOf[CpuFormulaMessage], classOf[DiskFormulaMessage])
init()
def init() {
SwingUtilities.invokeLater(new Runnable {
def run() {
Chart.run()
}
})
}
def acquire = {
case cpuFormulaMessage: CpuFormulaMessage => process(cpuFormulaMessage.tick.timestamp, cpuFormulaMessage.tick.subscription.process.pid, "cpu", cpuFormulaMessage.energy.power)
case diskFormulaMessage: DiskFormulaMessage => process(diskFormulaMessage.tick.timestamp, diskFormulaMessage.tick.subscription.process.pid, "disk", diskFormulaMessage.energy.power)
}
def process(timestamp: Long, pid: Int, device: String, power: Double) {
if (DemoListener.hasToClear) {
clear()
DemoListener.unClear()
}
add(timestamp, pid, device, power)
flush(timestamp)
}
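// Accumulates power per (timestamp, pid, device); pids without a registered name are folded
// into the -1 ("all processes") bucket.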
def add(timestamp: Long, pid: Int, device: String, power: Double) {
val cachedPid =
if (DemoListener.isNamed(pid)) {
pid
} else {
-1
}
val processes = cache.getOrElse(timestamp, Map[Int, Map[String, Double]]())
val devices = processes.getOrElse(cachedPid, Map[String, Double]())
val powers = devices.getOrElse(device, 0: Double)
cache += timestamp -> (processes + (cachedPid -> (devices + (device -> (powers + power)))))
}
def flush(limit: Long) {
cache.filter(entry => (entry._1 < limit)).foreach(value => {
display(value._1)
remove(value._1)
})
}
def display(timestamp: Long) {
if (DemoListener.hasToJustTotal) {
cache(timestamp).foreach(process => {
Chart.add(Map(
DemoListener.pidNames(process._1) + " total" -> process._2.foldLeft(0: Double) { (acc, device) => acc + device._2 }), timestamp)
})
} else {
cache(timestamp).foreach(process => {
Chart.add(Map(
DemoListener.pidNames(process._1) + " cpu" -> process._2.getOrElse("cpu", 0: Double),
DemoListener.pidNames(process._1) + " disk" -> process._2.getOrElse("disk", 0: Double)), timestamp)
})
}
}
def remove(timestamp: Long) {
cache -= timestamp
}
def clear() {
cache.clear()
}
}
|
abourdon/powerapi-akka
|
examples/adam-demo/demo-full/src/main/scala/fr/inria/powerapi/example/adamdemo/full/DemoListener.scala
|
Scala
|
agpl-3.0
| 4,032 |
/*
* Copyright (c) 2013-2014 Erik van Oosten
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package imperial.measures
import imperial.wrappers.codahale.CodaHaleBackedGauge
import org.junit.runner.RunWith
import org.mockito.Mockito.when
import org.scalatest.Matchers._
import org.scalatest.{FlatSpec, OneInstancePerTest}
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar._
@RunWith(classOf[JUnitRunner])
class GaugeSpec extends FlatSpec with OneInstancePerTest {
val metric = mock[com.codahale.metrics.Gauge[Int]]
val gauge = new CodaHaleBackedGauge(metric)
"A gauge" should "invoke the underlying function for sugar factory" in {
val sugared = Gauge({ 1 })
sugared.value should equal (1)
}
it should "invoke getValue on the underlying gauge" in {
when(metric.getValue).thenReturn(1)
gauge.value should equal (1)
}
}
|
thecoda/scala-imperial
|
src/test/scala/imperial/measures/GaugeSpec.scala
|
Scala
|
apache-2.0
| 1,397 |