| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M) |
|---|---|---|---|---|---|
package ar.edu.unq.tip.qsim.ui
/**
* Copyright 2014 Tatiana Molinari.
* Copyright 2014 Susana Rosito
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
import java.awt.Color
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
import org.uqbar.arena.actions.MessageSend
import org.uqbar.arena.bindings.NotNullObservable
import org.uqbar.arena.layout.ColumnLayout
import org.uqbar.arena.layout.HorizontalLayout
import org.uqbar.arena.layout.VerticalLayout
import org.uqbar.arena.widgets.Button
import org.uqbar.arena.widgets.FileSelector
import org.uqbar.arena.widgets.GroupPanel
import org.uqbar.arena.widgets.KeyWordTextArea
import org.uqbar.arena.widgets.Label
import org.uqbar.arena.widgets.List
import org.uqbar.arena.widgets.Panel
import org.uqbar.arena.widgets.Selector
import org.uqbar.arena.widgets.TextBox
import org.uqbar.arena.widgets.TextFilter
import org.uqbar.arena.widgets.TextInputEvent
import org.uqbar.arena.widgets.style.Style
import org.uqbar.arena.windows.Dialog
import org.uqbar.arena.windows.WindowOwner
import org.uqbar.commons.utils.Observable
import ar.edu.unq.tpi.qsim.model.Programa
import ar.edu.unq.tpi.qsim.parser.ArquitecturaQ
import ar.edu.unq.tpi.qsim.parser.Parser
class QSimWindow(owner: WindowOwner, model: QSimMain) extends Dialog[QSimMain](owner, model) {
override def createErrorsPanel(parent: Panel) = {
this.setTaskDescription("Agregue los archivos .qsim que desee ensamblar y luego cargar en memoria")
super.createErrorsPanel(parent)
}
override def createFormPanel(mainPanel: Panel) = {
this.setTitle("QSim")
// this is how it has to be set when we generate the .jar
this.setIconImage("icon.png")
// this other way is for development
// this.setIconImage(getClass().getResource("/icon.png").getPath())
var form = new Panel(mainPanel)
form.setLayout(new HorizontalLayout())
var buttonPanel = new GroupPanel(form)
buttonPanel.setTitle("Acciones")
buttonPanel.setLayout(new VerticalLayout())
new FileSelector(buttonPanel)
.setCaption("Agregar")
.bindValueToProperty("pathArchivo")
new Button(buttonPanel).setCaption("Eliminar")
.onClick(new MessageSend(this.getModelObject(), "eliminarArchivo"))
.bindEnabled(new NotNullObservable("actual"))
new Label(buttonPanel).setText("Seleccionar Arquitectura Q:")
val arquitecturasQ = new Selector[ArquitecturaQ](buttonPanel)
arquitecturasQ.setContents(Parser.arquitecturas, "name")
arquitecturasQ.bindValueToProperty("arquitecturaActual")
new Button(buttonPanel).setCaption("Ensamblar")
.onClick(new MessageSend(this.getModelObject(), "ensamblar"))
.bindEnabled(new NotNullObservable("arquitecturaActual"))
val w16Filter = new TextFilter() {
def accept(event: TextInputEvent): Boolean = {
event.getPotentialTextResult().matches("[A-F0-9]{0,4}")
}
}
new Label(buttonPanel).setText("PC:")
val pc = new TextBox(buttonPanel)
pc.bindValueToProperty("pc")
pc.setWidth(110).setHeight(15)
pc.withFilter(w16Filter)
// new Label(buttonPanel).setText("Tamaño de memoria")
// val memoria = new TextBox(buttonPanel)
// memoria.bindValueToProperty("tamañoDeMemoria")
// memoria.setWidth(110).setHeight(15)
// memoria.withFilter(w16Filter)
new Button(buttonPanel).setCaption("Cargar en memoria")
.onClick(new MessageSend(this, "cargar"))
.bindEnabled(new NotNullObservable("programa"))
crearPanelDeEdicion(form)
}
def cargar() {
val sim = new SimuladorAppmodel(model.programa, model.pc)
new QSimWindows(this, sim).open()
}
def crearPanelDeEdicion(parent: Panel) {
var panelForm = new Panel(parent)
panelForm.setLayout(new ColumnLayout(2))
val list = new List(panelForm, "archivos")
list.setWidth(200)
.setHeight(300)
.bindValueToProperty("actual")
val codeEditor = new KeyWordTextArea(panelForm)
codeEditor.setWidth(300).setHeight(300).bindValueToProperty("actual.codigo")
codeEditor.keyWords("[a-z_]*")
.foreground(Color.RED).fontStyle(Style.ITALIC)
codeEditor.keyWords("MOV", "MUL", "SUB", "DIV", "ADD", "CALL", "RET", "CMP", "JMP", "JE", "JNE", "JLE", "JG", "JL", "JGE", "JLEU", "JGU", "JCS", "JNEG", "JVS")
.foreground(Color.BLUE).fontStyle(Style.BOLD)
codeEditor.keyWords("""//[\\w]+[\\d]*\\n?""")
.foreground(Color.GREEN)
codeEditor.keyWords("R[0-9]{1}")
.foreground(Color.DARK_GRAY).fontStyle(Style.BOLD).fontStyle(Style.ITALIC)
codeEditor.keyWords("0x[0-9A-F]{4}")
.foreground(Color.ORANGE)
codeEditor.keyWords("[\\\\(\\\\)\\\\[\\\\]\\\\{\\\\}]")
.foreground(Color.DARK_GRAY).fontStyle(Style.BOLD)
}
}
@Observable
class QSimMain {
var archivos: java.util.List[Archivo] = scala.collection.immutable.List[Archivo](new Archivo("main", ""))
var actual: Archivo = archivos.get(0)
var arquitecturaActual: ArquitecturaQ = Parser.arquitecturas(0)
var programa: Programa = _
var enabled = false
var pc = "0000"
//var tamañoDeMemoria = "0300"
def cambiarEnabled() {
enabled = !enabled
}
def setPathArchivo(path: String) {
if (path != null) {
var nombre = takeName(path)
var codigo = readFile(path)
var archivo = new Archivo(nombre, codigo)
archivos = archivos.+:(archivo)
println(archivos)
}
}
def getPathArchivo() = ""
def readFile(path: String) = {
val input = io.Source.fromFile(path)
input.mkString
}
def eliminarArchivo() {
archivos = archivos.-(actual)
actual = null
}
def ensamblar() {
programa = null
programa = arquitecturaActual.parser(archivos.map(_.codigo).mkString)
}
def takeName(path: String) = {
var part_path = path.split("/")
part_path(part_path.length - 1)
}
}
@Observable
class Archivo(var nombre: String, var codigo: String) {
override def toString() = nombre
}
object laaa extends App {
var la = new QSimMain()
la.setPathArchivo("src/main/resources/programaQ1.qsim")
}
| molinarirosito/QSim_UI | src/main/scala/ar/edu/unq/tip/qsim/ui/QSimMain.scala | Scala | gpl-3.0 | 6,649 |
/*
* Copyright 2012-2020 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.edda
import java.util.concurrent.ThreadPoolExecutor
import java.util.concurrent.Executors
import concurrent.ExecutionContext
import com.netflix.servo.monitor.Monitors
import com.netflix.servo.DefaultMonitorRegistry
object ThreadPools {
// 200 for thread parity with tomcat
val queryPool = Executors.newFixedThreadPool(200)
DefaultMonitorRegistry
.getInstance()
.register(
Monitors
.newThreadPoolMonitor("edda.threadpool.query", queryPool.asInstanceOf[ThreadPoolExecutor])
)
val observerPool = Executors.newFixedThreadPool(5)
DefaultMonitorRegistry
.getInstance()
.register(
Monitors.newThreadPoolMonitor(
"edda.threadpool.observer",
observerPool.asInstanceOf[ThreadPoolExecutor]
)
)
var purgePool = Executors.newFixedThreadPool(1)
DefaultMonitorRegistry
.getInstance()
.register(
Monitors
.newThreadPoolMonitor("edda.threadpool.purge", purgePool.asInstanceOf[ThreadPoolExecutor])
)
val electorPool = Executors.newFixedThreadPool(10)
DefaultMonitorRegistry
.getInstance()
.register(
Monitors.newThreadPoolMonitor(
"edda.threadpool.elector",
electorPool.asInstanceOf[ThreadPoolExecutor]
)
)
}
object QueryExecutionContext {
implicit lazy val ec: ExecutionContext =
ExecutionContext.fromExecutorService(ThreadPools.queryPool)
}
object ObserverExecutionContext {
implicit lazy val ec: ExecutionContext =
ExecutionContext.fromExecutorService(ThreadPools.observerPool)
}
object PurgeExecutionContext {
implicit var ec: ExecutionContext = ExecutionContext.fromExecutorService(ThreadPools.purgePool)
}
object ElectorExecutionContext {
implicit lazy val ec: ExecutionContext =
ExecutionContext.fromExecutorService(ThreadPools.electorPool)
}
| Netflix/edda | src/main/scala/com/netflix/edda/ExecutionContexts.scala | Scala | apache-2.0 | 2,433 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
/**
* Spark streaming receiver for Flume.
*/
package object flume
| bravo-zhang/spark | external/flume/src/main/scala/org/apache/spark/streaming/flume/package.scala | Scala | apache-2.0 | 906 |
package me.frmr.stripe
import net.liftweb.json._
import Serialization._
import JsonDSL._
import Extraction._
import java.util.Arrays
import scala.collection.JavaConversions._
trait StripeList[T] extends StripeObject {
val data: List[T]
val hasMore: Boolean
val totalCount: Option[Int]
val url: String
val raw: Option[JValue]
}
case class CardList(
data: List[Card],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Card] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class SubscriptionList(
data: List[Subscription],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Subscription] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class RefundList(
data: List[Refund],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Refund] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class ApplicationFeeList(
data: List[ApplicationFee],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[ApplicationFee] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class ApplicationFeeRefundList(
data: List[ApplicationFeeRefund],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[ApplicationFeeRefund] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class CustomerList(
data: List[Customer],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Customer] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class RecipientList(
data: List[Recipient],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Recipient] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class TransferList(
data: List[Transfer],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Transfer] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class ChargeList(
data: List[Charge],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Charge] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class PlanList(
data: List[Plan],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Plan] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class CouponList(
data: List[Coupon],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Coupon] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class InvoiceLineItemList(
data: List[InvoiceLineItem],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[InvoiceLineItem] {
def this(data: List[InvoiceLineItem], totalCount: Option[Int], url: String, raw: Option[JValue]) =
this(data, false, totalCount, url, raw)
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class InvoiceList(
data: List[Invoice],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Invoice] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class InvoiceItemList(
data: List[InvoiceItem],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[InvoiceItem] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
case class EventList(
data: List[Event],
hasMore: Boolean = false,
totalCount: Option[Int] = None,
url: String = "",
raw: Option[JValue] = None
) extends StripeList[Event] {
def withRaw(raw: JValue) = this.copy(raw = Some(raw))
}
| farmdawgnation/streifen | src/main/scala/me/frmr/stripe/StripeList.scala | Scala | apache-2.0 | 4,317 |
package com.twitter.finagle.httpx.netty
import org.jboss.netty.handler.codec.http.{HttpMessage, HttpRequest, HttpMethod}
/** Proxy for HttpRequest. Used by Request. */
private[finagle] trait HttpRequestProxy extends HttpMessageProxy {
protected[finagle] def httpRequest: HttpRequest
protected[finagle] def getHttpRequest(): HttpRequest = httpRequest
protected[finagle] def httpMessage: HttpMessage = httpRequest
protected[finagle] def getMethod(): HttpMethod = httpRequest.getMethod
protected[finagle] def setMethod(method: HttpMethod) { httpRequest.setMethod(method) }
protected[finagle] def getUri(): String = httpRequest.getUri()
protected[finagle] def setUri(uri: String) { httpRequest.setUri(uri) }
}
| travisbrown/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/netty/HttpRequestProxy.scala | Scala | apache-2.0 | 754 |
package mesosphere.marathon
package core.task.termination.impl
import java.time.Clock
import akka.Done
import akka.actor.{ Actor, Cancellable, Props }
import akka.stream.ActorMaterializer
import com.typesafe.scalalogging.StrictLogging
import mesosphere.marathon.core.event.{ InstanceChanged, UnknownInstanceTerminated }
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.core.task.Task.Id
import mesosphere.marathon.core.task.termination.InstanceChangedPredicates.considerTerminal
import mesosphere.marathon.core.task.termination.KillConfig
import mesosphere.marathon.core.task.tracker.InstanceStateOpProcessor
import mesosphere.marathon.state.Timestamp
import mesosphere.marathon.stream.Sink
import scala.collection.mutable
import scala.concurrent.{ Future, Promise }
/**
* An actor that handles killing instances in chunks and depending on the instance state.
* Lost instances will simply be expunged from state, while active instances will be killed
* via the scheduler driver. There is a maximum number of kills in flight, and
* the service will only issue more kills when instances are reported terminal.
*
* If a kill is not acknowledged with a terminal status update within a configurable
* time window, the kill is retried a configurable number of times. If the maximum
* number of retries is exceeded, the instance will be expunged from state similar to a
* lost instance.
*
* For each kill request, a [[KillStreamWatcher]] will be created, which
* is supposed to watch the progress and complete a given promise when all watched
* instances are reportedly terminal.
*
* For pods started via the default executor, it is sufficient to kill 1 task of the group,
* which will cause all tasks to be killed
*
* See [[KillConfig]] for configuration options.
*/
private[impl] class KillServiceActor(
driverHolder: MarathonSchedulerDriverHolder,
stateOpProcessor: InstanceStateOpProcessor,
config: KillConfig,
clock: Clock) extends Actor with StrictLogging {
import KillServiceActor._
import context.dispatcher
val instancesToKill: mutable.HashMap[Instance.Id, ToKill] = mutable.HashMap.empty
val inFlight: mutable.HashMap[Instance.Id, ToKill] = mutable.HashMap.empty
// We instantiate the materializer here so that all materialized streams end up as children of this actor
implicit val materializer = ActorMaterializer()
val retryTimer: RetryTimer = new RetryTimer {
override def createTimer(): Cancellable = {
context.system.scheduler.schedule(config.killRetryTimeout, config.killRetryTimeout, self, Retry)
}
}
override def preStart(): Unit = {
context.system.eventStream.subscribe(self, classOf[InstanceChanged])
context.system.eventStream.subscribe(self, classOf[UnknownInstanceTerminated])
}
override def postStop(): Unit = {
retryTimer.cancel()
context.system.eventStream.unsubscribe(self)
if (instancesToKill.nonEmpty) {
logger.warn(s"Stopping $self, but not all tasks have been killed. Remaining: ${instancesToKill.keySet.mkString(", ")}, inFlight: ${inFlight.keySet.mkString(", ")}")
}
}
override def receive: Receive = {
case KillUnknownTaskById(taskId) =>
killUnknownTaskById(taskId)
case KillInstances(instances, promise) =>
killInstances(instances, promise)
case InstanceChanged(id, _, _, condition, _) if considerTerminal(condition) &&
(inFlight.contains(id) || instancesToKill.contains(id)) =>
handleTerminal(id)
case UnknownInstanceTerminated(id, _, _) if inFlight.contains(id) || instancesToKill.contains(id) =>
handleTerminal(id)
case Retry =>
retry()
}
def killUnknownTaskById(taskId: Task.Id): Unit = {
logger.debug(s"Received KillUnknownTaskById($taskId)")
instancesToKill.update(taskId.instanceId, ToKill(taskId.instanceId, Seq(taskId), maybeInstance = None, attempts = 0))
processKills()
}
def killInstances(instances: Seq[Instance], promise: Promise[Done]): Unit = {
val instanceIds = instances.map(_.instanceId)
logger.debug(s"Adding instances $instanceIds to queue; setting up child actor to track progress")
promise.completeWith(watchForKilledInstances(instanceIds))
instances.foreach { instance =>
// TODO(PODS): do we make sure somewhere that an instance has _at_least_ one task?
val taskIds: IndexedSeq[Id] = instance.tasksMap.values.withFilter(!_.isTerminal).map(_.taskId)(collection.breakOut)
instancesToKill.update(
instance.instanceId,
ToKill(instance.instanceId, taskIds, maybeInstance = Some(instance), attempts = 0)
)
}
processKills()
}
/**
* Begins watching immediately for terminated instances. Future is completed when all instances are seen.
*/
def watchForKilledInstances(instanceIds: Seq[Instance.Id]): Future[Done] = {
// Note - we toss the materialized cancellable. We are okay to do this here because KillServiceActor will continue to retry
// killing the instanceIds in question, forever, until this Future completes.
KillStreamWatcher.
watchForKilledInstances(context.system.eventStream, instanceIds).
runWith(Sink.head)
}
def processKills(): Unit = {
val killCount = config.killChunkSize - inFlight.size
val toKillNow = instancesToKill.take(killCount)
logger.info(s"processing ${toKillNow.size} kills for ${toKillNow.keys}")
toKillNow.foreach {
case (instanceId, data) => processKill(data)
}
if (inFlight.isEmpty) {
retryTimer.cancel()
} else {
retryTimer.setup()
}
}
def processKill(toKill: ToKill): Unit = {
val instanceId = toKill.instanceId
val taskIds = toKill.taskIdsToKill
KillAction(toKill.instanceId, toKill.taskIdsToKill, toKill.maybeInstance) match {
case KillAction.Noop =>
()
case KillAction.IssueKillRequest =>
driverHolder.driver.foreach { driver =>
taskIds.map(_.mesosTaskId).foreach(driver.killTask)
}
val attempts = inFlight.get(toKill.instanceId).fold(1)(_.attempts + 1)
inFlight.update(
toKill.instanceId, ToKill(instanceId, taskIds, toKill.maybeInstance, attempts, issued = clock.now()))
case KillAction.ExpungeFromState =>
stateOpProcessor.forceExpunge(toKill.instanceId)
}
instancesToKill.remove(instanceId)
}
def handleTerminal(instanceId: Instance.Id): Unit = {
instancesToKill.remove(instanceId)
inFlight.remove(instanceId)
logger.debug(s"$instanceId is terminal. (${instancesToKill.size} kills queued, ${inFlight.size} in flight)")
processKills()
}
def retry(): Unit = {
val now = clock.now()
inFlight.foreach {
case (instanceId, toKill) if (toKill.issued + config.killRetryTimeout) < now =>
logger.warn(s"No kill ack received for $instanceId, retrying (${toKill.attempts} attempts so far)")
processKill(toKill)
case _ => // ignore
}
}
}
private[termination] object KillServiceActor {
sealed trait Request extends InternalRequest
case class KillInstances(instances: Seq[Instance], promise: Promise[Done]) extends Request
case class KillUnknownTaskById(taskId: Task.Id) extends Request
sealed trait InternalRequest
case object Retry extends InternalRequest
def props(
driverHolder: MarathonSchedulerDriverHolder,
stateOpProcessor: InstanceStateOpProcessor,
config: KillConfig,
clock: Clock): Props = Props(
new KillServiceActor(driverHolder, stateOpProcessor, config, clock))
/**
* Metadata used to track which instances to kill and how many attempts have been made
*
* @param instanceId id of the instance to kill
* @param taskIdsToKill ids of the tasks to kill
* @param maybeInstance the instance, if available
* @param attempts the number of kill attempts
* @param issued the time of the last issued kill request
*/
case class ToKill(
instanceId: Instance.Id,
taskIdsToKill: Seq[Task.Id],
maybeInstance: Option[Instance],
attempts: Int,
issued: Timestamp = Timestamp.zero)
}
/**
* Wraps a timer into an interface that hides internal mutable state behind simple setup and cancel methods
*/
private[this] trait RetryTimer {
private[this] var retryTimer: Option[Cancellable] = None
/** Creates a new timer when setup() is called */
def createTimer(): Cancellable
/**
* Cancel the timer if there is one.
*/
final def cancel(): Unit = {
retryTimer.foreach(_.cancel())
retryTimer = None
}
/**
* Setup a timer if there is no timer setup already. Will do nothing if there is a timer.
* Note that if the timer is scheduled only once, it will not be removed until you call cancel.
*/
final def setup(): Unit = {
if (retryTimer.isEmpty) {
retryTimer = Some(createTimer())
}
}
}
| guenter/marathon | src/main/scala/mesosphere/marathon/core/task/termination/impl/KillServiceActor.scala | Scala | apache-2.0 | 8,948 |
package scala.collection.immutable
import org.scalacheck.Properties
import scala.language.higherKinds
import org.scalacheck.Arbitrary.arbInt
import org.scalacheck.Arbitrary
import org.scalacheck.Gen
import org.scalacheck.commands.Commands
import scala.collection.mutable
import scala.util.{Success, Try}
object MapProperties extends Properties("immutable.Map builder implementations"){
type K = String
type V = String
type T = (K, V)
property("ListMap builder stateful testing") = new MapBuilderStateProperties(HashMap.empty[K, V], ListMap.newBuilder[K, V]).property()
property("SortedMap builder stateful testing") = new MapBuilderStateProperties(ListMap.empty[K, V], SortedMap.newBuilder[K, V]).property()
property("HashMap builder stateful testing") = new MapBuilderStateProperties(ListMap.empty[K, V], HashMap.newBuilder[K, V]).property()
property("Map builder stateful testing") = new MapBuilderStateProperties(ListMap.empty[K, V], Map.newBuilder[K, V]).property()
property("VectorMap builder stateful testing") = new MapBuilderStateProperties(ListMap.empty[K, V], VectorMap.newBuilder[K, V]).property()
property("IntMap builder stateful testing") = new MapBuilderStateProperties(ListMap.empty[Int, Long], IntMap.newBuilder[Long]).property()
property("LongMap builder stateful testing") = new MapBuilderStateProperties(ListMap.empty[Long, Int], LongMap.newBuilder[Int]).property()
}
/** Generic stateful property testing for maps
*
* Usage: {{{
* class MyMapProperties extends Properties("my.Map") {
*
* property("MyMapProperties builder stateful testing") =
* new MapBuilderStateProperties[K, V, ListMap, MyMap](ListMap, MyMap).property() &&
* new MapBuilderStateProperties[K, V, Map, MyMap](Map, MyMap).property() &&
* new MapBuilderStateProperties[K, V, VectorMap, MyMap](VectorMap, MyMap).property()
* }
* }}}
*
* @param newEmptyControlMap Produce a new empty control map. This map is assumed to be correct.
* That is, it is assumed it can immutably append/concatenate properly.
* It's a good idea to cross validate against two or more control maps.
* @param newBuilder produce a new builder of the map under test
*
* @param tupleGen gen for the key-values of this map
* @tparam K type of the Key
* @tparam V type of the Value
* @tparam ControlMap the type of the control map implementation
* @tparam M the type of map under test
*/
class MapBuilderStateProperties[K, V, ControlMap <: Map[K, V], M <: Map[K, V]](
newEmptyControlMap: => ControlMap,
newBuilder: => mutable.Builder[(K, V), M])(implicit tupleGen: Arbitrary[(K, V)]) extends Commands {
override type State = ControlMap
override type Sut = mutable.Builder[(K, V), M]
override def genInitialState: Gen[ControlMap] = newEmptyControlMap
override def canCreateNewSut(
newState: State,
initSuts: scala.Iterable[State],
runningSuts: scala.Iterable[mutable.Builder[(K, V), M]]) = true
override def newSut(state: ControlMap) = newBuilder.addAll(state)
override def destroySut(sut: mutable.Builder[(K, V), M]): Unit = ()
override def initialPreCondition(state: ControlMap) = state.isEmpty
override def genCommand(state: State) = {
import Gen._
oneOf(
const(Clear),
const(Result),
arbInt.arbitrary.map(SizeHint),
tupleGen.arbitrary.map(AddOne),
listOf(tupleGen.arbitrary).map(AddAll)
)
}
case object Clear extends UnitCommand {
override def postCondition(state: ControlMap, success: Boolean) = success
override def run(sut: mutable.Builder[(K, V), M]) = sut.clear()
override def nextState(state: ControlMap) = newEmptyControlMap
override def preCondition(state: ControlMap) = true
}
case object Result extends Command {
override type Result = M
override def postCondition(state: ControlMap, result: Try[Result]) = result == Success(state)
override def run(sut: mutable.Builder[(K, V), M]) = sut.result()
override def nextState(state: ControlMap) = state
override def preCondition(state: ControlMap) = true
}
case class SizeHint(size: Int) extends UnitCommand {
override def postCondition(state: ControlMap, success: Boolean) = success
override def run(sut: mutable.Builder[(K, V), M]) = sut.sizeHint(size)
override def nextState(state: ControlMap) = state
override def preCondition(state: ControlMap) = true
}
case class AddOne(elem: (K, V)) extends UnitCommand {
override def postCondition(state: ControlMap, success: Boolean) = success
override def run(sut: mutable.Builder[(K, V), M]) = sut.addOne(elem)
override def nextState(state: ControlMap) =
state.updated(elem._1, elem._2).asInstanceOf[ControlMap]
override def preCondition(state: ControlMap) = true
}
case class AddAll(elems: Seq[(K, V)]) extends UnitCommand {
override def postCondition(state: ControlMap, success: Boolean) = success
override def run(sut: mutable.Builder[(K, V), M]) = sut.addAll(elems)
override def nextState(state: ControlMap) =
state.concat(elems).asInstanceOf[ControlMap]
override def preCondition(state: ControlMap) = true
}
}
| scala/scala | test/scalacheck/scala/collection/immutable/MapProperties.scala | Scala | apache-2.0 | 5,302 |
object test {import scala.runtime.WorksheetSupport._; def main(args: Array[String])=$execute{;$skip(57);
val problem = new Pouring(Vector(4,7));System.out.println("""problem : <error> = """ + $show(problem ))}
}
| frojasg/progfun | progfun/.worksheet/src/test.scala | Scala | mit | 221 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers
import org.scalatest._
import org.scalatest.exceptions.TestFailedException
class ShouldBeASymbolSpec extends Spec with ShouldMatchers with FileMocks {
object `The be a ('symbol) syntax` {
def `should do nothing if the object has an appropriately named method, which returns true` {
fileMock should be a ('file)
isFileMock should be a ('file)
}
def `should throw TestFailedException if no <symbol> or is<Symbol> method exists` {
val ex1 = intercept[TestFailedException] {
noPredicateMock should be a ('apple)
}
ex1.getMessage should equal ("NoPredicateMock has neither an apple nor an isApple method")
// Check message for name that starts with a consonant (should use a instead of an)
val ex2 = intercept[TestFailedException] {
noPredicateMock should be a ('file)
}
ex2.getMessage should equal ("NoPredicateMock has neither a file nor an isFile method")
}
def `should do nothing if the object has an appropriately named method, which returns false when used with not` {
notFileMock should not { be a ('file) }
notFileMock should not be a ('file)
isNotFileMock should not { be a ('file) }
isNotFileMock should not be a ('file)
}
def `should throw TestFailedException if no <symbol> or is<Symbol> method exists, when used with not` {
val ex1 = intercept[TestFailedException] {
noPredicateMock should not { be a ('apple) }
}
ex1.getMessage should equal ("NoPredicateMock has neither an apple nor an isApple method")
val ex2 = intercept[TestFailedException] {
noPredicateMock should not (be a ('directory))
}
ex2.getMessage should equal ("NoPredicateMock has neither a directory nor an isDirectory method")
val ex3 = intercept[TestFailedException] {
noPredicateMock should not be a ('apple)
}
ex3.getMessage should equal ("NoPredicateMock has neither an apple nor an isApple method")
val ex4 = intercept[TestFailedException] {
noPredicateMock should not be a ('directory)
}
ex4.getMessage should equal ("NoPredicateMock has neither a directory nor an isDirectory method")
}
def `should do nothing if the object has an appropriately named method, which returns true, when used in a logical-and expression` {
fileMock should ((be a ('file)) and (be a ('file)))
fileMock should (be a ('file) and (be a ('file)))
fileMock should (be a ('file) and be a ('file))
isFileMock should ((be a ('file)) and (be a ('file)))
isFileMock should (be a ('file) and (be a ('file)))
isFileMock should (be a ('file) and be a ('file))
}
def `should do nothing if the object has an appropriately named method, which returns true, when used in a logical-or expression` {
fileMock should ((be a ('directory)) or (be a ('file)))
fileMock should (be a ('directory) or (be a ('file)))
fileMock should (be a ('directory) or be a ('file))
isFileMock should ((be a ('directory)) or (be a ('file)))
isFileMock should (be a ('directory) or (be a ('file)))
isFileMock should (be a ('directory) or be a ('file))
fileMock should ((be a ('file)) or (be a ('directory)))
fileMock should (be a ('file) or (be a ('directory)))
fileMock should (be a ('file) or be a ('directory))
isFileMock should ((be a ('file)) or (be a ('directory)))
isFileMock should (be a ('file) or (be a ('directory)))
isFileMock should (be a ('file) or be a ('directory))
}
def `should do nothing if the object has an appropriately named method, which returns false, when used in a logical-and expression with not` {
notFileMock should (not (be a ('file)) and not (be a ('file)))
notFileMock should ((not be a ('file)) and (not be a ('file)))
notFileMock should (not be a ('file) and not be a ('file))
isNotFileMock should (not (be a ('file)) and not (be a ('file)))
isNotFileMock should ((not be a ('file)) and (not be a ('file)))
isNotFileMock should (not be a ('file) and not be a ('file))
}
def `should do nothing if the object has an appropriately named method, which returns false, when used in a logical-or expression with not` {
notFileMock should (not (be a ('file)) or not (be a ('file)))
notFileMock should ((not be a ('file)) or (not be a ('file)))
notFileMock should (not be a ('file) or not be a ('file))
isNotFileMock should (not (be a ('file)) or not (be a ('file)))
isNotFileMock should ((not be a ('file)) or (not be a ('file)))
isNotFileMock should (not be a ('file) or not be a ('file))
notFileMock should (not (be a ('directory)) or not (be a ('file)))
notFileMock should ((not be a ('directory)) or (not be a ('file)))
notFileMock should (not be a ('directory) or not be a ('file))
isNotFileMock should (not (be a ('directory)) or not (be a ('file)))
isNotFileMock should ((not be a ('directory)) or (not be a ('file)))
isNotFileMock should (not be a ('directory) or not be a ('file))
}
def `should throw TestFailedException if the object has an appropriately named method, which returns false` {
val caught1 = intercept[TestFailedException] {
notFileMock should be a ('file)
}
assert(caught1.getMessage === "NotFileMock was not a file")
val caught2 = intercept[TestFailedException] {
isNotFileMock should be a ('file)
}
assert(caught2.getMessage === "IsNotFileMock was not a file")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns true when used with not` {
val caught1 = intercept[TestFailedException] {
fileMock should not { be a ('file) }
}
assert(caught1.getMessage === "FileMock was a file")
val caught2 = intercept[TestFailedException] {
fileMock should not be a ('file)
}
assert(caught2.getMessage === "FileMock was a file")
val caught3 = intercept[TestFailedException] {
isFileMock should not { be a ('file) }
}
assert(caught3.getMessage === "IsFileMock was a file")
val caught4 = intercept[TestFailedException] {
isFileMock should not be a ('file)
}
assert(caught4.getMessage === "IsFileMock was a file")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns false, when used in a logical-and expression` {
val caught1 = intercept[TestFailedException] {
fileMock should ((be a ('file)) and (be a ('directory)))
}
assert(caught1.getMessage === "FileMock was a file, but FileMock was not a directory")
val caught2 = intercept[TestFailedException] {
fileMock should (be a ('file) and (be a ('directory)))
}
assert(caught2.getMessage === "FileMock was a file, but FileMock was not a directory")
val caught3 = intercept[TestFailedException] {
fileMock should (be a ('file) and be a ('directory))
}
assert(caught3.getMessage === "FileMock was a file, but FileMock was not a directory")
val caught4 = intercept[TestFailedException] {
isFileMock should ((be a ('file)) and (be a ('directory)))
}
assert(caught4.getMessage === "IsFileMock was a file, but IsFileMock was not a directory")
val caught5 = intercept[TestFailedException] {
isFileMock should (be a ('file) and (be a ('directory)))
}
assert(caught5.getMessage === "IsFileMock was a file, but IsFileMock was not a directory")
val caught6 = intercept[TestFailedException] {
isFileMock should (be a ('file) and be a ('directory))
}
assert(caught6.getMessage === "IsFileMock was a file, but IsFileMock was not a directory")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns false, when used in a logical-or expression` {
val caught1 = intercept[TestFailedException] {
notFileMock should ((be a ('file)) or (be a ('file)))
}
assert(caught1.getMessage === "NotFileMock was not a file, and NotFileMock was not a file")
val caught2 = intercept[TestFailedException] {
notFileMock should (be a ('file) or (be a ('file)))
}
assert(caught2.getMessage === "NotFileMock was not a file, and NotFileMock was not a file")
val caught3 = intercept[TestFailedException] {
notFileMock should (be a ('file) or be a ('file))
}
assert(caught3.getMessage === "NotFileMock was not a file, and NotFileMock was not a file")
val caught4 = intercept[TestFailedException] {
isNotFileMock should ((be a ('file)) or (be a ('file)))
}
assert(caught4.getMessage === "IsNotFileMock was not a file, and IsNotFileMock was not a file")
val caught5 = intercept[TestFailedException] {
isNotFileMock should (be a ('file) or (be a ('file)))
}
assert(caught5.getMessage === "IsNotFileMock was not a file, and IsNotFileMock was not a file")
val caught6 = intercept[TestFailedException] {
isNotFileMock should (be a ('file) or be a ('file))
}
assert(caught6.getMessage === "IsNotFileMock was not a file, and IsNotFileMock was not a file")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns true, when used in a logical-and expression with not` {
val caught1 = intercept[TestFailedException] {
fileMock should (not (be a ('directory)) and not (be a ('file)))
}
assert(caught1.getMessage === "FileMock was not a directory, but FileMock was a file")
val caught2 = intercept[TestFailedException] {
fileMock should ((not be a ('directory)) and (not be a ('file)))
}
assert(caught2.getMessage === "FileMock was not a directory, but FileMock was a file")
val caught3 = intercept[TestFailedException] {
fileMock should (not be a ('directory) and not be a ('file))
}
assert(caught3.getMessage === "FileMock was not a directory, but FileMock was a file")
val caught4 = intercept[TestFailedException] {
isFileMock should (not (be a ('directory)) and not (be a ('file)))
}
assert(caught4.getMessage === "IsFileMock was not a directory, but IsFileMock was a file")
val caught5 = intercept[TestFailedException] {
isFileMock should ((not be a ('directory)) and (not be a ('file)))
}
assert(caught5.getMessage === "IsFileMock was not a directory, but IsFileMock was a file")
val caught6 = intercept[TestFailedException] {
isFileMock should (not be a ('directory) and not be a ('file))
}
assert(caught6.getMessage === "IsFileMock was not a directory, but IsFileMock was a file")
// Check that the error message "short circuits"
val caught7 = intercept[TestFailedException] {
fileMock should (not (be a ('file)) and not (be a ('directory)))
}
assert(caught7.getMessage === "FileMock was a file")
}
def `should throw TestFailedException if the object has an appropriately named method, which returns true, when used in a logical-or expression with not` {
val caught1 = intercept[TestFailedException] {
fileMock should (not (be a ('file)) or not (be a ('file)))
}
assert(caught1.getMessage === "FileMock was a file, and FileMock was a file")
val caught2 = intercept[TestFailedException] {
fileMock should ((not be a ('file)) or (not be a ('file)))
}
assert(caught2.getMessage === "FileMock was a file, and FileMock was a file")
val caught3 = intercept[TestFailedException] {
fileMock should (not be a ('file) or not be a ('file))
}
assert(caught3.getMessage === "FileMock was a file, and FileMock was a file")
val caught4 = intercept[TestFailedException] {
isFileMock should (not (be a ('file)) or not (be a ('file)))
}
assert(caught4.getMessage === "IsFileMock was a file, and IsFileMock was a file")
val caught5 = intercept[TestFailedException] {
isFileMock should ((not be a ('file)) or (not be a ('file)))
}
assert(caught5.getMessage === "IsFileMock was a file, and IsFileMock was a file")
val caught6 = intercept[TestFailedException] {
isFileMock should (not be a ('file) or not be a ('file))
}
assert(caught6.getMessage === "IsFileMock was a file, and IsFileMock was a file")
}
}
}
| hubertp/scalatest | src/test/scala/org/scalatest/matchers/ShouldBeASymbolSpec.scala | Scala | apache-2.0 | 13,246 |
import org.scalatest._
import scalaz.Monad
import annotation.tailrec
import scalaz._
import Scalaz._
class BasicFreeSpec extends FlatSpec with Matchers with Instrumented {
import freez.view._
val testN = Seq[Int](
1000, 1000,
1000
, 200000, 300000, 500000, 800000
, 1000000, 2000000, 3000000, 5000000
, 10000000, 12000000, 15000000 //, 18000000
// , 20000000, 30000000, 40000000, 50000000
)
trait GenericBinds {
//(a flatMap (b flatMap (c flatMap (...))))
def lftBind[S[_]](n: Int)(gen: Int => S[Int])(implicit M: Monad[S]) = {
(1 to n).foldLeft(gen(0)){ case (acc, i) => acc flatMap { a => gen(i) } }
}
// (... flatMap (_ => c flatMap (_ => b flatMap (_ => a))))
def rgtBind[S[_]](n: Int)(gen: Int => S[Int])(implicit M: Monad[S]) = {
(1 to n).foldLeft(gen(n)){ case (acc, i) => gen(n-i) flatMap { _ => acc } }
}
}
object GenericBinds extends GenericBinds
class GenericTests(implicit V: FreeViewer[freez.view.Free], M: Monad[FreeTrampoline]) extends GenericBinds {
def gen[I](i: I): FreeTrampoline[I] = {
V.fromView(FreeView.Impure[Function0, I]( () => FreeTrampoline.done(i) ))
}
def even[A](ns: List[A]): FreeTrampoline[Boolean] = ns match {
case Nil => FreeTrampoline.done(true)
case x :: xs => FreeTrampoline.suspend(odd(xs))
}
def odd[A](ns: List[A]): FreeTrampoline[Boolean] = ns match {
case Nil => FreeTrampoline.done(false)
case x :: xs => FreeTrampoline.suspend(even(xs))
}
def work(name: String) = {
println(s"$name - Left Bind")
initFile(s"src/test/results/${System.currentTimeMillis()}_${name}_left.txt", Seq("nb", s"${name}_left"))
testN foreach { n =>
testTime2File(s"$n") { lftBind(n)(gen _).run }
}
closeFile()
/*println(s"$name - Right Bind")
initFile(s"src/test/results/${name}_right.txt", Seq("nb", s"${name}_right"))
testN foreach { n =>
testTime2File(s"$n") { rgtBind(n)(gen _).run }
}
closeFile()
println(s"$name - Even")
initFile(s"src/test/results/${name}_even.txt", Seq("nb", s"${name}_even"))
testN foreach { n =>
val l = List.fill(n)(0)
testTime2File(s"$n") { even(l).run }
}
closeFile()
println(s"$name - Odd")
initFile(s"src/test/results/${name}_odd.txt", Seq("nb", s"${name}_odd"))
testN foreach { n =>
val l = List.fill(n)(0)
testTime2File(s"$n") { even(0 +: l).run }
}
closeFile()*/
}
}
"Scalaz Free" should "left/right/odd/even" in {
import Free._
import GenericBinds._
def gen[I](i: I): Trampoline[I] = {
Suspend( () => Trampoline.done(i) )
}
def even[A](ns: List[A]): Trampoline[Boolean] = ns match {
case Nil => Trampoline.done(true)
case x :: xs => Trampoline.suspend(odd(xs))
}
def odd[A](ns: List[A]): Trampoline[Boolean] = ns match {
case Nil => Trampoline.done(false)
case x :: xs => Trampoline.suspend(even(xs))
}
println("Scalaz Free - Left Bind")
initFile(s"src/test/results/${System.currentTimeMillis()}_scalaz_free_left.txt", Seq("nb", "scalaz_free_left"))
testN foreach { n =>
testTime2File(s"$n") { lftBind(n)(gen _).run }
}
closeFile()
/*println("Scalaz Free - Right Bind")
initFile("src/test/results/scalaz_free_right.txt", Seq("nb", "scalaz_free_left"))
testN foreach { n =>
testTime2File(s"$n") { rgtBind(n)(gen _).run }
}
closeFile()
println("Scalaz Free - Even")
initFile("src/test/results/scalaz_free_even.txt", Seq("nb", "scalaz_free_even"))
testN foreach { n =>
val l = List.fill(n)(0)
testTime2File(s"$n") { even(l).run }
}
closeFile()
println("Scalaz Free - Odd")
initFile("src/test/results/scalaz_free_odd.txt", Seq("nb", "scalaz_free_odd"))
testN foreach { n =>
val l = List.fill(n)(0)
testTime2File(s"$n") { even(0 +: l).run }
}
closeFile()*/
}
/*
"TFree Strict Free" should "left/right/odd/even" in {
import freez.view._
import FreeView._
import tfingertree.strict.Free
import Free._
val tests = new GenericTests()
import tests._
work("ftree_strict_free")
}
"TFree Lazy Free" should "left/right/odd/even" in {
import freez.view._
import FreeView._
import tfingertree.`lazy`.Free
import Free._
val tests = new GenericTests()
import tests._
work("ftree_lazy_free")
}
"Scalaz FingerTree Free" should "left/right/odd/even" in {
import freez.view._
import FreeView._
import tfingertree.`lazy`.Free
import Free._
val tests = new GenericTests()
import tests._
work("ftree_scalaz_free")
}
*/
}
| mandubian/freez | src/test/scala/BasicFreeSpec.scala | Scala | apache-2.0 | 4,777 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.commons.util
import io.gatling.BaseSpec
import io.gatling.commons.util.ClassHelper._
class ClassHelperSpec extends BaseSpec {
"getShortName" should "shorten Class name" in {
classOf[java.util.concurrent.TimeoutException].getShortName shouldBe "j.u.c.TimeoutException"
}
"toClassShortName" should "shorten String with package" in {
toClassShortName(classOf[java.util.concurrent.TimeoutException].getName) shouldBe "j.u.c.TimeoutException"
}
it should "shorten String without package" in {
toClassShortName("Foo") shouldBe "Foo"
}
}
| MykolaB/gatling | gatling-commons/src/test/scala/io/gatling/commons/util/ClassHelperSpec.scala | Scala | apache-2.0 | 1,199 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.filter
import java.util.{Date, UUID}
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BoundsTest extends Specification {
"Bounds" should {
"merge different types" >> {
"ints" >> {
val leftLower: Option[java.lang.Integer] = Some(0)
val leftUpper: Option[java.lang.Integer] = Some(10)
val rightLower: Option[java.lang.Integer] = Some(5)
val rightUpper: Option[java.lang.Integer] = Some(15)
val left = Bounds(leftLower, leftUpper, inclusive = true)
val right = Bounds(rightLower, rightUpper, inclusive = true)
Bounds.intersection(left, right) must beSome(Bounds(rightLower, leftUpper, inclusive = true))
Bounds.union(Seq(left), Seq(right)) mustEqual Seq(Bounds(leftLower, rightUpper, inclusive = true))
}
"longs" >> {
val leftLower: Option[java.lang.Long] = Some(0L)
val leftUpper: Option[java.lang.Long] = Some(10L)
val rightLower: Option[java.lang.Long] = Some(5L)
val rightUpper: Option[java.lang.Long] = Some(15L)
val left = Bounds(leftLower, leftUpper, inclusive = true)
val right = Bounds(rightLower, rightUpper, inclusive = true)
Bounds.intersection(left, right) must beSome(Bounds(rightLower, leftUpper, inclusive = true))
Bounds.union(Seq(left), Seq(right)) mustEqual Seq(Bounds(leftLower, rightUpper, inclusive = true))
}
"floats" >> {
val leftLower: Option[java.lang.Float] = Some(0f)
val leftUpper: Option[java.lang.Float] = Some(10f)
val rightLower: Option[java.lang.Float] = Some(5f)
val rightUpper: Option[java.lang.Float] = Some(15f)
val left = Bounds(leftLower, leftUpper, inclusive = true)
val right = Bounds(rightLower, rightUpper, inclusive = true)
Bounds.intersection(left, right) must beSome(Bounds(rightLower, leftUpper, inclusive = true))
Bounds.union(Seq(left), Seq(right)) mustEqual Seq(Bounds(leftLower, rightUpper, inclusive = true))
}
"doubles" >> {
val leftLower: Option[java.lang.Double] = Some(0d)
val leftUpper: Option[java.lang.Double] = Some(10d)
val rightLower: Option[java.lang.Double] = Some(5d)
val rightUpper: Option[java.lang.Double] = Some(15d)
val left = Bounds(leftLower, leftUpper, inclusive = true)
val right = Bounds(rightLower, rightUpper, inclusive = true)
Bounds.intersection(left, right) must beSome(Bounds(rightLower, leftUpper, inclusive = true))
Bounds.union(Seq(left), Seq(right)) mustEqual Seq(Bounds(leftLower, rightUpper, inclusive = true))
}
"strings" >> {
val leftLower: Option[String] = Some("0")
val leftUpper: Option[String] = Some("6")
val rightLower: Option[String] = Some("3")
val rightUpper: Option[String] = Some("9")
val left = Bounds(leftLower, leftUpper, inclusive = true)
val right = Bounds(rightLower, rightUpper, inclusive = true)
Bounds.intersection(left, right) must beSome(Bounds(rightLower, leftUpper, inclusive = true))
Bounds.union(Seq(left), Seq(right)) mustEqual Seq(Bounds(leftLower, rightUpper, inclusive = true))
}
"dates" >> {
val leftLower: Option[Date] = Some(new Date(0))
val leftUpper: Option[Date] = Some(new Date(10))
val rightLower: Option[Date] = Some(new Date(5))
val rightUpper: Option[Date] = Some(new Date(15))
val left = Bounds(leftLower, leftUpper, inclusive = true)
val right = Bounds(rightLower, rightUpper, inclusive = true)
Bounds.intersection(left, right) must beSome(Bounds(rightLower, leftUpper, inclusive = true))
Bounds.union(Seq(left), Seq(right)) mustEqual Seq(Bounds(leftLower, rightUpper, inclusive = true))
}
"uuids" >> {
val leftLower: Option[UUID] = Some(UUID.fromString("00000000-0000-0000-0000-000000000000"))
val leftUpper: Option[UUID] = Some(UUID.fromString("00000000-0000-0000-0000-000000000006"))
val rightLower: Option[UUID] = Some(UUID.fromString("00000000-0000-0000-0000-000000000003"))
val rightUpper: Option[UUID] = Some(UUID.fromString("00000000-0000-0000-0000-000000000009"))
val left = Bounds(leftLower, leftUpper, inclusive = true)
val right = Bounds(rightLower, rightUpper, inclusive = true)
Bounds.intersection(left, right) must beSome(Bounds(rightLower, leftUpper, inclusive = true))
Bounds.union(Seq(left), Seq(right)) mustEqual Seq(Bounds(leftLower, rightUpper, inclusive = true))
}
}
"merge simple ands/ors" >> {
"for strings" >> {
val bounds = Bounds(Some("b"), Some("f"), inclusive = true)
"overlapping" >> {
val toMerge = Bounds(Some("d"), Some("i"), inclusive = true)
Bounds.intersection(bounds, toMerge) must beSome(Bounds(Some("d"), Some("f"), inclusive = true))
Bounds.union(Seq(bounds), Seq(toMerge)) mustEqual Seq(Bounds(Some("b"), Some("i"), inclusive = true))
}
"disjoint" >> {
val toMerge = Bounds(Some("i"), Some("z"), inclusive = true)
Bounds.intersection(bounds, toMerge) must beNone
Bounds.union(Seq(bounds), Seq(toMerge)) mustEqual Seq(
Bounds(Some("b"), Some("f"), inclusive = true),
Bounds(Some("i"), Some("z"), inclusive = true)
)
}
"contained" >> {
val toMerge = Bounds(Some("c"), Some("d"), inclusive = true)
Bounds.intersection(bounds, toMerge) must beSome(Bounds(Some("c"), Some("d"), inclusive = true))
Bounds.union(Seq(bounds), Seq(toMerge)) mustEqual Seq(Bounds(Some("b"), Some("f"), inclusive = true))
}
"containing" >> {
val toMerge = Bounds(Some("a"), Some("i"), inclusive = true)
Bounds.intersection(bounds, toMerge) must beSome(Bounds(Some("b"), Some("f"), inclusive = true))
Bounds.union(Seq(bounds), Seq(toMerge)) mustEqual Seq(Bounds(Some("a"), Some("i"), inclusive = true))
}
}
}
"merge complex ands/ors" >> {
"for strings" >> {
val bounds = Bounds(Some("b"), Some("f"), inclusive = true)
val or = Bounds.union(Seq(bounds), Seq(Bounds(Some("i"), Some("m"), inclusive = true)))
or mustEqual Seq(
Bounds(Some("b"), Some("f"), inclusive = true),
Bounds(Some("i"), Some("m"), inclusive = true)
)
val and = or.flatMap(Bounds.intersection(_, Bounds(Some("e"), Some("k"), inclusive = true)))
and mustEqual Seq(
Bounds(Some("e"), Some("f"), inclusive = true),
Bounds(Some("i"), Some("k"), inclusive = true)
)
val or2 = Bounds.union(and, Seq(Bounds(Some("f"), Some("i"), inclusive = true)))
or2 mustEqual Seq(Bounds(Some("e"), Some("k"), inclusive = true))
}
}
}
}
| tkunicki/geomesa | geomesa-filter/src/test/scala/org/locationtech/geomesa/filter/BoundsTest.scala | Scala | apache-2.0 | 7,480 |
/* scala-stm - (c) 2009-2014, Stanford University, PPL */
package scala.concurrent.stm
package skel
import org.scalatest.FunSuite
import scala.reflect.ClassTag
class AtomicArraySuite extends FunSuite {
test("Unit") {
runIsolatedTest(List((), (), ()))
}
test("Boolean") {
runIsolatedTest(List(false, true, false, true))
}
test("Byte") {
runIsolatedTest(Array(0 : Byte, 1 : Byte, 2 : Byte))
}
test("Short") {
runIsolatedTest(List(0 : Short, 1 : Short, 2 : Short))
}
test("Char") {
runIsolatedTest("abcdefg".toSeq)
}
test("Int") {
runIsolatedTest(100 until 200)
}
test("Float") {
runIsolatedTest((20 to 30) map { _ * 0.1f })
}
test("Long") {
runIsolatedTest((100 until 200) map { _ - 100L * Int.MaxValue })
}
test("Double") {
runIsolatedTest((10 until 20) map { math.exp(_) })
}
test("AnyRef") {
runIsolatedTest((10 until 20) map { i => "x" + i : AnyRef })
}
test("Any") {
runIsolatedTest[Any]((10 until 20) map { i => i : Any })
}
def runIsolatedTest[A](values: Seq[A])(implicit am: ClassTag[A]): Unit = {
val singleton = AtomicArray[A](1)
if (am != implicitly[ClassTag[Unit]])
assert(singleton(0) === am.newArray(1)(0))
val aa = AtomicArray(values)
for (i <- 0 until aa.length)
assert(values(i) === aa(i))
for (i <- 0 until aa.length)
aa(i) = values(aa.length - 1 - i)
for (i <- 0 until aa.length)
assert(aa(i) === values(aa.length - 1 - i))
for (i <- 0 until aa.length) {
if (aa(i) == values(0)) {
assert(aa.compareAndSet(i, values(0), values(i)))
assert(aa(i) === values(i))
} else {
assert(!aa.compareAndSet(i, values(0), values(i)))
assert(aa(i) === values(aa.length - 1 - i))
}
}
for (i <- 0 until aa.length)
assert(aa(i) === aa.getAndTransform(i)( v => v ))
for (i <- 0 until aa.length) {
val prev = aa(i)
assert(aa.swap(i, values(i)) === prev)
}
intercept[IndexOutOfBoundsException] {
aa(-1)
}
intercept[IndexOutOfBoundsException] {
aa(-1) = aa(0)
}
intercept[IndexOutOfBoundsException] {
aa(aa.length)
}
intercept[IndexOutOfBoundsException] {
aa(aa.length) = aa(0)
}
intercept[IndexOutOfBoundsException] {
aa.compareAndSet(-1, aa(0), aa(0))
}
intercept[IndexOutOfBoundsException] {
aa.compareAndSet(aa.length, aa(0), aa(0))
}
intercept[IndexOutOfBoundsException] {
aa(Int.MinValue)
}
intercept[IndexOutOfBoundsException] {
aa(Int.MaxValue)
}
val copy = aa.clone
for (i <- 0 until aa.length)
assert(copy(i) === aa(i))
val str0 = aa map { _.toString }
val str: AtomicArray[String] = str0
for (i <- 0 until aa.length)
assert(aa(i).toString === str(i))
val seq0 = aa.toList
for (i <- 0 until aa.length)
assert(aa(i) == seq0(i))
val seq1 = aa.iterator.toList
for (i <- 0 until aa.length)
assert(aa(i) == seq1(i))
val bb = aa ++ seq0
assert(bb.length === aa.length * 2)
for (i <- 0 until aa.length) {
assert(aa(i) === bb(i))
assert(aa(i) === bb(i + aa.length))
}
assert(aa.toString.startsWith("AtomicArray"))
}
}
| nbronson/scala-stm | src/test/scala/scala/concurrent/stm/skel/AtomicArraySuite.scala | Scala | bsd-3-clause | 3,270 |
package com.greencatsoft.angularjs.core
import scala.scalajs.js
import com.greencatsoft.angularjs.{ Service, injectable }
@js.native
@injectable("$scope")
trait Scope extends js.Object {
def $id: String = js.native
def $apply(exp: js.Any = null): js.Any = js.native
def $broadcast(name: String, args: js.Any*): js.Object = js.native
def $destroy(): Unit = js.native
def $digest(): Unit = js.native
def $emit(name: String, args: js.Any*): js.Object = js.native
def $eval(expression: js.Any = null, locals: js.Object = null): js.Any = js.native
def $evalAsync(expression: js.Any = null): Unit = js.native
def $new(isolate: Boolean): Scope = js.native
def $on(name: String, listener: js.Function): js.Function0[Unit] = js.native
def $watch(watchExpression: js.Any, listener: js.Any = null, objectEquality: Boolean = false): js.Function = js.native
def $watchCollection(obj: js.Any, listener: js.Function): js.Function = js.native
}
@js.native
@injectable("$rootScope")
trait RootScope extends Scope
trait ScopeOps {
this: Service =>
implicit class DynamicScope(scope: Scope) {
def dynamic = scope.asInstanceOf[js.Dynamic]
}
}
@js.native
trait Event extends js.Object {
val name: String = js.native
val defaultPrevented: Boolean = js.native
def preventDefault(): Unit = js.native
def stopPropagation(): Unit = js.native
def currentScope[A <: Scope]: A = js.native
def targetScope[A <: Scope]: A = js.native
}
|
easel/scalajs-angular
|
src/main/scala/com/greencatsoft/angularjs/core/Scope.scala
|
Scala
|
apache-2.0
| 1,477 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.hbase.spark
import com.typesafe.scalalogging.LazyLogging
import org.apache.spark.sql.{DataFrame, SQLContext, SQLTypes, SparkSession}
import org.geotools.data.{Query, Transaction}
import org.geotools.factory.CommonFactoryFinder
import org.junit.runner.RunWith
import org.locationtech.geomesa.hbase.data.HBaseDataStoreFactory
import org.locationtech.geomesa.hbase.data.HBaseDataStoreParams._
import org.locationtech.geomesa.spark.SparkSQLTestUtils
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class HBaseSparkProviderIntegrationTest extends Specification with LazyLogging {
sequential
// START HBASE INSTANCE MANUALLY
lazy val sftName: String = "chicago"
def spec: String = SparkSQLTestUtils.ChiSpec
private val ff = CommonFactoryFinder.getFilterFactory2
def dtgField: Option[String] = Some("dtg")
lazy val dsParams = Map(HBaseCatalogParam.getName -> "test_sft")
lazy val dsf = new HBaseDataStoreFactory()
lazy val ds = dsf.createDataStore(dsParams)
var spark: SparkSession = null
var sc: SQLContext = null
var df: DataFrame = null
lazy val params = dsParams
"HBase Spark Data Tests" should {
// before
"start spark" >> {
skipped("integration")
spark = SparkSQLTestUtils.createSparkSession()
sc = spark.sqlContext
SQLTypes.init(sc)
SparkSQLTestUtils.ingestChicago(ds)
df = spark.read
.format("geomesa")
.options(params.map { case (k, v) => k -> v.toString })
.option("geomesa.feature", "chicago")
.load()
logger.debug(df.schema.treeString)
df.createOrReplaceTempView("chicago")
true
}
"select by secondary indexed attribute" >> {
skipped("integration")
val cases = df.select("case_number").where("case_number = 1").collect().map(_.getInt(0))
cases.length mustEqual 1
}
"complex st_buffer" >> {
skipped("integration")
val buf = sc.sql("select st_asText(st_bufferPoint(geom,10)) from chicago where case_number = 1").collect().head.getString(0)
sc.sql(
s"""
|select *
|from chicago
|where
| st_contains(st_geomFromWKT('$buf'), geom)
""".stripMargin
).collect().length must beEqualTo(1)
}
"write data and properly index" >> {
skipped("integration")
val subset = sc.sql("select case_number,geom,dtg from chicago")
subset.write.format("geomesa")
.options(params.map { case (k, v) => k -> v.toString })
.option("geomesa.feature", "chicago2")
.save()
val sft = ds.getSchema("chicago2")
val enabledIndexes = sft.getUserData.get("geomesa.indices").asInstanceOf[String]
enabledIndexes.indexOf("z3") must be greaterThan -1
}
"handle reuse __fid__ on write if available" >> {
skipped("integration")
val subset = sc.sql("select __fid__,case_number,geom,dtg from chicago")
subset.write.format("geomesa")
.options(params.map { case (k, v) => k -> v.toString })
.option("geomesa.feature", "fidOnWrite")
.save()
val filter = ff.equals(ff.property("case_number"), ff.literal(1))
val queryOrig = new Query("chicago", filter)
val origResults = SelfClosingIterator(ds.getFeatureReader(queryOrig, Transaction.AUTO_COMMIT)).toList
val query = new Query("fidOnWrite", filter)
val results = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT)).toList
results.head.getID must be equalTo origResults.head.getID
}
}
}
|
ronq/geomesa
|
geomesa-hbase/geomesa-hbase-spark-runtime/src/test/scala/org/locationtech/geomesa/hbase/spark/HBaseSparkProviderIntegrationTest.scala
|
Scala
|
apache-2.0
| 4,334 |
def &&(p: Prop) = Prop {
(max,n,rng) => run(max,n,rng) match {
case Passed => p.run(max, n, rng)
case x => x
}
}
def ||(p: Prop) = Prop {
(max,n,rng) => run(max,n,rng) match {
// In case of failure, run the other prop.
case Falsified(msg, _) => p.tag(msg).run(max,n,rng)
case x => x
}
}
/* This is rather simplistic - in the event of failure, we simply prepend
* the given message on a newline in front of the existing message.
*/
def tag(msg: String) = Prop {
(max,n,rng) => run(max,n,rng) match {
case Falsified(e, c) => Falsified(msg + "\n" + e, c)
case x => x
}
}
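/* Usage sketch (not part of the original answer file): these combinators compose
 * existing properties; `p1` and `p2` below are assumed to be Props defined elsewhere.
 *
 *   val either = p1 || p2               // passes if at least one of the two passes
 *   val both   = (p1 && p2) tag "both"  // on failure, "both" is prepended to the message
 */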
|
lucaviolanti/scala-redbook
|
answerkey/testing/09.answer.scala
|
Scala
|
mit
| 634 |
/*                     __                                               *\
**     ________ ___   / /  ___      __ ____  Scala.js sbt plugin        **
**    / __/ __// _ | / /  / _ | __ / // __/  (c) 2013, LAMP/EPFL        **
**  __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \    http://scala-js.org/       **
** /____/\___/_/ |_/____/_/ | |__/ /____/                               **
**                          |/____/                                     **
\*                                                                      */
package org.scalajs.jsenv
import org.mozilla.javascript._
import org.scalajs.core.tools.io._
package object rhino {
private[rhino] implicit class ContextOps(val self: Context) extends AnyVal {
def evaluateFile(scope: Scriptable, file: VirtualJSFile,
securityDomain: AnyRef = null): Any = {
self.evaluateString(scope, file.content, file.path, 1, securityDomain)
}
}
private[rhino] implicit class ScriptableObjectOps(val self: Scriptable) {
def addFunction(name: String, function: Array[AnyRef] => Any): Unit = {
val rhinoFunction =
new BaseFunction {
ScriptRuntime.setFunctionProtoAndParent(this, self)
override def call(context: Context, scope: Scriptable,
thisObj: Scriptable, args: Array[AnyRef]): AnyRef = {
function(args) match {
case () => Undefined.instance
case r => r.asInstanceOf[AnyRef]
}
}
}
ScriptableObject.putProperty(self, name, rhinoFunction)
}
}
}
|
mdedetrich/scala-js
|
js-envs/src/main/scala/org/scalajs/jsenv/rhino/package.scala
|
Scala
|
bsd-3-clause
| 1,560 |
package net.wrap_trap.goju.samples
import akka.actor.ActorSystem
import akka.testkit.TestActorRef
import akka.testkit.TestKit
import net.wrap_trap.goju.StopSystemAfterAll
import org.scalatest.MustMatchers
import org.scalatest.WordSpecLike
/**
* goju-to: HanoiDB(LSM-trees (Log-Structured Merge Trees) Indexed Storage) clone
* Copyright (c) 2016 Masayuki Takahashi
* This software is released under the MIT License.
* http://opensource.org/licenses/mit-license.php
*/
class HelloAkkaSpec
extends TestKit(ActorSystem("test")) with WordSpecLike with MustMatchers
with StopSystemAfterAll {
"HelloAkka" must {
"reply 'World'" in {
val actor = TestActorRef[HelloAkka]
actor ! "Hello"
actor.underlyingActor.state must equal("Hello")
}
}
}
|
masayuki038/goju
|
src/test/scala/net/wrap_trap/goju/samples/HelloAkkaSpec.scala
|
Scala
|
mit
| 781 |
package fi.pyppe.ircbot.slave
import org.joda.time.Period
import org.joda.time.format.PeriodFormatterBuilder
import org.jsoup.nodes.Document
import java.text.NumberFormat
import org.jsoup.Jsoup
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Try
object Youtube {
private val hmsFormatter = new PeriodFormatterBuilder()
.minimumPrintedDigits(2)
.appendHours().appendSeparator(":")
.appendMinutes().appendSeparator(":")
.appendSeconds().appendSeparator(":")
.toFormatter
def parseUrl(pageUrl: String)(implicit ec: ExecutionContext) = {
Future(Jsoup.connect(pageUrl)).
map(_.get).
map(parsePage)
}
def parsePage(doc: Document) = {
val nf = NumberFormat.getInstance(java.util.Locale.forLanguageTag("fi"))
nf.setGroupingUsed(true)
def number(css: String) =
Try(nf.format(doc.select(css).first.text.replaceAll("[^\\d]", "").toLong)).getOrElse("?")
val title = doc.select("#watch-headline-title").text
val durationText = doc.select("meta[itemprop=duration]").attr("content") // PT4M8S
val duration = Try(hmsFormatter.print(Period.parse(durationText))).getOrElse(durationText)
val views = number(".watch-view-count")
val likes = number(".like-button-renderer-like-button-unclicked")
val dislikes = number(".like-button-renderer-dislike-button-unclicked")
s"Youtube: $title [$duration] ($views views, $likes likes, $dislikes dislikes)"
}
}
|
Pyppe/akka-ircbot
|
slave/src/main/scala/fi/pyppe/ircbot/slave/Youtube.scala
|
Scala
|
mit
| 1,455 |
package uk.gov.dvla.vehicles.presentation.common.mappings
import org.joda.time.LocalDate
import play.api.data.Forms.{of, optional}
import play.api.i18n.Messages
import uk.gov.dvla.vehicles.presentation.common.services.DateService
import Date.notBefore
import Date.notInTheFuture
import Date.formatter
import Date.required
object DateOfBirth {
final val ValidYearsAgo = 110
private def genericDateOfBirth(implicit dateService: DateService) =
of[LocalDate](formatter("error.dateOfBirth.invalid"))
.verifying(notInTheFuture(Messages("error.dateOfBirth.inTheFuture")))
.verifying(notBefore(dateService.now.toDateTime.toLocalDate.minusYears(ValidYearsAgo),
Messages("error.dateOfBirth.110yearsInThePast")))
def dateOfBirth()(implicit dateService: DateService) = genericDateOfBirth verifying required
def optionalDateOfBirth()(implicit dateService: DateService) = optional(genericDateOfBirth)
}
|
dvla/vehicles-presentation-common
|
app/uk/gov/dvla/vehicles/presentation/common/mappings/DateOfBirth.scala
|
Scala
|
mit
| 928 |
package lampetia.meta.feature
import lampetia.meta.{Property, Feature, Model}
import lampetia.model.util._
/**
* @author Hossam Karim
*/
package object sql {
sealed trait SqlFeature extends Any with Feature
case object Optional extends SqlFeature
def optional = Optional
case class SqlName(value: String) extends AnyVal with SqlFeature
def name(value: String): SqlFeature = SqlName(value)
case class SqlType(value: String) extends AnyVal with SqlFeature
def `type`(value: String): SqlFeature = SqlType(value)
case class SqlCast(typeName: String) extends AnyVal with SqlFeature
def cast(typeName: String): SqlFeature= SqlCast(typeName)
case class SqlSchema(value: String) extends AnyVal with SqlFeature
def schema(value: String): SqlSchema = SqlSchema(value)
case class SqlPrimaryKey(name: Option[String], properties: Seq[Property[_]]) extends SqlFeature
def primaryKey(name: String)(property: Property[_], properties: Property[_]*): SqlPrimaryKey =
SqlPrimaryKey(Some(name), property +: properties)
def primaryKey(property: Property[_], properties: Property[_]*): SqlPrimaryKey =
SqlPrimaryKey(None, property +: properties)
case class SqlForeignKey[R](
name: Option[String],
keys: Seq[Property[_]],
refModel: Model[R],
references: Seq[Property[_]]) extends SqlFeature
def foreignKey[R](name: String)(key: Property[_], keys: Property[_]*)
(refModel: Model[R], ref: Property[_], references: Property[_]*): SqlForeignKey[R] =
SqlForeignKey[R](Some(name), key +: keys, refModel, ref +: references)
def foreignKey[R](key: Property[_], keys: Property[_]*)
(refModel: Model[R], ref: Property[_], references: Property[_]*): SqlForeignKey[R] =
SqlForeignKey[R](None, key +: keys, refModel, ref +: references)
case class SqlIndex(name: Option[String], properties: Seq[Property[_]], unique: Boolean) extends SqlFeature
def index(name: String)(property: Property[_], properties: Property[_]*): SqlIndex =
SqlIndex(Some(name), property +: properties, unique = false)
def index(property: Property[_], properties: Property[_]*): SqlIndex =
SqlIndex(None, property +: properties, unique = false)
def uniqueIndex(name: String)(property: Property[_], properties: Property[_]*): SqlIndex =
SqlIndex(Some(name), property +: properties, unique = true)
def uniqueIndex(property: Property[_], properties: Property[_]*): SqlIndex =
SqlIndex(None, property +: properties, unique = true)
case class SqlFunction(name: String, schema: String)
def function(name: String)(schema: String): SqlFunction =
SqlFunction(name, schema)
implicit class SqlFunctionFeatures(val function: SqlFunction) extends AnyVal {
def sqlName: String = function.name
def sqlSchema: Option[String] =
function.schema == null || function.schema.isEmpty match {
case true => None
case false => Some(function.schema)
}
def schemaPrefixed = sqlSchema match {
case Some(schema) => s"$schema.$sqlName"
case None => sqlName
}
}
implicit class ModelFeatures[A](val model: Model[A]) extends AnyVal {
def features = model.features.reverse
def sqlName: String = features.collectFirst {
case SqlName(value) => value
}.getOrElse(model.modelName.snakeCase)
def sqlSchema: Option[String] = features.collectFirst {
case SqlSchema(value) => value
}
def sqlQualifiedName: String = sqlSchema match {
case Some(schema) => s"$schema.$sqlName"
case None => sqlName
}
def sqlPrimaryKey: Option[SqlPrimaryKey] = features.collectFirst {
case pk: SqlPrimaryKey => Some(pk)
}.getOrElse(None)
def sqlForeignKeys: Seq[SqlForeignKey[_]] = features.collect {
case fk: SqlForeignKey[_] => fk
}
def sqlIndexes: Seq[SqlIndex] = features.collect {
case i: SqlIndex => i
}
}
implicit class PropertyFeatures[A](val p: Property[A]) {
def features = p.features.reverse
def sqlName: String = features.collectFirst {
case SqlName(value) => value
}.getOrElse(p.propertyName.snakeCase)
def sqlType(implicit dst: SqlTypes): String = features.collectFirst {
case SqlType(value) => value
}.getOrElse(dst.name(p.propertyType))
def optional: Boolean = features.collectFirst {
case Optional => true
}.getOrElse(false)
def sqlCast: Option[String] = features.collectFirst {
case SqlCast(v) => Some(v)
}.getOrElse(None)
}
}
|
hkarim/lampetia
|
lampetia-model/src/main/scala/lampetia/meta/feature/sql/package.scala
|
Scala
|
mit
| 4,533 |
package net.nightwhistler.nwcsc.rest
import java.util.concurrent.TimeUnit
import akka.actor.ActorRef
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.unmarshalling.PredefinedFromEntityUnmarshallers
import akka.pattern.ask
import akka.util.Timeout
import com.typesafe.scalalogging.Logger
import de.heikoseeberger.akkahttpjson4s.Json4sSupport
import net.nightwhistler.nwcsc.blockchain.BlockChainCommunication.{QueryAll, QueryLatest, ResponseBlock, ResponseBlockChain}
import net.nightwhistler.nwcsc.blockchain.Mining.MineBlock
import net.nightwhistler.nwcsc.blockchain.{Block, GenesisBlock}
import net.nightwhistler.nwcsc.p2p.PeerToPeer.{AddPeer, GetPeers, Peers}
import org.json4s.{DefaultFormats, Formats, native}
import scala.concurrent.{ExecutionContext, Future}
/**
* Created by alex on 16-6-17.
*/
trait RestInterface extends Json4sSupport {
val blockChainActor: ActorRef
val logger: Logger
implicit val serialization = native.Serialization
implicit val stringUnmarshallers = PredefinedFromEntityUnmarshallers.stringUnmarshaller
implicit def json4sFormats: Formats = DefaultFormats
implicit val executionContext: ExecutionContext
implicit val timeout = Timeout(5, TimeUnit.SECONDS)
val routes =
get {
path("blocks") {
val chain: Future[Seq[Block]] = (blockChainActor ? QueryAll).map {
//This is a bit of a hack, since JSON4S doesn't serialize case objects well
case ResponseBlockChain(blockChain) => blockChain.blocks.slice(0, blockChain.blocks.length -1) :+ GenesisBlock.copy()
}
complete(chain)
}~
path("peers") {
complete( (blockChainActor ? GetPeers).mapTo[Peers] )
}~
path("latestBlock") {
complete( (blockChainActor ? QueryLatest).map {
case ResponseBlock(GenesisBlock) => GenesisBlock.copy()
case ResponseBlock(block) => block
})
}
}~
post {
path("mineBlock") {
entity(as[String]) { data =>
logger.info(s"Got request to add new block $data")
complete((blockChainActor ? MineBlock(data)).mapTo[ResponseBlock].map {
case ResponseBlock(block) => block
})
}
}~
path("addPeer") {
entity(as[String]) { peerAddress =>
logger.info(s"Got request to add new peer $peerAddress")
blockChainActor ! AddPeer(peerAddress)
complete(s"Added peer $peerAddress")
}
}
}
}
|
NightWhistler/naivechain-scala
|
src/main/scala/net/nightwhistler/nwcsc/rest/RestInterface.scala
|
Scala
|
apache-2.0
| 2,470 |
package com.sksamuel.elastic4s.mappings
import org.elasticsearch.common.xcontent.XContentBuilder
case class TimestampDefinition(enabled: Boolean,
path: Option[String] = None,
format: Option[String] = None,
default: Option[String] = None,
store: Option[Boolean] = None,
docValuesFormat: Option[Boolean] = None) {
def path(path: String): TimestampDefinition = copy(path = Option(path))
def format(format: String): TimestampDefinition = copy(format = Option(format))
def default(default: String): TimestampDefinition = copy(default = Option(default))
def store(store: Boolean): TimestampDefinition = copy(store = Option(store))
def docValuesFormat(b: Boolean): TimestampDefinition = copy(docValuesFormat = Option(b))
private[elastic4s] def build(builder: XContentBuilder): Unit = {
builder.startObject("_timestamp")
builder.field("enabled", enabled)
path.foreach(builder.field("path", _))
store.foreach(s => builder.field("store", if (s) "yes" else "no"))
format.foreach(builder.field("format", _))
docValuesFormat.foreach(builder.field("doc_values", _))
default.foreach(builder.field("default", _))
builder.endObject()
}
}
|
beni55/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/mappings/TimestampDefinition.scala
|
Scala
|
apache-2.0
| 1,326 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions.PsiTypeExt
import org.jetbrains.plugins.scala.lang.psi.api.base.ScConstructor
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScSequenceArg, ScTupleTypeElement, ScTypeElement}
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScParameter
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.jetbrains.plugins.scala.lang.psi.types.api._
import org.jetbrains.plugins.scala.lang.psi.types.nonvalue.{Parameter, ScMethodType, ScTypePolymorphicType}
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.psi.types.{api, _}
import org.jetbrains.plugins.scala.lang.resolve.{ResolvableReferenceExpression, ScalaResolveResult}
import scala.annotation.tailrec
import scala.collection.mutable.ArrayBuffer
/**
* @author ilyas
*
* Utility object to calculate expected type of any expression
*/
private[expr] object ExpectedTypes {
/**
   * Do not use this method inside resolve or type inference:
   * doing so leads to a StackOverflowError (SOE).
*/
def smartExpectedType(expr: ScExpression, fromUnderscore: Boolean = true): Option[ScType] =
smartExpectedTypeEx(expr, fromUnderscore).map(_._1)
def smartExpectedTypeEx(expr: ScExpression, fromUnderscore: Boolean = true): Option[(ScType, Option[ScTypeElement])] = {
val types = expectedExprTypes(expr, withResolvedFunction = true, fromUnderscore = fromUnderscore)
types.length match {
case 1 => Some(types(0))
case _ => None
}
}
def expectedExprType(expr: ScExpression, fromUnderscore: Boolean = true): Option[(ScType, Option[ScTypeElement])] = {
val types = expr.expectedTypesEx(fromUnderscore)
types.length match {
case 1 => Some(types(0))
case _ => None
}
}
/**
* @return (expectedType, expectedTypeElement)
*/
def expectedExprTypes(expr: ScExpression, withResolvedFunction: Boolean = false,
fromUnderscore: Boolean = true): Array[(ScType, Option[ScTypeElement])] = {
import expr.typeSystem
@tailrec
def fromFunction(tp: (ScType, Option[ScTypeElement])): Array[(ScType, Option[ScTypeElement])] = {
tp._1 match {
case FunctionType(retType, _) => Array[(ScType, Option[ScTypeElement])]((retType, None))
case PartialFunctionType(retType, _) => Array[(ScType, Option[ScTypeElement])]((retType, None))
case ScAbstractType(_, _, upper) => fromFunction(upper, tp._2)
case samType if ScalaPsiUtil.isSAMEnabled(expr) =>
ScalaPsiUtil.toSAMType(samType, expr.getResolveScope) match {
case Some(methodType) => fromFunction(methodType, tp._2)
case _ => Array[(ScType, Option[ScTypeElement])]()
}
case _ => Array[(ScType, Option[ScTypeElement])]()
}
}
def mapResolves(resolves: Array[ResolveResult], types: Array[TypeResult[ScType]]): Array[(TypeResult[ScType], Boolean)] = {
resolves.zip(types).map {
case (r: ScalaResolveResult, tp) =>
val isNamedDynamic = r.isDynamic && r.name == ResolvableReferenceExpression.APPLY_DYNAMIC_NAMED
(tp, isNamedDynamic)
case (_, tp) => (tp, false)
}
}
val result: Array[(ScType, Option[ScTypeElement])] = expr.getContext match {
case p: ScParenthesisedExpr => p.expectedTypesEx(fromUnderscore = false)
//see SLS[6.11]
case b: ScBlockExpr => b.lastExpr match {
case Some(e) if b.needCheckExpectedType && e == expr.getSameElementInContext => b.expectedTypesEx(fromUnderscore = true)
case _ => Array.empty
}
//see SLS[6.16]
case cond: ScIfStmt if cond.condition.getOrElse(null: ScExpression) == expr.getSameElementInContext => Array((api.Boolean, None))
case cond: ScIfStmt if cond.elseBranch.isDefined => cond.expectedTypesEx(fromUnderscore = true)
//see SLA[6.22]
case tb: ScTryBlock => tb.lastExpr match {
case Some(e) if e == expr => tb.getContext.asInstanceOf[ScTryStmt].expectedTypesEx(fromUnderscore = true)
case _ => Array.empty
}
case wh: ScWhileStmt if wh.condition.getOrElse(null: ScExpression) == expr.getSameElementInContext => Array((api.Boolean, None))
case wh: ScWhileStmt => Array((Unit, None))
case d: ScDoStmt if d.condition.getOrElse(null: ScExpression) == expr.getSameElementInContext => Array((api.Boolean, None))
case d: ScDoStmt => Array((api.Unit, None))
case fb: ScFinallyBlock => Array((api.Unit, None))
case cb: ScCatchBlock => Array.empty
case te: ScThrowStmt =>
// Not in the SLS, but in the implementation.
val throwableClass = ScalaPsiManager.instance(te.getProject).getCachedClass(te.getResolveScope, "java.lang.Throwable")
val throwableType = throwableClass.map(new ScDesignatorType(_)).getOrElse(Any)
Array((throwableType, None))
//see SLS[8.4]
case c: ScCaseClause => c.getContext.getContext match {
case m: ScMatchStmt => m.expectedTypesEx(fromUnderscore = true)
case b: ScBlockExpr if b.isInCatchBlock =>
b.getContext.getContext.asInstanceOf[ScTryStmt].expectedTypesEx(fromUnderscore = true)
case b: ScBlockExpr if b.isAnonymousFunction =>
b.expectedTypesEx(fromUnderscore = true).flatMap(tp => fromFunction(tp))
case _ => Array.empty
}
//see SLS[6.23]
case f: ScFunctionExpr => f.expectedTypesEx(fromUnderscore = true).flatMap(tp => fromFunction(tp))
case t: ScTypedStmt if t.getLastChild.isInstanceOf[ScSequenceArg] =>
t.expectedTypesEx(fromUnderscore = true)
//SLS[6.13]
case t: ScTypedStmt =>
t.typeElement match {
case Some(te) => Array((te.getType(TypingContext.empty).getOrAny, Some(te)))
case _ => Array.empty
}
//SLS[6.15]
case a: ScAssignStmt if a.getRExpression.getOrElse(null: ScExpression) == expr.getSameElementInContext =>
a.getLExpression match {
case ref: ScReferenceExpression if (!a.getContext.isInstanceOf[ScArgumentExprList] && !(
a.getContext.isInstanceOf[ScInfixArgumentExpression] && a.getContext.asInstanceOf[ScInfixArgumentExpression].isCall)) ||
ref.qualifier.isDefined ||
ScUnderScoreSectionUtil.isUnderscore(expr) /* See SCL-3512, SCL-3525, SCL-4809, SCL-6785 */ =>
ref.bind() match {
case Some(ScalaResolveResult(named: PsiNamedElement, subst: ScSubstitutor)) =>
ScalaPsiUtil.nameContext(named) match {
case v: ScValue =>
Array((subst.subst(named.asInstanceOf[ScTypedDefinition].
getType(TypingContext.empty).getOrAny), v.typeElement))
case v: ScVariable =>
Array((subst.subst(named.asInstanceOf[ScTypedDefinition].
getType(TypingContext.empty).getOrAny), v.typeElement))
case f: ScFunction if f.paramClauses.clauses.isEmpty =>
a.mirrorMethodCall match {
case Some(call) =>
call.args.exprs.head.expectedTypesEx(fromUnderscore = fromUnderscore)
case None => Array.empty
}
case p: ScParameter =>
//for named parameters
Array((subst.subst(p.getType(TypingContext.empty).getOrAny), p.typeElement))
case f: PsiField =>
Array((subst.subst(f.getType.toScType(f.getProject, expr.getResolveScope)), None))
case _ => Array.empty
}
case _ => Array.empty
}
case ref: ScReferenceExpression => expectedExprTypes(a)
case call: ScMethodCall =>
a.mirrorMethodCall match {
case Some(mirrorCall) => mirrorCall.args.exprs.last.expectedTypesEx(fromUnderscore = fromUnderscore)
case _ => Array.empty
}
case _ => Array.empty
}
//method application
case tuple: ScTuple if tuple.isCall =>
val res = new ArrayBuffer[(ScType, Option[ScTypeElement])]
val exprs: Seq[ScExpression] = tuple.exprs
val actExpr = expr.getDeepSameElementInContext
val i = if (actExpr == null) 0 else exprs.indexWhere(_ == actExpr)
val callExpression = tuple.getContext.asInstanceOf[ScInfixExpr].operation
if (callExpression != null) {
val tps = callExpression match {
case ref: ScReferenceExpression =>
if (!withResolvedFunction) mapResolves(ref.shapeResolve, ref.shapeMultiType)
else mapResolves(ref.multiResolve(false), ref.multiType)
case _ => Array((callExpression.getNonValueType(TypingContext.empty), false))
}
tps.foreach { case (r, isDynamicNamed) =>
processArgsExpected(res, expr, i, r, exprs, isDynamicNamed = isDynamicNamed)
}
}
res.toArray
case tuple: ScTuple =>
val buffer = new ArrayBuffer[(ScType, Option[ScTypeElement])]
val exprs = tuple.exprs
val actExpr = expr.getDeepSameElementInContext
val index = exprs.indexOf(actExpr)
if (index >= 0) {
for (tp: ScType <- tuple.expectedTypes(fromUnderscore = true)) {
tp match {
case TupleType(comps) if comps.length == exprs.length =>
buffer += ((comps(index), None))
case _ =>
}
}
}
buffer.toArray
case infix: ScInfixExpr if ((infix.isLeftAssoc && infix.lOp == expr.getSameElementInContext) ||
(!infix.isLeftAssoc && infix.rOp == expr.getSameElementInContext)) && !expr.isInstanceOf[ScTuple] =>
val res = new ArrayBuffer[(ScType, Option[ScTypeElement])]
val zExpr: ScExpression = expr match {
case p: ScParenthesisedExpr => p.expr.getOrElse(return Array.empty)
case _ => expr
}
val op = infix.operation
var tps =
if (!withResolvedFunction) mapResolves(op.shapeResolve, op.shapeMultiType)
else mapResolves(op.multiResolve(false), op.multiType)
tps = tps.map { case (tp, isDynamicNamed) =>
(infix.updateAccordingToExpectedType(tp), isDynamicNamed)
}
tps.foreach { case (tp, isDynamicNamed) =>
processArgsExpected(res, zExpr, 0, tp, Seq(zExpr), Some(infix), isDynamicNamed = isDynamicNamed)
}
res.toArray
//SLS[4.1]
case v @ ScPatternDefinition.expr(expr) if expr == expr.getSameElementInContext =>
v.typeElement match {
case Some(te) => Array((v.getType(TypingContext.empty).getOrAny, Some(te)))
case _ => Array.empty
}
case v @ ScVariableDefinition.expr(expr) if expr == expr.getSameElementInContext =>
v.typeElement match {
case Some(te) => Array((v.getType(TypingContext.empty).getOrAny, Some(te)))
case _ => Array.empty
}
//SLS[4.6]
case v: ScFunctionDefinition if (v.body match {
case None => false
case Some(b) => b == expr.getSameElementInContext
}) =>
v.returnTypeElement match {
case Some(te) => v.returnType.toOption.map(x => (x, Some(te))).toArray
case None if !v.hasAssign => Array((api.Unit, None))
case _ => v.getInheritedReturnType.map((_, None)).toArray
}
//default parameters
case param: ScParameter =>
param.typeElement match {
case Some(_) => Array((param.getType(TypingContext.empty).getOrAny, param.typeElement))
case _ => Array.empty
}
case ret: ScReturnStmt =>
val fun: ScFunction = PsiTreeUtil.getContextOfType(ret, true, classOf[ScFunction])
if (fun == null) return Array.empty
fun.returnTypeElement match {
case Some(rte: ScTypeElement) =>
fun.returnType match {
case Success(rt: ScType, _) => Array((rt, Some(rte)))
case _ => Array.empty
}
case None => Array.empty
}
case args: ScArgumentExprList =>
val res = new ArrayBuffer[(ScType, Option[ScTypeElement])]
val exprs: Seq[ScExpression] = args.exprs
val actExpr = expr.getDeepSameElementInContext
val i = if (actExpr == null) 0 else exprs.indexWhere(_ == actExpr)
val callExpression = args.callExpression
if (callExpression != null) {
var tps: Array[(TypeResult[ScType], Boolean)] = callExpression match {
case ref: ScReferenceExpression =>
if (!withResolvedFunction) mapResolves(ref.shapeResolve, ref.shapeMultiType)
else mapResolves(ref.multiResolve(false), ref.multiType)
case gen: ScGenericCall =>
if (!withResolvedFunction) {
val multiType = gen.shapeMultiType
gen.shapeMultiResolve.map(mapResolves(_, multiType)).getOrElse(multiType.map((_, false)))
} else {
val multiType = gen.multiType
gen.multiResolve.map(mapResolves(_, multiType)).getOrElse(multiType.map((_, false)))
}
case _ => Array((callExpression.getNonValueType(TypingContext.empty), false))
}
val callOption = args.getParent match {
case call: MethodInvocation => Some(call)
case _ => None
}
callOption.foreach(call => tps = tps.map { case (r, isDynamicNamed) =>
(call.updateAccordingToExpectedType(r), isDynamicNamed)
})
tps.foreach { case (r, isDynamicNamed) =>
processArgsExpected(res, expr, i, r, exprs, callOption, isDynamicNamed = isDynamicNamed)
}
} else {
//it's constructor
args.getContext match {
case constr: ScConstructor =>
val j = constr.arguments.indexOf(args)
val tps =
if (!withResolvedFunction) constr.shapeMultiType(j)
else constr.multiType(j)
tps.foreach(processArgsExpected(res, expr, i, _, exprs))
case s: ScSelfInvocation =>
val j = s.arguments.indexOf(args)
if (!withResolvedFunction) s.shapeMultiType(j).foreach(processArgsExpected(res, expr, i, _, exprs))
else s.multiType(j).foreach(processArgsExpected(res, expr, i, _, exprs))
case _ =>
}
}
res.toArray
case b: ScBlock if b.getContext.isInstanceOf[ScTryBlock]
|| b.getContext.getContext.getContext.isInstanceOf[ScCatchBlock]
|| b.getContext.isInstanceOf[ScCaseClause]
|| b.getContext.isInstanceOf[ScFunctionExpr] => b.lastExpr match {
case Some(e) if expr.getSameElementInContext == e => b.expectedTypesEx(fromUnderscore = true)
case _ => Array.empty
}
case _ => Array.empty
}
@tailrec
def checkIsUnderscore(expr: ScExpression): Boolean = {
expr match {
case p: ScParenthesisedExpr =>
p.expr match {
case Some(e) => checkIsUnderscore(e)
case _ => false
}
case _ => ScUnderScoreSectionUtil.underscores(expr).nonEmpty
}
}
if (fromUnderscore && checkIsUnderscore(expr)) {
val res = new ArrayBuffer[(ScType, Option[ScTypeElement])]
for (tp <- result) {
tp._1 match {
case FunctionType(rt: ScType, _) => res += ((rt, None))
case _ =>
}
}
res.toArray
} else result
}
@tailrec
private def processArgsExpected(res: ArrayBuffer[(ScType, Option[ScTypeElement])], expr: ScExpression, i: Int,
tp: TypeResult[ScType], exprs: Seq[ScExpression], call: Option[MethodInvocation] = None,
forApply: Boolean = false, isDynamicNamed: Boolean = false)
(implicit typeSystem: TypeSystem) {
def applyForParams(params: Seq[Parameter]) {
val p: (ScType, Option[ScTypeElement]) =
if (i >= params.length && params.nonEmpty && params.last.isRepeated)
(params.last.paramType, params.last.paramInCode.flatMap(_.typeElement))
else if (i >= params.length) (Nothing, None)
else (params(i).paramType, params(i).paramInCode.flatMap(_.typeElement))
expr match {
case assign: ScAssignStmt =>
if (isDynamicNamed) {
p match {
case (TupleType(comps), te) if comps.length == 2 =>
res += ((comps(1), te.map {
case t: ScTupleTypeElement if t.components.length == 2 => t.components(1)
case t => t
}))
case _ => res += p
}
} else {
val lE = assign.getLExpression
lE match {
case ref: ScReferenceExpression if ref.qualifier.isEmpty =>
//TODO: probably replace
params.find(parameter => ScalaPsiUtil.memberNamesEquals(parameter.name, ref.refName.inName)) match {
case Some(param) => res += ((param.paramType, param.paramInCode.flatMap(_.typeElement)))
case _ => res += p
}
case _ => res += p
}
}
case typedStmt: ScTypedStmt if typedStmt.isSequenceArg && params.nonEmpty =>
val seqClass: Array[PsiClass] = ScalaPsiManager.instance(expr.getProject).
getCachedClasses(expr.getResolveScope, "scala.collection.Seq").filter(!_.isInstanceOf[ScObject])
if (seqClass.length != 0) {
val tp = ScParameterizedType(ScalaType.designator(seqClass(0)), Seq(params.last.paramType))
res += ((tp, None))
}
case _ => res += p
}
}
tp match {
case Success(ScMethodType(_, params, _), _) =>
if (params.length == 1 && !params.head.isRepeated && exprs.length > 1) {
params.head.paramType match {
case TupleType(args) => applyForParams(args.zipWithIndex.map {
case (tpe, index) => new Parameter("", None, tpe, false, false, false, index)
})
case _ =>
}
} else applyForParams(params)
case Success(t@ScTypePolymorphicType(ScMethodType(_, params, _), typeParams), _) =>
val subst = t.abstractTypeSubstitutor
val newParams = params.map(p => p.copy(paramType = subst.subst(p.paramType)))
if (newParams.length == 1 && !newParams.head.isRepeated && exprs.length > 1) {
newParams.head.paramType match {
case TupleType(args) => applyForParams(args.zipWithIndex.map {
case (tpe, index) => new Parameter("", None, tpe, false, false, false, index)
})
case _ =>
}
} else applyForParams(newParams)
case Success(t@ScTypePolymorphicType(anotherType, typeParams), _) if !forApply =>
val cand = call.getOrElse(expr).applyShapeResolveForExpectedType(anotherType, exprs, call)
if (cand.length == 1) {
cand(0) match {
case r@ScalaResolveResult(fun: ScFunction, s) =>
val isDynamicNamed = r.isDynamic && r.name == ResolvableReferenceExpression.APPLY_DYNAMIC_NAMED
def update(tp: ScType): ScType = {
if (r.isDynamic) ResolvableReferenceExpression.getDynamicReturn(tp)
else tp
}
var polyType: TypeResult[ScType] = Success(s.subst(fun.polymorphicType()) match {
case ScTypePolymorphicType(internal, params) =>
update(ScTypePolymorphicType(internal, params ++ typeParams))
case tp => update(ScTypePolymorphicType(tp, typeParams))
}, Some(expr))
call.foreach(call => polyType = call.updateAccordingToExpectedType(polyType))
processArgsExpected(res, expr, i, polyType, exprs, forApply = true, isDynamicNamed = isDynamicNamed)
case _ =>
}
}
case Success(anotherType, _) if !forApply =>
val cand = call.getOrElse(expr).applyShapeResolveForExpectedType(anotherType, exprs, call)
if (cand.length == 1) {
cand(0) match {
case r@ScalaResolveResult(fun: ScFunction, subst) =>
val isDynamicNamed = r.isDynamic && r.name == ResolvableReferenceExpression.APPLY_DYNAMIC_NAMED
def update(tp: ScType): ScType = {
if (r.isDynamic) ResolvableReferenceExpression.getDynamicReturn(tp)
else tp
}
var polyType: TypeResult[ScType] = Success(update(subst.subst(fun.polymorphicType())), Some(expr))
call.foreach(call => polyType = call.updateAccordingToExpectedType(polyType))
processArgsExpected(res, expr, i, polyType, exprs, forApply = true, isDynamicNamed = isDynamicNamed)
case _ =>
}
}
case _ =>
}
}
}
|
katejim/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/api/expr/ExpectedTypes.scala
|
Scala
|
apache-2.0
| 21,577 |
/**
* This file is part of Facsimile.
*
* (C) Copyright 2017 Taylor Raack.
*
* Facsimile is free software: you can redistribute it and/or modify
* it under the terms of the Affero GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Facsimile is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Affero GNU General Public License for more details.
*
* You should have received a copy of the Affero GNU General Public License
* along with Facsimile. If not, see <http://www.gnu.org/licenses/>.
*/
package info.raack.facsimile
import java.io.BufferedReader
import java.io.InputStreamReader
import java.nio.file.Path
import java.nio.file.Files
import scala.sys.process.stringToProcess
import scala.sys.process.Process
import scala.sys.process.ProcessLogger
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import scala.util.control.NonFatal
object Encryption {
case class Crypto(process: java.lang.Process, outReader: BufferedReader, errorReader: BufferedReader)
val encoders = scala.collection.mutable.Map[String, Crypto]()
val decoders = scala.collection.mutable.Map[String, Crypto]()
def withEncryptedDir[T](unencryptedDir: String)(block: (String, String) => T): T = {
cryptoActionWithDir(unencryptedDir, "facsimile-temp-encrypted", "encrypted", block) { encryptedDir =>
mountEncFsForBackup(unencryptedDir, encryptedDir)
}
}
def withDecryptedDir[T](encryptedDir: String, encryptionConfigFile: String)(block: (String, String) => T): T = {
cryptoActionWithDir(encryptedDir, "facsimile-temp-decrypted", "decrypted", block) { decryptedDir =>
mountEncFsForRestore(encryptedDir, decryptedDir, encryptionConfigFile)
}
}
def encodePath(rootDir: String, unencryptedPath: String): String = {
cryptoActionOnPath(rootDir, unencryptedPath, encoders, "encode")
}
def decodePath(rootDir: String, encryptedPath: String): String = {
cryptoActionOnPath(rootDir, encryptedPath, decoders, "decode")
}
private def cryptoActionOnPath(rootDir: String, cryptoPath: String, cryptos: scala.collection.mutable.Map[String, Crypto], cryptoCommand: String): String = {
synchronized {
// get the encoder for this rootDir, creating a new one if necessary
val crypto = cryptos.get(rootDir) match {
case None => {
val process = new ProcessBuilder("sudo", "encfsctl", cryptoCommand, s"--extpass='${InternalConfiguration.facsimileShareDir}/facsimile-password'", "--", rootDir).start()
val crypto = Crypto(
process,
new BufferedReader(new InputStreamReader(process.getInputStream())),
new BufferedReader(new InputStreamReader(process.getErrorStream()))
)
cryptos.put(rootDir, crypto)
crypto
}
case Some(crypto) => crypto
}
// write path to external program
crypto.process.getOutputStream.write(cryptoPath.getBytes)
crypto.process.getOutputStream.write(System.lineSeparator.getBytes)
crypto.process.getOutputStream.flush()
// wait for input from program
val output = crypto.outReader.readLine()
if (crypto.errorReader.ready()) {
// there is data on the error stream
val error = crypto.errorReader.readLine()
if (!crypto.process.isAlive()) {
// process died; remove it from the encoder map
cryptos.remove(rootDir)
}
throw new RuntimeException(s"Could not $cryptoCommand path $cryptoPath; error is $error")
}
output
}
}
private def cryptoActionWithDir[T](cryptoBaseDir: String, dirPrefix: String, dirFunction: String, block: (String, String) => T)(function: String => Unit): T = {
val theDir = Files.createTempDirectory(dirPrefix)
function(theDir.toString)
try {
block(theDir.toString, s"$cryptoBaseDir/.encfs6.xml")
} finally {
Try {
ShellUtils.runCommand(s"sudo fusermount -u $theDir", s"Could not unmount $dirFunction directory $theDir")
()
}.recover {
case NonFatal(e) =>
println(e)
}.get
}
}
private def mountEncFsForBackup(source: String, destination: String): Unit = {
mountEncFs(source, destination, "", "--reverse -o ro")
}
private def mountEncFsForRestore(source: String, destination: String, encfsConfigPath: String): Unit = {
System.out.println(s"encfs config path is $encfsConfigPath; ")
mountEncFs(source, destination, s"ENCFS6_CONFIG=$encfsConfigPath", "")
}
private def mountEncFs(source: String, destination: String, prefix: String, extraOptions: String): Unit = {
val initialMessages = scala.collection.mutable.ArrayBuffer.empty[String]
Try {
// TODO - get password from user
// NEVER STORE THE USER'S PASSWORD IN CLEARTEXT ON DISK - why?
// ONLY USE IT TEMPORARILY ONCE WHEN ENCFS CONFIG FILE IS MISSING
System.out.println(s"running sudo mkdir -p $destination && sudo $prefix encfs --standard --extpass='${InternalConfiguration.facsimileShareDir}/facsimile-password' $extraOptions $source $destination")
(Process(s"sudo mkdir -p $destination") #&&
Process(s"sudo $prefix encfs --standard --extpass='${InternalConfiguration.facsimileShareDir}/facsimile-password' $extraOptions $source $destination")).
lineStream(ProcessLogger(line => { initialMessages += line; () })).
foreach(line => { initialMessages += line })
println(initialMessages.mkString(System.lineSeparator))
}.recover {
case NonFatal(ex) =>
val errorRegex = """Nonzero exit code: (\\d+)""".r
ex.getMessage match {
case errorRegex(code) => {
throw new RuntimeException(s"Encfs mount error code $code: ${System.lineSeparator}${initialMessages.mkString(System.lineSeparator)}") // non fatal
}
case _ => {
throw new RuntimeException(s"Encfs mount error:${System.lineSeparator}${initialMessages.mkString(System.lineSeparator)}")
}
}
}.get
}
}
|
facsimile-linux/facsimile
|
src/main/scala/info/raack/facsimile/Encryption.scala
|
Scala
|
agpl-3.0
| 6,255 |
/*
* Copyright (C) 2009-2011 Mathias Doenitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.parboiled.scala
package parserunners
import org.parboiled.parserunners.{BasicParseRunner => PBasicParseRunner}
/**
* A simple wrapper for org.parboiled.parserunners.BasicParseRunner which returns a scala ParsingResult.
* Note that the ParseRunner only accepts rules with zero or one value type parameter, as parsers leaving more
* than one value on the value stack are considered to be bad style.
*/
object BasicParseRunner {
def apply(rule: Rule0) = new BasicParseRunner[Nothing](new PBasicParseRunner[Nothing](rule))
def apply[V](rule: Rule1[V]) = new BasicParseRunner[V](new PBasicParseRunner[V](rule))
}
class BasicParseRunner[V](val inner: PBasicParseRunner[V]) extends ParseRunner[V] {
def run(input: Input): ParsingResult[V] = ParsingResult(inner.run(input.inputBuffer))
}
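// Usage sketch (hypothetical parser, not part of this file): a rule defined in a
// parboiled-scala Parser can be handed to BasicParseRunner and run against a string,
// relying on the implicit String-to-Input conversion from org.parboiled.scala.
//
//   class AbcParser extends Parser {
//     def Abc: Rule0 = rule { "abc" ~ EOI }
//   }
//
//   val result = BasicParseRunner(new AbcParser().Abc).run("abc")
//   result.matched  // true when the input conforms to the rule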
|
sirthias/parboiled
|
parboiled-scala/src/main/scala/org/parboiled/scala/parserunners/BasicParseRunner.scala
|
Scala
|
apache-2.0
| 1,411 |
package com.seanshubin.todo.application.contract
import java.io.InputStream
import java.net.URL
import java.util
trait ClassLoaderContract {
def loadClass(name: String): Class[_]
def getResource(name: String): URL
def getResources(name: String): util.Enumeration[URL]
def getSystemResource(name: String): URL
def getSystemResources(name: String): util.Enumeration[URL]
def getResourceAsStream(name: String): InputStream
def getSystemResourceAsStream(name: String): InputStream
def getParent: ClassLoader
def getSystemClassLoader: ClassLoader
def setDefaultAssertionStatus(enabled: Boolean)
def setPackageAssertionStatus(packageName: String, enabled: Boolean)
def setClassAssertionStatus(className: String, enabled: Boolean)
def clearAssertionStatus()
}
|
SeanShubin/todo-application
|
contract/src/main/scala/com/seanshubin/todo/application/contract/ClassLoaderContract.scala
|
Scala
|
unlicense
| 793 |
package gh2011c.models
import net.liftweb.json.JsonAST.JValue
case class GollumPage(title: String, summary: Option[String], sha: String, page_name: String, action: String)
object GollumPage
{
def apply(json: JValue): Option[GollumPage] =
{
val n2s = gh3.node2String(json)(_)
val n2os = gh3.node2OptionString(json)(_)
val title = n2s("title")
val summary = n2os("summary")
val sha = n2s("sha")
val page_name = n2s("page_name")
val action = n2s("action")
val params = Seq(title, summary, sha, page_name, action)
if(params.forall(_.isDefined)) Some(GollumPage(title.get, summary.get, sha.get, page_name.get, action.get))
else None
}
}
|
mgoeminne/github_etl
|
src/main/scala/gh2011c/models/GollumPage.scala
|
Scala
|
mit
| 706 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.portlet.liferay
import javax.portlet._
import com.liferay.portal.kernel.language.LanguageUtil
import org.orbeon.oxf.externalcontext.{Credentials, CredentialsSupport, Organization, ParametrizedRole, SimpleRole, UserAndGroup}
import org.orbeon.oxf.util.StringUtils._
import scala.jdk.CollectionConverters._
trait LiferayUser {
// Return Liferay user, group and role information as headers. There can be multiple role headers.
def userHeaders: List[(String, String)]
}
object LiferaySupport {
import LiferayAPI._
def getCredentialsAsSerializedJson(u: UserFacade, c: CompanyFacade): String = {
def ancestorOrSelfLiferayOrgsForUser(u: UserFacade): List[List[OrganizationFacade]] =
getUserOrganizations(u.getUserId) map { org =>
var orgNamesRootToLeaf: List[OrganizationFacade] = List(org)
for (name <- org.getAncestors.asScala map (_.asInstanceOf[OrganizationFacade]))
orgNamesRootToLeaf = name :: orgNamesRootToLeaf
orgNamesRootToLeaf
}
val ancestorOrSelfLiferayOrgs = ancestorOrSelfLiferayOrgsForUser(u)
val simpleRoles =
for {
role <- u.getRoles.asScala
roleName = role.getName
} yield
SimpleRole(roleName)
val parametrizedRoles =
for {
rootOrgs <- ancestorOrSelfLiferayOrgs
org <- rootOrgs
group = org.getGroup
role <- List(LiferayOrganizationOwnerRoleName, LiferayOrganizationAdministratorRoleName)
roleName = role.name
if hasUserGroupRoleMethod(u.getUserId, group.getGroupId, roleName)
} yield
ParametrizedRole(roleName, org.getName)
val username = c.getAuthType match {
case LiferayEmailAddressAuthType.name => u.getEmailAddress
case LiferayUserIdAuthType.name => u.getUserId.toString
case LiferayScreenNameAuthType.name => u.getScreenName
}
CredentialsSupport.serializeCredentials(
Credentials(
userAndGroup = UserAndGroup.fromStringsOrThrow(username, Option(u.getGroup).map(_.getDescriptiveName).getOrElse("")),
roles = simpleRoles ++: parametrizedRoles,
organizations = ancestorOrSelfLiferayOrgs map(org => Organization(org map (_.getName)))
),
encodeForHeader = true
)
}
private val HeaderNamesGetters = List[(String, (UserFacade, CompanyFacade) => List[String])](
"Orbeon-Liferay-User-Id" -> ((u, _) => Option(u.getUserId) map (_.toString) toList),
"Orbeon-Liferay-User-Screen-Name" -> ((u, _) => Option(u.getScreenName) toList),
"Orbeon-Liferay-User-Full-Name" -> ((u, _) => Option(u.getFullName) toList),
"Orbeon-Liferay-User-First-Name" -> ((u, _) => Option(u.getFirstName) toList),
"Orbeon-Liferay-User-Middle-Name" -> ((u, _) => Option(u.getMiddleName) toList),
"Orbeon-Liferay-User-Last-Name" -> ((u, _) => Option(u.getLastName) toList),
"Orbeon-Liferay-User-Email" -> ((u, _) => Option(u.getEmailAddress) toList),
"Orbeon-Liferay-User-Group-Id" -> ((u, _) => Option(u.getGroup) map (_.getGroupId.toString) toList),
"Orbeon-Liferay-User-Group-Name" -> ((u, _) => Option(u.getGroup) map (_.getDescriptiveName) toList),
"Orbeon-Liferay-User-Roles" -> ((u, _) => u.getRoles.asScala map (_.getName) toList),
"Orbeon-Liferay-User-Credentials" -> ((u, c) => List(getCredentialsAsSerializedJson(u, c)) )
)
private val AllHeaderNamesList = HeaderNamesGetters map (_._1)
private val AllHeaderNamesLowerList = AllHeaderNamesList map (_.toLowerCase)
val AllHeaderNamesLower = AllHeaderNamesLowerList.toSet
val AllHeaderNamesLowerToCapitalized = AllHeaderNamesLowerList zip AllHeaderNamesList toMap
// TODO: Use LanguageUtil.getBCP47LanguageId.
def languageHeader(req: PortletRequest) =
LanguageUtil.getLanguageId(req).trimAllToOpt map ("Orbeon-Liferay-Language" ->)
def userHeaders(user: UserFacade, company: CompanyFacade, tests: Boolean): List[(String, String)] =
for {
(name, getter) <- HeaderNamesGetters
if ! (tests && name == "Orbeon-Liferay-User-Credentials") // we can't yet make this work during tests
value <- getter(user, company)
} yield
name -> value
def getLiferayUser(req: PortletRequest): Option[LiferayUser] = {
val httpReq = getHttpServletRequest(req)
Option(getUser(httpReq)) map { user =>
new LiferayUser {
def userHeaders = LiferaySupport.userHeaders(user, getCompany(httpReq), tests = false)
}
}
}
}
|
orbeon/orbeon-forms
|
portlet-support/src/main/scala/org/orbeon/oxf/portlet/liferay/LiferaySupport.scala
|
Scala
|
lgpl-2.1
| 5,652 |
package spire
package math
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import org.scalatest.matchers.should.Matchers
import org.scalatest.propspec.AnyPropSpec
class RationalCheck extends AnyPropSpec with Matchers with ScalaCheckDrivenPropertyChecks {
type Q = Rational
implicit val arbRational: Arbitrary[Rational] = Arbitrary(for {
n <- arbitrary[BigInt]
d0 <- arbitrary[BigInt]
} yield {
val d = if (d0.signum == 0) BigInt(1) else d0
Rational(n, d)
})
def rat1(name: String)(f: Q => Unit) =
property(name) {
forAll { (nx: Long, _dx: Long) =>
val dx = if (_dx == 0) 1 else _dx
f(Rational(nx, dx))
}
}
def rat2(name: String)(f: (Q, Q) => Unit) =
property(name) {
forAll { (nx: Long, _dx: Long, ny: Long, _dy: Long) =>
val dx = if (_dx == 0) 1 else _dx
val dy = if (_dy == 0) 1 else _dy
f(Rational(nx, dx), Rational(ny, dy))
}
}
def rat3(name: String)(f: (Q, Q, Q) => Unit) =
property(name) {
forAll { (nx: Long, _dx: Long, ny: Long, _dy: Long, nz: Long, _dz: Long) =>
val dx = if (_dx == 0) 1 else _dx
val dy = if (_dy == 0) 1 else _dy
val dz = if (_dz == 0) 1 else _dz
f(Rational(nx, dx), Rational(ny, dy), Rational(nz, dz))
}
}
property("Internal GCD implementation is similar to the field of fractions implementation") {
forAll { (x: Rational, y: Rational) => x.gcd(y) shouldBe Rational(x.numerator gcd y.numerator, x.denominator lcm y.denominator) }
}
rat1("x + 0 == x") { x: Q => x + Rational(0) shouldBe x }
rat1("x * 1 == x") { x: Q => x * Rational(1) shouldBe x }
rat1("x * 0 == 0") { x: Q => x * Rational(0) shouldBe Rational(0) }
rat1("x.floor <= x.round <= x.ceil") { x: Q =>
x.floor should be <= x.round
x.round should be <= x.ceil
}
rat1("x + x == 2x") { x: Q => (x + x) shouldBe 2 * x }
rat1("x - x == 0") { x: Q => x - x shouldBe Rational(0) }
rat1("x * x == x^2") { x: Q => (x * x) shouldBe x.pow(2) }
rat1("(x^-1)^3 == x^-3") { x: Q => if (x != 0) x.reciprocal.pow(3) shouldBe x.pow(-3) }
rat1("x / x == 1") { x: Q => if (x != 0) x / x shouldBe Rational(1) }
rat2("x + y == y + x") { (x: Q, y: Q) => x + y shouldBe y + x }
rat2("x - y == -y + x") { (x: Q, y: Q) => x - y shouldBe -y + x }
rat2("x + y - x == y") { (x: Q, y: Q) => (x + y) - x shouldBe y }
rat2("x / y == x * (y^-1)") { (x: Q, y: Q) => if (y != 0) x / y shouldBe x * y.reciprocal }
rat3("(x + y) * z == x * z + y * z") { (x: Q, y: Q, z: Q) => (x + y) * z shouldBe x * z + y * z }
rat1("Round-trip to Real") { (x: Q) =>
x.toReal.toRational shouldBe x
}
rat1("Round-trip to Algebraic") { (x: Q) =>
x.toAlgebraic.toRational shouldBe Some(x)
}
property("Round-trip Double") {
forAll("x") { (n: Double) =>
Rational(n).toDouble == n
}
}
property("limitToInt does not change small Rationals") {
forAll { (n: Int, d: Int) =>
val r = Rational(n, if (d < 1) 1 else d)
r.limitToInt shouldBe r
}
}
property("limitToInt regression") {
val n = Int.MinValue
val r = Rational(n, 1)
r.limitToInt shouldBe r
}
property("Rational.numeratorIsValidLong") { (x: Q) =>
x.numeratorIsValidLong shouldBe x.numerator.isValidLong
}
property("Rational.denominatorIsValidLong") { (x: Q) =>
x.denominatorIsValidLong shouldBe x.denominator.isValidLong
}
property("limitTo(n) forces numerator and denominator to be less than n") {
implicit val arbSafeLong: Arbitrary[SafeLong] =
Arbitrary(arbitrary[BigInt].map { n => SafeLong(n.abs) }.filter(_.signum != 0))
forAll { (x: Rational, n: SafeLong) =>
val y = x.limitTo(n.abs)
(y.numerator <= n) shouldBe true
(y.denominator <= n) shouldBe true
}
}
}
|
non/spire
|
tests/src/test/scala/spire/math/RationalCheck.scala
|
Scala
|
mit
| 3,903 |
package irc.utilities.urlparsers
import java.text.{DateFormat, SimpleDateFormat}
import java.util.{Date, TimeZone}
import irc.utilities.URLParser
import org.json.JSONObject
object ChanParser {
def find(url: String): String = {
var title = "none"
val s = url.replace("thread", "res")
val ssplit = s.split("/")
    ssplit(5) = ssplit(5).split("\\.")(0).split("#")(0)
val urlstring = s.split("res/?")(0) + "res/" + ssplit(5) + ".json"
try {
val jsonstring = URLParser.readUrl(urlstring)
val json = new JSONObject(jsonstring)
val posts = json.getJSONArray("posts")
val op = posts.getJSONObject(0)
val board = ssplit(3)
var subject: String = null
subject = if (op.has("com")) URLParser.makeClean(op.getString("com") + "") else "No Subject"
val no = op.getInt("no")
val replies = posts.length() - 1
if (op.has("sub")) {
subject = "12" + URLParser.makeClean(op.getString("sub")) +
""
}
if (subject.length > 50) {
subject = subject.substring(0, 49).trim() + "..."
}
val date = new Date(op.getLong("time") * 1000)
val df = new SimpleDateFormat("dd MMM yyyy kk:mm:ss z")
df.setTimeZone(TimeZone.getTimeZone("GMT"))
val created = df.format(date)
title = s"/$board/ - $subject | Thread no $no | Created $created | $replies replies"
} catch {
case e: Exception => {
e.printStackTrace()
throw new ParserException
}
}
title
}
}
|
wiiam/IrcBot
|
src/main/scala/irc/utilities/urlparsers/ChanParser.scala
|
Scala
|
agpl-3.0
| 1,529 |
package com.example
import java.nio.charset.StandardCharsets
import java.util.Properties
import kafka.consumer.ConsumerConfig
import org.json4s.{DefaultFormats, jackson}
import scala.collection.immutable.HashMap
class SimpleKafkaConsumer(kafkaSocket: Socket, zooKeeperSocket: Socket, groupId: String, topic: String) {
private def configuration = {
val deserializer = "org.apache.kafka.common.serialization.StringDeserializer"
val props = new Properties()
props.put("bootstrap.servers", kafkaSocket.toString())
props.put("key.deserializer", deserializer)
props.put("value.deserializer", deserializer)
props.put("group.id", groupId)
props.put("consumer.id", "consumer0")
props.put("consumer.timeout", "-1")
props.put("auto.offset.reset", "smallest")
props.put("zookeeper.sync.time.ms", "200")
props.put("zookeeper.session.timeout.ms", "6000")
props.put("zookeeper.connect", zooKeeperSocket.toString())
props.put("num.consumer.fetchers", "2")
props.put("rebalance.max.retries", "4")
props.put("auto.commit.interval.ms", "1000")
props
}
private val consumer = kafka.consumer.Consumer.create(new ConsumerConfig(configuration))
def read[T <: AnyRef]()(implicit m: Manifest[T]): Iterable[T] = {
implicit val serialization = jackson.Serialization
implicit val formats = DefaultFormats
val topicCountMap = HashMap(topic -> 1)
val consumerMap = consumer.createMessageStreams(topicCountMap)
val stream = consumerMap.get(topic).get(0)
val iterator = stream.iterator()
iterator.map(x => serialization.read[T](new String(x.message(), StandardCharsets.UTF_8))).toStream
}
def shutdown() = {
consumer.shutdown()
}
}
|
frossi85/financial-statistics-collector
|
src/main/scala/com/example/SimpleKafkaConsumer.scala
|
Scala
|
apache-2.0
| 1,721 |
/*
 * ModSlewRateLimiter.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.fscape.modules
import de.sciss.fscape.graph.{AudioFileIn => _, AudioFileOut => _, _}
import de.sciss.lucre.Txn
import de.sciss.proc.{FScape, Widget}
import scala.Predef.{any2stringadd => _}
object ModSlewRateLimiter extends Module {
val name = "Slew Rate Limiter"
/**
* Attributes:
*
* - `"in"`: audio file input
* - `"out"`: audio file output
* - `"out-type"`: audio file output type (AIFF: 0, Wave: 1, Wave64: 2, IRCAM: 3, NeXT: 4)
* - `"out-format"`: audio file output sample format (Int16: 0, Int24: 1, Float: 2, Int32: 3, Double: 4, UInt8: 5, Int8: 6)
* - `"gain-db"`: input boost factor (before entering limiter), in decibels
* - `"ceil-db"`: limiter clipping level, in decibels
* - `"limit"`: limiting slew rate amplitude
* - `"leak-dc"`: whether to remove DC offset (1) or not (0)
*/
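  // Illustrative attribute values (a sketch, not taken from the original module; the
  // concrete Obj types used to store them depend on the SoundProcesses version in use):
  //   "gain-db" -> 0.0   boost in decibels, applied according to "gain-type"
  //   "limit"   -> 0.1   clip threshold applied to the signal's differentiated samples
  //   "leak-dc" -> 1     remove any DC offset introduced by re-integrating the signal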
def apply[T <: Txn[T]]()(implicit tx: T): FScape[T] = {
import de.sciss.fscape.GE
import de.sciss.fscape.lucre.graph.Ops._
import de.sciss.fscape.lucre.graph._
val f = FScape[T]()
import de.sciss.fscape.lucre.MacroImplicits._
f.setGraph {
// version: 24-Jun-2020
val in0 = AudioFileIn("in")
val sampleRate = in0.sampleRate
val framesIn = in0.numFrames
val framesOut = framesIn
val gainType = "gain-type" .attr(0)
val gainDb = "gain-db" .attr(0.0)
val gainAmt = gainDb.dbAmp
val fileType = "out-type" .attr(0)
val smpFmt = "out-format".attr(1)
val limVal = "limit" .attr(0.1)
val leakDC = "leak-dc" .attr(1)
def mkProgress(x: GE, n: GE, label: String): Unit = {
ProgressFrames(x, n, label)
()
}
val dif = Differentiate(in0)
val lim = dif.clip2(limVal)
val int = RunningSum(lim)
val sig0 = If (leakDC sig_== 0) Then int Else LeakDC(int)
val sig = If (gainType sig_== 0) Then {
val sig0Buf = BufferDisk(sig0)
val rMax = RunningMax(Reduce.max(sig0.abs))
val read = Frames(rMax)
mkProgress(read, framesOut, "analyze")
val maxAmp = rMax.last
val div = maxAmp + (maxAmp sig_== 0.0)
val gainAmtN = gainAmt / div
sig0Buf * gainAmtN
} Else {
sig0 * gainAmt
}
val written = AudioFileOut("out", sig, fileType = fileType,
sampleFormat = smpFmt, sampleRate = sampleRate)
mkProgress(written, framesOut, "written")
}
f
}
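  // Editor's sketch (hedged): the graph above implements a classic slew-rate
  // limiter -- differentiate the signal, clip the per-sample slope to +/- `limit`,
  // then integrate again. The plain-Scala helper below illustrates the same idea
  // on an array, independent of the FScape GE API; it is an explanatory sketch
  // added by the editor and is not used by the module itself.
  private[modules] def slewLimitSketch(in: Array[Double], limit: Double): Array[Double] = {
    val out    = new Array[Double](in.length)
    var prevIn = 0.0   // previous input sample (Differentiate state)
    var acc    = 0.0   // running sum (integrator state)
    var i      = 0
    while (i < in.length) {
      val dif = in(i) - prevIn                          // Differentiate
      val lim = math.max(-limit, math.min(limit, dif))  // clip2(limit)
      acc += lim                                        // RunningSum
      out(i)  = acc
      prevIn  = in(i)
      i += 1
    }
    out
  }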
def ui[T <: Txn[T]]()(implicit tx: T): Widget[T] = {
import de.sciss.lucre.expr.ExImport._
import de.sciss.lucre.expr.graph._
import de.sciss.lucre.swing.graph._
val w = Widget[T]()
import de.sciss.proc.MacroImplicits._
w.setGraph {
// version: 22-Jul-2021
val r = Runner("run")
val m = r.messages
      m.changed.filter(m.nonEmpty) --> PrintLn(m.mkString("\n"))
val in = AudioFileIn()
in.value <-> Artifact("run:in")
val out = AudioFileOut()
out.value <-> Artifact("run:out")
out.fileType <-> "run:out-type".attr(0)
out.sampleFormat <-> "run:out-format".attr(1)
val ggGain = DoubleField()
ggGain.unit = "dB"
ggGain.min = -180.0
ggGain.max = +180.0
ggGain.value <-> "run:gain-db".attr(0.0)
val ggGainType = ComboBox(
List("Normalized", "Immediate")
)
ggGainType.index <-> "run:gain-type".attr(0)
val ggLimVal = DoubleField()
ggLimVal.min = 0.001
ggLimVal.max = 1.0
ggLimVal.decimals = 3
ggLimVal.value <-> "run:limit".attr(0.1)
val ggLeakDC = CheckBox()
ggLeakDC.selected <-> "run:leak-dc".attr(true)
def mkLabel(text: String) = {
val l = Label(text)
l.hAlign = Align.Trailing
l
}
def left(c: Component*): Component = {
val f = FlowPanel(c: _*)
f.align = Align.Leading
f.vGap = 0
f
}
val p = GridPanel(
mkLabel("Sound Input:" ), in,
mkLabel("Input Output:"), out,
mkLabel("Gain:"), left(ggGain, ggGainType),
Label(" "), Label(""),
mkLabel("Limit:"), left(ggLimVal),
mkLabel("Remove DC:"), left(ggLeakDC)
)
p.columns = 2
p.hGap = 8
p.compact = true
val ggRender = Button(" Render ")
val ggCancel = Button(" X ")
ggCancel.tooltip = "Cancel Rendering"
val pb = ProgressBar()
ggRender.clicked --> r.run
ggCancel.clicked --> r.stop
val stopped = (r.state sig_== 0) || (r.state sig_== 4)
ggRender.enabled = stopped
ggCancel.enabled = !stopped
pb.value = (r.progress * 100).toInt
val bot = BorderPanel(
center = pb,
east = {
val f = FlowPanel(ggCancel, ggRender)
f.vGap = 0
f
}
)
bot.vGap = 0
val bp = BorderPanel(
north = p,
south = bot
)
bp.vGap = 8
bp.border = Border.Empty(8, 8, 0, 4)
bp
}
w
}
}
| Sciss/FScape-next | modules/src/main/scala/de/sciss/fscape/modules/ModSlewRateLimiter.scala | Scala | agpl-3.0 | 5,362 |
package walfie.gbf.raidfinder.client.views
import com.thoughtworks.binding
import com.thoughtworks.binding.Binding
import com.thoughtworks.binding.Binding._
import org.scalajs.dom
import org.scalajs.dom.raw._
import scala.scalajs.js
import scala.util.Try
import walfie.gbf.raidfinder.client._
import walfie.gbf.raidfinder.client.audio._
import walfie.gbf.raidfinder.client.syntax.{ElementOps, EventOps, HTMLElementOps, StringOps}
import walfie.gbf.raidfinder.client.ViewModel._
import walfie.gbf.raidfinder.protocol._
object SoundSelectionDialog {
@binding.dom
def element(
selectedSoundId: Var[Option[NotificationSoundId]],
onSave: Option[NotificationSoundId] => Unit
): Binding[HTMLElement] = {
// TODO: This is taken directly from MainDialog. Maybe make a generic version.
val dialog = dom.document.createElement("dialog").asInstanceOf[HTMLElement]
dialog.classList.add("mdl-dialog")
dialog.classList.add("gbfrf-dialog")
val dynamicDialog = dialog.asInstanceOf[js.Dynamic]
val closeModal = { (e: Event) => dynamicDialog.close(); () }
if (js.isUndefined(dynamicDialog.showModal)) {
js.Dynamic.global.dialogPolyfill.registerDialog(dialog)
}
// TODO: Write a more generic event delegation helper
val soundOptions = listItems(selectedSoundId).bind
soundOptions.addEventListener("click", { e: Event =>
val soundIdOpt = for {
target <- e.targetElement
element <- target.findParent(_.classList.contains("gbfrf-js-soundSelect"))
soundIdString <- Option(element.getAttribute("data-soundId"))
soundId <- Try(soundIdString.toInt).toOption
sound <- NotificationSounds.findById(soundId)
} yield {
sound.play()
soundId
}
selectedSoundId := soundIdOpt
})
val inner =
<div class="gbfrf-sound-selection-dialog gbfrf-dialog__container mdl-layout mdl-js-layout mdl-layout--fixed-header">
{ header(title = "Title", close = closeModal).bind }
<section class="gbfrf-dialog__content">
<!-- // TODO: Put this in a method -->
<div class="gbfrf-settings__content">
{ soundOptions }
</div>
</section>
<hr style="margin: 0;"/>
{ footer(onSave = (e: Event) => { onSave(selectedSoundId.get); closeModal(e) }, onCancel = closeModal).bind }
</div>
dialog.appendChild(inner)
dialog.mdl
}
@binding.dom
def listItems(selectedSoundId: Binding[Option[NotificationSoundId]]): Binding[HTMLUListElement] = {
<ul class="mdl-list" style="padding: 0; margin: 0;">
{
// Using -1 because it doesn't match any sound ID. This is such a hack.
soundListItem(-1, "None", selectedSoundId).bind
}
{
Constants(NotificationSounds.all: _*).map { sound =>
soundListItem(sound.id, sound.fileName, selectedSoundId).bind
}
}
</ul>
}
@binding.dom
def soundListItem(id: Int, text: String, selectedSoundId: Binding[Option[NotificationSoundId]]): Binding[HTMLLIElement] = {
val htmlId = "gbfrf-sound-option--" + id
val htmlName = "gbfrf-sound-option"
val mdlIsChecked = "is-checked"
val radioButton =
<input class="mdl-radio__button" id={ htmlId } type="radio" value={ id.toString } name={ htmlName }/>
val labelElement =
<label for={ htmlId } class="mdl-list__item-primary-content mdl-radio mdl-js-radio mdl-js-ripple-effect">
{ radioButton }
<span class="mdl-radio__label">{ text }</span>
</label>
Binding {
radioButton.checked = selectedSoundId.bind.contains(id)
// MDL radio input doesn't update automatically if the real radio button is toggled
if (radioButton.checked) labelElement.classList.add(mdlIsChecked)
else labelElement.classList.remove(mdlIsChecked)
}.watch
val liClass = "gbfrf-js-soundSelect gbfrf--sound-select__item gbfrf-settings__item mdl-list__item"
<li class={ liClass } data:data-soundId={ id.toString }>
{ labelElement }
</li>
}
@binding.dom
def header(title: String, close: Event => Unit): Binding[HTMLElement] = {
<header class="mdl-layout__header">
<div class="mdl-layout__header-row gbfrf-column__header-row">
<span class="mdl-layout-title">Notification Sound</span>
<div class="mdl-layout-spacer"></div>
<div class="mdl-button mdl-js-button mdl-button--icon material-icons js-close-dialog" onclick={ close }>
<i class="material-icons">clear</i>
</div>
</div>
</header>
}
@binding.dom
def footer(onSave: Event => Unit, onCancel: Event => Unit): Binding[HTMLElement] = {
<div class="mdl-dialog__actions">
<button type="button" class="mdl-button mdl-button--primary gbfrf-dialog__button" onclick={ onSave }>Save</button>
<button type="button" class="mdl-button gbfrf-dialog__button" onclick={ onCancel }>Cancel</button>
</div>
}
}
| xheres/api-gbfraidfinder | client/src/main/scala/walfie/gbf/raidfinder/client/views/SoundSelectionDialog.scala | Scala | mit | 4,968 |
package ch32_matching
import scala.util.control.Breaks.{break, breakable}
object RabinKarp {
def firstIndexOf(main: Array[Char], sub: Array[Char]): Int = {
require(main != null, "main array required")
require(sub != null, "sub array required")
    require(main.length >= sub.length, "sub array should not be longer than main array")
val baseNums: Array[Long] = new Array[Long](sub.length)
for (i <- sub.indices) {
baseNums(i) = scala.math.pow(128, i).longValue()
}
val subHash = hash(sub, baseNums)
var result = -1
breakable {
      for (i <- 0 to (main.length - sub.length)) {
if (hash(main.slice(i, i + sub.length), baseNums).equals(subHash)
&& main.slice(i, i + sub.length).sameElements(sub)) {
result = i
break
}
}
}
result
}
def hash(array: Array[Char], baseNums: Array[Long]): Long = {
var hash = 0L
for (i <- array.indices) {
hash += array(i).toInt * baseNums(array.length - 1 - i)
}
hash
}
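  // Editor's note (hedged sketch): firstIndexOf above recomputes the hash of
  // every window from scratch, i.e. O(n*m) overall. With the weighting used
  // here (char i multiplied by 128^(m-1-i)), the hash of the next window can
  // be derived from the previous one in O(1). This helper is an illustrative
  // addition and is not called by firstIndexOf:
  def nextHash(prevHash: Long, removed: Char, added: Char, highestBase: Long): Long =
    (prevHash - removed.toInt * highestBase) * 128 + added.toInt
  // where highestBase == baseNums(sub.length - 1), i.e. 128^(sub.length - 1)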
}
| wangzheng0822/algo | scala/src/main/scala/ch32_matching/RabinKarp.scala | Scala | apache-2.0 | 1,035 |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.sumologic.sumobot.http_frontend
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.model.HttpMethods.{GET, HEAD, OPTIONS}
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.ws.{Message, TextMessage, WebSocketRequest}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Flow, Keep, Sink, Source}
import akka.testkit.{TestActorRef, TestKit, TestProbe}
import com.sumologic.sumobot.brain.InMemoryBrain
import com.sumologic.sumobot.core.{Bootstrap, HttpReceptionist}
import com.sumologic.sumobot.http_frontend.authentication.NoAuthentication
import com.sumologic.sumobot.plugins.PluginsFromProps
import com.sumologic.sumobot.plugins.help.Help
import com.sumologic.sumobot.plugins.system.System
import com.sumologic.sumobot.test.annotated.SumoBotTestKit
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}
class SumoBotHttpServerTest
extends SumoBotTestKit(ActorSystem("SumoBotHttpServerTest"))
with BeforeAndAfterAll {
private implicit val materializer = ActorMaterializer()
private val host = "localhost"
private val port = 9999
private val origin = "https://sumologic.com"
private val httpServerOptions = SumoBotHttpServerOptions(host, port, origin,
new NoAuthentication(ConfigFactory.empty()), "", None, Seq.empty)
private val httpServer = new SumoBotHttpServer(httpServerOptions)
private val brain = TestActorRef(Props[InMemoryBrain])
private val httpReceptionist = TestActorRef(new HttpReceptionist(brain))
private val pluginCollection = PluginsFromProps(Array(Props(classOf[Help]), Props(classOf[System])))
override def beforeAll: Unit = {
Bootstrap.receptionist = Some(httpReceptionist)
pluginCollection.setup
}
"SumoBotHttpServer" should {
"handle static pages requests" when {
"accessing /" in {
sendHttpRequest("/") {
(response, responseString) =>
response.status should be (StatusCodes.OK)
response.header[`Content-Type`] should be(Some(`Content-Type`(`text/html(UTF-8)`)))
responseString should include("<!doctype html>")
}
}
"accessing /script.js" in {
sendHttpRequest("/script.js") {
(response, responseString) =>
response.status should be (StatusCodes.OK)
response.header[`Content-Type`] should be(Some(`Content-Type`(ContentType(MediaTypes.`application/javascript`, HttpCharsets.`UTF-8`))))
responseString should include("window.addEventListener")
}
}
"accessing invalid page" in {
sendHttpRequest("/blehbleh") {
(response, _) =>
response.status should be (StatusCodes.Forbidden)
}
}
}
"handle WebSocket requests" when {
"sending message to Help plugin" in {
val probe = TestProbe()
val disconnectPromise = sendWebSocketMessage("help", probe.ref)
eventually {
val response = probe.expectMsgClass(classOf[TextMessage.Strict])
response.getStrictText should include("Help")
response.getStrictText should include("System")
}
disconnectPromise.success(None)
}
"sending message to System plugin" in {
val probe = TestProbe()
val disconnectPromise = sendWebSocketMessage("when did you start?", probe.ref)
eventually {
val response = probe.expectMsgClass(classOf[TextMessage.Strict])
response.getStrictText should include("I started at ")
}
disconnectPromise.success(None)
}
"sending multiple messages" in {
val probe = TestProbe()
val disconnectPromise = sendWebSocketMessages(Array("help", "blahblah invalid command", "help"),
probe.ref)
eventually {
val firstResponse = probe.expectMsgClass(classOf[TextMessage.Strict])
firstResponse.getStrictText should include("Help")
}
eventually {
val secondResponse = probe.expectMsgClass(classOf[TextMessage.Strict])
secondResponse.getStrictText should include("Help")
}
disconnectPromise.success(None)
}
}
"send proper AllowOrigin header" when {
"sending HTTP request" in {
sendHttpRequest("/") {
(response, _) =>
response.header[`Access-Control-Allow-Origin`] should be (Some(`Access-Control-Allow-Origin`(origin)))
}
}
"sending WebSocket request" in {
val sink = Sink.ignore
val source = Source.maybe[Message]
val (upgradeResponse, disconnectPromise) = Http().singleWebSocketRequest(webSocketRequest,
Flow.fromSinkAndSourceMat(sink, source)(Keep.right))
val httpResponse = Await.result(upgradeResponse, 5.seconds).response
httpResponse.header[`Access-Control-Allow-Origin`] should be (Some(`Access-Control-Allow-Origin`(origin)))
disconnectPromise.success(None)
}
}
"handle OPTIONS requests" when {
"accessing root page" in {
sendHttpRequest("/", method = OPTIONS) {
(response, _) =>
response.status should be (StatusCodes.OK)
response.header[`Access-Control-Allow-Methods`] should be (Some(`Access-Control-Allow-Methods`(List(GET))))
}
}
"accessing WebSocket endpoint" in {
sendHttpRequest("/websocket", method = OPTIONS) {
(response, _) =>
response.status should be (StatusCodes.OK)
response.header[`Access-Control-Allow-Methods`] should be (Some(`Access-Control-Allow-Methods`(List(GET))))
}
}
}
"handle HEAD requests" in {
sendHttpRequest("/", method = HEAD) {
(response, _) =>
response.status should be (StatusCodes.OK)
response.header[`Content-Type`] should be(Some(`Content-Type`(`text/html(UTF-8)`)))
entityToString(response.entity).isEmpty should be (true)
}
}
}
private def sendHttpRequest(path: String, method: HttpMethod = GET)(handler: (HttpResponse, String) => Unit): Unit = {
val responseFuture = Http().singleRequest(httpRequest(path, method))
Await.result(responseFuture, 5.seconds) match {
case response: HttpResponse =>
val responseStringFuture = Unmarshal(response.entity).to[String]
val responseString = Await.result(responseStringFuture, 5.seconds)
handler(response, responseString)
case _ =>
fail("received invalid HTTP response")
}
}
private def sendWebSocketMessages(msgs: Seq[String], listenerRef: ActorRef): Promise[Option[Message]] = {
val sink = Sink.actorRef(listenerRef, "ended")
val source = Source(msgs.map(msg => TextMessage(msg)).toList).concatMat(Source.maybe[Message])(Keep.right)
// NOTE(pgrabowski, 2019-07-23): Using promise to signal when to close WebSocket.
// https://doc.akka.io/docs/akka-http/current/client-side/websocket-support.html#half-closed-websockets
val (_, disconnectPromise) = Http().singleWebSocketRequest(webSocketRequest,
Flow.fromSinkAndSourceMat(sink, source)(Keep.right))
disconnectPromise
}
private def sendWebSocketMessage(msg: String, listenerRef: ActorRef): Promise[Option[Message]] = {
sendWebSocketMessages(Array(msg), listenerRef)
}
private def httpRequest(path: String, method: HttpMethod): HttpRequest = {
HttpRequest(uri = s"http://$host:$port$path", method = method)
}
private def entityToString(httpEntity: HttpEntity): String = {
Await.result(Unmarshal(httpEntity).to[String], 5.seconds)
}
private val webSocketRequest = WebSocketRequest(s"ws://$host:$port/websocket")
override def afterAll: Unit = {
httpServer.terminate()
Bootstrap.receptionist = None
TestKit.shutdownActorSystem(system, 10.seconds, true)
}
}
| SumoLogic/sumobot | src/test/scala/com/sumologic/sumobot/http_frontend/SumoBotHttpServerTest.scala | Scala | apache-2.0 | 8,906 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.avro
import java.io._
import java.net.URL
import java.nio.file.{Files, Paths, StandardCopyOption}
import java.sql.{Date, Timestamp}
import java.util.{Locale, UUID}
import scala.collection.JavaConverters._
import org.apache.avro.{AvroTypeException, Schema, SchemaBuilder}
import org.apache.avro.Schema.{Field, Type}
import org.apache.avro.Schema.Type._
import org.apache.avro.file.{DataFileReader, DataFileWriter}
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericDatumWriter, GenericRecord}
import org.apache.avro.generic.GenericData.{EnumSymbol, Fixed}
import org.apache.commons.io.FileUtils
import org.apache.spark.{SPARK_VERSION_SHORT, SparkConf, SparkException, SparkUpgradeException}
import org.apache.spark.TestUtils.assertExceptionMsg
import org.apache.spark.sql._
import org.apache.spark.sql.TestingUDT.IntervalData
import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.logical.Filter
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils
import org.apache.spark.sql.catalyst.util.DateTimeTestUtils.{withDefaultTimeZone, LA, UTC}
import org.apache.spark.sql.execution.{FormattedMode, SparkPlan}
import org.apache.spark.sql.execution.datasources.{CommonFileDataSourceSuite, DataSource, FilePartition}
import org.apache.spark.sql.execution.datasources.v2.BatchScanExec
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.sql.v2.avro.AvroScan
import org.apache.spark.util.Utils
abstract class AvroSuite
extends QueryTest
with SharedSparkSession
with CommonFileDataSourceSuite
with NestedDataSourceSuiteBase {
import testImplicits._
override protected def dataSourceFormat = "avro"
override val nestedDataSources = Seq("avro")
val episodesAvro = testFile("episodes.avro")
val testAvro = testFile("test.avro")
override protected def beforeAll(): Unit = {
super.beforeAll()
// initialize SessionCatalog here so it has a clean hadoopConf
spark.sessionState.catalog
spark.conf.set(SQLConf.FILES_MAX_PARTITION_BYTES.key, 1024)
}
def checkReloadMatchesSaved(originalFile: String, newFile: String): Unit = {
    val originalEntries = spark.read.format("avro").load(originalFile).collect()
val newEntries = spark.read.format("avro").load(newFile)
checkAnswer(newEntries, originalEntries)
}
def checkAvroSchemaEquals(avroSchema: String, expectedAvroSchema: String): Unit = {
assert(new Schema.Parser().parse(avroSchema) ==
new Schema.Parser().parse(expectedAvroSchema))
}
def getAvroSchemaStringFromFiles(filePath: String): String = {
new DataFileReader({
val file = new File(filePath)
if (file.isFile) {
file
} else {
file.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("avro"))
.head
}
}, new GenericDatumReader[Any]()).getSchema.toString(false)
}
private def getResourceAvroFilePath(name: String): String = {
Thread.currentThread().getContextClassLoader.getResource(name).toString
}
test("resolve avro data source") {
val databricksAvro = "com.databricks.spark.avro"
// By default the backward compatibility for com.databricks.spark.avro is enabled.
Seq("org.apache.spark.sql.avro.AvroFileFormat", databricksAvro).foreach { provider =>
assert(DataSource.lookupDataSource(provider, spark.sessionState.conf) ===
classOf[org.apache.spark.sql.avro.AvroFileFormat])
}
withSQLConf(SQLConf.LEGACY_REPLACE_DATABRICKS_SPARK_AVRO_ENABLED.key -> "false") {
val message = intercept[AnalysisException] {
DataSource.lookupDataSource(databricksAvro, spark.sessionState.conf)
}.getMessage
assert(message.contains(s"Failed to find data source: $databricksAvro"))
}
}
test("reading from multiple paths") {
val df = spark.read.format("avro").load(episodesAvro, episodesAvro)
assert(df.count == 16)
}
test("reading and writing partitioned data") {
val df = spark.read.format("avro").load(episodesAvro)
val fields = List("title", "air_date", "doctor")
for (field <- fields) {
withTempPath { dir =>
val outputDir = s"$dir/${UUID.randomUUID}"
df.write.partitionBy(field).format("avro").save(outputDir)
val input = spark.read.format("avro").load(outputDir)
// makes sure that no fields got dropped.
// We convert Rows to Seqs in order to work around SPARK-10325
assert(input.select(field).collect().map(_.toSeq).toSet ===
df.select(field).collect().map(_.toSeq).toSet)
}
}
}
test("request no fields") {
val df = spark.read.format("avro").load(episodesAvro)
df.createOrReplaceTempView("avro_table")
assert(spark.sql("select count(*) from avro_table").collect().head === Row(8))
}
test("convert formats") {
withTempPath { dir =>
val df = spark.read.format("avro").load(episodesAvro)
df.write.parquet(dir.getCanonicalPath)
assert(spark.read.parquet(dir.getCanonicalPath).count() === df.count)
}
}
test("rearrange internal schema") {
withTempPath { dir =>
val df = spark.read.format("avro").load(episodesAvro)
df.select("doctor", "title").write.format("avro").save(dir.getCanonicalPath)
}
}
test("union(int, long) is read as long") {
withTempPath { dir =>
val avroSchema: Schema = {
val union =
Schema.createUnion(List(Schema.create(Type.INT), Schema.create(Type.LONG)).asJava)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toLong)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", 2)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", LongType, nullable = true)))
assert(df.collect().toSet == Set(Row(1L), Row(2L)))
}
}
test("union(float, double) is read as double") {
withTempPath { dir =>
val avroSchema: Schema = {
val union =
Schema.createUnion(List(Schema.create(Type.FLOAT), Schema.create(Type.DOUBLE)).asJava)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toFloat)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", 2.toDouble)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
assert(df.collect().toSet == Set(Row(1.toDouble), Row(2.toDouble)))
}
}
test("union(float, double, null) is read as nullable double") {
withTempPath { dir =>
val avroSchema: Schema = {
val union = Schema.createUnion(
List(Schema.create(Type.FLOAT),
Schema.create(Type.DOUBLE),
Schema.create(Type.NULL)
).asJava
)
val fields = Seq(new Field("field1", union, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
schema
}
val datumWriter = new GenericDatumWriter[GenericRecord](avroSchema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(avroSchema, new File(s"$dir.avro"))
val rec1 = new GenericData.Record(avroSchema)
rec1.put("field1", 1.toFloat)
dataFileWriter.append(rec1)
val rec2 = new GenericData.Record(avroSchema)
rec2.put("field1", null)
dataFileWriter.append(rec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema.fields === Seq(StructField("field1", DoubleType, nullable = true)))
assert(df.collect().toSet == Set(Row(1.toDouble), Row(null)))
}
}
test("Union of a single type") {
withTempPath { dir =>
val UnionOfOne = Schema.createUnion(List(Schema.create(Type.INT)).asJava)
val fields = Seq(new Field("field1", UnionOfOne, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
avroRec.put("field1", 8)
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.first() == Row(8))
}
}
test("SPARK-27858 Union type: More than one non-null type") {
withTempDir { dir =>
val complexNullUnionType = Schema.createUnion(
List(Schema.create(Type.INT), Schema.create(Type.NULL), Schema.create(Type.STRING)).asJava)
val fields = Seq(
new Field("field1", complexNullUnionType, "doc", null.asInstanceOf[AnyVal])).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
avroRec.put("field1", 42)
dataFileWriter.append(avroRec)
val avroRec2 = new GenericData.Record(schema)
avroRec2.put("field1", "Alice")
dataFileWriter.append(avroRec2)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.read.format("avro").load(s"$dir.avro")
assert(df.schema === StructType.fromDDL("field1 struct<member0: int, member1: string>"))
assert(df.collect().toSet == Set(Row(Row(42, null)), Row(Row(null, "Alice"))))
}
}
test("Complex Union Type") {
withTempPath { dir =>
val fixedSchema = Schema.createFixed("fixed_name", "doc", "namespace", 4)
val enumSchema = Schema.createEnum("enum_name", "doc", "namespace", List("e1", "e2").asJava)
val complexUnionType = Schema.createUnion(
List(Schema.create(Type.INT), Schema.create(Type.STRING), fixedSchema, enumSchema).asJava)
val fields = Seq(
new Field("field1", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field2", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field3", complexUnionType, "doc", null.asInstanceOf[AnyVal]),
new Field("field4", complexUnionType, "doc", null.asInstanceOf[AnyVal])
).asJava
val schema = Schema.createRecord("name", "docs", "namespace", false)
schema.setFields(fields)
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, new File(s"$dir.avro"))
val avroRec = new GenericData.Record(schema)
val field1 = 1234
val field2 = "Hope that was not load bearing"
val field3 = Array[Byte](1, 2, 3, 4)
val field4 = "e2"
avroRec.put("field1", field1)
avroRec.put("field2", field2)
avroRec.put("field3", new Fixed(fixedSchema, field3))
avroRec.put("field4", new EnumSymbol(enumSchema, field4))
dataFileWriter.append(avroRec)
dataFileWriter.flush()
dataFileWriter.close()
val df = spark.sqlContext.read.format("avro").load(s"$dir.avro")
assertResult(field1)(df.selectExpr("field1.member0").first().get(0))
assertResult(field2)(df.selectExpr("field2.member1").first().get(0))
assertResult(field3)(df.selectExpr("field3.member2").first().get(0))
assertResult(field4)(df.selectExpr("field4.member3").first().get(0))
}
}
test("Lots of nulls") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("binary", BinaryType, true),
StructField("timestamp", TimestampType, true),
StructField("array", ArrayType(ShortType), true),
StructField("map", MapType(StringType, StringType), true),
StructField("struct", StructType(Seq(StructField("int", IntegerType, true))))))
val rdd = spark.sparkContext.parallelize(Seq[Row](
Row(null, new Timestamp(1), Array[Short](1, 2, 3), null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null),
Row(null, null, null, null, null)))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
test("Struct field type") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("float", FloatType, true),
StructField("short", ShortType, true),
StructField("byte", ByteType, true),
StructField("boolean", BooleanType, true)
))
val rdd = spark.sparkContext.parallelize(Seq(
Row(1f, 1.toShort, 1.toByte, true),
Row(2f, 2.toShort, 2.toByte, true),
Row(3f, 3.toShort, 3.toByte, true)
))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
private def createDummyCorruptFile(dir: File): Unit = {
Utils.tryWithResource {
FileUtils.forceMkdir(dir)
val corruptFile = new File(dir, "corrupt.avro")
new BufferedWriter(new FileWriter(corruptFile))
} { writer =>
writer.write("corrupt")
}
}
test("Ignore corrupt Avro file if flag IGNORE_CORRUPT_FILES enabled") {
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
withTempPath { dir =>
createDummyCorruptFile(dir)
val message = intercept[FileNotFoundException] {
spark.read.format("avro").load(dir.getAbsolutePath).schema
}.getMessage
assert(message.contains("No Avro files found."))
Files.copy(
Paths.get(new URL(episodesAvro).toURI),
Paths.get(dir.getCanonicalPath, "episodes.avro"))
val result = spark.read.format("avro").load(episodesAvro).collect()
checkAnswer(spark.read.format("avro").load(dir.getAbsolutePath), result)
}
}
}
test("Throws IOException on reading corrupt Avro file if flag IGNORE_CORRUPT_FILES disabled") {
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
withTempPath { dir =>
createDummyCorruptFile(dir)
val message = intercept[org.apache.spark.SparkException] {
spark.read.format("avro").load(dir.getAbsolutePath)
}.getMessage
assert(message.contains("Could not read file"))
}
}
}
test("Date field type") {
withTempPath { dir =>
val schema = StructType(Seq(
StructField("float", FloatType, true),
StructField("date", DateType, true)
))
withDefaultTimeZone(UTC) {
val rdd = spark.sparkContext.parallelize(Seq(
Row(1f, null),
Row(2f, new Date(1451948400000L)),
Row(3f, new Date(1460066400500L))
))
val df = spark.createDataFrame(rdd, schema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
checkAnswer(
spark.read.format("avro").load(dir.toString).select("date"),
Seq(Row(null), Row(new Date(1451865600000L)), Row(new Date(1459987200000L))))
}
}
}
test("Array data types") {
withTempPath { dir =>
val testSchema = StructType(Seq(
StructField("byte_array", ArrayType(ByteType), true),
StructField("short_array", ArrayType(ShortType), true),
StructField("float_array", ArrayType(FloatType), true),
StructField("bool_array", ArrayType(BooleanType), true),
StructField("long_array", ArrayType(LongType), true),
StructField("double_array", ArrayType(DoubleType), true),
StructField("decimal_array", ArrayType(DecimalType(10, 0)), true),
StructField("bin_array", ArrayType(BinaryType), true),
StructField("timestamp_array", ArrayType(TimestampType), true),
StructField("array_array", ArrayType(ArrayType(StringType), true), true),
StructField("struct_array", ArrayType(
StructType(Seq(StructField("name", StringType, true)))))))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val rdd = spark.sparkContext.parallelize(Seq(
Row(arrayOfByte, Array[Short](1, 2, 3, 4), Array[Float](1f, 2f, 3f, 4f),
Array[Boolean](true, false, true, false), Array[Long](1L, 2L), Array[Double](1.0, 2.0),
Array[BigDecimal](BigDecimal.valueOf(3)), Array[Array[Byte]](arrayOfByte, arrayOfByte),
Array[Timestamp](new Timestamp(0)),
Array[Array[String]](Array[String]("CSH, tearing down the walls that divide us", "-jd")),
Array[Row](Row("Bobby G. can't swim")))))
val df = spark.createDataFrame(rdd, testSchema)
df.write.format("avro").save(dir.toString)
assert(spark.read.format("avro").load(dir.toString).count == rdd.count)
}
}
test("write with compression - sql configs") {
withTempPath { dir =>
val uncompressDir = s"$dir/uncompress"
val bzip2Dir = s"$dir/bzip2"
val xzDir = s"$dir/xz"
val deflateDir = s"$dir/deflate"
val snappyDir = s"$dir/snappy"
val zstandardDir = s"$dir/zstandard"
val df = spark.read.format("avro").load(testAvro)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "uncompressed")
df.write.format("avro").save(uncompressDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "bzip2")
df.write.format("avro").save(bzip2Dir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "xz")
df.write.format("avro").save(xzDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "deflate")
spark.conf.set(SQLConf.AVRO_DEFLATE_LEVEL.key, "9")
df.write.format("avro").save(deflateDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "snappy")
df.write.format("avro").save(snappyDir)
spark.conf.set(SQLConf.AVRO_COMPRESSION_CODEC.key, "zstandard")
df.write.format("avro").save(zstandardDir)
val uncompressSize = FileUtils.sizeOfDirectory(new File(uncompressDir))
val bzip2Size = FileUtils.sizeOfDirectory(new File(bzip2Dir))
val xzSize = FileUtils.sizeOfDirectory(new File(xzDir))
val deflateSize = FileUtils.sizeOfDirectory(new File(deflateDir))
val snappySize = FileUtils.sizeOfDirectory(new File(snappyDir))
val zstandardSize = FileUtils.sizeOfDirectory(new File(zstandardDir))
assert(uncompressSize > deflateSize)
assert(snappySize > deflateSize)
assert(snappySize > bzip2Size)
assert(bzip2Size > xzSize)
assert(uncompressSize > zstandardSize)
}
}
test("dsl test") {
val results = spark.read.format("avro").load(episodesAvro).select("title").collect()
assert(results.length === 8)
}
test("old avro data source name works") {
val results =
spark.read.format("com.databricks.spark.avro")
.load(episodesAvro).select("title").collect()
assert(results.length === 8)
}
test("support of various data types") {
// This test uses data from test.avro. You can see the data and the schema of this file in
// test.json and test.avsc
val all = spark.read.format("avro").load(testAvro).collect()
assert(all.length == 3)
val str = spark.read.format("avro").load(testAvro).select("string").collect()
assert(str.map(_(0)).toSet.contains("Terran is IMBA!"))
val simple_map = spark.read.format("avro").load(testAvro).select("simple_map").collect()
assert(simple_map(0)(0).getClass.toString.contains("Map"))
assert(simple_map.map(_(0).asInstanceOf[Map[String, Some[Int]]].size).toSet == Set(2, 0))
val union0 = spark.read.format("avro").load(testAvro).select("union_string_null").collect()
assert(union0.map(_(0)).toSet == Set("abc", "123", null))
val union1 = spark.read.format("avro").load(testAvro).select("union_int_long_null").collect()
assert(union1.map(_(0)).toSet == Set(66, 1, null))
val union2 = spark.read.format("avro").load(testAvro).select("union_float_double").collect()
assert(
union2
.map(x => java.lang.Double.valueOf(x(0).toString))
.exists(p => Math.abs(p - Math.PI) < 0.001))
val fixed = spark.read.format("avro").load(testAvro).select("fixed3").collect()
assert(fixed.map(_(0).asInstanceOf[Array[Byte]]).exists(p => p(1) == 3))
val enum = spark.read.format("avro").load(testAvro).select("enum").collect()
assert(enum.map(_(0)).toSet == Set("SPADES", "CLUBS", "DIAMONDS"))
val record = spark.read.format("avro").load(testAvro).select("record").collect()
assert(record(0)(0).getClass.toString.contains("Row"))
assert(record.map(_(0).asInstanceOf[Row](0)).contains("TEST_STR123"))
val array_of_boolean =
spark.read.format("avro").load(testAvro).select("array_of_boolean").collect()
assert(array_of_boolean.map(_(0).asInstanceOf[scala.collection.Seq[Boolean]].size).toSet ==
Set(3, 1, 0))
val bytes = spark.read.format("avro").load(testAvro).select("bytes").collect()
assert(bytes.map(_(0).asInstanceOf[Array[Byte]].length).toSet == Set(3, 1, 0))
}
test("sql test") {
spark.sql(
s"""
|CREATE TEMPORARY VIEW avroTable
|USING avro
|OPTIONS (path "${episodesAvro}")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM avroTable").collect().length === 8)
}
test("conversion to avro and back") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
withTempPath { dir =>
val avroDir = s"$dir/avro"
spark.read.format("avro").load(testAvro).write.format("avro").save(avroDir)
checkReloadMatchesSaved(testAvro, avroDir)
}
}
test("conversion to avro and back with namespace") {
// Note that test.avro includes a variety of types, some of which are nullable. We expect to
// get the same values back.
withTempPath { tempDir =>
val name = "AvroTest"
val namespace = "org.apache.spark.avro"
val parameters = Map("recordName" -> name, "recordNamespace" -> namespace)
val avroDir = tempDir + "/namedAvro"
spark.read.format("avro").load(testAvro)
.write.options(parameters).format("avro").save(avroDir)
checkReloadMatchesSaved(testAvro, avroDir)
      // Look at the raw file and make sure it has the namespace info
val rawSaved = spark.sparkContext.textFile(avroDir)
val schema = rawSaved.collect().mkString("")
assert(schema.contains(name))
assert(schema.contains(namespace))
}
}
test("SPARK-34229: Avro should read decimal values with the file schema") {
withTempPath { path =>
sql("SELECT 3.14 a").write.format("avro").save(path.toString)
val data = spark.read.schema("a DECIMAL(4, 3)").format("avro").load(path.toString).collect()
assert(data.map(_ (0)).contains(new java.math.BigDecimal("3.140")))
}
}
test("converting some specific sparkSQL types to avro") {
withTempPath { tempDir =>
val testSchema = StructType(Seq(
StructField("Name", StringType, false),
StructField("Length", IntegerType, true),
StructField("Time", TimestampType, false),
StructField("Decimal", DecimalType(10, 2), true),
StructField("Binary", BinaryType, false)))
val arrayOfByte = new Array[Byte](4)
for (i <- arrayOfByte.indices) {
arrayOfByte(i) = i.toByte
}
val cityRDD = spark.sparkContext.parallelize(Seq(
Row("San Francisco", 12, new Timestamp(666), null, arrayOfByte),
Row("Palo Alto", null, new Timestamp(777), null, arrayOfByte),
Row("Munich", 8, new Timestamp(42), Decimal(3.14), arrayOfByte)))
val cityDataFrame = spark.createDataFrame(cityRDD, testSchema)
val avroDir = tempDir + "/avro"
cityDataFrame.write.format("avro").save(avroDir)
assert(spark.read.format("avro").load(avroDir).collect().length == 3)
      // Timestamps are converted to longs
val times = spark.read.format("avro").load(avroDir).select("Time").collect()
assert(times.map(_(0)).toSet ==
Set(new Timestamp(666), new Timestamp(777), new Timestamp(42)))
// DecimalType should be converted to string
val decimals = spark.read.format("avro").load(avroDir).select("Decimal").collect()
assert(decimals.map(_(0)).contains(new java.math.BigDecimal("3.14")))
// There should be a null entry
val length = spark.read.format("avro").load(avroDir).select("Length").collect()
assert(length.map(_(0)).contains(null))
val binary = spark.read.format("avro").load(avroDir).select("Binary").collect()
for (i <- arrayOfByte.indices) {
assert(binary(1)(0).asInstanceOf[Array[Byte]](i) == arrayOfByte(i))
}
}
}
test("correctly read long as date/timestamp type") {
withTempPath { tempDir =>
val currentTime = new Timestamp(System.currentTimeMillis())
val currentDate = new Date(System.currentTimeMillis())
val schema = StructType(Seq(
StructField("_1", DateType, false), StructField("_2", TimestampType, false)))
val writeDs = Seq((currentDate, currentTime)).toDS
val avroDir = tempDir + "/avro"
writeDs.write.format("avro").save(avroDir)
assert(spark.read.format("avro").load(avroDir).collect().length == 1)
val readDs = spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)]
assert(readDs.collect().sameElements(writeDs.collect()))
}
}
test("support of globbed paths") {
val resourceDir = testFile(".")
val e1 = spark.read.format("avro").load(resourceDir + "../*/episodes.avro").collect()
assert(e1.length == 8)
val e2 = spark.read.format("avro").load(resourceDir + "../../*/*/episodes.avro").collect()
assert(e2.length == 8)
}
test("does not coerce null date/timestamp value to 0 epoch.") {
withTempPath { tempDir =>
val nullTime: Timestamp = null
val nullDate: Date = null
val schema = StructType(Seq(
StructField("_1", DateType, nullable = true),
StructField("_2", TimestampType, nullable = true))
)
val writeDs = Seq((nullDate, nullTime)).toDS
val avroDir = tempDir + "/avro"
writeDs.write.format("avro").save(avroDir)
val readValues =
spark.read.schema(schema).format("avro").load(avroDir).as[(Date, Timestamp)].collect
assert(readValues.size == 1)
assert(readValues.head == ((nullDate, nullTime)))
}
}
test("support user provided avro schema") {
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name" : "string",
| "type" : "string",
| "doc" : "Meaningless string of characters"
| }]
|}
""".stripMargin
val result = spark
.read
.option("avroSchema", avroSchema)
.format("avro")
.load(testAvro)
.collect()
val expected = spark.read.format("avro").load(testAvro).select("string").collect()
assert(result.sameElements(expected))
}
test("SPARK-34416: support user provided avro schema url") {
val avroSchemaUrl = testFile("test_sub.avsc")
val result = spark.read.option("avroSchemaUrl", avroSchemaUrl)
.format("avro")
.load(testAvro)
.collect()
val expected = spark.read.format("avro").load(testAvro).select("string").collect()
assert(result.sameElements(expected))
}
test("SPARK-34416: support user provided both avro schema and avro schema url") {
val avroSchemaUrl = testFile("test_sub.avsc")
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name" : "union_int_long_null",
| "type" : ["int", "long", "null"]
| }]
|}
""".stripMargin
val result = spark.read
.option("avroSchema", avroSchema)
.option("avroSchemaUrl", avroSchemaUrl)
.format("avro")
.load(testAvro)
.collect()
val expected = spark.read.format("avro").load(testAvro).select("union_int_long_null").collect()
assert(result.sameElements(expected))
}
test("SPARK-34416: support user provided wrong avro schema url") {
val e = intercept[FileNotFoundException] {
spark.read
.option("avroSchemaUrl", "not_exists.avsc")
.format("avro")
.load(testAvro)
.collect()
}
assertExceptionMsg[FileNotFoundException](e, "File not_exists.avsc does not exist")
}
test("support user provided avro schema with defaults for missing fields") {
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name" : "missingField",
| "type" : "string",
| "default" : "foo"
| }]
|}
""".stripMargin
val result = spark
.read
.option("avroSchema", avroSchema)
.format("avro").load(testAvro).select("missingField").first
assert(result === Row("foo"))
}
test("support user provided avro schema for writing nullable enum type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "Suit",
| "type": [{ "type": "enum",
| "name": "SuitEnumType",
| "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
| }, "null"]
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
StructType(Seq(StructField("Suit", StringType, true))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing data not in the enum will throw an exception
val e = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
StructType(Seq(StructField("Suit", StringType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}
assertExceptionMsg[IncompatibleSchemaException](e,
""""NOT-IN-ENUM" cannot be written since it's not defined in enum""")
}
}
test("support user provided avro schema for writing non-nullable enum type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "Suit",
| "type": { "type": "enum",
| "name": "SuitEnumType",
| "symbols" : ["SPADES", "HEARTS", "DIAMONDS", "CLUBS"]
| }
| }]
|}
""".stripMargin
val dfWithNull = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row(null), Row("HEARTS"), Row("DIAMONDS"),
Row(null), Row("CLUBS"), Row("HEARTS"), Row("SPADES"))),
StructType(Seq(StructField("Suit", StringType, true))))
val df = spark.createDataFrame(dfWithNull.na.drop().rdd,
StructType(Seq(StructField("Suit", StringType, false))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing nulls without using avro union type will
// throw an exception as avro uses union type to handle null.
val e1 = intercept[SparkException] {
dfWithNull.write.format("avro")
.option("avroSchema", avroSchema).save(s"$tempDir/${UUID.randomUUID()}")
}
assertExceptionMsg[AvroTypeException](e1, "Not an enum: null")
// Writing df containing data not in the enum will throw an exception
val e2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row("SPADES"), Row("NOT-IN-ENUM"), Row("HEARTS"), Row("DIAMONDS"))),
StructType(Seq(StructField("Suit", StringType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}
assertExceptionMsg[IncompatibleSchemaException](e2,
""""NOT-IN-ENUM" cannot be written since it's not defined in enum""")
}
}
test("support user provided avro schema for writing nullable fixed type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "fixed2",
| "type": [{ "type": "fixed",
| "size": 2,
| "name": "fixed2"
| }, "null"]
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168).map(_.toByte)), Row(null))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val e1 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}
assertExceptionMsg[IncompatibleSchemaException](e1,
"3 bytes of binary data cannot be written into FIXED type with size of 2 bytes")
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val e2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, true))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}
assertExceptionMsg[IncompatibleSchemaException](e2,
"1 byte of binary data cannot be written into FIXED type with size of 2 bytes")
}
}
test("support user provided avro schema for writing non-nullable fixed type") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [{
| "name": "fixed2",
| "type": { "type": "fixed",
| "size": 2,
| "name": "fixed2"
| }
| }]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168).map(_.toByte)), Row(Array(1, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAnswer(df, spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val e1 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192, 168, 1).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}
assertExceptionMsg[IncompatibleSchemaException](e1,
"3 bytes of binary data cannot be written into FIXED type with size of 2 bytes")
// Writing df containing binary data that doesn't fit FIXED size will throw an exception
val e2 = intercept[SparkException] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(
Row(Array(192).map(_.toByte)))),
StructType(Seq(StructField("fixed2", BinaryType, false))))
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}
assertExceptionMsg[IncompatibleSchemaException](e2,
"1 byte of binary data cannot be written into FIXED type with size of 2 bytes")
}
}
test("support user provided avro schema for writing / reading fields with different ordering") {
withTempPath { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Age", "type": "int"},
| {"name": "Name", "type": "string"}
| ]
|}
""".stripMargin
val avroSchemaReversed =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Name", "type": "string"},
| {"name": "Age", "type": "int"}
| ]
|}
""".stripMargin
val df = spark.createDataFrame(spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))),
StructType(Seq(
StructField("Age", IntegerType, false),
StructField("Name", StringType, false))))
val tempSaveDir = s"$tempDir/save/"
// Writing avro file with reversed field ordering
df.write.format("avro").option("avroSchema", avroSchemaReversed).save(tempSaveDir)
// Reading reversed avro file
checkAnswer(df.select("Name", "Age"), spark.read.format("avro").load(tempSaveDir))
checkAvroSchemaEquals(avroSchemaReversed, getAvroSchemaStringFromFiles(tempSaveDir))
// Reading reversed avro file with provided original schema
val avroDf = spark.read.format("avro").option("avroSchema", avroSchema).load(tempSaveDir)
checkAnswer(df, avroDf)
assert(avroDf.schema.fieldNames.sameElements(Array("Age", "Name")))
}
}
test("support user provided non-nullable avro schema " +
"for nullable catalyst schema without any null record") {
withTempPath { tempDir =>
val catalystSchema =
StructType(Seq(
StructField("Age", IntegerType, true),
StructField("Name", StringType, true)))
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Age", "type": "int"},
| {"name": "Name", "type": "string"}
| ]
|}
""".stripMargin
val df = spark.createDataFrame(
spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))), catalystSchema)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").option("avroSchema", avroSchema).save(tempSaveDir)
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempSaveDir))
val message = intercept[Exception] {
spark.createDataFrame(spark.sparkContext.parallelize(Seq(Row(2, null))), catalystSchema)
.write.format("avro").option("avroSchema", avroSchema)
.save(s"$tempDir/${UUID.randomUUID()}")
}.getCause.getMessage
assert(message.contains("Caused by: java.lang.NullPointerException: "))
assert(message.contains(
"null of string in string in field Name of test_schema in test_schema"))
}
}
test("support user provided nullable avro schema " +
"for non-nullable catalyst schema without any null record") {
val catalystSchema =
StructType(Seq(
StructField("Age", IntegerType, nullable = false),
StructField("Name", StringType, nullable = false)))
val avroSchema = """
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Age", "type": ["null", "int"]},
| {"name": "Name", "type": ["null", "string"]}
| ]
|}
""".stripMargin
val df = spark.createDataFrame(
spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))), catalystSchema)
withTempPath { tempDir =>
df.write.format("avro").option("avroSchema", avroSchema).save(tempDir.getPath)
checkAvroSchemaEquals(avroSchema, getAvroSchemaStringFromFiles(tempDir.getPath))
}
}
test("SPARK-34365: support reading renamed schema using positionalFieldMatching") {
val renamedSchema = new StructType()
.add("foo", StringType)
.add("foo_map", MapType(StringType, IntegerType))
val dfLoaded = spark
.read
.option("positionalFieldMatching", true.toString)
.schema(renamedSchema)
.format("avro")
.load(testAvro)
assert(dfLoaded.schema === renamedSchema)
val expectedDf = spark.read.format("avro").load(testAvro).select("string", "simple_map")
assert(dfLoaded.select($"foo".as("string"), $"foo_map".as("simple_map")).collect().toSet ===
expectedDf.collect().toSet)
}
test("SPARK-34365: support writing with renamed schema using positionalFieldMatching") {
withTempDir { tempDir =>
val avroSchema = SchemaBuilder.record("renamed").fields()
.requiredString("foo")
.name("foo_map").`type`(Schema.createMap(Schema.create(Schema.Type.INT))).noDefault()
.endRecord()
val expectedDf = spark.read.format("avro").load(testAvro).select("string", "simple_map")
val savePath = s"$tempDir/save"
expectedDf.write
.option("avroSchema", avroSchema.toString)
.option("positionalFieldMatching", true.toString)
.format("avro")
.save(savePath)
val reloadedDf = spark.read.format("avro").load(savePath)
assert(reloadedDf.schema ===
new StructType().add("foo", StringType).add("foo_map", MapType(StringType, IntegerType)))
assert(reloadedDf.select($"foo".as("string"), $"foo_map".as("simple_map")).collect().toSet ===
expectedDf.collect().toSet)
}
}
test("unsupported nullable avro type") {
val catalystSchema =
StructType(Seq(
StructField("Age", IntegerType, nullable = false),
StructField("Name", StringType, nullable = false)))
for (unsupportedAvroType <- Seq("""["null", "int", "long"]""", """["int", "long"]""")) {
val avroSchema = s"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "Age", "type": $unsupportedAvroType},
| {"name": "Name", "type": ["null", "string"]}
| ]
|}
""".stripMargin
val df = spark.createDataFrame(
spark.sparkContext.parallelize(Seq(Row(2, "Aurora"))), catalystSchema)
withTempPath { tempDir =>
val message = intercept[SparkException] {
df.write.format("avro").option("avroSchema", avroSchema).save(tempDir.getPath)
}.getCause.getMessage
assert(message.contains("Only UNION of a null type and a non-null type is supported"))
}
}
}
test("error handling for unsupported Interval data types") {
withSQLConf(SQLConf.LEGACY_INTERVAL_ENABLED.key -> "true") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
var msg = intercept[AnalysisException] {
sql("select interval 1 days").write.format("avro").mode("overwrite").save(tempDir)
}.getMessage
assert(msg.contains("Cannot save interval data type into external storage.") ||
msg.contains("AVRO data source does not support interval data type."))
msg = intercept[AnalysisException] {
spark.udf.register("testType", () => new IntervalData())
sql("select testType()").write.format("avro").mode("overwrite").save(tempDir)
}.getMessage
assert(msg.toLowerCase(Locale.ROOT)
.contains(s"avro data source does not support interval data type."))
}
}
}
test("support Null data types") {
withTempDir { dir =>
val tempDir = new File(dir, "files").getCanonicalPath
val df = sql("select null")
df.write.format("avro").mode("overwrite").save(tempDir)
checkAnswer(spark.read.format("avro").load(tempDir), df)
}
}
test("throw exception if unable to write with user provided Avro schema") {
val input: Seq[(DataType, Schema.Type)] = Seq(
(NullType, NULL),
(BooleanType, BOOLEAN),
(ByteType, INT),
(ShortType, INT),
(IntegerType, INT),
(LongType, LONG),
(FloatType, FLOAT),
(DoubleType, DOUBLE),
(BinaryType, BYTES),
(DateType, INT),
(TimestampType, LONG),
(DecimalType(4, 2), BYTES)
)
def assertException(f: () => AvroSerializer): Unit = {
val message = intercept[org.apache.spark.sql.avro.IncompatibleSchemaException] {
f()
}.getMessage
assert(message.contains("Cannot convert SQL type"))
}
def resolveNullable(schema: Schema, nullable: Boolean): Schema = {
if (nullable && schema.getType != NULL) {
Schema.createUnion(schema, Schema.create(NULL))
} else {
schema
}
}
for {
i <- input
j <- input
nullable <- Seq(true, false)
} if (i._2 != j._2) {
val avroType = resolveNullable(Schema.create(j._2), nullable)
val avroArrayType = resolveNullable(Schema.createArray(avroType), nullable)
val avroMapType = resolveNullable(Schema.createMap(avroType), nullable)
val name = "foo"
val avroField = new Field(name, avroType, "", null.asInstanceOf[AnyVal])
val recordSchema = Schema.createRecord("name", "doc", "space", true, Seq(avroField).asJava)
val avroRecordType = resolveNullable(recordSchema, nullable)
val catalystType = i._1
val catalystArrayType = ArrayType(catalystType, nullable)
val catalystMapType = MapType(StringType, catalystType, nullable)
val catalystStructType = StructType(Seq(StructField(name, catalystType, nullable)))
for {
avro <- Seq(avroType, avroArrayType, avroMapType, avroRecordType)
catalyst <- Seq(catalystType, catalystArrayType, catalystMapType, catalystStructType)
} {
assertException(() => new AvroSerializer(catalyst, avro, nullable))
}
}
}
test("reading from invalid path throws exception") {
// Directory given has no avro files
intercept[AnalysisException] {
withTempPath(dir => spark.read.format("avro").load(dir.getCanonicalPath))
}
intercept[AnalysisException] {
spark.read.format("avro").load("very/invalid/path/123.avro")
}
    // For a globbed path that can't be matched to anything, a different exception is thrown (and
    // the exception message is helpful)
intercept[AnalysisException] {
spark.read.format("avro").load("*/*/*/*/*/*/*/something.avro")
}
intercept[FileNotFoundException] {
withTempPath { dir =>
FileUtils.touch(new File(dir, "test"))
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
spark.read.format("avro").load(dir.toString)
}
}
}
intercept[FileNotFoundException] {
withTempPath { dir =>
FileUtils.touch(new File(dir, "test"))
spark
.read
.option("ignoreExtension", false)
.format("avro")
.load(dir.toString)
}
}
}
test("SQL test insert overwrite") {
withTempPath { tempDir =>
val tempEmptyDir = s"$tempDir/sqlOverwrite"
// Create a temp directory for table that will be overwritten
new File(tempEmptyDir).mkdirs()
spark.sql(
s"""
|CREATE TEMPORARY VIEW episodes
|USING avro
|OPTIONS (path "${episodesAvro}")
""".stripMargin.replaceAll("\\n", " "))
spark.sql(
s"""
|CREATE TEMPORARY VIEW episodesEmpty
|(name string, air_date string, doctor int)
|USING avro
|OPTIONS (path "$tempEmptyDir")
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM episodes").collect().length === 8)
assert(spark.sql("SELECT * FROM episodesEmpty").collect().isEmpty)
spark.sql(
s"""
|INSERT OVERWRITE TABLE episodesEmpty
|SELECT * FROM episodes
""".stripMargin.replaceAll("\\n", " "))
assert(spark.sql("SELECT * FROM episodesEmpty").collect().length == 8)
}
}
test("test save and load") {
// Test if load works as expected
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
val newDf = spark.read.format("avro").load(tempSaveDir)
assert(newDf.count == 8)
}
}
test("test load with non-Avro file") {
// Test if load works as expected
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
Files.createFile(new File(tempSaveDir, "non-avro").toPath)
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
val newDf = spark.read.format("avro").load(tempSaveDir)
assert(newDf.count() == 8)
}
}
}
test("SPARK-34378: support writing user provided avro schema with missing optional fields") {
withTempDir { tempDir =>
val avroSchema = SchemaBuilder.builder().record("test").fields()
.requiredString("f1").optionalString("f2").endRecord().toString()
val data = Seq("foo", "bar")
// Fail if required field f1 is missing
val e = intercept[SparkException] {
data.toDF("f2").write.option("avroSchema", avroSchema).format("avro").save(s"$tempDir/fail")
}
assertExceptionMsg[IncompatibleSchemaException](e,
"Found field 'f1' in Avro schema but there is no match in the SQL schema")
val tempSaveDir = s"$tempDir/save/"
// Succeed if optional field f2 is missing
data.toDF("f1").write.option("avroSchema", avroSchema).format("avro").save(tempSaveDir)
val newDf = spark.read.format("avro").load(tempSaveDir)
assert(newDf.schema === new StructType().add("f1", StringType).add("f2", StringType))
val rows = newDf.collect()
assert(rows.map(_.getAs[String]("f1")).sorted === data.sorted)
rows.foreach(row => assert(row.isNullAt(1)))
}
}
test("SPARK-34133: Reading user provided schema respects case sensitivity for field matching") {
val wrongCaseSchema = new StructType()
.add("STRING", StringType, nullable = false)
.add("UNION_STRING_NULL", StringType, nullable = true)
val withSchema = spark.read
.schema(wrongCaseSchema)
.format("avro").load(testAvro).collect()
val withOutSchema = spark.read.format("avro").load(testAvro)
.select("STRING", "UNION_STRING_NULL")
.collect()
assert(withSchema.sameElements(withOutSchema))
withSQLConf((SQLConf.CASE_SENSITIVE.key, "true")) {
val out = spark.read.format("avro").schema(wrongCaseSchema).load(testAvro).collect()
assert(out.forall(_.isNullAt(0)))
assert(out.forall(_.isNullAt(1)))
}
}
test("SPARK-34133: Writing user provided schema respects case sensitivity for field matching") {
withTempDir { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "foo", "type": "int"},
| {"name": "BAR", "type": "int"}
| ]
|}
""".stripMargin
val df = Seq((1, 3), (2, 4)).toDF("FOO", "bar")
val savePath = s"$tempDir/save"
df.write.option("avroSchema", avroSchema).format("avro").save(savePath)
val loaded = spark.read.format("avro").load(savePath)
assert(loaded.schema === new StructType().add("foo", IntegerType).add("BAR", IntegerType))
assert(loaded.collect().map(_.getInt(0)).toSet === Set(1, 2))
assert(loaded.collect().map(_.getInt(1)).toSet === Set(3, 4))
withSQLConf((SQLConf.CASE_SENSITIVE.key, "true")) {
val e = intercept[SparkException] {
df.write.option("avroSchema", avroSchema).format("avro").save(s"$tempDir/save2")
}
assertExceptionMsg[IncompatibleSchemaException](e, "Cannot find field 'FOO' in Avro schema")
}
}
}
test("SPARK-34133: Writing user provided schema with multiple matching Avro fields fails") {
withTempDir { tempDir =>
val avroSchema =
"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "foo", "type": "int"},
| {"name": "FOO", "type": "string"}
| ]
|}
""".stripMargin
val errorMsg = "Searching for 'foo' in Avro schema at top-level record gave 2 matches. " +
"Candidates: [foo, FOO]"
assertExceptionMsg(intercept[SparkException] {
val fooBarDf = Seq((1, "3"), (2, "4")).toDF("foo", "bar")
fooBarDf.write.option("avroSchema", avroSchema).format("avro").save(s"$tempDir/save-fail")
}, errorMsg)
val savePath = s"$tempDir/save"
withSQLConf((SQLConf.CASE_SENSITIVE.key, "true")) {
val fooFooDf = Seq((1, "3"), (2, "4")).toDF("foo", "FOO")
fooFooDf.write.option("avroSchema", avroSchema).format("avro").save(savePath)
val loadedDf = spark.read.format("avro").schema(fooFooDf.schema).load(savePath)
assert(loadedDf.collect().toSet === fooFooDf.collect().toSet)
}
assertExceptionMsg(intercept[SparkException] {
val fooSchema = new StructType().add("foo", IntegerType)
spark.read.format("avro").schema(fooSchema).load(savePath).collect()
}, errorMsg)
}
}
test("read avro with user defined schema: read partial columns") {
val partialColumns = StructType(Seq(
StructField("string", StringType, false),
StructField("simple_map", MapType(StringType, IntegerType), false),
StructField("complex_map", MapType(StringType, MapType(StringType, StringType)), false),
StructField("union_string_null", StringType, true),
StructField("union_int_long_null", LongType, true),
StructField("fixed3", BinaryType, true),
StructField("fixed2", BinaryType, true),
StructField("enum", StringType, false),
StructField("record", StructType(Seq(StructField("value_field", StringType, false))), false),
StructField("array_of_boolean", ArrayType(BooleanType), false),
StructField("bytes", BinaryType, true)))
val withSchema = spark.read.schema(partialColumns).format("avro").load(testAvro).collect()
val withOutSchema = spark
.read
.format("avro")
.load(testAvro)
.select("string", "simple_map", "complex_map", "union_string_null", "union_int_long_null",
"fixed3", "fixed2", "enum", "record", "array_of_boolean", "bytes")
.collect()
assert(withSchema.sameElements(withOutSchema))
}
test("read avro with user defined schema: read non-exist columns") {
val schema =
StructType(
Seq(
StructField("non_exist_string", StringType, true),
StructField(
"record",
StructType(Seq(
StructField("non_exist_field", StringType, false),
StructField("non_exist_field2", StringType, false))),
false)))
val withEmptyColumn = spark.read.schema(schema).format("avro").load(testAvro).collect()
assert(withEmptyColumn.forall(_ == Row(null: String, Row(null: String, null: String))))
}
test("read avro file partitioned") {
withTempPath { dir =>
val df = (0 to 1024 * 3).toDS.map(i => s"record${i}").toDF("records")
val outputDir = s"$dir/${UUID.randomUUID}"
df.write.format("avro").save(outputDir)
val input = spark.read.format("avro").load(outputDir)
assert(input.collect.toSet.size === 1024 * 3 + 1)
assert(input.rdd.partitions.size > 2)
}
}
case class NestedBottom(id: Int, data: String)
case class NestedMiddle(id: Int, data: NestedBottom)
case class NestedTop(id: Int, data: NestedMiddle)
test("Validate namespace in avro file that has nested records with the same name") {
withTempPath { dir =>
val writeDf = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
writeDf.write.format("avro").save(dir.toString)
val schema = getAvroSchemaStringFromFiles(dir.toString)
      assert(schema.contains("\"namespace\":\"topLevelRecord\""))
      assert(schema.contains("\"namespace\":\"topLevelRecord.data\""))
}
}
test("saving avro that has nested records with the same name") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder
val writeDf = spark.createDataFrame(List(NestedTop(1, NestedMiddle(2, NestedBottom(3, "1")))))
val outputFolder = s"$tempDir/duplicate_names/"
writeDf.write.format("avro").save(outputFolder)
      // Read back the Avro file saved in the previous step
val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the read DataFrame equals the written DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
test("check namespace - toAvroType") {
val sparkSchema = StructType(Seq(
StructField("name", StringType, nullable = false),
StructField("address", StructType(Seq(
StructField("city", StringType, nullable = false),
StructField("state", StringType, nullable = false))),
nullable = false)))
val employeeType = SchemaConverters.toAvroType(sparkSchema,
recordName = "employee",
nameSpace = "foo.bar")
assert(employeeType.getFullName == "foo.bar.employee")
assert(employeeType.getName == "employee")
assert(employeeType.getNamespace == "foo.bar")
val addressType = employeeType.getField("address").schema()
assert(addressType.getFullName == "foo.bar.employee.address")
assert(addressType.getName == "address")
assert(addressType.getNamespace == "foo.bar.employee")
}
test("check empty namespace - toAvroType") {
val sparkSchema = StructType(Seq(
StructField("name", StringType, nullable = false),
StructField("address", StructType(Seq(
StructField("city", StringType, nullable = false),
StructField("state", StringType, nullable = false))),
nullable = false)))
val employeeType = SchemaConverters.toAvroType(sparkSchema,
recordName = "employee")
assert(employeeType.getFullName == "employee")
assert(employeeType.getName == "employee")
assert(employeeType.getNamespace == null)
val addressType = employeeType.getField("address").schema()
assert(addressType.getFullName == "employee.address")
assert(addressType.getName == "address")
assert(addressType.getNamespace == "employee")
}
case class NestedMiddleArray(id: Int, data: Array[NestedBottom])
case class NestedTopArray(id: Int, data: NestedMiddleArray)
test("saving avro that has nested records with the same name inside an array") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder
val writeDf = spark.createDataFrame(
List(NestedTopArray(1, NestedMiddleArray(2, Array(
NestedBottom(3, "1"), NestedBottom(4, "2")
))))
)
val outputFolder = s"$tempDir/duplicate_names_array/"
writeDf.write.format("avro").save(outputFolder)
      // Read back the Avro file saved in the previous step
val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the read DataFrame equals the written DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
case class NestedMiddleMap(id: Int, data: Map[String, NestedBottom])
case class NestedTopMap(id: Int, data: NestedMiddleMap)
test("saving avro that has nested records with the same name inside a map") {
withTempPath { tempDir =>
      // Save the Avro file to the output folder
val writeDf = spark.createDataFrame(
List(NestedTopMap(1, NestedMiddleMap(2, Map(
"1" -> NestedBottom(3, "1"), "2" -> NestedBottom(4, "2")
))))
)
val outputFolder = s"$tempDir/duplicate_names_map/"
writeDf.write.format("avro").save(outputFolder)
      // Read back the Avro file saved in the previous step
val readDf = spark.read.format("avro").load(outputFolder)
      // Check that the read DataFrame equals the written DataFrame
assert(readDf.collect().sameElements(writeDf.collect()))
}
}
test("SPARK-24805: do not ignore files without .avro extension by default") {
withTempDir { dir =>
Files.copy(
Paths.get(new URL(episodesAvro).toURI),
Paths.get(dir.getCanonicalPath, "episodes"))
val fileWithoutExtension = s"${dir.getCanonicalPath}/episodes"
val df1 = spark.read.format("avro").load(fileWithoutExtension)
assert(df1.count == 8)
val schema = new StructType()
.add("title", StringType)
.add("air_date", StringType)
.add("doctor", IntegerType)
val df2 = spark.read.schema(schema).format("avro").load(fileWithoutExtension)
assert(df2.count == 8)
}
}
test("SPARK-24836: checking the ignoreExtension option") {
withTempPath { tempDir =>
val df = spark.read.format("avro").load(episodesAvro)
assert(df.count == 8)
val tempSaveDir = s"$tempDir/save/"
df.write.format("avro").save(tempSaveDir)
Files.createFile(new File(tempSaveDir, "non-avro").toPath)
val newDf = spark
.read
.option("ignoreExtension", false)
.format("avro")
.load(tempSaveDir)
assert(newDf.count == 8)
}
}
test("SPARK-24836: ignoreExtension must override hadoop's config") {
withTempDir { dir =>
Files.copy(
Paths.get(new URL(episodesAvro).toURI),
Paths.get(dir.getCanonicalPath, "episodes"))
val hadoopConf = spark.sessionState.newHadoopConf()
withSQLConf(AvroFileFormat.IgnoreFilesWithoutExtensionProperty -> "true") {
val newDf = spark
.read
.option("ignoreExtension", "true")
.format("avro")
.load(s"${dir.getCanonicalPath}/episodes")
assert(newDf.count() == 8)
}
}
}
test("SPARK-24881: write with compression - avro options") {
def getCodec(dir: String): Option[String] = {
val files = new File(dir)
.listFiles()
.filter(_.isFile)
.filter(_.getName.endsWith("avro"))
files.map { file =>
val reader = new DataFileReader(file, new GenericDatumReader[Any]())
val r = reader.getMetaString("avro.codec")
r
}.map(v => if (v == "null") "uncompressed" else v).headOption
}
def checkCodec(df: DataFrame, dir: String, codec: String): Unit = {
val subdir = s"$dir/$codec"
df.write.option("compression", codec).format("avro").save(subdir)
assert(getCodec(subdir) == Some(codec))
}
withTempPath { dir =>
val path = dir.toString
val df = spark.read.format("avro").load(testAvro)
checkCodec(df, path, "uncompressed")
checkCodec(df, path, "deflate")
checkCodec(df, path, "snappy")
checkCodec(df, path, "bzip2")
checkCodec(df, path, "xz")
}
}
private def checkSchemaWithRecursiveLoop(avroSchema: String): Unit = {
val message = intercept[IncompatibleSchemaException] {
SchemaConverters.toSqlType(new Schema.Parser().parse(avroSchema))
}.getMessage
assert(message.contains("Found recursive reference in Avro schema"))
}
test("Detect recursive loop") {
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"}, // each element has a long
| {"name": "next", "type": ["null", "LongList"]} // optional next element
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields": [
| {
| "name": "value",
| "type": {
| "type": "record",
| "name": "foo",
| "fields": [
| {
| "name": "parent",
| "type": "LongList"
| }
| ]
| }
| }
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"},
| {"name": "array", "type": {"type": "array", "items": "LongList"}}
| ]
|}
""".stripMargin)
checkSchemaWithRecursiveLoop("""
|{
| "type": "record",
| "name": "LongList",
| "fields" : [
| {"name": "value", "type": "long"},
| {"name": "map", "type": {"type": "map", "values": "LongList"}}
| ]
|}
""".stripMargin)
}
test("log a warning of ignoreExtension deprecation") {
val logAppender = new LogAppender("deprecated Avro option 'ignoreExtension'")
withTempPath { dir =>
Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1), ("d", 2, 1))
.toDF("value", "p1", "p2")
.repartition(2)
.write
.format("avro")
.save(dir.getCanonicalPath)
withLogAppender(logAppender) {
spark
.read
.format("avro")
.option(AvroOptions.ignoreExtensionKey, false)
.load(dir.getCanonicalPath)
.count()
}
val deprecatedEvents = logAppender.loggingEvents
.filter(_.getMessage.getFormattedMessage.contains(
s"Option ${AvroOptions.ignoreExtensionKey} is deprecated"))
assert(deprecatedEvents.size === 1)
}
}
// It generates input files for the test below:
// "SPARK-31183, SPARK-37705: compatibility with Spark 2.4/3.2 in reading dates/timestamps"
ignore("SPARK-31855: generate test files for checking compatibility with Spark 2.4/3.2") {
val resourceDir = "external/avro/src/test/resources"
    val version = SPARK_VERSION_SHORT.replaceAll("\\.", "_")
def save(
in: Seq[String],
t: String,
dstFile: String,
options: Map[String, String] = Map.empty): Unit = {
withTempDir { dir =>
in.toDF("dt")
.select($"dt".cast(t))
.repartition(1)
.write
.mode("overwrite")
.options(options)
.format("avro")
.save(dir.getCanonicalPath)
Files.copy(
dir.listFiles().filter(_.getName.endsWith(".avro")).head.toPath,
Paths.get(resourceDir, dstFile),
StandardCopyOption.REPLACE_EXISTING)
}
}
withDefaultTimeZone(LA) {
withSQLConf(
SQLConf.SESSION_LOCAL_TIMEZONE.key -> LA.getId) {
save(
Seq("1001-01-01"),
"date",
s"before_1582_date_v$version.avro")
save(
Seq("1001-01-01 01:02:03.123"),
"timestamp",
s"before_1582_timestamp_millis_v$version.avro",
// scalastyle:off line.size.limit
Map("avroSchema" ->
s"""
| {
| "namespace": "logical",
| "type": "record",
| "name": "test",
| "fields": [
| {"name": "dt", "type": ["null", {"type": "long","logicalType": "timestamp-millis"}], "default": null}
| ]
| }
|""".stripMargin))
// scalastyle:on line.size.limit
save(
Seq("1001-01-01 01:02:03.123456"),
"timestamp",
s"before_1582_timestamp_micros_v$version.avro")
}
}
}
private def runInMode(
modes: Seq[LegacyBehaviorPolicy.Value])(f: Map[String, String] => Unit): Unit = {
modes.foreach { mode =>
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_READ.key -> mode.toString) {
f(Map.empty)
}
}
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_READ.key -> EXCEPTION.toString) {
modes.foreach { mode =>
f(Map(AvroOptions.DATETIME_REBASE_MODE -> mode.toString))
}
}
}
test("SPARK-31183, SPARK-37705: compatibility with Spark 2.4/3.2 in reading dates/timestamps") {
// test reading the existing 2.4/3.2 files and new 3.x files (with rebase on/off) together.
def checkReadMixedFiles(
fileName: String,
dt: String,
dataStr: String,
checkDefaultLegacyRead: String => Unit): Unit = {
withTempPaths(2) { paths =>
paths.foreach(_.delete())
val oldPath = getResourceAvroFilePath(fileName)
val path3_x = paths(0).getCanonicalPath
val path3_x_rebase = paths(1).getCanonicalPath
if (dt == "date") {
val df = Seq(dataStr).toDF("str").select($"str".cast("date").as("dt"))
checkDefaultLegacyRead(oldPath)
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString) {
df.write.format("avro").mode("overwrite").save(path3_x)
}
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
df.write.format("avro").save(path3_x_rebase)
}
// For Avro files written by Spark 3.0, we know the writer info and don't need the config
// to guide the rebase behavior.
runInMode(Seq(LEGACY)) { options =>
checkAnswer(
spark.read.options(options).format("avro").load(oldPath, path3_x, path3_x_rebase),
1.to(3).map(_ => Row(java.sql.Date.valueOf(dataStr))))
}
} else {
val df = Seq(dataStr).toDF("str").select($"str".cast("timestamp").as("dt"))
val avroSchema =
s"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "dt", "type": {"type": "long", "logicalType": "$dt"}}
| ]
|}""".stripMargin
// By default we should fail to write ancient datetime values.
val e = intercept[SparkException] {
df.write.format("avro").option("avroSchema", avroSchema).save(path3_x)
}
assert(e.getCause.getCause.getCause.isInstanceOf[SparkUpgradeException])
checkDefaultLegacyRead(oldPath)
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> CORRECTED.toString) {
df.write.format("avro").option("avroSchema", avroSchema).mode("overwrite").save(path3_x)
}
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
df.write.format("avro").option("avroSchema", avroSchema).save(path3_x_rebase)
}
// For Avro files written by Spark 3.0, we know the writer info and don't need the config
// to guide the rebase behavior.
runInMode(Seq(LEGACY)) { options =>
checkAnswer(
spark.read.options(options).format("avro").load(oldPath, path3_x, path3_x_rebase),
1.to(3).map(_ => Row(java.sql.Timestamp.valueOf(dataStr))))
}
}
}
}
def failInRead(path: String): Unit = {
val e = intercept[SparkException](spark.read.format("avro").load(path).collect())
assert(e.getCause.isInstanceOf[SparkUpgradeException])
}
def successInRead(path: String): Unit = spark.read.format("avro").load(path).collect()
Seq(
      // By default we should fail to read ancient datetime values when the Avro files don't
      // contain the Spark version.
"2_4_5" -> failInRead _,
"2_4_6" -> successInRead _,
"3_2_0" -> successInRead _
).foreach { case (version, checkDefaultRead) =>
checkReadMixedFiles(
s"before_1582_date_v$version.avro",
"date",
"1001-01-01",
checkDefaultRead)
checkReadMixedFiles(
s"before_1582_timestamp_micros_v$version.avro",
"timestamp-micros",
"1001-01-01 01:02:03.123456",
checkDefaultRead)
checkReadMixedFiles(
s"before_1582_timestamp_millis_v$version.avro",
"timestamp-millis",
"1001-01-01 01:02:03.123",
checkDefaultRead)
}
}
test("SPARK-31183, SPARK-37705: rebasing microseconds timestamps in write") {
    // Ignore the default JVM time zone and use the session time zone instead when rebasing.
DateTimeTestUtils.withDefaultTimeZone(DateTimeTestUtils.JST) {
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> DateTimeTestUtils.LA.getId) {
val tsStr = "1001-01-01 01:02:03.123456"
val nonRebased = "1001-01-07 01:09:05.123456"
withTempPath { dir =>
val path = dir.getAbsolutePath
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
Seq(tsStr).toDF("tsS")
.select($"tsS".cast("timestamp").as("ts"))
.write.format("avro")
.save(path)
}
// The file metadata indicates if it needs rebase or not, so we can always get the correct
// result regardless of the "rebase mode" config.
runInMode(Seq(LEGACY, CORRECTED, EXCEPTION)) { options =>
checkAnswer(
spark.read.options(options).format("avro").load(path).select($"ts".cast("string")),
Row(tsStr))
}
          // Force no rebase to prove that the written datetime values were rebased, and that we
          // would get a wrong result if we did not rebase them back while reading.
withSQLConf("spark.test.forceNoRebase" -> "true") {
checkAnswer(
spark.read.format("avro").load(path).select($"ts".cast("string")),
Row(nonRebased))
}
}
}
}
}
test("SPARK-31183, SPARK-37705: rebasing milliseconds timestamps in write") {
    // Ignore the default JVM time zone and use the session time zone instead when rebasing.
DateTimeTestUtils.withDefaultTimeZone(DateTimeTestUtils.JST) {
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> DateTimeTestUtils.LA.getId) {
val tsStr = "1001-01-01 01:02:03.123456"
val rebased = "1001-01-01 01:02:03.123"
val nonRebased = "1001-01-07 01:09:05.123"
Seq(
"""{"type": "long","logicalType": "timestamp-millis"}""",
""""long"""").foreach { tsType =>
val timestampSchema = s"""
|{
| "namespace": "logical",
| "type": "record",
| "name": "test",
| "fields": [
| {"name": "ts", "type": $tsType}
| ]
|}""".stripMargin
withTempPath { dir =>
val path = dir.getAbsolutePath
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
Seq(tsStr).toDF("tsS")
.select($"tsS".cast("timestamp").as("ts"))
.write
.option("avroSchema", timestampSchema)
.format("avro")
.save(path)
}
// The file metadata indicates if it needs rebase or not, so we can always get
// the correct result regardless of the "rebase mode" config.
runInMode(Seq(LEGACY, CORRECTED, EXCEPTION)) { options =>
checkAnswer(
spark.read
.options(options)
.schema("ts timestamp")
.format("avro").load(path)
.select($"ts".cast("string")),
Row(rebased))
}
            // Force no rebase to prove that the written datetime values were rebased, and that we
            // would get a wrong result if we did not rebase them back while reading.
withSQLConf("spark.test.forceNoRebase" -> "true") {
checkAnswer(
spark.read
.schema("ts timestamp")
.format("avro").load(path)
.select($"ts".cast("string")),
Row(nonRebased))
}
}
}
}
}
}
test("SPARK-31183: rebasing dates in write") {
withTempPath { dir =>
val path = dir.getAbsolutePath
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
Seq("1001-01-01").toDF("dateS")
.select($"dateS".cast("date").as("date"))
.write.format("avro")
.save(path)
}
// The file metadata indicates if it needs rebase or not, so we can always get the correct
// result regardless of the "rebase mode" config.
runInMode(Seq(LEGACY, CORRECTED, EXCEPTION)) { options =>
checkAnswer(
spark.read.options(options).format("avro").load(path),
Row(Date.valueOf("1001-01-01")))
}
      // Force no rebase to prove that the written datetime values were rebased, and that we
      // would get a wrong result if we did not rebase them back while reading.
withSQLConf("spark.test.forceNoRebase" -> "true") {
checkAnswer(spark.read.format("avro").load(path), Row(Date.valueOf("1001-01-07")))
}
}
}
private def checkMetaData(path: java.io.File, key: String, expectedValue: String): Unit = {
val avroFiles = path.listFiles()
.filter(f => f.isFile && !f.getName.startsWith(".") && !f.getName.startsWith("_"))
assert(avroFiles.length === 1)
val reader = DataFileReader.openReader(avroFiles(0), new GenericDatumReader[GenericRecord]())
val value = reader.asInstanceOf[DataFileReader[_]].getMetaString(key)
assert(value === expectedValue)
}
test("SPARK-31327: Write Spark version into Avro file metadata") {
withTempPath { path =>
spark.range(1).repartition(1).write.format("avro").save(path.getCanonicalPath)
checkMetaData(path, SPARK_VERSION_METADATA_KEY, SPARK_VERSION_SHORT)
}
}
test("SPARK-33163, SPARK-37705: write the metadata key 'org.apache.spark.legacyDateTime' " +
"and 'org.apache.spark.timeZone'") {
def saveTs(dir: java.io.File): Unit = {
Seq(Timestamp.valueOf("2020-10-15 01:02:03")).toDF()
.repartition(1)
.write
.format("avro")
.save(dir.getAbsolutePath)
}
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> LEGACY.toString) {
withTempPath { dir =>
saveTs(dir)
checkMetaData(dir, SPARK_LEGACY_DATETIME_METADATA_KEY, "")
checkMetaData(dir, SPARK_TIMEZONE_METADATA_KEY, SQLConf.get.sessionLocalTimeZone)
}
}
Seq(CORRECTED, EXCEPTION).foreach { mode =>
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> mode.toString) {
withTempPath { dir =>
saveTs(dir)
checkMetaData(dir, SPARK_LEGACY_DATETIME_METADATA_KEY, null)
checkMetaData(dir, SPARK_TIMEZONE_METADATA_KEY, null)
}
}
}
}
test("SPARK-33314: RowReader doesn't over-consume when hasNextRow called twice") {
withTempPath { dir =>
Seq((1), (2), (3))
.toDF("index")
.write
.format("avro")
.save(dir.getCanonicalPath)
val df = spark
.read
.format("avro")
.load(dir.getCanonicalPath)
.orderBy("index")
checkAnswer(df,
Seq(Row(1), Row(2), Row(3)))
}
}
test("SPARK-35427: datetime rebasing in the EXCEPTION mode") {
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_WRITE.key -> EXCEPTION.toString) {
Seq("timestamp-millis", "timestamp-micros").foreach { dt =>
withTempPath { dir =>
val df = Seq("1001-01-01 01:02:03.123456")
.toDF("str")
.select($"str".cast("timestamp").as("dt"))
val avroSchema =
s"""
|{
| "type" : "record",
| "name" : "test_schema",
| "fields" : [
| {"name": "dt", "type": {"type": "long", "logicalType": "$dt"}}
| ]
|}""".stripMargin
val e = intercept[SparkException] {
df.write.format("avro").option("avroSchema", avroSchema).save(dir.getCanonicalPath)
}
val errMsg = e.getCause.getCause.getCause.asInstanceOf[SparkUpgradeException].getMessage
assert(errMsg.contains("You may get a different result due to the upgrading"))
}
}
withTempPath { dir =>
val df = Seq(java.sql.Date.valueOf("1001-01-01")).toDF("dt")
val e = intercept[SparkException] {
df.write.format("avro").save(dir.getCanonicalPath)
}
val errMsg = e.getCause.getCause.getCause.asInstanceOf[SparkUpgradeException].getMessage
assert(errMsg.contains("You may get a different result due to the upgrading"))
}
}
withSQLConf(SQLConf.AVRO_REBASE_MODE_IN_READ.key -> EXCEPTION.toString) {
Seq(
"before_1582_date_v2_4_5.avro",
"before_1582_timestamp_micros_v2_4_5.avro",
"before_1582_timestamp_millis_v2_4_5.avro"
).foreach { fileName =>
val e = intercept[SparkException] {
spark.read.format("avro").load(getResourceAvroFilePath(fileName)).collect()
}
val errMsg = e.getCause.asInstanceOf[SparkUpgradeException].getMessage
assert(errMsg.contains("You may get a different result due to the upgrading"))
}
}
}
test("SPARK-33865: CREATE TABLE DDL with avro should check col name") {
withTable("test_ddl") {
withView("v") {
spark.range(1).createTempView("v")
withTempDir { dir =>
val e = intercept[AnalysisException] {
sql(
s"""
|CREATE TABLE test_ddl USING AVRO
|LOCATION '${dir}'
|AS SELECT ID, IF(ID=1,1,0) FROM v""".stripMargin)
}.getMessage
assert(e.contains("Column name \\"(IF((ID = 1), 1, 0))\\" contains invalid character(s)."))
}
withTempDir { dir =>
spark.sql(
s"""
|CREATE TABLE test_ddl USING AVRO
|LOCATION '${dir}'
|AS SELECT ID, IF(ID=1,ID,0) AS A, ABS(ID) AS B
|FROM v""".stripMargin)
val expectedSchema = StructType(Seq(StructField("ID", LongType, true),
StructField("A", LongType, true), StructField("B", LongType, true)))
assert(spark.table("test_ddl").schema == expectedSchema)
}
}
}
}
test("SPARK-37225: Support reading and writing ANSI intervals") {
Seq(
YearMonthIntervalType() -> ((i: Int) => java.time.Period.of(i, i, 0)),
DayTimeIntervalType() -> ((i: Int) => java.time.Duration.ofDays(i).plusSeconds(i))
).foreach { case (it, f) =>
val data = (1 to 10).map(i => Row(i, f(i)))
val schema = StructType(Array(StructField("d", IntegerType, false),
StructField("i", it, false)))
withTempPath { file =>
val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
df.write.format("avro").save(file.getCanonicalPath)
val df2 = spark.read.format("avro").load(file.getCanonicalPath)
checkAnswer(df2, df.collect().toSeq)
}
}
// Tests for ANSI intervals in complex types.
withTempPath { file =>
val df = spark.sql(
"""SELECT
| named_struct('interval', interval '1-2' year to month) a,
| array(interval '1 2:3' day to minute) b,
| map('key', interval '10' year) c""".stripMargin)
df.write.format("avro").save(file.getCanonicalPath)
val df2 = spark.read.format("avro").load(file.getCanonicalPath)
checkAnswer(df2, df.collect().toSeq)
}
}
}
class AvroV1Suite extends AvroSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "avro")
test("SPARK-36271: V1 insert should check schema field name too") {
withView("v") {
spark.range(1).createTempView("v")
withTempDir { dir =>
val e = intercept[AnalysisException] {
sql("SELECT ID, IF(ID=1,1,0) FROM v").write.mode(SaveMode.Overwrite)
.format("avro").save(dir.getCanonicalPath)
}.getMessage
assert(e.contains("Column name \\"(IF((ID = 1), 1, 0))\\" contains invalid character(s)."))
}
withTempDir { dir =>
val e = intercept[AnalysisException] {
sql("SELECT NAMED_STRUCT('(IF((ID = 1), 1, 0))', IF(ID=1,ID,0)) AS col1 FROM v")
.write.mode(SaveMode.Overwrite)
.format("avro").save(dir.getCanonicalPath)
}.getMessage
assert(e.contains("Column name \\"(IF((ID = 1), 1, 0))\\" contains invalid character(s)."))
}
}
}
}
class AvroV2Suite extends AvroSuite with ExplainSuiteHelper {
import testImplicits._
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
test("Avro source v2: support partition pruning") {
withTempPath { dir =>
Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
.toDF("value", "p1", "p2")
.write
.format("avro")
.partitionBy("p1", "p2")
.save(dir.getCanonicalPath)
val df = spark
.read
.format("avro")
.load(dir.getCanonicalPath)
.where("p1 = 1 and p2 = 2 and value != \\"a\\"")
val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
case f: Filter => f.condition
}
assert(filterCondition.isDefined)
// The partitions filters should be pushed down and no need to be reevaluated.
assert(filterCondition.get.collectFirst {
case a: AttributeReference if a.name == "p1" || a.name == "p2" => a
}.isEmpty)
val fileScan = df.queryExecution.executedPlan collectFirst {
case BatchScanExec(_, f: AvroScan, _) => f
}
assert(fileScan.nonEmpty)
assert(fileScan.get.partitionFilters.nonEmpty)
assert(fileScan.get.dataFilters.nonEmpty)
assert(fileScan.get.planInputPartitions().forall { partition =>
partition.asInstanceOf[FilePartition].files.forall { file =>
file.filePath.contains("p1=1") && file.filePath.contains("p2=2")
}
})
checkAnswer(df, Row("b", 1, 2))
}
}
test("Avro source v2: support passing data filters to FileScan without partitionFilters") {
withTempPath { dir =>
Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
.toDF("value", "p1", "p2")
.write
.format("avro")
.save(dir.getCanonicalPath)
val df = spark
.read
.format("avro")
.load(dir.getCanonicalPath)
.where("value = 'a'")
val filterCondition = df.queryExecution.optimizedPlan.collectFirst {
case f: Filter => f.condition
}
assert(filterCondition.isDefined)
val fileScan = df.queryExecution.executedPlan collectFirst {
case BatchScanExec(_, f: AvroScan, _) => f
}
assert(fileScan.nonEmpty)
assert(fileScan.get.partitionFilters.isEmpty)
assert(fileScan.get.dataFilters.nonEmpty)
checkAnswer(df, Row("a", 1, 2))
}
}
private def getBatchScanExec(plan: SparkPlan): BatchScanExec = {
plan.find(_.isInstanceOf[BatchScanExec]).get.asInstanceOf[BatchScanExec]
}
test("Avro source v2: same result with different orders of data filters and partition filters") {
withTempPath { path =>
val tmpDir = path.getCanonicalPath
spark
.range(10)
.selectExpr("id as a", "id + 1 as b", "id + 2 as c", "id + 3 as d")
.write
.partitionBy("a", "b")
.format("avro")
.save(tmpDir)
val df = spark.read.format("avro").load(tmpDir)
// partition filters: a > 1 AND b < 9
// data filters: c > 1 AND d < 9
val plan1 = df.where("a > 1 AND b < 9 AND c > 1 AND d < 9").queryExecution.sparkPlan
val plan2 = df.where("b < 9 AND a > 1 AND d < 9 AND c > 1").queryExecution.sparkPlan
assert(plan1.sameResult(plan2))
val scan1 = getBatchScanExec(plan1)
val scan2 = getBatchScanExec(plan2)
assert(scan1.sameResult(scan2))
}
}
test("explain formatted on an avro data source v2") {
withTempDir { dir =>
val basePath = dir.getCanonicalPath + "/avro"
val expected_plan_fragment =
s"""
           |\\(1\\) BatchScan
           |Output \\[2\\]: \\[value#xL, id#x\\]
           |DataFilters: \\[isnotnull\\(value#xL\\), \\(value#xL > 2\\)\\]
           |Format: avro
           |Location: InMemoryFileIndex\\([0-9]+ paths\\)\\[.*\\]
           |PartitionFilters: \\[isnotnull\\(id#x\\), \\(id#x > 1\\)\\]
           |PushedFilters: \\[IsNotNull\\(value\\), GreaterThan\\(value,2\\)\\]
           |ReadSchema: struct\\<value:bigint\\>
|""".stripMargin.trim
spark.range(10)
.select(col("id"), col("id").as("value"))
.write.option("header", true)
.partitionBy("id")
.format("avro")
.save(basePath)
val df = spark
.read
.format("avro")
.load(basePath).where($"id" > 1 && $"value" > 2)
val normalizedOutput = getNormalizedExplain(df, FormattedMode)
assert(expected_plan_fragment.r.findAllMatchIn(normalizedOutput).length == 1,
normalizedOutput)
}
}
test("SPARK-32346: filters pushdown to Avro datasource v2") {
Seq(true, false).foreach { filtersPushdown =>
withSQLConf(SQLConf.AVRO_FILTER_PUSHDOWN_ENABLED.key -> filtersPushdown.toString) {
withTempPath { dir =>
Seq(("a", 1, 2), ("b", 1, 2), ("c", 2, 1))
.toDF("value", "p1", "p2")
.write
.format("avro")
.save(dir.getCanonicalPath)
val df = spark
.read
.format("avro")
.load(dir.getCanonicalPath)
.where("value = 'a'")
val fileScan = df.queryExecution.executedPlan collectFirst {
case BatchScanExec(_, f: AvroScan, _) => f
}
assert(fileScan.nonEmpty)
if (filtersPushdown) {
assert(fileScan.get.pushedFilters.nonEmpty)
} else {
assert(fileScan.get.pushedFilters.isEmpty)
}
checkAnswer(df, Row("a", 1, 2))
}
}
}
}
}
|
ueshin/apache-spark
|
external/avro/src/test/scala/org/apache/spark/sql/avro/AvroSuite.scala
|
Scala
|
apache-2.0
| 94,015 |
package org.example.algorithms.trees
import java.util.NoSuchElementException
import scala.annotation.tailrec
/** Claim: an AVL tree with n keys has height h = O(log n).
  *
  * Lemma: a tree of height h has at least F(h+2) - 1 vertices, where F(n) denotes the
  * Fibonacci numbers.
  * Proof. Let m(h) be the minimal number of vertices of an AVL tree of height h.
  * Clearly m(h+2) = m(h+1) + m(h) + 1 (a root plus minimal subtrees of heights h+1 and h).
  * By induction: base cases m(1) = 1 = F(3) - 1 and m(2) = 2 = F(4) - 1;
  * step: m(h+2) = (F(h+3) - 1) + (F(h+2) - 1) + 1 = F(h+4) - 1.
  *
  * The claim follows: F(h) = Omega(phi^h) with phi = (sqrt(5) + 1) / 2,
  * so n >= m(h) = Omega(phi^h); taking logarithms gives h = O(log n).
  */
object AVLTree {
class AlreadyExistsException extends Exception
abstract class Tree[+T <% Ordered[T]] extends Iterable[T] {
def getOr[U >: T](default: U): U
def getOr[U, TT >: T](default: U, f: NonEmptyTree[TT] => U): U
def _left = asInstanceOf[NonEmptyTree[T]].left
def _right = asInstanceOf[NonEmptyTree[T]].right
def _elem = asInstanceOf[NonEmptyTree[T]].elem
def -[U >: T <% Ordered[U]](e: U) = remove(this, e)
def +[U >: T <% Ordered[U]](e: U) = insert(this, e)
}
case object Empty extends Tree[Nothing] {
override def getOr[U >: Nothing](default: U): U = default
override def getOr[U, TT >: Nothing](default: U, f: (NonEmptyTree[TT]) => U): U = default
override def toString = ""
override def iterator: Iterator[Nothing] = Iterator.empty
}
case class NonEmptyTree[+T <% Ordered[T]](elem: T, left: Tree[T], right: Tree[T]) extends Tree[T] {
private[AVLTree] val depthValue: Int = (depth(left) max depth(right)) + 1
override def getOr[U, TT >: T](default: U, f: (NonEmptyTree[TT]) => U): U = f(this)
override def getOr[U >: T](default: U): U = elem
override def toString = s"< $left ]$elem[ $right >"
def isLeaf = left == Empty && right == Empty
override def iterator: Iterator[T] = left.iterator ++ Iterator(elem) ++ right.iterator
}
  private def balance[T](t: Tree[T]) = t.getOr(0, (t: NonEmptyTree[T]) => depth(t.left) - depth(t.right))
def depth[T](tree: Tree[T]): Int = tree.getOr(0, (t: NonEmptyTree[T]) => t.depthValue)
def Root[T <% Ordered[T]](element: T) = NonEmptyTree(element, Empty, Empty)
private def insert[T <% Ordered[T]](tree: Tree[T], elem: T): NonEmptyTree[T] =
tree match {
case Empty => Root(elem)
case tree: NonEmptyTree[T] => rebalance(
        // compareTo only guarantees a negative, zero or positive result (not exactly -1/0/1),
        // so match on the sign of the comparison.
        elem.compareTo(tree.elem) match {
          case c if c < 0 => NonEmptyTree(tree.elem, insert(tree.left, elem), tree.right)
          case 0 => throw new AlreadyExistsException
          case _ => NonEmptyTree(tree.elem, tree.left, insert(tree.right, elem))
        }
)
}
  // Node with the smallest key in the subtree: follow left children while they exist.
  @tailrec
  def min[T <% Ordered[T]](tree: NonEmptyTree[T]): NonEmptyTree[T] = tree match {
    case NonEmptyTree(_, l: NonEmptyTree[T], _) => min(l)
    case _ => tree
  }
  // Node with the largest key in the subtree: follow right children while they exist.
  @tailrec
  def max[T <% Ordered[T]](tree: NonEmptyTree[T]): NonEmptyTree[T] = tree match {
    case NonEmptyTree(_, _, r: NonEmptyTree[T]) => max(r)
    case _ => tree
  }
private def removeElem[T <% Ordered[T]](tree: Tree[T], element: T): Tree[T] = tree match {
case Empty => throw new NoSuchElementException()
case t@NonEmptyTree(e, l, r) => element compareTo e match {
      // Match on the sign of compareTo (see insert above) rather than on -1/1 exactly.
      case c if c > 0 => NonEmptyTree(e, l, remove(r, element))
      case c if c < 0 => NonEmptyTree(e, remove(l, element), r)
case 0 => (l, r) match {
case (Empty, Empty) => Empty
case (Empty, r: NonEmptyTree[T]) => r
case (l: NonEmptyTree[T], Empty) => l
case (l: NonEmptyTree[T], r: NonEmptyTree[T]) =>
val substElem = min(r).elem
val newR = remove(r, substElem)
NonEmptyTree(substElem, l, newR)
}
}
}
private def remove[T <% Ordered[T]](tree: Tree[T], element: T): Tree[T] = removeElem(tree, element) match {
case Empty => Empty
case ne: NonEmptyTree[T] => rebalance(ne)
}
private def rebalance[T <% Ordered[T]](t: NonEmptyTree[T]): NonEmptyTree[T] =
balance(t) match {
case -1 | 0 | 1 => t
case 2 => if (balance(t.left) == -1) leftRight(t) else leftLeft(t)
case -2 => if (balance(t.right) == 1) rightLeft(t) else rightRight(t)
case _ => throw new IllegalStateException("Balance factor can not be " + balance(t))
}
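  // Rotation cases handled by the helpers below (descriptive note):
  //   balance == +2 and the left child leans right  -> double rotation (leftRight)
  //   balance == +2 otherwise                        -> single rotation (leftLeft)
  //   balance == -2 and the right child leans left   -> double rotation (rightLeft)
  //   balance == -2 otherwise                        -> single rotation (rightRight)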
private def leftRight[T <% Ordered[T]](tree: NonEmptyTree[T]): NonEmptyTree[T] = {
val A = tree.left._left
val B = tree.left._right._left
val C = tree.left._right._right
val D = tree.right
val newLeft = NonEmptyTree(tree.left._elem, A, B)
val newRight = NonEmptyTree(tree.elem, C, D)
NonEmptyTree(tree.left._right._elem, newLeft, newRight)
}
private def leftLeft[T <% Ordered[T]](tree: NonEmptyTree[T]): NonEmptyTree[T] = {
val A = tree.left._left._left
val B = tree.left._left._right
val C = tree.left._right
val D = tree.right
val newLeft = NonEmptyTree(tree.left._left._elem, A, B)
val newRight = NonEmptyTree(tree.elem, C, D)
NonEmptyTree(tree.left._elem, newLeft, newRight)
}
private def rightLeft[T <% Ordered[T]](tree: NonEmptyTree[T]): NonEmptyTree[T] = {
val A = tree.left
val B = tree.right._left._left
val C = tree.right._left._right
val D = tree.right._right
val newLeft = NonEmptyTree(tree.elem, A, B)
val newRight = NonEmptyTree(tree._right._elem, C, D)
NonEmptyTree(tree.right._left._elem, newLeft, newRight)
}
private def rightRight[T <% Ordered[T]](tree: NonEmptyTree[T]): NonEmptyTree[T] = {
val A = tree.left
val B = tree.right._left
val C = tree.right._right._left
val D = tree.right._right._right
val newLeft = NonEmptyTree(tree.elem, A, B)
val newRight = NonEmptyTree(tree.right._right._elem, C, D)
NonEmptyTree(tree.right._elem, newLeft, newRight)
}
def apply[T <% Ordered[T]](x: T, elems: T*): NonEmptyTree[T] = {
var t = Root(x)
for (elem <- elems)
t += elem
t
}
}
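// A minimal usage sketch, added for illustration only (`AVLTreeDemo` is not part of the
// original library). It exercises just the public API defined above -- AVLTree.apply, +, -,
// min, max and depth -- and shows that the height stays logarithmic even when the keys are
// inserted in sorted (worst-case) order.
object AVLTreeDemo {
  import AVLTree._

  def main(args: Array[String]): Unit = {
    // Insert the keys 1..1023 in sorted order; the rotations keep the tree balanced.
    val tree = AVLTree(1, (2 to 1023): _*)

    println(s"size   = ${tree.size}")                               // 1023, via Iterable
    println(s"min    = ${min(tree).elem}, max = ${max(tree).elem}") // 1, 1023
    // The lemma in the header comment bounds the height by roughly 1.44 * log2(n),
    // i.e. about 14 for n = 1023 (a perfectly balanced tree would have height 10).
    println(s"height = ${depth(tree)}")

    val smaller = tree - 512  // functional delete: returns a new, rebalanced tree
    println(s"size after removing 512 = ${smaller.size}")           // 1022
  }
}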
|
sayon/scala-algorithm
|
src/org/example/algorithms/trees/AVLTree.scala
|
Scala
|
bsd-3-clause
| 5,855 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import java.util
import java.util.Locale
import java.util.concurrent.atomic.AtomicBoolean
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.encoders.OuterScopes
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.SubExprUtils._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.objects._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.trees.TreeNodeRef
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.connector.catalog._
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
import org.apache.spark.sql.connector.catalog.TableChange.{AddColumn, After, ColumnChange, ColumnPosition, DeleteColumn, RenameColumn, UpdateColumnComment, UpdateColumnNullability, UpdateColumnPosition, UpdateColumnType}
import org.apache.spark.sql.connector.expressions.{FieldReference, IdentityTransform, Transform}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.{PartitionOverwriteMode, StoreAssignmentPolicy}
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
* A trivial [[Analyzer]] with a dummy [[SessionCatalog]] and [[EmptyFunctionRegistry]].
* Used for testing when all relations are already filled in and the analyzer needs only
* to resolve attribute references.
*/
object SimpleAnalyzer extends Analyzer(
new CatalogManager(
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true),
FakeV2SessionCatalog,
new SessionCatalog(
new InMemoryCatalog,
EmptyFunctionRegistry,
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)) {
override def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {}
}),
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
object FakeV2SessionCatalog extends TableCatalog {
private def fail() = throw new UnsupportedOperationException
override def listTables(namespace: Array[String]): Array[Identifier] = fail()
override def loadTable(ident: Identifier): Table = {
throw new NoSuchTableException(ident.toString)
}
override def createTable(
ident: Identifier,
schema: StructType,
partitions: Array[Transform],
properties: util.Map[String, String]): Table = fail()
override def alterTable(ident: Identifier, changes: TableChange*): Table = fail()
override def dropTable(ident: Identifier): Boolean = fail()
override def renameTable(oldIdent: Identifier, newIdent: Identifier): Unit = fail()
override def initialize(name: String, options: CaseInsensitiveStringMap): Unit = fail()
override def name(): String = CatalogManager.SESSION_CATALOG_NAME
}
/**
 * Provides a way to keep state during analysis; this enables us to decouple the concerns
 * of the analysis environment from the catalog.
* The state that is kept here is per-query.
*
* Note this is thread local.
*
* @param catalogAndNamespace The catalog and namespace used in the view resolution. This overrides
* the current catalog and namespace when resolving relations inside
* views.
* @param nestedViewDepth The nested depth in the view resolution, this enables us to limit the
* depth of nested views.
* @param relationCache A mapping from qualified table names to resolved relations. This can ensure
* that the table is resolved only once if a table is used multiple times
* in a query.
*/
case class AnalysisContext(
catalogAndNamespace: Seq[String] = Nil,
nestedViewDepth: Int = 0,
relationCache: mutable.Map[Seq[String], LogicalPlan] = mutable.Map.empty)
object AnalysisContext {
private val value = new ThreadLocal[AnalysisContext]() {
override def initialValue: AnalysisContext = AnalysisContext()
}
def get: AnalysisContext = value.get()
def reset(): Unit = value.remove()
private def set(context: AnalysisContext): Unit = value.set(context)
def withAnalysisContext[A](catalogAndNamespace: Seq[String])(f: => A): A = {
val originContext = value.get()
val context = AnalysisContext(
catalogAndNamespace, originContext.nestedViewDepth + 1, originContext.relationCache)
set(context)
try f finally { set(originContext) }
}
}
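// Usage sketch (illustrative; `viewCatalogAndNamespace` and `resolveViewBody` are hypothetical
// names, not defined in this file): a caller resolving a view body would wrap the recursive
// resolution as
//   AnalysisContext.withAnalysisContext(viewCatalogAndNamespace) { resolveViewBody(plan) }
// so that the view's catalog/namespace and the incremented nested depth are visible only for
// that call, and the previous context is restored afterwards (even if an exception is thrown).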
/**
* Provides a logical query plan analyzer, which translates [[UnresolvedAttribute]]s and
* [[UnresolvedRelation]]s into fully typed objects using information in a [[SessionCatalog]].
*/
class Analyzer(
override val catalogManager: CatalogManager,
conf: SQLConf,
maxIterations: Int)
extends RuleExecutor[LogicalPlan] with CheckAnalysis with LookupCatalog {
private val v1SessionCatalog: SessionCatalog = catalogManager.v1SessionCatalog
override def isView(nameParts: Seq[String]): Boolean = v1SessionCatalog.isView(nameParts)
// Only for tests.
def this(catalog: SessionCatalog, conf: SQLConf) = {
this(
new CatalogManager(conf, FakeV2SessionCatalog, catalog),
conf,
conf.analyzerMaxIterations)
}
def this(catalogManager: CatalogManager, conf: SQLConf) = {
this(catalogManager, conf, conf.analyzerMaxIterations)
}
def executeAndCheck(plan: LogicalPlan, tracker: QueryPlanningTracker): LogicalPlan = {
AnalysisHelper.markInAnalyzer {
val analyzed = executeAndTrack(plan, tracker)
try {
checkAnalysis(analyzed)
analyzed
} catch {
case e: AnalysisException =>
val ae = new AnalysisException(e.message, e.line, e.startPosition, Option(analyzed))
ae.setStackTrace(e.getStackTrace)
throw ae
}
}
}
override def execute(plan: LogicalPlan): LogicalPlan = {
AnalysisContext.reset()
try {
executeSameContext(plan)
} finally {
AnalysisContext.reset()
}
}
private def executeSameContext(plan: LogicalPlan): LogicalPlan = super.execute(plan)
def resolver: Resolver = conf.resolver
/**
 * If the plan cannot be resolved within maxIterations, the analyzer will throw an exception to
 * inform the user to increase the value of SQLConf.ANALYZER_MAX_ITERATIONS.
*/
protected val fixedPoint =
FixedPoint(
maxIterations,
errorOnExceed = true,
maxIterationsSetting = SQLConf.ANALYZER_MAX_ITERATIONS.key)
/**
* Override to provide additional rules for the "Resolution" batch.
*/
val extendedResolutionRules: Seq[Rule[LogicalPlan]] = Nil
/**
* Override to provide rules to do post-hoc resolution. Note that these rules will be executed
 * in an individual batch. This batch runs right after the normal resolution batch and
 * executes its rules in one pass.
*/
val postHocResolutionRules: Seq[Rule[LogicalPlan]] = Nil
lazy val batches: Seq[Batch] = Seq(
Batch("Hints", fixedPoint,
new ResolveHints.ResolveJoinStrategyHints(conf),
new ResolveHints.ResolveCoalesceHints(conf)),
Batch("Simple Sanity Check", Once,
LookupFunctions),
Batch("Substitution", fixedPoint,
CTESubstitution,
WindowsSubstitution,
EliminateUnions,
new SubstituteUnresolvedOrdinals(conf)),
Batch("Resolution", fixedPoint,
ResolveTableValuedFunctions ::
ResolveNamespace(catalogManager) ::
new ResolveCatalogs(catalogManager) ::
ResolveInsertInto ::
ResolveRelations ::
ResolveTables ::
ResolveReferences ::
ResolveCreateNamedStruct ::
ResolveDeserializer ::
ResolveNewInstance ::
ResolveUpCast ::
ResolveGroupingAnalytics ::
ResolvePivot ::
ResolveOrdinalInOrderByAndGroupBy ::
ResolveAggAliasInGroupBy ::
ResolveMissingReferences ::
ExtractGenerator ::
ResolveGenerate ::
ResolveFunctions ::
ResolveAliases ::
ResolveSubquery ::
ResolveSubqueryColumnAliases ::
ResolveWindowOrder ::
ResolveWindowFrame ::
ResolveNaturalAndUsingJoin ::
ResolveOutputRelation ::
ExtractWindowExpressions ::
GlobalAggregates ::
ResolveAggregateFunctions ::
TimeWindowing ::
ResolveInlineTables(conf) ::
ResolveHigherOrderFunctions(v1SessionCatalog) ::
ResolveLambdaVariables(conf) ::
ResolveTimeZone(conf) ::
ResolveRandomSeed ::
ResolveBinaryArithmetic(conf) ::
TypeCoercion.typeCoercionRules(conf) ++
extendedResolutionRules : _*),
Batch("Post-Hoc Resolution", Once, postHocResolutionRules: _*),
Batch("Normalize Alter Table", Once, ResolveAlterTableChanges),
Batch("Remove Unresolved Hints", Once,
new ResolveHints.RemoveAllHints(conf)),
Batch("Nondeterministic", Once,
PullOutNondeterministic),
Batch("UDF", Once,
HandleNullInputsForUDF),
Batch("UpdateNullability", Once,
UpdateAttributeNullability),
Batch("Subquery", Once,
UpdateOuterReferences),
Batch("Cleanup", fixedPoint,
CleanupAliases)
)
/**
 * For [[Add]]:
 * 1. if both sides are intervals, stays the same;
 * 2. else if one side is an interval, turns it to [[TimeAdd]];
 * 3. else if one side is a date, turns it to [[DateAdd]];
 * 4. else stays the same.
 *
 * For [[Subtract]]:
 * 1. if both sides are intervals, stays the same;
 * 2. else if the right side is an interval, turns it to [[TimeSub]];
 * 3. else if one side is a timestamp, turns it to [[SubtractTimestamps]];
 * 4. else if the right side is a date, turns it to [[DateDiff]]/[[SubtractDates]];
 * 5. else if the left side is a date, turns it to [[DateSub]];
 * 6. else stays the same.
 *
 * For [[Multiply]]:
 * 1. if one side is an interval, turns it to [[MultiplyInterval]];
 * 2. otherwise, stays the same.
 *
 * For [[Divide]]:
 * 1. if the left side is an interval, turns it to [[DivideInterval]];
 * 2. otherwise, stays the same.
*/
case class ResolveBinaryArithmetic(conf: SQLConf) extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan => p.transformExpressionsUp {
case a @ Add(l, r) if a.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, CalendarIntervalType) => a
case (_, CalendarIntervalType) => Cast(TimeAdd(l, r), l.dataType)
case (CalendarIntervalType, _) => Cast(TimeAdd(r, l), r.dataType)
case (DateType, _) => DateAdd(l, r)
case (_, DateType) => DateAdd(r, l)
case _ => a
}
case s @ Subtract(l, r) if s.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, CalendarIntervalType) => s
case (_, CalendarIntervalType) => Cast(TimeSub(l, r), l.dataType)
case (TimestampType, _) => SubtractTimestamps(l, r)
case (_, TimestampType) => SubtractTimestamps(l, r)
case (_, DateType) => SubtractDates(l, r)
case (DateType, _) => DateSub(l, r)
case _ => s
}
case m @ Multiply(l, r) if m.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, _) => MultiplyInterval(l, r)
case (_, CalendarIntervalType) => MultiplyInterval(r, l)
case _ => m
}
case d @ Divide(l, r) if d.childrenResolved => (l.dataType, r.dataType) match {
case (CalendarIntervalType, _) => DivideInterval(l, r)
case _ => d
}
}
}
}
/**
* Substitute child plan with WindowSpecDefinitions.
*/
object WindowsSubstitution extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Lookup WindowSpecDefinitions. This rule works with unresolved children.
case WithWindowDefinition(windowDefinitions, child) => child.resolveExpressions {
case UnresolvedWindowExpression(c, WindowSpecReference(windowName)) =>
val errorMessage =
s"Window specification $windowName is not defined in the WINDOW clause."
val windowSpecDefinition =
windowDefinitions.getOrElse(windowName, failAnalysis(errorMessage))
WindowExpression(c, windowSpecDefinition)
}
}
}
/**
* Replaces [[UnresolvedAlias]]s with concrete aliases.
*/
object ResolveAliases extends Rule[LogicalPlan] {
private def assignAliases(exprs: Seq[NamedExpression]) = {
exprs.map(_.transformUp { case u @ UnresolvedAlias(child, optGenAliasFunc) =>
child match {
case ne: NamedExpression => ne
case go @ GeneratorOuter(g: Generator) if g.resolved => MultiAlias(go, Nil)
case e if !e.resolved => u
case g: Generator => MultiAlias(g, Nil)
case c @ Cast(ne: NamedExpression, _, _) => Alias(c, ne.name)()
case e: ExtractValue => Alias(e, toPrettySQL(e))()
case e if optGenAliasFunc.isDefined =>
Alias(child, optGenAliasFunc.get.apply(e))()
case e => Alias(e, toPrettySQL(e))()
}
}
).asInstanceOf[Seq[NamedExpression]]
}
private def hasUnresolvedAlias(exprs: Seq[NamedExpression]) =
exprs.exists(_.find(_.isInstanceOf[UnresolvedAlias]).isDefined)
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Aggregate(groups, aggs, child) if child.resolved && hasUnresolvedAlias(aggs) =>
Aggregate(groups, assignAliases(aggs), child)
case g: GroupingSets if g.child.resolved && hasUnresolvedAlias(g.aggregations) =>
g.copy(aggregations = assignAliases(g.aggregations))
case Pivot(groupByOpt, pivotColumn, pivotValues, aggregates, child)
if child.resolved && groupByOpt.isDefined && hasUnresolvedAlias(groupByOpt.get) =>
Pivot(Some(assignAliases(groupByOpt.get)), pivotColumn, pivotValues, aggregates, child)
case Project(projectList, child) if child.resolved && hasUnresolvedAlias(projectList) =>
Project(assignAliases(projectList), child)
}
}
object ResolveGroupingAnalytics extends Rule[LogicalPlan] {
/*
* GROUP BY a, b, c WITH ROLLUP
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (a), ( ) ).
* Group Count: N + 1 (N is the number of group expressions)
*
* We need to get all of its subsets for the rule described above; each subset is
* represented as a sequence of expressions.
*/
def rollupExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.inits.toIndexedSeq
/*
* GROUP BY a, b, c WITH CUBE
* is equivalent to
* GROUP BY a, b, c GROUPING SETS ( (a, b, c), (a, b), (b, c), (a, c), (a), (b), (c), ( ) ).
* Group Count: 2 ^ N (N is the number of group expressions)
*
* We need to get all of its subsets for a given GROUP BY expression; the subsets are
* represented as sequences of expressions.
*/
def cubeExprs(exprs: Seq[Expression]): Seq[Seq[Expression]] = {
// `cubeExprs0` is recursive and returns a lazy Stream. Here we call `toIndexedSeq` to
// materialize it and avoid serialization problems later on.
cubeExprs0(exprs).toIndexedSeq
}
def cubeExprs0(exprs: Seq[Expression]): Seq[Seq[Expression]] = exprs.toList match {
case x :: xs =>
val initial = cubeExprs0(xs)
initial.map(x +: _) ++ initial
case Nil =>
Seq(Seq.empty)
}
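// Worked example (illustrative, follows from the definitions above): for exprs = Seq(a, b),
//   rollupExprs(Seq(a, b)) == Seq(Seq(a, b), Seq(a), Seq())               // exprs.inits
//   cubeExprs(Seq(a, b))   == Seq(Seq(a, b), Seq(a), Seq(b), Seq())       // all subsets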
private[analysis] def hasGroupingFunction(e: Expression): Boolean = {
e.collectFirst {
case g: Grouping => g
case g: GroupingID => g
}.isDefined
}
private def replaceGroupingFunc(
expr: Expression,
groupByExprs: Seq[Expression],
gid: Expression): Expression = {
expr transform {
case e: GroupingID =>
if (e.groupByExprs.isEmpty ||
e.groupByExprs.map(_.canonicalized) == groupByExprs.map(_.canonicalized)) {
Alias(gid, toPrettySQL(e))()
} else {
throw new AnalysisException(
s"Columns of grouping_id (${e.groupByExprs.mkString(",")}) does not match " +
s"grouping columns (${groupByExprs.mkString(",")})")
}
case e @ Grouping(col: Expression) =>
val idx = groupByExprs.indexWhere(_.semanticEquals(col))
if (idx >= 0) {
Alias(Cast(BitwiseAnd(ShiftRight(gid, Literal(groupByExprs.length - 1 - idx)),
Literal(1L)), ByteType), toPrettySQL(e))()
} else {
throw new AnalysisException(s"Column of grouping ($col) can't be found " +
s"in grouping columns ${groupByExprs.mkString(",")}")
}
}
}
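// Illustrative note: with groupByExprs = Seq(a, b), grouping(a) (idx = 0) is rewritten above to
// Cast(BitwiseAnd(ShiftRight(gid, Literal(1)), Literal(1L)), ByteType), i.e. it reads bit
// (length - 1 - idx) of the grouping id, which is 1 when `a` has been nulled out by Expand.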
/*
* Create new alias for all group by expressions for `Expand` operator.
*/
private def constructGroupByAlias(groupByExprs: Seq[Expression]): Seq[Alias] = {
groupByExprs.map {
case e: NamedExpression => Alias(e, e.name)()
case other => Alias(other, other.toString)()
}
}
/*
* Construct [[Expand]] operator with grouping sets.
*/
private def constructExpand(
selectedGroupByExprs: Seq[Seq[Expression]],
child: LogicalPlan,
groupByAliases: Seq[Alias],
gid: Attribute): LogicalPlan = {
// Change the nullability of group by aliases if necessary. For example, if we have
// GROUPING SETS ((a,b), a), we do not need to change the nullability of a, but we
// should change the nullability of b to be TRUE.
// TODO: For Cube/Rollup just set nullability to be `true`.
val expandedAttributes = groupByAliases.map { alias =>
if (selectedGroupByExprs.exists(!_.contains(alias.child))) {
alias.toAttribute.withNullability(true)
} else {
alias.toAttribute
}
}
val groupingSetsAttributes = selectedGroupByExprs.map { groupingSetExprs =>
groupingSetExprs.map { expr =>
val alias = groupByAliases.find(_.child.semanticEquals(expr)).getOrElse(
failAnalysis(s"$expr doesn't show up in the GROUP BY list $groupByAliases"))
// Map alias to expanded attribute.
expandedAttributes.find(_.semanticEquals(alias.toAttribute)).getOrElse(
alias.toAttribute)
}
}
Expand(groupingSetsAttributes, groupByAliases, expandedAttributes, gid, child)
}
/*
* Construct new aggregate expressions by replacing grouping functions.
*/
private def constructAggregateExprs(
groupByExprs: Seq[Expression],
aggregations: Seq[NamedExpression],
groupByAliases: Seq[Alias],
groupingAttrs: Seq[Expression],
gid: Attribute): Seq[NamedExpression] = aggregations.map {
// Collect all the AggregateExpressions found, so we can check whether an expression is part
// of any AggregateExpression or not.
val aggsBuffer = ArrayBuffer[Expression]()
// Returns whether the expression belongs to any expressions in `aggsBuffer` or not.
def isPartOfAggregation(e: Expression): Boolean = {
aggsBuffer.exists(a => a.find(_ eq e).isDefined)
}
replaceGroupingFunc(_, groupByExprs, gid).transformDown {
// AggregateExpression should be computed on the unmodified value of its argument
// expressions, so we should not replace any references to grouping expression
// inside it.
case e: AggregateExpression =>
aggsBuffer += e
e
case e if isPartOfAggregation(e) => e
case e =>
// Replace expression by expand output attribute.
val index = groupByAliases.indexWhere(_.child.semanticEquals(e))
if (index == -1) {
e
} else {
groupingAttrs(index)
}
}.asInstanceOf[NamedExpression]
}
/*
* Construct [[Aggregate]] operator from Cube/Rollup/GroupingSets.
*/
private def constructAggregate(
selectedGroupByExprs: Seq[Seq[Expression]],
groupByExprs: Seq[Expression],
aggregationExprs: Seq[NamedExpression],
child: LogicalPlan): LogicalPlan = {
// In case of ANSI-SQL compliant syntax for GROUPING SETS, groupByExprs is optional and
// can be null. In such case, we derive the groupByExprs from the user supplied values for
// grouping sets.
val finalGroupByExpressions = if (groupByExprs == Nil) {
selectedGroupByExprs.flatten.foldLeft(Seq.empty[Expression]) { (result, currentExpr) =>
// Only unique expressions are included in the group by expressions, and uniqueness is
// determined based on semantic equality. For example, grouping sets ((a * b), (b * a))
// results in the single grouping expression (a * b).
if (result.find(_.semanticEquals(currentExpr)).isDefined) {
result
} else {
result :+ currentExpr
}
}
} else {
groupByExprs
}
if (finalGroupByExpressions.size > GroupingID.dataType.defaultSize * 8) {
throw new AnalysisException(
s"Grouping sets size cannot be greater than ${GroupingID.dataType.defaultSize * 8}")
}
// Expand works by setting grouping expressions to null as determined by the
// `selectedGroupByExprs`. To prevent these null values from being used in an aggregate
// instead of the original value we need to create new aliases for all group by expressions
// that will only be used for the intended purpose.
val groupByAliases = constructGroupByAlias(finalGroupByExpressions)
val gid = AttributeReference(VirtualColumn.groupingIdName, GroupingID.dataType, false)()
val expand = constructExpand(selectedGroupByExprs, child, groupByAliases, gid)
val groupingAttrs = expand.output.drop(child.output.length)
val aggregations = constructAggregateExprs(
finalGroupByExpressions, aggregationExprs, groupByAliases, groupingAttrs, gid)
Aggregate(groupingAttrs, aggregations, expand)
}
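// Plan shape produced here (sketch): Aggregate(groupingAttrs, aggregations, Expand(..., child)),
// where groupingAttrs are the expanded group-by attributes followed by the grouping id `gid`;
// findGroupingExprs below relies on the grouping id being the last grouping key.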
private def findGroupingExprs(plan: LogicalPlan): Seq[Expression] = {
plan.collectFirst {
case a: Aggregate =>
// this Aggregate should have grouping id as the last grouping key.
val gid = a.groupingExpressions.last
if (!gid.isInstanceOf[AttributeReference]
|| gid.asInstanceOf[AttributeReference].name != VirtualColumn.groupingIdName) {
failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
}
a.groupingExpressions.take(a.groupingExpressions.length - 1)
}.getOrElse {
failAnalysis(s"grouping()/grouping_id() can only be used with GroupingSets/Cube/Rollup")
}
}
// This requires transformUp to replace grouping()/grouping_id() in resolved Filter/Sort.
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsUp {
case a if !a.childrenResolved => a // be sure all of the children are resolved.
// Ensure group by expressions and aggregate expressions have been resolved.
case Aggregate(Seq(c @ Cube(groupByExprs)), aggregateExpressions, child)
if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
constructAggregate(cubeExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
case Aggregate(Seq(r @ Rollup(groupByExprs)), aggregateExpressions, child)
if (groupByExprs ++ aggregateExpressions).forall(_.resolved) =>
constructAggregate(rollupExprs(groupByExprs), groupByExprs, aggregateExpressions, child)
// Ensure all the expressions have been resolved.
case x: GroupingSets if x.expressions.forall(_.resolved) =>
constructAggregate(x.selectedGroupByExprs, x.groupByExprs, x.aggregations, x.child)
// We should make sure all expressions in condition have been resolved.
case f @ Filter(cond, child) if hasGroupingFunction(cond) && cond.resolved =>
val groupingExprs = findGroupingExprs(child)
// The unresolved grouping id will be resolved by ResolveMissingReferences
val newCond = replaceGroupingFunc(cond, groupingExprs, VirtualColumn.groupingIdAttribute)
f.copy(condition = newCond)
// We should make sure all [[SortOrder]]s have been resolved.
case s @ Sort(order, _, child)
if order.exists(hasGroupingFunction) && order.forall(_.resolved) =>
val groupingExprs = findGroupingExprs(child)
val gid = VirtualColumn.groupingIdAttribute
// The unresolved grouping id will be resolved by ResolveMissingReferences
val newOrder = order.map(replaceGroupingFunc(_, groupingExprs, gid).asInstanceOf[SortOrder])
s.copy(order = newOrder)
}
}
object ResolvePivot extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case p: Pivot if !p.childrenResolved || !p.aggregates.forall(_.resolved)
|| (p.groupByExprsOpt.isDefined && !p.groupByExprsOpt.get.forall(_.resolved))
|| !p.pivotColumn.resolved || !p.pivotValues.forall(_.resolved) => p
case Pivot(groupByExprsOpt, pivotColumn, pivotValues, aggregates, child) =>
if (!RowOrdering.isOrderable(pivotColumn.dataType)) {
throw new AnalysisException(
s"Invalid pivot column '${pivotColumn}'. Pivot columns must be comparable.")
}
// Check all aggregate expressions.
aggregates.foreach(checkValidAggregateExpression)
// Check all pivot values are literal and match pivot column data type.
val evalPivotValues = pivotValues.map { value =>
val foldable = value match {
case Alias(v, _) => v.foldable
case _ => value.foldable
}
if (!foldable) {
throw new AnalysisException(
s"Literal expressions required for pivot values, found '$value'")
}
if (!Cast.canCast(value.dataType, pivotColumn.dataType)) {
throw new AnalysisException(s"Invalid pivot value '$value': " +
s"value data type ${value.dataType.simpleString} does not match " +
s"pivot column data type ${pivotColumn.dataType.catalogString}")
}
Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
}
// Group-by expressions coming from SQL are implicit and need to be deduced.
val groupByExprs = groupByExprsOpt.getOrElse {
val pivotColAndAggRefs = pivotColumn.references ++ AttributeSet(aggregates)
child.output.filterNot(pivotColAndAggRefs.contains)
}
val singleAgg = aggregates.size == 1
def outputName(value: Expression, aggregate: Expression): String = {
val stringValue = value match {
case n: NamedExpression => n.name
case _ =>
val utf8Value =
Cast(value, StringType, Some(conf.sessionLocalTimeZone)).eval(EmptyRow)
Option(utf8Value).map(_.toString).getOrElse("null")
}
if (singleAgg) {
stringValue
} else {
val suffix = aggregate match {
case n: NamedExpression => n.name
case _ => toPrettySQL(aggregate)
}
stringValue + "_" + suffix
}
}
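// Illustrative sketch of the naming scheme above: for
//   SELECT * FROM t PIVOT (sum(v) AS s, avg(v) AS a FOR k IN (1, 2))
// the output columns are named "1_s", "1_a", "2_s", "2_a"; with a single aggregate only the
// pivot value ("1", "2") is used as the column name.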
if (aggregates.forall(a => PivotFirst.supportsDataType(a.dataType))) {
// Since evaluating |pivotValues| if-statements for each input row can get slow, this is an
// alternate plan that instead uses two steps of aggregation.
val namedAggExps: Seq[NamedExpression] = aggregates.map(a => Alias(a, a.sql)())
val namedPivotCol = pivotColumn match {
case n: NamedExpression => n
case _ => Alias(pivotColumn, "__pivot_col")()
}
val bigGroup = groupByExprs :+ namedPivotCol
val firstAgg = Aggregate(bigGroup, bigGroup ++ namedAggExps, child)
val pivotAggs = namedAggExps.map { a =>
Alias(PivotFirst(namedPivotCol.toAttribute, a.toAttribute, evalPivotValues)
.toAggregateExpression()
, "__pivot_" + a.sql)()
}
val groupByExprsAttr = groupByExprs.map(_.toAttribute)
val secondAgg = Aggregate(groupByExprsAttr, groupByExprsAttr ++ pivotAggs, firstAgg)
val pivotAggAttribute = pivotAggs.map(_.toAttribute)
val pivotOutputs = pivotValues.zipWithIndex.flatMap { case (value, i) =>
aggregates.zip(pivotAggAttribute).map { case (aggregate, pivotAtt) =>
Alias(ExtractValue(pivotAtt, Literal(i), resolver), outputName(value, aggregate))()
}
}
Project(groupByExprsAttr ++ pivotOutputs, secondAgg)
} else {
val pivotAggregates: Seq[NamedExpression] = pivotValues.flatMap { value =>
def ifExpr(e: Expression) = {
If(
EqualNullSafe(
pivotColumn,
Cast(value, pivotColumn.dataType, Some(conf.sessionLocalTimeZone))),
e, Literal(null))
}
aggregates.map { aggregate =>
val filteredAggregate = aggregate.transformDown {
// Assumption is the aggregate function ignores nulls. This is true for all current
// AggregateFunction's with the exception of First and Last in their default mode
// (which we handle) and possibly some Hive UDAF's.
case First(expr, _) =>
First(ifExpr(expr), Literal(true))
case Last(expr, _) =>
Last(ifExpr(expr), Literal(true))
case a: AggregateFunction =>
a.withNewChildren(a.children.map(ifExpr))
}.transform {
// We are duplicating aggregates that are now computing a different value for each
// pivot value.
// TODO: Don't construct the physical container until after analysis.
case ae: AggregateExpression => ae.copy(resultId = NamedExpression.newExprId)
}
Alias(filteredAggregate, outputName(value, aggregate))()
}
}
Aggregate(groupByExprs, groupByExprs ++ pivotAggregates, child)
}
}
// Support any aggregate expression that can appear in an Aggregate plan except Pandas UDF.
// TODO: Support Pandas UDF.
private def checkValidAggregateExpression(expr: Expression): Unit = expr match {
case _: AggregateExpression => // OK and leave the argument check to CheckAnalysis.
case expr: PythonUDF if PythonUDF.isGroupedAggPandasUDF(expr) =>
failAnalysis("Pandas UDF aggregate expressions are currently not supported in pivot.")
case e: Attribute =>
failAnalysis(
s"Aggregate expression required for pivot, but '${e.sql}' " +
s"did not appear in any aggregate function.")
case e => e.children.foreach(checkValidAggregateExpression)
}
}
case class ResolveNamespace(catalogManager: CatalogManager)
extends Rule[LogicalPlan] with LookupCatalog {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case s @ ShowTables(UnresolvedNamespace(Seq()), _) =>
s.copy(namespace = ResolvedNamespace(currentCatalog, catalogManager.currentNamespace))
case UnresolvedNamespace(Seq()) =>
ResolvedNamespace(currentCatalog, Seq.empty[String])
case UnresolvedNamespace(CatalogAndNamespace(catalog, ns)) =>
ResolvedNamespace(catalog, ns)
}
}
private def isResolvingView: Boolean = AnalysisContext.get.catalogAndNamespace.nonEmpty
/**
* Resolve relations to temp views. This is not an actual rule, and is called by
* [[ResolveTables]] and [[ResolveRelations]].
*/
object ResolveTempViews extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case u @ UnresolvedRelation(ident) =>
lookupTempView(ident).getOrElse(u)
case i @ InsertIntoStatement(UnresolvedRelation(ident), _, _, _, _) =>
lookupTempView(ident)
.map(view => i.copy(table = view))
.getOrElse(i)
case u @ UnresolvedTable(ident) =>
lookupTempView(ident).foreach { _ =>
u.failAnalysis(s"${ident.quoted} is a temp view not table.")
}
u
case u @ UnresolvedTableOrView(ident) =>
lookupTempView(ident).map(_ => ResolvedView(ident.asIdentifier)).getOrElse(u)
}
def lookupTempView(identifier: Seq[String]): Option[LogicalPlan] = {
// A permanent view can't refer to temp views, so there is no need to look them up at all.
if (isResolvingView) return None
identifier match {
case Seq(part1) => v1SessionCatalog.lookupTempView(part1)
case Seq(part1, part2) => v1SessionCatalog.lookupGlobalTempView(part1, part2)
case _ => None
}
}
}
// If we are resolving relations inside views, we need to expand single-part relation names with
// the catalog and namespace that were current when the view was created.
private def expandRelationName(nameParts: Seq[String]): Seq[String] = {
if (!isResolvingView) return nameParts
if (nameParts.length == 1) {
AnalysisContext.get.catalogAndNamespace :+ nameParts.head
} else if (catalogManager.isCatalogRegistered(nameParts.head)) {
nameParts
} else {
AnalysisContext.get.catalogAndNamespace.head +: nameParts
}
}
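// Illustrative sketch: while resolving a view created under catalog/namespace Seq("cat", "db"),
// expandRelationName(Seq("t")) returns Seq("cat", "db", "t"); a multi-part name such as
// Seq("db2", "t"), whose head is not a registered catalog, is expanded to Seq("cat", "db2", "t").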
/**
* Resolve table relations with concrete relations from v2 catalog.
*
* [[ResolveRelations]] still resolves v1 tables.
*/
object ResolveTables extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = ResolveTempViews(plan).resolveOperatorsUp {
case u: UnresolvedRelation =>
lookupV2Relation(u.multipartIdentifier)
.map { rel =>
val ident = rel.identifier.get
SubqueryAlias(rel.catalog.get.name +: ident.namespace :+ ident.name, rel)
}.getOrElse(u)
case u @ UnresolvedTable(NonSessionCatalogAndIdentifier(catalog, ident)) =>
CatalogV2Util.loadTable(catalog, ident)
.map(ResolvedTable(catalog.asTableCatalog, ident, _))
.getOrElse(u)
case u @ UnresolvedTableOrView(NonSessionCatalogAndIdentifier(catalog, ident)) =>
CatalogV2Util.loadTable(catalog, ident)
.map(ResolvedTable(catalog.asTableCatalog, ident, _))
.getOrElse(u)
case i @ InsertIntoStatement(u: UnresolvedRelation, _, _, _, _) if i.query.resolved =>
lookupV2Relation(u.multipartIdentifier)
.map(v2Relation => i.copy(table = v2Relation))
.getOrElse(i)
case alter @ AlterTable(_, _, u: UnresolvedV2Relation, _) =>
CatalogV2Util.loadRelation(u.catalog, u.tableName)
.map(rel => alter.copy(table = rel))
.getOrElse(alter)
case u: UnresolvedV2Relation =>
CatalogV2Util.loadRelation(u.catalog, u.tableName).getOrElse(u)
}
/**
* Performs the lookup of DataSourceV2 Tables from v2 catalog.
*/
private def lookupV2Relation(identifier: Seq[String]): Option[DataSourceV2Relation] =
expandRelationName(identifier) match {
case NonSessionCatalogAndIdentifier(catalog, ident) =>
CatalogV2Util.loadTable(catalog, ident) match {
case Some(table) =>
Some(DataSourceV2Relation.create(table, Some(catalog), Some(ident)))
case None => None
}
case _ => None
}
}
/**
* Replaces [[UnresolvedRelation]]s with concrete relations from the catalog.
*/
object ResolveRelations extends Rule[LogicalPlan] {
// The current catalog and namespace may be different from when the view was created, so we must
// resolve the view's logical plan here, with the catalog and namespace stored in the view metadata.
// This is done by keeping the catalog and namespace in `AnalysisContext`, and analyzer will
// look at `AnalysisContext.catalogAndNamespace` when resolving relations with single-part name.
// If `AnalysisContext.catalogAndNamespace` is non-empty, analyzer will expand single-part names
// with it, instead of current catalog and namespace.
private def resolveViews(plan: LogicalPlan): LogicalPlan = plan match {
// The view's child should be a logical plan parsed from `desc.viewText`; the variable
// `viewText` should be defined, or else we throw an error on the generation of the View
// operator.
case view @ View(desc, _, child) if !child.resolved =>
// Resolve all the UnresolvedRelations and Views in the child.
val newChild = AnalysisContext.withAnalysisContext(desc.viewCatalogAndNamespace) {
if (AnalysisContext.get.nestedViewDepth > conf.maxNestedViewDepth) {
view.failAnalysis(s"The depth of view ${desc.identifier} exceeds the maximum " +
s"view resolution depth (${conf.maxNestedViewDepth}). Analysis is aborted to " +
s"avoid errors. Increase the value of ${SQLConf.MAX_NESTED_VIEW_DEPTH.key} to work " +
"around this.")
}
executeSameContext(child)
}
view.copy(child = newChild)
case p @ SubqueryAlias(_, view: View) =>
p.copy(child = resolveViews(view))
case _ => plan
}
def apply(plan: LogicalPlan): LogicalPlan = ResolveTempViews(plan).resolveOperatorsUp {
case i @ InsertIntoStatement(table, _, _, _, _) if i.query.resolved =>
val relation = table match {
case u: UnresolvedRelation =>
lookupRelation(u.multipartIdentifier).getOrElse(u)
case other => other
}
EliminateSubqueryAliases(relation) match {
case v: View =>
table.failAnalysis(s"Inserting into a view is not allowed. View: ${v.desc.identifier}.")
case other => i.copy(table = other)
}
case u: UnresolvedRelation =>
lookupRelation(u.multipartIdentifier).map(resolveViews).getOrElse(u)
case u @ UnresolvedTable(identifier) =>
lookupTableOrView(identifier).map {
case v: ResolvedView =>
u.failAnalysis(s"${v.identifier.quoted} is a view not table.")
case table => table
}.getOrElse(u)
case u @ UnresolvedTableOrView(identifier) =>
lookupTableOrView(identifier).getOrElse(u)
}
private def lookupTableOrView(identifier: Seq[String]): Option[LogicalPlan] = {
expandRelationName(identifier) match {
case SessionCatalogAndIdentifier(catalog, ident) =>
CatalogV2Util.loadTable(catalog, ident).map {
case v1Table: V1Table if v1Table.v1Table.tableType == CatalogTableType.VIEW =>
ResolvedView(ident)
case table =>
ResolvedTable(catalog.asTableCatalog, ident, table)
}
case _ => None
}
}
// Look up a relation from the session catalog with the following logic:
// 1) If the resolved catalog is not session catalog, return None.
// 2) If a relation is not found in the catalog, return None.
// 3) If a v1 table is found, create a v1 relation. Otherwise, create a v2 relation.
private def lookupRelation(identifier: Seq[String]): Option[LogicalPlan] = {
expandRelationName(identifier) match {
case SessionCatalogAndIdentifier(catalog, ident) =>
def loaded = CatalogV2Util.loadTable(catalog, ident).map {
case v1Table: V1Table =>
v1SessionCatalog.getRelation(v1Table.v1Table)
case table =>
SubqueryAlias(
catalog.name +: ident.asMultipartIdentifier,
DataSourceV2Relation.create(table, Some(catalog), Some(ident)))
}
val key = catalog.name +: ident.namespace :+ ident.name
Option(AnalysisContext.get.relationCache.getOrElseUpdate(key, loaded.orNull))
case _ => None
}
}
}
object ResolveInsertInto extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case i @ InsertIntoStatement(r: DataSourceV2Relation, _, _, _, _) if i.query.resolved =>
// ifPartitionNotExists is append with validation, but validation is not supported
if (i.ifPartitionNotExists) {
throw new AnalysisException(
s"Cannot write, IF NOT EXISTS is not supported for table: ${r.table.name}")
}
val partCols = partitionColumnNames(r.table)
validatePartitionSpec(partCols, i.partitionSpec)
val staticPartitions = i.partitionSpec.filter(_._2.isDefined).mapValues(_.get)
val query = addStaticPartitionColumns(r, i.query, staticPartitions)
val dynamicPartitionOverwrite = partCols.size > staticPartitions.size &&
conf.partitionOverwriteMode == PartitionOverwriteMode.DYNAMIC
if (!i.overwrite) {
AppendData.byPosition(r, query)
} else if (dynamicPartitionOverwrite) {
OverwritePartitionsDynamic.byPosition(r, query)
} else {
OverwriteByExpression.byPosition(r, query, staticDeleteExpression(r, staticPartitions))
}
}
private def partitionColumnNames(table: Table): Seq[String] = {
// get partition column names. in v2, partition columns are columns that are stored using an
// identity partition transform because the partition values and the column values are
// identical. otherwise, partition values are produced by transforming one or more source
// columns and cannot be set directly in a query's PARTITION clause.
table.partitioning.flatMap {
case IdentityTransform(FieldReference(Seq(name))) => Some(name)
case _ => None
}
}
private def validatePartitionSpec(
partitionColumnNames: Seq[String],
partitionSpec: Map[String, Option[String]]): Unit = {
// check that each partition name is a partition column. otherwise, it is not valid
partitionSpec.keySet.foreach { partitionName =>
partitionColumnNames.find(name => conf.resolver(name, partitionName)) match {
case Some(_) =>
case None =>
throw new AnalysisException(
s"PARTITION clause cannot contain a non-partition column name: $partitionName")
}
}
}
private def addStaticPartitionColumns(
relation: DataSourceV2Relation,
query: LogicalPlan,
staticPartitions: Map[String, String]): LogicalPlan = {
if (staticPartitions.isEmpty) {
query
} else {
// add any static value as a literal column
val withStaticPartitionValues = {
// for each static name, find the column name it will replace and check for unknowns.
val outputNameToStaticName = staticPartitions.keySet.map(staticName =>
relation.output.find(col => conf.resolver(col.name, staticName)) match {
case Some(attr) =>
attr.name -> staticName
case _ =>
throw new AnalysisException(
s"Cannot add static value for unknown column: $staticName")
}).toMap
val queryColumns = query.output.iterator
// for each output column, add the static value as a literal, or use the next input
// column. this does not fail if input columns are exhausted and adds remaining columns
// at the end. both cases will be caught by ResolveOutputRelation and will fail the
// query with a helpful error message.
relation.output.flatMap { col =>
outputNameToStaticName.get(col.name).flatMap(staticPartitions.get) match {
case Some(staticValue) =>
Some(Alias(Cast(Literal(staticValue), col.dataType), col.name)())
case _ if queryColumns.hasNext =>
Some(queryColumns.next)
case _ =>
None
}
} ++ queryColumns
}
Project(withStaticPartitionValues, query)
}
}
private def staticDeleteExpression(
relation: DataSourceV2Relation,
staticPartitions: Map[String, String]): Expression = {
if (staticPartitions.isEmpty) {
Literal(true)
} else {
staticPartitions.map { case (name, value) =>
relation.output.find(col => conf.resolver(col.name, name)) match {
case Some(attr) =>
// the delete expression must reference the table's column names, but these attributes
// are not available when CheckAnalysis runs because the relation is not a child of
// the logical operation. instead, expressions are resolved after
// ResolveOutputRelation runs, using the query's column names that will match the
// table names at that point. because resolution happens after a future rule, create
// an UnresolvedAttribute.
EqualTo(UnresolvedAttribute(attr.name), Cast(Literal(value), attr.dataType))
case None =>
throw new AnalysisException(s"Unknown static partition column: $name")
}
}.reduce(And)
}
}
}
/**
* Replaces [[UnresolvedAttribute]]s with concrete [[AttributeReference]]s from
* a logical plan node's children.
*/
object ResolveReferences extends Rule[LogicalPlan] {
/**
* Generate a new logical plan for the right child with different expression IDs
* for all conflicting attributes.
*/
private def dedupRight (left: LogicalPlan, right: LogicalPlan): LogicalPlan = {
val conflictingAttributes = left.outputSet.intersect(right.outputSet)
logDebug(s"Conflicting attributes ${conflictingAttributes.mkString(",")} " +
s"between $left and $right")
/**
* For LogicalPlans like MultiInstanceRelation, Project, Aggregate, etc., whose output doesn't
* inherit directly from their children, we can just stop collecting on them, because we can
* always replace all the lower conflicting attributes with the new attributes from the new
* plan. Theoretically, we should collect recursively for Generate and Window, but we leave
* that to the next batch to reduce possible overhead because this should be a corner case.
*/
def collectConflictPlans(plan: LogicalPlan): Seq[(LogicalPlan, LogicalPlan)] = plan match {
// Handle base relations that might appear more than once.
case oldVersion: MultiInstanceRelation
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
val newVersion = oldVersion.newInstance()
Seq((oldVersion, newVersion))
case oldVersion: SerializeFromObject
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(
serializer = oldVersion.serializer.map(_.newInstance()))))
// Handle projects that create conflicting aliases.
case oldVersion @ Project(projectList, _)
if findAliases(projectList).intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(projectList = newAliases(projectList))))
case oldVersion @ Aggregate(_, aggregateExpressions, _)
if findAliases(aggregateExpressions).intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(
aggregateExpressions = newAliases(aggregateExpressions))))
case oldVersion @ FlatMapGroupsInPandas(_, _, output, _)
if oldVersion.outputSet.intersect(conflictingAttributes).nonEmpty =>
Seq((oldVersion, oldVersion.copy(output = output.map(_.newInstance()))))
case oldVersion: Generate
if oldVersion.producedAttributes.intersect(conflictingAttributes).nonEmpty =>
val newOutput = oldVersion.generatorOutput.map(_.newInstance())
Seq((oldVersion, oldVersion.copy(generatorOutput = newOutput)))
case oldVersion: Expand
if oldVersion.producedAttributes.intersect(conflictingAttributes).nonEmpty =>
val producedAttributes = oldVersion.producedAttributes
val newOutput = oldVersion.output.map { attr =>
if (producedAttributes.contains(attr)) {
attr.newInstance()
} else {
attr
}
}
Seq((oldVersion, oldVersion.copy(output = newOutput)))
case oldVersion @ Window(windowExpressions, _, _, child)
if AttributeSet(windowExpressions.map(_.toAttribute)).intersect(conflictingAttributes)
.nonEmpty =>
Seq((oldVersion, oldVersion.copy(windowExpressions = newAliases(windowExpressions))))
case _ => plan.children.flatMap(collectConflictPlans)
}
val conflictPlans = collectConflictPlans(right)
/*
* Note that it's possible `conflictPlans` can be empty which implies that there
* is a logical plan node that produces new references that this rule cannot handle.
* When that is the case, there must be another rule that resolves these conflicts.
* Otherwise, the analysis will fail.
*/
if (conflictPlans.isEmpty) {
right
} else {
val attributeRewrites = AttributeMap(conflictPlans.flatMap {
case (oldRelation, newRelation) => oldRelation.output.zip(newRelation.output)})
val conflictPlanMap = conflictPlans.toMap
// Use transformDown so that we can replace all the old relations in one pass, because
// `conflictPlans` is also collected in pre-order.
right transformDown {
case r => conflictPlanMap.getOrElse(r, r)
} transformUp {
case other => other transformExpressions {
case a: Attribute =>
dedupAttr(a, attributeRewrites)
case s: SubqueryExpression =>
s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attributeRewrites))
}
}
}
}
private def dedupAttr(attr: Attribute, attrMap: AttributeMap[Attribute]): Attribute = {
val exprId = attrMap.getOrElse(attr, attr).exprId
attr.withExprId(exprId)
}
/**
* The outer plan may have been de-duplicated and the function below updates the
* outer references to refer to the de-duplicated attributes.
*
* For example (SQL):
* {{{
* SELECT * FROM t1
* INTERSECT
* SELECT * FROM t1
* WHERE EXISTS (SELECT 1
* FROM t2
* WHERE t1.c1 = t2.c1)
* }}}
* Plan before resolveReference rule.
* 'Intersect
* :- Project [c1#245, c2#246]
* : +- SubqueryAlias t1
* : +- Relation[c1#245,c2#246] parquet
* +- 'Project [*]
* +- Filter exists#257 [c1#245]
* : +- Project [1 AS 1#258]
* : +- Filter (outer(c1#245) = c1#251)
* : +- SubqueryAlias t2
* : +- Relation[c1#251,c2#252] parquet
* +- SubqueryAlias t1
* +- Relation[c1#245,c2#246] parquet
* Plan after the resolveReference rule.
* Intersect
* :- Project [c1#245, c2#246]
* : +- SubqueryAlias t1
* : +- Relation[c1#245,c2#246] parquet
* +- Project [c1#259, c2#260]
* +- Filter exists#257 [c1#259]
* : +- Project [1 AS 1#258]
* : +- Filter (outer(c1#259) = c1#251) => Updated
* : +- SubqueryAlias t2
* : +- Relation[c1#251,c2#252] parquet
* +- SubqueryAlias t1
* +- Relation[c1#259,c2#260] parquet => Outer plan's attributes are de-duplicated.
*/
private def dedupOuterReferencesInSubquery(
plan: LogicalPlan,
attrMap: AttributeMap[Attribute]): LogicalPlan = {
plan transformDown { case currentFragment =>
currentFragment transformExpressions {
case OuterReference(a: Attribute) =>
OuterReference(dedupAttr(a, attrMap))
case s: SubqueryExpression =>
s.withNewPlan(dedupOuterReferencesInSubquery(s.plan, attrMap))
}
}
}
/**
* Resolves the attribute and extract value expression(s) by traversing the
* input expression in a top-down manner. The traversal is done top-down because
* we need to skip over unbound lambda function expressions. The lambda expressions are
* resolved in a different rule, [[ResolveLambdaVariables]].
*
* Example:
* SELECT transform(array(1, 2, 3), (x, i) -> x + i)
*
* In the case above, x and i are resolved as lambda variables in [[ResolveLambdaVariables]].
*
* Note: In this routine, the unresolved attributes are resolved from the input plan's
* children's attributes.
*/
private def resolveExpressionTopDown(e: Expression, q: LogicalPlan): Expression = {
if (e.resolved) return e
e match {
case f: LambdaFunction if !f.bound => f
case u @ UnresolvedAttribute(nameParts) =>
// Leave unchanged if resolution fails. Hopefully will be resolved next round.
val result =
withPosition(u) {
q.resolveChildren(nameParts, resolver)
.orElse(resolveLiteralFunction(nameParts, u, q))
.getOrElse(u)
}
logDebug(s"Resolving $u to $result")
result
case UnresolvedExtractValue(child, fieldExpr) if child.resolved =>
ExtractValue(child, fieldExpr, resolver)
case _ => e.mapChildren(resolveExpressionTopDown(_, q))
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan if !p.childrenResolved => p
// If the projection list contains Stars, expand it.
case p: Project if containsStar(p.projectList) =>
p.copy(projectList = buildExpandedProjectList(p.projectList, p.child))
// If the aggregate function argument contains Stars, expand it.
case a: Aggregate if containsStar(a.aggregateExpressions) =>
if (a.groupingExpressions.exists(_.isInstanceOf[UnresolvedOrdinal])) {
failAnalysis(
"Star (*) is not allowed in select list when GROUP BY ordinal position is used")
} else {
a.copy(aggregateExpressions = buildExpandedProjectList(a.aggregateExpressions, a.child))
}
// If the script transformation input contains Stars, expand it.
case t: ScriptTransformation if containsStar(t.input) =>
t.copy(
input = t.input.flatMap {
case s: Star => s.expand(t.child, resolver)
case o => o :: Nil
}
)
case g: Generate if containsStar(g.generator.children) =>
failAnalysis("Invalid usage of '*' in explode/json_tuple/UDTF")
// To resolve duplicate expression IDs for Join and Intersect
case j @ Join(left, right, _, _, _) if !j.duplicateResolved =>
j.copy(right = dedupRight(left, right))
case f @ FlatMapCoGroupsInPandas(leftAttributes, rightAttributes, _, _, left, right) =>
val leftRes = leftAttributes
.map(x => resolveExpressionBottomUp(x, left).asInstanceOf[Attribute])
val rightRes = rightAttributes
.map(x => resolveExpressionBottomUp(x, right).asInstanceOf[Attribute])
f.copy(leftAttributes = leftRes, rightAttributes = rightRes)
// Intersect/Except will be rewritten to joins at the beginning of the optimizer. Here we need to
// deduplicate the right side plan, so that we won't produce an invalid self-join later.
case i @ Intersect(left, right, _) if !i.duplicateResolved =>
i.copy(right = dedupRight(left, right))
case e @ Except(left, right, _) if !e.duplicateResolved =>
e.copy(right = dedupRight(left, right))
case u @ Union(children) if !u.duplicateResolved =>
// Use projection-based de-duplication for Union to avoid breaking the checkpoint sharing
// feature in streaming.
val newChildren = children.foldRight(Seq.empty[LogicalPlan]) { (head, tail) =>
head +: tail.map {
case child if head.outputSet.intersect(child.outputSet).isEmpty =>
child
case child =>
val projectList = child.output.map { attr =>
Alias(attr, attr.name)()
}
Project(projectList, child)
}
}
u.copy(children = newChildren)
// When resolving `SortOrder`s in Sort based on its child, don't report errors, as
// we still have a chance to resolve them based on its descendants.
case s @ Sort(ordering, global, child) if child.resolved && !s.resolved =>
val newOrdering =
ordering.map(order => resolveExpressionBottomUp(order, child).asInstanceOf[SortOrder])
Sort(newOrdering, global, child)
// A special case for Generate, because the output of Generate should not be resolved by
// ResolveReferences. Attributes in the output will be resolved by ResolveGenerate.
case g @ Generate(generator, _, _, _, _, _) if generator.resolved => g
case g @ Generate(generator, join, outer, qualifier, output, child) =>
val newG = resolveExpressionBottomUp(generator, child, throws = true)
if (newG.fastEquals(generator)) {
g
} else {
Generate(newG.asInstanceOf[Generator], join, outer, qualifier, output, child)
}
// Skips plan which contains deserializer expressions, as they should be resolved by another
// rule: ResolveDeserializer.
case plan if containsDeserializer(plan.expressions) => plan
// SPARK-25942: Resolves aggregate expressions with `AppendColumns`'s children, instead of
// `AppendColumns`, because `AppendColumns`'s serializer might produce conflicting attribute
// names, leading to an ambiguous-references exception.
case a @ Aggregate(groupingExprs, aggExprs, appendColumns: AppendColumns) =>
a.mapExpressions(resolveExpressionTopDown(_, appendColumns))
case o: OverwriteByExpression if !o.outputResolved =>
// do not resolve expression attributes until the query attributes are resolved against the
// table by ResolveOutputRelation. that rule will alias the attributes to the table's names.
o
case m @ MergeIntoTable(targetTable, sourceTable, _, _, _)
if !m.resolved && targetTable.resolved && sourceTable.resolved =>
EliminateSubqueryAliases(targetTable) match {
case r: NamedRelation if r.skipSchemaResolution =>
// Do not resolve the expression if the target table accepts any schema.
// This allows data sources to customize their own resolution logic using
// custom resolution rules.
m
case _ =>
val newMatchedActions = m.matchedActions.map {
case DeleteAction(deleteCondition) =>
val resolvedDeleteCondition = deleteCondition.map(resolveExpressionTopDown(_, m))
DeleteAction(resolvedDeleteCondition)
case UpdateAction(updateCondition, assignments) =>
val resolvedUpdateCondition = updateCondition.map(resolveExpressionTopDown(_, m))
// The update value can access columns from both target and source tables.
UpdateAction(
resolvedUpdateCondition,
resolveAssignments(assignments, m, resolveValuesWithSourceOnly = false))
case o => o
}
val newNotMatchedActions = m.notMatchedActions.map {
case InsertAction(insertCondition, assignments) =>
// The insert action is used when not matched, so its condition and value can only
// access columns from the source table.
val resolvedInsertCondition =
insertCondition.map(resolveExpressionTopDown(_, Project(Nil, m.sourceTable)))
InsertAction(
resolvedInsertCondition,
resolveAssignments(assignments, m, resolveValuesWithSourceOnly = true))
case o => o
}
val resolvedMergeCondition = resolveExpressionTopDown(m.mergeCondition, m)
m.copy(mergeCondition = resolvedMergeCondition,
matchedActions = newMatchedActions,
notMatchedActions = newNotMatchedActions)
}
case q: LogicalPlan =>
logTrace(s"Attempting to resolve ${q.simpleString(SQLConf.get.maxToStringFields)}")
q.mapExpressions(resolveExpressionTopDown(_, q))
}
def resolveAssignments(
assignments: Seq[Assignment],
mergeInto: MergeIntoTable,
resolveValuesWithSourceOnly: Boolean): Seq[Assignment] = {
if (assignments.isEmpty) {
val expandedColumns = mergeInto.targetTable.output
val expandedValues = mergeInto.sourceTable.output
expandedColumns.zip(expandedValues).map(kv => Assignment(kv._1, kv._2))
} else {
assignments.map { assign =>
val resolvedKey = assign.key match {
case c if !c.resolved =>
resolveExpressionTopDown(c, Project(Nil, mergeInto.targetTable))
case o => o
}
val resolvedValue = assign.value match {
// The update values may contain target and/or source references.
case c if !c.resolved =>
if (resolveValuesWithSourceOnly) {
resolveExpressionTopDown(c, Project(Nil, mergeInto.sourceTable))
} else {
resolveExpressionTopDown(c, mergeInto)
}
case o => o
}
Assignment(resolvedKey, resolvedValue)
}
}
}
def newAliases(expressions: Seq[NamedExpression]): Seq[NamedExpression] = {
expressions.map {
case a: Alias => Alias(a.child, a.name)()
case other => other
}
}
def findAliases(projectList: Seq[NamedExpression]): AttributeSet = {
AttributeSet(projectList.collect { case a: Alias => a.toAttribute })
}
/**
* Build a project list for Project/Aggregate and expand the star if possible
*/
private def buildExpandedProjectList(
exprs: Seq[NamedExpression],
child: LogicalPlan): Seq[NamedExpression] = {
exprs.flatMap {
// Using Dataframe/Dataset API: testData2.groupBy($"a", $"b").agg($"*")
case s: Star => s.expand(child, resolver)
// Using SQL API without running ResolveAlias: SELECT * FROM testData2 group by a, b
case UnresolvedAlias(s: Star, _) => s.expand(child, resolver)
case o if containsStar(o :: Nil) => expandStarExpression(o, child) :: Nil
case o => o :: Nil
}.map(_.asInstanceOf[NamedExpression])
}
/**
* Returns true if `exprs` contains a [[Star]].
*/
def containsStar(exprs: Seq[Expression]): Boolean =
exprs.exists(_.collect { case _: Star => true }.nonEmpty)
/**
* Expands the matching attribute.*'s in `child`'s output.
*/
def expandStarExpression(expr: Expression, child: LogicalPlan): Expression = {
expr.transformUp {
case f1: UnresolvedFunction if containsStar(f1.arguments) =>
f1.copy(arguments = f1.arguments.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case c: CreateNamedStruct if containsStar(c.valExprs) =>
val newChildren = c.children.grouped(2).flatMap {
case Seq(k, s : Star) => CreateStruct(s.expand(child, resolver)).children
case kv => kv
}
c.copy(children = newChildren.toList )
case c: CreateArray if containsStar(c.children) =>
c.copy(children = c.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case p: Murmur3Hash if containsStar(p.children) =>
p.copy(children = p.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
case p: XxHash64 if containsStar(p.children) =>
p.copy(children = p.children.flatMap {
case s: Star => s.expand(child, resolver)
case o => o :: Nil
})
// count(*) has been replaced by count(1)
case o if containsStar(o.children) =>
failAnalysis(s"Invalid usage of '*' in expression '${o.prettyName}'")
}
}
}
private def containsDeserializer(exprs: Seq[Expression]): Boolean = {
exprs.exists(_.find(_.isInstanceOf[UnresolvedDeserializer]).isDefined)
}
/**
* Literal functions do not require the user to specify parentheses when calling them.
* When an attribute is not resolvable, we try to resolve it as a literal function.
*/
private def resolveLiteralFunction(
nameParts: Seq[String],
attribute: UnresolvedAttribute,
plan: LogicalPlan): Option[Expression] = {
if (nameParts.length != 1) return None
val isNamedExpression = plan match {
case Aggregate(_, aggregateExpressions, _) => aggregateExpressions.contains(attribute)
case Project(projectList, _) => projectList.contains(attribute)
case Window(windowExpressions, _, _, _) => windowExpressions.contains(attribute)
case _ => false
}
val wrapper: Expression => Expression =
if (isNamedExpression) f => Alias(f, toPrettySQL(f))() else identity
// support CURRENT_DATE and CURRENT_TIMESTAMP
val literalFunctions = Seq(CurrentDate(), CurrentTimestamp())
val name = nameParts.head
val func = literalFunctions.find(e => caseInsensitiveResolution(e.prettyName, name))
func.map(wrapper)
}
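// Illustrative sketch: in `SELECT current_date FROM t`, `current_date` is first tried as an
// attribute; when that fails, this method matches it (case-insensitively) against
// CurrentDate()/CurrentTimestamp() and, because it appears in a Project list, wraps the
// result in an Alias named via toPrettySQL.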
/**
* Resolves the attribute, column value and extract value expression(s) by traversing the
* input expression in a bottom-up manner. In order to resolve the nested complex type fields
* correctly, this function makes use of the `throws` parameter to control when to raise an
* AnalysisException.
*
* Example :
* SELECT a.b FROM t ORDER BY b[0].d
*
* In the above example, b needs to be resolved before d can be resolved. Given that we are
* doing a bottom-up traversal, it will first attempt to resolve d and fail, as b has not
* been resolved yet. If `throws` is false, this function will handle the exception by
* returning the original attribute. In this case `d` will be resolved in subsequent passes
* after `b` is resolved.
*/
protected[sql] def resolveExpressionBottomUp(
expr: Expression,
plan: LogicalPlan,
throws: Boolean = false): Expression = {
if (expr.resolved) return expr
// Resolve the expression in one round.
// If `throws` is false and the desired attribute doesn't exist
// (e.g. trying to resolve `a.b` when `a` doesn't exist), swallow the failure and return the
// original expression. Otherwise, let the exception propagate.
try {
expr transformUp {
case GetColumnByOrdinal(ordinal, _) => plan.output(ordinal)
case u @ UnresolvedAttribute(nameParts) =>
val result =
withPosition(u) {
plan.resolve(nameParts, resolver)
.orElse(resolveLiteralFunction(nameParts, u, plan))
.getOrElse(u)
}
logDebug(s"Resolving $u to $result")
result
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
} catch {
case a: AnalysisException if !throws => expr
}
}
/**
* In many dialects of SQL it is valid to use ordinal positions in order/sort by and group by
* clauses. This rule converts ordinal positions to the corresponding expressions in the
* select list. This support was introduced in Spark 2.0.
*
* - When the sort references or group by expressions are not integer but foldable expressions,
* just ignore them.
* - When spark.sql.orderByOrdinal/spark.sql.groupByOrdinal is set to false, ignore the position
* numbers too.
*
* Before the release of Spark 2.0, the literals in order/sort by and group by clauses
* had no effect on the results.
*/
object ResolveOrdinalInOrderByAndGroupBy extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
// Replace the index with the corresponding attribute for ORDER BY,
// which is a 1-based position in the projection list.
case Sort(orders, global, child)
if orders.exists(_.child.isInstanceOf[UnresolvedOrdinal]) =>
val newOrders = orders map {
case s @ SortOrder(UnresolvedOrdinal(index), direction, nullOrdering, _) =>
if (index > 0 && index <= child.output.size) {
SortOrder(child.output(index - 1), direction, nullOrdering, Set.empty)
} else {
s.failAnalysis(
s"ORDER BY position $index is not in select list " +
s"(valid range is [1, ${child.output.size}])")
}
case o => o
}
Sort(newOrders, global, child)
// Replace the index with the corresponding expression in aggregateExpressions. The index is
// a 1-based position in aggregateExpressions, which are the output columns (select expressions).
case Aggregate(groups, aggs, child) if aggs.forall(_.resolved) &&
groups.exists(_.isInstanceOf[UnresolvedOrdinal]) =>
val newGroups = groups.map {
case u @ UnresolvedOrdinal(index) if index > 0 && index <= aggs.size =>
aggs(index - 1)
case ordinal @ UnresolvedOrdinal(index) =>
ordinal.failAnalysis(
s"GROUP BY position $index is not in select list " +
s"(valid range is [1, ${aggs.size}])")
case o => o
}
Aggregate(newGroups, aggs, child)
}
}
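// Illustrative sketch: for `SELECT a, count(b) FROM t GROUP BY 1 ORDER BY 2` (with the ordinal
// configs enabled), GROUP BY ordinal 1 resolves to the first aggregate expression `a`, and
// ORDER BY ordinal 2 resolves to the second output column, `count(b)`; out-of-range ordinals
// fail analysis with the messages above.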
/**
* Replace unresolved expressions in grouping keys with resolved ones in SELECT clauses.
* This rule is expected to run after [[ResolveReferences]] applied.
*/
object ResolveAggAliasInGroupBy extends Rule[LogicalPlan] {
// This is a strict check: we apply the rule only if the expression is not
// resolvable by the child.
private def notResolvableByChild(attrName: String, child: LogicalPlan): Boolean = {
!child.output.exists(a => resolver(a.name, attrName))
}
private def mayResolveAttrByAggregateExprs(
exprs: Seq[Expression], aggs: Seq[NamedExpression], child: LogicalPlan): Seq[Expression] = {
exprs.map { _.transform {
case u: UnresolvedAttribute if notResolvableByChild(u.name, child) =>
aggs.find(ne => resolver(ne.name, u.name)).getOrElse(u)
}}
}
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case agg @ Aggregate(groups, aggs, child)
if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
groups.exists(!_.resolved) =>
agg.copy(groupingExpressions = mayResolveAttrByAggregateExprs(groups, aggs, child))
case gs @ GroupingSets(selectedGroups, groups, child, aggs)
if conf.groupByAliases && child.resolved && aggs.forall(_.resolved) &&
groups.exists(_.isInstanceOf[UnresolvedAttribute]) =>
gs.copy(
selectedGroupByExprs = selectedGroups.map(mayResolveAttrByAggregateExprs(_, aggs, child)),
groupByExprs = mayResolveAttrByAggregateExprs(groups, aggs, child))
}
}
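// Illustrative sketch (when conf.groupByAliases is enabled): in
//   SELECT a + b AS x, count(*) FROM t GROUP BY x
// `x` is not resolvable from t's output, so the rule above replaces it with the select-list
// alias `a + b AS x`.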
/**
* In many dialects of SQL it is valid to sort by attributes that are not present in the SELECT
* clause. This rule detects such queries and adds the required attributes to the original
* projection, so that they will be available during sorting. Another projection is added to
* remove these attributes after sorting.
*
* The HAVING clause can also use grouping columns that are not present in the SELECT clause.
*/
object ResolveMissingReferences extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// Skip sort with aggregate. This will be handled in ResolveAggregateFunctions
case sa @ Sort(_, _, child: Aggregate) => sa
case s @ Sort(order, _, child)
if (!s.resolved || s.missingInput.nonEmpty) && child.resolved =>
val (newOrder, newChild) = resolveExprsAndAddMissingAttrs(order, child)
val ordering = newOrder.map(_.asInstanceOf[SortOrder])
if (child.output == newChild.output) {
s.copy(order = ordering)
} else {
// Add missing attributes and then project them away.
val newSort = s.copy(order = ordering, child = newChild)
Project(child.output, newSort)
}
case f @ Filter(cond, child) if (!f.resolved || f.missingInput.nonEmpty) && child.resolved =>
val (newCond, newChild) = resolveExprsAndAddMissingAttrs(Seq(cond), child)
if (child.output == newChild.output) {
f.copy(condition = newCond.head)
} else {
// Add missing attributes and then project them away.
val newFilter = Filter(newCond.head, newChild)
Project(child.output, newFilter)
}
}
/**
* This method tries to resolve expressions and find missing attributes recursively.
* Specifically, when the expressions used in `Sort` or `Filter` contain unresolved attributes,
* or resolved attributes that are missing from the child's output, this method tries to find
* the missing attributes and add them to the projection.
*/
private def resolveExprsAndAddMissingAttrs(
exprs: Seq[Expression], plan: LogicalPlan): (Seq[Expression], LogicalPlan) = {
// Missing attributes can be unresolved attributes or resolved attributes which are not in
// the output attributes of the plan.
if (exprs.forall(e => e.resolved && e.references.subsetOf(plan.outputSet))) {
(exprs, plan)
} else {
plan match {
case p: Project =>
// Resolving expressions against current plan.
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, p))
// Recursively resolving expressions on the child of current plan.
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, p.child)
// If some attributes used by expressions are resolvable only on the rewritten child
// plan, we need to add them into original projection.
val missingAttrs = (AttributeSet(newExprs) -- p.outputSet).intersect(newChild.outputSet)
(newExprs, Project(p.projectList ++ missingAttrs, newChild))
case a @ Aggregate(groupExprs, aggExprs, child) =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, a))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, child)
val missingAttrs = (AttributeSet(newExprs) -- a.outputSet).intersect(newChild.outputSet)
if (missingAttrs.forall(attr => groupExprs.exists(_.semanticEquals(attr)))) {
// All the missing attributes are grouping expressions, valid case.
(newExprs, a.copy(aggregateExpressions = aggExprs ++ missingAttrs, child = newChild))
} else {
// Need to add non-grouping attributes, invalid case.
(exprs, a)
}
case g: Generate =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, g))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, g.child)
(newExprs, g.copy(unrequiredChildIndex = Nil, child = newChild))
// For `Distinct` and `SubqueryAlias`, we can't recursively resolve and add attributes
// via its children.
case u: UnaryNode if !u.isInstanceOf[Distinct] && !u.isInstanceOf[SubqueryAlias] =>
val maybeResolvedExprs = exprs.map(resolveExpressionBottomUp(_, u))
val (newExprs, newChild) = resolveExprsAndAddMissingAttrs(maybeResolvedExprs, u.child)
(newExprs, u.withNewChildren(Seq(newChild)))
// For other operators, we can't recursively resolve and add attributes via its children.
case other =>
(exprs.map(resolveExpressionBottomUp(_, other)), other)
}
}
}
}
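// Illustrative sketch: for `SELECT a FROM t ORDER BY b`, the Sort references `b`, which is
// missing from the Project's output; the rule rewrites the plan roughly to
//   Project([a], Sort([b], Project([a, b], t)))
// adding `b` to the inner projection and projecting it away again on top.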
/**
* Checks whether a function identifier referenced by an [[UnresolvedFunction]] is defined in the
* function registry. Note that this rule doesn't try to resolve the [[UnresolvedFunction]]. It
* only performs simple existence check according to the function identifier to quickly identify
* undefined functions without triggering relation resolution, which may incur potentially
* expensive partition/schema discovery process in some cases.
* In order to avoid duplicate external function lookups, the external function identifiers are
* stored in the local hash set externalFunctionNameSet.
* @see [[ResolveFunctions]]
* @see https://issues.apache.org/jira/browse/SPARK-19737
*/
object LookupFunctions extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = {
val externalFunctionNameSet = new mutable.HashSet[FunctionIdentifier]()
plan.resolveExpressions {
case f: UnresolvedFunction
if externalFunctionNameSet.contains(normalizeFuncName(f.name)) => f
case f: UnresolvedFunction if v1SessionCatalog.isRegisteredFunction(f.name) => f
case f: UnresolvedFunction if v1SessionCatalog.isPersistentFunction(f.name) =>
externalFunctionNameSet.add(normalizeFuncName(f.name))
f
case f: UnresolvedFunction =>
withPosition(f) {
throw new NoSuchFunctionException(
f.name.database.getOrElse(v1SessionCatalog.getCurrentDatabase),
f.name.funcName)
}
}
}
def normalizeFuncName(name: FunctionIdentifier): FunctionIdentifier = {
val funcName = if (conf.caseSensitiveAnalysis) {
name.funcName
} else {
name.funcName.toLowerCase(Locale.ROOT)
}
val databaseName = name.database match {
case Some(a) => formatDatabaseName(a)
case None => v1SessionCatalog.getCurrentDatabase
}
FunctionIdentifier(funcName, Some(databaseName))
}
protected def formatDatabaseName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
}
/**
* Replaces [[UnresolvedFunction]]s with concrete [[Expression]]s.
*/
object ResolveFunctions extends Rule[LogicalPlan] {
val trimWarningEnabled = new AtomicBoolean(true)
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case q: LogicalPlan =>
q transformExpressions {
case u if !u.childrenResolved => u // Skip until children are resolved.
case u: UnresolvedAttribute if resolver(u.name, VirtualColumn.hiveGroupingIdName) =>
withPosition(u) {
Alias(GroupingID(Nil), VirtualColumn.hiveGroupingIdName)()
}
case u @ UnresolvedGenerator(name, children) =>
withPosition(u) {
v1SessionCatalog.lookupFunction(name, children) match {
case generator: Generator => generator
case other =>
failAnalysis(s"$name is expected to be a generator. However, " +
s"its class is ${other.getClass.getCanonicalName}, which is not a generator.")
}
}
case u @ UnresolvedFunction(funcId, arguments, isDistinct, filter) =>
withPosition(u) {
v1SessionCatalog.lookupFunction(funcId, arguments) match {
// AggregateWindowFunctions are AggregateFunctions that can only be evaluated within
// the context of a Window clause. They do not need to be wrapped in an
// AggregateExpression.
case wf: AggregateWindowFunction =>
if (isDistinct || filter.isDefined) {
failAnalysis("DISTINCT or FILTER specified, " +
s"but ${wf.prettyName} is not an aggregate function")
} else {
wf
}
// We got an aggregate function; we need to wrap it in an AggregateExpression.
case agg: AggregateFunction =>
// TODO: SPARK-30276 Support Filter expression allows simultaneous use of DISTINCT
if (filter.isDefined) {
if (isDistinct) {
failAnalysis("DISTINCT and FILTER cannot be used in aggregate functions " +
"at the same time")
} else if (!filter.get.deterministic) {
failAnalysis("FILTER expression is non-deterministic, " +
"it cannot be used in aggregate functions")
}
}
AggregateExpression(agg, Complete, isDistinct, filter)
              // This function is not an aggregate function; just return the resolved one.
case other if (isDistinct || filter.isDefined) =>
failAnalysis("DISTINCT or FILTER specified, " +
s"but ${other.prettyName} is not an aggregate function")
case e: String2TrimExpression if arguments.size == 2 =>
if (trimWarningEnabled.get) {
log.warn("Two-parameter TRIM/LTRIM/RTRIM function signatures are deprecated." +
" Use SQL syntax `TRIM((BOTH | LEADING | TRAILING)? trimStr FROM str)`" +
" instead.")
trimWarningEnabled.set(false)
}
e
case other =>
other
}
}
}
}
}
/**
* This rule resolves and rewrites subqueries inside expressions.
*
* Note: CTEs are handled in CTESubstitution.
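   *
   * For example (illustrative; table and column names are hypothetical), in
   * {{{
   *   SELECT * FROM t1 WHERE EXISTS (SELECT 1 FROM t2 WHERE t2.a = t1.a)
   * }}}
   * the inner plan is resolved and the correlated reference `t1.a` is wrapped in an
   * [[OuterReference]] and recorded as a child of the [[Exists]] expression.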
*/
object ResolveSubquery extends Rule[LogicalPlan] with PredicateHelper {
/**
     * Resolves the correlated expressions in a subquery by using the outer plan's references. All
     * resolved outer references are wrapped in an [[OuterReference]].
*/
private def resolveOuterReferences(plan: LogicalPlan, outer: LogicalPlan): LogicalPlan = {
plan resolveOperatorsDown {
case q: LogicalPlan if q.childrenResolved && !q.resolved =>
q transformExpressions {
case u @ UnresolvedAttribute(nameParts) =>
withPosition(u) {
try {
outer.resolve(nameParts, resolver) match {
case Some(outerAttr) => OuterReference(outerAttr)
case None => u
}
} catch {
case _: AnalysisException => u
}
}
}
}
}
/**
* Resolves the subquery plan that is referenced in a subquery expression. The normal
     * attribute references are resolved using the regular analyzer, and the outer references are
* resolved from the outer plans using the resolveOuterReferences method.
*
     * Outer references from the correlated predicates are recorded as children of the
     * subquery expression.
*/
private def resolveSubQuery(
e: SubqueryExpression,
plans: Seq[LogicalPlan])(
f: (LogicalPlan, Seq[Expression]) => SubqueryExpression): SubqueryExpression = {
// Step 1: Resolve the outer expressions.
var previous: LogicalPlan = null
var current = e.plan
do {
// Try to resolve the subquery plan using the regular analyzer.
previous = current
current = executeSameContext(current)
// Use the outer references to resolve the subquery plan if it isn't resolved yet.
val i = plans.iterator
val afterResolve = current
while (!current.resolved && current.fastEquals(afterResolve) && i.hasNext) {
current = resolveOuterReferences(current, i.next())
}
} while (!current.resolved && !current.fastEquals(previous))
// Step 2: If the subquery plan is fully resolved, pull the outer references and record
// them as children of SubqueryExpression.
if (current.resolved) {
// Record the outer references as children of subquery expression.
f(current, SubExprUtils.getOuterReferences(current))
} else {
e.withNewPlan(current)
}
}
/**
     * Resolves the subquery. Apart from resolving the subquery and the outer references (if any)
* in the subquery plan, the children of subquery expression are updated to record the
* outer references. This is needed to make sure
* (1) The column(s) referred from the outer query are not pruned from the plan during
* optimization.
* (2) Any aggregate expression(s) that reference outer attributes are pushed down to
* outer plan to get evaluated.
*/
private def resolveSubQueries(plan: LogicalPlan, plans: Seq[LogicalPlan]): LogicalPlan = {
plan transformExpressions {
case s @ ScalarSubquery(sub, _, exprId) if !sub.resolved =>
resolveSubQuery(s, plans)(ScalarSubquery(_, _, exprId))
case e @ Exists(sub, _, exprId) if !sub.resolved =>
resolveSubQuery(e, plans)(Exists(_, _, exprId))
case InSubquery(values, l @ ListQuery(_, _, exprId, _))
if values.forall(_.resolved) && !l.resolved =>
val expr = resolveSubQuery(l, plans)((plan, exprs) => {
ListQuery(plan, exprs, exprId, plan.output)
})
InSubquery(values, expr.asInstanceOf[ListQuery])
}
}
/**
     * Resolves and rewrites all subqueries in an operator tree.
*/
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
// In case of HAVING (a filter after an aggregate) we use both the aggregate and
// its child for resolution.
case f @ Filter(_, a: Aggregate) if f.childrenResolved =>
resolveSubQueries(f, Seq(a, a.child))
// Only a few unary nodes (Project/Filter/Aggregate) can contain subqueries.
case q: UnaryNode if q.childrenResolved =>
resolveSubQueries(q, q.children)
case j: Join if j.childrenResolved =>
resolveSubQueries(j, Seq(j, j.left, j.right))
case s: SupportsSubquery if s.childrenResolved =>
resolveSubQueries(s, s.children)
}
}
/**
* Replaces unresolved column aliases for a subquery with projections.
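   *
   * For example (illustrative), `SELECT * FROM (SELECT 1, 2) t(a, b)` is rewritten so that a
   * [[Project]] aliases the two subquery output columns to `a` and `b`.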
*/
object ResolveSubqueryColumnAliases extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case u @ UnresolvedSubqueryColumnAliases(columnNames, child) if child.resolved =>
// Resolves output attributes if a query has alias names in its subquery:
// e.g., SELECT * FROM (SELECT 1 AS a, 1 AS b) t(col1, col2)
val outputAttrs = child.output
        // Checks if the number of aliases equals the number of output columns
// in the subquery.
if (columnNames.size != outputAttrs.size) {
u.failAnalysis("Number of column aliases does not match number of columns. " +
s"Number of column aliases: ${columnNames.size}; " +
s"number of columns: ${outputAttrs.size}.")
}
val aliases = outputAttrs.zip(columnNames).map { case (attr, aliasName) =>
Alias(attr, aliasName)()
}
Project(aliases, child)
}
}
/**
* Turns projections that contain aggregate expressions into aggregations.
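   *
   * For example (illustrative), `SELECT max(a) FROM t` is initially planned as a [[Project]] and
   * is rewritten here into an [[Aggregate]] with empty grouping expressions.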
*/
object GlobalAggregates extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
case Project(projectList, child) if containsAggregates(projectList) =>
Aggregate(Nil, projectList, child)
}
def containsAggregates(exprs: Seq[Expression]): Boolean = {
// Collect all Windowed Aggregate Expressions.
val windowedAggExprs: Set[Expression] = exprs.flatMap { expr =>
expr.collect {
case WindowExpression(ae: AggregateExpression, _) => ae
case WindowExpression(e: PythonUDF, _) if PythonUDF.isGroupedAggPandasUDF(e) => e
}
}.toSet
// Find the first Aggregate Expression that is not Windowed.
exprs.exists(_.collectFirst {
case ae: AggregateExpression if !windowedAggExprs.contains(ae) => ae
case e: PythonUDF if PythonUDF.isGroupedAggPandasUDF(e) &&
!windowedAggExprs.contains(e) => e
}.isDefined)
}
}
/**
* This rule finds aggregate expressions that are not in an aggregate operator. For example,
* those in a HAVING clause or ORDER BY clause. These expressions are pushed down to the
* underlying aggregate operator and then projected away after the original operator.
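   *
   * For example (illustrative), in `SELECT a FROM t GROUP BY a HAVING max(b) > 0` the aggregate
   * `max(b)` from the HAVING clause is pushed into the underlying [[Aggregate]] and then
   * projected away again, so the query still outputs only `a`.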
*/
object ResolveAggregateFunctions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case f @ Filter(cond, agg @ Aggregate(grouping, originalAggExprs, child)) if agg.resolved =>
// Try resolving the condition of the filter as though it is in the aggregate clause
try {
val aggregatedCondition =
Aggregate(
grouping,
Alias(cond, "havingCondition")() :: Nil,
child)
val resolvedOperator = executeSameContext(aggregatedCondition)
def resolvedAggregateFilter =
resolvedOperator
.asInstanceOf[Aggregate]
.aggregateExpressions.head
// If resolution was successful and we see the filter has an aggregate in it, add it to
// the original aggregate operator.
if (resolvedOperator.resolved) {
// Try to replace all aggregate expressions in the filter by an alias.
val aggregateExpressions = ArrayBuffer.empty[NamedExpression]
val transformedAggregateFilter = resolvedAggregateFilter.transform {
case ae: AggregateExpression =>
val alias = Alias(ae, ae.toString)()
aggregateExpressions += alias
alias.toAttribute
// Grouping functions are handled in the rule [[ResolveGroupingAnalytics]].
case e: Expression if grouping.exists(_.semanticEquals(e)) &&
!ResolveGroupingAnalytics.hasGroupingFunction(e) &&
!agg.output.exists(_.semanticEquals(e)) =>
e match {
case ne: NamedExpression =>
aggregateExpressions += ne
ne.toAttribute
case _ =>
val alias = Alias(e, e.toString)()
aggregateExpressions += alias
alias.toAttribute
}
}
// Push the aggregate expressions into the aggregate (if any).
if (aggregateExpressions.nonEmpty) {
Project(agg.output,
Filter(transformedAggregateFilter,
agg.copy(aggregateExpressions = originalAggExprs ++ aggregateExpressions)))
} else {
f
}
} else {
f
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return the original plan.
case ae: AnalysisException => f
}
case sort @ Sort(sortOrder, global, aggregate: Aggregate) if aggregate.resolved =>
// Try resolving the ordering as though it is in the aggregate clause.
try {
          // If a sort order is unresolved, contains references not in the aggregate, or contains
          // an `AggregateExpression`, we need to push it down to the underlying aggregate
          // operator.
val unresolvedSortOrders = sortOrder.filter { s =>
!s.resolved || !s.references.subsetOf(aggregate.outputSet) || containsAggregate(s)
}
val aliasedOrdering =
unresolvedSortOrders.map(o => Alias(o.child, "aggOrder")())
val aggregatedOrdering = aggregate.copy(aggregateExpressions = aliasedOrdering)
val resolvedAggregate: Aggregate =
executeSameContext(aggregatedOrdering).asInstanceOf[Aggregate]
val resolvedAliasedOrdering: Seq[Alias] =
resolvedAggregate.aggregateExpressions.asInstanceOf[Seq[Alias]]
          // If we pass the analysis check, then the ordering expressions should only reference
// aggregate expressions or grouping expressions, and it's safe to push them down to
// Aggregate.
checkAnalysis(resolvedAggregate)
val originalAggExprs = aggregate.aggregateExpressions.map(
CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
          // If the ordering expression is the same as the original aggregate expression, we
          // don't need to push down this ordering expression and can reference the original
          // aggregate expression instead.
val needsPushDown = ArrayBuffer.empty[NamedExpression]
val evaluatedOrderings = resolvedAliasedOrdering.zip(unresolvedSortOrders).map {
case (evaluated, order) =>
val index = originalAggExprs.indexWhere {
case Alias(child, _) => child semanticEquals evaluated.child
case other => other semanticEquals evaluated.child
}
if (index == -1) {
needsPushDown += evaluated
order.copy(child = evaluated.toAttribute)
} else {
order.copy(child = originalAggExprs(index).toAttribute)
}
}
val sortOrdersMap = unresolvedSortOrders
.map(new TreeNodeRef(_))
.zip(evaluatedOrderings)
.toMap
val finalSortOrders = sortOrder.map(s => sortOrdersMap.getOrElse(new TreeNodeRef(s), s))
// Since we don't rely on sort.resolved as the stop condition for this rule,
// we need to check this and prevent applying this rule multiple times
if (sortOrder == finalSortOrders) {
sort
} else {
Project(aggregate.output,
Sort(finalSortOrders, global,
aggregate.copy(aggregateExpressions = originalAggExprs ++ needsPushDown)))
}
} catch {
// Attempting to resolve in the aggregate can result in ambiguity. When this happens,
// just return the original plan.
case ae: AnalysisException => sort
}
}
def containsAggregate(condition: Expression): Boolean = {
condition.find(_.isInstanceOf[AggregateExpression]).isDefined
}
}
/**
* Extracts [[Generator]] from the projectList of a [[Project]] operator and creates [[Generate]]
* operator under [[Project]].
*
   * This rule will throw [[AnalysisException]] for the following cases:
* 1. [[Generator]] is nested in expressions, e.g. `SELECT explode(list) + 1 FROM tbl`
* 2. more than one [[Generator]] is found in projectList,
* e.g. `SELECT explode(list), explode(list) FROM tbl`
* 3. [[Generator]] is found in other operators that are not [[Project]] or [[Generate]],
* e.g. `SELECT * FROM tbl SORT BY explode(list)`
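   *
   * In the supported case (illustrative), `SELECT explode(list) AS x FROM tbl` is rewritten into
   * a [[Generate]] operator placed under a [[Project]], with the generator output named `x`.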
*/
object ExtractGenerator extends Rule[LogicalPlan] {
private def hasGenerator(expr: Expression): Boolean = {
expr.find(_.isInstanceOf[Generator]).isDefined
}
private def hasNestedGenerator(expr: NamedExpression): Boolean = {
def hasInnerGenerator(g: Generator): Boolean = g match {
// Since `GeneratorOuter` is just a wrapper of generators, we skip it here
case go: GeneratorOuter =>
hasInnerGenerator(go.child)
case _ =>
g.children.exists { _.find {
case _: Generator => true
case _ => false
}.isDefined }
}
CleanupAliases.trimNonTopLevelAliases(expr) match {
case UnresolvedAlias(g: Generator, _) => hasInnerGenerator(g)
case Alias(g: Generator, _) => hasInnerGenerator(g)
case MultiAlias(g: Generator, _) => hasInnerGenerator(g)
case other => hasGenerator(other)
}
}
private def hasAggFunctionInGenerator(ne: Seq[NamedExpression]): Boolean = {
ne.exists(_.find {
case g: Generator =>
g.children.exists(_.find(_.isInstanceOf[AggregateFunction]).isDefined)
case _ =>
false
}.nonEmpty)
}
private def trimAlias(expr: NamedExpression): Expression = expr match {
case UnresolvedAlias(child, _) => child
case Alias(child, _) => child
case MultiAlias(child, _) => child
case _ => expr
}
private object AliasedGenerator {
/**
* Extracts a [[Generator]] expression, any names assigned by aliases to the outputs
* and the outer flag. The outer flag is used when joining the generator output.
* @param e the [[Expression]]
* @return (the [[Generator]], seq of output names, outer flag)
*/
def unapply(e: Expression): Option[(Generator, Seq[String], Boolean)] = e match {
case Alias(GeneratorOuter(g: Generator), name) if g.resolved => Some((g, name :: Nil, true))
case MultiAlias(GeneratorOuter(g: Generator), names) if g.resolved => Some((g, names, true))
case Alias(g: Generator, name) if g.resolved => Some((g, name :: Nil, false))
case MultiAlias(g: Generator, names) if g.resolved => Some((g, names, false))
case _ => None
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Project(projectList, _) if projectList.exists(hasNestedGenerator) =>
val nestedGenerator = projectList.find(hasNestedGenerator).get
throw new AnalysisException("Generators are not supported when it's nested in " +
"expressions, but got: " + toPrettySQL(trimAlias(nestedGenerator)))
case Project(projectList, _) if projectList.count(hasGenerator) > 1 =>
val generators = projectList.filter(hasGenerator).map(trimAlias)
throw new AnalysisException("Only one generator allowed per select clause but found " +
generators.size + ": " + generators.map(toPrettySQL).mkString(", "))
case Aggregate(_, aggList, _) if aggList.exists(hasNestedGenerator) =>
val nestedGenerator = aggList.find(hasNestedGenerator).get
throw new AnalysisException("Generators are not supported when it's nested in " +
"expressions, but got: " + toPrettySQL(trimAlias(nestedGenerator)))
case Aggregate(_, aggList, _) if aggList.count(hasGenerator) > 1 =>
val generators = aggList.filter(hasGenerator).map(trimAlias)
throw new AnalysisException("Only one generator allowed per aggregate clause but found " +
generators.size + ": " + generators.map(toPrettySQL).mkString(", "))
case agg @ Aggregate(groupList, aggList, child) if aggList.forall {
case AliasedGenerator(_, _, _) => true
case other => other.resolved
} && aggList.exists(hasGenerator) =>
        // Set this flag to true once a generator in the aggregate list has been visited.
var generatorVisited = false
val projectExprs = Array.ofDim[NamedExpression](aggList.length)
val newAggList = aggList
.map(CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
.zipWithIndex
.flatMap {
case (AliasedGenerator(generator, names, outer), idx) =>
              // This is a sanity check; it should not happen, as the previous case would have
              // thrown an exception earlier.
assert(!generatorVisited, "More than one generator found in aggregate.")
generatorVisited = true
val newGenChildren: Seq[Expression] = generator.children.zipWithIndex.map {
case (e, idx) => if (e.foldable) e else Alias(e, s"_gen_input_${idx}")()
}
val newGenerator = {
val g = generator.withNewChildren(newGenChildren.map { e =>
if (e.foldable) e else e.asInstanceOf[Alias].toAttribute
}).asInstanceOf[Generator]
if (outer) GeneratorOuter(g) else g
}
val newAliasedGenerator = if (names.length == 1) {
Alias(newGenerator, names(0))()
} else {
MultiAlias(newGenerator, names)
}
projectExprs(idx) = newAliasedGenerator
newGenChildren.filter(!_.foldable).asInstanceOf[Seq[NamedExpression]]
case (other, idx) =>
projectExprs(idx) = other.toAttribute
other :: Nil
}
val newAgg = Aggregate(groupList, newAggList, child)
Project(projectExprs.toList, newAgg)
case p @ Project(projectList, _) if hasAggFunctionInGenerator(projectList) =>
// If a generator has any aggregate function, we need to apply the `GlobalAggregates` rule
// first for replacing `Project` with `Aggregate`.
p
case p @ Project(projectList, child) =>
// Holds the resolved generator, if one exists in the project list.
var resolvedGenerator: Generate = null
val newProjectList = projectList
.map(CleanupAliases.trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
.flatMap {
case AliasedGenerator(generator, names, outer) if generator.childrenResolved =>
              // This is a sanity check; it should not happen, as the previous case would have
              // thrown an exception earlier.
assert(resolvedGenerator == null, "More than one generator found in SELECT.")
resolvedGenerator =
Generate(
generator,
unrequiredChildIndex = Nil,
outer = outer,
qualifier = None,
generatorOutput = ResolveGenerate.makeGeneratorOutput(generator, names),
child)
resolvedGenerator.generatorOutput
case other => other :: Nil
}
if (resolvedGenerator != null) {
Project(newProjectList, resolvedGenerator)
} else {
p
}
case g: Generate => g
case p if p.expressions.exists(hasGenerator) =>
throw new AnalysisException("Generators are not supported outside the SELECT clause, but " +
"got: " + p.simpleString(SQLConf.get.maxToStringFields))
}
}
/**
   * Rewrites table generating expressions that need one or more of the following in order
   * to be resolved:
   *  - concrete attribute references for their output.
   *  - to be relocated from a SELECT clause (i.e. from a [[Project]]) into a [[Generate]].
*
* Names for the output [[Attribute]]s are extracted from [[Alias]] or [[MultiAlias]] expressions
* that wrap the [[Generator]].
*/
object ResolveGenerate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case g: Generate if !g.child.resolved || !g.generator.resolved => g
case g: Generate if !g.resolved =>
g.copy(generatorOutput = makeGeneratorOutput(g.generator, g.generatorOutput.map(_.name)))
}
/**
     * Constructs the output attributes for a [[Generator]], given a list of names. If the list of
     * names is empty, the names are taken from the generator's element schema field names.
*/
private[analysis] def makeGeneratorOutput(
generator: Generator,
names: Seq[String]): Seq[Attribute] = {
val elementAttrs = generator.elementSchema.toAttributes
if (names.length == elementAttrs.length) {
names.zip(elementAttrs).map {
case (name, attr) => attr.withName(name)
}
} else if (names.isEmpty) {
elementAttrs
} else {
failAnalysis(
"The number of aliases supplied in the AS clause does not match the number of columns " +
s"output by the UDTF expected ${elementAttrs.size} aliases but got " +
s"${names.mkString(",")} ")
}
}
}
/**
* Extracts [[WindowExpression]]s from the projectList of a [[Project]] operator and
* aggregateExpressions of an [[Aggregate]] operator and creates individual [[Window]]
* operators for every distinct [[WindowSpecDefinition]].
*
* This rule handles three cases:
   *  - A [[Project]] having [[WindowExpression]]s in its projectList;
   *  - An [[Aggregate]] having [[WindowExpression]]s in its aggregateExpressions;
   *  - A [[Filter]]->[[Aggregate]] pattern representing GROUP BY with a HAVING
   *    clause, where the [[Aggregate]] has [[WindowExpression]]s in its aggregateExpressions.
* Note: If there is a GROUP BY clause in the query, aggregations and corresponding
* filters (expressions in the HAVING clause) should be evaluated before any
* [[WindowExpression]]. If a query has SELECT DISTINCT, the DISTINCT part should be
* evaluated after all [[WindowExpression]]s.
*
* For every case, the transformation works as follows:
   *  1. For a list of [[Expression]]s (a projectList or an aggregateExpressions), partitions
   *     it into two lists of [[Expression]]s, one for all [[WindowExpression]]s and another for
* all regular expressions.
* 2. For all [[WindowExpression]]s, groups them based on their [[WindowSpecDefinition]]s
* and [[WindowFunctionType]]s.
* 3. For every distinct [[WindowSpecDefinition]] and [[WindowFunctionType]], creates a
* [[Window]] operator and inserts it into the plan tree.
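   *
   * For example (illustrative), `SELECT a, sum(b) OVER (PARTITION BY c) FROM t` becomes a
   * [[Window]] operator computing the windowed sum on top of a [[Project]] that provides
   * `a`, `b` and `c`, followed by a final [[Project]] that restores the original output columns.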
*/
object ExtractWindowExpressions extends Rule[LogicalPlan] {
private def hasWindowFunction(exprs: Seq[Expression]): Boolean =
exprs.exists(hasWindowFunction)
private def hasWindowFunction(expr: Expression): Boolean = {
expr.find {
case window: WindowExpression => true
case _ => false
}.isDefined
}
/**
* From a Seq of [[NamedExpression]]s, extract expressions containing window expressions and
* other regular expressions that do not contain any window expression. For example, for
* `col1, Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5)`, we will extract
* `col1`, `col2 + col3`, `col4`, and `col5` out and replace their appearances in
* the window expression as attribute references. So, the first returned value will be
* `[Sum(_w0) OVER (PARTITION BY _w1 ORDER BY _w2)]` and the second returned value will be
* [col1, col2 + col3 as _w0, col4 as _w1, col5 as _w2].
*
* @return (seq of expressions containing at least one window expression,
* seq of non-window expressions)
*/
private def extract(
expressions: Seq[NamedExpression]): (Seq[NamedExpression], Seq[NamedExpression]) = {
      // First, we partition the input expressions into two parts. Every expression in the
      // first part contains at least one WindowExpression.
      // Expressions in the second part do not have any WindowExpression.
val (expressionsWithWindowFunctions, regularExpressions) =
expressions.partition(hasWindowFunction)
// Then, we need to extract those regular expressions used in the WindowExpression.
// For example, when we have col1 - Sum(col2 + col3) OVER (PARTITION BY col4 ORDER BY col5),
// we need to make sure that col1 to col5 are all projected from the child of the Window
// operator.
val extractedExprBuffer = new ArrayBuffer[NamedExpression]()
def extractExpr(expr: Expression): Expression = expr match {
case ne: NamedExpression =>
// If a named expression is not in regularExpressions, add it to
// extractedExprBuffer and replace it with an AttributeReference.
val missingExpr =
AttributeSet(Seq(expr)) -- (regularExpressions ++ extractedExprBuffer)
if (missingExpr.nonEmpty) {
extractedExprBuffer += ne
}
// alias will be cleaned in the rule CleanupAliases
ne
case e: Expression if e.foldable =>
e // No need to create an attribute reference if it will be evaluated as a Literal.
case e: Expression =>
// For other expressions, we extract it and replace it with an AttributeReference (with
// an internal column name, e.g. "_w0").
val withName = Alias(e, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
}
// Now, we extract regular expressions from expressionsWithWindowFunctions
// by using extractExpr.
val seenWindowAggregates = new ArrayBuffer[AggregateExpression]
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
_.transform {
// Extracts children expressions of a WindowFunction (input parameters of
// a WindowFunction).
case wf: WindowFunction =>
val newChildren = wf.children.map(extractExpr)
wf.withNewChildren(newChildren)
// Extracts expressions from the partition spec and order spec.
case wsc @ WindowSpecDefinition(partitionSpec, orderSpec, _) =>
val newPartitionSpec = partitionSpec.map(extractExpr)
val newOrderSpec = orderSpec.map { so =>
val newChild = extractExpr(so.child)
so.copy(child = newChild)
}
wsc.copy(partitionSpec = newPartitionSpec, orderSpec = newOrderSpec)
case WindowExpression(ae: AggregateExpression, _) if ae.filter.isDefined =>
failAnalysis(
"window aggregate function with filter predicate is not supported yet.")
// Extract Windowed AggregateExpression
case we @ WindowExpression(
ae @ AggregateExpression(function, _, _, _, _),
spec: WindowSpecDefinition) =>
val newChildren = function.children.map(extractExpr)
val newFunction = function.withNewChildren(newChildren).asInstanceOf[AggregateFunction]
val newAgg = ae.copy(aggregateFunction = newFunction)
seenWindowAggregates += newAgg
WindowExpression(newAgg, spec)
case AggregateExpression(aggFunc, _, _, _, _) if hasWindowFunction(aggFunc.children) =>
failAnalysis("It is not allowed to use a window function inside an aggregate " +
"function. Please use the inner window function in a sub-query.")
// Extracts AggregateExpression. For example, for SUM(x) - Sum(y) OVER (...),
// we need to extract SUM(x).
case agg: AggregateExpression if !seenWindowAggregates.contains(agg) =>
val withName = Alias(agg, s"_w${extractedExprBuffer.length}")()
extractedExprBuffer += withName
withName.toAttribute
// Extracts other attributes
case attr: Attribute => extractExpr(attr)
}.asInstanceOf[NamedExpression]
}
(newExpressionsWithWindowFunctions, regularExpressions ++ extractedExprBuffer)
} // end of extract
/**
* Adds operators for Window Expressions. Every Window operator handles a single Window Spec.
*/
private def addWindow(
expressionsWithWindowFunctions: Seq[NamedExpression],
child: LogicalPlan): LogicalPlan = {
// First, we need to extract all WindowExpressions from expressionsWithWindowFunctions
// and put those extracted WindowExpressions to extractedWindowExprBuffer.
// This step is needed because it is possible that an expression contains multiple
// WindowExpressions with different Window Specs.
// After extracting WindowExpressions, we need to construct a project list to generate
// expressionsWithWindowFunctions based on extractedWindowExprBuffer.
// For example, for "sum(a) over (...) / sum(b) over (...)", we will first extract
// "sum(a) over (...)" and "sum(b) over (...)" out, and assign "_we0" as the alias to
// "sum(a) over (...)" and "_we1" as the alias to "sum(b) over (...)".
// Then, the projectList will be [_we0/_we1].
val extractedWindowExprBuffer = new ArrayBuffer[NamedExpression]()
val newExpressionsWithWindowFunctions = expressionsWithWindowFunctions.map {
// We need to use transformDown because we want to trigger
// "case alias @ Alias(window: WindowExpression, _)" first.
_.transformDown {
case alias @ Alias(window: WindowExpression, _) =>
// If a WindowExpression has an assigned alias, just use it.
extractedWindowExprBuffer += alias
alias.toAttribute
case window: WindowExpression =>
            // If there is no alias assigned to the WindowExpression, we create an
            // internal column name for it.
val withName = Alias(window, s"_we${extractedWindowExprBuffer.length}")()
extractedWindowExprBuffer += withName
withName.toAttribute
}.asInstanceOf[NamedExpression]
}
// Second, we group extractedWindowExprBuffer based on their Partition and Order Specs.
val groupedWindowExpressions = extractedWindowExprBuffer.groupBy { expr =>
val distinctWindowSpec = expr.collect {
case window: WindowExpression => window.windowSpec
}.distinct
        // We do a final check to see that we only have a single Window Spec defined in an
        // expression.
if (distinctWindowSpec.isEmpty) {
failAnalysis(s"$expr does not have any WindowExpression.")
} else if (distinctWindowSpec.length > 1) {
          // newExpressionsWithWindowFunctions only contains expressions with a single
          // WindowExpression. If we reach here, we have a bug.
failAnalysis(s"$expr has multiple Window Specifications ($distinctWindowSpec)." +
s"Please file a bug report with this error message, stack trace, and the query.")
} else {
val spec = distinctWindowSpec.head
(spec.partitionSpec, spec.orderSpec, WindowFunctionType.functionType(expr))
}
}.toSeq
      // Third, we add one Window operator per distinct Window Spec, chaining them so that each
      // Window operator becomes the child of the next one.
val windowOps =
groupedWindowExpressions.foldLeft(child) {
case (last, ((partitionSpec, orderSpec, _), windowExpressions)) =>
Window(windowExpressions, partitionSpec, orderSpec, last)
}
      // Finally, we create a Project to output both windowOps's output and
      // newExpressionsWithWindowFunctions.
Project(windowOps.output ++ newExpressionsWithWindowFunctions, windowOps)
} // end of addWindow
    // We have to use transformDown here to make sure the
    // "Aggregate with Having clause" case is matched first.
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperatorsDown {
case Filter(condition, _) if hasWindowFunction(condition) =>
failAnalysis("It is not allowed to use window functions inside WHERE and HAVING clauses")
// Aggregate with Having clause. This rule works with an unresolved Aggregate because
// a resolved Aggregate will not have Window Functions.
case f @ Filter(condition, a @ Aggregate(groupingExprs, aggregateExprs, child))
if child.resolved &&
hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add a Filter operator for conditions in the Having clause.
val withFilter = Filter(condition, withAggregate)
val withWindow = addWindow(windowExpressions, withFilter)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map(_.toAttribute)
Project(finalProjectList, withWindow)
case p: LogicalPlan if !p.childrenResolved => p
// Aggregate without Having clause.
case a @ Aggregate(groupingExprs, aggregateExprs, child)
if hasWindowFunction(aggregateExprs) &&
a.expressions.forall(_.resolved) =>
val (windowExpressions, aggregateExpressions) = extract(aggregateExprs)
// Create an Aggregate operator to evaluate aggregation functions.
val withAggregate = Aggregate(groupingExprs, aggregateExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withAggregate)
// Finally, generate output columns according to the original projectList.
val finalProjectList = aggregateExprs.map(_.toAttribute)
Project(finalProjectList, withWindow)
// We only extract Window Expressions after all expressions of the Project
// have been resolved.
case p @ Project(projectList, child)
if hasWindowFunction(projectList) && !p.expressions.exists(!_.resolved) =>
val (windowExpressions, regularExpressions) = extract(projectList)
// We add a project to get all needed expressions for window expressions from the child
// of the original Project operator.
val withProject = Project(regularExpressions, child)
// Add Window operators.
val withWindow = addWindow(windowExpressions, withProject)
// Finally, generate output columns according to the original projectList.
val finalProjectList = projectList.map(_.toAttribute)
Project(finalProjectList, withWindow)
}
}
/**
   * Pulls out nondeterministic expressions from a LogicalPlan that is not a Project or a Filter,
   * puts them into an inner Project, and finally projects them away at the outer Project.
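   *
   * For example (illustrative), an [[Aggregate]] that groups by a nondeterministic expression
   * such as `rand(5)` is rewritten so the expression is evaluated once in an inner [[Project]],
   * and the [[Aggregate]] then groups by the resulting attribute.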
*/
object PullOutNondeterministic extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.resolved => p // Skip unresolved nodes.
case p: Project => p
case f: Filter => f
case a: Aggregate if a.groupingExpressions.exists(!_.deterministic) =>
val nondeterToAttr = getNondeterToAttr(a.groupingExpressions)
val newChild = Project(a.child.output ++ nondeterToAttr.values, a.child)
a.transformExpressions { case e =>
nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
}.copy(child = newChild)
// Don't touch collect metrics. Top-level metrics are not supported (check analysis will fail)
// and we want to retain them inside the aggregate functions.
case m: CollectMetrics => m
      // TODO: It's hard to write a general rule to pull out nondeterministic expressions
      // from a LogicalPlan; currently we only do it for a UnaryNode which has the same output
      // schema as its child.
case p: UnaryNode if p.output == p.child.output && p.expressions.exists(!_.deterministic) =>
val nondeterToAttr = getNondeterToAttr(p.expressions)
val newPlan = p.transformExpressions { case e =>
nondeterToAttr.get(e).map(_.toAttribute).getOrElse(e)
}
val newChild = Project(p.child.output ++ nondeterToAttr.values, p.child)
Project(p.output, newPlan.withNewChildren(newChild :: Nil))
}
private def getNondeterToAttr(exprs: Seq[Expression]): Map[Expression, NamedExpression] = {
exprs.filterNot(_.deterministic).flatMap { expr =>
val leafNondeterministic = expr.collect { case n: Nondeterministic => n }
leafNondeterministic.distinct.map { e =>
val ne = e match {
case n: NamedExpression => n
case _ => Alias(e, "_nondeterministic")()
}
e -> ne
}
}.toMap
}
}
/**
* Set the seed for random number generation.
*/
object ResolveRandomSeed extends Rule[LogicalPlan] {
private lazy val random = new Random()
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if p.resolved => p
case p => p transformExpressionsUp {
case Uuid(None) => Uuid(Some(random.nextLong()))
case Shuffle(child, None) => Shuffle(child, Some(random.nextLong()))
}
}
}
/**
   * Correctly handles null primitive inputs for a UDF by adding an extra [[If]] expression to do
   * the null check. When a user defines a UDF with primitive parameters, there is no way to tell
   * whether a primitive parameter is null or not, so here we assume the primitive input is
   * null-propagatable and return null if the input is null.
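   *
   * For example (a sketch; the UDF below is hypothetical), for
   * `val plusOne = udf((x: Int) => x + 1)` applied to a nullable column `c`, the call is
   * rewritten roughly into `If(IsNull(c), null, plusOne(KnownNotNull(c)))`, so a null input
   * yields a null output instead of being silently converted to a default value.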
*/
object HandleNullInputsForUDF extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.resolved => p // Skip unresolved nodes.
case p => p transformExpressionsUp {
case udf @ ScalaUDF(_, _, inputs, inputPrimitives, _, _, _, _)
if inputPrimitives.contains(true) =>
// Otherwise, add special handling of null for fields that can't accept null.
// The result of operations like this, when passed null, is generally to return null.
assert(inputPrimitives.length == inputs.length)
val inputPrimitivesPair = inputPrimitives.zip(inputs)
val inputNullCheck = inputPrimitivesPair.collect {
case (isPrimitive, input) if isPrimitive && input.nullable =>
IsNull(input)
}.reduceLeftOption[Expression](Or)
if (inputNullCheck.isDefined) {
// Once we add an `If` check above the udf, it is safe to mark those checked inputs
// as null-safe (i.e., wrap with `KnownNotNull`), because the null-returning
// branch of `If` will be called if any of these checked inputs is null. Thus we can
// prevent this rule from being applied repeatedly.
val newInputs = inputPrimitivesPair.map {
case (isPrimitive, input) =>
if (isPrimitive && input.nullable) {
KnownNotNull(input)
} else {
input
}
}
val newUDF = udf.copy(children = newInputs)
If(inputNullCheck.get, Literal.create(null, udf.dataType), newUDF)
} else {
udf
}
}
}
}
/**
* Check and add proper window frames for all window functions.
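   *
   * For example (illustrative), `sum(a) OVER (ORDER BY b)` gets the default frame
   * `RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW`, while `sum(a) OVER ()` with no ordering
   * gets `ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING`.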
*/
object ResolveWindowFrame extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case WindowExpression(wf: WindowFunction, WindowSpecDefinition(_, _, f: SpecifiedWindowFrame))
if wf.frame != UnspecifiedFrame && wf.frame != f =>
failAnalysis(s"Window Frame $f must match the required frame ${wf.frame}")
case WindowExpression(wf: WindowFunction, s @ WindowSpecDefinition(_, _, UnspecifiedFrame))
if wf.frame != UnspecifiedFrame =>
WindowExpression(wf, s.copy(frameSpecification = wf.frame))
case we @ WindowExpression(e, s @ WindowSpecDefinition(_, o, UnspecifiedFrame))
if e.resolved =>
val frame = if (o.nonEmpty) {
SpecifiedWindowFrame(RangeFrame, UnboundedPreceding, CurrentRow)
} else {
SpecifiedWindowFrame(RowFrame, UnboundedPreceding, UnboundedFollowing)
}
we.copy(windowSpec = s.copy(frameSpecification = frame))
}
}
/**
* Check and add order to [[AggregateWindowFunction]]s.
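   *
   * For example (illustrative), `rank() OVER (PARTITION BY a)` fails analysis because the window
   * is not ordered, while `rank() OVER (PARTITION BY a ORDER BY b)` resolves and the ordering
   * expression `b` is attached to the rank function.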
*/
object ResolveWindowOrder extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case WindowExpression(wf: WindowFunction, spec) if spec.orderSpec.isEmpty =>
failAnalysis(s"Window function $wf requires window to be ordered, please add ORDER BY " +
s"clause. For example SELECT $wf(value_expr) OVER (PARTITION BY window_partition " +
s"ORDER BY window_ordering) from table")
case WindowExpression(rank: RankLike, spec) if spec.resolved =>
val order = spec.orderSpec.map(_.child)
WindowExpression(rank.withOrder(order), spec)
}
}
/**
   * Removes natural or USING joins by calculating output columns based on the output from the
   * two sides, then applies a Project on top of a normal Join to eliminate the natural or USING
   * join.
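   *
   * For example (illustrative), `SELECT * FROM t1 JOIN t2 USING (id)` is rewritten into an inner
   * [[Join]] with the condition `t1.id = t2.id`, topped by a [[Project]] that outputs the join
   * key `id` once, followed by the remaining columns from both sides.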
*/
object ResolveNaturalAndUsingJoin extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case j @ Join(left, right, UsingJoin(joinType, usingCols), _, hint)
if left.resolved && right.resolved && j.duplicateResolved =>
commonNaturalJoinProcessing(left, right, joinType, usingCols, None, hint)
case j @ Join(left, right, NaturalJoin(joinType), condition, hint)
if j.resolvedExceptNatural =>
// find common column names from both sides
val joinNames = left.output.map(_.name).intersect(right.output.map(_.name))
commonNaturalJoinProcessing(left, right, joinType, joinNames, condition, hint)
}
}
/**
* Resolves columns of an output table from the data in a logical plan. This rule will:
*
* - Reorder columns when the write is by name
* - Insert casts when data types do not match
* - Insert aliases when column names do not match
* - Detect plans that are not compatible with the output table and throw AnalysisException
*/
object ResolveOutputRelation extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperators {
case append @ AppendData(table, query, _, isByName)
if table.resolved && query.resolved && !append.outputResolved =>
validateStoreAssignmentPolicy()
val projection =
TableOutputResolver.resolveOutputColumns(table.name, table.output, query, isByName, conf)
if (projection != query) {
append.copy(query = projection)
} else {
append
}
case overwrite @ OverwriteByExpression(table, _, query, _, isByName)
if table.resolved && query.resolved && !overwrite.outputResolved =>
validateStoreAssignmentPolicy()
val projection =
TableOutputResolver.resolveOutputColumns(table.name, table.output, query, isByName, conf)
if (projection != query) {
overwrite.copy(query = projection)
} else {
overwrite
}
case overwrite @ OverwritePartitionsDynamic(table, query, _, isByName)
if table.resolved && query.resolved && !overwrite.outputResolved =>
validateStoreAssignmentPolicy()
val projection =
TableOutputResolver.resolveOutputColumns(table.name, table.output, query, isByName, conf)
if (projection != query) {
overwrite.copy(query = projection)
} else {
overwrite
}
}
}
private def validateStoreAssignmentPolicy(): Unit = {
// SPARK-28730: LEGACY store assignment policy is disallowed in data source v2.
if (conf.storeAssignmentPolicy == StoreAssignmentPolicy.LEGACY) {
val configKey = SQLConf.STORE_ASSIGNMENT_POLICY.key
throw new AnalysisException(s"""
|"LEGACY" store assignment policy is disallowed in Spark data source V2.
|Please set the configuration $configKey to other values.""".stripMargin)
}
}
private def commonNaturalJoinProcessing(
left: LogicalPlan,
right: LogicalPlan,
joinType: JoinType,
joinNames: Seq[String],
condition: Option[Expression],
hint: JoinHint) = {
val leftKeys = joinNames.map { keyName =>
left.output.find(attr => resolver(attr.name, keyName)).getOrElse {
throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the left " +
s"side of the join. The left-side columns: [${left.output.map(_.name).mkString(", ")}]")
}
}
val rightKeys = joinNames.map { keyName =>
right.output.find(attr => resolver(attr.name, keyName)).getOrElse {
throw new AnalysisException(s"USING column `$keyName` cannot be resolved on the right " +
s"side of the join. The right-side columns: [${right.output.map(_.name).mkString(", ")}]")
}
}
val joinPairs = leftKeys.zip(rightKeys)
val newCondition = (condition ++ joinPairs.map(EqualTo.tupled)).reduceOption(And)
// columns not in joinPairs
val lUniqueOutput = left.output.filterNot(att => leftKeys.contains(att))
val rUniqueOutput = right.output.filterNot(att => rightKeys.contains(att))
// the output list looks like: join keys, columns from left, columns from right
val projectList = joinType match {
case LeftOuter =>
leftKeys ++ lUniqueOutput ++ rUniqueOutput.map(_.withNullability(true))
case LeftExistence(_) =>
leftKeys ++ lUniqueOutput
case RightOuter =>
rightKeys ++ lUniqueOutput.map(_.withNullability(true)) ++ rUniqueOutput
case FullOuter =>
        // In a full outer join, a join column should be non-null whenever either side is
        // non-null, so coalesce the left and right keys.
val joinedCols = joinPairs.map { case (l, r) => Alias(Coalesce(Seq(l, r)), l.name)() }
joinedCols ++
lUniqueOutput.map(_.withNullability(true)) ++
rUniqueOutput.map(_.withNullability(true))
case _ : InnerLike =>
leftKeys ++ lUniqueOutput ++ rUniqueOutput
case _ =>
sys.error("Unsupported natural join type " + joinType)
}
// use Project to trim unnecessary fields
Project(projectList, Join(left, right, joinType, newCondition, hint))
}
/**
* Replaces [[UnresolvedDeserializer]] with the deserialization expression that has been resolved
* to the given input attributes.
*/
object ResolveDeserializer extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case UnresolvedDeserializer(deserializer, inputAttributes) =>
val inputs = if (inputAttributes.isEmpty) {
p.children.flatMap(_.output)
} else {
inputAttributes
}
validateTopLevelTupleFields(deserializer, inputs)
val resolved = resolveExpressionBottomUp(
deserializer, LocalRelation(inputs), throws = true)
val result = resolved transformDown {
case UnresolvedMapObjects(func, inputData, cls) if inputData.resolved =>
inputData.dataType match {
case ArrayType(et, cn) =>
MapObjects(func, inputData, et, cn, cls) transformUp {
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
case other =>
throw new AnalysisException("need an array field but got " + other.catalogString)
}
case u: UnresolvedCatalystToExternalMap if u.child.resolved =>
u.child.dataType match {
case _: MapType =>
CatalystToExternalMap(u) transformUp {
case UnresolvedExtractValue(child, fieldName) if child.resolved =>
ExtractValue(child, fieldName, resolver)
}
case other =>
throw new AnalysisException("need a map field but got " + other.catalogString)
}
}
validateNestedTupleFields(result)
result
}
}
private def fail(schema: StructType, maxOrdinal: Int): Unit = {
throw new AnalysisException(s"Try to map ${schema.catalogString} to Tuple${maxOrdinal + 1}" +
", but failed as the number of fields does not line up.")
}
/**
* For each top-level Tuple field, we use [[GetColumnByOrdinal]] to get its corresponding column
* by position. However, the actual number of columns may be different from the number of Tuple
* fields. This method is used to check the number of columns and fields, and throw an
* exception if they do not match.
*/
private def validateTopLevelTupleFields(
deserializer: Expression, inputs: Seq[Attribute]): Unit = {
val ordinals = deserializer.collect {
case GetColumnByOrdinal(ordinal, _) => ordinal
}.distinct.sorted
if (ordinals.nonEmpty && ordinals != inputs.indices) {
fail(inputs.toStructType, ordinals.last)
}
}
/**
* For each nested Tuple field, we use [[GetStructField]] to get its corresponding struct field
* by position. However, the actual number of struct fields may be different from the number
* of nested Tuple fields. This method is used to check the number of struct fields and nested
* Tuple fields, and throw an exception if they do not match.
*/
private def validateNestedTupleFields(deserializer: Expression): Unit = {
val structChildToOrdinals = deserializer
// There are 2 kinds of `GetStructField`:
// 1. resolved from `UnresolvedExtractValue`, and it will have a `name` property.
// 2. created when we build deserializer expression for nested tuple, no `name` property.
// Here we want to validate the ordinals of nested tuple, so we should only catch
// `GetStructField` without the name property.
.collect { case g: GetStructField if g.name.isEmpty => g }
.groupBy(_.child)
.mapValues(_.map(_.ordinal).distinct.sorted)
structChildToOrdinals.foreach { case (expr, ordinals) =>
val schema = expr.dataType.asInstanceOf[StructType]
if (ordinals != schema.indices) {
fail(schema, ordinals.last)
}
}
}
}
/**
* Resolves [[NewInstance]] by finding and adding the outer scope to it if the object being
* constructed is an inner class.
*/
object ResolveNewInstance extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case n: NewInstance if n.childrenResolved && !n.resolved =>
val outer = OuterScopes.getOuterScope(n.cls)
if (outer == null) {
throw new AnalysisException(
s"Unable to generate an encoder for inner class `${n.cls.getName}` without " +
"access to the scope that this class was defined in.\n" +
"Try moving this class out of its parent class.")
}
n.copy(outerPointer = Some(outer))
}
}
}
/**
* Replace the [[UpCast]] expression by [[Cast]], and throw exceptions if the cast may truncate.
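   *
   * For example (illustrative), `spark.range(1).as[Int]` fails here because casting the
   * underlying `bigint` column down to `int` may truncate, while `spark.range(1).as[Long]`
   * resolves the [[UpCast]] into a simple [[Cast]].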
*/
object ResolveUpCast extends Rule[LogicalPlan] {
private def fail(from: Expression, to: DataType, walkedTypePath: Seq[String]) = {
val fromStr = from match {
case l: LambdaVariable => "array element"
case e => e.sql
}
throw new AnalysisException(s"Cannot up cast $fromStr from " +
s"${from.dataType.catalogString} to ${to.catalogString}.\n" +
"The type path of the target object is:\n" + walkedTypePath.mkString("", "\n", "\n") +
"You can either add an explicit cast to the input data or choose a higher precision " +
"type of the field in the target object")
}
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p if !p.childrenResolved => p
case p if p.resolved => p
case p => p transformExpressions {
case u @ UpCast(child, _, _) if !child.resolved => u
case UpCast(child, dt: AtomicType, _)
if SQLConf.get.getConf(SQLConf.LEGACY_LOOSE_UPCAST) &&
child.dataType == StringType =>
Cast(child, dt.asNullable)
case UpCast(child, dataType, walkedTypePath) if !Cast.canUpCast(child.dataType, dataType) =>
fail(child, dataType, walkedTypePath)
case UpCast(child, dataType, _) => Cast(child, dataType.asNullable)
}
}
}
/** Rule to mostly resolve, normalize and rewrite column names based on case sensitivity. */
object ResolveAlterTableChanges extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case a @ AlterTable(_, _, t: NamedRelation, changes) if t.resolved =>
// 'colsToAdd' keeps track of new columns being added. It stores a mapping from a
// normalized parent name of fields to field names that belong to the parent.
// For example, if we add columns "a.b.c", "a.b.d", and "a.c", 'colsToAdd' will become
// Map(Seq("a", "b") -> Seq("c", "d"), Seq("a") -> Seq("c")).
val colsToAdd = mutable.Map.empty[Seq[String], Seq[String]]
val schema = t.schema
val normalizedChanges = changes.flatMap {
case add: AddColumn =>
def addColumn(
parentSchema: StructType,
parentName: String,
normalizedParentName: Seq[String]): TableChange = {
val fieldsAdded = colsToAdd.getOrElse(normalizedParentName, Nil)
val pos = findColumnPosition(add.position(), parentName, parentSchema, fieldsAdded)
val field = add.fieldNames().last
colsToAdd(normalizedParentName) = fieldsAdded :+ field
TableChange.addColumn(
(normalizedParentName :+ field).toArray,
add.dataType(),
add.isNullable,
add.comment,
pos)
}
val parent = add.fieldNames().init
if (parent.nonEmpty) {
// Adding a nested field, need to normalize the parent column and position
val target = schema.findNestedField(parent, includeCollections = true, conf.resolver)
if (target.isEmpty) {
// Leave unresolved. Throws error in CheckAnalysis
Some(add)
} else {
val (normalizedName, sf) = target.get
sf.dataType match {
case struct: StructType =>
Some(addColumn(struct, parent.quoted, normalizedName :+ sf.name))
case other =>
Some(add)
}
}
} else {
// Adding to the root. Just need to normalize position
Some(addColumn(schema, "root", Nil))
}
case typeChange: UpdateColumnType =>
// Hive style syntax provides the column type, even if it may not have changed
val fieldOpt = schema.findNestedField(
typeChange.fieldNames(), includeCollections = true, conf.resolver)
if (fieldOpt.isEmpty) {
// We couldn't resolve the field. Leave it to CheckAnalysis
Some(typeChange)
} else {
val (fieldNames, field) = fieldOpt.get
if (field.dataType == typeChange.newDataType()) {
// The user didn't want the field to change, so remove this change
None
} else {
Some(TableChange.updateColumnType(
(fieldNames :+ field.name).toArray, typeChange.newDataType()))
}
}
case n: UpdateColumnNullability =>
// Need to resolve column
resolveFieldNames(
schema,
n.fieldNames(),
TableChange.updateColumnNullability(_, n.nullable())).orElse(Some(n))
case position: UpdateColumnPosition =>
position.position() match {
case after: After =>
// Need to resolve column as well as position reference
val fieldOpt = schema.findNestedField(
position.fieldNames(), includeCollections = true, conf.resolver)
if (fieldOpt.isEmpty) {
Some(position)
} else {
val (normalizedPath, field) = fieldOpt.get
val targetCol = schema.findNestedField(
normalizedPath :+ after.column(), includeCollections = true, conf.resolver)
if (targetCol.isEmpty) {
// Leave unchanged to CheckAnalysis
Some(position)
} else {
Some(TableChange.updateColumnPosition(
(normalizedPath :+ field.name).toArray,
ColumnPosition.after(targetCol.get._2.name)))
}
}
case _ =>
// Need to resolve column
resolveFieldNames(
schema,
position.fieldNames(),
TableChange.updateColumnPosition(_, position.position())).orElse(Some(position))
}
case comment: UpdateColumnComment =>
resolveFieldNames(
schema,
comment.fieldNames(),
TableChange.updateColumnComment(_, comment.newComment())).orElse(Some(comment))
case rename: RenameColumn =>
resolveFieldNames(
schema,
rename.fieldNames(),
TableChange.renameColumn(_, rename.newName())).orElse(Some(rename))
case delete: DeleteColumn =>
resolveFieldNames(schema, delete.fieldNames(), TableChange.deleteColumn)
.orElse(Some(delete))
case column: ColumnChange =>
// This is informational for future developers
throw new UnsupportedOperationException(
"Please add an implementation for a column change here")
case other => Some(other)
}
a.copy(changes = normalizedChanges)
}
/**
* Returns the table change if the field can be resolved, returns None if the column is not
* found. An error will be thrown in CheckAnalysis for columns that can't be resolved.
*/
private def resolveFieldNames(
schema: StructType,
fieldNames: Array[String],
copy: Array[String] => TableChange): Option[TableChange] = {
val fieldOpt = schema.findNestedField(
fieldNames, includeCollections = true, conf.resolver)
fieldOpt.map { case (path, field) => copy((path :+ field.name).toArray) }
}
private def findColumnPosition(
position: ColumnPosition,
parentName: String,
struct: StructType,
fieldsAdded: Seq[String]): ColumnPosition = {
position match {
case null => null
case after: After =>
(struct.fieldNames ++ fieldsAdded).find(n => conf.resolver(n, after.column())) match {
case Some(colName) =>
ColumnPosition.after(colName)
case None =>
throw new AnalysisException("Couldn't find the reference column for " +
s"$after at $parentName")
}
case other => other
}
}
}
}
/**
* Removes [[SubqueryAlias]] operators from the plan. Subqueries are only required to provide
* scoping information for attributes and can be removed once analysis is complete.
*/
object EliminateSubqueryAliases extends Rule[LogicalPlan] {
// This is also called in the beginning of the optimization phase, and as a result
// is using transformUp rather than resolveOperators.
def apply(plan: LogicalPlan): LogicalPlan = AnalysisHelper.allowInvokingTransformsInAnalyzer {
plan transformUp {
case SubqueryAlias(_, child) => child
}
}
}
/**
 * Removes [[Union]] operators from the plan when they have only a single child.
*/
object EliminateUnions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case Union(children) if children.size == 1 => children.head
}
}
/**
* Cleans up unnecessary Aliases inside the plan. Basically we only need Alias as a top level
* expression in Project(project list) or Aggregate(aggregate expressions) or
* Window(window expressions). Notice that if an expression has other expression parameters which
* are not in its `children`, e.g. `RuntimeReplaceable`, the transformation for Aliases in this
* rule can't work for those parameters.
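 *
 * For example (illustrative), a project-list entry such as `Alias(Alias(a, "inner"), "outer")`
 * is trimmed to `Alias(a, "outer")`, keeping only the top-level alias.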
*/
object CleanupAliases extends Rule[LogicalPlan] {
def trimAliases(e: Expression): Expression = {
e.transformDown {
case Alias(child, _) => child
case MultiAlias(child, _) => child
}
}
def trimNonTopLevelAliases(e: Expression): Expression = e match {
case a: Alias =>
a.copy(child = trimAliases(a.child))(
exprId = a.exprId,
qualifier = a.qualifier,
explicitMetadata = Some(a.metadata))
case a: MultiAlias =>
a.copy(child = trimAliases(a.child))
case other => trimAliases(other)
}
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case Project(projectList, child) =>
val cleanedProjectList =
projectList.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
Project(cleanedProjectList, child)
case Aggregate(grouping, aggs, child) =>
val cleanedAggs = aggs.map(trimNonTopLevelAliases(_).asInstanceOf[NamedExpression])
Aggregate(grouping.map(trimAliases), cleanedAggs, child)
case Window(windowExprs, partitionSpec, orderSpec, child) =>
val cleanedWindowExprs =
windowExprs.map(e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression])
Window(cleanedWindowExprs, partitionSpec.map(trimAliases),
orderSpec.map(trimAliases(_).asInstanceOf[SortOrder]), child)
case CollectMetrics(name, metrics, child) =>
val cleanedMetrics = metrics.map {
e => trimNonTopLevelAliases(e).asInstanceOf[NamedExpression]
}
CollectMetrics(name, cleanedMetrics, child)
// Operators that operate on objects should only have expressions from encoders, which should
// never have extra aliases.
case o: ObjectConsumer => o
case o: ObjectProducer => o
case a: AppendColumns => a
case other =>
other transformExpressionsDown {
case Alias(child, _) => child
}
}
}
/**
 * Ignores the event time watermark in batch queries, since watermarks are only supported in
 * Structured Streaming.
 * TODO: add this rule into the analyzer rule list.
*/
object EliminateEventTimeWatermark extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case EventTimeWatermark(_, _, child) if !child.isStreaming => child
}
}
/**
* Maps a time column to multiple time windows using the Expand operator. Since it's non-trivial to
* figure out how many windows a time column can map to, we over-estimate the number of windows and
* filter out the rows where the time column is not inside the time window.
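 *
 * For example (illustrative), `SELECT window(ts, '10 minutes'), count(*) FROM events
 * GROUP BY window(ts, '10 minutes')` maps each row to the struct describing its containing
 * 10-minute window before the aggregation runs.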
*/
object TimeWindowing extends Rule[LogicalPlan] {
import org.apache.spark.sql.catalyst.dsl.expressions._
private final val WINDOW_COL_NAME = "window"
private final val WINDOW_START = "start"
private final val WINDOW_END = "end"
/**
* Generates the logical plan for generating window ranges on a timestamp column. Without
* knowing what the timestamp value is, it's non-trivial to figure out deterministically how many
* window ranges a timestamp will map to given all possible combinations of a window duration,
 * slide duration and start time (offset). Therefore, we over-estimate the number of windows
 * there may be, and keep only the valid ones with a Filter. We use a final Project operator to
 * group the window columns into a struct so they can be accessed as `window.start` and
 * `window.end`.
*
* The windows are calculated as below:
* maxNumOverlapping <- ceil(windowDuration / slideDuration)
* for (i <- 0 until maxNumOverlapping)
* windowId <- ceil((timestamp - startTime) / slideDuration)
* windowStart <- windowId * slideDuration + (i - maxNumOverlapping) * slideDuration + startTime
* windowEnd <- windowStart + windowDuration
* return windowStart, windowEnd
*
 * This behaves as follows for the time 12:05 and the given parameters. The valid windows are
* marked with a +, and invalid ones are marked with a x. The invalid ones are filtered using the
* Filter operator.
* window: 12m, slide: 5m, start: 0m :: window: 12m, slide: 5m, start: 2m
* 11:55 - 12:07 + 11:52 - 12:04 x
* 12:00 - 12:12 + 11:57 - 12:09 +
* 12:05 - 12:17 + 12:02 - 12:14 +
*
* @param plan The logical plan
* @return the logical plan that will generate the time windows using the Expand operator, with
* the Filter operator for correctness and Project for usability.
*/
def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsUp {
case p: LogicalPlan if p.children.size == 1 =>
val child = p.children.head
val windowExpressions =
p.expressions.flatMap(_.collect { case t: TimeWindow => t }).toSet
val numWindowExpr = windowExpressions.size
// Only support a single window expression for now
if (numWindowExpr == 1 &&
windowExpressions.head.timeColumn.resolved &&
windowExpressions.head.checkInputDataTypes().isSuccess) {
val window = windowExpressions.head
val metadata = window.timeColumn match {
case a: Attribute => a.metadata
case _ => Metadata.empty
}
def getWindow(i: Int, overlappingWindows: Int): Expression = {
val division = (PreciseTimestampConversion(
window.timeColumn, TimestampType, LongType) - window.startTime) / window.slideDuration
val ceil = Ceil(division)
// if the division is equal to the ceiling, our record is the start of a window
val windowId = CaseWhen(Seq((ceil === division, ceil + 1)), Some(ceil))
val windowStart = (windowId + i - overlappingWindows) *
window.slideDuration + window.startTime
val windowEnd = windowStart + window.windowDuration
CreateNamedStruct(
Literal(WINDOW_START) ::
PreciseTimestampConversion(windowStart, LongType, TimestampType) ::
Literal(WINDOW_END) ::
PreciseTimestampConversion(windowEnd, LongType, TimestampType) ::
Nil)
}
val windowAttr = AttributeReference(
WINDOW_COL_NAME, window.dataType, metadata = metadata)()
if (window.windowDuration == window.slideDuration) {
val windowStruct = Alias(getWindow(0, 1), WINDOW_COL_NAME)(
exprId = windowAttr.exprId, explicitMetadata = Some(metadata))
val replacedPlan = p transformExpressions {
case t: TimeWindow => windowAttr
}
// For backwards compatibility we add a filter to filter out nulls
val filterExpr = IsNotNull(window.timeColumn)
replacedPlan.withNewChildren(
Filter(filterExpr,
Project(windowStruct +: child.output, child)) :: Nil)
} else {
val overlappingWindows =
math.ceil(window.windowDuration * 1.0 / window.slideDuration).toInt
val windows =
Seq.tabulate(overlappingWindows)(i => getWindow(i, overlappingWindows))
val projections = windows.map(_ +: child.output)
val filterExpr =
window.timeColumn >= windowAttr.getField(WINDOW_START) &&
window.timeColumn < windowAttr.getField(WINDOW_END)
val substitutedPlan = Filter(filterExpr,
Expand(projections, windowAttr +: child.output, child))
val renamedPlan = p transformExpressions {
case t: TimeWindow => windowAttr
}
renamedPlan.withNewChildren(substitutedPlan :: Nil)
}
} else if (numWindowExpr > 1) {
p.failAnalysis("Multiple time window expressions would result in a cartesian product " +
"of rows, therefore they are currently not supported.")
} else {
p // Return unchanged. Analyzer will throw exception later
}
}
}
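// Illustrative sketch (not part of the analyzer): the window arithmetic documented above for
// TimeWindowing, written with plain Longs in seconds for the documented example
// "window: 12m, slide: 5m, start: 0m" at 12:05. All names below are hypothetical.
object TimeWindowingArithmeticSketch {
  /** Returns the (start, end) pairs, in seconds, of the windows containing `timestamp`. */
  def windowsFor(timestamp: Long, windowDuration: Long, slideDuration: Long, startTime: Long): Seq[(Long, Long)] = {
    val maxNumOverlapping = math.ceil(windowDuration.toDouble / slideDuration).toInt
    val division = (timestamp - startTime).toDouble / slideDuration
    val ceilId = math.ceil(division).toLong
    // A record sitting exactly on a slide boundary starts a new window (the CaseWhen above).
    val windowId = if (ceilId.toDouble == division) ceilId + 1 else ceilId
    val candidates = (0 until maxNumOverlapping).map { i =>
      val windowStart = (windowId + i - maxNumOverlapping) * slideDuration + startTime
      (windowStart, windowStart + windowDuration)
    }
    // The Filter step: keep only the windows that actually contain the timestamp.
    candidates.filter { case (start, end) => timestamp >= start && timestamp < end }
  }
  // windowsFor(12 * 3600 + 5 * 60, 12 * 60, 5 * 60, 0) yields the three valid windows from the
  // comment above: 11:55-12:07, 12:00-12:12 and 12:05-12:17 (expressed as seconds since midnight).
}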
/**
* Resolve a [[CreateNamedStruct]] if it contains [[NamePlaceholder]]s.
*/
object ResolveCreateNamedStruct extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveExpressions {
case e: CreateNamedStruct if !e.resolved =>
val children = e.children.grouped(2).flatMap {
case Seq(NamePlaceholder, e: NamedExpression) if e.resolved =>
Seq(Literal(e.name), e)
case kv =>
kv
}
CreateNamedStruct(children.toList)
}
}
/**
* The aggregate expressions from a subquery that reference the outer query block are pushed
* down to the outer query block for evaluation. The rule below updates such outer references
* to AttributeReferences that refer to attributes from the parent/outer query block.
*
* For example (SQL):
* {{{
* SELECT l.a FROM l GROUP BY 1 HAVING EXISTS (SELECT 1 FROM r WHERE r.d < min(l.b))
* }}}
* Plan before the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < min(outer(b#227))) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
* Plan after the rule.
* Project [a#226]
* +- Filter exists#245 [min(b#227)#249]
* : +- Project [1 AS 1#247]
* : +- Filter (d#238 < outer(min(b#227)#249)) <-----
* : +- SubqueryAlias r
* : +- Project [_1#234 AS c#237, _2#235 AS d#238]
* : +- LocalRelation [_1#234, _2#235]
* +- Aggregate [a#226], [a#226, min(b#227) AS min(b#227)#249]
* +- SubqueryAlias l
* +- Project [_1#223 AS a#226, _2#224 AS b#227]
* +- LocalRelation [_1#223, _2#224]
*/
object UpdateOuterReferences extends Rule[LogicalPlan] {
private def stripAlias(expr: Expression): Expression = expr match { case a: Alias => a.child }
private def updateOuterReferenceInSubquery(
plan: LogicalPlan,
refExprs: Seq[Expression]): LogicalPlan = {
plan resolveExpressions { case e =>
val outerAlias =
refExprs.find(stripAlias(_).semanticEquals(stripOuterReference(e)))
outerAlias match {
case Some(a: Alias) => OuterReference(a.toAttribute)
case _ => e
}
}
}
def apply(plan: LogicalPlan): LogicalPlan = {
plan resolveOperators {
case f @ Filter(_, a: Aggregate) if f.resolved =>
f transformExpressions {
case s: SubqueryExpression if s.children.nonEmpty =>
// Collect the aliases from output of aggregate.
val outerAliases = a.aggregateExpressions collect { case a: Alias => a }
// Update the subquery plan to record the OuterReference to point to outer query plan.
s.withNewPlan(updateOuterReferenceInSubquery(s.plan, outerAliases))
}
}
}
}
|
goldmedal/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
|
Scala
|
apache-2.0
| 157,410 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.mvc.view.scalate
import javax.servlet.ServletContext
import scala.collection.mutable.Map
import org.fusesource.scalate.{ Binding, TemplateEngine }
import com.tzavellas.coeus.mvc.view.helper.DefaultViewHelpers
class ScalateConfigurator(val servletContext: ServletContext) {
val bindings = Map[String, Binding]()
val attributes = Map[String, Any]()
var templatePrefix = "/WEB-INF/templates/"
var templateSuffix = ".ssp"
bind("c" -> new DefaultViewHelpers(servletContext))
def configure(engine: TemplateEngine) { }
def bind[T: Manifest](binding: (String, T),
importMembers: Boolean = false,
isImplicit: Boolean = false) = {
attributes += binding
val name = binding._1
val className = implicitly[Manifest[T]].runtimeClass.getName
bindings += (name -> new Binding(
name, className, importMembers=importMembers, isImplicit=isImplicit))
}
}
|
sptz45/coeus
|
src/main/scala/com/tzavellas/coeus/mvc/view/scalate/ScalateConfigurator.scala
|
Scala
|
apache-2.0
| 1,100 |
package skinny.worker
import skinny.logging.Logging
import org.joda.time.DateTime
import java.util.concurrent._
/**
* Service which manages workers.
*/
case class SkinnyWorkerService(name: String = "skinny-framework-worker-service", threadPoolSize: Int = 10) extends Logging {
logger.info(s"SkinnyWorkerService (name: ${name}) is activated.")
/**
* Thread pool for this worker service.
*/
private[this] val pool = Executors.newScheduledThreadPool(threadPoolSize, new ThreadFactory() {
val threadGroup = new ThreadGroup(name)
def newThread(r: Runnable): Thread = {
val t = new Thread(threadGroup, r);
t.setDaemon(true)
t.setName(t.getThreadGroup.getName + "-thread-" + t.getId())
t
}
})
/**
* Registers new worker to this service.
*/
def registerSkinnyWorker(worker: SkinnyWorker, initial: Int, interval: Int, timeUnit: TimeUnit = TimeUnit.SECONDS) = {
pool.scheduleAtFixedRate(worker, initial, interval, timeUnit)
logger.debug(s"New worker has been scheduled. " +
s"(class: ${worker.getClass.getCanonicalName}, initial: ${initial}, interval: ${interval}, time unit: ${timeUnit})")
}
/**
* Schedules this worker every fixed milliseconds.
*/
def everyFixedMilliseconds(worker: SkinnyWorker, interval: Int) = {
registerSkinnyWorker(worker, 100, interval, TimeUnit.MILLISECONDS)
}
/**
* Schedules this worker every fixed seconds.
*/
def everyFixedSeconds(worker: SkinnyWorker, interval: Int) = {
registerSkinnyWorker(worker, 1, interval, TimeUnit.SECONDS)
}
/**
* Schedules this worker every fixed minutes.
*/
def everyFixedMinutes(worker: SkinnyWorker, interval: Int) = {
registerSkinnyWorker(worker, 1, (interval * 60), TimeUnit.SECONDS)
}
/**
* Schedules this worker hourly.
*/
def hourly(worker: SkinnyWorker, fixedMinute: Int = 0) = {
val scheduledDate = {
val date = DateTime.now.withMinuteOfHour(fixedMinute)
if (date.isAfterNow) date else date.plusHours(1)
}
val initialSeconds = ((scheduledDate.getMillis - DateTime.now.getMillis) / 1000).toInt
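// e.g. if "now" is 10:23 and fixedMinute = 0, scheduledDate becomes 11:00, so the first run starts in roughly 37 minutes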
registerSkinnyWorker(worker, initialSeconds, 3600, TimeUnit.SECONDS)
}
/**
* Schedules this worker daily.
*/
def daily(worker: SkinnyWorker, fixedHour: Int = 9, fixedMinute: Int = 0) = {
val scheduledDate = {
val date = DateTime.now.withHourOfDay(fixedHour).withMinuteOfHour(fixedMinute)
if (date.isAfterNow) date else date.plusDays(1)
}
val initialSeconds = ((scheduledDate.getMillis - DateTime.now.getMillis) / 1000).toInt
registerSkinnyWorker(worker, initialSeconds, 3600 * 24, TimeUnit.SECONDS)
}
/**
* Shutdown this worker service safely.
*/
def shutdownNow(awaitSeconds: Int = 10) = {
// disable new tasks from being submitted
pool.shutdown()
try {
// cancel currently executing tasks
pool.shutdownNow()
// wait a while for tasks to respond to being cancelled
if (!pool.awaitTermination(awaitSeconds, TimeUnit.SECONDS)) {
logger.warn("Failed to terminate all worker thread")
} else {
logger.info(s"SkinnyWorkerService (name: ${name}) is abandoned safely.")
}
} catch {
case e: InterruptedException =>
// (re-) cancel if current thread also interrupted
pool.shutdownNow()
// preserve interrupt status
logger.info(s"SkinnyWorkerService (name: ${name}) will be interrupted.")
Thread.currentThread.interrupt()
}
}
}
|
BlackPrincess/skinny-framework
|
framework/src/main/scala/skinny/worker/SkinnyWorkerService.scala
|
Scala
|
mit
| 3,517 |
package vonsim.webapp
import vonsim.utils.CollectionUtils._
import vonsim.simulator.InstructionInfo
import scala.scalajs.js.annotation.ScalaJSDefined
import scala.scalajs.js.annotation.JSName
import org.scalajs.dom.html._
import org.scalajs.dom.raw.HTMLElement
import org.scalajs.dom
import scala.scalajs.js
import js.JSConverters._
import vonsim.simulator.Simulator
import scalatags.JsDom.all._
import vonsim.simulator.SimulatorProgramExecuting
import vonsim.assembly.Compiler.CompilationResult
import vonsim.assembly.Location
import vonsim.assembly.LexerError
import vonsim.assembly.ParserError
import vonsim.assembly.SemanticError
import vonsim.assembly.CompilationError
import scala.collection.mutable.ListBuffer
import vonsim.webapp.tutorials.Tutorial
import vonsim.assembly.lexer.Token
import vonsim.assembly.lexer.AL
import vonsim.assembly.lexer.RegisterToken
import vonsim.assembly.lexer.DW
import vonsim.assembly.lexer.DB
class TutorialUIControl(s: VonSimState,val tutorial:Tutorial,tutorialUpdated:Function0[Unit]) extends VonSimUI(s) {
def buttonFactory(text:String,iconClass:String)=a(cls:="tutorialButton btn btn-primary"
// ,img(cls:="",src := imageUrl, alt := s)
,i(cls:=s"fa $iconClass")
,text
,title := text
).render
val nextButton=buttonFactory("Siguiente","fa-next")
val previousButton=buttonFactory("Anterior","fa-previous")
val current=span().render
val total=span().render
val tutorialIndexUI=new TutorialIndexUI(s,tutorial,(stepSelected:Int)=>{
tutorial.goto(stepSelected)
update()
tutorialUpdated.apply()
})
val tutorialTitle=span(id := "tutorialTitle"
,a(cls:="helpButton"
,data("toggle"):="modal"
,data("target"):="#tutorialIndexModal"
,tutorial.title
)
,tutorialIndexUI.root
).render
val root=div(id:="tutorialControls"
,tutorialTitle
,div(id:="tutorialControlsInner"
,previousButton
,span(id:="tutorialCount",current,"/",total)
,nextButton
)).render
update()
def setDisabled(button:Anchor,disabled:Boolean){
disabled match{
case true => button.classList.add("disabled")
case false => button.classList.remove("disabled")
}
}
def update(){
setDisabled(nextButton, !tutorial.canForward(s))
setDisabled(previousButton, !tutorial.canBackward(s))
current.textContent=(tutorial.step+1).toString()
total.textContent=tutorial.steps.length.toString()
}
previousButton.onclick=(e:Any) =>{
tutorial.previous
update()
tutorialUpdated.apply()
}
nextButton.onclick=(e:Any) =>{
tutorial.next
update()
tutorialUpdated.apply()
}
def simulatorEvent() {
}
def simulatorEvent(i:InstructionInfo) {
simulatorEvent()
}
def compilationEvent(){
}
}
class TutorialIndexUI(s:VonSimState,tutorial:Tutorial,selectStep:Function1[Int,Unit]) extends ModalUI(s,"tutorialIndexModal"){
def getHeader()={
div(cls:="modal-header-help",img(cls:= "modal-icon", alt := "Von Sim Icon", title := s.uil.iconTitle, src := "img/icon.png")
,h4(cls:="modal-title",tutorial.title)
,button(`type`:="button",cls:="close", data("dismiss"):="modal",i(cls:="fa fa-close"))
).render
}
def getBody()={
val stepList=ol(cls:="list-group").render
for ((step,i) <- tutorial.steps.zipWithIndex){
val indexItem=a().render
indexItem.innerHTML=step.title
indexItem.onclick = (e:Any)=>{
selectStep(i)
close()
}
stepList.appendChild(li(cls:="list-group-item",indexItem).render)
}
val body=div(cls:=""
,h3("Índice")
,stepList
).render
body
}
def getFooter()={
div(cls:=""
).render
}
def simulatorEvent() {
}
def simulatorEvent(i:InstructionInfo) {
}
def compilationEvent(){
}
}
class TutorialUI(s: VonSimState,val tutorial:Tutorial,val mainUI:MainUI) extends VonSimUI(s) {
val controls=new TutorialUIControl(s,tutorial,() => {
displayTutorialStep()
})
val content=div().render
val subtitle=span(id:="tutorialStepTitle").render
val header=h3(subtitle)
val root = div(id := "tutorial"
,div(id:="tutorialHeader"
,controls.root
,header)
,div(id:="tutorialContent",content)
).render
def startTutorial(){
mainUI.editorUI.setCode(tutorial.initialCode)
// title.textContent=tutorial.title
displayTutorialStep()
}
def preprocessContent(content:String)={
var result=content
val exceptedRegisters=List(AL())
val registers=Token.registers.filter(r => !exceptedRegisters.contains(r))
result=preprocessContentForTokens(result,registers,"register","")
result=preprocessContentForTokens(result,exceptedRegisters,"register","_")
result=preprocessValues(result)
result=preprocessContentForTokens(result,Token.ops,"instruction","")
result=preprocessContentForTokens(result,List(DB(),DW()),"type","")
result
}
def preprocessValues(content:String)={
val valueClass="value"
var result=content
result=result.replaceAll(boundary+"([0-9][0-9A-Faf]*h)(?!_)","""$1<span class="value">$2</span>""")
result=result.replaceAll(boundary+"([0-1]+b)(?!_)","""$1<span class="value">$2</span>""")
result=result.replaceAll(boundary+"""(-?[0-9]+)([\b\s.,<])(?!_)""","""$1<span class="value">$2</span>$3""")
result=result.replaceAll("""(-?[0-9]+)(_)""","""$1""")
result
}
def tokenToKeyword(l:List[Token])=l.map(r => {
val s=r.toString().toLowerCase()
val l=s.length()
s.subSequence(0, l-2)
})
def boundary= """([,.=+?¿()]|\b)"""
def preprocessContentForTokens(content:String,tokens:List[Token],cls:String,prefix:String)={
var result=content
val registers=tokens
val keywords = tokenToKeyword(registers)
for (keyword <- keywords){
val replacement="<span class=\""+cls+"\">$2</span>"
val pattern =boundary+prefix+"("+keyword.toString()+")"+boundary
result=result.replaceAll(pattern, "$1"+replacement+"$3")
val patternNeg=boundary+prefix+"("+keyword.toString()+")(_)"+boundary
result=result.replaceAll(patternNeg, "$1$2$4")
}
result
}
def displayTutorialStep(){
subtitle.innerHTML=preprocessContent(tutorial.current.title)
content.innerHTML=preprocessContent(tutorial.current.content)
mainUI.applyUIConfig(tutorial.current.config)
dom.window.location.hash=(tutorial.step+1).toString()
tutorial.current.code match {
case Some(s)=> mainUI.editorUI.setCode(s)
case None =>
}
val elements=content.getElementsByClassName("answer")
for (i <- 0 until elements.length){
val element=elements.item(i)
// println(element)
val answerId="answer"+i
val link = button(cls:="btn btn-info","Respuesta",data("toggle"):="collapse",data("target"):="#"+answerId).render
val answerContainer=div(cls:="answerContainer",link).render
link.onclick=(e:Any) =>{
}
element.parentNode.replaceChild(answerContainer, element)
val answerContent=div(id:=answerId,cls:="answerContent collapse",element).render
//container.appendChild(answerContainer)
answerContainer.appendChild(answerContent)
// element.insertBefore(link, element.firstChild)
// element.attributes.setNamedItem(arg)
}
}
def simulatorEvent() {
// TODO check if code can be run and if the cpu is halted to allow enable buttons
if (s.isSimulatorExecuting()){
disable()
}else{
enable()
}
}
// def disable(){root.disabled=true}
// def enable() {root.disabled=false}
def simulatorEvent(i:InstructionInfo) {
simulatorEvent()
}
def compilationEvent(){
s.c match {
case Left(f) => {
}
case Right(f) => {
}
}
}
}
|
facundoq/vonsim
|
src/main/scala/vonsim/webapp/TutorialUI.scala
|
Scala
|
agpl-3.0
| 8,001 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.common
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
object TensorOperation {
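// Worked example of the broadcasting rule implemented below: expandSize of a 3x1x5 tensor
// and a 4x5 tensor is Array(3, 4, 5) (trailing dimensions are aligned, size-1 dimensions are stretched).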
def expandSize[T: ClassTag](tensor: Tensor[T], other: Tensor[T]): Array[Int] = {
val errorMsg = s"tensor size not match ${tensor.size.mkString("x")} " +
s"${other.size.mkString("x")}"
val longTensor = if (tensor.dim() > other.dim()) tensor else other
val shortTensor = if (tensor.dim() > other.dim()) other else tensor
val ndim = longTensor.nDimension()
val delta = longTensor.nDimension() - shortTensor.nDimension()
val size = new Array[Int](ndim)
var i = ndim - 1
while (i >= delta) {
require(longTensor.size(i + 1) == shortTensor.size(i + 1 - delta) ||
longTensor.size(i + 1) == 1 ||
shortTensor.size(i + 1 - delta) == 1, errorMsg)
size(i) = math.max(longTensor.size(i + 1), shortTensor.size(i + 1 - delta))
i -= 1
}
while (i >= 0) {
size(i) = longTensor.size(i + 1)
i -= 1
}
size
}
def expandTensor[T: ClassTag](tensor: Tensor[T], tensor2: Tensor[T])
(implicit ev: TensorNumeric[T]): Tensor[T] = {
val targetSize = expandSize(tensor, tensor2)
val expandStrides = new Array[Int](targetSize.length)
val expandStridesX = new Array[Int](targetSize.length)
var i = targetSize.length - 1
val delta2 = targetSize.length - tensor2.nDimension
while(i >= delta2) {
if (tensor2.size(i + 1- delta2) != 1) expandStridesX(i) = tensor2.stride(i + 1- delta2)
i -= 1
}
val expandX = Tensor[T](
tensor2.storage(),
tensor2.storageOffset(),
targetSize,
expandStridesX
)
if (targetSize.product != tensor.nElement()) {
i = targetSize.length - 1
val delta1 = targetSize.length - tensor.nDimension
while (i >= delta1) {
if (tensor.size(i + 1 - delta1) != 1) expandStrides(i) = tensor.stride(i + 1 - delta1)
i -= 1
}
val tensor1 = Tensor[T](
tensor.storage,
tensor.storageOffset(),
targetSize,
expandStrides
)
val newTensor = Tensor[T]().resize(targetSize).add(tensor1)
tensor.set(newTensor)
}
expandX
}
def subTensor[T: ClassTag](tensor: Tensor[T], tensor2: Tensor[T])
(implicit ev: TensorNumeric[T]): Tensor[T] = {
val expandedTensor = expandTensor(tensor, tensor2).contiguous()
tensor.sub(expandedTensor)
tensor
}
def divTensor[T: ClassTag](tensor: Tensor[T], tensor2: Tensor[T])
(implicit ev: TensorNumeric[T]): Tensor[T] = {
val expandedTensor = expandTensor(tensor, tensor2).contiguous()
tensor.div(expandedTensor)
tensor
}
}
|
intel-analytics/analytics-zoo
|
zoo/src/main/scala/com/intel/analytics/zoo/common/TensorOperation.scala
|
Scala
|
apache-2.0
| 3,400 |
package special.collection
import scalan.{NeverInline, RType}
import spire.syntax.all.cfor
class CViewColl[@specialized A, @specialized B](val source: Coll[A], val f: A => B)(implicit val tItem: RType[B]) extends Coll[B] {
private var isCalculated: Array[Boolean] = Array.ofDim[Boolean](source.length)(RType.BooleanType.classTag)
private var items: Array[B] = Array.ofDim[B](source.length)(tItem.classTag)
private var calculatedCount = 0
def fromPartialCalculation(calculated: Array[Boolean], calculatedItems: Array[B]): CViewColl[A, B] = {
if (calculated.length != source.length || calculatedItems.length != source.length)
throw new RuntimeException("Can't make partial collection: calculated items dimension != source dimension")
isCalculated = calculated
items = calculatedItems
calculatedCount = 0
cfor(0)(_ < isCalculated.length, _ + 1) { i =>
if (isCalculated(i)) {
calculatedCount += 1
}
}
this
}
private def isAllItemsCalculated(): Boolean = calculatedCount == length
private def calculateItem(index: Int): Unit = {
items(index) = f(source(index))
isCalculated(index) = true
}
private def ensureItemNoCalcCountChange(index: Int): Unit = {
if (!isCalculated(index)) {
calculateItem(index)
}
}
private def ensureItem(index: Int): Unit = {
if (!isCalculated(index)) {
calculateItem(index)
calculatedCount += 1
}
}
@inline private def ensureAndGetItem(index: Int): B = {
ensureItem(index)
items(index)
}
override def builder: CollBuilder = new CollOverArrayBuilder
@NeverInline
override def toArray: Array[B] = {
if (!isAllItemsCalculated()) {
cfor(0)(_ < length, _ + 1) { i =>
ensureItemNoCalcCountChange(i)
}
calculatedCount = length
}
items
}
@NeverInline
override def length: Int = source.length
@NeverInline
override def isEmpty: Boolean = source.isEmpty
@NeverInline
override def nonEmpty: Boolean = !isEmpty
@NeverInline
override def apply(i: Int): B = {
if (!isDefinedAt(i))
throw new ArrayIndexOutOfBoundsException()
ensureAndGetItem(i)
}
@NeverInline
override def isDefinedAt(idx: Int): Boolean = (idx >= 0) && (idx < length)
@NeverInline
override def getOrElse(index: Int, default: B): B = if (isDefinedAt(index)) ensureAndGetItem(index) else default
@NeverInline
override def map[@specialized C: RType](g: B => C): Coll[C] = builder.makeView(this, g)
@NeverInline
override def zip[@specialized C](ys: Coll[C]): Coll[(B, C)] = builder.pairColl(this, ys)
@NeverInline
override def exists(p: B => Boolean): Boolean = {
cfor(0)(_ < length, _ + 1) { i =>
val found = p(ensureAndGetItem(i))
if (found) return true
}
false
}
@NeverInline
override def forall(p: B => Boolean): Boolean = toArray.forall(p)
@NeverInline
override def filter(p: B => Boolean): Coll[B] = builder.fromArray(toArray)(tItem).filter(p)
@NeverInline
override def foldLeft[C](zero: C, op: ((C, B)) => C): C = toArray.foldLeft(zero)((item1, item2) => op((item1, item2)))
@NeverInline
override def indices: Coll[Int] = builder.fromArray((0 until source.length).toArray)
@NeverInline
override def flatMap[C: RType](g: B => Coll[C]): Coll[C] = builder.fromArray(toArray)(tItem).flatMap(g)
@NeverInline
override def segmentLength(p: B => Boolean, from: Int): Int = {
val trueFrom = math.max(0, from)
cfor(trueFrom)(_ < length, _ + 1) { i =>
val checkResult = p(ensureAndGetItem(i))
if (!checkResult) {
return i - trueFrom
}
}
length - trueFrom
}
@NeverInline
override def indexWhere(p: B => Boolean, from: Int): Int = {
val trueFrom = math.max(0, from)
cfor(trueFrom)(_ < length, _ + 1) { i =>
val found = p(ensureAndGetItem(i))
if (found) return i
}
-1
}
@NeverInline
override def lastIndexWhere(p: B => Boolean, end: Int): Int = toArray.lastIndexWhere(p, end)
@NeverInline
override def take(n: Int): Coll[B] = {
if (n <= 0)
return builder.emptyColl(tItem)
if (n > length)
return this
slice(0, n)
}
@NeverInline
override def partition(pred: B => Boolean): (Coll[B], Coll[B]) = builder.fromArray(toArray)(tItem).partition(pred)
@NeverInline
override def patch(from: Int,
patch: Coll[B],
replaced: Int): Coll[B] = {
if (length > 0) {
val start = math.max(0, from) // check if from is non-negative
val trueReplace = math.max(replaced, 0) // check if replace is non-negative
/*
* According to patch specification new length is the old length + patch length - replacedCount.
* replacedCount is min(trueReplace, length - start) since it can turn out that trueReplace is greater than
* the remaining length of the array
*/
val newLength = patch.length + length - math.min(trueReplace, length - start)
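// e.g. length = 5, start = 2, patch.length = 3, trueReplace = 2  =>  newLength = 3 + 5 - min(2, 3) = 6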
// At first we copy all items at [0, start), since they are kept unchanged
var itemsCopy = Array.ofDim[B](newLength)(tItem.classTag)
Array.copy(items, 0, itemsCopy, 0, start)
// There we put patch items after unchanged items from [0, start)
Array.copy(patch.toArray, 0, itemsCopy, start, patch.length)
// If there are any elements left in the rest of items and not all of them should be replaced, then we finally
// copy them to the end of new items
if (start + trueReplace < length)
Array.copy(items, start + trueReplace, itemsCopy, start + patch.length, length - start - trueReplace)
// Here's the same procedure as was with items
var calcCopy = Array.ofDim[Boolean](newLength)(RType.BooleanType.classTag)
Array.copy(isCalculated, 0, calcCopy, 0, start)
if (start + trueReplace < length)
Array.copy(isCalculated, start + trueReplace, calcCopy, start + patch.length, length - start - trueReplace)
// mark patched items as calculated
cfor(start)(_ < start + patch.length, _ + 1) { i =>
calcCopy(i) = true
}
/*
* patchColl solves the problem of missing source elements for the patch: it reuses the first
* source element as the source for all elements of the patch. If there are no source elements (the collection
* is empty) then the current collection is converted to CollOverArray and CollOverArray's patch is called (the else branch below)
*/
val patchColl = new CReplColl(source(0), patch.length)(source.tItem)
builder.makePartialView(source.patch(start, patchColl, replaced), f, calcCopy, itemsCopy)(tItem)
} else {
builder.fromArray(toArray).patch(from, patch, replaced)
}
}
@NeverInline
override def updated(index: Int, elem: B): Coll[B] = {
if (!isDefinedAt(index))
throw new IndexOutOfBoundsException()
var itemsCopy = items.clone()
var calcCopy = isCalculated.clone()
calcCopy(index) = true
itemsCopy(index) = elem
builder.makePartialView(source, f, calcCopy, itemsCopy)
}
@NeverInline
override def updateMany(indexes: Coll[Int],
values: Coll[B]): Coll[B] = {
// here we copy items and information about which items have been calculated already
var itemsCopy = items.clone()
var calcCopy = isCalculated.clone()
// here we update items with new ones from values at indexes from indexes collection
cfor(0)(_ < indexes.length, _ + 1) { i =>
itemsCopy(indexes(i)) = values(i)
calcCopy(indexes(i)) = true // updated elements should be surely marked as calculated
}
builder.makePartialView(source, f, calcCopy, itemsCopy)(tItem)
}
@NeverInline
override def mapReduce[K: RType, V: RType](m: B => (K, V),
r: ((V, V)) => V): Coll[(K, V)] = builder.fromArray(toArray)(tItem).mapReduce(m, r)
@NeverInline
override def unionSet(that: Coll[B]): Coll[B] = builder.fromArray(toArray)(tItem).unionSet(that)
@NeverInline
override def sum(m: Monoid[B]): B = toArray.foldLeft(m.zero)((b, a) => m.plus(b, a))
@NeverInline
override def slice(from: Int, until: Int): Coll[B] = {
if (until <= 0 || until - from <= 0)
return builder.emptyColl(tItem)
val start = math.max(0, from)
val end = math.min(until, length)
val sliceLength = end - start
val itemsCopy = Array.ofDim[B](sliceLength)(tItem.classTag)
Array.copy(items, start, itemsCopy, 0, sliceLength)
val calcCopy = Array.ofDim[Boolean](sliceLength)(RType.BooleanType.classTag)
Array.copy(isCalculated, start, calcCopy, 0, sliceLength)
builder.makePartialView(source.slice(from, until), f, calcCopy, itemsCopy)
}
@NeverInline
override def append(other: Coll[B]): Coll[B] = builder.fromArray(toArray)(tItem).append(other)
@NeverInline
override def reverse: Coll[B] = {
builder.makePartialView(source.reverse, f, isCalculated.reverse, items.reverse)(tItem)
}
override private[collection] def isReplArray(len: Int, value: B) = ???
}
|
ScorexFoundation/sigmastate-interpreter
|
library-impl/src/main/scala/special/collection/ViewColls.scala
|
Scala
|
mit
| 9,083 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.examples
import breeze.linalg.{Vector, DenseVector, squaredDistance}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.SparkContext._
/**
* K-means clustering.
*
* This is an example implementation for learning how to use Spark. For more conventional use,
* please refer to org.apache.spark.mllib.clustering.KMeans
*/
object SparkKMeans {
def parseVector(line: String): Vector[Double] = {
DenseVector(line.split(' ').map(_.toDouble))
}
def closestPoint(p: Vector[Double], centers: Array[Vector[Double]]): Int = {
var bestIndex = 0
var closest = Double.PositiveInfinity
for (i <- 0 until centers.length) {
val tempDist = squaredDistance(p, centers(i))
if (tempDist < closest) {
closest = tempDist
bestIndex = i
}
}
bestIndex
}
def showWarning() {
System.err.println(
"""WARN: This is a naive implementation of KMeans Clustering and is given as an example!
|Please use the KMeans method found in org.apache.spark.mllib.clustering
|for more conventional use.
""".stripMargin)
}
def main(args: Array[String]) {
if (args.length < 3) {
System.err.println("Usage: SparkKMeans <file> <k> <convergeDist>")
System.exit(1)
}
showWarning()
val sparkConf = new SparkConf().setAppName("SparkKMeans")
val sc = new SparkContext(sparkConf)
val lines = sc.textFile(args(0))
val data = lines.map(parseVector _).cache()
val K = args(1).toInt
val convergeDist = args(2).toDouble
val kPoints = data.takeSample(withReplacement = false, K, 42).toArray
var tempDist = 1.0
while(tempDist > convergeDist) {
val closest = data.map (p => (closestPoint(p, kPoints), (p, 1)))
val pointStats = closest.reduceByKey{case ((x1, y1), (x2, y2)) => (x1 + x2, y1 + y2)}
val newPoints = pointStats.map {pair =>
(pair._1, pair._2._1 * (1.0 / pair._2._2))}.collectAsMap()
tempDist = 0.0
for (i <- 0 until K) {
tempDist += squaredDistance(kPoints(i), newPoints(i))
}
for (newP <- newPoints) {
kPoints(newP._1) = newP._2
}
println("Finished iteration (delta = " + tempDist + ")")
}
println("Final centers:")
kPoints.foreach(println)
sc.stop()
}
}
|
javachen/learning-spark
|
src/main/scala/org/apache/spark/examples/SparkKMeans.scala
|
Scala
|
apache-2.0
| 3,138 |
/**
* <slate_header>
* author: Kishore Reddy
* url: https://github.com/kishorereddy/scala-slate
* copyright: 2016 Kishore Reddy
* license: https://github.com/kishorereddy/scala-slate/blob/master/LICENSE.md
* desc: a scala micro-framework
* usage: Please refer to license on github for more info.
* </slate_header>
*/
package slate.http
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport
import spray.json.DefaultJsonProtocol
object HttpJson extends DefaultJsonProtocol with SprayJsonSupport {
}
|
kishorereddy/akka-http
|
src/main/scala/slate/http/HttpJson.scala
|
Scala
|
mit
| 531 |
/*
* Copyright 2015 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.utils
import joptsimple.util.KeyValuePair
/**
* Provides utility methods for jsimple-opt key pair values.
*/
object KeyValuePairUtils {
val DefaultDelimiter = ","
/**
* Transforms the provided string into a list of KeyValuePair objects.
* @param keyValuePairString The string representing the series of keys and
* values
* @param delimiter The delimiter used for splitting up the string
* @return The list of KeyValuePair objects
*/
def stringToKeyValuePairSeq(
keyValuePairString: String,
delimiter: String = DefaultDelimiter
): Seq[KeyValuePair] = {
require(keyValuePairString != null, "KeyValuePair string cannot be null!")
keyValuePairString
.split(delimiter)
.map(_.trim)
.filterNot(_.isEmpty)
.map(KeyValuePair.valueOf)
.toSeq
}
/**
* Transforms the provided list of KeyValuePair elements into a string.
* @param keyValuePairSeq The sequence of KeyValuePair objects
* @param delimiter The separator between string KeyValuePair
* @return The resulting string from the list of KeyValuePair objects
*/
def keyValuePairSeqToString(
keyValuePairSeq: Seq[KeyValuePair],
delimiter: String = DefaultDelimiter
): String = {
require(keyValuePairSeq != null, "KeyValuePair sequence cannot be null!")
keyValuePairSeq
.map(pair => pair.key + "=" + pair.value)
.mkString(delimiter)
}
}
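// Illustrative usage sketch (not part of the kernel API); the configuration strings below are
// hypothetical examples showing a round trip through the two helpers above.
object KeyValuePairUtilsUsageSketch {
  import joptsimple.util.KeyValuePair

  // Yields pairs for spark.master -> local[2] and spark.app.name -> demo
  val pairs: Seq[KeyValuePair] =
    KeyValuePairUtils.stringToKeyValuePairSeq("spark.master=local[2], spark.app.name=demo")

  // Renders back to "spark.master=local[2],spark.app.name=demo" (no spaces after the delimiter)
  val rendered: String = KeyValuePairUtils.keyValuePairSeqToString(pairs)
}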
|
Drooids/spark-kernel
|
kernel-api/src/main/scala/com/ibm/spark/utils/KeyValuePairUtils.scala
|
Scala
|
apache-2.0
| 2,062 |
package cobase.user
import java.util.UUID
import com.mohiva.play.silhouette.api.{Identity, LoginInfo}
/**
* The user object.
*
* @param id The unique ID of the user.
* @param loginInfo The linked login info.
* @param firstName Maybe the first name of the authenticated user.
* @param lastName Maybe the last name of the authenticated user.
* @param fullName Maybe the full name of the authenticated user.
* @param email Maybe the email of the authenticated provider.
* @param avatarURL Maybe the avatar URL of the authenticated provider.
*/
case class User(
id: UUID,
loginInfo: LoginInfo,
firstName: Option[String],
lastName: Option[String],
fullName: Option[String],
email: Option[String],
avatarURL: Option[String]
) extends Identity
|
Cobase/cobase-pro
|
app/cobase/user/User.scala
|
Scala
|
mit
| 764 |
package org.scaladebugger.api.lowlevel.events
import org.scaladebugger.api.lowlevel.events.EventType.EventType
/**
* Represents an event manager whose operations do nothing.
*/
class DummyEventManager extends EventManager {
/**
* Adds the event function to this manager. The return value of the handler
* function contributes towards whether or not to resume the event set.
*
* @param eventHandlerId The id to associate with the event handler
* @param eventType The type of the event to add a function
* @param eventHandler The function to add, taking the occurring event and
* a collection of retrieved data from the event
* @param eventArguments The arguments used when determining whether or not to
* invoke the event handler
*
* @return The id associated with the event handler
*/
override def addEventHandlerWithId(
eventHandlerId: String,
eventType: EventType,
eventHandler: EventHandler,
eventArguments: JDIEventArgument*
): String = eventHandlerId
/**
* Retrieves the collection of event handler functions for the specific
* event class.
*
* @param eventType The type of event whose functions to retrieve
*
* @return The collection of event functions
*/
override def getHandlersForEventType(
eventType: EventType
): Seq[EventHandler] = Nil
/**
* Ends the processing of events from the virtual machine.
*/
override def stop(): Unit = {}
/**
* Removes the event function from this manager.
*
* @param eventHandlerId The id of the event handler to remove
*
* @return Some event handler if removed, otherwise None
*/
override def removeEventHandler(
eventHandlerId: String
): Option[EventHandler] = None
/**
* Indicates whether or not the event manager is processing events.
*
* @return True if it is running, otherwise false
*/
override def isRunning: Boolean = false
/**
* Retrieves the collection of event handler functions for the specific
* event class.
*
* @param eventType The type of event whose functions to retrieve
*
* @return The collection of event functions
*/
override def getHandlerIdsForEventType(
eventType: EventType
): Seq[String] = Nil
/**
* Retrieves the handler with the specified id.
*
* @param eventHandlerId The id of the handler to retrieve
*
* @return Some event handler if found, otherwise None
*/
override def getEventHandler(
eventHandlerId: String
): Option[EventHandler] = None
/**
* Begins the processing of events from the virtual machine.
*/
override def start(): Unit = {}
/**
* Retrieves information on all event handlers.
*
* @return The collection of information on all event handlers
*/
override def getAllEventHandlerInfo: Seq[EventHandlerInfo] = Nil
}
|
ensime/scala-debugger
|
scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/events/DummyEventManager.scala
|
Scala
|
apache-2.0
| 2,881 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.stream
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.expressions.{PlannerNamedWindowProperty, PlannerSliceEnd, PlannerWindowReference}
import org.apache.flink.table.planner.plan.logical.{TimeAttributeWindowingStrategy, WindowAttachedWindowingStrategy, WindowingStrategy}
import org.apache.flink.table.planner.plan.nodes.exec.stream.StreamExecLocalWindowAggregate
import org.apache.flink.table.planner.plan.nodes.exec.{ExecNode, InputProperty}
import org.apache.flink.table.planner.plan.rules.physical.stream.TwoStageOptimizedWindowAggregateRule
import org.apache.flink.table.planner.plan.utils.WindowUtil.checkEmitConfiguration
import org.apache.flink.table.planner.plan.utils.{AggregateUtil, FlinkRelOptUtil, RelExplainUtil, WindowUtil}
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.AggregateCall
import org.apache.calcite.rel.{RelNode, RelWriter, SingleRel}
import org.apache.calcite.util.Litmus
import java.util
import scala.collection.JavaConverters._
/**
* Streaming local window aggregate physical node.
*
* <p>This is a local-aggregation node optimized from [[StreamPhysicalWindowAggregate]] after
* [[TwoStageOptimizedWindowAggregateRule]] optimization.
*
* @see [[TwoStageOptimizedWindowAggregateRule]]
* @see [[StreamPhysicalWindowAggregate]]
*/
class StreamPhysicalLocalWindowAggregate(
cluster: RelOptCluster,
traitSet: RelTraitSet,
inputRel: RelNode,
val grouping: Array[Int],
val aggCalls: Seq[AggregateCall],
val windowing: WindowingStrategy)
extends SingleRel(cluster, traitSet, inputRel)
with StreamPhysicalRel {
private lazy val aggInfoList = AggregateUtil.deriveStreamWindowAggregateInfoList(
FlinkTypeFactory.toLogicalRowType(inputRel.getRowType),
aggCalls,
windowing.getWindow,
isStateBackendDataViews = false)
private lazy val endPropertyName = windowing match {
case _: WindowAttachedWindowingStrategy => "window_end"
case _: TimeAttributeWindowingStrategy => "slice_end"
}
override def isValid(litmus: Litmus, context: RelNode.Context): Boolean = {
windowing match {
case _: WindowAttachedWindowingStrategy | _: TimeAttributeWindowingStrategy =>
// pass
case _ =>
return litmus.fail("StreamPhysicalLocalWindowAggregate should only accepts " +
"WindowAttachedWindowingStrategy and TimeAttributeWindowingStrategy, " +
s"but got ${windowing.getClass.getSimpleName}. " +
"This should never happen, please open an issue.")
}
super.isValid(litmus, context)
}
override def requireWatermark: Boolean = windowing.isRowtime
override def deriveRowType(): RelDataType = {
WindowUtil.deriveLocalWindowAggregateRowType(
aggInfoList,
grouping,
endPropertyName,
inputRel.getRowType,
getCluster.getTypeFactory.asInstanceOf[FlinkTypeFactory])
}
override def explainTerms(pw: RelWriter): RelWriter = {
val inputRowType = getInput.getRowType
val inputFieldNames = inputRowType.getFieldNames.asScala.toArray
val windowRef = new PlannerWindowReference("w$", windowing.getTimeAttributeType)
val namedProperties = Seq(
new PlannerNamedWindowProperty(endPropertyName, new PlannerSliceEnd(windowRef)))
super.explainTerms(pw)
.itemIf("groupBy", RelExplainUtil.fieldToString(grouping, inputRowType), grouping.nonEmpty)
.item("window", windowing.toSummaryString(inputFieldNames))
.item("select", RelExplainUtil.streamWindowAggregationToString(
inputRowType,
getRowType,
aggInfoList,
grouping,
namedProperties,
isLocal = true))
}
override def copy(
traitSet: RelTraitSet,
inputs: util.List[RelNode]): RelNode = {
new StreamPhysicalLocalWindowAggregate(
cluster,
traitSet,
inputs.get(0),
grouping,
aggCalls,
windowing
)
}
override def translateToExecNode(): ExecNode[_] = {
checkEmitConfiguration(FlinkRelOptUtil.getTableConfigFromContext(this))
new StreamExecLocalWindowAggregate(
grouping,
aggCalls.toArray,
windowing,
InputProperty.DEFAULT,
FlinkTypeFactory.toLogicalRowType(getRowType),
getRelDetailedDescription
)
}
}
|
StephanEwen/incubator-flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/stream/StreamPhysicalLocalWindowAggregate.scala
|
Scala
|
apache-2.0
| 5,236 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package expr
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClauses
import com.intellij.psi.util.PsiTreeUtil
/**
* Author: Alexander Podkhalyuzin
* Date: 06.03.2008
*/
trait ScCatchBlock extends ScalaPsiElement {
def expression: Option[ScExpression] = findChild(classOf[ScExpression])
def getLeftParenthesis : Option[PsiElement]
def getRightParenthesis : Option[PsiElement]
}
object ScCatchBlock {
def unapply(catchBlock: ScCatchBlock): Option[ScCaseClauses] = {
for {
expr <- catchBlock.expression
child = PsiTreeUtil.findChildOfType(expr, classOf[ScCaseClauses])
if child != null
} yield child
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/psi/api/expr/ScCatchBlock.scala
|
Scala
|
apache-2.0
| 830 |
package notebook.front
package widgets
import scala.util.Random
import scala.xml.{NodeSeq, UnprefixedAttribute, Null}
import play.api.libs.json._
import play.api.libs.json.Json.JsValueWrapper
import notebook._
import notebook.JsonCodec._
trait PipeComponent[X <: PipeComponent[X]] {
def id:String
def name:String
def tpe:String
def parameters:Map[String, String]
// inputs (one Any per inPort String) → outputs (one Any per outPort String)
def next(a:Map[String, Any]):Map[String, Any]
def merge(j:JsValue):X
private[front] val me:X = this.asInstanceOf[X]
def toJSON:JsObject = Json.obj(
"name" → name,
"id" → id,
"tpe" → tpe,
"parameters" → parameters
)
}
abstract class BasePipeComponent[X<:BasePipeComponent[X]] extends PipeComponent[X] {
}
abstract class LinkPipeComponent[X<:LinkPipeComponent[X]]() extends BasePipeComponent[X]() {
val tpe = "link"
def source:Option[(String, String)]
def target:Option[(String, String)]
}
abstract class BoxPipeComponent[X<:BoxPipeComponent[X]]() extends BasePipeComponent[X]() {
val tpe = "box"
def inPorts:List[String]
def outPorts:List[String]
def position:(Int, Int)
def size:(Int, Int)
def update(varName:String, i:notebook.util.InterpreterUtil, s:String):Unit = ()
override def toJSON:JsObject = super.toJSON ++ Json.obj(
"inPorts" → inPorts,
"outPorts" → outPorts,
"position" → Json.obj(
"x" → position._1,
"y" → position._2
),
"size" → Json.obj(
"width" → size._1,
"height" → size._2
)
)
}
case class LinkPipe(id:String = java.util.UUID.randomUUID.toString,
source:Option[(String, String)]=None,
target:Option[(String, String)]=None) extends LinkPipeComponent[LinkPipe]() {
val name = "link"
def goe(o:Option[String], e:String) = o.map(x => Map(e → x)).getOrElse(Map.empty[String, String])
val parameters = goe(source.map(_._1), "source_id") ++ goe(source.map(_._2), "source_port") ++
goe(target.map(_._1), "target_id") ++ goe(target.map(_._2), "target_port")
def next(a:Map[String, Any]):Map[String, Any] = {
println("Applying next on LinkPipe with " + a)
a
}
def getSource(j:JsValue, k:String) = (j \\ "parameters" \\ s"source_$k" ).asOpt[String]
def getTarget(j:JsValue, k:String) = (j \\ "parameters" \\ s"target_$k" ).asOpt[String]
def merge(j:JsValue) = {
copy(
id = (j \\ "id").as[String],
source = for (i <- getSource(j, "id"); p <- getSource(j, "port")) yield (i,p),
target = for (i <- getTarget(j, "id"); p <-getTarget(j, "port")) yield (i,p)
)
}
}
case class LogPipe(
id:String = java.util.UUID.randomUUID.toString,
inPorts:List[String] = List("in"),
outPorts:List[String] = List("out"),
position:(Int, Int) = (100, 100),
size:(Int, Int) = (100, 60)
) extends BoxPipeComponent[LogPipe]() {
val name = "log"
val parameters = Map.empty[String, String]
def next(a:Map[String, Any]):Map[String, Any] = {
println("Applying next on LogPipe with " + a)
a
}
def merge(j:JsValue):LogPipe = this
}
trait Updatable {
def update( varName:String,
i:notebook.util.InterpreterUtil
):Unit
}
case class CustomizableBoxPipe(
id:String = java.util.UUID.randomUUID.toString,
inPorts:List[String] = List("in"),
outPorts:List[String] = List("out"),
position:(Int, Int) = (100, 100),
size:(Int, Int) = (100, 60),
parameters:Map[String, String] = Map("next" → "(a:Map[String, Any])=>a")
) extends BoxPipeComponent[CustomizableBoxPipe]() with Updatable {
val name = "customizable"
var _next:Map[String, Any]=>Map[String, Any] = _
def next(a:Map[String, Any]):Map[String, Any] = _next(a)
def merge(j:JsValue):CustomizableBoxPipe = copy(
id = (j \\ "id").as[String],
inPorts = (j \\ "inPorts").as[List[String]],
outPorts = (j \\ "outPorts").as[List[String]],
position = ((j \\ "position" \\ "x").as[Int], (j \\ "position" \\ "y").as[Int]),
size = ((j \\ "size" \\ "width").as[Int], (j \\ "size" \\ "height").as[Int]),
parameters = (j \\ "parameters").as[Map[String, String]]
)
override def update(
varName:String,
i:notebook.util.InterpreterUtil
):Unit = {
i{
varName + s""".data.find(_.id == "$id").map{p => p.asInstanceOf[CustomizableBoxPipe]}.""" +
"foreach{p => p._next = { " + parameters("next") + "}.asInstanceOf[Map[String, Any]=>Map[String, Any]] }"
}
}
}
object Flow {
var registeredPC:scala.collection.mutable.Map[String, ()=>BoxPipeComponent[_]] =
scala.collection.mutable.Map(
"log" → (() => LogPipe()),
"customizable" → (() => CustomizableBoxPipe())
)
def registerPipeComponent(name:String, creator:() => BoxPipeComponent[_]) {
registeredPC += (name → creator)
}
def createPipeComponent(s:String):Option[PipeComponent[_]] = registeredPC.get(s).map(_())
}
case class Flow() extends Updatable with JsWorld[PipeComponent[_], JsValue] {
import notebook.JSBus._
implicit val singleToO = (pc:PipeComponent[_]) => pc.toJSON
implicit val singleCodec = idCodec[JsValue]
override def data:Seq[PipeComponent[_]] = mutData
private[this] var mutData:Seq[PipeComponent[_]] = Nil
val confConnection = notebook.JSBus.createConnection
confConnection --> Connection.fromObserver { (js:JsValue) =>
load(js)
}
val linkConnection = notebook.JSBus.createConnection
linkConnection --> Connection.fromObserver { (js:JsValue) =>
val l = new LinkPipe()
val u = l.merge(js)
addAndApply(u)
}
override val scripts = List(
Script("flow", Json.obj("confId" → confConnection.id, "linkId" → linkConnection.id))
)
def load(js:JsValue) = {
val JsArray(array) = js
val pcs = array.map { j =>
val JsString(name) = j \\ "name"
val pc = if (name == "link") {//hackish :-S
Some(new LinkPipe())
} else {
Flow.createPipeComponent(name)
}
val p = pc.map { p =>
val pc:PipeComponent[_] = p.merge(j).asInstanceOf[PipeComponent[_]]
pc
}
p
}.flatten
addAndApply(pcs)
}
private[this] var selected:Option[String] = None
val dl = new DropDown("---" :: Flow.registeredPC.keys.toList)
dl.selected --> Connection.fromObserver { (pc:String) =>
pc match {
case "---" => selected = None
case x => selected = Some(x)
}
}
val addPipeComponent = new Button(Some("+"))
addPipeComponent.currentData --> Connection.fromObserver { (_:Double) =>
for {
s <- selected
pc <- Flow.createPipeComponent(s)
} addAndApply(pc)
}
currentData --> Connection.fromObserver { (s:Seq[JsValue]) =>
val m = s.map{ j => ((j \\ "id").as[String]) → j }.toMap
mutData = mutData.map { pc =>
m.get(pc.id) match {
case None => Some(pc.me)
case Some(j) =>
j match {
case x:JsObject if x.keys.contains("remove") && (x \\ "remove").as[Boolean] =>
None
case j =>
Some(pc.merge(j))
}
}
}.collect{ case Some(x) => x.asInstanceOf[PipeComponent[_]] }
}
def addAndApply(pc:PipeComponent[_]) {
addAndApply(Seq[PipeComponent[_]](pc))
}
def addAndApply(pcs:Seq[PipeComponent[_]]) {
mutData = mutData ++ pcs
apply(data)
}
override def update(
varName:String,
i:notebook.util.InterpreterUtil
):Unit = data.filter(_.isInstanceOf[Updatable])
.foreach(_.asInstanceOf[Updatable].update(varName, i))
/**
* @param init is a function that takes a source box and gives it an initial value
*/
def run(init:(String /*BoxId*/, List[String/*InPort*/]) => Map[String/*OutPort*/, Any]):scala.collection.Map[(String, String),Any] = {
// build tree
val currentData:List[PipeComponent[_]] = mutData.toList
val (links, boxes) = {
val (ls, bxs) = currentData.partition(_.isInstanceOf[LinkPipeComponent[_]])
val links = ls.map(_.asInstanceOf[LinkPipeComponent[_]])
val boxes = bxs.map(_.asInstanceOf[BoxPipeComponent[_]])
(links.filter(l => l.source.isDefined && l.target.isDefined), boxes)
}
/**
* X ----> Z ------> U ----> A
* Y --/ /
* W -----------/
*\\____/ \\_____/ \\_____/ \\____/
* L0 L1 L2 L3
*/
def layer(remaning:Seq[BoxPipeComponent[_]], acc:List[Seq[BoxPipeComponent[_]]]):List[Seq[BoxPipeComponent[_]]] = {
remaning match {
case Nil => acc.reverse
case `boxes` =>
val layer0 = remaning.filterNot { b =>
links.exists(l => l.target == Some(b.id))
}
layer(remaning diff layer0, List(layer0))
case xs =>
val next = xs.filter { b =>
val targetB = links.filter(l => l.target == Some(b.id))
targetB.forall { l =>
acc.flatten.exists(p => l.source == Some(p.id))
}
}
layer(remaning diff next, next :: acc)
}
}
val layers = layer(boxes, Nil)
val values = scala.collection.mutable.Map.empty[(String /*BoxID*/, String /*OutPort*/), Any]
val results = layers.map { pcs =>
pcs.foreach { pc =>
val linksToPc = links.map { link =>
val pcid = pc.id
link.target match {
case Some((`pcid`, port)) if pc.inPorts.contains(port) => Some(port → link.source.get)
case _ => None
}
}.collect{case Some(x) => x}
val valuesForLinksSource = linksToPc.map{ case (inPort, (srcId, outPort)) =>
for{
src <- currentData.find(_.id == srcId)
v <- values.get((srcId, outPort))
} yield (inPort → v)
}
.collect{ case Some(x) => x }
.toMap
values ++= (valuesForLinksSource match {
case xs if xs.isEmpty => pc.next(init(pc.id, pc.inPorts))
case xs => pc.next(xs.asInstanceOf[Map[String, Any]])
}).map{ case (outPort, v) => (pc.id, outPort) → v}
}
}
val lastPCs = boxes.filterNot { b =>
links.exists(l => l.source.map(_._1) == Some(b.id))
}
val rs = values.filterKeys(k => lastPCs.exists(_.id == k._1))
rs
}
override def content = Some {
<div class="container-fluid">
<div class="control col-md-12">
{
dl.toHtml
}
{
addPipeComponent.toHtml
}
</div>
<div class="jointgraph col-md-9"></div>
<div class="col-md-3">
<h4>Configuration</h4>
<form class="form configure" action="#">
<div class="configuration">
</div>
<button type="button" class="btn btn-xs btn-danger remove">Remove</button>
<button type="submit" class="btn btn-default">Apply</button>
</form>
</div>
</div>
}
}
|
antonkulaga/spark-notebook
|
modules/common/src/main/scala/notebook/front/widgets/Flow.scala
|
Scala
|
apache-2.0
| 11,897 |
package com.bwsw.tstreamstransactionserver.netty.server.handler.data
import com.bwsw.tstreamstransactionserver.netty.Descriptors
import com.bwsw.tstreamstransactionserver.netty.server.TransactionServer
import com.bwsw.tstreamstransactionserver.netty.server.handler.RequestHandler
import com.bwsw.tstreamstransactionserver.rpc.{ServerException, TransactionService}
class GetTransactionDataHandler(server: TransactionServer)
extends RequestHandler{
private val descriptor = Descriptors.GetTransactionData
override def handleAndGetResponse(requestBody: Array[Byte]): Array[Byte] = {
val args = descriptor.decodeRequest(requestBody)
val result = server.getTransactionData(
args.streamID,
args.partition,
args.transaction,
args.from,
args.to
)
descriptor.encodeResponse(
TransactionService.GetTransactionData.Result(Some(result))
)
}
override def handle(requestBody: Array[Byte]): Unit = {
// throw new UnsupportedOperationException(
// "It doesn't make any sense to get transaction data according to fire and forget policy"
// )
}
override def createErrorResponse(message: String): Array[Byte] = {
descriptor.encodeResponse(
TransactionService.GetTransactionData.Result(
None,
Some(ServerException(message)
)
)
)
}
override def getName: String = descriptor.name
}
|
bwsw/tstreams-transaction-server
|
src/main/scala/com/bwsw/tstreamstransactionserver/netty/server/handler/data/GetTransactionDataHandler.scala
|
Scala
|
apache-2.0
| 1,408 |
/*
* Copyright ActionML, LLC under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* ActionML licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.routeme
import java.util.Date
import io.prediction.controller.{EngineFactory, Engine}
/** This file contains case classes that are used with reflection to specify how query and config
* JSON is to be parsed. The Query case class, for instance, defines the way a JSON query is to be
* formed. The same applies to the param case classes.
*/
/** The Query spec with optional values. The only hard rule is that there must be either a user or
* an item id. All other values are optional. */
case class Query(
user: Option[String] = None, // must be a user or item id
userBias: Option[Float] = None, // default: whatever is in algorithm params or 1
item: Option[String] = None, // must be a user or item id
itemBias: Option[Float] = None, // default: whatever is in algorithm params or 1
fields: Option[List[Field]] = None, // default: whatever is in algorithm params or None
currentDate: Option[String] = None, // if used will override dateRange filter, currentDate must lie between the item's
// expireDateName value and availableDateName value, all are ISO 8601 dates
dateRange: Option[DateRange] = None, // optional before and after filter applied to a date field
blacklistItems: Option[List[String]] = None, // default: whatever is in algorithm params or None
returnSelf: Option[Boolean] = None,// means for an item query should the item itself be returned, defaults
// to what is in the algorithm params or false
num: Option[Int] = None, // default: whatever is in algorithm params, which itself has a default--probably 20
eventNames: Option[List[String]]) // names used to ID all user actions
extends Serializable
/** Used to specify how Fields are represented in engine.json */
case class Field( // no optional values for fields, when specified
name: String, // name of metadata field
values: Array[String], // fields can have multiple values, like tags, or a single value, as when using hierarchical
// taxonomies
bias: Float)// any positive value is a boost, negative is a filter
extends Serializable
/** Used to specify the date range for a query */
case class DateRange(
name: String, // name of item property for the date comparison
before: Option[String], // empty strings means no filter
after: Option[String]) // both empty should be ignored
extends Serializable
/** Results of an MMRAlgorithm.predict */
case class PredictedResult(
itemScores: Array[ItemScore])
extends Serializable
case class ItemScore(
item: String, // item id
score: Double) // used to rank; original score returned from the search engine
extends Serializable
object RecommendationEngine extends EngineFactory {
def apply() = {
new Engine(
classOf[DataSource],
classOf[Preparator],
Map("ur" -> classOf[URAlgorithm]), // IMPORTANT: "ur" must be the "name" of the parameter set in engine.json
classOf[Serving])
}
}
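// Illustrative only: the corresponding engine.json would name the algorithm parameter
// set "ur" to match the Map key above, e.g. (values are hypothetical):
// "algorithms": [
//   { "name": "ur", "params": { "eventNames": ["purchase", "view"] } }
// ]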
|
heshamMassoud/RouteMe-API
|
PredictionIOEngine/src/main/scala/Engine.scala
|
Scala
|
apache-2.0
| 3,753 |
package org.kimbasoft.akka.router
import akka.actor.{Props, Actor}
/**
* Supervisor that creates a fixed number of worker actors of the given type and currently ignores all incoming messages.
*
* @author <a href="[email protected]">Steffen Krause</a>
* @since 1.0
*/
class ActorSupervisor(worker_type: String, worker_num: Int) extends Actor {
println(s"""Creating $worker_num "$worker_type" workers""")
val workers = ((1 to worker_num) map createWorker).toVector
println(s"""Workers: $workers""")
def receive: Receive = {
case _ => // Don't do anything here
}
private def createWorker(index: Int) = context.actorOf(ActorWorker.props(worker_type), s"worker$index")
}
object ActorSupervisor {
def props(worker_type: String, worker_num: Int): Props = Props(classOf[ActorSupervisor], worker_type, worker_num)
}
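// Usage sketch (names are hypothetical): create a supervisor that spins up four
// workers of a given type inside an existing ActorSystem.
//   val supervisor = system.actorOf(ActorSupervisor.props("echo", 4), "echo-supervisor")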
|
kimba74/sandbox-scala
|
src/main/scala/org/kimbasoft/akka/router/ActorSupervisor.scala
|
Scala
|
gpl-3.0
| 754 |
package recommender
import inputdata.DataHolder
import cmd._
object mainHolder {
private var recommender: Option[Recommender] = None
private var dataHolder: Option[DataHolder] = None
//private var dataHolder: Option[data_holder] = None
def setup(conf: Conf): Unit ={
// dataHolder = Some(new MovieLensDataHolder("/Users/taoranli/code/dataset/ml-1m"))
// recommender = Some(new KnnRecommender())
//Dataholder
val dataHolderNameToFactoryMap = DataHolderFactoryFromConf.dataHolderFactories.map(holder => holder.getName -> holder).toMap
val dataHolderStr: String = conf.data()
dataHolder = Some(dataHolderNameToFactoryMap.get(dataHolderStr).get.getDataHolderInstance(conf))
//Recommender
val recommenderNameToFactoryMap = RecommenderFactoryFromConf.recommenderFactories.map(rec => rec.getName -> rec).toMap
val recommenderStr: String = conf.method()
recommender = Some(recommenderNameToFactoryMap.get(recommenderStr).get.getRecommender(conf))
}
def getDataHolder(): DataHolder = {
dataHolder match {
case Some(holder) => holder
case None => throw new MainHolderNotInitializedException
}
}
def getRecommenderInstance(): Recommender = {
recommender match {
case Some(rec) => rec
case None => throw new MainHolderNotInitializedException
}
}
class MainHolderNotInitializedException extends Exception
}
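// Usage sketch (assuming a parsed Conf from the cmd package): initialise the holder
// once, then fetch the configured instances anywhere in the application.
//   mainHolder.setup(conf)
//   val recommender = mainHolder.getRecommenderInstance()
//   val data = mainHolder.getDataHolder()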
|
litaoran/spray-sample
|
src/main/scala/recommender/mainHolder.scala
|
Scala
|
mit
| 1,415 |
package com.sksamuel.scrimage.filter
import com.sksamuel.scrimage.ImmutableImage
import org.scalatest.{BeforeAndAfter, FunSuite, OneInstancePerTest}
class RobertsFilterTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
private val original = ImmutableImage.fromResource("/bird_small.png")
test("filter output matches expected") {
val expected = ImmutableImage.fromResource("/com/sksamuel/scrimage/filters/bird_small_roberts.png")
assert(original.filter(new RobertsFilter) === expected)
}
}
|
sksamuel/scrimage
|
scrimage-filters/src/test/scala/com/sksamuel/scrimage/filter/RobertsFilterTest.scala
|
Scala
|
apache-2.0
| 525 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.rethink.config
import com.datamountaineer.streamreactor.connect.rethink.TestBase
import org.scalatest.mockito.MockitoSugar
/**
* Created by [email protected] on 21/06/16.
* stream-reactor-maven
*/
class TestReThinkSourceSettings extends TestBase with MockitoSugar {
"should create a ReThinkSourceSetting for all fields with Initialize" in {
val config = ReThinkSourceConfig(getPropsSource)
val settings = ReThinkSourceSettings(config)
val setting = settings.head
setting.source shouldBe TABLE
setting.target shouldBe TOPIC
setting.initialise shouldBe true
setting.db shouldBe DB
setting.batchSize shouldBe BATCH_SIZE
}
"should create a ReThinkSourceSetting for all fields without Initialize" in {
val config = ReThinkSourceConfig(getPropsSourceDelta)
val settings = ReThinkSourceSettings(config)
val setting = settings.head
setting.source shouldBe TABLE
setting.target shouldBe TOPIC
setting.initialise shouldBe false
setting.db shouldBe DB
setting.batchSize shouldBe BATCH_SIZE
}
}
|
CodeSmell/stream-reactor
|
kafka-connect-rethink/src/test/scala/com/datamountaineer/streamreactor/connect/rethink/config/TestReThinkSourceSettings.scala
|
Scala
|
apache-2.0
| 1,722 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package top
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.top.template.TemplateBody
object TemplateOpt {
def parseTemplateBody(implicit builder: ScalaPsiBuilder): Unit =
builder.getTokenType match {
case ScalaTokenTypes.tLBRACE if !builder.twoNewlinesBeforeCurrentToken => TemplateBody.parse(builder)
case _ =>
}
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/parser/parsing/top/TemplateOpt.scala
|
Scala
|
apache-2.0
| 570 |
package org.littlewings.tweetbot.config
import org.jasypt.util.text.{BasicTextEncryptor, TextEncryptor}
object EncryptTwitterConfigSupport {
val DisabledEncrypt: Boolean =
java.lang.Boolean.parseBoolean(System.getenv("TWEET_BOT_DISABLE_ENCRYPT_TWITTER_CONFIG"))
val EncryptPassword: String = System.getenv("TWEET_BOT_TWITTER_ENCRYPT_PASSWORD")
}
trait EncryptTwitterConfigSupport extends TwitterConfigSupport {
protected val textEncryptor: Option[TextEncryptor] = {
if (EncryptTwitterConfigSupport.DisabledEncrypt) {
Option.empty
} else {
val encryptor = new BasicTextEncryptor
encryptor.setPassword(EncryptTwitterConfigSupport.EncryptPassword)
Option(encryptor)
}
}
protected def decryptIfNeeded(fun: => String): String =
textEncryptor.map(t => t.decrypt(fun)).getOrElse(fun)
abstract override def twitterOAuthConsumerKey: String =
decryptIfNeeded(super.twitterOAuthConsumerKey)
abstract override def twitterOAuthConsumerSecret: String =
decryptIfNeeded(super.twitterOAuthConsumerSecret)
abstract override def twitterOAuthAccessToken: String =
decryptIfNeeded(super.twitterOAuthAccessToken)
abstract override def twitterOAuthAccessTokenSecret: String =
decryptIfNeeded(super.twitterOAuthAccessTokenSecret)
}
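// Usage sketch (PropertiesTwitterConfigSupport is a hypothetical concrete
// TwitterConfigSupport): the abstract overrides above make this a stackable trait,
// so decryption is layered on top of whichever config source it is mixed into.
//   val config = new PropertiesTwitterConfigSupport with EncryptTwitterConfigSupport
//   val token = config.twitterOAuthAccessToken // decrypted unless encryption is disabled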
|
kazuhira-r/tweet-bot
|
src/main/scala/org/littlewings/tweetbot/config/EncryptTwitterConfigSupport.scala
|
Scala
|
mit
| 1,296 |
package foo {}; package bar {}; package baz {}
|
yusuke2255/dotty
|
tests/pos/t2973.scala
|
Scala
|
bsd-3-clause
| 47 |
package com.fhuertas.monkey.utils
import akka.actor.{ActorRef, Props}
import com.fhuertas.monkey.models.Monkey
import scalaz.Reader
class FastMonkey(canyon: ActorRef) extends Monkey(canyon) with FastConfiguration
object FastMonkey {
val props = Reader {
(canyon: ActorRef) => Props(classOf[FastMonkey], canyon)
}
}
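// Usage sketch (canyon and system are hypothetical): the scalaz Reader defers the
// dependency, so Props are only built once the canyon actor is supplied.
//   val monkeyProps = FastMonkey.props.run(canyon)
//   val monkey = system.actorOf(monkeyProps)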
|
fhuertas/monkey
|
src/test/scala/com/fhuertas/monkey/utils/FastMonkey.scala
|
Scala
|
apache-2.0
| 329 |
// 1EC Graph Parser
// Copyright (c) University of California
// Copyright (c) Jonathan Kummerfeld
//
// This software is covered by a license. See the LICENSE.txt file in the
// top-level directory of this distribution or at
// https://github.com/jkkummerfeld/1ec-graph-parser for the full text of the
// license.
package edu.berkeley.nlp.graphparser
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet, Set}
import edu.berkeley.nlp.graphparser.Hash._
import edu.berkeley.nlp.graphparser.Log._
/** A kind of forest reranker, though it's really just a fine pass with fully
* articulated parse structures (and could be trained with everything else).
*
* To consider:
* Tradeoff memory usage for complexity by only storing the backpointers and
* the state hash, then traverse the chart to rebuild for features.
*/
object FullStructureChart {
// State
// 0-11 (see baseParser)
// 12 hash first half
// 13 hash second half
// 14 external, structural arg (24) parent pos (8)
// for each position, including edges:
// spine (32)
// structural arg (24) parent (8)
// number of traces
// for each trace:
// arg (16) child (8) parent (8)
final val SUB_START = 15
@inline final def hashPart0(
states: UnboxedArrayBuffer, statePos: Int
) = states(statePos + SUB_START - 3)
@inline final def hashPart1(
states: UnboxedArrayBuffer, statePos: Int
) = states(statePos + SUB_START - 2)
@inline final def subArgParentForPosExternal(
states: UnboxedArrayBuffer, statePos: Int
) = states(statePos + SUB_START - 1)
@inline final def externalPosForPos(
states: UnboxedArrayBuffer, statePos: Int
) = Chart.externalPosFromState(states(statePos))
@inline final def externalParentForPos(
states: UnboxedArrayBuffer, statePos: Int
) = subArgParentForPosExternal(states, statePos) & 0xff
@inline final def externalArgForPos(
states: UnboxedArrayBuffer, statePos: Int
) = subArgParentForPosExternal(states, statePos) >>> 8
@inline final def subSpineForPos(
pos: Int, span0: Int, states: UnboxedArrayBuffer, statePos: Int
) = {
val delta = (pos - span0) * 2
states(statePos + SUB_START + delta)
}
@inline final def subArgParentForPos(
pos: Int, span0: Int, states: UnboxedArrayBuffer, statePos: Int
) = {
val delta = (pos - span0) * 2 + 1
states(statePos + SUB_START + delta)
}
@inline final def subParentForPos(
pos: Int, span0: Int, states: UnboxedArrayBuffer, statePos: Int
) = subArgParentForPos(pos, span0, states, statePos) & 0xff
@inline final def subArgForPos(
pos: Int, span0: Int, states: UnboxedArrayBuffer, statePos: Int
) = subArgParentForPos(pos, span0, states, statePos) >>> 8
@inline final def subNumTracesForPos(
span0: Int, span1: Int, states: UnboxedArrayBuffer, statePos: Int
) = {
val delta = (span1 - span0 + 1) * 2
states(statePos + SUB_START + delta)
}
@inline final def subTraceForPos(
num: Int, span0: Int, span1: Int, states: UnboxedArrayBuffer,
statePos: Int
) = {
val delta = (span1 - span0 + 1) * 2 + num + 1
states(statePos + SUB_START + delta)
}
@inline final def subTraceArgForPos(
num: Int, span0: Int, span1: Int, states: UnboxedArrayBuffer,
statePos: Int
) = subTraceForPos(num, span0, span1, states, statePos) >>> 16
@inline final def subTraceParentForPos(
num: Int, span0: Int, span1: Int, states: UnboxedArrayBuffer,
statePos: Int
) = subTraceForPos(num, span0, span1, states, statePos) & 0xff
@inline final def subTraceChildForPos(
num: Int, span0: Int, span1: Int, states: UnboxedArrayBuffer,
statePos: Int
) = (subTraceForPos(num, span0, span1, states, statePos) >>> 8) & 0xff
@inline final def makeSubArgParent(arg: Int, parent: Int) =
(arg << 8) | parent
@inline final def makeSubTrace(arg: Int, child: Int, parent: Int) =
(arg << 16) | (child << 8) | parent
}
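// Packing sketch (illustrative values): the helpers above pack an argument and a parent
// position into one Int, e.g. makeSubArgParent(5, 3) == (5 << 8) | 3 == 1283, and the
// fields are recovered with 1283 >>> 8 == 5 (arg) and 1283 & 0xff == 3 (parent).
// makeSubTrace packs three fields the same way: arg in the high 16 bits, then child, then parent.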
class FullStructureChart(
stage: Stage,
chartAbove: Chart,
traceChart: Chart,
spineChart: Chart,
tokens: Vector[String],
tags: Vector[String],
sentenceID: Int,
goldEdges: UnboxedArrayBuffer,
goldUnaries: ArrayBuffer[Int],
goldStates: Set[(Int, Int, Int, Int, Int)],
lossType: LossType.Value,
doNotPruneGoldArcs: Boolean,
doNotPruneGoldSpines: Boolean,
pruningRatio: Double,
pruningRank: Double,
verbosity: Int,
training: Boolean
) extends Chart(stage, chartAbove, traceChart, spineChart, tokens, tags, sentenceID, goldEdges, goldUnaries, goldStates, lossType, doNotPruneGoldArcs, doNotPruneGoldSpines, pruningRatio, pruningRank, verbosity, training) {
import Chart._
import FullStructureChart._
import stage.model._
import stage._
override val thisClass = "FullStructureChart."+ stage.name
override val beamsL =
if (stage.subbeamType != SubbeamType.SPINES) beams
else Array.fill(numberOfCells)(beamType(beamMinLength, beamMaxLength,
beamMinMultiple, beamMaxMultiple, beamMinFraction, beamMaxFraction))
override val beamsR =
if (stage.subbeamType != SubbeamType.SPINES) beams
else Array.fill(numberOfCells)(beamType(beamMinLength, beamMaxLength,
beamMinMultiple, beamMaxMultiple, beamMinFraction, beamMaxFraction))
def curHash00 =
hashPart0(curCellStates0, curStatePos0)
def curHash01 =
hashPart1(curCellStates0, curStatePos0)
def subSpine(pos: Int) =
subSpineForPos(pos, curSpan00, curCellStates0, curStatePos0)
def subArgParent(pos: Int) =
subArgParentForPos(pos, curSpan00, curCellStates0, curStatePos0)
def subArgParentExternal =
subArgParentForPosExternal(curCellStates0, curStatePos0)
def subParent(pos: Int) =
subParentForPos(pos, curSpan00, curCellStates0, curStatePos0)
def subArg(pos: Int) =
subArgForPos(pos, curSpan00, curCellStates0, curStatePos0)
def subNumTraces =
subNumTracesForPos(curSpan00, curSpan01, curCellStates0, curStatePos0)
def subTrace(num: Int) =
subTraceForPos(num, curSpan00, curSpan01, curCellStates0, curStatePos0)
def subTraceArg(num: Int) =
subTraceArgForPos(num, curSpan00, curSpan01, curCellStates0, curStatePos0)
def subTraceParent(num: Int) =
subTraceParentForPos(num, curSpan00, curSpan01, curCellStates0, curStatePos0)
def subTraceChild(num: Int) =
subTraceChildForPos(num, curSpan00, curSpan01, curCellStates0, curStatePos0)
def curHash10 =
hashPart0(curCellStates1, curStatePos1)
def curHash11 =
hashPart1(curCellStates1, curStatePos1)
def subSpine1(pos: Int) =
subSpineForPos(pos, curSpan10, curCellStates1, curStatePos1)
def subArgParent1(pos: Int) =
subArgParentForPos(pos, curSpan10, curCellStates1, curStatePos1)
def subArgParentExternal1 =
subArgParentForPosExternal(curCellStates1, curStatePos1)
def subParent1(pos: Int) =
subParentForPos(pos, curSpan10, curCellStates1, curStatePos1)
def subArg1(pos: Int) =
subArgForPos(pos, curSpan10, curCellStates1, curStatePos1)
def subNumTraces1 =
subNumTracesForPos(curSpan10, curSpan11, curCellStates1, curStatePos1)
def subTrace1(num: Int) =
subTraceForPos(num, curSpan10, curSpan11, curCellStates1, curStatePos1)
def subTraceArg1(num: Int) =
subTraceArgForPos(num, curSpan10, curSpan11, curCellStates1, curStatePos1)
def subTraceParent1(num: Int) =
subTraceParentForPos(num, curSpan10, curSpan11, curCellStates1, curStatePos1)
def subTraceChild1(num: Int) =
subTraceChildForPos(num, curSpan10, curSpan11, curCellStates1, curStatePos1)
def curHash20 =
hashPart0(curCellStates2, curStatePos2)
def curHash21 =
hashPart1(curCellStates2, curStatePos2)
def subSpine2(pos: Int) =
subSpineForPos(pos, curSpan20, curCellStates2, curStatePos2)
def subArgParent2(pos: Int) =
subArgParentForPos(pos, curSpan20, curCellStates2, curStatePos2)
def subArgParentExternal2 =
subArgParentForPosExternal(curCellStates2, curStatePos2)
def subParent2(pos: Int) =
subParentForPos(pos, curSpan20, curCellStates2, curStatePos2)
def subArg2(pos: Int) =
subArgForPos(pos, curSpan20, curCellStates2, curStatePos2)
def subNumTraces2 =
subNumTracesForPos(curSpan20, curSpan21, curCellStates2, curStatePos2)
def subTrace2(num: Int) =
subTraceForPos(num, curSpan20, curSpan21, curCellStates2, curStatePos2)
def subTraceArg2(num: Int) =
subTraceArgForPos(num, curSpan20, curSpan21, curCellStates2, curStatePos2)
def subTraceParent2(num: Int) =
subTraceParentForPos(num, curSpan20, curSpan21, curCellStates2, curStatePos2)
def subTraceChild2(num: Int) =
subTraceChildForPos(num, curSpan20, curSpan21, curCellStates2, curStatePos2)
// Old and unused
override def minSplitGet(
rightSpine: Int, parent: HasParent.Value, span1: Int, span2: Int
) = {
val right = hashN(rightSpine, HasParent.id(parent), span2)
val ans = minSplit.get(right)
if (ans < (span1 + 1) || ans > sentenceLength) span1 + 1
else ans
}
override def maxSplitGet(
leftSpine: Int, parent: HasParent.Value, span1: Int, span2: Int
) = {
val left = hashN(leftSpine, HasParent.id(parent), span1)
val ans = maxSplit.get(left)
if (ans > (span2 - 1) || ans < 0) span2 - 1
else ans
}
override def minSplitUpdate(
rightSpine: Int, parent: HasParent.Value, span1: Int, span2: Int
) = {
val right = hashN(rightSpine, HasParent.id(parent), span2)
val prev = minSplit.get(right)
if (prev > span1) minSplit.put(right, span1)
}
override def maxSplitUpdate(
leftSpine: Int, parent: HasParent.Value, span1: Int, span2: Int
) = {
val left = hashN(leftSpine, HasParent.id(parent), span1)
val prev = maxSplit.get(left)
if (prev < span2) maxSplit.put(left, span2)
}
// override insert
override def insert(
loc: Int, splits: Int, gold_ante3: Int, ante: Int, ante2_arg: Int,
score: Float, sum: Float, state: Int, leftSpine: Int, rightSpine: Int,
externalSpine: Int, nStateHash: Long, binary: Boolean = false,
ternary: Boolean = false, init: Boolean = false, child: Int = -1,
parent: Int = -1, structural: Boolean = false
) : (Float, Int, Float) = {
val curCellStates = denseStates(loc)
val fullSpan = cellToSpan(loc)
/// val curStatePos = curCellStates.length
/// Log.logln(s"Inserting $fullSpan $leftSpine $rightSpine $externalSpine $binary $ternary $init : $curStatePos")
val ans = super.insert(loc, splits, gold_ante3, ante, ante2_arg, score,
sum, state, leftSpine, rightSpine, externalSpine, nStateHash, binary,
ternary, init, child, parent, structural)
val nExternalPos =
if (externalSpine == 0) -1
else externalPosFromState(state)
val xFrom0 = (externalPos == nExternalPos)
val xFrom1 = ((binary || ternary) && externalPos1 == nExternalPos)
val xFrom2 = (ternary && externalPos2 == nExternalPos)
// Hash that combines info on all current states
curCellStates.append((nStateHash >>> 32).toInt)
curCellStates.append((nStateHash & 0xffffffff).toInt)
// Structural arg and parent for External point (0 if no external)
if (externalSpine == 0) {
// No external point
curCellStates.append(0)
} else if (structural && child == nExternalPos) {
// Created now, x is the child
curCellStates.append(makeSubArgParent(ante2_arg, parent))
} else {
// It is in one of the states being combined. One will contain it, the
// others will be zero.
val toInsert =
( if (xFrom0) curCellStates0(curStatePos0 + 14) else 0 ) +
( if (xFrom1) curCellStates1(curStatePos1 + 14) else 0 ) +
( if (xFrom2) curCellStates2(curStatePos2 + 14) else 0 )
curCellStates.append(toInsert)
}
// Spines and structural edges:
// - Add spines from left
// - If that doesn't reach all the way to the end, add middle/right
// - If that doesn't reach all the way to the end, add right
// Helpful things that can be assumed
// - Spines agree (ie, get a spine from any item and it's fine)
// - A given arc can only occur in one item
if (init) {
val arg_parent = 0
val numTraces = 0
curCellStates.append(leftSpine)
curCellStates.append(arg_parent)
curCellStates.append(rightSpine)
curCellStates.append(arg_parent)
curCellStates.append(numTraces)
} else {
var pos = fullSpan._1
while (pos <= fullSpan._2) {
// Insert spine
val spine =
if (pos <= curSpan01) subSpine(pos)
else if (pos <= curSpan11) subSpine1(pos)
else if (pos <= curSpan21) subSpine2(pos)
else throw new Exception(s"pos $pos is outside span $fullSpan")
val arg_parent =
if (structural && pos == child) {
// being created here
makeSubArgParent(ante2_arg, parent)
} else if (pos == curSpan00) {
// left, middle external, right external
subArgParent(pos) +
( if ((binary || ternary) && externalPos1 == pos)
subArgParentExternal1
else 0) +
( if (ternary && externalPos2 == pos) subArgParentExternal2
else 0 )
} else if (pos < curSpan01) {
// Left item
subArgParent(pos)
} else if (pos == curSpan01) {
// left, middle, or right external
subArgParent(pos) +
(if (binary || ternary) subArgParent1(pos) else 0) +
( if (ternary && externalPos2 == pos) subArgParentExternal2
else 0 )
} else if ((binary || ternary) && pos < curSpan11) {
// middle / right
subArgParent1(pos)
} else if ((binary || ternary) && pos == curSpan11) {
// left external, middle / right, right
( if (externalPos == pos) subArgParentExternal else 0 ) +
subArgParent1(pos) +
(if (ternary) subArgParent2(pos) else 0)
} else if (ternary && pos < curSpan21) {
// right
subArgParent2(pos)
} else if (ternary && pos == curSpan21) {
// left external, middle external, right
( if (externalPos == pos) subArgParentExternal else 0 ) +
( if (externalPos1 == pos) subArgParentExternal1 else 0 ) +
subArgParent2(pos)
} else throw new Exception(s"pos $pos is outside span $fullSpan")
curCellStates.append(spine)
curCellStates.append(arg_parent)
pos += 1
}
// Trace edge fields
val nTraces =
subNumTraces +
(if (binary || ternary) subNumTraces1 else 0) +
(if (ternary) subNumTraces2 else 0) +
(if (child >= 0 && (gold_ante3 & ARGT_MASK) != 0) 1 else 0) +
(if (child >= 0 && (gold_ante3 & ARGC_MASK) != 0) 1 else 0)
curCellStates.append(nTraces)
// Insert new trace edges
if (child >= 0) {
if ((gold_ante3 & ARGT_MASK) != 0) {
val trace = makeSubTrace((gold_ante3 & ARGT_MASK) >> 15, child, parent)
curCellStates.append(trace)
} else if ((gold_ante3 & ARGC_MASK) != 0) {
val trace = makeSubTrace(gold_ante3 & ARGC_MASK, child, parent)
curCellStates.append(trace)
}
}
// Insert existing trace edges
var tracePos = 0
while (tracePos < subNumTraces) {
curCellStates.append(subTrace(tracePos))
tracePos += 1
}
if (binary || ternary) {
var tracePos = 0
while (tracePos < subNumTraces1) {
curCellStates.append(subTrace1(tracePos))
tracePos += 1
}
}
if (ternary) {
var tracePos = 0
while (tracePos < subNumTraces2) {
curCellStates.append(subTrace2(tracePos))
tracePos += 1
}
}
}
/// val newStatePos = curCellStates.length
/// Log.logln(s"Inserted $fullSpan $leftSpine $rightSpine $externalSpine $binary $ternary $init : $newStatePos - $curStatePos")
ans
}
// To ensure items are always inserted, need to generate the state hash from
// previous state hashes. Note, requires the assumption that whenever this
// is called, the current state is set accordingly.
override def stateHash(
state: Int, leftSpine: Int, rightSpine: Int, externalSpine: Int,
binary: Boolean = false, ternary: Boolean = false, init: Boolean = false
) = {
if (ternary)
hashToLong(state, leftSpine, rightSpine, externalSpine, curHash00,
curHash01, curHash10, curHash11, curHash20, curHash21)
else if (binary)
hashToLong(state, leftSpine, rightSpine, externalSpine, curHash00,
curHash01, curHash10, curHash11)
else if (init)
hashToLong(state, leftSpine, rightSpine, externalSpine)
else
hashToLong(state, leftSpine, rightSpine, externalSpine, curHash00,
curHash01)
}
// Binary needs to do scoring for positions being closed off
override def combiner(
spanLeft: Int, spanRight: Int, split1: Int, split2: Int, leftPos: Int,
middlePos: Int, rightPos: Int
) = {
val score = super.combiner(spanLeft, spanRight, split1, split2, leftPos,
middlePos, rightPos)
val leftCell = spanToCell(spanLeft, split1)
setCurCellAndPos(leftCell, leftPos)
if (middlePos < 0) {
val rightCell = spanToCell(split1, spanRight)
setCurCellAndPos1(rightCell, rightPos)
} else {
val middleCell = spanToCell(split1, split2)
val rightCell = spanToCell(split2, spanRight)
setCurCellAndPos1(middleCell, middlePos)
setCurCellAndPos2(rightCell, rightPos)
}
if (middlePos < 0) {
val nscore = getFullBinaryScore(tokenIDs, tokenFeatureIDs, tagIDs,
spanLeft, split1, -1, spanRight, split1, curCellStates0, curStatePos0,
null, -1, curCellStates1, curStatePos1, featureCounter,
lossType).toFloat
score + nscore
} else {
val nscore0 = getFullBinaryScore(tokenIDs, tokenFeatureIDs, tagIDs,
spanLeft, split1, split2, spanRight, split1, curCellStates0,
curStatePos0, curCellStates1, curStatePos1, curCellStates2,
curStatePos2, featureCounter, lossType).toFloat
val nscore1 = getFullBinaryScore(tokenIDs, tokenFeatureIDs, tagIDs,
spanLeft, split1, split2, spanRight, split2, curCellStates0,
curStatePos0, curCellStates1, curStatePos1, curCellStates2,
curStatePos2, featureCounter, lossType).toFloat
score + nscore0 + nscore1
}
}
override def curStateAsString =
super.curStateAsString + s"-$curHash00-$curHash01"
override def curStateAsString1 =
super.curStateAsString1 + s"-$curHash10-$curHash11"
override def curStateAsString2 =
super.curStateAsString2 + s"-$curHash20-$curHash21"
def fullCellsToString = {
val map = HashMap[(Int, Int), ArrayBuffer[String]]()
for {start <- 0 until sentenceLength
end <- (start + 1) until sentenceLength} {
val span = (start, end)
setCurCell(span)
while (next) {
val clist = map.getOrElseUpdate(span, ArrayBuffer[String]())
clist.append(curStateAsString +
f" $insideScore%.2e $curCell0 $curStatePos0 $curPtr00 $curPtr01 $curPtr02")
for (pos <- start to end)
clist.append(s"${subParent(pos)} ${subArg(pos)} ${subSpine(pos)}")
for (num <- 0 until subNumTraces)
clist.append(s"${subTraceParent(num)} ${subTraceChild(num)} ${subTraceArg(num)}")
}
}
val ans = ArrayBuffer[String]()
map.foreach{ case (k, v) =>
ans.append(f"Cell ${k._1}%2d ${k._2}%2d\\n "+ v.mkString("\\n "))
}
ans.sorted.mkString("\\n") +"\\n"
}
override def toString() = fullCellsToString
override def checkCoarseState(
state: Int, lSpine: Int, rSpine: Int, xSpine: Int, span: (Int, Int),
child: Int, parent: Int, arg: Int, stateNoArg: Int, gold: Boolean,
init: Boolean, arcSecondStage: Boolean = false, arcCreation: Int = 0,
structural: Boolean = false, chain: Boolean = false
) = {
coarseArcRatio = -1.0
// Assume chart above is null (would be crazy to run otherwise)
// Also assume that the chart above has the same structure (ie, also
// projective or also 1ec).
// Also assume we are never introducing arc types here that have not been
// seen above (again, would be way too slow).
if (gold && doNotPruneGoldArcs && doNotPruneGoldSpines) {
if (check(VAdding, verbosity)) logln(s" is gold, not pruned")
true
} else if (child < 0) {
val loc = spanToCell(span)
val hash = chartAbove.stateHash(state, lSpine, rSpine, xSpine)
val subbeamID = getSubbeamID(chartAbove.stage.subbeamType,
span, state, lSpine, rSpine, xSpine, arcCreation)
val pos = chartAbove.posForHash(loc, subbeamID, hash)
if (pos < 0) {
if (check(VAdding, verbosity)) logln(s" coarse does not exist, $pos")
if (gold) {
logln(s"Lost gold: checkCoarseState1 $thisClass $sentenceID $span "+
stateString(state, lSpine, rSpine, xSpine, 0, 0, 0, 0) +
s" $child $parent $arg $arcSecondStage $loc $hash $subbeamID $pos")
}
false
} else {
val insideScore = chartAbove.insideForPos(loc, pos)
val outsideScore = chartAbove.outsideForPos(loc, pos)
val total = insideScore + outsideScore
val ans = chartAbove.checkPruningRatio(total)
if (! ans && check(VAdding, verbosity)) {
val ratio = chartAbove.calcPruningRatio(total)
Log.logln(s"Coarse score too poor: $ratio < ${chartAbove.minRatio}")
}
if (!ans && gold) {
logln(s"Lost gold: checkCoarseState2 $thisClass $sentenceID $span "+
stateString(state, lSpine, rSpine, xSpine, 0, 0, 0, 0) +
s" $child $parent $arg $arcSecondStage")
}
ans
}
} else {
val hash = chartAbove.stateHash(state, lSpine, rSpine, xSpine)
val subbeam = getSubbeamID(chartAbove.stage.subbeamType, span, state,
lSpine, rSpine, xSpine, if (arcSecondStage) 2 else 1)
val loc = spanToCell(span)
val dist = span._2 - span._1
val pos = chartAbove.posForHash(loc, subbeam, hash)
if (pos < 0) {
if (check(VAdding, verbosity))
logln(s" coarse does not exist, $pos $state")
if (gold) {
logln(s"Lost gold: checkCoarseState3 $thisClass $sentenceID $span "+
stateString(state, lSpine, rSpine, xSpine, 0, 0, 0, 0) +
s" $child $parent $arg $arcSecondStage")
}
false
} else {
// Get outside from above it and inside from the predecessor below it
val arcInside = chartAbove.insideForPos(loc, pos)
val arcOutside = chartAbove.outsideForPos(loc, pos)
// Get the score itself
val cSpine =
if (child == span._1) lSpine
else if (child == span._2) rSpine
else xSpine
val pSpine =
if (parent == span._1) lSpine
else if (parent == span._2) rSpine
else xSpine
val oldArg =
if (structural) chartAbove.argSForPos(loc, pos)
else if (chain) chartAbove.argCForPos(loc, pos)
else chartAbove.argTForPos(loc, pos)
val oldArcScore = chartAbove.scoreArc(child, parent, cSpine, pSpine,
oldArg, dist, false, LossType.ZERO, false).toFloat
val arcScore = chartAbove.scoreArc(child, parent, cSpine, pSpine, arg,
dist, false, LossType.ZERO, false).toFloat
// Check against the ratio cutoff
val total = arcScore - oldArcScore + arcInside + arcOutside
val ratio = chartAbove.calcPruningRatio(total)
val ans = chartAbove.checkPruningRatio(total)
if (! ans && check(VAdding, verbosity)) {
Log.logln(s"Coarse score too poor: $ratio < ${chartAbove.minRatio}")
}
if (!ans && gold) {
logln(s"Lost gold: checkCoarseState4 $thisClass $sentenceID $span "+
stateString(state, lSpine, rSpine, xSpine, 0, 0, 0, 0) +
s" $child $parent $arg $arcSecondStage")
}
coarseArcRatio = ratio
ans
}
}
}
// This is not needed as the actual items will be checked
def checkCoarseSpine(pos: Int, spine: Int, gold: Boolean) = true
override def scoreArc(
childIndex: Int, parentIndex: Int, childSpine: Int, parentSpine: Int,
arg: Int, length: Int, gold: Boolean, lossType: LossType.Value,
goldHere: Boolean, countOnGet: (Int, Double) => Unit = null
) = {
val score = model.getArcScore(tokenIDs, tokenFeatureIDs, tagIDs,
childIndex, parentIndex, childSpine, parentSpine, arg, length, gold,
scoreCache, surfaceFeatureLists, countOnGet, useModelCache, edgeSpace)
score + super.scoreArc(childIndex, parentIndex, childSpine, parentSpine,
arg, length, gold, lossType, goldHere, countOnGet)
}
override def addSpineTopOptions = {
// Not planning to use this as a pruning pass, so nothing to do here.
}
}
class FullStructureParser(
stage: Stage,
coarseParser: Option[Parser],
traceParser: Option[Parser],
spineParser: Option[Parser],
pruningRatio: Double,
pruningRank: Double,
verbosity: Int,
doNotPruneGoldArcs: Boolean,
doNotPruneGoldSpines: Boolean,
training: Boolean
) extends Parser(stage, coarseParser, traceParser, spineParser, pruningRatio, pruningRank, verbosity, doNotPruneGoldArcs, doNotPruneGoldSpines, training) {
override val thisClass = "FullStructureParser."+ stage.name
override def makeFineChart(
tokens: Vector[String], tags: Vector[String], sentenceID: Int,
goldEdges: UnboxedArrayBuffer, goldUnaries: ArrayBuffer[Int],
goldStates: Set[(Int, Int, Int, Int, Int)],
lossType: LossType.Value
) = {
new FullStructureChart(stage, coarseChart, traceChart, spineChart, tokens,
tags, sentenceID, goldEdges, goldUnaries, goldStates, lossType,
doNotPruneGoldArcs, doNotPruneGoldSpines, pruningRatio, pruningRank,
verbosity, training)
}
override def parse(
tokens: Vector[String], tags: Vector[String], sentenceID: Int,
doOutside: Boolean = true, goldEdges: UnboxedArrayBuffer,
goldUnaries: ArrayBuffer[Int], goldStates: Set[(Int, Int, Int, Int, Int)],
lossType: LossType.Value = LossType.ZERO
) = {
logln("\\nParsing (full) "+ (tokens mkString " "))
if (tags.length > 0) logln("With POS tags "+ (tags mkString " "))
else logln("Without pre-defined POS tags")
if (! prepared)
prepare(tokens, tags, sentenceID, goldEdges, goldUnaries, goldStates,
lossType)
// Coarse
coarseParser.foreach{
_.parse(tokens, tags, sentenceID, true, goldEdges, goldUnaries, goldStates)
}
Log.logln(s"Coarse: ${coarseChart.curMinMarginal} ${coarseChart.scoreRange}")
// Full
val preParse = System.nanoTime()
fineChart.doInsidePass
val parse = fineChart.extractParse
val postParse = System.nanoTime()
postParseLog(postParse - preParse, parse._3, tokens, lossType)
prepared = false
parse
}
}
|
jkkummerfeld/1ec-graph-parser
|
parser/src/main/scala/fullStructureParser.scala
|
Scala
|
isc
| 27,050 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.test
import org.scalatest.FlatSpec
import org.apache.spark.sql.Dataset
/**
* The purpose of this suite is to make sure that generic FlatSpec-based scala
* tests work with a shared spark session
*/
class GenericFlatSpecSuite extends FlatSpec with SharedSparkSession {
import testImplicits._
private def ds = Seq((1, 1), (2, 1), (3, 2), (4, 2), (5, 3), (6, 3), (7, 4), (8, 4)).toDS
"A Simple Dataset" should "have the specified number of elements" in {
assert(8 === ds.count)
}
it should "have the specified number of unique elements" in {
assert(8 === ds.distinct.count)
}
it should "have the specified number of elements in each column" in {
assert(8 === ds.select("_1").count)
assert(8 === ds.select("_2").count)
}
it should "have the correct number of distinct elements in each column" in {
assert(8 === ds.select("_1").distinct.count)
assert(4 === ds.select("_2").distinct.count)
}
}
|
bravo-zhang/spark
|
sql/core/src/test/scala/org/apache/spark/sql/test/GenericFlatSpecSuite.scala
|
Scala
|
apache-2.0
| 1,772 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster
import java.nio.ByteBuffer
import org.apache.spark.TaskState.TaskState
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler.ExecutorLossReason
import org.apache.spark.util.SerializableBuffer
private[spark] sealed trait CoarseGrainedClusterMessage extends Serializable
private[spark] object CoarseGrainedClusterMessages {
case object RetrieveSparkAppConfig extends CoarseGrainedClusterMessage
case class SparkAppConfig(
sparkProperties: Seq[(String, String)],
ioEncryptionKey: Option[Array[Byte]])
extends CoarseGrainedClusterMessage
case object RetrieveLastAllocatedExecutorId extends CoarseGrainedClusterMessage
// Driver to executors
case class LaunchTask(data: SerializableBuffer) extends CoarseGrainedClusterMessage
case class KillTask(taskId: Long, executor: String, interruptThread: Boolean, reason: String)
extends CoarseGrainedClusterMessage
case class KillExecutorsOnHost(host: String)
extends CoarseGrainedClusterMessage
sealed trait RegisterExecutorResponse
case object RegisteredExecutor extends CoarseGrainedClusterMessage with RegisterExecutorResponse
case class RegisterExecutorFailed(message: String) extends CoarseGrainedClusterMessage
with RegisterExecutorResponse
// Executors to driver
case class RegisterExecutor(
executorId: String,
executorRef: RpcEndpointRef,
hostname: String,
cores: Int,
logUrls: Map[String, String])
extends CoarseGrainedClusterMessage
case class StatusUpdate(executorId: String, taskId: Long, state: TaskState,
data: SerializableBuffer) extends CoarseGrainedClusterMessage
object StatusUpdate {
/** Alternate factory method that takes a ByteBuffer directly for the data field */
def apply(executorId: String, taskId: Long, state: TaskState, data: ByteBuffer)
: StatusUpdate = {
StatusUpdate(executorId, taskId, state, new SerializableBuffer(data))
}
}
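// Usage sketch (identifiers are hypothetical): the factory above wraps a raw ByteBuffer
// in a SerializableBuffer so callers do not have to do it themselves, e.g.
//   StatusUpdate("exec-1", 42L, TaskState.FINISHED, ByteBuffer.wrap(resultBytes))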
// Internal messages in driver
case object ReviveOffers extends CoarseGrainedClusterMessage
case object StopDriver extends CoarseGrainedClusterMessage
case object StopExecutor extends CoarseGrainedClusterMessage
case object StopExecutors extends CoarseGrainedClusterMessage
case class RemoveExecutor(executorId: String, reason: ExecutorLossReason)
extends CoarseGrainedClusterMessage
case class RemoveWorker(workerId: String, host: String, message: String)
extends CoarseGrainedClusterMessage
case class SetupDriver(driver: RpcEndpointRef) extends CoarseGrainedClusterMessage
// Exchanged between the driver and the AM in Yarn client mode
case class AddWebUIFilter(
filterName: String, filterParams: Map[String, String], proxyBase: String)
extends CoarseGrainedClusterMessage
// Messages exchanged between the driver and the cluster manager for executor allocation
// In Yarn mode, these are exchanged between the driver and the AM
case class RegisterClusterManager(am: RpcEndpointRef) extends CoarseGrainedClusterMessage
// Request executors by specifying the new total number of executors desired
// This includes executors already pending or running
case class RequestExecutors(
requestedTotal: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: Map[String, Int],
nodeBlacklist: Set[String])
extends CoarseGrainedClusterMessage
// Check if an executor was force-killed but for a reason unrelated to the running tasks.
// This could be the case if the executor is preempted, for instance.
case class GetExecutorLossReason(executorId: String) extends CoarseGrainedClusterMessage
case class KillExecutors(executorIds: Seq[String]) extends CoarseGrainedClusterMessage
// Used internally by executors to shut themselves down.
case object Shutdown extends CoarseGrainedClusterMessage
}
|
mike0sv/spark
|
core/src/main/scala/org/apache/spark/scheduler/cluster/CoarseGrainedClusterMessage.scala
|
Scala
|
apache-2.0
| 4,691 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex
import java.io._
import java.net.URL
import java.util.concurrent.{TimeUnit, TimeoutException}
import java.util.jar.JarFile
import java.util.{Timer, TimerTask}
import akka.actor._
import akka.pattern.ask
import akka.routing.FromConfig
import akka.util.Timeout
import com.typesafe.config._
import com.typesafe.scalalogging.LazyLogging
import org.squbs.lifecycle.ExtensionLifecycle
import org.squbs.pipeline.PipelineSetting
import org.squbs.unicomplex.UnicomplexBoot.CubeInit
import org.squbs.util.ConfigUtil._
import scala.annotation.tailrec
import scala.collection.concurrent.TrieMap
import scala.collection.mutable
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
import scala.language.{postfixOps, existentials}
import scala.util.control.NonFatal
import scala.util.{Failure, Success, Try}
object UnicomplexBoot extends LazyLogging {
final val extConfigDirKey = "squbs.external-config-dir"
final val extConfigNameKey = "squbs.external-config-files"
final val actorSystemNameKey = "squbs.actorsystem-name"
val defaultStartupTimeout: Timeout =
Try(System.getProperty("startup.timeout").toLong) map { millis =>
akka.util.Timeout(millis, TimeUnit.MILLISECONDS)
} getOrElse (1 minute)
object StartupType extends Enumeration {
type StartupType = Value
val
// Identifies extensions
EXTENSIONS,
// Identifies actors as startup type
ACTORS,
// Identifies service as startup type
SERVICES = Value
}
case class CubeInit(info: Cube, components: Map[StartupType.Value, Seq[Config]])
val actorSystems = TrieMap.empty[String, ActorSystem]
def apply(addOnConfig: Config): UnicomplexBoot = {
val startTime = Timestamp(System.nanoTime, System.currentTimeMillis)
UnicomplexBoot(startTime, Option(addOnConfig), getFullConfig(Option(addOnConfig)))
}
def apply(actorSystemCreator: (String, Config) => ActorSystem): UnicomplexBoot = {
val startTime = Timestamp(System.nanoTime, System.currentTimeMillis)
UnicomplexBoot(startTime, None, getFullConfig(None), actorSystemCreator)
}
def getFullConfig(addOnConfig: Option[Config]): Config = {
val baseConfig = ConfigFactory.load()
// 1. See whether add-on config is there.
addOnConfig match {
case Some(config) =>
ConfigFactory.load(config withFallback baseConfig)
case None =>
// Note: configDir is needed to locate the external config files, so it cannot itself come from those files.
val configDir = new File(baseConfig.getString(extConfigDirKey))
val configNames = baseConfig.getStringList(extConfigNameKey)
configNames.add("application")
val parseOptions = ConfigParseOptions.defaults().setAllowMissing(true)
val addConfigs = configNames.asScala.map {
name => ConfigFactory.parseFileAnySyntax(new File(configDir, name), parseOptions)
}
if (addConfigs.isEmpty) baseConfig
else ConfigFactory.load((addConfigs :\\ baseConfig) (_ withFallback _))
}
}
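// Illustrative only: with hypothetical settings such as
//   squbs.external-config-dir = "/etc/myapp"
//   squbs.external-config-files = ["overrides"]
// the method above parses /etc/myapp/overrides.{conf,json,properties} and
// /etc/myapp/application.{conf,json,properties} (missing files are allowed) and
// layers them over the classpath configuration via withFallback.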
private[unicomplex] def scan(jarNames: Seq[String])(boot: UnicomplexBoot): UnicomplexBoot = {
val configEntries = jarNames map readConfigs
val jarConfigs = jarNames zip configEntries collect { case (jar, Some(cfg)) => (jar, cfg) }
resolveCubes(jarConfigs, boot.copy(jarNames = jarNames))
}
private[unicomplex] def scanResources(resources: Seq[URL],
withClassPath: Boolean = true)(boot: UnicomplexBoot): UnicomplexBoot = {
val cpResources: Seq[URL] =
if (withClassPath) {
val loader = getClass.getClassLoader
Seq("conf", "json", "properties").flatMap { ext => loader.getResources(s"META-INF/squbs-meta.$ext").asScala }
} else Seq.empty
// Dedup the resources, just in case.
val allResources = mutable.LinkedHashSet(cpResources ++ resources : _*).toSeq
val jarConfigs = allResources map readConfigs collect { case Some(jarCfg) => jarCfg }
resolveCubes(jarConfigs, boot)
}
private[this] def resolveCubes(jarConfigs: Seq[(String, Config)], boot: UnicomplexBoot) = {
val cubeList = resolveAliasConflicts(jarConfigs map { case (jar, config) => readCube(jar, config) } collect {
case Some(cube) => cube
})
// Read listener and alias information.
val (activeAliases, activeListeners, missingAliases) = findListeners(boot.config, cubeList)
missingAliases foreach { name => logger.warn(s"Requested listener $name not found!") }
boot.copy(cubes = cubeList, jarConfigs = jarConfigs, listeners = activeListeners, listenerAliases = activeAliases)
}
private def createReaderFromFS(directory: File): String => Option[Reader] = {
(filePath: String) => Option(new File(directory, filePath)) collect {
case configFile if configFile.isFile => new InputStreamReader(new FileInputStream(configFile), "UTF-8")
}
}
private def createReaderFromJarFile(file: File): String => Option[Reader] = {
val triedJarFile = Try(new JarFile(file))
(filePath: String) => triedJarFile match {
case Success(jarFile) => Option(jarFile.getEntry(filePath)) collect {
case configFile if !configFile.isDirectory => new InputStreamReader(jarFile.getInputStream(configFile), "UTF-8")
}
case Failure(e) => throw e
}
}
private def getConfigReader(jarName: String): Option[(Option[Reader], String)] = {
// Make it extra lazy, so that we do not create the next File if the previous one succeeds.
val configExtensions = Stream("conf", "json", "properties")
val maybeConfFileReader = Option(new File(jarName)) collect {
case file if file.isDirectory => createReaderFromFS(file)
case file if file.isFile => createReaderFromJarFile(file)
}
maybeConfFileReader flatMap (fileReader => configExtensions map { ext =>
val currentFile = s"META-INF/squbs-meta.$ext"
Try(fileReader(currentFile)) match {
case Failure(e) =>
logger.info(s"${e.getClass.getName} reading configuration from $jarName : $currentFile.\\n${e.getMessage}")
None
case Success(maybeReader) => Option(maybeReader, currentFile)
}
} find (_.isDefined) flatten)
}
private[this] def readConfigs(jarName: String): Option[Config] = {
getConfigReader(jarName) flatMap ((maybeReader: Option[Reader], fileName: String) => {
val maybeConfig = Try(maybeReader map ConfigFactory.parseReader) match {
case Failure(e) =>
logger.info(s"${e.getClass.getName} reading configuration from $jarName : $fileName.\\n${e.getMessage}")
None
case Success(cfg) => cfg
}
maybeReader foreach(_.close())
maybeConfig
}).tupled
}
private[this] def readConfigs(resource: URL): Option[(String, Config)] = {
// Taking the best guess at the jar name or classpath entry. Should work most of the time.
val jarName = resource.getProtocol match {
case "jar" =>
val jarURL = new URL(resource.getPath.split('!')(0))
jarURL.getProtocol match {
case "file" => jarURL.getPath
case _ => jarURL.toString
}
case "file" => // We assume the classpath entry ends before the last /META-INF/
val path = resource.getPath
val endIdx = path.lastIndexOf("/META-INF/")
if (endIdx > 0) path.substring(0, endIdx) else path
case _ =>
val path = resource.toString
val endIdx = path.lastIndexOf("/META-INF/")
if (endIdx > 0) path.substring(0, endIdx) else path
}
try {
val config = ConfigFactory.parseURL(resource, ConfigParseOptions.defaults().setAllowMissing(false))
Some((jarName, config))
} catch {
case NonFatal(e) =>
logger.warn(s"${e.getClass.getName} reading configuration from $jarName.\\n ${e.getMessage}")
None
}
}
private[this] def readCube(jarPath: String, config: Config): Option[CubeInit] = {
val cubeName =
try {
config.getString("cube-name")
} catch {
case e: ConfigException => return None
}
val cubeVersion =
try {
config.getString("cube-version")
} catch {
case e: ConfigException => return None
}
val cubeAlias = cubeName.substring(cubeName.lastIndexOf('.') + 1)
val c = Seq(
config.getOption[Seq[Config]]("squbs-actors") map ((StartupType.ACTORS, _)),
config.getOption[Seq[Config]]("squbs-services") map ((StartupType.SERVICES, _)),
config.getOption[Seq[Config]]("squbs-extensions") map ((StartupType.EXTENSIONS, _))
).collect { case Some((sType, configs)) => (sType, configs) }.toMap
Some(CubeInit(Cube(cubeAlias, cubeName, cubeVersion, jarPath), c))
}
// Resolve cube alias conflict by making it longer on demand.
@tailrec
private[unicomplex] def resolveAliasConflicts(cubeList: Seq[CubeInit]): Seq[CubeInit] = {
val aliasConflicts = cubeList map { cube =>
(cube.info.name, cube.info.fullName)
} groupBy (_._1) mapValues { seq =>
(seq map (_._2)).toSet
} filter { _._2.size > 1 }
if (aliasConflicts.isEmpty) cubeList
else {
var updated = false
val newAliases = (aliasConflicts flatMap { case (alias, conflicts) =>
conflicts.toSeq map { symName =>
val idx = symName.lastIndexOf('.', symName.length - alias.length - 2)
if (idx > 0) {
updated = true
(symName, symName.substring(idx + 1))
}
else (symName, symName)
}
}).toSeq
if (updated) {
val updatedList = cubeList map { cube =>
newAliases find { case (symName, alias) => symName == cube.info.fullName } match {
case Some((symName, alias)) => cube.copy(info = cube.info.copy(name = alias))
case None => cube
}
}
resolveAliasConflicts(updatedList)
}
else sys.error("Duplicate cube names: " + (aliasConflicts flatMap (_._2) mkString ", "))
}
}
private[unicomplex] def startComponents(cube: CubeInit, aliases: Map[String, String])
(implicit actorSystem: ActorSystem,
timeout: Timeout = UnicomplexBoot.defaultStartupTimeout) = {
import cube.components
import cube.info.{fullName, jarPath, name, version}
val cubeSupervisor = actorSystem.actorOf(Props[CubeSupervisor], name)
Unicomplex(actorSystem).uniActor ! CubeRegistration(cube.info, cubeSupervisor)
def startActor(actorConfig: Config): Option[(String, String, String, Class[_])] = {
val className = actorConfig getString "class-name"
val name = actorConfig.get[String]("name", className substring (className.lastIndexOf('.') + 1))
val withRouter = actorConfig.get[Boolean]("with-router", false)
val initRequired = actorConfig.get[Boolean]("init-required", false)
try {
val clazz = Class.forName(className, true, getClass.getClassLoader)
clazz asSubclass classOf[Actor]
// Create the Props for this actor to be started, optionally enabling the router.
val props = if (withRouter) Props(clazz) withRouter FromConfig() else Props(clazz)
// Send the props to be started by the cube.
cubeSupervisor ! StartCubeActor(props, name, initRequired)
Some((fullName, name, version, clazz))
} catch {
case NonFatal(e) =>
val t = getRootCause(e)
logger.warn(s"Can't load actor: $className.\\n" +
s"Cube: $fullName $version\\n" +
s"Path: $jarPath\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
cubeSupervisor ! StartFailure(e)
None
}
}
def startServiceRoute(clazz: Class[_], webContext: String, listeners: Seq[String],
ps: PipelineSetting) = {
Try {
(clazz asSubclass classOf[RouteDefinition], classOf[RouteActor])
} orElse Try {
(clazz asSubclass classOf[FlowDefinition], classOf[FlowActor])
} orElse Try {
(clazz asSubclass classOf[AbstractRouteDefinition], classOf[JavaRouteActor])
} orElse Try {
(clazz asSubclass classOf[AbstractFlowDefinition], classOf[JavaFlowActor])
} map { case (routeClass, routeActor) =>
val props = Props(routeActor, webContext, routeClass)
val className = clazz.getSimpleName
val actorName =
if (webContext.length > 0) s"${webContext.replace('/', '_')}-$className-route"
else s"root-$className-route"
cubeSupervisor ! StartCubeService(webContext, listeners, props, actorName, ps, initRequired = true)
(fullName, name, version, clazz)
}
}
// This same creator class is available in Akka's Props.scala but it is inaccessible to us.
class TypedCreatorFunctionConsumer(clz: Class[_ <: Actor], creator: () => Actor) extends IndirectActorProducer {
override def actorClass = clz
override def produce() = creator()
}
def startServiceActor(clazz: Class[_], webContext: String, listeners: Seq[String],
ps: PipelineSetting, initRequired: Boolean) =
Try {
val actorClass = clazz asSubclass classOf[Actor]
def actorCreator: Actor = WithWebContext(webContext) { actorClass.newInstance() }
val props = Props(classOf[TypedCreatorFunctionConsumer], clazz, actorCreator _)
val className = clazz.getSimpleName
val actorName =
if (webContext.length > 0) s"${webContext.replace('/', '_')}-$className-handler"
else s"root-$className-handler"
cubeSupervisor ! StartCubeService(webContext, listeners, props, actorName, ps, initRequired)
(fullName, name, version, actorClass)
}
def startService(serviceConfig: Config): Option[(String, String, String, Class[_])] =
Try {
val className = serviceConfig.getString("class-name")
val clazz = Class.forName(className, true, getClass.getClassLoader)
val webContext = serviceConfig.getString("web-context")
val pipeline = serviceConfig.getOption[String]("pipeline")
val defaultFlowsOn = serviceConfig.getOption[Boolean]("defaultPipeline")
val pipelineSettings = (pipeline, defaultFlowsOn)
val listeners = serviceConfig.getOption[Seq[String]]("listeners").fold(Seq("default-listener")) { list =>
if (list.contains("*")) aliases.values.toSeq.distinct
else list flatMap { entry =>
aliases.get(entry) match {
case Some(listener) => Seq(listener)
case None =>
logger.warn(s"Listener $entry required by $fullName is not configured. Ignoring.")
Seq.empty[String]
}
}
}
val service = startServiceRoute(clazz, webContext, listeners, pipelineSettings) orElse
startServiceActor(clazz, webContext, listeners, pipelineSettings,
serviceConfig.get[Boolean]("init-required", false))
service match {
case Success(svc) => svc
case Failure(e) =>
throw new IOException(s"Class $className is neither a RouteDefinition nor an Actor.", e)
}
} match {
case Success(svc) => Some(svc)
case Failure(e) =>
val t = getRootCause(e)
logger.warn(s"Can't load service definition $serviceConfig.\\n" +
s"Cube: $fullName $version\\n" +
s"Path: $jarPath\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
cubeSupervisor ! StartFailure(e)
None
}
val actorConfigs = components.getOrElse(StartupType.ACTORS, Seq.empty)
val routeConfigs = components.getOrElse(StartupType.SERVICES, Seq.empty)
val actorInfo = actorConfigs map startActor
val routeInfo = routeConfigs map startService
val startedF = cubeSupervisor ? Started // Tell the cube that all the actors to be started have been started.
logger.info(s"Started cube $fullName $version")
val componentInfo = (actorInfo ++ routeInfo) collect { case Some(component) => component }
(startedF, componentInfo)
}
def configuredListeners(config: Config): Map[String, Config] = {
val listeners = config.root.asScala.toSeq.collect {
case (n, v: ConfigObject) if v.toConfig.getOption[String]("type").contains("squbs.listener") => (n, v.toConfig)
}
resolveDuplicates[Config](listeners, (name, conf, c) =>
logger.warn(s"Duplicate listener $name already declared. Ignoring.")
)
}
def findListenerAliases(listeners: Map[String, Config]): Map[String, String] = {
val aliases = for ((name, config) <- listeners) yield {
val aliasNames = config.get[Seq[String]]("aliases", Seq.empty[String])
(name, name) +: (aliasNames map ((_, name)))
}
resolveDuplicates[String](aliases.toSeq.flatten, (alias, listener, l) =>
logger.warn(s"Duplicate alias $alias for listener $listener already declared for listener $l. Ignoring.")
)
}
def resolveDuplicates[T](in: Seq[(String, T)], duplicateHandler: (String, T, T) => Unit): Map[String, T] = {
in.groupBy(_._1).map {
case (key, Seq((k, v))) => key -> v
case (key, head::tail) =>
tail.foreach { case (k, ass) => duplicateHandler(k, ass, head._2)}
key -> head._2
}
}
def findListeners(config: Config, cubes: Seq[CubeInit]) = {
val demandedListeners =
for {
routes <- cubes.map { _.components.get(StartupType.SERVICES) }.collect { case Some(routes) => routes }.flatten
routeListeners <- routes.get[Seq[String]]("listeners", Seq("default-listener"))
if routeListeners != "*" // Filter out wildcard listener bindings, not starting those.
} yield {
routeListeners
}
val listeners = configuredListeners(config)
val aliases = findListenerAliases(listeners)
val activeAliases = aliases filter { case (n, _) => demandedListeners contains n }
val missingAliases = demandedListeners filterNot { l => activeAliases exists { case (n, _) => n == l } }
val activeListenerNames = activeAliases.values
val activeListeners = listeners filter { case (n, c) => activeListenerNames exists (_ == n) }
(activeAliases, activeListeners, missingAliases)
}
def startServiceInfra(boot: UnicomplexBoot)(implicit actorSystem: ActorSystem): Unit = {
def getTimeout(keyRelPath: String): Option[Timeout] = {
val key = s"squbs.service-infra.$keyRelPath"
val timeoutDuration = actorSystem.settings.config.getOptionalDuration(key)
timeoutDuration.map { d =>
require(d.toMillis > 0, s"The config property, $key, must be greater than 0 milliseconds.")
Timeout(d)
}
}
val overallTimeout = getTimeout("timeout").getOrElse(Timeout(60.seconds))
val listenerTimeout =
getTimeout("listener-timeout")
.getOrElse(Timeout(10.seconds))
startServiceInfra(boot, overallTimeout, listenerTimeout)
}
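// Illustrative only: the timeouts above can be tuned with hypothetical settings such as
//   squbs.service-infra.timeout = 90 s
//   squbs.service-infra.listener-timeout = 15 s
// falling back to 60 and 10 seconds respectively when the keys are absent.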
def startServiceInfra(
boot: UnicomplexBoot,
timeout: Timeout,
listenerTimeout: Timeout
)(implicit actorSystem: ActorSystem): Unit = {
import actorSystem.dispatcher
val startTime = System.nanoTime
implicit val to = listenerTimeout
val ackFutures =
for ((listenerName, config) <- boot.listeners) yield {
val responseFuture = Unicomplex(actorSystem).uniActor ? StartListener(listenerName, config)
responseFuture.onComplete {
case Failure(t) if (t.isInstanceOf[TimeoutException]) =>
logger.error(s"The Unicomplex could not start the listener, $listenerName, within $to.", t)
case Failure(t) =>
logger.error(s"The Unicomplex failed to start the listener, $listenerName.", t)
case Success(StartFailure(t)) =>
logger.error(s"The Unicomplex reported a start failure for the listener, $listenerName.", t)
case _ =>
}
responseFuture
}
// Block for the web service to be started.
Await.ready(Future.sequence(ackFutures), timeout.duration)
val elapsed = (System.nanoTime - startTime) / 1000000
logger.info(s"Web Service started in $elapsed milliseconds")
}
@tailrec
private[unicomplex] def getRootCause(e: Throwable): Throwable = {
Option(e.getCause) match {
case Some(ex) => getRootCause(ex)
case None => e
}
}
}
case class UnicomplexBoot private[unicomplex](startTime: Timestamp,
addOnConfig: Option[Config] = None,
config: Config,
actorSystemCreator: (String, Config) => ActorSystem = { (name, config) => ActorSystem(name, config) },
cubes: Seq[CubeInit] = Seq.empty,
listeners: Map[String, Config] = Map.empty,
listenerAliases: Map[String, String] = Map.empty,
jarConfigs: Seq[(String, Config)] = Seq.empty,
jarNames: Seq[String] = Seq.empty,
actors: Seq[(String, String, String, Class[_])] = Seq.empty,
extensions: Seq[Extension] = Seq.empty,
started: Boolean = false,
stopJVM: Boolean = false) extends LazyLogging {
import UnicomplexBoot._
def actorSystemName = config.getString(actorSystemNameKey)
def actorSystem = UnicomplexBoot.actorSystems(actorSystemName)
def externalConfigDir = config.getString(extConfigDirKey)
def createUsing(actorSystemCreator: (String, Config) => ActorSystem) = copy(actorSystemCreator = actorSystemCreator)
def scanComponents(jarNames: Seq[String]): UnicomplexBoot = scan(jarNames)(this)
def scanComponents(jarNames: Array[String]): UnicomplexBoot = scan(jarNames.toSeq)(this)
def scanResources(withClassPath: Boolean, resources: String*): UnicomplexBoot =
UnicomplexBoot.scanResources(resources map (new File(_).toURI.toURL), withClassPath)(this)
def scanResources(resources: String*): UnicomplexBoot =
UnicomplexBoot.scanResources(resources map (new File(_).toURI.toURL))(this)
def scanResources(withClassPath: Boolean, resources: Array[String]): UnicomplexBoot =
scanResources(withClassPath, resources: _*)
def initExtensions: UnicomplexBoot = {
val initSeq = cubes.flatMap { cube =>
cube.components.getOrElse(StartupType.EXTENSIONS, Seq.empty) map { config =>
val className = config getString "class-name"
val seqNo = config.get[Int]("sequence", Int.MaxValue)
(seqNo, className, cube)
}
}.sortBy(_._1)
// load extensions
val extensions = initSeq map (loadExtension _).tupled
// preInit extensions
val preInitExtensions = extensions map extensionOp("preInit", _.preInit())
// Init extensions
val initExtensions = preInitExtensions map extensionOp("init", _.init())
copy(extensions = initExtensions)
}
def stopJVMOnExit: UnicomplexBoot = copy(stopJVM = true)
def start(): UnicomplexBoot = start(defaultStartupTimeout)
def start(implicit timeout: Timeout): UnicomplexBoot = synchronized {
if (started) throw new IllegalStateException("Unicomplex already started!")
// Extensions may have changed the config. So we need to reload the config here.
val newConfig = UnicomplexBoot.getFullConfig(addOnConfig)
val newName = config.getString(UnicomplexBoot.actorSystemNameKey)
implicit val actorSystem = {
val system = actorSystemCreator(newName, newConfig)
system.registerExtension(Unicomplex)
Unicomplex(system).setScannedComponents(jarNames)
system
}
UnicomplexBoot.actorSystems += actorSystem.name -> actorSystem
actorSystem.registerOnTermination {
UnicomplexBoot.actorSystems -= actorSystem.name
}
registerExtensionShutdown(actorSystem)
val uniActor = Unicomplex(actorSystem).uniActor
// Send start time to Unicomplex
uniActor ! startTime
// Register extensions in Unicomplex actor
uniActor ! Extensions(extensions)
val startServices = listeners.nonEmpty && cubes.exists(_.components.contains(StartupType.SERVICES))
// Notify Unicomplex that services will be started.
if (startServices) uniActor ! PreStartWebService(listeners)
// Signal started to Unicomplex.
uniActor ! Started
val preCubesInitExtensions = extensions map extensionOp("preCubesInit", _.preCubesInit())
uniActor ! Extensions(preCubesInitExtensions)
// Start all actors
val (futures, actorsUnflat) = cubes.map(startComponents(_, listenerAliases)).unzip
val actors = actorsUnflat.flatten
import actorSystem.dispatcher
Await.ready(Future.sequence(futures), timeout.duration)
// Start the service infrastructure if services are enabled and registered.
if (startServices) startServiceInfra(this)
val postInitExtensions = preCubesInitExtensions map extensionOp("postInit", _.postInit())
// Update the extension errors in Unicomplex actor, in case there are errors.
uniActor ! Extensions(postInitExtensions)
{
// Tell Unicomplex we're done.
val stateFuture = Unicomplex(actorSystem).uniActor ? Activate
Try(Await.result(stateFuture, timeout.duration)) recoverWith { case _: TimeoutException =>
val recoverFuture = Unicomplex(actorSystem).uniActor ? ActivateTimedOut
Try(Await.result(recoverFuture, timeout.duration))
} match {
case Success(Active) => logger.info(s"[$actorSystemName] activated")
case Success(Failed) => logger.info(s"[$actorSystemName] initialization failed.")
case e => logger.warn(s"[$actorSystemName] awaiting confirmation, $e.")
}
}
val boot = copy(config = actorSystem.settings.config, actors = actors, extensions = postInitExtensions, started = true)
Unicomplex(actorSystem).boot send boot
boot
}
def registerExtensionShutdown(actorSystem: ActorSystem): Unit = {
if (extensions.nonEmpty) {
actorSystem.registerOnTermination {
// Run the shutdown in a different thread, not in the ActorSystem's onTermination thread.
import scala.concurrent.Future
// Kill the JVM if the shutdown takes longer than the timeout.
if (stopJVM) {
val shutdownTimer = new Timer(true)
shutdownTimer.schedule(new TimerTask {
def run(): Unit = {
System.exit(0)
}
}, 5000)
}
// Then run the shutdown in the global execution context.
import scala.concurrent.ExecutionContext.Implicits.global
Future {
extensions.reverse foreach { e =>
import e.info._
e.extLifecycle foreach { elc =>
logger.info(s"Shutting down extension ${elc.getClass.getName} in $fullName $version")
elc.shutdown()
}
}
} onComplete {
case Success(result) =>
logger.info(s"ActorSystem ${actorSystem.name} shutdown complete")
if (stopJVM) System.exit(0)
case Failure(e) =>
logger.error(s"Error occurred during shutdown extensions: $e", e)
if (stopJVM) System.exit(-1)
}
}
}
}
def loadExtension(seqNo: Int, className: String, cube: CubeInit): Extension = {
try {
val clazz = Class.forName(className, true, getClass.getClassLoader)
val extLifecycle = ExtensionLifecycle(this) { clazz.asSubclass(classOf[ExtensionLifecycle]).newInstance }
Extension(cube.info, seqNo, Some(extLifecycle), Seq.empty)
} catch {
case NonFatal(e) =>
import cube.info._
val t = getRootCause(e)
logger.warn(s"Can't load extension $className.\\n" +
s"Cube: $fullName $version\\n" +
s"Path: $jarPath\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
Extension(cube.info, seqNo, None, Seq("load" -> t))
}
}
def extensionOp(opName: String, opFn: ExtensionLifecycle => Unit)
(extension: Extension): Extension = {
import extension.info._
extension.extLifecycle match {
case None => extension
case Some(l) =>
try {
opFn(l)
logger.info(s"Success $opName extension ${l.getClass.getName} in $fullName $version")
extension
} catch {
case NonFatal(e) =>
val t = getRootCause(e)
logger.warn(s"Error on $opName extension ${l.getClass.getName}\\n" +
s"Cube: $fullName $version\\n" +
s"${t.getClass.getName}: ${t.getMessage}")
t.printStackTrace()
extension.copy(exceptions = extension.exceptions :+ (opName -> t))
}
}
}
}
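// Hedged usage sketch (not part of the original source): given an initial UnicomplexBoot obtained
// from the companion's factory (not shown in this excerpt), this chains the builder methods defined
// above. The resource path is a hypothetical value for illustration only.
object UnicomplexBootUsageSketch {
  def bootUp(initial: UnicomplexBoot): UnicomplexBoot =
    initial
      .scanResources("/squbs-meta.conf") // register cubes, listeners and extensions from descriptors
      .initExtensions                    // load, preInit and init the declared extensions
      .stopJVMOnExit                     // force a JVM exit once the actor system terminates
      .start()                           // create the ActorSystem, start actors/services, activate
}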
|
anilgursel/squbs
|
squbs-unicomplex/src/main/scala/org/squbs/unicomplex/UnicomplexBoot.scala
|
Scala
|
apache-2.0
| 29,744 |
package model.auth
import common.FieldError
import jwt.model.Tokens
import model.User
case class AuthPayload(
user: Option[User] = None,
tokens: Option[Tokens] = None,
errors: Option[List[FieldError]] = None
)
|
sysgears/apollo-universal-starter-kit
|
modules/user/server-scala/src/main/scala/model/auth/AuthPayload.scala
|
Scala
|
mit
| 224 |
package com.wavesplatform.events.protobuf
import cats.Monoid
import com.google.protobuf.ByteString
import com.wavesplatform.events.StateUpdate.AssetInfo
import com.wavesplatform.events.protobuf.BlockchainUpdated.Append.Body
import com.wavesplatform.events.protobuf.BlockchainUpdated.{Append, Rollback, Update}
import com.wavesplatform.protobuf._
import com.wavesplatform.protobuf.block.{PBBlocks, PBMicroBlocks}
import com.wavesplatform.transaction.Transaction
import com.wavesplatform.{events => ve}
import scala.util.{Failure, Try}
package object serde {
implicit class BlockchainUpdatedProtobuf(val self: ve.BlockchainUpdated) extends AnyVal {
import BlockchainUpdatedProtobuf._
def protobuf: BlockchainUpdated =
self match {
case ve.BlockAppended(
id,
height,
block,
updatedWavesAmount,
blockStateUpdate,
transactionStateUpdates,
transactionsMetadata,
referencedAssets
) =>
val blockUpdate = Some(blockStateUpdate).filterNot(_.isEmpty).map(_.protobuf)
val txsUpdates = transactionStateUpdates.map(_.protobuf)
BlockchainUpdated(
id = id.toByteString,
height = height,
update = BlockchainUpdated.Update.Append(
Append(
transactionIds = getIds(block.transactionData),
stateUpdate = blockUpdate,
transactionStateUpdates = txsUpdates,
transactionsMetadata = transactionsMetadata,
body = Append.Body.Block(
Append.BlockAppend(
block = Some(PBBlocks.protobuf(block)),
updatedWavesAmount = updatedWavesAmount
)
)
)
),
referencedAssets = referencedAssets.map(AssetInfo.toPB)
)
case ve.MicroBlockAppended(
totalBlockId,
height,
microBlock,
microBlockStateUpdate,
transactionStateUpdates,
transactionsMetadata,
totalTransactionsRoot,
referencedAssets
) =>
val microBlockUpdate = Some(microBlockStateUpdate).filterNot(_.isEmpty).map(_.protobuf)
val txsUpdates = transactionStateUpdates.map(_.protobuf)
BlockchainUpdated(
id = totalBlockId.toByteString,
height = height,
update = BlockchainUpdated.Update.Append(
Append(
transactionIds = getIds(microBlock.transactionData),
stateUpdate = microBlockUpdate,
transactionStateUpdates = txsUpdates,
transactionsMetadata = transactionsMetadata,
body = Append.Body.MicroBlock(
Append.MicroBlockAppend(
microBlock = Some(PBMicroBlocks.protobuf(microBlock, totalBlockId)),
updatedTransactionsRoot = totalTransactionsRoot.toByteString
)
)
)
),
referencedAssets = referencedAssets.map(AssetInfo.toPB)
)
case ve.RollbackCompleted(to, height, result, referencedAssets) =>
BlockchainUpdated(
id = to.toByteString,
height = height,
update = BlockchainUpdated.Update.Rollback(
Rollback(
Rollback.RollbackType.BLOCK,
result.removedTransactionIds.map(_.toByteString),
result.removedBlocks.map(PBBlocks.protobuf),
Some(result.stateUpdate.protobuf)
)
),
referencedAssets = referencedAssets.map(AssetInfo.toPB)
)
case ve.MicroBlockRollbackCompleted(toSig, height, result, referencedAssets) =>
BlockchainUpdated(
id = toSig.toByteString,
height = height,
update = BlockchainUpdated.Update.Rollback(
Rollback(
Rollback.RollbackType.MICROBLOCK,
result.removedTransactionIds.map(_.toByteString),
Nil,
Some(result.stateUpdate.protobuf)
)
),
referencedAssets = referencedAssets.map(AssetInfo.toPB)
)
}
}
object BlockchainUpdatedProtobuf {
private def getIds(txs: Seq[Transaction]): Seq[ByteString] = txs.map(t => ByteString.copyFrom(t.id().arr))
}
implicit class BlockchainUpdatedVanilla(val self: BlockchainUpdated) extends AnyVal {
def vanillaAppend: ve.BlockAppended =
self.update match {
case Update.Append(append) =>
append.body match {
case Body.Block(body) =>
ve.BlockAppended(
id = self.id.toByteStr,
height = self.height,
block = body.block.map(PBBlocks.vanilla(_, unsafe = true).get).orNull,
updatedWavesAmount = body.updatedWavesAmount,
blockStateUpdate = append.stateUpdate.fold(Monoid[ve.StateUpdate].empty)(_.vanilla.get),
transactionStateUpdates = append.transactionStateUpdates.map(_.vanilla.get),
transactionMetadata = append.transactionsMetadata,
referencedAssets = self.referencedAssets.map(AssetInfo.fromPB)
)
case _: Body.MicroBlock => throw new IllegalArgumentException("Encountered microblock append body")
case Body.Empty => throw new IllegalArgumentException("Empty append body")
}
case _: Update.Rollback => throw new IllegalArgumentException("Encountered rollback")
case Update.Empty => throw new IllegalArgumentException("Update body is empty")
}
}
implicit class StateUpdateVanilla(val self: StateUpdate) extends AnyVal {
def vanilla: Try[ve.StateUpdate] =
Try {
ve.StateUpdate.fromPB(self)
} recoverWith { case err: Throwable => Failure(new IllegalArgumentException(s"Invalid protobuf StateUpdate", err)) }
}
implicit class StateUpdateProtobuf(val self: ve.StateUpdate) extends AnyVal {
def protobuf: StateUpdate = ve.StateUpdate.toPB(self)
}
}
|
wavesplatform/Waves
|
grpc-server/src/main/scala/com/wavesplatform/events/protobuf/serde/package.scala
|
Scala
|
mit
| 6,240 |
/*
* This file is part of eCobertura.
*
* Copyright (c) 2010 Joachim Hofer
* All rights reserved.
*
* This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*/
package ecobertura.ui.util.layout
import org.eclipse.swt.layout._
import org.eclipse.swt.widgets._
object FormDataBuilder {
def forFormElement(formElement: Control) = new FormDataBuilder(formElement)
}
class FormDataBuilder(formElement: Control) {
private val formData = new FormData
def leftAtPercent(percent: Int, margin: Int) = {
formData.left = new FormAttachment(percent, margin)
this
}
def rightAtPercent(percent: Int, margin: Int) = {
formData.right = new FormAttachment(percent, -margin)
this
}
def topAtPercent(percent: Int, margin: Int) = {
formData.top = new FormAttachment(percent, margin)
this
}
def bottomAtPercent(percent: Int, margin: Int) = {
formData.bottom = new FormAttachment(percent, -margin)
this
}
def leftNeighborOf(neighbor: Control, margin: Int) = {
formData.right = new FormAttachment(neighbor, -margin)
this
}
def rightNeighborOf(neighbor: Control, margin: Int) = {
formData.left = new FormAttachment(neighbor, margin)
this
}
def topNeighborOf(neighbor: Control, margin: Int) = {
formData.bottom = new FormAttachment(neighbor, -margin)
this
}
def bottomNeighborOf(neighbor: Control, margin: Int) = {
formData.top = new FormAttachment(neighbor, margin)
this
}
def build = {
formElement.setLayoutData(formData)
}
}
|
jmhofer/eCobertura
|
ecobertura.ui/src/main/scala/ecobertura/ui/util/layout/FormDataBuilder.scala
|
Scala
|
epl-1.0
| 1,737 |
package cmwell.analytics.util
import org.apache.spark.sql.{Dataset, SparkSession}
/**
* Takes two Datasets that each contain a single 'uuid' column, and does a set difference on the two datasets
* (i.e., determine the uuids in uuids1 that are not also in uuids2). The resulting uuids are expanded to include
* the uuid, path and lastModified. A filter is applied to remove any rows that are "current" since we allow them
* to be inconsistent for a short time.
*
* This implicitly brings the entire set difference result to the driver. This should normally be fine since we
* expect the number of inconsistent infotons to be low enough to allow it. In general, this creates a possibility
 * for failure if the number of inconsistencies is very large.
*
* Bringing all the data locally also means that writing the data out would result in a single partition file
* being created, which is most likely what we are after anyway.
*
* When doing differences between systems, positives for the root path ("/") and paths starting with "/meta/" are
* filtered out, since those infotons are created specifically for a CM-Well instance, and will not have the same
* uuids between systems.
*/
object SetDifferenceAndFilter {
def apply(uuids1: Dataset[KeyFields],
uuids2: Dataset[KeyFields],
allowableConsistencyLag: Long,
filterOutMeta: Boolean = false)
(implicit spark: SparkSession): Dataset[KeyFields] = {
// Filter out any inconsistencies found if more current than this point in time.
// TODO: Should System.currentTimeMillis be used, and if so, when should it be observed?
val currentThreshold = new java.sql.Timestamp(System.currentTimeMillis - allowableConsistencyLag)
import spark.implicits._
// The original setDifference implementation used the SQL except function, but that ignores any pre-partitioning.
// The next implementation used a left-anti join, but that created a weird execution plan that caused poor performance.
    // The current implementation uses a left outer join, which uses an efficient sort-merge join.
def setDifference(uuids1: Dataset[KeyFields], uuids2: Dataset[KeyFields]): Dataset[KeyFields] =
uuids1.join(uuids2, uuids1("uuid") === uuids2("uuid"), "left_outer")
.filter(uuids2("uuid").isNull)
.select(uuids1("*"))
.as[KeyFields]
// Calculate the set difference between the two sets of uuids.
    // The left outer join plus the null filter emulates an anti-join: it keeps only the left-side rows that are not in the right side.
val positives = setDifference(uuids1, uuids2)
val timeToConsistencyFilter = positives("lastModified") < currentThreshold
val overallFilter = if (filterOutMeta)
timeToConsistencyFilter &&
(positives("path") =!= "/" && positives("path") =!= "/meta" && !positives("path").startsWith("/meta/"))
else
timeToConsistencyFilter
// Filter out any positives that occurred after the current threshold
positives.filter(overallFilter)
}
}
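// Hedged usage sketch (not part of the original source): applying SetDifferenceAndFilter to two
// uuid Datasets. The dataset names and the 30-second lag are hypothetical values for illustration.
object SetDifferenceAndFilterUsageSketch {
  def findInconsistencies(uuidsInCassandra: Dataset[KeyFields], uuidsInEs: Dataset[KeyFields])
                         (implicit spark: SparkSession): Dataset[KeyFields] =
    // uuids present in the first system but missing from the second, ignoring anything
    // modified within the last 30 seconds and skipping system-specific /meta infotons
    SetDifferenceAndFilter(uuidsInCassandra, uuidsInEs, allowableConsistencyLag = 30000L, filterOutMeta = true)
}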
|
bryaakov/CM-Well
|
tools/dataConsistencyTool/cmwell-spark-analysis/src/main/scala/cmwell/analytics/util/SetDifferenceAndFilter.scala
|
Scala
|
apache-2.0
| 3,035 |
package taczombie.test.model
import org.specs2.mutable._
import taczombie.test.model.TestObjects._
import taczombie.model.GameFactory
import taczombie.model.util.CoordinateHelper._
class CoordinateHelperSpec extends Specification {
val testFile = getClass().getResource("/TestLevel_correct")
val testGame = GameFactory.newGame(random = false, file = testFile.getFile())
val testCoord1 = (10,10)
val testCoord2 = (1,3)
val -- = (a:Int,b:Int) => a - b
val ++ = (a:Int,b:Int) => a + b
private def upperLeftFor5StepsInBothDimensions(coord : (Int,Int))
(y : Int, fn1 : (Int,Int) => Int)
(x : Int, fn2 : (Int,Int) => Int) = {
for {
i <- 0 until 5
j <- 0 until 5
} yield coord.isUpperLeftOf(fn1(y,i),fn2(x,j))
}.toList
private def lowerRightFor5StepsInBothDimensions(coord : (Int,Int))
(y : Int, fn1 : (Int,Int) => Int)
(x : Int, fn2 : (Int,Int) => Int) = {
for {
i <- 0 until 5
j <- 0 until 5
} yield coord.isLowerRightOf(fn1(y,i),fn2(x,j))
}.toList
testCoord1.toString should {
"have (10,9) left of it" in {
testCoord1.leftOf must be_==(10,9)
}
"have (10,11) right of it" in {
testCoord1.rightOf must be_==(10,11)
}
"have (9,10) above of it" in {
testCoord1.aboveOf must be_==(9,10)
}
"have (11,10) below of it" in {
testCoord1.belowOf must be_==(11,10)
}
"is upper left of (11+i,11+j) " in {
upperLeftFor5StepsInBothDimensions(testCoord1)(11, ++)(11, ++)
.exists(_ == false) must be_!=(true)
}
"is not upper left of (10-j,11+i) " in {
upperLeftFor5StepsInBothDimensions(testCoord1)(10, --)(11, ++)
.exists(_ == true) must be_!=(true)
}
"is not upper left of (11+i,10-j) " in {
upperLeftFor5StepsInBothDimensions(testCoord1)(11, ++)(10, --)
.exists(_ == true) must be_!=(true)
}
"is lower right of (9-i,9-j) " in {
lowerRightFor5StepsInBothDimensions(testCoord1)(9, --)(9, --)
.exists(_ == false) must be_!=(true)
}
"is not lower right of (10+i,9-j) " in {
lowerRightFor5StepsInBothDimensions(testCoord1)(10, ++)(9, --)
.exists(_ == true) must be_!=(true)
}
"is not lower right of (9-i,10+j) " in {
lowerRightFor5StepsInBothDimensions(testCoord1)(9, --)(10, ++)
.exists(_ == true) must be_!=(true)
}
}
testCoord1.toString in {
"minus " + testCoord2.toString should {
"be (9,7)" in {
testCoord1 - testCoord2 must be_==(9,7)
}
}
"plus " + testCoord2.toString should {
"be (11,13)" in {
testCoord1 + testCoord2 must be_==(11,13)
}
}
"times factor -1" should {
"be the inverted value (-10,-10)" in {
testCoord1 * -1 must be_==(-10,-10)
}
}
}
"In testGame 'Testlevel_correct' the field at coordinate (14,13)" should {
"have the walkable neighbours (15,13) and (14,13)" in {
val expectedNeighbours = (15,13) :: (13,13) :: Nil
(14,13).getNeighbours(List(), testGame.gameField) must containAllOf(expectedNeighbours)
}
}
"Method allowed move for the range 5" should {
"return a correct List with walkable fields at startpoint (14,13)" in {
val expectedValue = (15,9) :: (15,10) :: (15,11) :: (15,12) :: (15,13) :: (15,14) :: (15,15) ::
(14,13) :: (14,15) ::
(13,11) :: (13,12) :: (13,13) :: (13,14) :: (13,15) ::
(12,11) :: (12,15) ::
(11,11) :: (11,15) :: Nil
(14,13).calculateAllowedMoves(5, testGame) must containAllOf(expectedValue)
}
}
}
|
mahieke/TacZombie
|
model/src/test/scala/taczombie/test/model/CoordinateHelperSpec.scala
|
Scala
|
gpl-2.0
| 3,651 |
package com.twitter.inject.server.tests
import com.google.inject.AbstractModule
import com.twitter.finagle.http.Status
import com.twitter.inject.app.App
import com.twitter.inject.server.{EmbeddedTwitterServer, Ports, TwitterServer}
import com.twitter.inject.{Test, TwitterModule}
import com.twitter.server.Lifecycle.Warmup
import com.twitter.server.{TwitterServer => BaseTwitterServer}
import com.twitter.util.Await
import com.twitter.util.registry.GlobalRegistry
import scala.util.parsing.json.JSON
class StartupIntegrationTest extends Test {
override protected def afterEach(): Unit = {
// "clear" GlobalRegistry
GlobalRegistry.get.iterator foreach { entry =>
GlobalRegistry.get.remove(entry.key)
}
super.afterEach()
}
"startup" should {
"ensure health check succeeds when guice config is good" in {
val server = new EmbeddedTwitterServer(new SimpleHttpTwitterServer)
server.assertHealthy()
server.httpGetAdmin(
"/admin/server_info",
andExpect = Status.Ok)
server.close()
}
"non HTTP twitter-server passes health check" in {
val server = new EmbeddedTwitterServer(new SimpleTwitterServer)
server.assertHealthy()
server.close()
}
"embedded raw com.twitter.server.Twitter starts up" in {
val server = new EmbeddedTwitterServer(
twitterServer = new ExtendedBaseTwitterServer)
server.assertHealthy()
server.close()
}
"TwitterServer starts up" in {
val server = new EmbeddedTwitterServer(
twitterServer = new TwitterServer {})
server.assertHealthy()
server.close()
}
"ensure server health check fails when guice config fails fast" in {
val server = new EmbeddedTwitterServer(new FailFastServer)
intercept[Exception] {
server.start()
}
server.close()
}
"ensure startup fails when base twitter server preMain throws exception" in {
val server = new EmbeddedTwitterServer(new PremainErrorBaseTwitterServer)
intercept[Exception] {
server.start()
}
server.close()
}
"ensure startup fails when preMain throws exception" in {
val server = new EmbeddedTwitterServer(new ServerPremainException)
intercept[Exception] {
server.start()
}
server.close()
}
"ensure http server starts after warmup" in {
pending //only manually run since uses sleeps
class WarmupServer extends TwitterServer {
override def warmup(): Unit = {
println("Warmup begin")
Thread.sleep(1000)
println("Warmup end")
}
}
val server = new EmbeddedTwitterServer(
twitterServer = new WarmupServer)
server.assertHealthy(healthy = true)
server.close()
}
"calling install without a TwitterModule works" in {
val server = new EmbeddedTwitterServer(new ServerWithModuleInstall)
server.start()
server.close()
}
"calling install with a TwitterModule throws exception" in {
val server = new EmbeddedTwitterServer(new ServerWithTwitterModuleInstall)
intercept[Exception] {
server.start()
}
server.close()
}
"injector called before main" in {
val app = new App {
override val modules = Seq(new TwitterModule {})
}
val e = intercept[Exception] {
app.injector
}
app.close()
e.getMessage should include("injector is not available before main")
}
"register framework library" in {
val server = new EmbeddedTwitterServer(
new ServerWithModuleInstall,
disableTestLogging = true)
try {
server.start()
val response = server.httpGetAdmin(
"/admin/registry.json",
andExpect = Status.Ok)
val json: Map[String, Any] = JSON.parseFull(response.contentString).get.asInstanceOf[Map[String, Any]]
val registry = json("registry").asInstanceOf[Map[String, Any]]
assert(registry.contains("library"))
assert(registry("library").asInstanceOf[Map[String, String]].contains("finatra"))
} finally {
server.close()
}
}
}
}
class FailFastServer extends TwitterServer {
override val modules = Seq(new AbstractModule {
def configure() {
throw new StartupTestException("guice module exception")
}
})
}
class SimpleTwitterServer extends TwitterServer {
override val modules = Seq()
}
class SimpleHttpTwitterServer extends TwitterServer {
}
class ServerWithTwitterModuleInstall extends TwitterServer {
override val modules = Seq(new TwitterModule {
override def configure() {
install(new TwitterModule {})
}
})
}
class ServerWithModuleInstall extends TwitterServer {
override val modules = Seq(new TwitterModule {
override def configure() {
install(new AbstractModule {
override def configure(): Unit = {}
})
}
})
}
class PremainErrorBaseTwitterServer extends BaseTwitterServer with Ports with Warmup {
premain {
throw new StartupTestException("premain exception")
}
def main() {
warmupComplete()
throw new StartupTestException("shouldn't get here")
}
}
class ServerPremainException extends TwitterServer {
premain {
throw new StartupTestException("premain exception")
}
}
class StartupTestException(msg: String) extends Exception(msg)
class ExtendedBaseTwitterServer extends BaseTwitterServer {
def main() {
Await.ready(
adminHttpServer)
}
}
|
syamantm/finatra
|
inject/inject-server/src/test/scala/com/twitter/inject/server/tests/StartupIntegrationTest.scala
|
Scala
|
apache-2.0
| 5,541 |
package com.twitter.finagle.buoyant.h2
package netty4
import io.netty.buffer.{ByteBuf, ByteBufHolder, EmptyByteBuf, Unpooled}
import io.netty.channel.ChannelHandler.Sharable
import io.netty.channel.{ChannelHandlerContext, ChannelInboundHandlerAdapter}
/**
* This was originally copied from finagle's implementation of AnyToHeapInboundHandler:
* https://github.com/twitter/finagle/blob/c86789cf0e064483ebf4509b52c9a216c31dd134/finagle-netty4/src/main/scala/com/twitter/finagle/netty4/channel/AnyToHeapInboundHandler.scala
*
* An inbound channel handler that copies byte buffers onto the JVM heap
* and gives them a deterministic lifecycle. This handler also makes sure to
* use the unpooled byte buffer (regardless of what channel's allocator is) as
* its destination thereby defining a clear boundaries between pooled and unpooled
* environments.
*
* @note If the input buffer is not readable it's still guaranteed to be released
* and replaced with EmptyByteBuf.
*
* @note This handler recognizes both ByteBuf's and ByteBufHolder's (think of HTTP
* messages extending ByteBufHolder's in Netty).
*
* @note If your protocol manages ref-counting or if you are delegating ref-counting
* to application space you don't need this handler in your pipeline. Every
* other use case needs this handler or you will with very high probability
* incur a direct buffer leak.
*/
@Sharable
object UnpoolHandler extends ChannelInboundHandlerAdapter {
private[this] final def copyOnHeapAndRelease(bb: ByteBuf): ByteBuf = {
try {
if (bb.readableBytes > 0) Unpooled.buffer(bb.readableBytes, bb.capacity).writeBytes(bb)
else Unpooled.EMPTY_BUFFER
} finally {
val _ = bb.release()
}
}
override def channelRead(ctx: ChannelHandlerContext, msg: Any): Unit = msg match {
case bb: ByteBuf =>
val _ = ctx.fireChannelRead(copyOnHeapAndRelease(bb))
// This case is special since it helps to avoid unnecessary `replace`
// when the underlying content is already `EmptyByteBuffer`.
case bbh: ByteBufHolder if bbh.content.isInstanceOf[EmptyByteBuf] =>
val _ = ctx.fireChannelRead(bbh)
case bbh: ByteBufHolder =>
val onHeapContent = copyOnHeapAndRelease(bbh.content)
val _ = ctx.fireChannelRead(bbh.replace(onHeapContent))
case _ =>
val _ = ctx.fireChannelRead(msg)
}
}
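// Hedged usage sketch (not part of the original source): UnpoolHandler is @Sharable, so the single
// instance can be installed at the head of any channel pipeline; `pipeline` is a hypothetical
// io.netty.channel.ChannelPipeline obtained during channel initialization.
object UnpoolHandlerUsageSketch {
  import io.netty.channel.ChannelPipeline
  // Install first so every later inbound handler sees heap-backed, unpooled buffers
  // and does not need to manage reference counts itself.
  def install(pipeline: ChannelPipeline): ChannelPipeline =
    pipeline.addFirst("unpool", UnpoolHandler)
}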
|
denverwilliams/linkerd
|
finagle/h2/src/main/scala/com/twitter/finagle/buoyant/h2/netty4/UnpoolHandler.scala
|
Scala
|
apache-2.0
| 2,386 |
package com.romankagan.languages.classroomanalysis
/**
* Created by roman on 5/6/15.
*/
object LongestImprovement extends App{
}
|
kagan770/talentbuddy
|
src/com/romankagan/languages/classroomanalysis/LongestImprovement.scala
|
Scala
|
apache-2.0
| 133 |
package org.littlewings.javaee7.rest
import javax.enterprise.context.RequestScoped
import javax.enterprise.inject.Instance
import javax.enterprise.inject.spi.{Bean, BeanManager, CDI}
import javax.inject.Inject
import javax.naming.InitialContext
import javax.ws.rs._
import javax.ws.rs.core.MediaType
import org.littlewings.javaee7.service.CalcService
@Path("calc")
@RequestScoped
class CalcResource {
@Inject
private var beanManager: BeanManager = _
@GET
@Path("beanManagerInject")
@Produces(Array(MediaType.TEXT_PLAIN))
def beanManagerInject(@QueryParam("a") @DefaultValue("0") a: Int, @QueryParam("b") @DefaultValue("0") b: Int): Int = {
val beans = beanManager.getBeans(classOf[CalcService])
val bean = beanManager.resolve[CalcService](beans.asInstanceOf[java.util.Set[Bean[_ <: CalcService]]])
beanManager
.getReference(bean, classOf[CalcService], beanManager.createCreationalContext(bean))
.asInstanceOf[CalcService]
.add(a, b)
}
@GET
@Path("beanManagerLookup")
@Produces(Array(MediaType.TEXT_PLAIN))
def beanManagerLookup(@QueryParam("a") @DefaultValue("0") a: Int, @QueryParam("b") @DefaultValue("0") b: Int): Int = {
val ic = new InitialContext()
val bm = ic.lookup("java:comp/env/BeanManager").asInstanceOf[BeanManager]
ic.close()
val beans = bm.getBeans(classOf[CalcService])
val bean = bm.resolve[CalcService](beans.asInstanceOf[java.util.Set[Bean[_ <: CalcService]]])
bm
.getReference(bean, classOf[CalcService], bm.createCreationalContext(bean))
.asInstanceOf[CalcService]
.add(a, b)
}
@GET
@Path("cdiUtil")
@Produces(Array(MediaType.TEXT_PLAIN))
def cdiUtil(@QueryParam("a") @DefaultValue("0") a: Int, @QueryParam("b") @DefaultValue("0") b: Int): Int =
CDI.current.select(classOf[CalcService]).get.add(a, b)
@Inject
private var calcServiceInstance: Instance[CalcService] = _
@GET
@Path("instanceLookup")
@Produces(Array(MediaType.TEXT_PLAIN))
def instanceLookup(@QueryParam("a") @DefaultValue("0") a: Int, @QueryParam("b") @DefaultValue("0") b: Int): Int =
calcServiceInstance.select().get().add(a, b)
}
|
kazuhira-r/javaee7-scala-examples
|
cdi-programmatic-lookup/src/main/scala/org/littlewings/javaee7/rest/CalcResource.scala
|
Scala
|
mit
| 2,154 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.commons.entities
import com.fasterxml.jackson.annotation.JsonProperty
import com.flipkart.connekt.commons.entities.bigfoot.PublishSupport
import com.flipkart.connekt.commons.utils.DateTimeUtils
import com.flipkart.connekt.commons.utils.StringUtils._
import com.roundeights.hasher.Implicits._
import scala.util.Try
case class DeviceDetails(deviceId: String,
userId: String,
@JsonProperty(required = true) token: String,
@JsonProperty(required = false) osName: String,
@JsonProperty(required = true) osVersion: String,
@JsonProperty(required = false) appName: String,
@JsonProperty(required = true) appVersion: String,
brand: String,
model: String,
state: String = "",
@JsonProperty(required = false) keys: Map[String, String] = Map.empty,
active: Boolean = true) extends PublishSupport {
override def toPublishFormat: fkint.mp.connekt.DeviceDetails = {
fkint.mp.connekt.DeviceDetails(
deviceId = deviceId, userId = userId, token = token.sha256.hash.hex, osName = osName, osVersion = osVersion,
appName = appName, appVersion = appVersion, brand = brand, model = model, state = state,
ts = DateTimeUtils.getStandardFormatted(), active = active
)
}
def toCallbackEvent = {
DeviceCallbackEvent(deviceId = deviceId, userId = userId, osName = osName, osVersion = osVersion,
appName = appName, appVersion = appVersion, brand = brand, model = model, state = state, ts = System.currentTimeMillis(), active = active)
}
def validate() = {
require(Try(MobilePlatform.withName(osName)).map(!_.equals(MobilePlatform.UNKNOWN)).getOrElse(false), "a device's platform cannot be unknown")
require(token.isDefined, "device detail's token cannot be null/empty")
require(userId != deviceId, "`userId` cannot be equal to `deviceId`")
require(appName.isDefined, "device detail's `appName` cannot be null/empty")
}
override def namespace: String = "fkint/mp/connekt/DeviceDetails"
}
|
Flipkart/connekt
|
commons/src/main/scala/com/flipkart/connekt/commons/entities/DeviceDetails.scala
|
Scala
|
mit
| 2,845 |
package dwaspada.thedaam.domain
import java.io.Serializable
trait ValueObject[V] extends Serializable {
/**
* Comparable to the same ValueObject
*
* @param other concrete type of ValueObject also
* @return
*/
def sameValueAs(other: V): Boolean
}
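// Hedged example (not part of the original source): a hypothetical Money value object showing the
// usual way sameValueAs is implemented for this trait, i.e. structural equality over all attributes.
case class Money(amount: BigDecimal, currency: String) extends ValueObject[Money] {
  // two Money values are the same value when every attribute is equal
  override def sameValueAs(other: Money): Boolean = this == other
}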
|
dewey92/commuterline-ddd
|
src/main/scala/dwaspada/thedaam/domain/ValueObject.scala
|
Scala
|
mit
| 273 |
package com.github.fellowship_of_the_bus
package tdtd
package game
import IDMap._
import scala.collection.mutable.Set
import lib.game.GameConfig
import scala.math._
import GameMap._
trait TowerType {
def damage: Float
def damage_=(d: Float): Unit
def fireRate: Int
def fireRate_=(r: Int): Unit
def range: Float
def range_=(r: Float): Unit
def aoe: Float
def aoe_=(area: Float): Unit
def currAI: AI
def currAI_=(ai: AI) : Unit
def id: Int
def id_=(i: Int): Unit
def projectileID: Int
def projectileID_=(p: Int): Unit
def speed: Float
def speed_=(s: Float): Unit
def value: Int
def value_=(i: Int): Unit
def name: String
def basename: String
def name_=(s: String): Unit
def cost : Int
def cost_=(i:Int): Unit
def description: String
def description_=(s: String): Unit
def hotkey: Char
def hotkey_=(c: Char): Unit
def describe() : List[String] = {
val fireSpeed = fireRate / GameConfig.FrameRate.toFloat
var ret = List(
s"Value: ${value}",
if (cost == 0) {
"Max Level"
} else {
s"Upgrade Cost: $cost"
},
f"Damage: ${damage}%.1f",
f"Fire Rate: $fireSpeed%.1f seconds",
f"Range: ${range}%.1f"
)
if (aoe != 0.0f) {
ret = ret ++ List(f"Area of Effect: ${aoe}%.1f")
}
ret = ret ++ List(
s"Default AI: ${currAI}",
s"Description: ${description}",
s"Hotkey: ${hotkey}"
)
ret
}
def init() = {
val att = TowerMap.towerMap((id,1))
val att1 = TowerMap.towerMap((id,2))
damage = att.dmg.toFloat
fireRate = att.rate
range = att.range
aoe = att.aoe
speed = att.projspd
value = att.cost
cost = att1.cost
}
}
abstract class Tower(xc: Float, yc: Float, towerType: TowerType) extends GameObject(xc, yc) {
protected var nextShot = 0
val kind = towerType
val id = towerType.id
var currAI = towerType.currAI
val height = 1.0f
val width = 1.0f
// r += 0.5f
// c += 0.5f
var kills = 0
var dmgDone = 0f
var level = 1
var boughtAI = false
def sell(): Int = {
inactivate
kind.value / 2
}
towerType.name = towerType.basename + " Level 1"
def upgradable () = level < 3
var damage = towerType.damage
var fireRate = towerType.fireRate
var range = towerType.range
var aoe = towerType.aoe
var speed = towerType.speed
var value = towerType.value
var name = towerType.name
var basename = towerType.basename
var cost = towerType.cost
def upgrade() = {
val att = TowerMap.towerMap((id,level+1))
if (level != 2) {
val att1 = TowerMap.towerMap((id,level+2))
cost = att1.cost
} else {
cost = 0
}
level += 1
damage = att.dmg.toFloat
fireRate = att.rate
range = att.range
aoe = att.aoe
speed = att.projspd
value += att.cost
name = basename + s" Level $level"
}
def upgradeCost() : Int = {
if (level >=3 ) {
return 0
}
val att = TowerMap.towerMap((id,level+1))
att.cost
}
def startRound(): Int = 0
def setRotation(tar: Enemy) {
val rVec = tar.r - r
val cVec = tar.c - c
val theta = atan2(rVec, cVec)
rotation = toDegrees(theta).asInstanceOf[Float] + 90f
}
def tick() : List[Projectile] = {
if(nextShot <= 0) {
val enemies = map.aoe(r, c, range)
if (!enemies.isEmpty) {
nextShot = fireRate
val target = currAI.pick(r, c, enemies)
setRotation(target)
val proj = Projectile(r, c, target, this)
proj.setMap(map)
List(proj)
} else {
List()
}
} else {
nextShot -= 1
List()
}
}
def setAI(ai: AI) : Unit = {
currAI = ai
}
def describe() : List[String] = {
val fireSpeed = fireRate / GameConfig.FrameRate.toFloat
var ret = List(
s"Value: ${value}",
if (cost == 0) {
"Max Level"
} else {
s"Upgrade Cost: $cost"
},
f"Damage: ${damage}%.1f",
f"Fire Rate: $fireSpeed%.1f seconds",
f"Range: ${range}%.1f"
)
if (aoe != 0.0f) {
ret = ret ++ List(f"Area of Effect: ${aoe}%.1f")
}
ret = ret ++ List(
s"Current AI: ${currAI}",
s"Kills: $kills",
f"Damage Dealt: $dmgDone%.1f",
s"Description: ${kind.description}"
)
ret
}
}
abstract class SlowingTower(xc: Float, yc: Float, towerType: SlowingTowerType) extends Tower(xc, yc, towerType) {
override val kind = towerType
var slowMult = towerType.slowMult
override def upgrade = {
super.upgrade()
val att = TowerMap.towerMap((id,level))
slowMult = 1f - att.slow
}
override def tick() : List[Projectile] = {
if (nextShot == 0) {
val enemies = map.aoe(r,c, range)
if (!enemies.isEmpty) {
nextShot = fireRate
enemies.foreach(enemy => {
val slow = new SlowEffect(towerType.slowMult, towerType.slowTime)
enemy.slow(slow)
}
)
}
List()
} else {
nextShot -= 1
List()
}
}
override def describe() : List[String] = {
val time = towerType.slowTime / GameConfig.FrameRate.toFloat
val mult = ((1f - slowMult) * 100).toInt
val ret = List(
s"Value: ${value}",
if (cost == 0) {
"Max Level"
} else {
s"Upgrade Cost: $cost"
},
s"Slow Multiplier: $mult%",
f"Slow Time: ${time}%.1f seconds",
f"Range: ${range}%.1f",
s"Description: ${towerType.description}"
)
ret
}
}
abstract class MazingTower(xc: Float, yc: Float, towerType: TowerType) extends Tower(xc, yc, towerType) {
override def tick() : List[Projectile] = List()
}
trait SlowingTowerType extends TowerType {
def slowMult: Float
def slowMult_=(m: Float): Unit
def slowTime : Int
def slowTime_=(t: Int): Unit
override def describe() : List[String] = {
val time = slowTime / GameConfig.FrameRate.toFloat
val mult = ((1f - slowMult) * 100).toInt
List(
s"Value: ${value}",
if (cost ==0) {
"Max Level"
} else {
s"Upgrade Cost: $cost"
},
s"Slow Multiplier: $mult%",
f"Slow Time: $time%.1f seconds",
f"Range: ${range}%.1f",
s"Description: ${description}",
s"Hotkey: ${hotkey}"
)
}
override def init() : Unit = {
super.init()
val att = TowerMap.towerMap((id,1))
slowMult = 1f - att.slow
}
}
class HarpoonTower(xc: Float, yc: Float) extends Tower(xc, yc, HarpoonTower) {
}
object HarpoonTower extends TowerType {
var cost = 0
var range = 2.0f
var damage = 2.0f
var fireRate = 60
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = HarpoonTowerID
var projectileID = HarpoonID
var speed = 0.25f
var value = 10
var basename = "Harpoon Tower"
var name = "Harpoon Tower Level 1"
var description = "Basic single\\n target tower, can be placed\\n above water and below water"
var hotkey = 'H'
init
}
class CannonTower(xc: Float, yc: Float) extends Tower(xc, yc, CannonTower) {
}
object CannonTower extends TowerType {
var cost = 0
var range = 2.5f
var damage = 5.0f
var fireRate = 120
var aoe = 1.0f
var currAI: AI = new ClosestToGoalAI
var id = CannonTowerID
var projectileID = CannonballID
var speed = 0.2f
var value = 20
var basename = "Cannon Tower"
var name = "Cannon Tower Level 1"
var description = "Basic AoE tower\\n can only be placed above water"
var hotkey = 'C'
init
}
class TorpedoTower(xc: Float, yc: Float) extends Tower(xc, yc, TorpedoTower) {
private var maps = List[GameMap]()
override def setMap(m: GameMap): Unit = {
maps = m :: maps
}
override def tick() : List[Projectile] = {
if(nextShot == 0) {
val enemies = maps.foldRight(Set[Enemy]())((map, set) => {
if (set.isEmpty) {
val enemies = map.aoe(r, c, range)
if (!enemies.isEmpty) {
enemies
} else {
set
}
} else {
set
}
}
)
if (!enemies.isEmpty) {
nextShot = fireRate
val target = currAI.pick(r, c, enemies)
setRotation(target)
val proj = Projectile(r, c, target, this)
proj.setMap(target.getMap)
List(proj)
} else {
List()
}
} else {
nextShot -= 1
List()
}
}
}
object TorpedoTower extends TowerType {
var cost = 0
var range = 3.25f
var damage = 6.0f
var fireRate = 90
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = TorpedoTowerID
var projectileID = TorpedoID
var speed = 0.3f
var value = 30
var basename = "Torpedo Tower"
var name = "Torpedo Tower Level 1"
var description = "Single target tower\\n Placed below water, but\\n can fire at both levels"
var hotkey = 'T'
init
}
class OilDrillTower(xc: Float, yc: Float) extends MazingTower(xc, yc, OilDrillTower) {
private var maps = List[GameMap]()
override def setMap(m: GameMap): Unit = {
maps = m :: maps
}
var cash = 30
override def upgrade() = {
val att = TowerMap.towerMap((id,level+1))
super.upgrade()
cash = att.money
}
override def startRound() : Int = {
cash
}
override def describe() : List[String] = {
List(
s"Value: ${value}",
if (cost == 0) {
"Max Level"
} else {
s"Upgrade Cost: $cost"
},
s"Cash Earned per Round: $cash",
s"Description: ${kind.description}"
)
}
}
object OilDrillTower extends TowerType {
var cost = 0
var range = 0.0f
var damage = 0.0f
var fireRate = 0
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = OilDrillTowerID
var projectileID = HarpoonID
var speed = 0.0f
var value = 200
var basename = "Oil Drill"
var name = "Oil Drill Level 1"
var description = "Money generator\\n Earns money at the start\\n of each round\\n Takes up spot above and\\n below water"
var hotkey = 'O'
override def describe() : List[String] = {
val cash = 30
List(
s"Value: ${value}",
s"Upgrade Cost: $cash",
s"Cash Earned per Round: $cash",
s"Description: ${description}"
)
}
init
}
class IceTowerBottom(xc: Float, yc: Float) extends SlowingTower(xc, yc, IceTowerBottom) {
}
object IceTowerBottom extends SlowingTowerType {
var cost = 0
var range = 1.0f
var damage = 0.0f
var fireRate = 10
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = IceTowerBottomID
var projectileID = HarpoonID
var speed = 2.0f
var value = 20
var basename = "Ice Tower"
var name = "Ice Tower Level 1"
var slowMult = 0.75f
var slowTime = 20
var description = "Slowing tower\\n Placed below and slows in area\\n Adds ice block to same spot\\n above water which blocks\\n enemies"
var hotkey = 'I'
init
}
class IceTowerTop(xc: Float, yc: Float) extends MazingTower(xc, yc, IceTowerTop) {
override def upgradeCost(): Int = 0
override def upgrade(): Unit = {}
}
object IceTowerTop extends TowerType {
var cost = 0
var range = 0.0f
var damage = 0.0f
var fireRate = 120
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = IceTowerTopID
var projectileID = HarpoonID
var speed = 0.0f
var value = 0
var basename = "Ice Tower"
var name = "Ice Tower Level 1"
var description = ""
var hotkey = 'I'
}
class DepthChargeTower(xc: Float, yc: Float) extends Tower(xc, yc, DepthChargeTower) {
}
object DepthChargeTower extends TowerType {
var cost = 0
var range = 1.5f
var damage = 5.0f
var fireRate = 120
var aoe = 1.0f
var currAI: AI = new RandomAI
var id = DepthChargeTowerID
var projectileID = HarpoonID
var speed = 0.2f
var value = 20
var basename = "Depth Charge"
var name = "Depth Charge Level 1"
var description = "AoE tower\\n Placed above water, but fires\\n at enemies below water"
var hotkey = 'D'
init
}
class WhirlpoolBottom(xc: Float, yc: Float) extends MazingTower(xc, yc, WhirlpoolBottom) {
var slowMult = 0.75f
var slowTime = 30
}
object WhirlpoolBottom extends TowerType {
var cost = 0
var range = 4.0f
var damage = 0.0f
var fireRate = 10
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = WhirlpoolBottomID
var projectileID = HarpoonID
var speed = 2.0f
var value = 20
var basename = "Whirlpool Tower"
var name = "Whirlpool Tower Level 1"
var description = "Slowing tower\\n Placed below water, slows\\n enemies in area above water"
var hotkey = 'W'
init
}
class WhirlpoolTop(xc: Float, yc: Float) extends SlowingTower(xc, yc, WhirlpoolTop) {
}
object WhirlpoolTop extends SlowingTowerType {
var cost = 0
var range = 4.0f
var damage = 0.0f
var fireRate = 1
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = WhirlpoolTopID
var projectileID = HarpoonID
var speed = 0.0f
var value = 0
var basename = "Whirlpool Tower"
var name = "Whirlpool Tower Level 1"
var slowMult = 0.75f
var slowTime = 20
var description = "Slowing tower\\n Placed below water, slows\\n enemies in area above water"
var hotkey = 'W'
init
}
class MissileTower(xc: Float, yc: Float) extends Tower(xc, yc, MissileTower) {
var numTargets = 3
override def tick() : List[Projectile] = {
if(nextShot == 0) {
val enemies = map.aoe(r, c, range)
if (!enemies.isEmpty) {
nextShot = fireRate
var projectiles = List[Projectile]()
for(i <- 1 to numTargets) {
if (!enemies.isEmpty) {
val target = currAI.pick(r, c, enemies)
if (i == 1) {
setRotation(target)
}
val proj = Projectile(r, c, target, this)
proj.setMap(map)
projectiles = proj :: projectiles
enemies.remove(target)
}
}
projectiles
} else {
List()
}
} else {
nextShot -= 1
List()
}
}
}
object MissileTower extends TowerType {
var cost = 0
var range = 2.0f
var damage = 3.0f
var fireRate = 40
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = MissileTowerID
var projectileID = MissileID
var speed = 0.3f
var value = 50
var basename = "Missile Tower"
var name = "Missile Tower Level 1"
var description = "Multitarget tower\\n Placed above water\\n Fires at multiple enemies\\n within range"
var hotkey = 'M'
init
}
class NetTower(xc: Float, yc: Float) extends Tower(xc, yc, NetTower) {
override def tick(): List[Projectile] = {
if(nextShot == 0) {
val enemies = map.aoe(r, c, range)
if (!enemies.isEmpty) {
nextShot = fireRate
val target = currAI.pick(r, c, enemies)
setRotation(target)
val proj = Projectile(r, c, target, this)
proj.setMap(map)
List(proj)
} else {
List()
}
} else {
nextShot -= 1
List()
}
}
}
object NetTower extends TowerType {
var cost = 0
var range = 2.0f
var damage = 0.0f
var fireRate = 90
var aoe = 0.0f
var currAI: AI = new RandomAI
var id = NetTowerID
var projectileID = NetID
var speed = 0.5f
var value = 100
var basename = "Net Tower"
var name = "Net Tower Level 1"
var description = "Single target tower\\n Placed above water\\n Temporarily stops targeted\\n enemy from moving"
var hotkey = 'N'
init
}
class SteamTower(xc: Float, yc: Float) extends Tower(xc, yc, SteamTower) {
override def tick(): List[Projectile] = {
if(nextShot == 0) {
val enemiesU = Set[Enemy]()
val enemiesL = Set[Enemy]()
val enemiesD = Set[Enemy]()
val enemiesR = Set[Enemy]()
for(i <- 1 to range.toInt) {
map(r+i,c) match {
case Some(tile) => enemiesU ++= tile.enemies
case None => ()
}
map(r-i,c) match {
case Some(tile) => enemiesD ++= tile.enemies
case None => ()
}
map(r,c+i) match {
case Some(tile) => enemiesR ++= tile.enemies
case None => ()
}
map(r,c-i) match {
case Some(tile) => enemiesL ++= tile.enemies
case None => ()
}
}
val enemies = enemiesU ++ enemiesL ++ enemiesR ++ enemiesD
if (!enemies.isEmpty) {
nextShot = fireRate
val target = currAI.pick(r, c, enemiesU, enemiesD, enemiesL, enemiesR)
var dir = Up
if (target.r.toInt > r.toInt) {
dir = Down
} else if (target.r.toInt < r.toInt) {
dir = Up
} else if (target.c.toInt > c.toInt) {
dir = Right
} else {
dir = Left
}
val proj = Projectile(r, c, dir, this)
proj.setMap(map)
List(proj)
} else {
List()
}
} else {
nextShot -= 1
List()
}
}
}
object SteamTower extends TowerType {
var cost = 0
var range = 2.5f
var damage = 5.0f
var fireRate = 90
var aoe = 1.0f
var currAI: AI = new SteamRandomAI
var id = SteamTowerID
var projectileID = SteamID
var speed = 1.0f
var value = 30
var basename = "Steam Tower"
var name = "Steam Tower Level 1"
var description = "Line damage tower\\n Placed below water\\n Damages all enemies in one\\n in one of four directions"
var hotkey = 'S'
init
}
object Tower {
def apply(id: Int, xc: Float, yc: Float) : Tower = {
id match {
case HarpoonTowerID => new HarpoonTower(xc, yc)
case CannonTowerID => new CannonTower(xc, yc)
case TorpedoTowerID => new TorpedoTower(xc, yc)
case OilDrillTowerID => new OilDrillTower(xc,yc)
case IceTowerBottomID => new IceTowerBottom(xc, yc)
case IceTowerTopID => new IceTowerTop(xc, yc)
case DepthChargeTowerID => new DepthChargeTower(xc, yc)
case WhirlpoolBottomID => new WhirlpoolBottom(xc, yc)
case WhirlpoolTopID => new WhirlpoolTop(xc, yc)
case MissileTowerID => new MissileTower(xc, yc)
case NetTowerID => new NetTower(xc, yc)
case SteamTowerID => new SteamTower(xc, yc)
}
}
def apply(id: Int) : TowerType = {
id match {
case HarpoonTowerID => HarpoonTower
case CannonTowerID => CannonTower
case TorpedoTowerID => TorpedoTower
case OilDrillTowerID => OilDrillTower
case IceTowerBottomID => IceTowerBottom
case IceTowerTopID => IceTowerTop
case DepthChargeTowerID => DepthChargeTower
case WhirlpoolBottomID => WhirlpoolBottom
case WhirlpoolTopID => WhirlpoolTop
case MissileTowerID => MissileTower
case NetTowerID => NetTower
case SteamTowerID => SteamTower
}
}
}
|
Fellowship-of-the-Bus/tdtd
|
src/main/scala/game/Tower.scala
|
Scala
|
apache-2.0
| 17,438 |
package net.stoerr.grokconstructor.patterntranslation
import scala.util.matching.Regex
/**
* Translates a log4j conversation pattern into a grok pattern for parsing the log4j output
*
* @author <a href="http://www.stoerr.net/">Hans-Peter Stoerr</a>
* @since 16.02.2015
* @see "https://logging.apache.org/log4j/1.2/apidocs/org/apache/log4j/PatternLayout.html"
*/
object Log4jTranslator {
/** Matches log4j conversion specifiers - group 1 = left justify if -, group 2 = minimum width,
* group 3 = maximum width, group 4 = argument in case of %d etc. */
val conversionSpecifier: Regex =
"""%(?:(-)?(\d+))?(?:\.(\d+))?([a-zA-Z])(?:\{([^}]+)\})?""".r
def translate(conversionpattern: String): String =
replaceMatchesAndInbetween(conversionpattern, conversionSpecifier, translateConversionSpecifier, quoteAsRegex)
private def translateConversionSpecifier(thematch: Regex.Match): String = {
val List(leftjust, minwidth, maxwidth, conversionchar, argument) = thematch.subgroups
val baseRegex = conversionchar match {
case "c" => "(?<logger>[A-Za-z0-9$_.]+)" // "%{JAVACLASS:logger}" does not work for abbreviated patterns
case "C" => "(?<class>[A-Za-z0-9$_.]+)"
case "F" => "%{JAVAFILE:class}"
case "l" => "%{JAVASTACKTRACEPART:location}"
case "L" => "%{NONNEGINT:line}"
case "m" => "%{GREEDYDATA:message}"
case "n" => "$" // possibly also "\\r?\\n"
case "M" => "%{NOTSPACE:method}"
case "p" => "%{LOGLEVEL:loglevel}"
case "r" => "%{INT:relativetime}"
case "t" => "%{NOTSPACE:thread}"
case "x" => "(%{NOTSPACE:ndc})?"
case "X" => if (null == argument) """\{(?<mdc>(?:\{[^\}]*,[^\}]*\})*)\}""" else "(%{NOTSPACE:" + argument + "})?"
case "d" => translateDate(argument)
case other => throw new TranslationException("Unknown conversion specifier " + other)
}
align(baseRegex, leftjust, minwidth, maxwidth)
}
private def translateDate(argument: String): String = {
val format = argument match {
case null | "ISO8601" => "%{TIMESTAMP_ISO8601}"
case "ABSOLUTE" => "HH:mm:ss,SSS"
case "DATE" => "dd MMM yyyy HH:mm:ss,SSS"
case explicitFormat => translateExplicitDateFormat(explicitFormat)
}
"(?<timestamp>" + format + ")"
}
val dateFormatComponent = "(([a-zA-Z])\\2*)(.*)".r
// fullcomponent, componentchar, rest
val dateFormatLiteral = "'([^']+)(.*)".r
// literal, rest
val otherChar = "([^a-zA-Z])(.*)".r // char, rest
private def translateExplicitDateFormat(dateFormat: String): String = dateFormat match {
case null | "" => dateFormat
case dateFormatLiteral(literal, rest) => quoteAsRegex(literal) + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "d", rest) => "%{MONTHDAY}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "y", rest) => "%{YEAR}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "Y", rest) => "%{YEAR}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "E", rest) => "%{DAY}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "a", rest) => "(AM|PM)" + translateExplicitDateFormat(rest)
case dateFormatComponent("MMM", _, rest) => "%{MONTH}" + translateExplicitDateFormat(rest)
case dateFormatComponent("MM", _, rest) => "%{MONTHNUM2}" + translateExplicitDateFormat(rest)
case dateFormatComponent("EEE", _, rest) => "%{DAY}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "H", rest) => "%{HOUR}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "m", rest) => "%{MINUTE}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "s", rest) => "%{SECOND}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "S", rest) => "%{NONNEGINT}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "D", rest) => "%{NONNEGINT}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "X", rest) => "%{ISO8601_TIMEZONE}" + translateExplicitDateFormat(rest)
case dateFormatComponent(_, "z" | "Z", rest) => "%{TZ}" + translateExplicitDateFormat(rest)
case otherChar(char, rest) => quoteAsRegex(char) + translateExplicitDateFormat(rest)
}
// format_modifiers = [left_justification_flag][minimum_field_width][.][maximum_field_width]
// left_justification_flag = - for left justification (pad on the right) , not present -> right justification (pad on the left)
// Bsp: %20c, %-20c , %.30c, %20.30c, %-20.30c
private def align(baseRegex: String, leftjust: String, minwidth: String, maxwidth: String): String =
if (null == minwidth || minwidth.isEmpty) baseRegex
else leftjust match {
// widths are ignored for now - that'd be hard in regexes
case "-" => baseRegex + " *"
case "" | null => " *" + baseRegex
}
private def quoteAsRegex(literalchars: String): String = literalchars.replaceAll("%%", "%").replaceAll("%n", "\\n")
.replaceAll( """([(){}|\\\[\]])""", """\\$1""")
def replaceMatchesAndInbetween(source: String, regex: Regex, matchfunc: Regex.Match => String, betweenfunc: String => String): String = {
val res = new StringBuilder
var lastend = 0
regex findAllMatchIn source foreach { thematch =>
res ++= betweenfunc(source.substring(lastend, thematch.start))
res ++= matchfunc(thematch)
lastend = thematch.end
}
res ++= betweenfunc(source.substring(lastend, source.length))
res.toString()
}
}
case class TranslationException(reason: String) extends Exception(reason)
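// Hedged usage sketch (not part of the original source): translating a typical log4j conversion
// pattern. The pattern string is an illustrative example, not taken from this repository.
object Log4jTranslatorUsageSketch extends App {
  // Based on the rules above, this should yield roughly:
  // (?<timestamp>%{TIMESTAMP_ISO8601}) %{LOGLEVEL:loglevel} *\[%{NOTSPACE:thread}\] (?<logger>[A-Za-z0-9$_.]+): %{GREEDYDATA:message}$
  println(Log4jTranslator.translate("%d{ISO8601} %-5p [%t] %c: %m%n"))
}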
|
stoerr/GrokConstructor
|
src/main/scala/net/stoerr/grokconstructor/patterntranslation/Log4jTranslator.scala
|
Scala
|
gpl-3.0
| 5,576 |
/**
* This file is part of mycollab-mobile.
*
* mycollab-mobile is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-mobile is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-mobile. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.mobile.shell
import com.esofthead.mycollab.eventmanager.EventBusFactory
import com.esofthead.mycollab.mobile.module.crm.CrmUrlResolver
import com.esofthead.mycollab.mobile.module.project.ProjectUrlResolver
import com.esofthead.mycollab.mobile.shell.events.ShellEvent
import com.esofthead.mycollab.vaadin.mvp.UrlResolver
import com.vaadin.ui.UI
/**
* @author MyCollab Ltd
* @since 5.0.9
*/
class ShellUrlResolver extends UrlResolver {
this.addSubResolver("crm", new CrmUrlResolver().build)
this.addSubResolver("project", new ProjectUrlResolver().build)
def navigateByFragement(fragement: String) {
if (fragement != null && fragement.length > 0) {
val tokens: Array[String] = fragement.split("/")
this.handle(tokens: _*)
}
else {
EventBusFactory.getInstance.post(new ShellEvent.GotoMainPage(UI.getCurrent, null))
}
}
override protected def defaultPageErrorHandler(): Unit = {
EventBusFactory.getInstance.post(new ShellEvent.GotoMainPage(UI.getCurrent, null))
}
}
|
maduhu/mycollab
|
mycollab-mobile/src/main/scala/com/esofthead/mycollab/mobile/shell/ShellUrlResolver.scala
|
Scala
|
agpl-3.0
| 1,806 |
/*
*
* /\\\\\\\\\\
* /\\\\\\///\\\\\\
* /\\\\\\/ \\///\\\\\\ /\\\\\\\\\\\\\\\\\\ /\\\\\\ /\\\\\\
* /\\\\\\ \\//\\\\\\ /\\\\\\/////\\\\\\ /\\\\\\\\\\\\\\\\\\\\\\ \\/// /\\\\\\\\\\ /\\\\\\\\\\ /\\\\\\ /\\\\\\ /\\\\\\\\\\\\\\\\\\\\
* \\/\\\\\\ \\/\\\\\\ \\/\\\\\\\\\\\\\\\\\\\\ \\////\\\\\\//// /\\\\\\ /\\\\\\///\\\\\\\\\\///\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\//////
* \\//\\\\\\ /\\\\\\ \\/\\\\\\////// \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\//\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\\\\\\\\\\\\\\\
* \\///\\\\\\ /\\\\\\ \\/\\\\\\ \\/\\\\\\_/\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\////////\\\\\\
* \\///\\\\\\\\\\/ \\/\\\\\\ \\//\\\\\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\/\\\\\\ \\//\\\\\\\\\\\\\\\\\\ /\\\\\\\\\\\\\\\\\\\\
* \\///// \\/// \\///// \\/// \\/// \\/// \\/// \\///////// \\//////////
*
* The mathematical programming library for Scala.
*
*/
package optimus.optimization
import gurobi._
import optimus.algebra._
import optimus.optimization.enums.PreSolve._
import optimus.optimization.enums.{ PreSolve, SolutionStatus }
import optimus.optimization.model.MPConstraint
/**
* Gurobi solver.
*/
final class Gurobi extends MPSolver {
type Solver = GRBModel
protected var underlyingSolver: Solver = new GRBModel(new GRBEnv())
/**
* Problem builder, should configure the solver and append
* mathematical model variables and constraints.
*
* @param numberOfVars number of variables in the model
*/
def buildModel(numberOfVars: Int): Unit = {
logger.info {
"\\n" +
""" _________ ______ _____ """ + "\\n" +
""" __ ____/___ _________________ /____(_) """ + "\\n" +
""" _ / __ _ / / /_ ___/ __ \\_ __ \\_ / """ + "\\n" +
""" / /_/ / / /_/ /_ / / /_/ / /_/ / / """ + "\\n" +
""" \\____/ \\__._/ /_/ \\____//_____//_/ """ + "\\n"
}
this.numberOfVars = numberOfVars
underlyingSolver.getEnv.set(GRB.IntParam.OutputFlag, 0)
underlyingSolver.addVars(
Array.fill(numberOfVariables)(0),
Array.fill(numberOfVariables)(GRB.INFINITY),
Array.fill(numberOfVariables)(0),
Array.fill(numberOfVariables)(GRB.CONTINUOUS),
Array.tabulate(numberOfVariables)(i => s"x$i")
)
underlyingSolver.update()
}
/**
* Get value of the variable in the specified position. Solution
* should exist in order for a value to exist.
*
* @param colId position of the variable
* @return the value of the variable in the solution
*/
def getVarValue(colId: Int): Double = solution(colId)
/**
* Set bounds of variable in the specified position.
*
* @param colId position of the variable
* @param lower domain lower bound
* @param upper domain upper bound
*/
def setBounds(colId: Int, lower: Double, upper: Double): Unit = {
val GRBVar = underlyingSolver.getVar(colId)
GRBVar.set(GRB.DoubleAttr.LB, lower)
GRBVar.set(GRB.DoubleAttr.UB, upper)
}
/**
* Set upper bound to unbounded (infinite)
*
* @param colId position of the variable
*/
def setUnboundUpperBound(colId: Int): Unit = {
underlyingSolver.getVar(colId).set(GRB.DoubleAttr.UB, GRB.INFINITY)
}
/**
* Set lower bound to unbounded (infinite)
*
* @param colId position of the variable
*/
def setUnboundLowerBound(colId: Int): Unit = {
underlyingSolver.getVar(colId).set(GRB.DoubleAttr.LB, -GRB.INFINITY)
}
/**
* Set the column/variable as an integer variable
*
* @param colId position of the variable
*/
def setInteger(colId: Int): Unit = {
underlyingSolver.getVar(colId).set(GRB.CharAttr.VType, GRB.INTEGER)
}
/**
   * Set the column/variable as a binary integer variable
*
* @param colId position of the variable
*/
def setBinary(colId: Int): Unit = {
underlyingSolver.getVar(colId).set(GRB.CharAttr.VType, GRB.BINARY)
}
/**
* Set the column/variable as a float variable
*
* @param colId position of the variable
*/
def setFloat(colId: Int): Unit = {
underlyingSolver.getVar(colId).set(GRB.CharAttr.VType, GRB.CONTINUOUS)
}
/**
* Add objective expression to be optimized by the solver.
*
* @param objective the expression to be optimized
* @param minimize flag for minimization instead of maximization
*/
def setObjective(objective: Expression, minimize: Boolean): Unit = {
objective.getOrder match {
case ExpressionType.GENERIC => throw new IllegalArgumentException("Higher than quadratic: " + objective)
case ExpressionType.QUADRATIC =>
val QExpression = new GRBQuadExpr
val iterator = objective.terms.iterator
while (iterator.hasNext) {
iterator.advance()
val indexes = decode(iterator.key)
if (indexes.length == 1) QExpression.addTerm(iterator.value, underlyingSolver.getVar(indexes.head))
else QExpression.addTerm(iterator.value, underlyingSolver.getVar(indexes.head), underlyingSolver.getVar(indexes(1)))
}
QExpression.addConstant(objective.constant)
underlyingSolver.setObjective(QExpression, if (minimize) 1 else -1)
case ExpressionType.LINEAR =>
val LExpression = new GRBLinExpr
val variables = objective.terms.keys.map(code => underlyingSolver.getVar(decode(code).head))
LExpression.addTerms(objective.terms.values, variables)
LExpression.addConstant(objective.constant)
underlyingSolver.setObjective(LExpression, if (minimize) 1 else -1)
case ExpressionType.CONSTANT =>
val CExpression = new GRBLinExpr
CExpression.addConstant(objective.constant)
underlyingSolver.setObjective(CExpression, if (minimize) 1 else -1)
}
underlyingSolver.update()
}
/**
* Add a mathematical programming constraint to the solver.
*
* @param mpConstraint the mathematical programming constraint
*/
def addConstraint(mpConstraint: MPConstraint): Unit = {
numberOfCons += 1
val lhs = mpConstraint.constraint.lhs - mpConstraint.constraint.rhs
val rhs = -lhs.constant
val operator = mpConstraint.constraint.operator
val GRBOperator = operator match {
case ConstraintRelation.GE => GRB.GREATER_EQUAL
case ConstraintRelation.LE => GRB.LESS_EQUAL
case ConstraintRelation.EQ => GRB.EQUAL
}
lhs.getOrder match {
case ExpressionType.GENERIC => throw new IllegalArgumentException("Higher than quadratic: " + lhs)
case ExpressionType.QUADRATIC =>
val QExpression = new GRBQuadExpr
val iterator = lhs.terms.iterator
while (iterator.hasNext) {
iterator.advance()
val indexes = decode(iterator.key)
if (indexes.length == 1) QExpression.addTerm(iterator.value, underlyingSolver.getVar(indexes.head))
else QExpression.addTerm(iterator.value, underlyingSolver.getVar(indexes.head), underlyingSolver.getVar(indexes(1)))
}
underlyingSolver.addQConstr(QExpression, GRBOperator, rhs, "")
case ExpressionType.LINEAR | ExpressionType.CONSTANT =>
val LExpression = new GRBLinExpr
val iterator = lhs.terms.iterator
while (iterator.hasNext) {
iterator.advance()
LExpression.addTerm(iterator.value, underlyingSolver.getVar(decode(iterator.key).head))
}
underlyingSolver.addConstr(LExpression, GRBOperator, rhs, "")
}
}
/**
* Solve the problem.
*
* @return status code indicating the nature of the solution
*/
def solve(preSolve: PreSolve = DISABLED): SolutionStatus = {
if (preSolve == CONSERVATIVE) underlyingSolver.getEnv.set(GRB.IntParam.Presolve, 1)
else if (preSolve == AGGRESSIVE) underlyingSolver.getEnv.set(GRB.IntParam.Presolve, 2)
underlyingSolver.update()
underlyingSolver.optimize()
var optimizationStatus = underlyingSolver.get(GRB.IntAttr.Status)
_solutionStatus = if (optimizationStatus == GRB.INF_OR_UNBD) {
underlyingSolver.getEnv.set(GRB.IntParam.Presolve, 0)
underlyingSolver.optimize()
optimizationStatus = underlyingSolver.get(GRB.IntAttr.Status)
SolutionStatus.UNBOUNDED
} else if (optimizationStatus == GRB.OPTIMAL) {
_solution = Array.tabulate(numberOfVars)(col => underlyingSolver.getVar(col).get(GRB.DoubleAttr.X))
_objectiveValue = Some(underlyingSolver.get(GRB.DoubleAttr.ObjVal))
SolutionStatus.OPTIMAL
} else if (optimizationStatus == GRB.INFEASIBLE) {
underlyingSolver.computeIIS()
SolutionStatus.INFEASIBLE
} else if (optimizationStatus == GRB.UNBOUNDED) {
SolutionStatus.UNBOUNDED
} else {
_solution = Array.tabulate(numberOfVars)(col => underlyingSolver.getVar(col).get(GRB.DoubleAttr.X))
logger.info("Optimization stopped with status = " + optimizationStatus)
SolutionStatus.SUBOPTIMAL
}
_solutionStatus
}
/**
* Release memory associated to the problem.
*/
def release(): Unit = {
underlyingSolver.dispose()
}
/**
* Set a time limit for solver optimization. After the limit
* is reached the solver stops running.
*
* @param limit the time limit
*/
def setTimeout(limit: Int): Unit = {
require(0 <= limit)
underlyingSolver.getEnv.set(GRB.DoubleParam.TimeLimit, limit.toDouble)
}
}
|
vagm/Optimus
|
solver-gurobi/src/main/scala/optimus/optimization/Gurobi.scala
|
Scala
|
lgpl-3.0
| 9,261 |
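setObjective and addConstraint above both dispatch on the expression's order and reject anything beyond quadratic. A much-reduced, self-contained sketch of that dispatch with hypothetical stand-in types (no Gurobi dependency):

object OrderDispatchSketch {
  sealed trait ExprOrder
  case object Constant  extends ExprOrder
  case object Linear    extends ExprOrder
  case object Quadratic extends ExprOrder
  case object Generic   extends ExprOrder

  // Mirrors the shape of setObjective/addConstraint: quadratic expressions get a quadratic
  // container, linear and constant ones a linear container, anything higher fails fast.
  def describe(order: ExprOrder, minimize: Boolean): String = {
    val sense = if (minimize) "minimize" else "maximize" // the solver above encodes this as 1 / -1
    order match {
      case Generic           => throw new IllegalArgumentException("Higher than quadratic")
      case Quadratic         => s"$sense a quadratic expression"
      case Linear | Constant => s"$sense a linear expression"
    }
  }

  def main(args: Array[String]): Unit = {
    println(describe(Linear, minimize = true))     // minimize a linear expression
    println(describe(Quadratic, minimize = false)) // maximize a quadratic expression
  }
}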
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.mathematics
import org.scalatest.{ Matchers, WordSpec }
import com.twitter.scalding._
class HistogramJob(args: Args) extends Job(args) {
try {
val hist = Tsv("input", 'n)
.groupAll{ _.histogram('n -> 'hist) }
hist
.flatMapTo('hist -> ('bin, 'cdf)){ h: Histogram => h.cdf }
.write(Tsv("cdf-output"))
hist
.mapTo('hist -> ('min, 'max, 'sum, 'mean, 'stdDev)){ h: Histogram => (h.min, h.max, h.sum, h.mean, h.stdDev) }
.write(Tsv("stats-output"))
} catch {
case e: Exception => e.printStackTrace()
}
}
class HistogramJobTest extends WordSpec with Matchers {
import Dsl._
val values = List(1.0, 2.5, 1.5, 3.0, 3.0, 3.0, 4.2, 2.0, 8.0, 1.0)
val inputData = values.map(Tuple1(_))
val cdfOutput = Set((1.0, 0.3), (2.0, 0.5), (3.0, 0.8), (4.0, 0.9), (8.0, 1.0))
"A HistogramJob" should {
JobTest(new HistogramJob(_))
.source(Tsv("input", ('n)), inputData)
.sink[(Double, Double, Double, Double, Double)](Tsv("stats-output")) { buf =>
val (min, max, sum, mean, stdDev) = buf.head
"correctly compute the min" in {
min shouldBe (values.map(_.floor).min)
}
"correctly compute the max" in {
max shouldBe (values.map(_.floor).max)
}
"correctly compute the sum" in {
sum shouldBe (values.map(_.floor).sum)
}
"correctly compute the mean" in {
mean shouldBe (values.map(_.floor).sum / values.size)
}
"correctly compute the stdDev" in {
stdDev shouldBe 1.989974874 +- 0.000000001
}
}
.sink[(Double, Double)](Tsv("cdf-output")) { buf =>
"correctly compute a CDF" in {
buf.toSet shouldBe cdfOutput
}
}
.run
.finish
}
}
|
benpence/scalding
|
scalding-core/src/test/scala/com/twitter/scalding/mathematics/HistogramTest.scala
|
Scala
|
apache-2.0
| 2,363 |
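The cdfOutput the test expects can be recomputed with plain collections. A minimal sketch of the floor-binned CDF, independent of Scalding's Histogram (the object name is made up):

object CdfSketch {
  def main(args: Array[String]): Unit = {
    val values = List(1.0, 2.5, 1.5, 3.0, 3.0, 3.0, 4.2, 2.0, 8.0, 1.0)
    val n = values.size.toDouble
    // Bin by floor, count per bin, then accumulate the counts into a CDF.
    val counts = values.groupBy(_.floor).map { case (bin, vs) => (bin, vs.size) }.toList.sortBy(_._1)
    val cdf = counts.scanLeft((0.0, 0)) { case ((_, acc), (bin, c)) => (bin, acc + c) }
      .tail.map { case (bin, cum) => (bin, cum / n) }
    println(cdf) // List((1.0,0.3), (2.0,0.5), (3.0,0.8), (4.0,0.9), (8.0,1.0))
  }
}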
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import org.geotools.feature.simple.SimpleFeatureTypeBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.index.TemporalIndexCheck
import org.opengis.feature.simple.SimpleFeatureType
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TemporalIndexCheckTest extends Specification {
// setup the basic types
def noDTGType = SimpleFeatureTypes.createType("noDTGType", s"foo:String,bar:Geometry,baz:String,geom:Point")
def oneDTGType = SimpleFeatureTypes.createType("oneDTGType", s"foo:String,bar:Geometry,baz:String,geom:Point,dtg:Date")
def twoDTGType = SimpleFeatureTypes.createType("twoDTGType", s"foo:String,bar:Geometry,baz:String,geom:Point,dtg:Date,dtg_end_time:Date")
val DEFAULT_DATE_KEY = org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.Configs.DefaultDtgField
def copy(sft: SimpleFeatureType) = {
val b = new SimpleFeatureTypeBuilder()
b.init(sft)
b.buildFeatureType()
}
"TemporalIndexCheck" should {
"detect no valid DTG" in {
val testType = copy(noDTGType)
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beNone
}
"detect no valid DTG even if DEFAULT_DATE_KEY is set incorrectly" in {
val testType = copy(noDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beNone
}
"detect a valid DTG if DEFAULT_DATE_KEY is not set" in {
val testType = copy(oneDTGType)
testType.getUserData.remove(DEFAULT_DATE_KEY)
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"detect a valid DTG if DEFAULT_DATE_KEY is not properly set" in {
val testType = copy(oneDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "no_such_dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"present no DTG candidate if DEFAULT_DATE_KEY is set properly" in {
val testType = copy(oneDTGType)
testType.setDtgField("dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"detect valid DTG candidates and select the first if DEFAULT_DATE_KEY is not set correctly" in {
val testType = copy(twoDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "no_such_dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
"present no DTG candidate if DEFAULT_DATE_KEY is set properly and there are multiple Date attributes" in {
val testType = copy(twoDTGType)
testType.getUserData.put(DEFAULT_DATE_KEY, "dtg")
TemporalIndexCheck.validateDtgField(testType)
testType.getDtgField must beSome("dtg")
}
}
"getDTGFieldName" should {
"return a dtg field name if DEFAULT_DATE_KEY is set properly" in {
val testType = copy(oneDTGType)
testType.setDtgField("dtg")
testType.getDtgField must beSome("dtg")
}
"not return a dtg field name if DEFAULT_DATE_KEY is not set correctly" in {
val testType = copy(noDTGType)
testType.setDtgField("dtg") must throwAn[IllegalArgumentException]
testType.getDtgField must beNone
}
}
"getDTGDescriptor" should {
"return a dtg attribute descriptor if DEFAULT_DATE_KEY is set properly" in {
val testType = copy(oneDTGType)
testType.setDtgField("dtg")
testType.getDtgIndex.map(testType.getDescriptor) must beSome(oneDTGType.getDescriptor("dtg"))
}
"not return a dtg attribute descriptor if DEFAULT_DATE_KEY is not set correctly" in {
val testType = copy(noDTGType)
testType.setDtgField("dtg") must throwAn[IllegalArgumentException]
testType.getDtgIndex.map(testType.getDescriptor) must beNone
}
}
}
|
locationtech/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/TemporalIndexCheckTest.scala
|
Scala
|
apache-2.0
| 4,580 |
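TemporalIndexCheck.validateDtgField essentially keeps a correctly configured dtg field and otherwise falls back to the first Date-typed attribute (or none at all). A simplified, self-contained sketch of that selection rule; the attribute representation here is a hypothetical stand-in for the SimpleFeatureType machinery:

object DtgSelectionSketch {
  // Attributes are modelled as (name, type name) pairs purely for illustration.
  def selectDtg(attributes: Seq[(String, String)], configured: Option[String]): Option[String] = {
    val dateFields = attributes.collect { case (name, "Date") => name }
    configured.filter(dateFields.contains).orElse(dateFields.headOption)
  }

  def main(args: Array[String]): Unit = {
    val twoDates = Seq("foo" -> "String", "geom" -> "Point", "dtg" -> "Date", "dtg_end_time" -> "Date")
    println(selectDtg(twoDates, Some("no_such_dtg")))  // Some(dtg): invalid config falls back to the first Date field
    println(selectDtg(twoDates, Some("dtg_end_time"))) // Some(dtg_end_time): a valid config is kept
    println(selectDtg(Seq("foo" -> "String"), Some("dtg"))) // None: no Date attribute exists
  }
}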
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.sql.Column
import org.apache.spark.sql.catalyst.expressions.{Expression, Literal, Or}
import org.apache.spark.sql.execution.benchmark.SqlBasedBenchmark
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
/**
 * This benchmark aims to measure the performance of queries with and without
 * subexpression elimination.
* To run this benchmark:
* {{{
* 1. without sbt:
* bin/spark-submit --class <this class> --jars <spark core test jar>,
* <spark catalyst test jar> <spark sql test jar>
* 2. build/sbt "sql/test:runMain <this class>"
* 3. generate result:
* SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
* Results will be written to "benchmarks/SubExprEliminationBenchmark-results.txt".
* }}}
*/
object SubExprEliminationBenchmark extends SqlBasedBenchmark {
import spark.implicits._
def withFromJson(rowsNum: Int, numIters: Int): Unit = {
val benchmark = new Benchmark("from_json as subExpr in Project", rowsNum, output = output)
withTempPath { path =>
prepareDataInfo(benchmark)
val numCols = 500
val schema = writeWideRow(path.getAbsolutePath, rowsNum, numCols)
val cols = (0 until numCols).map { idx =>
from_json('value, schema).getField(s"col$idx")
}
Seq(
("false", "true", "CODEGEN_ONLY"),
("false", "false", "NO_CODEGEN"),
("true", "true", "CODEGEN_ONLY"),
("true", "false", "NO_CODEGEN")
).foreach { case (subExprEliminationEnabled, codegenEnabled, codegenFactory) =>
        // We only benchmark subexpression performance under codegen/non-codegen, so JSON
        // optimization is disabled.
val caseName = s"subExprElimination $subExprEliminationEnabled, codegen: $codegenEnabled"
benchmark.addCase(caseName, numIters) { _ =>
withSQLConf(
SQLConf.SUBEXPRESSION_ELIMINATION_ENABLED.key -> subExprEliminationEnabled,
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled,
SQLConf.CODEGEN_FACTORY_MODE.key -> codegenFactory,
SQLConf.JSON_EXPRESSION_OPTIMIZATION.key -> "false") {
val df = spark.read
.text(path.getAbsolutePath)
.select(cols: _*)
df.write.mode("overwrite").format("noop").save()
}
}
}
benchmark.run()
}
}
def withFilter(rowsNum: Int, numIters: Int): Unit = {
val benchmark = new Benchmark("from_json as subExpr in Filter", rowsNum, output = output)
withTempPath { path =>
prepareDataInfo(benchmark)
val numCols = 500
val schema = writeWideRow(path.getAbsolutePath, rowsNum, numCols)
val predicate = (0 until numCols).map { idx =>
(from_json('value, schema).getField(s"col$idx") >= Literal(100000)).expr
}.asInstanceOf[Seq[Expression]].reduce(Or)
Seq(
("false", "true", "CODEGEN_ONLY"),
("false", "false", "NO_CODEGEN"),
("true", "true", "CODEGEN_ONLY"),
("true", "false", "NO_CODEGEN")
).foreach { case (subExprEliminationEnabled, codegenEnabled, codegenFactory) =>
        // We only benchmark subexpression performance under codegen/non-codegen, so JSON
        // optimization is disabled.
val caseName = s"subExprElimination $subExprEliminationEnabled, codegen: $codegenEnabled"
benchmark.addCase(caseName, numIters) { _ =>
withSQLConf(
SQLConf.SUBEXPRESSION_ELIMINATION_ENABLED.key -> subExprEliminationEnabled,
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled,
SQLConf.CODEGEN_FACTORY_MODE.key -> codegenFactory,
SQLConf.JSON_EXPRESSION_OPTIMIZATION.key -> "false") {
val df = spark.read
.text(path.getAbsolutePath)
.where(Column(predicate))
df.write.mode("overwrite").format("noop").save()
}
}
}
benchmark.run()
}
}
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
val numIters = 3
runBenchmark("Benchmark for performance of subexpression elimination") {
withFromJson(100, numIters)
withFilter(100, numIters)
}
}
}
|
shaneknapp/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/SubExprEliminationBenchmark.scala
|
Scala
|
apache-2.0
| 5,134 |
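A plain-Scala illustration of what the benchmark above measures: the shared from_json-style subexpression being evaluated once per projected column versus once per row. The parse helper and sample data are hypothetical stand-ins, not Spark APIs:

object SubExprSketch {
  var parseCalls = 0

  // Hypothetical stand-in for the expensive shared subexpression (from_json in the benchmark).
  def parse(value: String): Map[String, String] = {
    parseCalls += 1
    value.split(",").map { kv => val Array(k, v) = kv.split(":"); k -> v }.toMap
  }

  def main(args: Array[String]): Unit = {
    val row  = "col0:a,col1:b,col2:c"
    val cols = Seq("col0", "col1", "col2")

    // Without elimination: the shared subexpression is re-evaluated for every column.
    parseCalls = 0
    val naive = cols.map(c => parse(row)(c))
    println(s"$naive, parse calls = $parseCalls")      // parse calls = 3

    // With elimination: evaluate the common subexpression once and reuse the result.
    parseCalls = 0
    val shared = parse(row)
    val eliminated = cols.map(shared)
    println(s"$eliminated, parse calls = $parseCalls") // parse calls = 1
  }
}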
package metamorphic.generator.slick
import metamorphic.MetamorphicException
import metamorphic.dsl.model._
import metamorphic.dsl.generator._
import metamorphic.dsl.util.StringImplicits._
import metamorphic.dsl.util.Instantiator
import scala.reflect.macros.blackbox.Context
class SlickRepositoryGenerator extends RepositoryGenerator {
val driverGenerator = Instantiator.instance[SlickDriverGenerator](SlickSettings.driverGenerator)
def name: String = "slick"
def isAsync: Boolean = false
def generate(model: Model)(implicit c: Context): List[c.Tree] = {
import c.universe._
driverGenerator match {
case None => c.abort(c.enclosingPosition, "Slick driver not found.")
case Some(generator) => {
try {
generator.validate
} catch {
case e: MetamorphicException => c.abort(c.enclosingPosition, e.getMessage)
}
}
}
val imports: List[Tree] = model.entities.map(entity => q"import ${entity.obj.toTerm}._")
val repositories: List[Tree] = model.entities.flatMap(entity => repository(model, entity))
q"""
..$imports
import scala.slick.driver.JdbcDriver
trait Profile {
import com.github.tototoshi.slick.GenericJodaSupport
val driver: JdbcDriver
object jodaSupport extends GenericJodaSupport(driver)
}
..$repositories
""".children
}
private def repository(model: Model, entity: Entity)(implicit c: Context): List[c.Tree] = {
import c.universe._
def repositoryObject: Tree = {
val daos: List[Tree] = new SlickDaoGenerator(entity).generate
val rows: List[Tree] = new SlickRowGenerator(entity).generate
val components: List[Tree] = new SlickComponentGenerator(entity).generate
q"""
object ${entity.obj.toTerm} {
..$rows
..$components
..$daos
}
"""
}
def repositoryClass: Tree = {
q"""
class ${entity.repository.toType} {
import scala.slick.jdbc.JdbcBackend.{Database, Session}
..$declareDaos
val db = ${driverGenerator.get.database}
def getAll: List[${entity.toType}] = {
db.withSession { implicit session =>
${entity.dao.obj.toTerm}.getAll.map(row => $rowToEntity)
}
}
def get(id: Int): Option[${entity.toType}] = {
db.withSession { implicit session =>
${entity.dao.obj.toTerm}.get(id).map(row => $rowToEntity)
}
}
def create(instance: ${entity.toType}): ${entity.toType} = {
db.withSession { implicit session =>
val newId = ${entity.dao.obj.toTerm}.create($entityToRow)
..${manyToManyCreate(true)}
instance.copy(id = Some(newId))
}
}
def replace(instance: ${entity.toType}): Option[${entity.toType}] = {
db.withSession { implicit session =>
if (${entity.dao.obj.toTerm}.replace($entityToRow)) {
..$manyToManyDelete
..${manyToManyCreate(false)}
Some(instance)
} else {
None
}
}
}
def delete(id: Int) = {
db.withSession { implicit session =>
${entity.dao.obj.toTerm}.delete(id)
}
}
db.withSession { implicit session =>
..$createIfNotExists
}
}
"""
}
def declareDaos: List[Tree] = {
q"val ${entity.dao.obj.toTerm} = new ${entity.dao.toType}(${driverGenerator.get.driver})" ::
entity.manyToManyToImplement.map(property =>
q"""
val ${entity.merge(property).dao.obj.toTerm} =
new ${entity.merge(property).dao.toType}(${driverGenerator.get.driver})
"""
)
}
def manyToManyCreate(firstTime: Boolean): List[Tree] = {
val idTerm = if (firstTime) q"newId" else q"instance.id.get"
entity.manyToManyToImplement.map(property =>
q"""${entity.merge(property).dao.obj.toTerm}.create(instance.${property.name.toTerm}.map(id =>
${entity.merge(property).row.toTerm}($idTerm, id)))""")
}
def manyToManyDelete: List[Tree] = {
entity.manyToManyToImplement.map(property =>
q"""${entity.merge(property).dao.obj.toTerm}.${entity.deleteById.toTerm}(instance.id.get)""")
}
def entityToRow: Tree = {
val rows = q"instance.id" :: entity.properties
.filter(property => property.isImplementedAsRow)
.map(property => q"instance.${property.toTerm}")
q"${entity.row.toTerm}(..$rows)"
}
def rowToEntity: Tree = {
val values = q"row.id" :: entity.properties
.filter(property => property.isImplementedAsRow || property.isImplementedAsManyToMany)
.map(property => {
if (property.isImplementedAsRow) {
q"row.${property.toTerm}"
} else {
q"${entity.merge(property).dao.obj.toTerm}.${property.end2.className.getIds.toTerm}(row.id.get)"
}
});
q"${entity.name.toTerm}(..$values)"
}
def createIfNotExists: List[Tree] = {
q"${entity.dao.obj.toTerm}.createIfNotExists" :: entity.properties
.filter(property => property.isImplementedAsManyToMany)
.map(property => q"${entity.merge(property).dao.obj.toTerm}.createIfNotExists")
}
repositoryObject :: repositoryClass :: Nil
}
}
|
frroliveira/metamorphic
|
metamorphic-slick/src/main/scala/metamorphic/generator/slick/SlickRepositoryGenerator.scala
|
Scala
|
mit
| 5,684 |
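The generator above builds its output by splicing lists of trees into quasiquotes (rows, components and DAOs per entity). A tiny, self-contained illustration of that mechanism using the runtime universe (requires scala-reflect on the classpath; the entity and member below are made up):

import scala.reflect.runtime.universe._

object QuasiquoteSketch {
  def main(args: Array[String]): Unit = {
    // Splice generated members into an object definition, as repositoryObject does above.
    val entityName = TermName("Person")
    val members: List[Tree] = List(q"def getAll: List[String] = Nil")
    val repositoryObject = q"object $entityName { ..$members }"
    println(showCode(repositoryObject))
    // prints something like: object Person extends scala.AnyRef { def getAll: List[String] = Nil }
  }
}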
package com.outr.arango
case class ArangoError(code: Int, num: Int, message: String, exception: String) {
lazy val errorCode: ArangoCode = ArangoCode(num)
def is(code: ArangoCode): Boolean = code == errorCode
override def toString: String = s"message: $message, exception: $exception, code: $errorCode"
}
|
outr/scarango
|
driver/src/main/scala/com/outr/arango/ArangoError.scala
|
Scala
|
mit
| 313 |
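A minimal sketch of how a caller might branch on ArangoError. ArangoCode lives in the same driver, so a tiny stand-in (with an example error number) is used to keep the sketch self-contained:

object ArangoErrorSketch {
  // Stand-ins mirroring the shape of the real types; the real ArangoCode maps many more codes.
  case class ArangoCode(code: Int)
  object ArangoCode { val Conflict = ArangoCode(1200) } // 1200 used here purely as an example value

  case class ArangoError(code: Int, num: Int, message: String, exception: String) {
    lazy val errorCode: ArangoCode = ArangoCode(num)
    def is(code: ArangoCode): Boolean = code == errorCode
  }

  def main(args: Array[String]): Unit = {
    val err = ArangoError(409, 1200, "conflict", "write-write conflict")
    if (err.is(ArangoCode.Conflict)) println(s"retrying after: $err")
  }
}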
/*
* Copyright (C) 2012 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.domain
import java.io.File
import java.nio.file.Path
import org.openmole.core.dsl._
import org.openmole.core.dsl.extension._
import scala.reflect.runtime.universe._
package object modifier {
trait CanGetName[A] {
def apply(a: A): String
}
implicit val fileGetName = new CanGetName[File] { def apply(f: File) = f.getName }
implicit val pathGetName = new CanGetName[Path] { def apply(p: Path) = p.toFile.getName }
implicit def domainModifierDecorator[D, T: TypeTag](domain: D)(implicit discrete: DiscreteFromContextDomain[D, T]) = new {
def take(n: FromContext[Int]) = TakeDomain(domain, n)
def group(n: FromContext[Int])(implicit m: Manifest[T]) = GroupDomain(domain, n)
def sliding(n: FromContext[Int], s: FromContext[Int] = 1)(implicit m: Manifest[T]) = SlidingDomain(domain, n, s)
def map[O](f: T ⇒ O) = MapDomain[D, T, O](domain, f)
def map[O: Manifest](s: String)(implicit m: Manifest[T]) = MapDomain[D, T, O](domain, FromContext.codeToFromContext[T ⇒ O](s))
def filter(f: T ⇒ Boolean) = FilteredDomain(domain, f)
def zipWith[O](f: T ⇒ O) = ZipWithDomain[D, T, O](domain, f)
def zipWith[O: Manifest](f: String)(implicit m: Manifest[T]) = ZipWithDomain[D, T, O](domain, FromContext.codeToFromContext[T ⇒ O](f))
def zipWithIndex = ZipWithIndexDomain[D, T](domain)
def zipWithName(implicit cgn: CanGetName[T]) = zipWith(cgn.apply _)
def sort(implicit o: Ordering[T]) = SortedByDomain(domain, identity[T] _)
def sortBy[S: Ordering](s: T ⇒ S) = SortedByDomain(domain, s)
def sortBy[S: Ordering: Manifest](s: String)(implicit m: Manifest[T]) = SortedByDomain(domain, FromContext.codeToFromContext[T ⇒ S](s))
def shuffle = ShuffleDomain(domain)
def distinct = DistinctDomain(domain)
def takeWhile(predicate: T ⇒ Boolean) = TakeWhileDomain(domain, predicate)
def takeWhile(predicate: String)(implicit m: Manifest[T]) = TakeWhileDomain(domain, FromContext.codeToFromContext[T ⇒ Boolean](predicate))
def ++[D2](d2: D2)(implicit discrete2: DiscreteFromContextDomain[D2, T]) = AppendDomain(domain, d2)
}
// implicit def discreteFactorModifierDecorator[D, T: TypeTag](factor: Factor[D, T])(implicit discrete: DiscreteFromContext[D, T]) = new {
// def take(n: FromContext[Int]) = factor.copy(domain = factor.domain.take(n))
// def group(n: FromContext[Int])(implicit m: Manifest[T]) = factor.copy(domain = factor.domain.group(n))
// def sliding(n: FromContext[Int], s: FromContext[Int] = 1)(implicit m: Manifest[T]) = factor.copy(domain = factor.domain.sliding(n, s))
// def map[O](f: T ⇒ O) = factor.copy(domain = factor.domain.map(f))
// def filter(f: T ⇒ Boolean) = factor.copy(domain = factor.domain.filter(f))
// def zipWith[O](f: T ⇒ O) = factor.copy(domain = factor.domain.zipWith(f))
// def zipWithIndex = factor.copy(domain = factor.domain.zipWithIndex)
// def zipWithName(implicit cgn: CanGetName[T]) = factor.copy(domain = factor.domain.zipWithName)
// }
//
// implicit def finiteFactorModifierDecorator[D, T](factor: Factor[D, T])(implicit finite: FiniteFromContext[D, T], inputs: DomainInputs[D]) = new {
// def sort(implicit o: Ordering[T]) = factor.copy(domain = factor.domain.sort)
// def sortBy[S: Ordering](s: T ⇒ S) = factor.copy(domain = factor.domain.sortBy(s))
// def shuffle = factor.copy(domain = factor.copy(domain = factor.domain.shuffle))
// def distinct = factor.copy(domain = factor.domain.distinct)
// def takeWhile(predicate: FromContext[T ⇒ Boolean]) = factor.copy(domain = factor.domain.takeWhile(predicate))
// }
}
|
openmole/openmole
|
openmole/plugins/org.openmole.plugin.domain.modifier/src/main/scala/org/openmole/plugin/domain/modifier/package.scala
|
Scala
|
agpl-3.0
| 4,394 |
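domainModifierDecorator above is the enrich-my-library pattern: any domain D with a DiscreteFromContextDomain instance picks up take, map, filter and friends. A stripped-down, self-contained sketch of the same pattern with hypothetical simplified type classes (the real combinators return domain wrappers such as TakeDomain; here they just return iterators):

object DomainDecoratorSketch {
  // Hypothetical simplified type class standing in for DiscreteFromContextDomain[D, T].
  trait Discrete[D, T] { def iterator(d: D): Iterator[T] }

  final case class RangeDomain(from: Int, to: Int)
  implicit val rangeIsDiscrete: Discrete[RangeDomain, Int] =
    new Discrete[RangeDomain, Int] { def iterator(d: RangeDomain) = Iterator.range(d.from, d.to) }

  // The decorator: adds take/map/filter to any domain that has a Discrete instance.
  implicit class DomainOps[D, T](domain: D)(implicit discrete: Discrete[D, T]) {
    def take(n: Int): Iterator[T] = discrete.iterator(domain).take(n)
    def map[O](f: T => O): Iterator[O] = discrete.iterator(domain).map(f)
    def filter(p: T => Boolean): Iterator[T] = discrete.iterator(domain).filter(p)
  }

  def main(args: Array[String]): Unit = {
    val d = RangeDomain(0, 10)
    println(d.take(3).toList)            // List(0, 1, 2)
    println(d.map(_ * 2).take(3).toList) // List(0, 2, 4)
    println(d.filter(_ % 2 == 1).toList) // List(1, 3, 5, 7, 9)
  }
}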
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package oled.mwua
import java.io.File
import akka.actor.{Actor, ActorRef, PoisonPill, Props}
import app.runutils.IOHandling.InputSource
import app.runutils.{Globals, RunningOptions}
import logic.Examples.Example
import logic._
import oled.mwua.MessageTypes.{FinishedBatchMsg, ProcessBatchMsg}
import org.slf4j.LoggerFactory
import oled.functions.SingleCoreOLEDFunctions.{crossVal, eval}
import scala.collection.mutable.{ListBuffer, Map}
import AuxFuncs._
import utils.{ASP, Utils}
import utils.Implicits._
import scala.util.control.Breaks._
import scala.reflect.internal.Trees
import java.util.Random
import scala.util.matching.Regex
/**
* Created by nkatz at 26/10/2018
*/
/*
*
* I ran this on the normal CAVIAR ordering as follows:
* --inpath=/home/nkatz/dev/OLED-BK/BKExamples/BK-various-taks/DevTest/caviar-bk --delta=0.00001 --prune=0.8
* --train=caviar --repfor=4 --chunksize=50 --try-more-rules=true --scorefun=default --onlineprune=true
*
* */
class Learner[T <: InputSource](
val inps: RunningOptions,
val trainingDataOptions: T,
val testingDataOptions: T,
val trainingDataFunction: T => Iterator[Example],
val testingDataFunction: T => Iterator[Example],
val writeExprmtResultsTo: String = "") extends Actor {
startTime = System.nanoTime()
/*
private var totalTPs = 0
private var totalFPs = 0
private var totalFNs = 0
private var totalTNs = 0
*/
//--------------------------
val normalizeWeights = true
//--------------------------
private var totalTPs = Set[String]()
private var totalFPs = Set[String]()
private var totalFNs = Set[String]()
private var totalTNs = Set[String]()
private var totalBatchProcessingTime = 0.0
private var totalRuleScoringTime = 0.0
private var totalNewRuleTestTime = 0.0
private var totalCompressRulesTime = 0.0
private var totalExpandRulesTime = 0.0
private var totalNewRuleGenerationTime = 0.0
private var totalWeightsUpdateTime = 0.0
private var totalgroundingsTime = 0.0
private var totalPredictionTime = 0.0
private val logger = LoggerFactory.getLogger(self.path.name)
private val withec = Globals.glvalues("with-ec").toBoolean
private var bestTheoryFoundSoFar = Theory()
  // This map contains all fluents that were true previously,
// (i.e. at the time point prior to the one that is currently being processed)
// along with their weights. The weights are updated properly at each time point
// and new atoms are added if we predict that they start holding, and
// existing atoms are removed if we predict that they're terminated.
// The key values are string representations of fluents, not holdsAt/2 atoms.
// So, we're storing "meeting(id1,id2)", not "holdsAt(meeting(id1,id2), 10)".
private var inertiaExpert = scala.collection.mutable.Map[String, Double]()
def getInertiaExpertPrediction(fluent: String) = {
if (inertiaExpert.keySet.contains(fluent)) inertiaExpert(fluent) else 0.0
}
val learningRate = 1.0
//----------------------------------------------------------------------
// If true, the firing/non-firing initiation rules are not taken
// into account when making a prediction about a fluent that persists
// by inertia.
// Setting this to false is the default for learning/reasoning with
// weakly initiated fluents, but setting it to true is necessary for
// strongly initiated settings, in order to allow for previously
// recognized fluents to persist.
private val isStrongInertia = false
//----------------------------------------------------------------------
/* All these are for presenting analytics/results after a run. */
private val initWeightSums = new ListBuffer[Double]
private val nonInitWeightSums = new ListBuffer[Double]
private val TermWeightSums = new ListBuffer[Double]
private val monTermWeightSums = new ListBuffer[Double]
private val predictInitWeightSums = new ListBuffer[Double]
private val predictTermWeightSums = new ListBuffer[Double]
private val inertWeightSums = new ListBuffer[Double]
private val prodictHoldsWeightSums = new ListBuffer[Double]
  // For each query atom encountered during a run, 0.0 or 1.0 is stored in this buffer (false/true label)
private val trueLabels = new ListBuffer[Double]
// Keep weights only for this
val keepStatsForFluent = "meeting(id4,id5)"
// Control learning iterations over the data
private var repeatFor = inps.repeatFor
// Used to count examples for holdout evaluation
private var exampleCounter = 0
// Local data variable. Cleared at each iteration (in case repfor > 1).
private var data = Iterator[Example]()
// This is optional. A testing set (for holdout evaluation) may not be provided.
private var testingData = Iterator[Example]()
  // Counts the number of processed batches. Used to determine when to
// perform holdout evaluation on the test set. Incremented whenever a
// new batch is fetched (see the getNextBatch() method)
private var batchCounter = 0
// Stores the error from the prequential evaluation at each batch.
private var prequentialError = Vector[Double]()
// Current prequential error (for logging only, updated as a string message containing the actual error).
private var currentError = ""
// Stores the F1-scores from holdout evaluation
private var holdoutScores = Vector[Double]()
// Evolving theory. If we're learning with the Event Calculus the head of the
// list is the initiation part of the theory and the tail is the termination.
// If not, the list has a single element (the current version of the theory).
private var theory = if (withec) List(Theory(), Theory()) else List(Theory())
private var startTime = System.nanoTime()
private var endTime = System.nanoTime()
  // Get the training data from the current input source
private def getTrainData = trainingDataFunction(trainingDataOptions)
private def getTestingData = testingDataFunction(testingDataOptions)
private def getNextBatch(lleNoise: Boolean = false) = {
this.batchCounter += 1
if (data.isEmpty) {
Example()
} else {
if (!lleNoise) {
data.next()
} else {
val currentBatch = data.next()
val noisyNarrative = {
currentBatch.narrative map { x =>
x.replaceAll("active", "active_1")
}
}
Example(annot = currentBatch.annotation, nar = noisyNarrative, _time = currentBatch.time)
}
}
}
val workers: List[ActorRef] = {
// Two workers for initiated and terminated rules respectively.
if (withec) {
val worker1 = context.actorOf(Props(new Worker(inps)), name = "worker-1")
val worker2 = context.actorOf(Props(new Worker(inps)), name = "worker-2")
List(worker1, worker2)
} else {
val worker = context.actorOf(Props(new Worker(inps)), name = "worker")
List(worker)
}
}
// Use this variable to count the responses received from worker actors while processing a new batch.
private var responseCounter = workers.length
// Keep response messages from workers in here until all workers are done.
private val responses = Map[String, FinishedBatchMsg]()
def receive = {
case "start" => {
this.repeatFor -= 1
this.data = getTrainData
if (inps.test != "None") this.testingData = getTestingData
if (this.data.isEmpty) {
logger.error(s"Input source ${inps.train} is empty.")
System.exit(-1)
}
processNext()
}
case "eval" => {
// Prequential evaluation of a given theory
///*
logger.info(s"Performing prequential Evaluation of theory from ${inps.evalth}")
(1 to repeatFor) foreach { _ =>
this.data = getTrainData
while (data.hasNext) {
evaluate(data.next(), inps.evalth)
logger.info(currentError)
}
}
logger.info(s"Prequential error vector:\\n${prequentialError.map(x => x.toDouble)}")
logger.info(s"Prequential error vector (Accumulated Error):\\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
//*/
// This is evaluation on a test set, just comment-out prequential, uncomment this.
/*
val testData = testingDataFunction(testingDataOptions)
val (tps,fps,fns,precision,recall,fscore) = crossVal(Theory(), data=testData, handCraftedTheoryFile = inps.evalth, globals = inps.globals, inps = inps)
logger.info(s"\\ntps: $tps\\nfps: $fps\\nfns: " + s"$fns\\nprecision: $precision\\nrecall: $recall\\nf-score: $fscore)")
*/
context.system.terminate()
}
// Use a hand-crafted theory for sequential prediction. This updates the rule weights after each round,
// but it does not mess with the structure of the rules.
case "predict" => {
def matches(p: Regex, str: String) = p.pattern.matcher(str).matches
val rules = scala.io.Source.fromFile(inps.evalth).getLines.toList.filter(line => !matches("""""".r, line) && !line.startsWith("%"))
val rulesParsed = rules.map(r => Clause.parse(r))
println(rulesParsed)
(1 to repeatFor) foreach { _ =>
this.data = getTrainData
while (data.hasNext) {
val batch = getNextBatch(lleNoise = false)
logger.info(s"Prosessing $batchCounter")
evaluateTest_NEW(batch, "", false, true, Theory(rulesParsed))
}
}
logger.info(s"Prequential error vector:\\n${prequentialError.map(x => x.toDouble)}")
logger.info(s"\\nPrequential error vector (Accumulated Error):\\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
/*
logger.info(s"\\nTrue labels:\\n$trueLabels")
logger.info(s"\\nInitiation Weight Sums:\\n$initWeightSums")
logger.info(s"\\nNo Initiation Weight Sums:\\n$nonInitWeightSums")
logger.info(s"\\nTermination Weight Sums:\\n$TermWeightSums")
logger.info(s"\\nNon Termination Weight Sums:\\n$monTermWeightSums")
logger.info(s"\\nPredict Initiation Weight Sums:\\n$predictInitWeightSums")
logger.info(s"\\nPredict Termination Weight Sums:\\n$predictTermWeightSums")
logger.info(s"\\nInertia Weight Sums:\\n$inertWeightSums")
logger.info(s"\\nHolds Weight Sums:\\n$prodictHoldsWeightSums")
*/
//logger.info(s"\\nTrue labels:\\n$trueLabels")
///*
utils.plotting.PlotTest2.plotResults("/home/nkatz/Desktop/", "results",
trueLabels.toVector, initWeightSums.toVector, nonInitWeightSums.toVector, TermWeightSums.toVector,
monTermWeightSums.toVector, predictInitWeightSums.toVector, predictTermWeightSums.toVector,
inertWeightSums.toVector, prodictHoldsWeightSums.toVector)
//*/
context.system.terminate()
}
case p: FinishedBatchMsg => {
responseCounter -= 1
if (p.targetClass == "") responses += ("theory-no-ec" -> p) else responses += (p.targetClass -> p)
if (responseCounter == 0) {
processedBatches += 1
// General case first (no event calculus)
if (responses.keySet.size == 1) {
val r = responses("theory-no-ec")
this.theory = List(r.theory)
this.totalBatchProcessingTime += r.BatchProcessingTime
this.totalCompressRulesTime += r.compressRulesTime
this.totalExpandRulesTime += r.expandRulesTime
this.totalNewRuleGenerationTime += r.newRuleGenerationTime
this.totalNewRuleTestTime += r.newRuleTestTime
this.totalRuleScoringTime += r.ruleScoringTime
} else {
val ir = responses("initiated")
val tr = responses("terminated")
val newInitTheory = ir.theory
val newTermTheory = tr.theory
this.theory = List(newInitTheory, newTermTheory)
this.totalBatchProcessingTime += math.max(ir.BatchProcessingTime, tr.BatchProcessingTime)
this.totalCompressRulesTime += math.max(ir.compressRulesTime, tr.compressRulesTime)
this.totalExpandRulesTime += math.max(ir.expandRulesTime, tr.expandRulesTime)
this.totalNewRuleGenerationTime += math.max(ir.newRuleGenerationTime, tr.newRuleGenerationTime)
this.totalNewRuleTestTime += math.max(ir.newRuleTestTime, tr.newRuleTestTime)
this.totalRuleScoringTime += math.max(ir.ruleScoringTime, tr.ruleScoringTime)
}
//logger.info(currentError)
// reset these before processing a new batch
responseCounter = workers.length
responses.clear()
processNext()
}
}
}
var processedBatches = 0
/*
* Performs online evaluation and sends the next batch to the worker(s) for processing.
*
* */
private def processNext() = {
val nextBatch = getNextBatch(lleNoise = false)
logger.info(s"Processing batch $batchCounter")
exampleCounter += inps.chunkSize
if (nextBatch.isEmpty) {
logger.info(s"Finished the data.")
if (this.repeatFor > 0) {
logger.info(s"Starting new iteration.")
self ! "start"
} else if (this.repeatFor == 0) {
endTime = System.nanoTime()
logger.info("Done.")
workers foreach (w => w ! PoisonPill)
wrapUp()
context.system.terminate()
} else {
throw new RuntimeException("This should never have happened (repeatfor is now negative?)")
}
} else {
evaluate(nextBatch)
//evaluateTest(nextBatch)
//evaluateTest_NEW(nextBatch)
//evaluateTest_NEW_EXPAND_WHEN_NEEDED(nextBatch)
if (this.workers.length > 1) { // we're learning with the Event Calculus.
val msg1 = new ProcessBatchMsg(theory.head, nextBatch, "initiated")
val msg2 = new ProcessBatchMsg(theory.tail.head, nextBatch, "terminated")
workers.head ! msg1
workers.tail.head ! msg2
} else { // We're learning without the Event Calculus.
workers.head ! new ProcessBatchMsg(theory.head, nextBatch)
}
}
}
/* Finished. Just show results and shut down */
def wrapUp(): Unit = {
val merged = {
if (theory.length == 1) {
theory.head
} else {
Theory(theory.head.clauses ++ theory.tail.head.clauses)
}
}
val theorySize = merged.clauses.foldLeft(0)((x, y) => x + y.body.length + 1)
val totalRunningTime = (endTime - startTime) / 1000000000.0
val totalTrainingTime = totalBatchProcessingTime
logger.info(s"\\nAll rules found (non-pruned, non-compressed):\\n ${merged.showWithStats}")
val pruned = Theory(merged.clauses.filter(_.score >= inps.pruneThreshold))
/* THIS MAY TAKE TOO LONG FOR LARGE AND COMPLEX THEORIES!! */
logger.info("Compressing theory...")
val pruned_ = Theory(LogicUtils.compressTheory(pruned.clauses))
logger.info(s"\\nFinal Pruned theory found:\\n ${pruned_.showWithStats}")
logger.info(s"Theory size: $theorySize")
logger.info(s"Total running time: $totalTrainingTime")
logger.info(s"Total batch processing time: $totalRunningTime")
logger.info(s"Total rule scoring time: $totalRuleScoringTime")
logger.info(s"Total rule expansion time: $totalExpandRulesTime")
logger.info(s"Total rule compression time: $totalCompressRulesTime")
logger.info(s"Total testing for new rule generation time: $totalNewRuleTestTime")
logger.info(s"Total new rule generation time: $totalNewRuleGenerationTime")
logger.info(s"Total prediction & weights update time: $totalWeightsUpdateTime")
logger.info(s"Total groundings computation time: $totalgroundingsTime")
logger.info(s"Prequential error vector:\\n${prequentialError.map(x => x.toDouble)}")
logger.info(s"Prequential error vector (Accumulated Error):\\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Prequential F1-score:\\n$runningF1Score")
logger.info(s"Total TPs: $TPs, total FPs: $FPs, total FNs: $FNs")
if (this.writeExprmtResultsTo != "") {
// Just for quick and dirty experiments
val x = prequentialError.scanLeft(0.0)(_ + _).tail.toString()
Utils.writeToFile(new File(this.writeExprmtResultsTo), "append") { p => List(x).foreach(p.println) }
}
//logger.info(s"Total TPs: ${totalTPs.size}, Total FPs: ${totalFPs.size}, Total FNs: ${totalFNs.size}")
if (trainingDataOptions != testingDataOptions) {
//logger.info("Evaluating on the test set")
val testData = testingDataFunction(testingDataOptions)
// Prequential eval on the test set (without weights update at each step).
logger.info("Evaluating on the test set with the theory found so far (no weights update at each step, no structure updates).")
prequentialError = Vector[Double]()
totalTPs = Set[String]()
totalFPs = Set[String]()
totalFNs = Set[String]()
// This includes the refinements in the final theory
// Comment it out to test with the final theory
///*
val predictWith = getFinalTheory(theory, useAvgWeights = true, logger)
val newInit = predictWith._1
val newTerm = predictWith._2
theory = List(Theory(newInit), Theory(newTerm))
//*/
testData foreach { batch =>
evaluateTest_NEW(batch, testOnly = true)
}
logger.info(s"Prequential error on test set:\\n${prequentialError.mkString(",")}")
logger.info(s"Prequential error vector on test set (Accumulated Error):\\n${prequentialError.scanLeft(0.0)(_ + _).tail}")
logger.info(s"Evaluation on the test set\\ntps: ${totalTPs.size}\\nfps: ${totalFPs.size}\\nfns: ${totalFNs.size}")
// just for quick and dirty experiments
if (this.writeExprmtResultsTo != "") {
val x = s"tps: ${totalTPs.size}\\nfps: ${totalFPs.size}\\nfns: ${totalFNs.size}\\n\\n"
Utils.writeToFile(new File(this.writeExprmtResultsTo), "append") { p => List(x).foreach(p.println) }
}
logger.info(s"Total prediction & weights update time: $totalWeightsUpdateTime")
logger.info(s"Total groundings computation time: $totalgroundingsTime")
logger.info(s"Total per-rule prediction time (combining rule's sub-experts' predictions): $totalPredictionTime")
}
//val (tps,fps,fns,precision,recall,fscore) = crossVal(pruned_, data=testData, globals = inps.globals, inps = inps)
//logger.info(s"\\ntps: $tps\\nfps: $fps\\nfns: " + s"$fns\\nprecision: $precision\\nrecall: $recall\\nf-score: $fscore)")
}
var TPs = 0
var FPs = 0
var FNs = 0
var runningF1Score = Vector.empty[Double]
def evaluate(batch: Example, inputTheoryFile: String = ""): Unit = {
if (inps.prequential) {
if (withec) {
val (init, term) = (theory.head, theory.tail.head)
//val merged = Theory( (init.clauses ++ term.clauses).filter(p => p.body.length >= 1 && p.seenExmplsNum > 5000 && p.score > 0.7) )
//val merged = Theory( (init.clauses ++ term.clauses).filter(p => p.body.length >= 1 && p.score > 0.9) )
val merged = Theory(init.clauses.filter(p => p.precision >= inps.pruneThreshold) ++ term.clauses.filter(p => p.recall >= inps.pruneThreshold))
val (tps, fps, fns, precision, recall, fscore) = eval(merged, batch, inps)
// I think this is wrong, the correct error is the number of mistakes (fps+fns)
//currentError = s"TPs: $tps, FPs: $fps, FNs: $fns, error (|true state| - |inferred state|): ${math.abs(batch.annotation.toSet.size - (tps+fps))}"
val error = (fps + fns).toDouble
TPs += tps
FPs += fps
FNs += fns
val currentPrecision = TPs.toDouble / (TPs + FPs)
val currentRecall = TPs.toDouble / (TPs + FNs)
val _currentF1Score = 2 * currentPrecision * currentRecall / (currentPrecision + currentRecall)
val currentF1Score = if (_currentF1Score.isNaN) 0.0 else _currentF1Score
runningF1Score = runningF1Score :+ currentF1Score
currentError = s"Number of mistakes (FPs+FNs) "
this.prequentialError = this.prequentialError :+ error
println(s"time, scoring theory size, error: ${batch.time}, ${merged.size}, $error")
println(this.prequentialError)
}
}
// TODO :
// Implement holdout evaluation.
if (inps.holdout != 0) {
}
}
private def getMergedTheory(testOnly: Boolean) = {
if (withec) {
val (init, term) = (theory.head, theory.tail.head)
val _merged = Theory(init.clauses ++ term.clauses)
if (testOnly) {
_merged
} else {
_merged.clauses foreach (rule => if (rule.refinements.isEmpty) rule.generateCandidateRefs(inps.globals))
// Do we want to also filter(p => p.score > inps.pruneThreshold) here?
// Do we want to compress here? Theory(LogicUtils.compressTheory(_merged.clauses))
val mergedWithRefs = Theory(_merged.clauses ++ _merged.clauses.flatMap(_.refinements))
//val merged = _merged
val merged = mergedWithRefs
merged
}
} else {
Theory() /* TODO */
}
}
/* This is called whenever a new rule is added due to a mistake. */
private def addRuleAndUpdate(r: Clause, testOnly: Boolean = false) = {
// Update the current theory
if (withec) {
if (r.head.functor.contains("initiated")) {
theory = List(Theory(theory.head.clauses :+ r), theory.tail.head)
} else if (r.head.functor.contains("terminated")) {
theory = List(theory.head, Theory(theory.tail.head.clauses :+ r))
} else {
throw new RuntimeException("Error while updating current theory.")
}
} else {
/* TODO */
}
// Update merged theory and marked-up stuff.
val mergedNew = getMergedTheory(testOnly)
val markedNew = marked(mergedNew.clauses.toVector, inps.globals)
val markedProgramNew = markedNew._1
val markedMapNew = markedNew._2
(mergedNew, markedProgramNew, markedMapNew)
}
/* This is called whenever we're specializing a rule due to a mistake */
private def specializeRuleAndUpdate(
topRule: Clause,
refinement: Clause, testOnly: Boolean = false) = {
val filter = (p: List[Clause]) => {
p.foldLeft(List[Clause]()) { (x, y) =>
if (!topRule.equals(y)) {
x :+ y
} else {
x
}
}
}
// Update the current theory
val oldInit = theory.head.clauses
val oldTerm = theory.tail.head.clauses
if (withec) {
if (topRule.head.functor.contains("initiated")) {
val newInit = filter(oldInit) :+ refinement
theory = List(Theory(newInit), Theory(oldTerm))
showInfo(topRule, refinement)
} else if (topRule.head.functor.contains("terminated")) {
val newTerm = filter(oldTerm) :+ refinement
theory = List(Theory(oldInit), Theory(newTerm))
showInfo(topRule, refinement)
} else {
throw new RuntimeException("Error while updating current theory.")
}
} else {
/* TODO */
}
// Update merged theory and marked-up stuff.
val mergedNew = getMergedTheory(testOnly)
val markedNew = marked(mergedNew.clauses.toVector, inps.globals)
val markedProgramNew = markedNew._1
val markedMapNew = markedNew._2
(mergedNew, markedProgramNew, markedMapNew)
}
private def showInfo(parent: Clause, child: Clause) = {
logger.info(s"\\nRule (id: ${parent.##} | score: ${parent.score} | tps: ${parent.tps} fps: ${parent.fps} " +
s"fns: ${parent.fns} | ExpertWeight: ${parent.w_pos} " +
s"AvgExpertWeight: ${parent.avgWeight})\\n${parent.tostring}\\nwas refined to" +
s"(id: ${child.##} | score: ${child.score} | tps: ${child.tps} fps: ${child.fps} fns: ${child.fns} | " +
s"ExpertWeight: ${child.w_pos} AvgExpertWeight: ${child.avgWeight})\\n${child.tostring}")
}
def evaluateTest_NEW(batch: Example, inputTheoryFile: String = "",
testOnly: Boolean = false, weightsOnly: Boolean = false, inputTheory: Theory = Theory()) = {
if (withec) {
var merged = if (inputTheory == Theory()) getMergedTheory(testOnly) else inputTheory
//logger.info(s"Predicting with ${merged.tostring}")
// just for debugging
val weightsBefore = merged.clauses.map(x => x.w_pos)
// just for debugging
val inertiaBefore = inertiaExpert.map(x => x)
val _marked = marked(merged.clauses.toVector, inps.globals)
var markedProgram = _marked._1
var markedMap = _marked._2
val e = (batch.annotationASP ++ batch.narrativeASP).mkString("\\n")
val trueAtoms = batch.annotation.toSet
var inferredAtoms = (Set[String](), Set[String](), Set[String]())
// this is to be set to the time the previous iteration stopped at.
// It was supposed to be used for removing already seen stuff from the batch
// whenever we make a mistake and start computing groundings all over again, but
// I haven't done that yet.
var processedUntil = 0
var finishedBatch = false
var alreadyProcessedAtoms = Set.empty[String]
while (!finishedBatch) {
val groundingsMapTimed = Utils.time{
computeRuleGroundings(inps, markedProgram, markedMap, e, trueAtoms)
}
val groundingsMap = groundingsMapTimed._1._1
val times = groundingsMapTimed._1._2
val groundingsTime = groundingsMapTimed._2
totalgroundingsTime += groundingsTime
// We sort the groundings map by the time-stamp of each inferred holdsAt atom in ascending order.
// For each holdsAt atom we calculate if it should actually be inferred, based on the weights
// of the rules that initiate or terminate it. In this process, the weights of the rules are
// updated based on whether the atom is mistakenly/correctly predicted and whether each individual
// rule mistakenly/correctly predicts it. Sorting the inferred atoms and iterating over them is necessary
// so as to promote/penalize the rule weights correctly after each mistake.
val sorted = groundingsMap.map { entry =>
val parsed = Literal.parse(entry._1)
val time = parsed.terms.tail.head.name.toInt
((entry._1, time), entry._2)
}.toVector.sortBy(x => x._1._2) // sort by time
val predictAndUpdateTimed = Utils.time {
breakable {
sorted foreach { y =>
val (currentAtom, currentTime) = (y._1._1, y._1._2)
if (!alreadyProcessedAtoms.contains(currentAtom)) {
val parsed = Literal.parse(currentAtom)
val currentFluent = parsed.terms.head.tostring
val (initiatedBy, terminatedBy) = (y._2._1, y._2._2)
val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w_pos).sum else 0.0
// only updates weights when we're not running in test mode.
val prediction =
predictAndUpdate(currentAtom, currentFluent,
initiatedBy, terminatedBy, markedMap, testOnly, trueAtoms, batch)
//val prediction = _prediction._1
prediction match {
case "TP" => inferredAtoms = (inferredAtoms._1 + currentAtom, inferredAtoms._2, inferredAtoms._3)
case "FP" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2 + currentAtom, inferredAtoms._3)
case "FN" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3 + currentAtom)
case "TN" => // do nothing
case _ => throw new RuntimeException("Unexpected response from predictAndUpdate")
}
if (!testOnly && !weightsOnly) {
if (prediction == "FP" && terminatedBy.isEmpty) {
// Let's try adding a new termination expert only when there is no other termination expert that fires.
// Else, let it fix the mistakes in future rounds by increasing the weights of firing terminating experts.
// Generate a new termination rule from the point where we currently err.
// This rule will be used for fixing the mistake in the next round.
// This most probably results in over-training. It increases weights too much and the new rule dominates.
//val totalWeight = inertiaExpert(currentFluent) + initWeightSum
val totalWeight = 1.0
val newTerminationRule = generateNewExpert(batch, currentAtom, inps.globals, "terminatedAt", totalWeight)
if (!newTerminationRule.equals(Clause.empty)) {
logger.info(s"Generated new termination rule in response to FP atom: $currentAtom")
// Since neither the new termination rule (empty-bodied), nor its refinements fire,
// therefore, they do not contribute to the FP, increase their weights further
increaseWeights(newTerminationRule.refinements :+ newTerminationRule, learningRate)
// Finally, add the new termination rule to the current theory.
val update = addRuleAndUpdate(newTerminationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FP mistake with atom: $currentAtom")
}
}
//if (prediction == "FN" && initiatedBy.isEmpty && getInertiaExpertPrediction(currentFluent) == 0.0) {
if (prediction == "FN" && initiatedBy.isEmpty) {
//val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", termWeightSum)
// Don't give the total weight of the termination part. It's dangerous
// (e.g. if the termination part new rules get the total weight of 0.0, and the TN is never fixed!)
// and it's also wrong. You just over-train to get rid of a few mistakes!
val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", 1.0)
if (!newInitiationRule.equals(Clause.empty)) {
logger.info(s"Generated new initiation rule in response to FN atom: $currentAtom")
val update = addRuleAndUpdate(newInitiationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FN mistake with atom: $currentAtom")
}
}
}
}
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
}
finishedBatch = true
}
}
totalWeightsUpdateTime += predictAndUpdateTimed._2
}
val (tps, fps, fns) = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3)
val (fpsNumber, currentFNsNumber) = (fps.size, fns.size)
// All atoms in tps are certainly true positives.
// But we need to account for the real true atoms which are not in there.
val restFNs = trueAtoms.diff(tps).filter(!fns.contains(_))
      if (restFNs.nonEmpty) throw new RuntimeException("Unaccounted-for FNs found; this should never happen.")
val restFNsNumber = restFNs.size
var trueFNsNumber = currentFNsNumber + restFNsNumber
// Ugly (AND DANGEROUS) hack to avoid counting as mistakes the holdsAt/2 atoms at the first time point of an interval
if (trueFNsNumber == 2) trueFNsNumber = 0
// We have gathered the FNs that have not been inferred, but we need to add the rest of them in the global counter
totalFNs = totalFNs ++ restFNs
// Just for debugging.
val weightsAfter = merged.clauses.map(x => x.w_pos)
//just for debugging
val inertiaAfter = inertiaExpert.map(x => x)
prequentialError = prequentialError :+ (fpsNumber + trueFNsNumber).toDouble
if (fpsNumber + trueFNsNumber > 0) {
logger.info(s"\nMade mistakes: FPs: $fpsNumber, " +
s"FNs: $trueFNsNumber.\nWeights before: $weightsBefore\nWeights after: $weightsAfter\nInertia Before: " +
s"$inertiaBefore\nInertia after: $inertiaAfter") //\nPredicted with:\n${merged.showWithStats}")
}
} else { // No Event Calculus. We'll see what we'll do with that.
}
}
def evaluateTest_NEW_EXPAND_WHEN_NEEDED(batch: Example, inputTheoryFile: String = "",
testOnly: Boolean = false, weightsOnly: Boolean = false, inputTheory: Theory = Theory()) = {
if (withec) {
var merged = if (inputTheory == Theory()) getMergedTheory(testOnly) else inputTheory
// just for debugging
val weightsBefore = merged.clauses.map(x => x.w_pos)
// just for debugging
val inertiaBefore = inertiaExpert.map(x => x)
val _marked = marked(merged.clauses.toVector, inps.globals)
var markedProgram = _marked._1
var markedMap = _marked._2
val e = (batch.annotationASP ++ batch.narrativeASP).mkString("\n")
val trueAtoms = batch.annotation.toSet
var inferredAtoms = (Set[String](), Set[String](), Set[String]())
// this is to be set to the time the previous iteration stopped at.
// It was supposed to be used for removing already seen stuff from the batch
// whenever we make a mistake and start computing groundings all over again, but
// I haven't done that yet.
var processedUntil = 0
var finishedBatch = false
var alreadyProcessedAtoms = Set.empty[String]
while (!finishedBatch) {
val groundingsMapTimed = Utils.time{
computeRuleGroundings(inps, markedProgram, markedMap, e, trueAtoms)
}
val groundingsMap = groundingsMapTimed._1._1
val times = groundingsMapTimed._1._2
val groundingsTime = groundingsMapTimed._2
totalgroundingsTime += groundingsTime
// We sort the groundings map by the time-stamp of each inferred holdsAt atom in ascending order.
// For each holdsAt atom we calculate if it should actually be inferred, based on the weights
// of the rules that initiate or terminate it. In this process, the weights of the rules are
// updated based on whether the atom is mistakenly/correctly predicted and whether each individual
// rule mistakenly/correctly predicts it. Sorting the inferred atoms and iterating over them is necessary
// so as to promote/penalize the rule weights correctly after each mistake.
val sorted = groundingsMap.map { entry =>
val parsed = Literal.parse(entry._1)
val time = parsed.terms.tail.head.name.toInt
((entry._1, time), entry._2)
}.toVector.sortBy(x => x._1._2) // sort by time
val predictAndUpdateTimed = Utils.time {
breakable {
sorted foreach { y =>
val (currentAtom, currentTime) = (y._1._1, y._1._2)
if (!alreadyProcessedAtoms.contains(currentAtom)) {
val parsed = Literal.parse(currentAtom)
val currentFluent = parsed.terms.head.tostring
val (initiatedBy, terminatedBy) = (y._2._1, y._2._2)
// This is also calculated in predictAndUpdate; it should be factored out.
// Calculate it here (because it is needed here) and pass it to predictAndUpdate
// to avoid doing it twice.
///*
val nonFiringInitRules =
markedMap.filter(x =>
x._2.head.functor.contains("initiated") && !initiatedBy.contains(x._1))
//*/
// This is also calculated in predictAndUpdate; it should be factored out.
// Calculate it here (because it is needed here) and pass it to predictAndUpdate
// to avoid doing it twice.
///*
val nonFiringTermRules =
markedMap.filter(x =>
x._2.head.functor.contains("terminated") && !terminatedBy.toSet.contains(x._1))
//*/
val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w_pos).sum else 0.0
// only updates weights when we're not running in test mode.
val prediction =
predictAndUpdate(currentAtom, currentFluent,
initiatedBy, terminatedBy, markedMap, testOnly, trueAtoms, batch)
prediction match {
case "TP" => inferredAtoms = (inferredAtoms._1 + currentAtom, inferredAtoms._2, inferredAtoms._3)
case "FP" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2 + currentAtom, inferredAtoms._3)
case "FN" => inferredAtoms = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3 + currentAtom)
case "TN" => // do nothing
case _ => throw new RuntimeException("Unexpected response from predictAndUpdate")
}
if (!testOnly && !weightsOnly) {
if (prediction == "FP") {
if (terminatedBy.isEmpty) {
// Let's try adding a new termination expert only when there is no other termination expert that fires.
// Else, let it fix the mistakes in future rounds by increasing the weights of firing terminating experts.
// Generate a new termination rule from the point where we currently err.
// This rule will be used for fixing the mistake in the next round.
// This most probably results in over-training. It increases weights too much and the new rule dominates.
//val totalWeight = inertiaExpert(currentFluent) + initWeightSum
val totalWeight = 1.0
val newTerminationRule = generateNewExpert(batch, currentAtom, inps.globals, "terminatedAt", totalWeight)
if (!newTerminationRule.equals(Clause.empty)) {
logger.info(s"Generated new termination rule in response to FP atom: $currentAtom")
// Since neither the new (empty-bodied) termination rule nor its refinements fire,
// they do not contribute to the FP, so their weights could be increased further...
// NO, WE DO NOT INCREASE WEIGHTS OF NON-FIRING RULES!!!
//increaseWeights(newTerminationRule.refinements :+ newTerminationRule, learningRate)
// Finally, add the new termination rule to the current theory.
val update = addRuleAndUpdate(newTerminationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FP mistake with atom: $currentAtom")
}
} else { // We do have firing termination rules
// Specialize a firing initiation rule. If no firing initiation rule exists,
// then the FP is due to inertia; just let the inertia weight degrade until
// the termination rules take over the majority (note that we do have firing termination
// rules here, so there is reason to believe we'll have such rules in the upcoming rounds).
if (initiatedBy.nonEmpty) {
// Note that we'll most certainly have a top-rule that fires: for every
// refinement that fires, its parent rule must fire as well. Therefore, if
// initiatedBy is non empty, at least some of the rules in there must be top rules.
val rulesToSpecialize =
// This is the first minor difference from the code that specializes
// termination rules (below). Here we select the rules from the initiation
// part of the theory; below we select them from the termination part.
theory.head.clauses.
filter(x => initiatedBy.toSet.contains(x.##.toString))
var performedSpecialization = false
rulesToSpecialize foreach { ruleToSpecialize =>
// Find suitable refinements, i.e. refinements that DO NOT fire
// w.r.t. the current FP atom.
val suitableRefs =
// Here is the second difference: we use nonFiringInitRules here.
// TODO: this block is duplicated below for the termination case; factor it out.
ruleToSpecialize.refinements.
filter(r => nonFiringInitRules.keySet.contains(r.##.toString)).
filter(s => s.score > ruleToSpecialize.score).
filter(r => !theory.head.clauses.exists(r1 => r1.thetaSubsumes(r) && r.thetaSubsumes(r1))).
sortBy { x => (-x.w_pos, -x.score, x.body.length + 1) }
if (suitableRefs.nonEmpty) {
performedSpecialization = true
val bestRefinement = suitableRefs.head
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
val update = specializeRuleAndUpdate(ruleToSpecialize, bestRefinement)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
}
}
if (performedSpecialization) break
}
}
}
if (prediction == "FN") {
//if (initiatedBy.isEmpty || (nonFiringInitRules.values.map(_.w).sum > initWeightSum) ) {
if (initiatedBy.isEmpty) {
//val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", termWeightSum)
// Don't give the new rule the total weight of the termination part. It's dangerous
// (e.g. if the termination part has a total weight of 0.0, the new rule gets weight 0.0 and the FN is never fixed!)
// and it's also wrong: we would just over-train to get rid of a few mistakes.
val newInitiationRule = generateNewExpert(batch, currentAtom, inps.globals, "initiatedAt", 1.0)
if (!newInitiationRule.equals(Clause.empty)) {
logger.info(s"Generated new initiation rule in response to FN atom: $currentAtom")
val update = addRuleAndUpdate(newInitiationRule)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
} else {
logger.info(s"At batch $batchCounter: Failed to generate bottom rule from FN mistake with atom: $currentAtom")
}
/* THE CODE BELOW IS THE SAME AS ABOVE. FACTOR IT OUT TO A FUNCTION. */
} else {
// Then the FN is due to over-weighted firing termination rules. Specialize one.
if (terminatedBy.nonEmpty) {
val termRulesToSpecialize =
theory.tail.head.clauses.
filter(x => terminatedBy.toSet.contains(x.##.toString))
var performedSpecialization = false
termRulesToSpecialize foreach { ruleToSpecialize =>
// Find suitable refinements, i.e. refinements that DO NOT fire
// w.r.t. the current FN atom.
val suitableRefs =
ruleToSpecialize.refinements.
filter(r => nonFiringTermRules.keySet.contains(r.##.toString)).
filter(s => s.score > ruleToSpecialize.score).
filter(r => !theory.tail.head.clauses.exists(r1 => r1.thetaSubsumes(r) && r.thetaSubsumes(r1))).
sortBy { x => (-x.w_pos, -x.score, x.body.length + 1) }
if (suitableRefs.nonEmpty) {
performedSpecialization = true
val bestRefinement = suitableRefs.head
if (bestRefinement.refinements.isEmpty) bestRefinement.generateCandidateRefs(inps.globals)
val update = specializeRuleAndUpdate(ruleToSpecialize, bestRefinement)
merged = update._1
markedProgram = update._2
markedMap = update._3
// do it here, cause it won't be set otherwise due to the break.
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
break
}
}
if (performedSpecialization) break
} else {
// This would be a problem, certainly something worth looking into:
// an FN with firing initiation rules but no firing termination rules.
// UPDATE: It's ok; it can happen because the non-firing weight is greater than the firing weight.
/*
throw new RuntimeException(s"We have an FN atom, which is " +
s"initiated by some rules and terminated by NO rules. It's worth finding out how this happens!\nBatch" +
s" counter: $batchCounter, atom: $currentAtom")
*/
}
}
}
}
}
alreadyProcessedAtoms = alreadyProcessedAtoms + currentAtom
}
finishedBatch = true
}
}
totalWeightsUpdateTime += predictAndUpdateTimed._2
}
val (tps, fps, fns) = (inferredAtoms._1, inferredAtoms._2, inferredAtoms._3)
val (fpsNumber, currentFNsNumber) = (fps.size, fns.size)
// All atoms in tps are certainly true positives.
// But we need to account for the real true atoms which are not in there.
val restFNs = trueAtoms.diff(tps).filter(!fns.contains(_))
if (restFNs.nonEmpty) throw new RuntimeException(s"Unexpected state: true atoms that are neither TPs nor FNs: $restFNs")
val restFNsNumber = restFNs.size
var trueFNsNumber = currentFNsNumber + restFNsNumber
// Ugly (AND DANGEROUS) hack to avoid counting as mistakes the holdsAt/2 atoms at the first time point of an interval
//if (trueFNsNumber == 2) trueFNsNumber = 0
// We have gathered the FNs that have not been inferred, but we need to add the rest of them in the global counter
totalFNs = totalFNs ++ restFNs
// Just for debugging.
val weightsAfter = merged.clauses.map(x => x.w_pos)
//just for debugging
val inertiaAfter = inertiaExpert.map(x => x)
prequentialError = prequentialError :+ (fpsNumber + trueFNsNumber).toDouble
if (fpsNumber + trueFNsNumber > 0) {
logger.info(s"\nMade mistakes: FPs: $fpsNumber, " +
s"FNs: $trueFNsNumber.\nWeights before: $weightsBefore\nWeights after: $weightsAfter\nInertia Before: " +
s"$inertiaBefore\nInertia after: $inertiaAfter") //\nPredicted with:\n${merged.showWithStats}")
}
} else { // No Event Calculus. We'll see what we'll do with that.
}
}
def updateAnalyticsBuffers(atom: String, initWghtSum: Double, termWghtSum: Double,
nonInitWghtSum: Double, nonTermWghtSum: Double,
predictInitWghtSum: Double, predictTermWghtSum: Double,
inertWghtSum: Double, holdsWght: Double) = {
if (atom.contains(keepStatsForFluent)) {
initWeightSums += initWghtSum
TermWeightSums += termWghtSum
nonInitWeightSums += nonInitWghtSum
monTermWeightSums += nonTermWghtSum
predictInitWeightSums += predictInitWghtSum
predictTermWeightSums += predictTermWghtSum
inertWeightSums += inertWghtSum
prodictHoldsWeightSums += holdsWght
}
}
def updateTrueLabels(atom: String, value: Double) = {
if (atom.contains(keepStatsForFluent)) {
trueLabels += value
}
}
def predictAndUpdate(currentAtom: String, currentFluent: String, init: Vector[String],
term: Vector[String], markedMap: scala.collection.immutable.Map[String, Clause],
testOnly: Boolean, trueAtoms: Set[String], batch: Example) = {
val (initiatedBy, terminatedBy) = (init, term)
val initWeightSum = if (initiatedBy.nonEmpty) initiatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val termWeightSum = if (terminatedBy.nonEmpty) terminatedBy.map(x => markedMap(x).w_pos).sum else 0.0
val inertiaExpertPrediction = getInertiaExpertPrediction(currentFluent)
val firingInitRulesIds = initiatedBy
val nonFiringInitRules =
markedMap.filter(x =>
x._2.head.functor.contains("initiated") && !firingInitRulesIds.contains(x._1))
// Use this to have all rules and their refs vote independently:
// This was the default but does not seem reasonable.
val predictInitiated = initWeightSum // - nonFiringInitRules.values.map(_.w).sum
// Use this to have one prediction per top rule, resulting from combining the
// opinions of the rule's sub-expert committee (its specializations)
/*
val predictInitiatedTimed = Utils.time{
val individualPredictions =
theory.head.clauses.map( rule => getRulePrediction(rule, firingInitRulesIds, nonFiringInitRules.keys.toVector) )
individualPredictions.sum
}
val predictInitiated = predictInitiatedTimed._1
totalPredictionTime += predictInitiatedTimed._2
*/
// Use this to have one prediction per top rule.
// The best (based on current weight) between the top-rule's own
// prediction and the prediction of the rule's best sub-expert:
/*
val predictInitiatedTimed = Utils.time{
val individualPredictions =
theory.head.clauses.map( rule => getRulePrediction1(rule, firingInitRulesIds, nonFiringInitRules.keys.toVector) )
individualPredictions.sum
}
val predictInitiated = predictInitiatedTimed._1
totalPredictionTime += predictInitiatedTimed._2
*/
val firingTermRulesIds = terminatedBy
val nonFiringTermRules =
markedMap.filter(x =>
x._2.head.functor.contains("terminated") && !firingTermRulesIds.toSet.contains(x._1))
// Use this to have all rules and their refs vote independently:
// This was the default but does not seem reasonable.
val predictTerminated = termWeightSum // - nonFiringTermRules.values.map(_.w).sum
// Use this to have one prediction per top rule, resulting from combining the
// opinions of the rule's sub-expert committee (its specializations):
/*
val predictTerminatedTimed = Utils.time {
val individualPredictions =
theory.tail.head.clauses.map( rule => getRulePrediction(rule, firingTermRulesIds, nonFiringTermRules.keys.toVector) )
individualPredictions.sum
}
val predictTerminated = predictTerminatedTimed._1
totalPredictionTime += predictTerminatedTimed._2
*/
// Use this to have one prediction per top rule.
// The best (based on current weight) between the top-rule's own
// prediction and the prediction of the rule's best sub-expert:
/*
val predictTerminatedTimed = Utils.time {
val individualPredictions =
theory.tail.head.clauses.map( rule => getRulePrediction1(rule, firingTermRulesIds, nonFiringTermRules.keys.toVector) )
individualPredictions.sum
}
val predictTerminated = predictTerminatedTimed._1
totalPredictionTime += predictTerminatedTimed._2
*/
// WITH INERTIA
///*
val _predictAtomHolds = predict(inertiaExpertPrediction, predictInitiated, predictTerminated, isStrongInertia)
val (predictAtomHolds, holdsWeight) = (_predictAtomHolds._1, _predictAtomHolds._2)
//*/
// NO INERTIA
//val _predictAtomHolds = predictInitiated - predictTerminated
//val (predictAtomHolds, holdsWeight) = (if (_predictAtomHolds > 0) true else false, _predictAtomHolds)
updateAnalyticsBuffers(currentAtom, initWeightSum, termWeightSum,
nonFiringInitRules.values.map(_.w_pos).sum, nonFiringTermRules.values.map(_.w_pos).sum,
predictInitiated, predictTerminated, inertiaExpertPrediction, holdsWeight)
/*
* THIS PREDICTION RULE IS WRONG:
*
* val holdsPredictionWeight = inertiaExpertPrediction + predictInitiated - predictTerminated
* val predictAtomHolds = holdsPredictionWeight > 0.0
*
* Look what might happen:
*
* Made FP mistake for atom: holdsAt(meeting(id3,id1),2600).
* Inertia weight: 0.0
* Firing initiation rules: 0, sum of weight: 0.0
* Non firing initiation rules: 17, sum of weight: 25.23524944624197
* Firing termination rules: 3, sum of weight: 101.70330033914848
* Non firing termination rules: 4, sum of weight: 135.60440045219798
*
* Semantically, there is no reason to predict HOLDS: The fluent does not hold by inertia, nor is it
* initiated by any rule. But we have predictInitiated = -25.23524944624197 and
* predictTerminated = -33.901100113049495, because in both cases the sum of weights of the non-firing
* rules is greater than that of the firing ones. Therefore (since predictTerminated is more negative than predictInitiated) we have
*
* holdsPredictionWeight = 0.0 + (-25.23524944624197) - (-33.901100113049495) > 0
*
* and we get a wrong prediction, while there is no reason for that.
*
* */
//val holdsPredictionWeight = inertiaExpertPrediction + predictInitiated - predictTerminated
//val predictAtomHolds = holdsPredictionWeight > 0.0
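// Worked sketch of the failure mode described above (numbers taken from the comment; they
// presuppose the "subtract the weights of non-firing rules" voting scheme that the rejected
// prediction rule relies on):
//   predictInitiated  =   0.0              - 25.23524944624197  ≈ -25.235
//   predictTerminated = 101.70330033914848 - 135.60440045219798 ≈ -33.901
//   inertia + predictInitiated - predictTerminated ≈ 0.0 - 25.235 + 33.901 ≈ 8.666 > 0
// i.e. a spurious HOLDS prediction. This is why predict/4 (inertia, initiation and
// termination votes, plus the isStrongInertia flag) is used above instead.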
if (predictAtomHolds) {
// If the fluent that we predicted holds is not in the inertia expert map, add it
// with the weight with which it was predicted.
if (!inertiaExpert.keySet.contains(currentFluent)) {
// this is guaranteed to be positive from the prediction rule
val holdsWeight = inertiaExpertPrediction + predictInitiated
inertiaExpert += (currentFluent -> holdsWeight)
//inertiaExpert += (currentFluent -> 1.0)
}
if (trueAtoms.contains(currentAtom)) {
// Then it's a TP. Simply return it without updating any weights, after properly scoring the rules.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 1.0)
updateRulesScore("TP", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
totalTPs = totalTPs + currentAtom
"TP"
} else {
// Then it's an FP.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 0.0)
// That's for debugging
/*
reportMistake("FP", currentAtom, inertiaExpertPrediction, initiatedBy.size,
nonFiringInitRules.size, terminatedBy.size, nonFiringTermRules.size, initWeightSum,
termWeightSum, nonFiringInitRules.values.map(_.w).sum, nonFiringTermRules.values.map(_.w).sum, this.logger)
*/
totalFPs = totalFPs + currentAtom
if (!testOnly) {
// Decrease the weights of all rules that contribute to the FP: Rules that incorrectly initiate it.
reduceWeights(initiatedBy, markedMap, learningRate)
// Non-firing rules should not be touched; they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training garbage rules, which then dominate.
// reduceWeights(nonFiringTermRules.keys.toVector, markedMap, learningRate)
// Reduce the weight of the inertia expert for the particular atom, if the inertia expert predicted that it holds.
if (inertiaExpert.keySet.contains(currentFluent)) {
val newWeight = inertiaExpert(currentFluent) * Math.pow(Math.E, (-1.0) * learningRate)
inertiaExpert += (currentFluent -> newWeight)
}
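// Sketch of the multiplicative update applied to the inertia expert above, with an
// illustrative learning rate (not necessarily the value configured for this class):
// with learningRate = 0.1, a mistaken prediction scales the weight by e^(-0.1) ≈ 0.905,
// while the corresponding increase (see the FN branch below) scales it by e^(0.1) ≈ 1.105,
// i.e. an exponentially-weighted (Hedge/Winnow-style) expert update.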
// Increase the weights of rules that can fix the mistake:
// Rules that terminate the fluent and initiation rules that do not fire (NO!).
increaseWeights(terminatedBy, markedMap, learningRate)
// Non-firing rules should not be touched; they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training garbage rules, which then dominate.
//increaseWeights(nonFiringInitRules.keys.toVector, markedMap, learningRate)
}
updateRulesScore("FP", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"FP" // result returned to the calling method.
}
} else {
// We predicted that the atom does not hold...
if (trueAtoms.contains(currentAtom)) {
// ...while it actually does, so we have an FN.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 1.0)
/*
reportMistake("FN", currentAtom, inertiaExpertPrediction, initiatedBy.size,
nonFiringInitRules.size, terminatedBy.size, nonFiringTermRules.size, initWeightSum,
termWeightSum, nonFiringInitRules.values.map(_.w).sum, nonFiringTermRules.values.map(_.w).sum, this.logger)
*/
totalFNs = totalFNs + currentAtom
if (!testOnly) {
// Increase the weights of all rules that initiate it
increaseWeights(initiatedBy, markedMap, learningRate)
// and all rules that do not terminate it (NO!!)
// Non-firing rules should not be touched; they should be treated as abstaining experts.
// Increasing the weights of non-firing rules results in over-training garbage rules, which then dominate.
//increaseWeights(nonFiringTermRules.keys.toVector, markedMap, learningRate)
// Increase the weight of the inertia expert for that particular atom,
// if the inertia expert predicted that it holds.
if (inertiaExpert.keySet.contains(currentFluent)) {
var newWeight = inertiaExpert(currentFluent) * Math.pow(Math.E, 1.0 * learningRate)
newWeight = if (newWeight.isPosInfinity) inertiaExpert(currentFluent) else newWeight
inertiaExpert += (currentFluent -> newWeight)
}
// Also, reduce the weights of all initiation rules that do not fire (NO!) and all termination rules that fire.
//reduceWeights(nonFiringInitRules.keys.toVector, markedMap, learningRate) // No, maybe that's wrong, there's no point in penalizing a rule that does not fire.
reduceWeights(terminatedBy, markedMap, learningRate)
}
updateRulesScore("FN", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"FN" // result returned to the calling method.
} else {
// Then we have an atom which would have been erroneously inferred by the (un-weighted) rules (with ordinary
// logical inference), but which was eventually not inferred, thanks to the expert-based weighted framework.
// That is, either the total weight of the non-firing "initiatedAt" fragment of the theory was greater than
// the weight of the firing part, or the total weight of the firing "terminatedAt" fragment of the theory was
// greater than the weight of the "initiatedAt" fragment. In either case, the atom is eventually a TN. We don't
// do anything with it, but we need to instruct the inertia expert to "forget" the atom.
// Update the analytics buffer for this atom
updateTrueLabels(currentAtom, 0.0)
if (inertiaExpert.keySet.contains(currentFluent)) {
inertiaExpert -= currentFluent
}
updateRulesScore("TN", initiatedBy.map(x => markedMap(x)), nonFiringInitRules.values.toVector,
terminatedBy.map(x => markedMap(x)), nonFiringTermRules.values.toVector)
"TN"
}
}
}
}
|
nkatzz/OLED
|
src/main/scala/oled/mwua/Learner.scala
|
Scala
|
gpl-3.0
| 63,191 |
package com.github.nethad.clustermeister.integration.sc03
// algorithm-specific message
case class SignalMessage[@specialized SourceId, @specialized TargetId, @specialized SignalType](sourceId: SourceId, targetId: TargetId, signal: SignalType) {
override def toString = "Signal(sourceId=" + sourceId + ", targetId=" + targetId + ", signal=" + signal + ")"
}
|
nethad/clustermeister
|
integration-tests/src/main/scala/com/github/nethad/clustermeister/integration/sc03/SignalMessage.scala
|
Scala
|
apache-2.0
| 361 |
package app
import fuel.util.Options
import scala.collection.mutable
final case class MissingRequiredArgumentException(message: String, cause: Throwable = None.orNull)
extends Exception(message, cause)
final case class UnrecognizedArgumentException(message: String, cause: Throwable = None.orNull)
extends Exception(message, cause)
final case class IncorrectValueException(message: String, cause: Throwable = None.orNull)
extends Exception(message, cause)
/**
* Stores basic meta-information related to some option.
*/
case class OptionInfo(name: String, tpe: String = "", desc: String = "", default: Option[String] = None,
choice: Set[String] = Set(), required: Boolean = false, group: Option[String] = None) {
assert(choice.isEmpty || default.isEmpty || choice.contains(default.get), "If default value is specified for a choice option, then it must be a part of choice-set.")
override def toString: String = {
val textDefault = if (default.isDefined) s" (default: ${default.get})" else ""
val textChoice = if (choice.nonEmpty) s" (choice: ${choice.mkString(", ")})" else ""
name.padTo(37, ' ') + tpe.padTo(18, ' ') + desc + textDefault + textChoice + "\n"
}
}
/**
* Validates correctness of the options. The following elements are checked:
* - Are all required values provided?
* - Are there some unrecognized arguments?
* - For choice arguments, are all the provided values allowed?
*
* Additionally, not provided arguments with program-defined default values are added
* to the list of provided options along with their default value, in order to avoid
* assigning them locally different values in the different parts of the application.
*
* @param args A list of metadata regarding the accepted options.
*/
case class OptionsValidator(args: List[OptionInfo], optionsGroup: Option[String] = None) {
val argsMap: Map[String, OptionInfo] = args.map(a => (a.name, a)).toMap
private val strictUnrecognizedPolicy = true
/**
* Checks if the arguments are correct with respect to what is expected by the
* application.
*
* @param opt Options.
* @return Options with included entries for not present arguments with default values.
*/
def process(opt: Options): Options = {
checkRequiredArgs(opt)
checkUnrecognizedArgs(opt)
checkIncorrectValue(opt)
fillDefaultValues(opt)
}
/** Checks if all required arguments were provided. */
def checkRequiredArgs(opt: Options): Unit = {
args.foreach{ a =>
if (a.required && !opt.allOptions.contains(a.name))
throw MissingRequiredArgumentException(s"Missing required argument: '--${a.name}'")
}
}
/** Checks if an unrecognized argument was specified. */
def checkUnrecognizedArgs(opt: Options): Unit = {
opt.allOptions.foreach{ case (key, _) =>
if (strictUnrecognizedPolicy && !argsMap.contains(key))
throw UnrecognizedArgumentException(s"Unrecognized arguments: '--$key'")
}
}
/** Checks if an incorrect value was specified. Currently, only choices are being checked. */
def checkIncorrectValue(opt: Options): Unit = {
opt.allOptions.foreach{
case (key, value) if argsMap.contains(key) =>
val meta = argsMap(key)
if (meta.choice.nonEmpty && !meta.choice.contains(value)) {
throw IncorrectValueException(s"Incorrect value for: '--$key'. Possible values: ${meta.choice.mkString("'", "', '","'")}")
}
case _ => // Ignore
}
}
/** Adds default values for not provided arguments. */
def fillDefaultValues(opt: Options): Options = {
val list = args.flatMap {
case a: OptionInfo if a.default.isDefined && !opt.allOptions.contains(a.name) =>
if (a.group.isDefined && optionsGroup.isDefined && a.group.get != optionsGroup.get)
List()
else
List(s"--${a.name}", a.default.get)
case a: OptionInfo if opt.allOptions.contains(a.name) =>
List(s"--${a.name}", opt(a.name))
case _ => List()
}
Options(list)
}
def printOptions() {
println("REQUIRED ARGUMENTS:")
args.sortBy(_.name).foreach { a => if (a.required) print(a) }
println("\nOTHER ARGUMENTS:")
args.sortBy(_.name).foreach { a => if (!a.required) print(a) }
}
}
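// Usage sketch (hypothetical option values): declare the accepted options, then validate
// the raw CLI arguments and obtain an Options object with defaults filled in.
//
//   val accepted = List(
//     OptionInfo("benchmark", "String", required = true),
//     OptionInfo("populationSize", "Int", default = Some("1000")))
//   val opts = OptionsValidator(accepted).process(Options(List("--benchmark", "problem.sl")))
//   // opts now contains --benchmark problem.sl as well as the default --populationSize 1000.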
object OptionsValidator {
val optVersion = OptionInfo("version", tpe="-", desc="Prints version.")
val optHelp = OptionInfo("help", tpe="-", desc="Prints help.")
}
object CDGPOptions {
val groupCDSR = "cdsr"
val groupCDGP = "cdgp"
val args = mutable.MutableList[OptionInfo]()
args += OptionsValidator.optVersion
args += OptionsValidator.optHelp
// required args
args += OptionInfo("benchmark", "String", required=true, desc="Path to a file in the SYGUS format describing the synthesis problem.")
args += OptionInfo("method", "String", choice=Set("CDGP", "CDGPprops", "GP", "GPR"), required=true, desc="Search algorithm to be used.")
args += OptionInfo("solverPath", "String", required=true, desc="Path to the SMT solver.")
// selected most important args
args += OptionInfo("evolutionMode", "String", choice=Set("generational", "steadyState"), default=Some("steadyState"), desc="Type of evolution: generational (new population is created), or steady state (solutions are updated in place one by one).")
args += OptionInfo("maxGenerations", "Int", default=Some("50"), desc="Maximum number of generations.")
args += OptionInfo("maxTime", "Int", default=Some("86400000"), desc="Maximum runtime in milliseconds.")
args += OptionInfo("selection", "String", choice=Set("lexicase", "tournament"), default=Some("lexicase"), desc="Selection of the evolutionary algorithm.")
// other args
args += OptionInfo("allowTestDuplicates", "Bool", default=Some("false"), desc="If false, then the test duplicates will not be added to the set of test cases. In most cases the preferred option.")
args += OptionInfo("globalConstraintInFitness", "Bool", default=Some("false"), desc="If true, then the solution will be verified on all formal constraints at once and the result will be prepended to the fitness vector.")
args += OptionInfo("gprMaxInt", "Int", default=Some("100"), desc="Upper bound for Int terminals in GPR.", group=Some(groupCDGP))
args += OptionInfo("gprMaxDouble", "Double", default=Some("1.0"), desc="Upper bound for Double terminals in GPR.", group=Some(groupCDGP))
args += OptionInfo("gprMinInt", "Int", default=Some("-100"), desc="Lower bound for Int terminals in GPR.", group=Some(groupCDGP))
args += OptionInfo("gprMinDouble", "Double", default=Some("0.0"), desc="Lower bound for Double terminals in GPR.", group=Some(groupCDGP))
args += OptionInfo("gprRetryIfUndefined", "Bool", default=Some("true"), desc="In GPR, when a new random test is generated, check if the output for the test is defined, i.e., not all output values are correct. Adding such tests is meaningless. If the function is known to be defined at every point, switching this off will slightly speed up GPR.", group=Some(groupCDGP))
args += OptionInfo("lexicaseDeselection", "Bool", default=Some("false"), desc="Deselection to be used in lexicase.")
args += OptionInfo("logAllQueries", "Bool", default=Some("false"), desc="Log every query to the solver.")
args += OptionInfo("logPassedConstraints", "Bool", default=Some("true"), desc="Save information about which constraints were passed. Requires a separate verification query for each constraint.")
args += OptionInfo("logTestsHistory", "Bool", default=Some("false"), desc="Save for each generation the number of generated tests.")
args += OptionInfo("maxNewTestsPerIter", "Int", default=Some("10"), desc="Maximum number of tests which can be added per iteration.")
args += OptionInfo("maxRecursiveCalls", "Int", default=Some("1"), desc="Maximum number of allowed recursive invocations in a candidate solution.")
args += OptionInfo("maxSolverRestarts", "Int", default=Some("2"), desc="Maximum number of times a solver will be restarted after failure.")
args += OptionInfo("mixedSpecAllowed", "Bool", default=Some("true"), desc="If false, then tests will be treated as part of a formal specification instead of seeding the set of test cases.")
args += OptionInfo("moreSolverArgs", "String", default=Some(""), desc="Additional arguments for the solver, appended after the previous.")
args += OptionInfo("multipop.maxGenerations", "Int", default=Some("100"), desc="Number of generations per subpopulation.", group=Some(groupCDGP))
args += OptionInfo("multipop.maxTime", "Int", default=Some("86400000"), desc="Maximum time for a multipop scenario.", group=Some(groupCDGP))
args += OptionInfo("multipop.M", "Int", default=Some("5"), desc="Number of populations.", group=Some(groupCDGP))
args += OptionInfo("multipop.scheme", "String", choice=Set("none", "convectionEqualNumber"), default=Some("none"),
desc="Scheme of redistributing solutions among subpopulations in the multipop scenario.", group=Some(groupCDGP))
args += OptionInfo("notes", "String", desc="Any additional notes to be saved in logs.")
args += OptionInfo("maxRestarts", "Int", desc="Number of times the algorithm will be restarted. If a correct solution is found in any run, then the whole algorithm terminates.")
args += OptionInfo("optionsFile", "String", desc="Path to property file from which options will be read.")
args += OptionInfo("partialConstraintsInFitness", "Bool", desc="If true, then the correctness of solution will be checked separately on individual constraints, and this information will be prepended to the evaluation vector. By default true for CDGPprops, otherwise false.")
args += OptionInfo("partialConstraintsWeight", "Int", default=Some("1"), desc="The weight of the partial constraints for lexicase selection. For example the weight of 2 means that each partial constraint will occur twice in the fitness vector.")
args += OptionInfo("printTests", "Bool", default=Some("false"), desc="Prints every added test and after the evolution ends prints all collected tests.")
args += OptionInfo("printQueries", "Bool", default=Some("false"), desc="Print all queries to SMT solver.")
args += OptionInfo("recDepthLimit", "Int", default=Some("1000"), desc="A limit of calls for recursive functions.")
args += OptionInfo("regression", "Bool", default=Some("false"), desc="If true, then the version of CDGP for symbolic regression problems will be used.")
args += OptionInfo("reportFreq", "Int", desc="Frequency of fitness reporting.")
args += OptionInfo("silent", "Bool", default=Some("false"), desc="Less printing.")
args += OptionInfo("saveTests", "Bool", default=Some("false"), desc="Saving every generated counterexample.")
args += OptionInfo("searchForSecondOutput", "Bool", default=Some("true"), desc="If yes, then after finding one correct output for the incomplete test the next one will be searched for in order to determine, if the correct output is unique or not. If false, the search will be aborted and test will remain incomplete.")
args += OptionInfo("sizeInFitness", "Bool", default=Some("false"), desc="If true, then to the evaluation vector will be prepended the size of the solution to foster shorter programs.")
args += OptionInfo("solverArgs", "String", desc="If specified, then these arguments will be used by the solver and CDGP will not change them in any way.")
args += OptionInfo("solverInteractive", "Bool", default=Some("true"), desc="Run solver in interactive mode (much faster).")
args += OptionInfo("solverTimeout", "Int", desc="Time in milliseconds after which solver will be terminated.")
args += OptionInfo("solverHardTimeout", "Int", desc="Time in milliseconds after which solver process will be terminated and restarted, even when the computations haven't stopped. Sometimes solver ignores the time parameters it is given.")
args += OptionInfo("solverType", "String", choice=Set("z3", "cvc4", "other"), default=Some("z3"), desc="Type of the solver. Must be specified, because some solvers require different options to work effectively.")
args += OptionInfo("testsMaxDiff", "Int", desc="If specified, solution will be verified if it does not pass at most testsMaxDiff tests. TestsRatio parameter would be ignored.")
args += OptionInfo("testsRatio", "Double", default=Some("1.0"), desc="Ratio of tests which must be passed in order to apply verification in search for a counterexample.")
args += OptionInfo("testsTypesForRatio", "[String]+", desc="Types of tests based on which ratio of passed tests will be computed. c - complete tests, i - incomplete tests, s - special tests (e.g., partial constraints). Example usage: '--testsTypesForRatio i,c'.")
args += OptionInfo("verbose", "Bool", default=Some("false"), desc="More printing.")
// options used only in the regression mode
args += OptionInfo("noiseDeltaX", "Double", default=Some("0.0"), desc="In regression mode, this will be the modifier for the standard deviation of the independent variables. Higher value means higher noise.", group=Some(groupCDSR))
args += OptionInfo("noiseDeltaY", "Double", default=Some("0.0"), desc="In regression mode, this will be the modifier for the standard deviation of the dependent variable. Higher value means higher noise.", group=Some(groupCDSR))
args += OptionInfo("notImprovedWindow", "Int", default=Some("15"), desc="A number of iterations without improvement, after which a *new* bestSoFar solution worse on a validation set than the previous bestSoFar solution will trigger the termination of EA.", group=Some(groupCDSR))
args += OptionInfo("optThreshold", "Double", default=None, desc="Optimality threshold. If the solution's error is below this number, then it is assumed to be optimal and the run is terminated. If not specified, it is computed automatically as 0.001 times standard deviation of tests outputs.", group=Some(groupCDSR))
args += OptionInfo("optThresholdC", "Double", default=Some("0.01"), desc="Factor C for automatic scaling of the optimality threshold ((C*stddev)^2).", group=Some(groupCDSR))
args += OptionInfo("shuffleData", "Bool", default=Some("true"), desc="If true, then the test cases will be shuffled before dividing them on training and test sets. By setting this to false one can be certain, that the first sizeTrainSet examples will land in the training set.", group=Some(groupCDSR))
args += OptionInfo("sizeTestSet", "String", default=Some("0"), desc="Size of the test set. When '%' is the suffix, the size is taken as a given percent.", group=Some(groupCDSR))
args += OptionInfo("sizeTrainSet", "String", desc="Size of the training set. When '%' is the suffix, the size is taken as a given percent.", group=Some(groupCDSR))
args += OptionInfo("sizeValidationSet", "String", default=Some("0"), desc="Size of the validation set. When '%' is the suffix, the size is taken as a given percent.", group=Some(groupCDSR))
args += OptionInfo("testErrorUseOptThreshold", "Bool", default=Some("false"), desc="If true, then the optimality criterion for tests will be the error on individual tests rather than the cumulative MSE. This option is limited to the termination criterion of the CDGP.", group=Some(groupCDSR))
args += OptionInfo("testErrorOptPercent", "Double", default=Some("0.05"), desc="The percent threshold, for example, a 5% deviation from the original value treated as acceptable. It is the default option when the correctness of a single regression test is concerned. Assumed notation: 1% = 0.01. Is used only for checking the optimality of some solution.", group=Some(groupCDSR))
args += OptionInfo("testErrorOptValue", "Int", desc="An absolute deviation from the expected value while still treated as a passed test. If this option is specified, then it overrides the --testErrorOptPercent option. Is used only for checking the optimality of some solution.", group=Some(groupCDSR))
args += OptionInfo("testErrorVerPercent", "Double", default=Some("0.05"), desc="The percent threshold, for example, a 5% deviation from the original value treated as acceptable. It is the default option when the correctness of a single regression test is concerned. Assumed notation: 1% = 0.01.", group=Some(groupCDSR))
args += OptionInfo("testErrorVerValue", "Int", desc="An absolute deviation from the expected value while still treated as a passed test. If this option is specified, then it overrides the --testErrorVerPercent option.", group=Some(groupCDSR))
// fuel and swim options
args += OptionInfo("deleteOutputFile", "Bool", default=Some("true"), desc="Deletes output file upon successful completion of experiment.")
args += OptionInfo("operatorProbs", "[Double]+", default=Some("0.5,0.5"), desc="Probabilities of engaging search operators (comma-separated list of doubles). For CDGP, the first value corresponds to mutation, and the second to crossover.")
args += OptionInfo("outDir", "String", desc="Output directory.")
args += OptionInfo("outFile", "String", desc="Output file.")
args += OptionInfo("quiet", "Bool", default=Some("false"), desc="Silences progress reporting.")
args += OptionInfo("parEval", "Bool", default=Some("false"), desc="Enables multithreaded evaluation.")
args += OptionInfo("printResults", "Bool", default=Some("false"), desc="Prints the content of result collector at the end of run.")
args += OptionInfo("populationSize", "Int", default=Some("1000"), desc="Population size.")
args += OptionInfo("removeEvalDuplicates", "Bool", default=Some("false"), desc="Removes duplicates w.r.t. evaluation in NSGA2Selection.")
args += OptionInfo("saveLastState", "Bool", default=Some("false"), desc="Saves the snapshot of the final search state to a file.")
args += OptionInfo("saveBestSoFar", "Bool", default=Some("false"), desc="Saves the best solution found so far after every iteration.")
args += OptionInfo("seed", "Int", default=Some("0"), desc="Seed for pseudorandom generator.")
args += OptionInfo("snapshotFrequency", "Int", default=Some("0"), desc="Saves the snapshot of the current search state every n iterations (generations).")
args += OptionInfo("tournamentSize", "Int", default=Some("7"), desc="Size of the tournament in the tournament selection.")
args += OptionInfo("tournamentDeselectSize", "Int", desc="Size of the tournament in the tournament deselection.")
// swim-specific options
args += OptionInfo("initMaxTreeDepth", "Int", default=Some("4"), desc=".")
args += OptionInfo("maxSubtreeDepth", "Int", default=Some("4"), desc=".")
args += OptionInfo("maxTreeDepth", "Int", default=Some("12"), desc=".")
args += OptionInfo("stoppingDepthRatio", "Double", default=Some("0.8"), desc=".")
def getValidator(group: Option[String]) = OptionsValidator(args.toList, group)
}
|
kkrawiec/CDGP
|
src/main/scala/app/CDGPOptions.scala
|
Scala
|
mit
| 18,838 |
package nodes.misc
import nodes.stats.TermFrequency
import org.apache.spark.SparkContext
import org.scalatest.FunSuite
import workflow.PipelineContext
class TermFrequencySuite extends FunSuite with PipelineContext {
test("term frequency of simple strings") {
sc = new SparkContext("local", "test")
val in = Seq(Seq[Any]("b", "a", "c", "b", "b", "a", "b"))
val out = TermFrequency().apply(sc.parallelize(in)).first().toMap
assert(out === Map("a" -> 2, "b" -> 4, "c" -> 1))
}
test("term frequency of varying types") {
sc = new SparkContext("local", "test")
val in = Seq(Seq("b", "a", "c", ("b", "b"), ("b", "b"), 12, 12, "a", "b", 12))
val out = TermFrequency().apply(sc.parallelize(in)).first().toMap
assert(out === Map("a" -> 2, "b" -> 2, "c" -> 1, ("b", "b") -> 2, 12 -> 3))
}
test("log term frequency") {
sc = new SparkContext("local", "test")
val in = Seq(Seq[Any]("b", "a", "c", "b", "b", "a", "b"))
val out = TermFrequency(x => math.log(x + 1)).apply(sc.parallelize(in)).first().toMap
assert(out === Map("a" -> math.log(3), "b" -> math.log(5), "c" -> math.log(2)))
}
}
|
tomerk/keystone
|
src/test/scala/nodes/misc/TermFrequencySuite.scala
|
Scala
|
apache-2.0
| 1,138 |
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv
import kantan.csv.laws.discipline.arbitrary._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
@SuppressWarnings(Array("org.wartremover.warts.Throw"))
class DecodeResultTests extends AnyFunSuite with ScalaCheckPropertyChecks with Matchers {
test("DecodeResult.success should return a Right") {
forAll { i: Int =>
DecodeResult.success(i) should be(Right(i))
}
}
test("DecodeResult.apply should return a Right on 'good' values") {
forAll { i: Int =>
DecodeResult(i) should be(Right(i))
}
}
test("DecodeResult.apply should return a Left on 'bad' values") {
forAll { e: Exception =>
DecodeResult(throw e) should be(Left(DecodeError.TypeError(e)))
}
}
test("DecodeResult.outOfBounds should return a Left") {
forAll { i: Int =>
DecodeResult.outOfBounds(i) should be(Left(DecodeError.OutOfBounds(i)))
}
}
}
|
nrinaudo/tabulate
|
core/shared/src/test/scala/kantan/csv/DecodeResultTests.scala
|
Scala
|
mit
| 1,596 |
package com.twitter.finagle
import com.twitter.finagle.stats.DefaultStatsReceiver
import com.twitter.finagle.toggle.{StandardToggleMap, ToggleMap}
package object memcached {
/**
* The name of the finagle-memcached [[ToggleMap]].
*/
private[this] val LibraryName: String =
"com.twitter.finagle.memcached"
/**
* The [[ToggleMap]] used for finagle-memcached.
*/
private[finagle] val Toggles: ToggleMap =
StandardToggleMap(LibraryName, DefaultStatsReceiver)
}
|
twitter/finagle
|
finagle-memcached/src/main/scala/com/twitter/finagle/package.scala
|
Scala
|
apache-2.0
| 488 |
package com.twitter.finatra.http.tests.integration.doeverything.main.domain
import com.twitter.finatra.validation.constraints.PastTime
case class TestUserWithInvalidFieldValidation(@PastTime name: String)
|
twitter/finatra
|
http-server/src/test/scala/com/twitter/finatra/http/tests/integration/doeverything/main/domain/TestUserWithInvalidFieldValidation.scala
|
Scala
|
apache-2.0
| 207 |
package metamorphic.dsl.model.builder
import metamorphic.dsl.model.Entity
import metamorphic.dsl.util.Attachable
class EntityBuilder(val entityName: String) extends Attachable {
private var builders: List[PropertyBuilder] = Nil
def build: Entity = {
Entity(entityName, propertyBuilders.map(builder => builder.build))
}
def propertyBuilders: List[PropertyBuilder] = {
builders.reverse
}
def builder(propertyName: String): Option[PropertyBuilder] = {
builders.find(builder => builder.propertyName.equals(propertyName))
}
def add(propertyName: String): PropertyBuilder = {
val builder = new PropertyBuilder(propertyName)
add(builder)
builder
}
def add(propertyBuilder: PropertyBuilder): EntityBuilder = {
builders = propertyBuilder :: builders
this
}
def add(propertyBuilders: List[PropertyBuilder]): EntityBuilder = {
propertyBuilders.foreach(builder => add(builder))
this
}
}
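// Usage sketch (hypothetical entity and property names):
//   val builder = new EntityBuilder("User")
//   builder.add("name")    // returns the new PropertyBuilder for further configuration
//   builder.add("email")
//   val entity = builder.build  // Entity("User", ...) with the properties built from the two builders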
|
frroliveira/metamorphic
|
metamorphic/src/main/scala/metamorphic/dsl/model/builder/EntityBuilder.scala
|
Scala
|
mit
| 950 |
package jp.opap.material.dao
import java.util.UUID
import com.mongodb.BasicDBObject
import com.mongodb.client.MongoDatabase
import jp.opap.material.dao.MongoComponentDao.FileAndThumbnail
import jp.opap.material.dao.MongoDao.Documents
import jp.opap.material.model.ComponentEntry
import jp.opap.material.model.ComponentEntry.{DirectoryEntry, FileEntry}
import jp.opap.material.model.Thumbnail
import org.bson.Document
import scala.beans.BeanProperty
import scala.collection.JavaConverters._
class MongoComponentDao(mongo: MongoDatabase) extends MongoDao(mongo) {
override def collectionName = "components"
def insert(item: ComponentEntry): Unit = {
def componentDocument(component: ComponentEntry): Document = new Document()
.append("_id", component.id.toString)
.append("name", component.name)
.append("parent_id", component.parentId.map(_.toString).orNull)
.append("repository_id", component.repositoryId)
.append("path", component.path)
def fileDocument(file: FileEntry): Document = new Document()
.append("blob_id", file.blobId)
val document = item match {
case component: DirectoryEntry => componentDocument(component).append("_constructor", "CompositeElement")
case component: FileEntry => (componentDocument(component) + fileDocument(component)).append("_constructor", "LeafElement")
}
this.collection.insertOne(document)
}
def findById(id: UUID): Option[ComponentEntry] = {
this.findOneByKey("_id", id)
.map(fromDocument)
}
def findFileById(id: UUID): Option[FileEntry] = {
this
.findById(id)
.flatMap(item => item match {
case file: FileEntry => Option(file)
case _ => Option.empty
})
}
def findFiles(repositoryId: String): Seq[FileEntry] = {
val filter = new BasicDBObject()
.append("repository_id", repositoryId)
this.collection.find(filter)
.asScala
.map(fromDocument)
.par
.flatMap {
case file: FileEntry => Option(file)
case _ => Option.empty[FileEntry]
}
.toSeq
.seq
}
def findImages(): Seq[FileAndThumbnail] = {
def thumb(document: Document): Option[FileAndThumbnail] = {
fromDocument(document) match {
case file: FileEntry => document
.getFirstDocumentFrom("thumbnail")
.map(thumbnail => FileAndThumbnail(file, MongoThumbnailDao.infoFromDocument(thumbnail)))
case _ => Option.empty
}
}
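// Aggregation pipeline: join each component with the 'thumbnails' collection on its blob id,
// keep only components that have exactly one thumbnail, and drop the binary thumbnail data
// so that only the thumbnail metadata is returned.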
val pipeline = List(
"{ $lookup: { from: 'thumbnails', localField: 'blob_id', foreignField: '_id', as: 'thumbnail' } }",
"{ $match: { thumbnail: { $size: 1 } } }",
"{ $project: { 'thumbnail.data': false } }"
).map(BasicDBObject.parse).asJava
this.collection.aggregate(pipeline)
.asScala
.flatMap(thumb)
.toSeq
}
def deleteByRepositoryId(repositoryId: String): Unit = {
val filter = new BasicDBObject()
.append("repository_id", repositoryId)
this.collection.deleteMany(filter)
}
def fromDocument(document: Document): ComponentEntry = {
val id = UUID.fromString(document.getString("_id"))
val repositoryId = document.getString("repository_id")
val parentId = Option(document.getString("parent_id"))
.map(UUID.fromString)
val name = document.getString("name")
val path = document.getString("path")
val blobId = document.getString("blob_id")
document.getString("_constructor") match {
case "CompositeElement" => DirectoryEntry(id, repositoryId, parentId, name, path)
case "LeafElement" => FileEntry(id, repositoryId, parentId, name, path, blobId)
case _ => throw new IllegalArgumentException()
}
}
}
object MongoComponentDao {
case class FileAndThumbnail(@BeanProperty file: FileEntry, @BeanProperty thumbnail: Thumbnail)
}
|
opap-jp/material-explorer
|
rest/src/main/scala/jp/opap/material/dao/MongoComponentDao.scala
|
Scala
|
mit
| 3,837 |
package com.sfxcode.sapphire.core.fxml
import scala.reflect.runtime.universe
import scala.reflect.runtime.universe._
case class FxmlLocation(path: String) extends scala.annotation.StaticAnnotation
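// Usage sketch (hypothetical controller class and path): annotate a class with its FXML
// location and read the path back reflectively via pathValue, e.g.
//   @FxmlLocation("/fxml/Main.fxml") class MainController
//   FxmlLocation.pathValue(scala.reflect.classTag[MainController])  // "/fxml/Main.fxml"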
object FxmlLocation {
def pathValue(clazzTag: scala.reflect.ClassTag[_]): String = {
var result = ""
val runtimeClass = clazzTag.runtimeClass
val rootMirror = universe.runtimeMirror(runtimeClass.getClassLoader)
val myAnnotatedClass = rootMirror.classSymbol(runtimeClass)
val annotation: Option[universe.Annotation] =
myAnnotatedClass.annotations.find(_.tree.tpe =:= universe.typeOf[FxmlLocation])
annotation.flatMap { a =>
a.tree.children.tail.collectFirst {
case Literal(Constant(name: String)) =>
result = name
}
}
result
}
}
|
sfxcode/sapphire-core
|
src/main/scala/com/sfxcode/sapphire/core/fxml/FxmlLocation.scala
|
Scala
|
apache-2.0
| 802 |
/*
* Copyright (C) 2011 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.workflow.hook
import java.util.concurrent.atomic.AtomicInteger
import org.openmole.core.context.Val
import org.openmole.core.workflow.mole._
import org.openmole.core.workflow.task._
import org.openmole.core.workflow.job._
import org.openmole.core.workflow.mole._
import org.openmole.core.workflow.builder._
import org.openmole.core.workflow.composition.TaskNode
import org.openmole.core.workflow.dsl._
import org.openmole.core.workflow.test.{ TestHook, TestTask }
import org.scalatest._
class HookSpec extends FlatSpec with Matchers {
import org.openmole.core.workflow.test.Stubs._
"A hook" should "intercept the execution of a task" in {
val executed = new AtomicInteger(0)
val p = Val[String]
val t1 =
TestTask { _ + (p → "test") } set (
name := "Test",
outputs += p
)
val hook = TestHook { context ⇒
context.contains(p) should equal(true)
context(p) should equal("test")
executed.incrementAndGet()
}
val ex = t1 hook hook
ex.run
executed.get should equal(1)
}
"A hook" should "intercept the execution of a master capsule" in {
@transient var executed = false
val p = Val[String]("p")
val t1 =
TestTask { _ + (p → "test") } set (
outputs += p
)
val hook = TestHook { context ⇒
context.contains(p) should equal(true)
context(p) should equal("test")
executed = true
}
val ex = Master(t1) hook hook
ex.run
executed should equal(true)
}
"Display hook" should "be accepted" in {
val t1 = EmptyTask()
val ex = t1 hook display
(ex: DSL)
}
}
|
openmole/openmole
|
openmole/core/org.openmole.core.workflow/src/test/scala/org/openmole/core/workflow/mole/HookSpec.scala
|
Scala
|
agpl-3.0
| 2,379 |
import scala.quoted.*
inline def rewrite[T](inline x: Any): Any = ${ stringRewriter('x) }
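// Usage sketch: rewrite reverses the string literals found in the inlined expression at
// compile time, e.g. rewrite("abc") is expected to yield "cba"; expressions whose expected
// type is a singleton String type are left unchanged (see the fallback case in StringRewriter),
// and non-literal subtrees are handled by transformChildren.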
private def stringRewriter(e: Expr[Any])(using Quotes): Expr[Any] =
StringRewriter.transform(e)
private object StringRewriter extends ExprMap {
def transform[T](e: Expr[T])(using Type[T])(using Quotes): Expr[T] = e match
case '{ ${Expr(s)}: String } =>
Expr(s.reverse) match
case '{ $x: T } => x
case _ => e // e had a singleton String type
case _ => transformChildren(e)
}
|
dotty-staging/dotty
|
tests/run-macros/expr-map-1/Macro_1.scala
|
Scala
|
apache-2.0
| 503 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.DefinitionAttributes._
import com.sksamuel.elastic4s.analyzers.Analyzer
import org.elasticsearch.common.geo.GeoDistance
import org.elasticsearch.common.unit.{DistanceUnit, Fuzziness}
import org.elasticsearch.index.query.CommonTermsQueryBuilder.Operator
import org.elasticsearch.index.query._
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder
import org.elasticsearch.index.query.support.QueryInnerHitBuilder
import org.elasticsearch.search.fetch.innerhits.InnerHitsBuilder.InnerHit
import scala.language.implicitConversions
import scala.language.reflectiveCalls
/** @author Stephen Samuel */
trait QueryDsl {
implicit def string2query(string: String): SimpleStringQueryDefinition = new SimpleStringQueryDefinition(string)
implicit def tuple2query(kv: (String, String)): TermQueryDefinition = new TermQueryDefinition(kv._1, kv._2)
def query = this
def boostingQuery: BoostingQueryDefinition = new BoostingQueryDefinition
def commonQuery(field: String) = new CommonQueryExpectsText(field)
def commonQuery = new CommonQueryExpectsField
class CommonQueryExpectsField {
def field(name: String) = new CommonQueryExpectsText(name)
}
class CommonQueryExpectsText(name: String) {
def text(q: String): CommonTermsQueryDefinition = new CommonTermsQueryDefinition(name, q)
def query(q: String): CommonTermsQueryDefinition = text(q)
}
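// Usage sketch (hypothetical field name and query text); both forms build a CommonTermsQueryDefinition:
//   commonQuery("description").text("quick brown fox")
//   commonQuery.field("description").query("quick brown fox")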
def constantScoreQuery(q: QueryDefinition) = ConstantScoreDefinition(QueryBuilders.constantScoreQuery(q.builder))
def dismax = new DisMaxDefinition
def existsQuery = ExistsQueryDefinition
def functionScoreQuery(query: QueryDefinition): FunctionScoreQueryDefinition = new FunctionScoreQueryDefinition(query)
@deprecated("Use boolQuery instead with a must clause for the query and a filter clause for the filter", "2.0.0")
def filteredQuery = new FilteredQueryDefinition
def fuzzyQuery(name: String, value: Any) = new FuzzyQueryDefinition(name, value)
def geoBoxQuery(field: String) = GeoBoundingBoxQueryDefinition(field)
def geoDistanceQuery(field: String): GeoDistanceQueryDefinition = GeoDistanceQueryDefinition(field)
def geoHashCell(field: String, value: String): GeoHashCellQuery = new GeoHashCellQuery(field).geohash(value)
def geoPolygonQuery(field: String) = GeoPolygonQueryDefinition(field)
def indicesQuery(indices: String*) = new {
def query(query: QueryDefinition): IndicesQueryDefinition = new IndicesQueryDefinition(indices, query)
}
def hasChildQuery(`type`: String) = new HasChildExpectsQuery(`type`)
class HasChildExpectsQuery(`type`: String) {
def query(q: QueryDefinition): HasChildQueryDefinition = HasChildQueryDefinition(`type`, q)
}
def hasParentQuery(`type`: String) = new HasParentExpectsQuery(`type`)
class HasParentExpectsQuery(`type`: String) {
def query(q: QueryDefinition) = new HasParentQueryDefinition(`type`, q)
}
def matchQuery(tuple: (String, Any)): MatchQueryDefinition = matchQuery(tuple._1, tuple._2)
def matchQuery(field: String, value: Any): MatchQueryDefinition = new MatchQueryDefinition(field, value)
def matchPhraseQuery(field: String, value: Any): MatchPhraseDefinition = new MatchPhraseDefinition(field, value)
def matchPhrasePrefixQuery(field: String, value: Any) = new MatchPhrasePrefixDefinition(field, value)
def multiMatchQuery(text: String) = new MultiMatchQueryDefinition(text)
def matchAllQuery = new MatchAllQueryDefinition
@deprecated("Use existsQuery with a mustNot clause", "2.2.0")
def missingQuery(field: String) = MissingQueryDefinition(field)
def moreLikeThisQuery(flds: Iterable[String]): MoreLikeThisQueryDefinition = MoreLikeThisQueryDefinition(flds.toSeq)
def moreLikeThisQuery(first: String, rest: String*): MoreLikeThisQueryDefinition = moreLikeThisQuery(first +: rest)
def nestedQuery(path: String) = new {
def query(query: QueryDefinition) = NestedQueryDefinition(path, query)
}
def query(q: String): QueryStringQueryDefinition = queryStringQuery(q)
def queryStringQuery(q: String): QueryStringQueryDefinition = new QueryStringQueryDefinition(q)
def rangeQuery(field: String): RangeQueryDefinition = new RangeQueryDefinition(field)
def regexQuery(tuple: (String, Any)): RegexQueryDefinition = regexQuery(tuple._1, tuple._2)
def regexQuery(field: String, value: Any): RegexQueryDefinition = new RegexQueryDefinition(field, value)
def prefixQuery(tuple: (String, Any)): PrefixQueryDefinition = prefixQuery(tuple._1, tuple._2)
def prefixQuery(field: String, value: Any): PrefixQueryDefinition = new PrefixQueryDefinition(field, value)
def scriptQuery(script: String): ScriptQueryDefinition = ScriptQueryDefinition(script)
def simpleStringQuery(q: String): SimpleStringQueryDefinition = new SimpleStringQueryDefinition(q)
def stringQuery(q: String): QueryStringQueryDefinition = new QueryStringQueryDefinition(q)
def spanFirstQuery = new {
def query(spanQuery: SpanQueryDefinition) = new {
def end(end: Int) = new SpanFirstQueryDefinition(spanQuery, end)
}
}
def spanOrQuery: SpanOrQueryDefinition = new SpanOrQueryDefinition
def spanTermQuery(field: String, value: Any): SpanTermQueryDefinition = new SpanTermQueryDefinition(field, value)
def spanNotQuery: SpanNotQueryDefinition = new SpanNotQueryDefinition
def spanNearQuery: SpanNearQueryDefinition = new SpanNearQueryDefinition
def spanMultiTermQuery(query: MultiTermQueryDefinition) = new SpanMultiTermQueryDefinition(query)
def termQuery(tuple: (String, Any)): TermQueryDefinition = termQuery(tuple._1, tuple._2)
def termQuery(field: String, value: Any): TermQueryDefinition = TermQueryDefinition(field, value)
def termsQuery(field: String, values: AnyRef*): TermsQueryDefinition = {
TermsQueryDefinition(field, values.map(_.toString))
}
def termsQuery(field: String, values: Int*): IntTermsQueryDefinition = {
IntTermsQueryDefinition(field, values)
}
def termsQuery(field: String, values: Long*): LongTermsQueryDefinition = {
LongTermsQueryDefinition(field, values)
}
def termsQuery(field: String, values: Float*): FloatTermsQueryDefinition = {
FloatTermsQueryDefinition(field, values)
}
def termsQuery(field: String, values: Double*): DoubleTermsQueryDefinition = {
DoubleTermsQueryDefinition(field, values)
}
def wildcardQuery(tuple: (String, Any)): WildcardQueryDefinition = wildcardQuery(tuple._1, tuple._2)
def wildcardQuery(field: String, value: Any): WildcardQueryDefinition = new WildcardQueryDefinition(field, value)
def typeQuery(`type`: String) = TypeQueryDefinition(`type`)
@deprecated("use idsQuery", "2.0.0")
def ids(ids: Iterable[String]): IdQueryDefinition = IdQueryDefinition(ids.toSeq)
@deprecated("use idsQuery", "2.0.0")
def ids(ids: String*): IdQueryDefinition = IdQueryDefinition(ids.toSeq)
def idsQuery(ids: Iterable[String]): IdQueryDefinition = IdQueryDefinition(ids.toSeq)
def idsQuery(id: String, rest: String*): IdQueryDefinition = IdQueryDefinition(id +: rest)
def all: MatchAllQueryDefinition = new MatchAllQueryDefinition
// -- bool query dsl ---
def bool(block: => BoolQueryDefinition): BoolQueryDefinition = block
def bool(mustQueries: Seq[QueryDefinition],
shouldQueries: Seq[QueryDefinition],
notQueries: Seq[QueryDefinition]): BoolQueryDefinition = {
must(mustQueries).should(shouldQueries).not(notQueries)
}
def must(queries: QueryDefinition*): BoolQueryDefinition = new BoolQueryDefinition().must(queries: _*)
def must(queries: Iterable[QueryDefinition]): BoolQueryDefinition = new BoolQueryDefinition().must(queries)
  def filter(first: QueryDefinition, rest: QueryDefinition*): BoolQueryDefinition = filter(first +: rest)
def filter(queries: Iterable[QueryDefinition]): BoolQueryDefinition = new BoolQueryDefinition().filter(queries)
def should(queries: QueryDefinition*): BoolQueryDefinition = new BoolQueryDefinition().should(queries: _*)
def should(queries: Iterable[QueryDefinition]): BoolQueryDefinition = new BoolQueryDefinition().should(queries)
def not(queries: QueryDefinition*): BoolQueryDefinition = new BoolQueryDefinition().not(queries: _*)
def not(queries: Iterable[QueryDefinition]): BoolQueryDefinition = new BoolQueryDefinition().not(queries)
}
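// Hedged usage sketch (the object name and field values below are illustrative, not part
// of the library): the DSL is brought into scope by mixing in QueryDsl (or importing an
// ElasticDsl-style object), and bool clauses are composed with the must/should/not
// helpers defined just above.
object QueryDslUsageExample extends QueryDsl {
  // documents whose "name" term is "sam", preferably matching "scala" in the title,
  // and never tagged as draft
  val q: BoolQueryDefinition =
    must(termQuery("name", "sam"))
      .should(matchQuery("title", "scala"))
      .not(termQuery("status", "draft"))
}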
class BoolQueryDefinition extends QueryDefinition {
val builder = QueryBuilders.boolQuery()
def adjustPureNegative(adjustPureNegative: Boolean): this.type = {
builder.adjustPureNegative(adjustPureNegative)
this
}
def boost(boost: Double): this.type = {
builder.boost(boost.toFloat)
this
}
def must(queries: QueryDefinition*): this.type = {
queries.foreach(builder must _.builder)
this
}
def must(queries: Iterable[QueryDefinition]): this.type = {
queries.foreach(builder must _.builder)
this
}
def filter(first: QueryDefinition, rest: QueryDefinition*): this.type = filter(first +: rest)
def filter(queries: Iterable[QueryDefinition]): this.type = {
queries.foreach(builder filter _.builder)
this
}
def not(queries: QueryDefinition*): this.type = {
queries.foreach(builder mustNot _.builder)
this
}
def not(queries: Iterable[QueryDefinition]): this.type = {
queries.foreach(builder mustNot _.builder)
this
}
def should(queries: QueryDefinition*): this.type = {
queries.foreach(builder should _.builder)
this
}
def should(queries: Iterable[QueryDefinition]): this.type = {
queries.foreach(builder should _.builder)
this
}
def minimumShouldMatch(minimumShouldMatch: String): this.type = {
builder.minimumShouldMatch(minimumShouldMatch: String)
this
}
def minimumShouldMatch(minimumNumberShouldMatch: Int): this.type = {
builder.minimumNumberShouldMatch(minimumNumberShouldMatch: Int)
this
}
def disableCoord(disableCoord: Boolean): this.type = {
builder.disableCoord(disableCoord: Boolean)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
trait QueryDefinition {
def builder: org.elasticsearch.index.query.QueryBuilder
}
case class FunctionScoreQueryDefinition(query: QueryDefinition)
extends QueryDefinition
with DefinitionAttributeBoost
with DefinitionAttributeBoostMode
with DefinitionAttributeMaxBoost
with DefinitionAttributeScoreMode
with DefinitionAttributeMinScore {
val builder = new FunctionScoreQueryBuilder(query.builder)
val _builder = builder
def scorers(scorers: ScoreDefinition[_]*): FunctionScoreQueryDefinition = {
scorers.foreach(scorer => scorer._filter match {
case None => builder.add(scorer.builder)
case Some(filter) => builder.add(filter.builder, scorer.builder)
})
this
}
}
case class FuzzyQueryDefinition(field: String, termValue: Any)
extends MultiTermQueryDefinition
with DefinitionAttributePrefixLength
with DefinitionAttributeBoost {
val builder = QueryBuilders.fuzzyQuery(field, termValue.toString)
val _builder = builder
def fuzziness(fuzziness: Fuzziness) = {
builder.fuzziness(fuzziness)
this
}
def maxExpansions(maxExpansions: Int) = {
builder.maxExpansions(maxExpansions)
this
}
def transpositions(transpositions: Boolean) = {
builder.transpositions(transpositions)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class Item(index: String, `type`: String, id: String)
case class MoreLikeThisQueryDefinition(fields: Seq[String]) extends QueryDefinition {
val _builder = QueryBuilders.moreLikeThisQuery(fields: _*)
val builder = _builder
def like(first: String, rest: String*): this.type = like(first +: rest)
def like(likes: Iterable[String]): this.type = {
_builder.like(likes.toSeq: _*)
this
}
def like(item: Item, rest: Item*): this.type = like(item +: rest)
def like(items: Seq[Item]): this.type = {
    builder.like(items.map(item => new MoreLikeThisQueryBuilder.Item(item.index, item.`type`, item.id)): _*)
this
}
def analyzer(analyser: String): this.type = {
_builder.analyzer(analyser)
this
}
@deprecated("Use unlike", "2.1.0")
def ignoreLike(first: String, rest: String*): this.type = unlike(first +: rest)
@deprecated("Use unlike", "2.1.0")
def ignoreLike(likes: Iterable[String]): this.type = {
_builder.unlike(likes.toSeq: _*)
this
}
def unlike(first: String, rest: String*): this.type = unlike(first +: rest)
def unlike(likes: Iterable[String]): this.type = {
_builder.unlike(likes.toSeq: _*)
this
}
def analyser(analyser: String): this.type = {
_builder.analyzer(analyser)
this
}
@deprecated("deprecated in elasticsearch", "2.0.0")
def ids(ids: String*): this.type = {
_builder.ids(ids: _*)
this
}
def exclude(): this.type = {
_builder.include(false)
this
}
def include(): this.type = {
_builder.include(true)
this
}
def failOnUnsupportedField(): this.type = {
_builder.failOnUnsupportedField(true)
this
}
def notFailOnUnsupportedField(): this.type = {
_builder.failOnUnsupportedField(false)
this
}
def minTermFreq(freq: Int): this.type = {
_builder.minTermFreq(freq)
this
}
def stopWords(stopWords: String*): this.type = {
_builder.stopWords(stopWords: _*)
this
}
def maxWordLength(maxWordLen: Int): this.type = {
_builder.maxWordLength(maxWordLen)
this
}
def minWordLength(minWordLen: Int): this.type = {
_builder.minWordLength(minWordLen)
this
}
def boostTerms(boostTerms: Double): this.type = {
_builder.boostTerms(boostTerms.toFloat)
this
}
def boost(boost: Double): this.type = {
_builder.boost(boost.toFloat)
this
}
def maxQueryTerms(maxQueryTerms: Int): this.type = {
_builder.maxQueryTerms(maxQueryTerms)
this
}
def minDocFreq(minDocFreq: Int): this.type = {
_builder.minDocFreq(minDocFreq)
this
}
def maxDocFreq(maxDocFreq: Int): this.type = {
_builder.maxDocFreq(maxDocFreq)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class MultiMatchQueryDefinition(text: String)
extends QueryDefinition
with DefinitionAttributeFuzziness
with DefinitionAttributePrefixLength
with DefinitionAttributeFuzzyRewrite
with DefinitionAttributeCutoffFrequency {
val _builder = QueryBuilders.multiMatchQuery(text)
val builder = _builder
def maxExpansions(maxExpansions: Int): MultiMatchQueryDefinition = {
builder.maxExpansions(maxExpansions)
this
}
def fields(_fields: Iterable[String]) = {
for ( f <- _fields ) builder.field(f)
this
}
def fields(_fields: String*): MultiMatchQueryDefinition = fields(_fields.toIterable)
def boost(boost: Double): MultiMatchQueryDefinition = {
builder.boost(boost.toFloat)
this
}
def analyzer(a: Analyzer): MultiMatchQueryDefinition = analyzer(a.name)
def analyzer(a: String): MultiMatchQueryDefinition = {
builder.analyzer(a)
this
}
def minimumShouldMatch(minimumShouldMatch: Int): MultiMatchQueryDefinition = {
builder.minimumShouldMatch(minimumShouldMatch.toString)
this
}
def minimumShouldMatch(minimumShouldMatch: String): MultiMatchQueryDefinition = {
builder.minimumShouldMatch(minimumShouldMatch: String)
this
}
@deprecated("@deprecated use a tieBreaker of 1.0f to disable dis-max query or select the appropriate Type", "1.2.0")
def useDisMax(useDisMax: Boolean): MultiMatchQueryDefinition = {
builder.useDisMax(java.lang.Boolean.valueOf(useDisMax))
this
}
def lenient(l: Boolean): MultiMatchQueryDefinition = {
builder.lenient(l)
this
}
def zeroTermsQuery(q: MatchQueryBuilder.ZeroTermsQuery): MultiMatchQueryDefinition = {
builder.zeroTermsQuery(q)
this
}
def tieBreaker(tieBreaker: Double): MultiMatchQueryDefinition = {
builder.tieBreaker(java.lang.Float.valueOf(tieBreaker.toFloat))
this
}
def operator(op: MatchQueryBuilder.Operator): MultiMatchQueryDefinition = {
builder.operator(op)
this
}
def operator(op: String): MultiMatchQueryDefinition = {
op match {
case "AND" => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.AND)
case _ => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.OR)
}
this
}
def matchType(t: MultiMatchQueryBuilder.Type): MultiMatchQueryDefinition = {
builder.`type`(t)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
def matchType(t: String): MultiMatchQueryDefinition = {
val mt = t match {
case "most_fields" => MultiMatchQueryBuilder.Type.MOST_FIELDS
case "cross_fields" => MultiMatchQueryBuilder.Type.CROSS_FIELDS
case "phrase" => MultiMatchQueryBuilder.Type.PHRASE
case "phrase_prefix" => MultiMatchQueryBuilder.Type.PHRASE_PREFIX
case _ => MultiMatchQueryBuilder.Type.BEST_FIELDS
}
matchType(mt)
}
}
case class GeoPolygonQueryDefinition(field: String)
extends QueryDefinition {
val builder = QueryBuilders.geoPolygonQuery(field)
val _builder = builder
def point(geohash: String): GeoPolygonQueryDefinition = {
builder.addPoint(geohash)
this
}
def point(lat: Double, lon: Double): GeoPolygonQueryDefinition = {
_builder.addPoint(lat, lon)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class GeoDistanceQueryDefinition(field: String)
extends QueryDefinition
with DefinitionAttributeLat
with DefinitionAttributeLon {
val builder = QueryBuilders.geoDistanceQuery(field)
val _builder = builder
def geoDistance(geoDistance: GeoDistance): GeoDistanceQueryDefinition = {
builder.geoDistance(geoDistance)
this
}
def geohash(geohash: String): GeoDistanceQueryDefinition = {
builder.geohash(geohash)
this
}
def queryName(name: String): GeoDistanceQueryDefinition = {
builder.queryName(name)
this
}
def distance(distance: String): GeoDistanceQueryDefinition = {
builder.distance(distance)
this
}
def distance(distance: Double, unit: DistanceUnit): GeoDistanceQueryDefinition = {
builder.distance(distance, unit)
this
}
def point(lat: Double, long: Double): GeoDistanceQueryDefinition = {
builder.point(lat, long)
this
}
}
case class GeoBoundingBoxQueryDefinition(field: String)
extends QueryDefinition {
val builder = QueryBuilders.geoBoundingBoxQuery(field)
val _builder = builder
private var _left: Double = _
private var _top: Double = _
private var _right: Double = _
private var _bottom: Double = _
private var _type: String = _
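  // Note: each corner setter below re-applies both coordinates of its corner
  // (topLeft or bottomRight), so the box is only fully specified once all four of
  // top/left/bottom/right have been called; call order does not matter after that.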
def `type`(`type`: String): GeoBoundingBoxQueryDefinition = {
_type = `type`
builder.`type`(_type)
this
}
def left(left: Double): GeoBoundingBoxQueryDefinition = {
_left = left
builder.topLeft(_top, _left)
this
}
def top(top: Double): GeoBoundingBoxQueryDefinition = {
_top = top
builder.topLeft(_top, _left)
this
}
def right(right: Double): GeoBoundingBoxQueryDefinition = {
_right = right
builder.bottomRight(_bottom, _right)
this
}
def bottom(bottom: Double): GeoBoundingBoxQueryDefinition = {
_bottom = bottom
builder.bottomRight(_bottom, _right)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class GeoDistanceRangeQueryDefinition(field: String)
extends QueryDefinition
with DefinitionAttributeTo
with DefinitionAttributeFrom
with DefinitionAttributeLt
with DefinitionAttributeGt
with DefinitionAttributeLat
with DefinitionAttributeLon
with DefinitionAttributePoint {
val builder = QueryBuilders.geoDistanceRangeQuery(field)
val _builder = builder
def geoDistance(geoDistance: GeoDistance): GeoDistanceRangeQueryDefinition = {
builder.geoDistance(geoDistance)
this
}
def geohash(geohash: String): GeoDistanceRangeQueryDefinition = {
builder.geohash(geohash)
this
}
def gte(gte: Any): GeoDistanceRangeQueryDefinition = {
builder.gte(gte)
this
}
def lte(lte: Any): GeoDistanceRangeQueryDefinition = {
builder.lte(lte)
this
}
def includeLower(includeLower: Boolean): GeoDistanceRangeQueryDefinition = {
builder.includeLower(includeLower)
this
}
def includeUpper(includeUpper: Boolean): GeoDistanceRangeQueryDefinition = {
builder.includeUpper(includeUpper)
this
}
def queryName(name: String): GeoDistanceRangeQueryDefinition = {
builder.queryName(name)
this
}
}
case class GeoHashCellQuery(field: String)
extends QueryDefinition {
val builder = QueryBuilders.geoHashCellQuery(field)
val _builder = builder
def point(lat: Double, long: Double): this.type = {
builder.point(lat, long)
this
}
def geohash(geohash: String): this.type = {
builder.geohash(geohash)
this
}
def neighbours(neighbours: Boolean): this.type = {
builder.neighbors(neighbours)
this
}
}
case class HasChildQueryDefinition(`type`: String, q: QueryDefinition)
extends QueryDefinition with DefinitionAttributeBoost {
val builder = QueryBuilders.hasChildQuery(`type`, q.builder)
val _builder = builder
/**
* Defines the minimum number of children that are required to match for the parent to be considered a match.
*/
def minChildren(min: Int): HasChildQueryDefinition = {
builder.minChildren(min)
this
}
/**
* Configures at what cut off point only to evaluate parent documents that contain the matching parent id terms
* instead of evaluating all parent docs.
*/
def shortCircuitCutoff(shortCircuitCutoff: Int): HasChildQueryDefinition = {
builder.setShortCircuitCutoff(shortCircuitCutoff)
this
}
/**
* Defines the maximum number of children that are required to match for the parent to be considered a match.
*/
def maxChildren(max: Int): HasChildQueryDefinition = {
builder.maxChildren(max)
this
}
/**
* Defines how the scores from the matching child documents are mapped into the parent document.
*/
def scoreMode(scoreMode: String): HasChildQueryDefinition = {
builder.scoreMode(scoreMode)
this
}
/**
* Defines how the scores from the matching child documents are mapped into the parent document.
*/
@deprecated("use scoreMode", "2.1.0")
def scoreType(scoreType: String): HasChildQueryDefinition = {
builder.scoreType(scoreType)
this
}
def queryName(name: String) = {
builder.queryName(name)
this
}
}
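// Hedged usage sketch (object name and field values are illustrative): hasChildQuery in
// the DSL above yields an expects-query helper, so the child query is supplied through
// .query(...) and the documented options are chained on the resulting definition.
object HasChildQueryUsageExample extends QueryDsl {
  val parentsWithScalaBooks: HasChildQueryDefinition =
    hasChildQuery("book")
      .query(matchQuery("title", "scala"))
      .minChildren(1)   // require at least one matching child
      .scoreMode("max") // score the parent by its best-matching child
}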
case class HasParentQueryDefinition(`type`: String, q: QueryDefinition)
extends QueryDefinition with DefinitionAttributeBoost {
val builder = QueryBuilders.hasParentQuery(`type`, q.builder)
val _builder = builder
def scoreMode(scoreMode: String): HasParentQueryDefinition = {
builder.scoreMode(scoreMode)
this
}
@deprecated("use scoreMode", "2.1.0")
def scoreType(scoreType: String): HasParentQueryDefinition = {
builder.scoreType(scoreType)
this
}
def queryName(name: String) = {
builder.queryName(name)
this
}
}
case class IndicesQueryDefinition(indices: Iterable[String], query: QueryDefinition) extends QueryDefinition {
override val builder = QueryBuilders.indicesQuery(query.builder, indices.toSeq: _*)
def noMatchQuery(query: QueryDefinition): this.type = {
builder.noMatchQuery(query.builder)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
class BoostingQueryDefinition extends QueryDefinition {
val builder = QueryBuilders.boostingQuery()
def positive(block: => QueryDefinition) = {
builder.positive(block.builder)
this
}
def negative(block: => QueryDefinition) = {
builder.negative(block.builder)
this
}
def positiveBoost(b: Double) = {
builder.boost(b.toFloat)
this
}
def negativeBoost(b: Double) = {
builder.negativeBoost(b.toFloat)
this
}
}
case class ConstantScoreDefinition(builder: ConstantScoreQueryBuilder) extends QueryDefinition {
def boost(b: Double): QueryDefinition = {
builder.boost(b.toFloat)
this
}
}
case class CommonTermsQueryDefinition(name: String, text: String)
extends QueryDefinition
with DefinitionAttributeBoost
with DefinitionAttributeCutoffFrequency {
val builder = QueryBuilders.commonTermsQuery(name, text)
val _builder = builder
def queryName(queryName: String): CommonTermsQueryDefinition = {
builder.queryName(queryName)
this
}
def highFreqMinimumShouldMatch(highFreqMinimumShouldMatch: Int): CommonTermsQueryDefinition = {
builder.highFreqMinimumShouldMatch(highFreqMinimumShouldMatch.toString)
this
}
def highFreqOperator(operator: String): CommonTermsQueryDefinition = {
builder.highFreqOperator(if (operator.toLowerCase == "and") Operator.AND else Operator.OR)
this
}
def analyzer(analyzer: Analyzer): CommonTermsQueryDefinition = {
builder.analyzer(analyzer.name)
this
}
def lowFreqMinimumShouldMatch(lowFreqMinimumShouldMatch: Int): CommonTermsQueryDefinition = {
builder.lowFreqMinimumShouldMatch(lowFreqMinimumShouldMatch.toString)
this
}
def lowFreqOperator(operator: String): CommonTermsQueryDefinition = {
builder.lowFreqOperator(if (operator.toLowerCase == "and") Operator.AND else Operator.OR)
this
}
}
class DisMaxDefinition extends QueryDefinition {
val builder = QueryBuilders.disMaxQuery()
def query(queries: QueryDefinition*): DisMaxDefinition = {
queries.foreach(q => builder.add(q.builder))
this
}
def queryName(queryName: String): DisMaxDefinition = {
builder.queryName(queryName)
this
}
def boost(b: Double): DisMaxDefinition = {
builder.boost(b.toFloat)
this
}
def tieBreaker(tieBreaker: Double): DisMaxDefinition = {
builder.tieBreaker(tieBreaker.toFloat)
this
}
}
case class ExistsQueryDefinition(field: String) extends QueryDefinition {
val builder = QueryBuilders.existsQuery(field)
def queryName(name: String): ExistsQueryDefinition = {
builder.queryName(name)
this
}
}
@deprecated("Use boolQuery instead with a must clause for the query and a filter clause for the filter", "2.0.0")
class FilteredQueryDefinition extends QueryDefinition {
def builder = QueryBuilders.filteredQuery(_query, _filter).boost(_boost.toFloat)
private var _query: QueryBuilder = QueryBuilders.matchAllQuery
private var _filter: QueryBuilder = null
private var _boost: Double = -1d
def boost(boost: Double): FilteredQueryDefinition = {
_boost = boost
this
}
def query(query: => QueryDefinition): FilteredQueryDefinition = {
_query = Option(query).map(_.builder).getOrElse(_query)
this
}
def filter(filter: => QueryDefinition): FilteredQueryDefinition = {
_filter = Option(filter).map(_.builder).orNull
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class IdQueryDefinition(ids: Seq[String],
types: Seq[String] = Nil,
boost: Option[Double] = None,
queryName: Option[String] = None) extends QueryDefinition {
def builder = {
val builder = QueryBuilders.idsQuery(types: _*).addIds(ids: _*)
boost.foreach(b => builder.boost(b.toFloat))
queryName.foreach(builder.queryName)
builder
}
def types(types: Iterable[String]): IdQueryDefinition = copy(types = types.toSeq)
def types(first: String, rest: String*): IdQueryDefinition = copy(types = first +: rest)
def queryName(name: String): IdQueryDefinition = copy(queryName = Option(name))
def boost(boost: Double): IdQueryDefinition = copy(boost = Option(boost))
}
class SpanOrQueryDefinition extends SpanQueryDefinition with DefinitionAttributeBoost {
val builder = QueryBuilders.spanOrQuery
val _builder = builder
def clause(spans: SpanTermQueryDefinition*): SpanOrQueryDefinition = {
spans.foreach {
span => builder.clause(span.builder)
}
this
}
}
class SpanTermQueryDefinition(field: String, value: Any) extends SpanQueryDefinition {
val builder = QueryBuilders.spanTermQuery(field, value.toString)
def boost(boost: Double) = {
builder.boost(boost.toFloat)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
trait MultiTermQueryDefinition extends QueryDefinition {
override def builder: MultiTermQueryBuilder
}
case class WildcardQueryDefinition(field: String, query: Any)
extends QueryDefinition
with DefinitionAttributeRewrite
with DefinitionAttributeBoost {
val builder = QueryBuilders.wildcardQuery(field, query.toString)
val _builder = builder
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class PrefixQueryDefinition(field: String, prefix: Any)
extends MultiTermQueryDefinition
with DefinitionAttributeRewrite
with DefinitionAttributeBoost {
val builder = QueryBuilders.prefixQuery(field, prefix.toString)
val _builder = builder
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class RegexQueryDefinition(field: String, regex: Any)
extends MultiTermQueryDefinition
with DefinitionAttributeRewrite
with DefinitionAttributeBoost {
val builder = QueryBuilders.regexpQuery(field, regex.toString)
val _builder = builder
def flags(flags: RegexpFlag*): RegexQueryDefinition = {
builder.flags(flags: _*)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
trait SpanQueryDefinition extends QueryDefinition {
override def builder: SpanQueryBuilder
}
case class SpanFirstQueryDefinition(query: SpanQueryDefinition, end: Int) extends QueryDefinition {
val builder = QueryBuilders.spanFirstQuery(query.builder, end)
}
class SpanNotQueryDefinition extends QueryDefinition {
val builder = QueryBuilders.spanNotQuery()
def boost(boost: Double): this.type = {
builder.boost(boost.toFloat)
this
}
def dist(dist: Int): this.type = {
builder.dist(dist)
this
}
def exclude(query: SpanQueryDefinition): this.type = {
builder.exclude(query.builder)
this
}
def include(query: SpanQueryDefinition): this.type = {
builder.include(query.builder)
this
}
def pre(pre: Int): this.type = {
builder.pre(pre)
this
}
def post(post: Int): this.type = {
builder.post(post)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class SpanMultiTermQueryDefinition(query: MultiTermQueryDefinition) extends SpanQueryDefinition {
override val builder = QueryBuilders.spanMultiTermQueryBuilder(query.builder)
}
class SpanNearQueryDefinition extends SpanQueryDefinition {
val builder = QueryBuilders.spanNearQuery()
def boost(boost: Double): this.type = {
builder.boost(boost.toFloat)
this
}
def inOrder(inOrder: Boolean): this.type = {
builder.inOrder(inOrder)
this
}
def collectPayloads(collectPayloads: Boolean): this.type = {
builder.collectPayloads(collectPayloads)
this
}
def clause(query: SpanQueryDefinition): this.type = {
builder.clause(query.builder)
this
}
def slop(slop: Int): this.type = {
builder.slop(slop)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class TermsLookupQueryDefinition(field: String)
extends QueryDefinition {
val builder = QueryBuilders.termsLookupQuery(field)
val _builder = builder
def queryName(name: String): this.type = {
builder.queryName(name)
this
}
def index(index: String): this.type = {
builder.lookupIndex(index)
this
}
def lookupType(`type`: String): this.type = {
builder.lookupType(`type`)
this
}
def id(id: String): this.type = {
builder.lookupId(id)
this
}
def path(path: String): this.type = {
builder.lookupPath(path)
this
}
def routing(routing: String): this.type = {
builder.lookupRouting(routing)
this
}
}
case class TermQueryDefinition(field: String, value: Any) extends QueryDefinition {
val builder = value match {
case str: String => QueryBuilders.termQuery(field, str)
case iter: Iterable[Any] => QueryBuilders.termQuery(field, iter.toArray)
case other => QueryBuilders.termQuery(field, other)
}
def boost(boost: Double) = {
builder.boost(boost.toFloat)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
trait GenericTermsQueryDefinition extends QueryDefinition {
def builder: TermsQueryBuilder
def boost(boost: Double): this.type = {
builder.boost(boost.toFloat)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class TermsQueryDefinition(field: String, values: Seq[String]) extends GenericTermsQueryDefinition {
val builder: TermsQueryBuilder = QueryBuilders.termsQuery(field, values: _*)
@deprecated("deprecated in elasticsearch", "2.0.0")
def minimumShouldMatch(min: Int): TermsQueryDefinition = minimumShouldMatch(min.toString)
@deprecated("deprecated in elasticsearch", "2.0.0")
def minimumShouldMatch(min: String): TermsQueryDefinition = {
builder.minimumShouldMatch(min)
this
}
@deprecated("deprecated in elasticsearch", "2.0.0")
def disableCoord(disableCoord: Boolean): TermsQueryDefinition = {
builder.disableCoord(disableCoord)
this
}
}
case class IntTermsQueryDefinition(field: String, values: Seq[Int]) extends GenericTermsQueryDefinition {
val builder: TermsQueryBuilder = QueryBuilders.termsQuery(field, values: _*)
}
case class LongTermsQueryDefinition(field: String, values: Seq[Long]) extends GenericTermsQueryDefinition {
val builder: TermsQueryBuilder = QueryBuilders.termsQuery(field, values: _*)
}
case class FloatTermsQueryDefinition(field: String, values: Seq[Float]) extends GenericTermsQueryDefinition {
val builder: TermsQueryBuilder = QueryBuilders.termsQuery(field, values: _*)
}
case class DoubleTermsQueryDefinition(field: String, values: Seq[Double]) extends GenericTermsQueryDefinition {
val builder: TermsQueryBuilder = QueryBuilders.termsQuery(field, values: _*)
}
case class TypeQueryDefinition(`type`: String) extends QueryDefinition {
val builder = QueryBuilders.typeQuery(`type`)
}
case class MatchAllQueryDefinition() extends QueryDefinition {
val builder = QueryBuilders.matchAllQuery
def boost(boost: Double): MatchAllQueryDefinition = {
builder.boost(boost.toFloat)
this
}
}
case class RangeQueryDefinition(field: String) extends MultiTermQueryDefinition with DefinitionAttributeBoost {
val builder = QueryBuilders.rangeQuery(field)
val _builder = builder
def from(f: Any) = {
builder.from(f)
this
}
def to(t: Any) = {
builder.to(t)
this
}
def timeZone(timeZone: String): RangeQueryDefinition = {
builder.timeZone(timeZone)
this
}
def gte(d: String): RangeQueryDefinition = {
builder.gte(d)
this
}
def gte(d: Double): RangeQueryDefinition = {
builder.gte(d)
this
}
def lte(d: String): RangeQueryDefinition = {
builder.lte(d)
this
}
def lte(d: Double): RangeQueryDefinition = {
builder.lte(d)
this
}
def includeLower(includeLower: Boolean) = {
builder.includeLower(includeLower)
this
}
def includeUpper(includeUpper: Boolean) = {
builder.includeUpper(includeUpper)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class MatchQueryDefinition(field: String, value: Any)
extends QueryDefinition
with DefinitionAttributeBoost
with DefinitionAttributeFuzziness
with DefinitionAttributeFuzzyRewrite
with DefinitionAttributePrefixLength
with DefinitionAttributeCutoffFrequency {
val builder = QueryBuilders.matchQuery(field, value)
val _builder = builder
def operator(op: String): MatchQueryDefinition = {
op match {
case "AND" => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.AND)
case _ => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.OR)
}
this
}
def analyzer(a: Analyzer): MatchQueryDefinition = {
builder.analyzer(a.name)
this
}
def zeroTermsQuery(z: MatchQueryBuilder.ZeroTermsQuery) = {
builder.zeroTermsQuery(z)
this
}
def slop(s: Int) = {
builder.slop(s)
this
}
def setLenient(lenient: Boolean) = {
builder.setLenient(lenient)
this
}
def operator(op: MatchQueryBuilder.Operator) = {
builder.operator(op)
this
}
def minimumShouldMatch(a: Any) = {
builder.minimumShouldMatch(a.toString)
this
}
def maxExpansions(max: Int) = {
builder.maxExpansions(max)
this
}
def fuzzyTranspositions(f: Boolean): MatchQueryDefinition = {
builder.fuzzyTranspositions(f)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class MatchPhrasePrefixDefinition(field: String, value: Any)
extends QueryDefinition
with DefinitionAttributeBoost
with DefinitionAttributeFuzziness
with DefinitionAttributeFuzzyRewrite
with DefinitionAttributePrefixLength
with DefinitionAttributeCutoffFrequency {
def builder = _builder
val _builder = QueryBuilders.matchPhrasePrefixQuery(field, value.toString)
def analyzer(a: Analyzer): MatchPhrasePrefixDefinition = {
builder.analyzer(a.name)
this
}
def analyzer(name: String): MatchPhrasePrefixDefinition = {
builder.analyzer(name)
this
}
def zeroTermsQuery(z: MatchQueryBuilder.ZeroTermsQuery): MatchPhrasePrefixDefinition = {
builder.zeroTermsQuery(z)
this
}
def slop(s: Int): MatchPhrasePrefixDefinition = {
builder.slop(s)
this
}
def setLenient(lenient: Boolean): MatchPhrasePrefixDefinition = {
builder.setLenient(lenient)
this
}
def operator(op: MatchQueryBuilder.Operator): MatchPhrasePrefixDefinition = {
builder.operator(op)
this
}
def operator(op: String): MatchPhrasePrefixDefinition = {
op match {
case "AND" => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.AND)
case _ => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.OR)
}
this
}
def minimumShouldMatch(a: Any): MatchPhrasePrefixDefinition = {
builder.minimumShouldMatch(a.toString)
this
}
def maxExpansions(max: Int): MatchPhrasePrefixDefinition = {
builder.maxExpansions(max)
this
}
def fuzzyTranspositions(f: Boolean): MatchPhrasePrefixDefinition = {
builder.fuzzyTranspositions(f)
this
}
}
case class MatchPhraseDefinition(field: String, value: Any)
extends QueryDefinition
with DefinitionAttributeBoost
with DefinitionAttributeFuzziness
with DefinitionAttributeFuzzyRewrite
with DefinitionAttributePrefixLength
with DefinitionAttributeCutoffFrequency {
val builder = QueryBuilders.matchPhraseQuery(field, value.toString)
val _builder = builder
def analyzer(a: Analyzer): MatchPhraseDefinition = {
builder.analyzer(a.name)
this
}
def zeroTermsQuery(z: MatchQueryBuilder.ZeroTermsQuery) = {
builder.zeroTermsQuery(z)
this
}
def slop(s: Int): MatchPhraseDefinition = {
builder.slop(s)
this
}
def setLenient(lenient: Boolean): MatchPhraseDefinition = {
builder.setLenient(lenient)
this
}
def operator(op: MatchQueryBuilder.Operator): MatchPhraseDefinition = {
builder.operator(op)
this
}
def operator(op: String): MatchPhraseDefinition = {
op match {
case "AND" => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.AND)
case _ => builder.operator(org.elasticsearch.index.query.MatchQueryBuilder.Operator.OR)
}
this
}
def minimumShouldMatch(a: Any) = {
builder.minimumShouldMatch(a.toString)
this
}
def maxExpansions(max: Int) = {
builder.maxExpansions(max)
this
}
def fuzzyTranspositions(f: Boolean) = {
builder.fuzzyTranspositions(f)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
@deprecated("Use existsQuery with a mustNot clause", "2.2.0")
case class MissingQueryDefinition(field: String) extends QueryDefinition {
val builder = QueryBuilders.missingQuery(field)
def includeNull(nullValue: Boolean): MissingQueryDefinition = {
builder.nullValue(nullValue)
this
}
def existence(existence: Boolean): MissingQueryDefinition = {
builder.existence(existence)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
}
case class ScriptQueryDefinition(script: ScriptDefinition)
extends QueryDefinition {
val builder = QueryBuilders.scriptQuery(script.toJavaAPI)
val _builder = builder
def queryName(queryName: String): ScriptQueryDefinition = {
builder.queryName(queryName)
this
}
}
case class SimpleStringQueryDefinition(query: String) extends QueryDefinition {
val builder = QueryBuilders.simpleQueryStringQuery(query)
def analyzer(analyzer: String): SimpleStringQueryDefinition = {
builder.analyzer(analyzer)
this
}
def analyzer(analyzer: Analyzer): SimpleStringQueryDefinition = {
builder.analyzer(analyzer.name)
this
}
def queryName(queryName: String): SimpleStringQueryDefinition = {
builder.queryName(queryName)
this
}
def defaultOperator(op: String): SimpleStringQueryDefinition = {
op match {
case "AND" => builder.defaultOperator(SimpleQueryStringBuilder.Operator.AND)
case _ => builder.defaultOperator(SimpleQueryStringBuilder.Operator.OR)
}
this
}
def defaultOperator(d: SimpleQueryStringBuilder.Operator): SimpleStringQueryDefinition = {
builder.defaultOperator(d)
this
}
def asfields(fields: String*): SimpleStringQueryDefinition = {
fields foreach field
this
}
def field(name: String): SimpleStringQueryDefinition = {
builder.field(name)
this
}
def field(name: String, boost: Double): SimpleStringQueryDefinition = {
builder.field(name, boost.toFloat)
this
}
def flags(flags: SimpleQueryStringFlag*): SimpleStringQueryDefinition = {
builder.flags(flags: _*)
this
}
}
case class QueryStringQueryDefinition(query: String)
extends QueryDefinition
with DefinitionAttributeRewrite
with DefinitionAttributeBoost {
val builder = QueryBuilders.queryStringQuery(query)
val _builder = builder
def analyzer(analyzer: String): this.type = {
builder.analyzer(analyzer)
this
}
def analyzer(analyzer: Analyzer): this.type = {
builder.analyzer(analyzer.name)
this
}
def defaultOperator(op: String): this.type = {
op.toUpperCase match {
case "AND" => builder.defaultOperator(QueryStringQueryBuilder.Operator.AND)
case _ => builder.defaultOperator(QueryStringQueryBuilder.Operator.OR)
}
this
}
def defaultOperator(op: QueryStringQueryBuilder.Operator): this.type = {
builder.defaultOperator(op)
this
}
def asfields(fields: String*): this.type = {
fields foreach field
this
}
def lowercaseExpandedTerms(lowercaseExpandedTerms: Boolean): this.type = {
builder.lowercaseExpandedTerms(lowercaseExpandedTerms)
this
}
def queryName(queryName: String): this.type = {
builder.queryName(queryName)
this
}
def fuzzyPrefixLength(fuzzyPrefixLength: Int): this.type = {
builder.fuzzyPrefixLength(fuzzyPrefixLength)
this
}
def fuzzyMaxExpansions(fuzzyMaxExpansions: Int): this.type = {
builder.fuzzyMaxExpansions(fuzzyMaxExpansions)
this
}
def fuzzyRewrite(fuzzyRewrite: String): this.type = {
builder.fuzzyRewrite(fuzzyRewrite)
this
}
def tieBreaker(tieBreaker: Double): this.type = {
builder.tieBreaker(tieBreaker.toFloat)
this
}
def allowLeadingWildcard(allowLeadingWildcard: Boolean): this.type = {
builder.allowLeadingWildcard(allowLeadingWildcard)
this
}
def lenient(lenient: Boolean): this.type = {
builder.lenient(lenient)
this
}
def minimumShouldMatch(minimumShouldMatch: Int): this.type = {
builder.minimumShouldMatch(minimumShouldMatch.toString)
this
}
def enablePositionIncrements(enablePositionIncrements: Boolean): this.type = {
builder.enablePositionIncrements(enablePositionIncrements)
this
}
def quoteFieldSuffix(quoteFieldSuffix: String): this.type = {
builder.quoteFieldSuffix(quoteFieldSuffix)
this
}
def field(name: String): this.type = {
builder.field(name)
this
}
def field(name: String, boost: Double): this.type = {
builder.field(name, boost.toFloat)
this
}
def defaultField(field: String): this.type = {
builder.defaultField(field)
this
}
def analyzeWildcard(analyzeWildcard: Boolean): this.type = {
builder.analyzeWildcard(analyzeWildcard)
this
}
def autoGeneratePhraseQueries(autoGeneratePhraseQueries: Boolean): this.type = {
builder.autoGeneratePhraseQueries(autoGeneratePhraseQueries)
this
}
def operator(op: String): this.type = {
op.toLowerCase match {
case "and" => builder.defaultOperator(QueryStringQueryBuilder.Operator.AND)
case _ => builder.defaultOperator(QueryStringQueryBuilder.Operator.OR)
}
this
}
def phraseSlop(phraseSlop: Int): QueryStringQueryDefinition = {
builder.phraseSlop(phraseSlop)
this
}
}
case class NestedQueryDefinition(path: String,
query: QueryDefinition,
boost: Option[Double] = None,
inner: Option[QueryInnerHitBuilder] = None,
queryName: Option[String] = None,
scoreMode: Option[String] = None) extends QueryDefinition {
require(query != null, "must specify query for nested score query")
def builder: NestedQueryBuilder = {
val builder = QueryBuilders.nestedQuery(path, query.builder)
boost.foreach(b => builder.boost(b.toFloat))
scoreMode.foreach(builder.scoreMode)
inner.foreach(builder.innerHit)
queryName.foreach(builder.queryName)
builder
}
def inner(name: String): NestedQueryDefinition = copy(inner = Option(new QueryInnerHitBuilder().setName(name)))
def inner(inner: QueryInnerHitsDefinition): NestedQueryDefinition = copy(inner = Option(inner.builder))
def scoreMode(scoreMode: String): NestedQueryDefinition = copy(scoreMode = Option(scoreMode))
def boost(b: Double): NestedQueryDefinition = copy(boost = Option(b))
def queryName(queryName: String): NestedQueryDefinition = copy(queryName = Option(queryName))
}
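// Hedged usage sketch (object name and field values are illustrative): nestedQuery in
// the DSL above returns a small expects-query helper, and NestedQueryDefinition collects
// its options immutably via copy, only building the underlying NestedQueryBuilder when
// .builder is invoked.
object NestedQueryUsageExample extends QueryDsl {
  val booksAboutScala: NestedQueryDefinition =
    nestedQuery("books")
      .query(matchQuery("books.title", "scala"))
      .scoreMode("avg")       // average the scores of matching nested documents
      .inner("matched-books") // request inner hits under an assumed name
}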
@deprecated("use bool query with a mustNot clause", "2.1.1")
class NotQueryDefinition(filter: QueryDefinition)
extends QueryDefinition {
val builder = QueryBuilders.notQuery(filter.builder)
val _builder = builder
def queryName(queryName: String): NotQueryDefinition = {
builder.queryName(queryName)
this
}
}
case class QueryInnerHitsDefinition(private[elastic4s] val name: String) {
private[elastic4s] val builder = new QueryInnerHitBuilder().setName(name)
private var includes: Array[String] = Array.empty
private var excludes: Array[String] = Array.empty
def from(f: Int): this.type = {
builder.setFrom(f)
this
}
def size(s: Int): this.type = {
builder.setSize(s)
this
}
def highlighting(highlights: HighlightDefinition*): this.type = {
highlights.foreach(highlight => builder.addHighlightedField(highlight.builder))
this
}
def fetchSource(fetch: Boolean): this.type = {
builder.setFetchSource(fetch)
this
}
def sourceInclude(includes: String*): this.type = {
this.includes = includes.toArray
builder.setFetchSource(this.includes, excludes)
this
}
def sourceExclude(excludes: String*): this.type = {
this.excludes = excludes.toArray
builder.setFetchSource(includes, this.excludes)
this
}
}
case class InnerHitDefinition(private[elastic4s] val name: String) {
private[elastic4s] val inner = new InnerHit
def path(p: String): this.type = {
inner.setPath(p)
this
}
def `type`(t: String): this.type = {
inner.setType(t)
this
}
def highlighting(highlights: HighlightDefinition*): this.type = {
highlights.foreach(highlight => inner.addHighlightedField(highlight.builder))
this
}
}
| sjoerdmulder/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/queries.scala | Scala | apache-2.0 | 49,125 |