code (string, 5-1M) | repo_name (string, 5-109) | path (string, 6-208) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64, 5-1M)
---|---|---|---|---|---|
object Main {
  def main(args: Array[String]) {
    val t = readLine().toInt
    // memo(n) caches n!; BigInt avoids overflow for n up to 100.
    val memo = Array.fill[BigInt](101)(0)
    memo(1) = 1
    def allFactorials(t: Int, memo: Array[BigInt]) {
      def factorial(n: Int): BigInt = {
        if (memo(n) == 0 && n != 1) {
          memo(n) = n * factorial(n - 1)
        }
        memo(n)
      }
      for (i <- 1 to t) {
        println(factorial(readLine().toInt))
      }
    }
    allFactorials(t, memo)
  }
}
| DavidOrchard/codechef | smallfactorial/Main.scala | Scala | mit | 482 |
/**
* Copyright 2014 Frank Austin Nothaft
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.snark
import org.scalatest.FunSuite
class DenseArrayStructureSuite extends FunSuite {
test("cannot create a structure without a level of hierarchy") {
intercept[AssertionError] {
new DenseArrayStructure(Seq[Long]())
}
}
test("check to make sure that we return the number of elements correctly") {
val a1 = new DenseArrayStructure(Seq(4L, 4L, 5L))
assert(a1.elements === 13L)
}
}
| fnothaft/snark | snark-core/src/test/scala/net/fnothaft/snark/DenseArrayStructureSuite.scala | Scala | apache-2.0 | 1,040 |
package org.jetbrains.plugins.scala.lang.completion
import com.intellij.codeInsight.completion.{CompletionResultSet, InsertHandler}
import com.intellij.codeInsight.lookup.{AutoCompletionPolicy, LookupElement, LookupElementPresentation, LookupElementRenderer}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.psi.search.searches.ClassInheritorsSearch
import com.intellij.psi.search.{GlobalSearchScope, LocalSearchScope}
import com.intellij.psi.{PsiClass, PsiDocCommentOwner, PsiElement, PsiNamedElement}
import com.intellij.util.Processor
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.completion.handlers.ScalaConstructorInsertHandler
import org.jetbrains.plugins.scala.lang.completion.lookups.ScalaLookupItem
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScSimpleTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScStableCodeReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScNewTemplateDefinition
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.{ScClassParents, ScExtendsBlock}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScObject, ScTrait}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.resolve.ResolveUtils
import scala.collection.mutable
/**
* @author Alefas
* @since 27.03.12
*/
object ScalaAfterNewCompletionUtil {
lazy val afterNewPattern = ScalaSmartCompletionContributor.superParentsPattern(classOf[ScStableCodeReferenceElement],
classOf[ScSimpleTypeElement], classOf[ScConstructor], classOf[ScClassParents], classOf[ScExtendsBlock], classOf[ScNewTemplateDefinition])
def getLookupElementFromClass(expectedTypes: Array[ScType], clazz: PsiClass,
renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]): LookupElement = {
val undefines: Seq[ScUndefinedType] = clazz.getTypeParameters.map(ptp =>
new ScUndefinedType(new ScTypeParameterType(ptp, ScSubstitutor.empty))
)
val predefinedType =
if (clazz.getTypeParameters.length == 1) {
ScParameterizedType(ScDesignatorType(clazz), undefines)
}
else
ScDesignatorType(clazz)
val noUndefType =
if (clazz.getTypeParameters.length == 1) {
ScParameterizedType(ScDesignatorType(clazz), clazz.getTypeParameters.map(ptp =>
new ScTypeParameterType(ptp, ScSubstitutor.empty)
))
}
else
ScDesignatorType(clazz)
val iterator = expectedTypes.iterator
while (iterator.hasNext) {
val typez = iterator.next()
if (predefinedType.conforms(typez)) {
val undef = Conformance.undefinedSubst(typez, predefinedType)
undef.getSubstitutor match {
case Some(subst) =>
val lookupElement = getLookupElementFromTypeAndClass(subst.subst(noUndefType), clazz,
ScSubstitutor.empty, new AfterNewLookupElementRenderer(_, _, _), new ScalaConstructorInsertHandler, renamesMap)
for (undefine <- undefines) {
subst.subst(undefine) match {
case ScUndefinedType(_) =>
lookupElement.typeParametersProblem = true
case _ =>
}
}
return lookupElement
case _ =>
}
}
}
val lookupElement = getLookupElementFromTypeAndClass(noUndefType, clazz, ScSubstitutor.empty,
new AfterNewLookupElementRenderer(_, _, _), new ScalaConstructorInsertHandler, renamesMap)
if (undefines.nonEmpty) {
lookupElement.typeParametersProblem = true
}
lookupElement
}
class AfterNewLookupElementRenderer(tp: ScType, psiClass: PsiClass,
subst: ScSubstitutor) extends LookupElementRenderer[LookupElement] {
def renderElement(ignore: LookupElement, presentation: LookupElementPresentation) {
var isDeprecated = false
psiClass match {
case doc: PsiDocCommentOwner if doc.isDeprecated => isDeprecated = true
case _ =>
}
var tailText: String = ""
val itemText: String = psiClass.name + (tp match {
case ScParameterizedType(_, tps) =>
tps.map(tp => ScType.presentableText(subst.subst(tp))).mkString("[", ", ", "]")
case _ => ""
})
psiClass match {
case clazz: PsiClass =>
if (psiClass.isInterface || psiClass.isInstanceOf[ScTrait] ||
psiClass.hasModifierPropertyScala("abstract")) {
tailText += " {...}"
}
val location: String = clazz.getPresentation.getLocationString
presentation.setTailText(tailText + " " + location, true)
case _ =>
}
presentation.setIcon(psiClass.getIcon(0))
presentation.setStrikeout(isDeprecated)
presentation.setItemText(itemText)
}
}
private def getLookupElementFromTypeAndClass(tp: ScType, psiClass: PsiClass, subst: ScSubstitutor,
renderer: (ScType, PsiClass, ScSubstitutor) => LookupElementRenderer[LookupElement],
insertHandler: InsertHandler[LookupElement],
renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]): ScalaLookupItem = {
val name: String = psiClass.name
val isRenamed = renamesMap.filter {
case (aName, (renamed, aClazz)) => aName == name && aClazz == psiClass
}.map(_._2._1).headOption
val lookupElement: ScalaLookupItem = new ScalaLookupItem(psiClass, isRenamed.getOrElse(name)) {
override def renderElement(presentation: LookupElementPresentation) {
renderer(tp, psiClass, subst).renderElement(this, presentation)
isRenamed match {
case Some(nme) => presentation.setItemText(nme + " <= " + presentation.getItemText)
case _ =>
}
}
}
lookupElement.isRenamed = isRenamed
if (ApplicationManager.getApplication.isUnitTestMode || psiClass.isInterface ||
psiClass.isInstanceOf[ScTrait] || psiClass.hasModifierPropertyScala("abstract"))
lookupElement.setAutoCompletionPolicy(if (ApplicationManager.getApplication.isUnitTestMode) AutoCompletionPolicy.ALWAYS_AUTOCOMPLETE
else AutoCompletionPolicy.NEVER_AUTOCOMPLETE)
val qualName = psiClass.qualifiedName
if (ScalaCodeStyleSettings.getInstance(psiClass.getProject).hasImportWithPrefix(qualName)) {
lookupElement.prefixCompletion = true
}
lookupElement.setInsertHandler(new ScalaConstructorInsertHandler)
tp match {
case ScParameterizedType(_, tps) => lookupElement.typeParameters = tps
case _ =>
}
lookupElement
}
def convertTypeToLookupElement(tp: ScType, place: PsiElement, addedClasses: mutable.HashSet[String],
renderer: (ScType, PsiClass, ScSubstitutor) => LookupElementRenderer[LookupElement],
insertHandler: InsertHandler[LookupElement],
renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]): ScalaLookupItem = {
ScType.extractClassType(tp, Some(place.getProject)) match {
case Some((clazz: PsiClass, subst: ScSubstitutor)) =>
//filter base types (it's important for scala 2.9)
clazz.qualifiedName match {
case "scala.Boolean" | "scala.Int" | "scala.Long" | "scala.Byte" | "scala.Short" | "scala.AnyVal" |
"scala.Char" | "scala.Unit" | "scala.Float" | "scala.Double" | "scala.Any" => return null
case _ =>
}
//todo: filter inner classes smarter (how? don't forget deep inner classes)
if (clazz.containingClass != null && (!clazz.containingClass.isInstanceOf[ScObject] ||
clazz.hasModifierPropertyScala("static"))) return null
if (!ResolveUtils.isAccessible(clazz, place, forCompletion = true)) return null
if (addedClasses.contains(clazz.qualifiedName)) return null
addedClasses += clazz.qualifiedName
getLookupElementFromTypeAndClass(tp, clazz, subst, renderer, insertHandler, renamesMap)
case _ => null
}
}
def collectInheritorsForType(typez: ScType, place: PsiElement, addedClasses: mutable.HashSet[String],
result: CompletionResultSet,
renderer: (ScType, PsiClass, ScSubstitutor) => LookupElementRenderer[LookupElement],
insertHandler: InsertHandler[LookupElement], renamesMap: mutable.HashMap[String, (String, PsiNamedElement)]) {
ScType.extractClassType(typez, Some(place.getProject)) match {
case Some((clazz, subst)) =>
//this change is important for Scala Worksheet/Script classes. Will not find inheritors, due to file copy.
val searchScope =
if (clazz.getUseScope.isInstanceOf[LocalSearchScope]) GlobalSearchScope.allScope(place.getProject)
else clazz.getUseScope
ClassInheritorsSearch.search(clazz, searchScope, true).forEach(new Processor[PsiClass] {
def process(clazz: PsiClass): Boolean = {
if (clazz.name == null || clazz.name == "") return true
val undefines: Seq[ScUndefinedType] = clazz.getTypeParameters.map(ptp =>
new ScUndefinedType(new ScTypeParameterType(ptp, ScSubstitutor.empty))
)
val predefinedType =
if (clazz.getTypeParameters.nonEmpty) {
ScParameterizedType(ScDesignatorType(clazz), undefines)
}
else
ScDesignatorType(clazz)
val noUndefType =
if (clazz.getTypeParameters.nonEmpty) {
ScParameterizedType(ScDesignatorType(clazz), clazz.getTypeParameters.map(ptp =>
new ScTypeParameterType(ptp, ScSubstitutor.empty)
))
}
else
ScDesignatorType(clazz)
if (!predefinedType.conforms(typez)) return true
val undef = Conformance.undefinedSubst(typez, predefinedType)
undef.getSubstitutor match {
case Some(undefSubst) =>
val lookupElement = convertTypeToLookupElement(undefSubst.subst(noUndefType), place, addedClasses,
renderer, insertHandler, renamesMap)
if (lookupElement != null) {
for (undefine <- undefines) {
undefSubst.subst(undefine) match {
case ScUndefinedType(_) =>
lookupElement.typeParametersProblem = true
case _ =>
}
}
result.addElement(lookupElement)
}
case _ =>
}
true
}
})
case _ =>
}
}
}
| advancedxy/intellij-scala | src/org/jetbrains/plugins/scala/lang/completion/ScalaAfterNewCompletionUtil.scala | Scala | apache-2.0 | 10,948 |
package me.jamesphiliprobinson.utilities.time
import org.scalatest.{FunSuite, Matchers}
/**
* Created by James Robinson on 03/04/2016.
*/
class DefaultSleepTimerSpecImpl extends FunSuite with Matchers {
test("Can sleep for the time required following creation") {
val timer = new DefaultSleepTimerImpl(100, 50)
val time = System.currentTimeMillis
timer.sleep
val timePassed = System.currentTimeMillis - time
timePassed should be >= 100L
timePassed should be < 110L
}
test("Can sleep for the time required following use") {
val timer = new DefaultSleepTimerImpl(100, 50)
timer.sleep
val time = System.currentTimeMillis
timer.sleep
val timePassed = System.currentTimeMillis - time
timePassed should be >= 100L
timePassed should be < 110L
}
test("Can sleep for the time min required following an over sleep after creation") {
val timer = new DefaultSleepTimerImpl(100, 50)
Thread sleep 150L
val time = System.currentTimeMillis
timer.sleep
val timePassed = System.currentTimeMillis - time
timePassed should be >= 50L
timePassed should be < 60L
}
test("Can sleep for the time min required following an over sleep") {
val timer = new DefaultSleepTimerImpl(100, 50)
timer.sleep
Thread sleep 150L
val time = System.currentTimeMillis
timer.sleep
val timePassed = System.currentTimeMillis - time
timePassed should be >= 50L
timePassed should be < 60L
}
test("Can sleep for the time required following reset") {
val timer = new DefaultSleepTimerImpl(100, 50)
timer.sleep
timer.sleep
Thread sleep 150L
timer.sleep
Thread sleep 150L
timer.reset
val time = System.currentTimeMillis
timer.sleep
val timePassed = System.currentTimeMillis - time
timePassed should be >= 100L
timePassed should be < 110L
}
test("Can sleep without interrupted exception") {
val timer = new DefaultSleepTimerImpl(100, 50)
val thread = Thread.currentThread
val time = System.currentTimeMillis
new Thread(new Runnable {
override def run = {
Thread sleep 30L
thread.interrupt
}
}).start
timer.sleep
val taken = System.currentTimeMillis - time
taken should be >= 30L
taken should be < 40L
}
test("Can sleep with interrupted exception") {
val timer = new DefaultSleepTimerImpl(100, 50)
val thread = Thread.currentThread
new Thread(new Runnable {
override def run = {
Thread sleep 30L
thread.interrupt
}
}).start
intercept[InterruptedException] {
timer.sleepWithInterruptedException
}
}
test("Can stay asleep for a little longer if reset part way through") {
val timer = new DefaultSleepTimerImpl(100, 50)
val time = System.currentTimeMillis
new Thread(new Runnable {
override def run = {
Thread sleep 50
timer.reset
}
}).start
timer.sleep
val timePassed = System.currentTimeMillis - time
timePassed should be >= 150L
timePassed should be < 160L
}
}
| etothepii/utilities | src/test/scala/me/jamesphiliprobinson/utilities/time/DefaultSleepTimerSpecImpl.scala | Scala | apache-2.0 | 3,096 |
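The class under test here, DefaultSleepTimerImpl, is not included in this snippet. Purely for orientation, below is a rough sketch of the interval contract implied by the first four tests, assuming a (normalMs, minMs) constructor: sleep out roughly the normal interval between calls, but only the minimum once the caller has already overslept. It deliberately ignores the reset-during-sleep behaviour exercised by the last test.

```scala
// Hypothetical sketch only; this is not the library's actual DefaultSleepTimerImpl.
class SleepTimerSketch(normalMs: Long, minMs: Long) {
  private var last = System.currentTimeMillis

  def reset(): Unit = { last = System.currentTimeMillis }

  def sleep(): Unit = {
    val elapsed = System.currentTimeMillis - last
    // Sleep out the remainder of the normal interval, but never less than the minimum.
    val toSleep = math.max(minMs, normalMs - elapsed)
    try Thread.sleep(toSleep)
    catch { case _: InterruptedException => () } // swallowed, matching the non-throwing `sleep` in the tests
    last = System.currentTimeMillis
  }
}
```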
/**
* Copyright (C) 2017 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.greeter
import javax.servlet.ServletContext
import nl.knaw.dans.lib.logging.DebugEnhancedLogging
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.ServletContextHandler
import org.scalatra._
import org.scalatra.servlet.ScalatraListener
import scala.util.Try
class GreeterService extends GreeterApp with DebugEnhancedLogging { app =>
import logger._
validateSettings()
private val port = properties.getInt("daemon.http.port")
private val server = new Server(port)
new ServletContextHandler(ServletContextHandler.NO_SESSIONS) {
setInitParameter(ScalatraListener.LifeCycleKey, "nl.knaw.dans.easy.greeter.ServletMounter")
setAttribute(CONTEXT_ATTRIBUTE_APPLICATION, app)
addEventListener(new ScalatraListener())
server.setHandler(this)
}
logger.info(s"HTTP port is $port")
def start(): Try[Unit] = Try {
info("Starting HTTP service ...")
server.start()
}
def stop(): Try[Unit] = Try {
info("Stopping HTTP service ...")
server.stop()
}
def destroy(): Try[Unit] = Try {
server.destroy()
}
}
class ServletMounter extends LifeCycle {
override def init(context: ServletContext): Unit = {
context.getAttribute(CONTEXT_ATTRIBUTE_APPLICATION) match {
case app: GreeterApp =>
context.mount(GreeterServlet(app), "/")
case _ => throw new IllegalStateException("Service not configured: no GreeterApp found")
}
}
}
| rvanheest/easy-greeter | src/main/scala/nl/knaw/dans/easy/greeter/GreeterService.scala | Scala | apache-2.0 | 2,107 |
package games.audio
import java.io.InputStream
import com.jcraft.jogg.Packet
import com.jcraft.jogg.Page
import com.jcraft.jogg.StreamState
import com.jcraft.jogg.SyncState
import com.jcraft.jorbis.DspState
import com.jcraft.jorbis.Block
import com.jcraft.jorbis.Info
import com.jcraft.jorbis.Comment
import java.io.EOFException
import java.io.FilterInputStream
import java.io.IOException
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.io.Closeable
class VorbisDecoder private[games] (var in: InputStream, conv: Converter) extends Closeable {
private val packet = new Packet
private val page = new Page
private val streamState = new StreamState
private val syncState = new SyncState
private val dspState = new DspState
private val block = new Block(dspState)
private val comment = new Comment
private val info = new Info
private var firstPage = true
private var lastPage = false
private val readBufferSize = 4096
private def getNextPage(): Page = {
syncState.pageout(page) match {
case 0 => // need more data
val index = syncState.buffer(readBufferSize)
val buffer = syncState.data
var read = in.read(buffer, index, readBufferSize)
if (read < 0) {
if (!lastPage) { System.err.println("Warning: End of stream reached before EOS page") }
throw new EOFException()
}
val code = syncState.wrote(read)
if (code < 0) throw new RuntimeException("Could not load the buffer. Code " + code)
else getNextPage() // once the buffer is loaded successfully, try again
case 1 => // page ok
if (firstPage) {
firstPage = false
streamState.init(page.serialno())
val code = streamState.reset()
if (code < 0) throw new RuntimeException("Could not reset streamState. Code " + code)
info.init()
comment.init()
}
if (lastPage) System.err.println("Warning: EOS page already reached")
else lastPage = page.eos() != 0
page
case x => throw new RuntimeException("Could not retrieve page from buffer. Code " + x)
}
}
def getNextPacket(): Packet = streamState.packetout(packet) match {
case 0 => // need a new page
val code = streamState.pagein(getNextPage())
if (code < 0) throw new RuntimeException("Could not load the page. Code " + code)
else getNextPacket() // once a new page is loaded successfully, try again
case 1 => packet // packet ok
case x => throw new RuntimeException("Could not retrieve packet from page. Code " + x)
}
init()
private def init() {
try {
syncState.init()
for (i <- 1 to 3) { // Decode the three header packets
val code = info.synthesis_headerin(comment, getNextPacket())
if (code < 0) throw new RuntimeException("Could not synthesize the info. Code " + code)
}
if (dspState.synthesis_init(info) < 0) throw new RuntimeException("Could not init DspState")
block.init(dspState)
pcmIn = new Array[Array[Array[Float]]](1)
indexIn = new Array[Int](info.channels)
} catch {
case e: Exception => throw new RuntimeException("Could not init the decoder", e)
}
}
def rate: Int = info.rate
def channels: Int = info.channels
private var pcmIn: Array[Array[Array[Float]]] = _
private var indexIn: Array[Int] = _
private var remainingSamples = 0
private var samplesRead = 0
private def decodeNextPacket(): Unit = {
if (dspState.synthesis_read(samplesRead) < 0) throw new RuntimeException("Could not acknowledge read samples")
samplesRead = 0
if (block.synthesis(this.getNextPacket()) < 0) throw new RuntimeException("Could not synthesize the block from packet")
if (dspState.synthesis_blockin(block) < 0) throw new RuntimeException("Could not synthesize dspState from block")
val availableSamples = dspState.synthesis_pcmout(pcmIn, indexIn)
if (availableSamples < 0) throw new RuntimeException("Could not decode the block")
//else if (availableSamples == 0) System.err.println("Warning: 0 samples decoded")
remainingSamples = availableSamples
}
def read(out: ByteBuffer): Int = {
while (remainingSamples <= 0) {
decodeNextPacket()
}
def loop(count: Int): Int = {
if (remainingSamples <= 0 || !(out.remaining() >= info.channels * conv.bytePerValue)) {
count
} else {
for (channelNo <- 0 until info.channels) {
val value = pcmIn(0)(channelNo)(indexIn(channelNo) + samplesRead)
conv(value, out)
}
samplesRead += 1
remainingSamples -= 1
loop(count + 1)
}
}
loop(0) * conv.bytePerValue * info.channels
}
def readFully(out: ByteBuffer): Int = {
if (out.remaining() % (info.channels * conv.bytePerValue) != 0) throw new RuntimeException("Buffer capacity incorrect (remaining " + out.remaining() + ", required multiple of " + (info.channels * conv.bytePerValue) + ")")
var total = 0
while (out.remaining() > 0) {
total += read(out)
}
total
}
def close(): Unit = {
streamState.clear()
block.clear()
dspState.clear()
info.clear()
syncState.clear()
in.close()
}
}
| joelross/scalajs-games | demo/jvm/src/main/scala/games/audio/VorbisDecoder.scala | Scala | bsd-3-clause | 5,261 |
package contege
import scala.collection.mutable.Set
import java.lang.reflect._
/**
* Reads methods, constructors, and fields of a class,
* ignoring all members that can't be called in a generated test
* (i.e., only public, non-abstract etc. members are considered).
*/
class ClassReader(val cls: Class[_]) {
def readMethodAtoms = {
val atoms = Set[MethodAtom]()
cls.getMethods.foreach(m => if (Modifier.isPublic(m.getModifiers) &&
!m.isSynthetic &&
!Modifier.isAbstract(m.getModifiers) &&
m.getName != "XXXmyClinitXXX" &&
m.getDeclaringClass.getName != "java.lang.Object" &&
!ExcludedMethods.methods.contains(m.toString)) atoms.add(new MethodAtom(cls, m)))
atoms.toSeq.sortWith((x,y) => x.toString < y.toString)
}
def readConstructorAtoms = {
val atoms = Set[ConstructorAtom]()
if (!Modifier.isAbstract(cls.getModifiers)) {
cls.getConstructors.foreach(c => if (Modifier.isPublic(c.getModifiers) &&
!c.isSynthetic &&
!Modifier.isAbstract(c.getModifiers)) atoms += new ConstructorAtom(cls, c))
}
atoms.toSeq.sortWith((x,y) => x.toString < y.toString)
}
def readFieldGetterAtoms = {
val atoms = Set[FieldGetterAtom]()
cls.getFields.foreach(f => if (Modifier.isPublic(f.getModifiers) &&
!f.isSynthetic &&
!Modifier.isAbstract(f.getModifiers)) atoms += new FieldGetterAtom(cls, f))
atoms.toSeq.sortWith((x,y) => x.toString < y.toString)
}
}
| michaelpradel/ConTeGe | src/contege/ClassReader.scala | Scala | gpl-2.0 | 1,554 |
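A minimal usage sketch for the ClassReader above, assuming the rest of the contege package (MethodAtom, ConstructorAtom, FieldGetterAtom, ExcludedMethods) is on the classpath; java.util.ArrayList is only an arbitrary example class:

```scala
import contege.ClassReader

object ClassReaderDemo {
  def main(args: Array[String]): Unit = {
    // Inspect a familiar JDK class; any concrete public class works here.
    val reader = new ClassReader(classOf[java.util.ArrayList[_]])

    // Each read* method returns atoms sorted by their string representation.
    reader.readConstructorAtoms.foreach(println)
    reader.readMethodAtoms.take(5).foreach(println)
    reader.readFieldGetterAtoms.foreach(println)
  }
}
```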
package controllers.api
import controllers.Application
import db._
import models.frontend.AccountReceivedFromFrontend
import play.api.libs.json._
import play.api.mvc.{Action, Controller}
import services.EmailService
object AccountApi extends Controller {
val httpStatusCodeEmailAlreadyRegistered = 230
val httpStatusCodeLinkedInAccountIdAlreadyRegistered = 231
def create = Action(parse.json) { request =>
Application.getAccountId(request.session) match {
case None => BadRequest("Account ID not found in session")
case Some(accountId) =>
request.body.validate[AccountReceivedFromFrontend] match {
case s: JsSuccess[AccountReceivedFromFrontend] =>
val frontendAccount = s.get
try {
// We create the new account and retrieve its ID
AccountDto.create(frontendAccount.emailAddress, frontendAccount.linkedInAccountId) match {
case Some(newAccountId) =>
// We update temporary user data to final user data
AccountDataDto.updateAccountId(accountId, newAccountId)
// We delete the old account
AccountDto.deleteOfId(accountId)
EmailService.sendWelcomeEmail(frontendAccount.emailAddress)
AccountDataDto.getOfAccountId(newAccountId) match {
case Some(accountData) => EmailService.sendAccountDataUpdatedEmail(frontendAccount.emailAddress, accountData)
case None =>
}
val jsonToReturn = JsObject(Seq(
"accountId" -> JsNumber(newAccountId),
"accountData" -> AccountDataDto.getOfAccountId(newAccountId).getOrElse(JsNull)
))
Ok(jsonToReturn).withSession(request.session +("accountId", newAccountId.toString))
case None => InternalServerError("FATAL ERROR: AccountDto.create() did not return an ID")
}
} catch {
case eare: EmailAlreadyRegisteredException => Status(httpStatusCodeEmailAlreadyRegistered)("This email is already registered")
case liaiare: LinkedInAccountIdAlreadyRegisteredException => Status(httpStatusCodeLinkedInAccountIdAlreadyRegistered)("This LinkedIn account ID is already registered")
}
case e: JsError => BadRequest("Validation of AccountReceivedFromFrontend failed")
}
}
}
def get(linkedinAccountId: String) = Action { request =>
AccountDto.getOfLinkedinAccountId(linkedinAccountId) match {
case None => NoContent
case Some(account) => Ok(Json.toJson(account))
}
}
}
| PanzerKunst/workdimension | website/app/controllers/api/AccountApi.scala | Scala | gpl-3.0 | 2,685 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.server
import java.util.concurrent.atomic.AtomicBoolean
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.scaladsl.{ Flow, Sink, Source }
import akka.stream.{ ActorMaterializer, Materializer }
import com.lightbend.lagom.internal.scaladsl.server.ScaladslServiceRouter
import com.lightbend.lagom.scaladsl.api.Service
import com.lightbend.lagom.scaladsl.api.transport._
import com.lightbend.lagom.scaladsl.server.mocks._
import com.lightbend.lagom.scaladsl.server.testkit.FakeRequest
import org.scalatest.{ Assertion, AsyncFlatSpec, BeforeAndAfterAll, Matchers }
import play.api.http.HttpConfiguration
import play.api.http.websocket.{ Message, TextMessage }
import play.api.mvc
import play.api.mvc.{ Handler, PlayBodyParsers }
import scala.concurrent.{ ExecutionContext, Future }
/**
* This test relies on DefaultExceptionSerializer so in case of failure some information is lost on de/ser. Check the
* status code of the response (won't be 200) and locate the suspect line of code where that status code is launched.
*/
class ScaladslStreamedServiceRouterSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {
private val system = ActorSystem("ScaladslServiceRouterSpec")
private implicit val ec: ExecutionContext = system.dispatcher
private implicit val mat: Materializer = ActorMaterializer.create(system)
override protected def afterAll(): Unit = {
system.terminate()
super.afterAll()
}
behavior of "ScaladslServiceRouter"
it should "serve a non-filtered Streamed request" in {
val atomicBoolean = new AtomicBoolean(false)
// this test is canary
val service = new SimpleStreamedService {
override def streamed(): ServerServiceCall[Source[String, NotUsed], Source[String, NotUsed]] = ServerServiceCall { (headers, _) =>
atomicBoolean.compareAndSet(false, true)
Future.successful((ResponseHeader.Ok, Source.single("unused")))
}
}
val x: mvc.WebSocket => mvc.RequestHeader => Future[WSFlow] =
(websocket) => (rh) => websocket(rh).map(_.right.get)
runRequest(service)(x) {
atomicBoolean.get() should be(true)
}
}
// this test can only assert that play filters and lagom filters were invoked and request headers made its way into
// the service implementation layer but since this test is not running a fully fledged HTTP server
// we can't run assertions over the response headers. The expected behavior is that both play and lagom filters
// are invoked while processing the response but Play doesn't support adding custom headers on a websocket handshake.
ignore should "propagate headers added by a Play Filter down to the ServiceImpl. [Streamed message]"
// this test can only assert that play filters and lagom filters were invoked and request headers made its way into
// the service implementation layer but since this test is not running a fully fledged HTTP server
// we can't run assertions over the response headers. The expected behavior is that both play and lagom filters
// are invoked while processing the response but Play doesn't support adding custom headers on a websocket handshake.
ignore should "propagate headers added by a Play Filter and a Lagom HeaderFilter down to the ServiceImpl (invoking Play Filter first). [String message]"
type WSFlow = Flow[Message, Message, _]
// ---------------------------------------------------------------------------------------------------
private def runRequest(service: Service)(x: mvc.WebSocket => mvc.RequestHeader => Future[WSFlow])(block: => Assertion): Future[Assertion] = {
val httpConfig = HttpConfiguration.createWithDefaults()
val parsers = PlayBodyParsers()
val router = new ScaladslServiceRouter(service.descriptor, service, httpConfig, parsers)
val req: mvc.Request[NotUsed] = new FakeRequest(method = "GET", path = PathProvider.PATH)
val handler = router.routes(req)
val futureResult: Future[WSFlow] = Handler.applyStages(req, handler) match {
case (_, action: mvc.WebSocket) => x(action)(req)
case _ => Future.failed(new AssertionError("Not a WebSocket."))
}
futureResult flatMap {
_.runWith(Source.single(TextMessage("41")), Sink.ignore)._2.map {
_ => block
}
}
}
}
| rstento/lagom | service/scaladsl/server/src/test/scala/com/lightbend/lagom/scaladsl/server/ScaladslStreamedServiceRouterSpec.scala | Scala | apache-2.0 | 4,417 |
package skuber
import skuber.json.format.serviceFmt
import org.scalatest.Matchers
import org.scalatest.concurrent.Eventually
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.{Failure, Random, Success}
class ServiceSpec extends K8SFixture with Eventually with Matchers {
val nginxServiceName: String = Random.alphanumeric.filter(_.isLetter).take(20).mkString.toLowerCase
behavior of "Service"
it should "create a service" in { k8s =>
k8s.create(getService(nginxServiceName)) map { p =>
assert(p.name == nginxServiceName)
}
}
it should "get the newly created service" in { k8s =>
k8s.get[Service](nginxServiceName) map { d =>
assert(d.name == nginxServiceName)
// Default ServiceType is ClusterIP
assert(d.spec.map(_._type) == Option(Service.Type.ClusterIP))
}
}
it should "delete a service" in { k8s =>
k8s.delete[Service](nginxServiceName).map { _ =>
eventually(timeout(100.seconds), interval(3.seconds)) {
val retrieveService = k8s.get[Service](nginxServiceName)
val serviceRetrieved = Await.ready(retrieveService, 2.seconds).value.get
serviceRetrieved match {
case s: Success[_] => assert(false)
case Failure(ex) => ex match {
case ex: K8SException if ex.status.code.contains(404) => assert(true)
case _ => assert(false)
}
}
}
}
}
def getService(name: String): Service = {
val spec: Service.Spec = Service.Spec(ports = List(Service.Port(port = 80)), selector = Map("app" -> "nginx"))
Service(name, spec)
}
}
| doriordan/skuber | client/src/it/scala/skuber/ServiceSpec.scala | Scala | apache-2.0 | 1,626 |
package is.hail.expr.ir
import java.io.OutputStreamWriter
import is.hail.types._
import is.hail.types.physical.PStruct
import is.hail.types.virtual._
import is.hail.io.fs.FS
import is.hail.rvd._
import is.hail.utils._
import is.hail.variant.ReferenceGenome
import org.json4s._
import org.json4s.jackson.JsonMethods.parse
import org.json4s.jackson.{JsonMethods, Serialization}
import scala.language.{existentials, implicitConversions}
abstract class ComponentSpec
object RelationalSpec {
implicit val formats: Formats = new DefaultFormats() {
override val typeHints = ShortTypeHints(List(
classOf[ComponentSpec], classOf[RVDComponentSpec], classOf[PartitionCountsComponentSpec],
classOf[RelationalSpec], classOf[MatrixTableSpec], classOf[TableSpec]))
override val typeHintFieldName = "name"
} +
new TableTypeSerializer +
new MatrixTypeSerializer
def readMetadata(fs: FS, path: String): JValue = {
if (!fs.isDir(path))
fatal(s"MatrixTable and Table files are directories; path '$path' is not a directory")
val metadataFile = path + "/metadata.json.gz"
val jv = using(fs.open(metadataFile)) { in => parse(in) }
val fileVersion = jv \ "file_version" match {
case JInt(rep) => SemanticVersion(rep.toInt)
case _ =>
fatal(
s"""cannot read file: metadata does not contain file version: $metadataFile
| Common causes:
| - File is an 0.1 VariantDataset or KeyTable (0.1 and 0.2 native formats are not compatible!)""".stripMargin)
}
if (!FileFormat.version.supports(fileVersion))
fatal(s"incompatible file format when reading: $path\\n supported version: ${ FileFormat.version }, found $fileVersion")
jv
}
def read(fs: FS, path: String): RelationalSpec = {
val jv = readMetadata(fs, path)
val references = readReferences(fs, path, jv)
references.foreach { rg =>
if (!ReferenceGenome.hasReference(rg.name))
ReferenceGenome.addReference(rg)
}
(jv \\ "name").extract[String] match {
case "TableSpec" => TableSpec.fromJValue(fs, path, jv)
case "MatrixTableSpec" => MatrixTableSpec.fromJValue(fs, path, jv)
}
}
def readReferences(fs: FS, path: String): Array[ReferenceGenome] =
readReferences(fs, path, readMetadata(fs, path))
def readReferences(fs: FS, path: String, jv: JValue): Array[ReferenceGenome] = {
// FIXME this violates the abstraction of the serialization boundary
val referencesRelPath = (jv \ "references_rel_path").extract[String]
ReferenceGenome.readReferences(fs, path + "/" + referencesRelPath)
}
}
abstract class RelationalSpec {
def file_version: Int
def hail_version: String
def components: Map[String, ComponentSpec]
def getComponent[T <: ComponentSpec](name: String): T = components(name).asInstanceOf[T]
def globalsComponent: RVDComponentSpec = getComponent[RVDComponentSpec]("globals")
def partitionCounts: Array[Long] = getComponent[PartitionCountsComponentSpec]("partition_counts").counts.toArray
def indexed: Boolean
def version: SemanticVersion = SemanticVersion(file_version)
def toJValue: JValue
}
case class RVDComponentSpec(rel_path: String) extends ComponentSpec {
def absolutePath(path: String): String = path + "/" + rel_path
def rvdSpec(fs: FS, path: String): AbstractRVDSpec =
AbstractRVDSpec.read(fs, absolutePath(path))
def indexed(fs: FS, path: String): Boolean = rvdSpec(fs, path).indexed
def read(
ctx: ExecuteContext,
path: String,
requestedType: TStruct,
newPartitioner: Option[RVDPartitioner] = None,
filterIntervals: Boolean = false
): RVD = {
val rvdPath = path + "/" + rel_path
rvdSpec(ctx.fs, path)
.read(ctx, rvdPath, requestedType, newPartitioner, filterIntervals)
}
def readLocalSingleRow(ctx: ExecuteContext, path: String, requestedType: TStruct): (PStruct, Long) = {
val rvdPath = path + "/" + rel_path
rvdSpec(ctx.fs, path)
.readLocalSingleRow(ctx, rvdPath, requestedType)
}
}
case class PartitionCountsComponentSpec(counts: Seq[Long]) extends ComponentSpec
abstract class AbstractMatrixTableSpec extends RelationalSpec {
def matrix_type: MatrixType
def references_rel_path: String
def colsComponent: RVDComponentSpec = getComponent[RVDComponentSpec]("cols")
def rowsComponent: RVDComponentSpec = getComponent[RVDComponentSpec]("rows")
def entriesComponent: RVDComponentSpec = getComponent[RVDComponentSpec]("entries")
def globalsSpec: AbstractTableSpec
def colsSpec: AbstractTableSpec
def rowsSpec: AbstractTableSpec
def entriesSpec: AbstractTableSpec
def indexed: Boolean = rowsSpec.indexed
}
object MatrixTableSpec {
def fromJValue(fs: FS, path: String, jv: JValue): MatrixTableSpec = {
implicit val formats: Formats = new DefaultFormats() {
override val typeHints = ShortTypeHints(List(
classOf[ComponentSpec], classOf[RVDComponentSpec], classOf[PartitionCountsComponentSpec]))
override val typeHintFieldName = "name"
} +
new MatrixTypeSerializer
val params = jv.extract[MatrixTableSpecParameters]
val globalsSpec = RelationalSpec.read(fs, path + "/globals").asInstanceOf[AbstractTableSpec]
val colsSpec = RelationalSpec.read(fs, path + "/cols").asInstanceOf[AbstractTableSpec]
val rowsSpec = RelationalSpec.read(fs, path + "/rows").asInstanceOf[AbstractTableSpec]
// some legacy files written as MatrixTableSpec wrote the wrong type to the entries table metadata
var entriesSpec = RelationalSpec.read(fs, path + "/entries").asInstanceOf[TableSpec]
entriesSpec = TableSpec(fs, path + "/entries",
entriesSpec.params.copy(
table_type = TableType(params.matrix_type.entriesRVType, FastIndexedSeq(), params.matrix_type.globalType)))
new MatrixTableSpec(params, globalsSpec, colsSpec, rowsSpec, entriesSpec)
}
}
case class MatrixTableSpecParameters(
file_version: Int,
hail_version: String,
references_rel_path: String,
matrix_type: MatrixType,
components: Map[String, ComponentSpec]) {
def write(fs: FS, path: String) {
using(new OutputStreamWriter(fs.create(path + "/metadata.json.gz"))) { out =>
out.write(JsonMethods.compact(decomposeWithName(this, "MatrixTableSpec")(RelationalSpec.formats)))
}
}
}
class MatrixTableSpec(
val params: MatrixTableSpecParameters,
val globalsSpec: AbstractTableSpec,
val colsSpec: AbstractTableSpec,
val rowsSpec: AbstractTableSpec,
val entriesSpec: AbstractTableSpec) extends AbstractMatrixTableSpec {
def references_rel_path: String = params.references_rel_path
def file_version: Int = params.file_version
def hail_version: String = params.hail_version
def matrix_type: MatrixType = params.matrix_type
def components: Map[String, ComponentSpec] = params.components
def toJValue: JValue = {
decomposeWithName(params, "MatrixTableSpec")(RelationalSpec.formats)
}
}
object FileFormat {
val version: SemanticVersion = SemanticVersion(1, 5, 0)
}
| cseed/hail | hail/src/main/scala/is/hail/expr/ir/AbstractMatrixTableSpec.scala | Scala | mit | 7,030 |
/*
* This file is part of Kiama.
*
* Copyright (C) 2011-2015 Anthony M Sloane, Macquarie University.
*
* Kiama is free software: you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the
* Free Software Foundation, either version 3 of the License, or (at your
* option) any later version.
*
* Kiama is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
* more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Kiama. (See files COPYING and COPYING.LESSER.) If not, see
* <http://www.gnu.org/licenses/>.
*/
package org.kiama
package example.oberon0
package L1
trait TypeAnalyser extends L0.TypeAnalyser {
import base.source.Expression
import source.{IfStatement, WhileStatement}
/**
* The type expected of an expression as defined by its context.
*/
override def exptypeDef : Expression => Type =
{
case tree.parent (_ : IfStatement | _ : WhileStatement) =>
booleanType
case n =>
super.exptypeDef (n)
}
}
| adeze/kiama | library/src/org/kiama/example/oberon0/L1/TypeAnalyser.scala | Scala | gpl-3.0 | 1,303 |
package uk.ac.surrey.xw
import java.io.File
import java.lang.reflect.Modifier.isAbstract
import java.lang.reflect.Modifier.isPublic
import java.net.JarURLConnection
import java.net.URL
import java.net.URLClassLoader
import java.util.jar.Attributes
import java.util.jar.JarFile
import scala.collection.JavaConverters.enumerationAsScalaIteratorConverter
import uk.ac.surrey.xw.api.ExtraWidget
import uk.ac.surrey.xw.api.TabKind
import uk.ac.surrey.xw.api.WidgetKind
import uk.ac.surrey.xw.api.XWException
object WidgetsLoader {
def loadWidgetKinds(extensionFolder: File): Map[String, WidgetKind[_]] = {
val widgetKinds =
for {
folder ← getWidgetsFolder(extensionFolder).listFiles
if folder.isDirectory
file ← folder.listFiles
if file.getName.toUpperCase == (folder.getName + ".jar").toUpperCase
classLoader = newClassLoader(file, getClass.getClassLoader)
className ← classNamesIn(file)
clazz = loadClass(className, classLoader, file.toURI.toURL)
modifiers = clazz.getModifiers
if isPublic(modifiers) && !isAbstract(modifiers) &&
classOf[WidgetKind[_]].isAssignableFrom(clazz)
} yield clazz.newInstance.asInstanceOf[WidgetKind[_ <: ExtraWidget]]
(new TabKind +: widgetKinds)
.map(kind ⇒ kind.name -> kind)
.toMap
}
def classNamesIn(jar: File): Iterator[String] =
for {
entry ← new JarFile(jar).entries.asScala
entryName = entry.getName
if entryName.endsWith(".class")
className = entryName
.stripSuffix(".class")
.replaceAllLiterally("/", ".")
} yield className
def getAttributeValue(attributes: Attributes, attributeName: String, fileURL: URL): Either[XWException, String] =
Option(attributes.getValue(attributeName))
.toRight(XWException("Bad widget: Can't find attribute " +
attributeName + " class name in Manifest for " + fileURL + "."))
def loadClass(
className: String,
classLoader: ClassLoader,
fileURL: URL): Class[_] =
try classLoader.loadClass(className)
catch {
case e: ClassNotFoundException ⇒
throw new XWException("Can't find class " + className +
"\n in widget jar: " + fileURL + ".", e)
case e: NoClassDefFoundError ⇒
throw new XWException("No class definition found for " + className +
"\n in widget jar: " + fileURL + ".")
}
def getManifestAttributes(fileURL: URL): Either[XWException, Attributes] = {
val url = new URL("jar", "", fileURL + "!/")
val connection = url.openConnection.asInstanceOf[JarURLConnection]
Option(connection.getManifest())
.toRight(XWException("Can't find Manifest file in widget jar: " + fileURL + "."))
.right.map(_.getMainAttributes)
}
def getWidgetsFolder(extensionFolder: File): File =
extensionFolder
.listFiles
.filter(_.isDirectory)
.find(_.getName == "widgets")
.getOrElse(throw new XWException("Can't find extra widgets folder below extension folder."))
def newClassLoader(jarFile: File, parentLoader: ClassLoader): URLClassLoader = {
val jarURLs = addCompanionJars(jarFile).map(_.toURI.toURL)
URLClassLoader.newInstance(jarURLs, parentLoader)
}
def addCompanionJars(jarFile: File): Array[File] =
jarFile.getAbsoluteFile.getParentFile.listFiles
.filter(_.getName.toUpperCase.endsWith(".JAR"))
}
| CRESS-Surrey/eXtraWidgets | core/src/main/scala/uk/ac/surrey/xw/WidgetsLoader.scala | Scala | mit | 3,416 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert2.validators
import java.io.Closeable
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.convert2.metrics.ConverterMetrics
import org.locationtech.geomesa.utils.classpath.ServiceLoader
import org.locationtech.geomesa.utils.conf.GeoMesaSystemProperties.SystemProperty
import org.locationtech.geomesa.utils.io.CloseWithLogging
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
trait SimpleFeatureValidator extends Closeable {
/**
* Validate a feature
*
* @param sf simple feature
* @return validation error message, or null if valid
*/
def validate(sf: SimpleFeature): String
}
object SimpleFeatureValidator extends LazyLogging {
private lazy val factories = ServiceLoader.load[SimpleFeatureValidatorFactory]()
// noinspection ScalaDeprecation
private lazy val factoriesV1 =
ServiceLoader.load[org.locationtech.geomesa.convert.SimpleFeatureValidator.ValidatorFactory]()
val DefaultValidators = SystemProperty("geomesa.converter.validators", IndexValidatorFactory.Name)
/**
* Default validator names
*
* @return
*/
def default: Seq[String] = DefaultValidators.get.split(",")
/**
* Create validators for the given feature type
*
* @param sft simple feature type
* @param names validator names and options
* @param metrics optional metrics registry for tracking validation results
* @return
*/
def apply(sft: SimpleFeatureType, names: Seq[String], metrics: ConverterMetrics): SimpleFeatureValidator = {
val validators = names.map { full =>
val i = full.indexOf('(')
val (name, options) = if (i == -1) { (full, None) } else {
require(full.last == ')', s"Invalid option parentheses: $full")
(full.substring(0, i), Some(full.substring(i + 1, full.length - 1)))
}
val factory = factories.find(_.name.equalsIgnoreCase(name)).orElse(v1(name)).getOrElse {
throw new IllegalArgumentException(s"No factory found for name '$name'. " +
s"Available factories: ${(factories.map(_.name) ++ factoriesV1.map(_.name)).mkString(", ")}")
}
factory.apply(sft, metrics, options)
}
if (validators.lengthCompare(2) < 0) {
validators.headOption.getOrElse(NoValidator)
} else {
new CompositeValidator(validators)
}
}
/**
* Wrapper for custom version 1 validators
*
* @param name validator name
* @return
*/
private def v1(name: String): Option[SimpleFeatureValidatorFactory] = {
factoriesV1.find(_.name.equalsIgnoreCase(name)).map { factory =>
logger.warn(s"Using deprecated validator API for factory '${factory.getClass.getName}'. " +
s"Please migrate to org.locationtech.geomesa.convert2.validators.SimpleFeatureValidatorFactory")
new SimpleFeatureValidatorFactory() {
override def name: String = factory.name
override def apply(
sft: SimpleFeatureType,
metrics: ConverterMetrics,
config: Option[String]): SimpleFeatureValidator = {
new SimpleFeatureValidator() {
private val validator = factory.validator(sft, config)
override def validate(sf: SimpleFeature): String = validator.validate(sf)
override def close(): Unit = {}
}
}
}
}
}
/**
* Evaluates multiple validators
*
* @param validators validators
*/
class CompositeValidator(validators: Seq[SimpleFeatureValidator]) extends SimpleFeatureValidator {
override def validate(sf: SimpleFeature): String = {
var error: String = null
validators.foreach { validator =>
val e = validator.validate(sf)
if (e != null) {
error = if (error == null) { e } else { s"$error, $e" }
}
}
error
}
override def close(): Unit = CloseWithLogging(validators)
}
}
| elahrvivaz/geomesa | geomesa-convert/geomesa-convert-common/src/main/scala/org/locationtech/geomesa/convert2/validators/SimpleFeatureValidator.scala | Scala | apache-2.0 | 4,395 |
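SimpleFeatureValidator.apply above accepts validator names with optional arguments in trailing parentheses and combines multiple validators into a CompositeValidator. A standalone sketch of just the name/option split (the name `cql(INCLUDE)` is only illustrative; whether such a factory is registered depends on the classpath):

```scala
object ValidatorNameParsing {
  // Mirrors the split done in SimpleFeatureValidator.apply: "name(options)" -> (name, Some(options)).
  def parse(full: String): (String, Option[String]) = {
    val i = full.indexOf('(')
    if (i == -1) { (full, None) } else {
      require(full.last == ')', s"Invalid option parentheses: $full")
      (full.substring(0, i), Some(full.substring(i + 1, full.length - 1)))
    }
  }

  def main(args: Array[String]): Unit = {
    println(parse("index"))        // (index,None)
    println(parse("cql(INCLUDE)")) // (cql,Some(INCLUDE))
  }
}
```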
package com.sandinh.couchbase.transcoder
import com.couchbase.client.core.lang.{Tuple, Tuple2}
import com.couchbase.client.core.message.ResponseStatus
import com.couchbase.client.core.message.kv.MutationToken
import com.couchbase.client.deps.io.netty.buffer.ByteBuf
import com.couchbase.client.deps.io.netty.util.CharsetUtil.UTF_8
import com.couchbase.client.java.error.TranscodingException
import com.couchbase.client.java.transcoder.{TranscoderUtils, AbstractTranscoder}
import TranscoderUtils.{STRING_COMMON_FLAGS, JSON_COMPAT_FLAGS, JSON_COMMON_FLAGS, hasStringFlags, encodeStringAsUtf8}
import com.sandinh.couchbase.document.CompatStringDocument
/** An abstract transcoder to decode CompatStringDocument.
* This class permits decoding a stored document in the format of either StringDocument OR JsonStringDocument.
*/
abstract class CompatStringTranscoderBase extends AbstractTranscoder[CompatStringDocument, String] {
def doDecode(id: String, content: ByteBuf, cas: Long, expiry: Int, flags: Int, status: ResponseStatus): CompatStringDocument = {
lazy val s = content.toString(UTF_8)
val decoded =
if (hasStringFlags(flags)) s
else if ( /* hasCommonFlags(flags) && */ flags == JSON_COMMON_FLAGS) s.substring(1, s.length - 1)
else if (flags == 0) {
if (s.startsWith("\"") && s.endsWith("\"")) s.substring(1, s.length - 1)
else s
} else throw new TranscodingException(
s"Flags (0x${Integer.toHexString(flags)}) indicate non-String && non-JsonStringDocument document for id $id, could not decode."
)
newDocument(id, expiry, decoded, cas)
}
def newDocument(id: String, expiry: Int, content: String, cas: Long) = new CompatStringDocument(id, content, expiry, cas)
override def newDocument(id: String, expiry: Int, content: String, cas: Long, mutationToken: MutationToken) =
new CompatStringDocument(id, content, expiry, cas, mutationToken)
def documentType() = classOf[CompatStringDocument]
}
/** A transcoder to encode and decode CompatStringDocument. This class permits:
* + decoding a stored document in format of StringDocument OR JsonStringDocument.
* + encoding a String as JsonStringDocument.
*/
class CompatStringTranscoder extends CompatStringTranscoderBase {
/** encode same as JsonStringTranscoder
* @see com.couchbase.client.java.transcoder.JsonStringTranscoder#doEncode(com.couchbase.client.java.document.JsonStringDocument)
*/
def doEncode(document: CompatStringDocument): Tuple2[ByteBuf, Integer] =
Tuple.create(encodeStringAsUtf8("\"" + document.content + "\""), JSON_COMPAT_FLAGS)
}
object CompatStringTranscoder extends CompatStringTranscoder
/** A transcoder to encode and decode CompatStringDocument. This class permits:
* + decoding a stored document in format of StringDocument OR JsonStringDocument.
* + encoding a String as StringDocument.
*/
class CompatStringTranscoderLegacy extends CompatStringTranscoderBase {
/** encode same as StringTranscoder
* @see com.couchbase.client.java.transcoder.StringTranscoder#doEncode(com.couchbase.client.java.document.StringDocument)
*/
def doEncode(document: CompatStringDocument): Tuple2[ByteBuf, Integer] =
Tuple.create(encodeStringAsUtf8(document.content), STRING_COMMON_FLAGS)
}
object CompatStringTranscoderLegacy extends CompatStringTranscoderLegacy
| giabao/couchbase-scala | core/src/main/scala/com/sandinh/couchbase/transcoder/CompatStringTranscoder.scala | Scala | apache-2.0 | 3,344 |
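The compat transcoder above accepts three stored shapes: native string flags, JSON string flags (the stored bytes include surrounding JSON quotes, which are stripped), and legacy flag 0 (quotes stripped only when present). A simplified, self-contained sketch of that decision table without the Couchbase classes; the flag constants are assumptions about the SDK's common-flags values, not taken from this snippet:

```scala
object CompatStringDecodeRule {
  // Assumed numeric values: the SDK's common flags put a format id in the top byte
  // (4 = string, 2 = JSON); treat these two constants as illustrative, not authoritative.
  val StringCommonFlags: Int = 4 << 24
  val JsonCommonFlags: Int   = 2 << 24

  def decode(raw: String, flags: Int): String =
    if (flags == StringCommonFlags) raw                          // stored as a StringDocument
    else if (flags == JsonCommonFlags) raw.drop(1).dropRight(1)  // JsonStringDocument: strip the JSON quotes
    else if (flags == 0) {                                       // legacy document without common flags
      if (raw.startsWith("\"") && raw.endsWith("\"")) raw.drop(1).dropRight(1) else raw
    } else sys.error(f"unsupported flags 0x$flags%08x")

  def main(args: Array[String]): Unit = {
    println(decode("plain", StringCommonFlags))  // plain
    println(decode("\"json\"", JsonCommonFlags)) // json
    println(decode("\"legacy\"", 0))             // legacy
  }
}
```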
package com.sksamuel.elastic4s.bulk
import com.sksamuel.elastic4s.TcpClient
import com.sksamuel.exts.OptionImplicits._
import org.elasticsearch.action.bulk.BulkProcessor.Listener
import org.elasticsearch.action.bulk.{BackoffPolicy, BulkRequest, BulkResponse}
import org.elasticsearch.common.unit.{ByteSizeUnit, ByteSizeValue, TimeValue}
import scala.concurrent.duration.FiniteDuration
object BulkProcessorBuilder {
def apply() = new BulkProcessorBuilder()
}
case class BulkProcessorBuilder(name: Option[String] = None,
count: Option[Int] = None,
backoffPolicy: Option[BackoffPolicy] = None,
concurrentRequests: Option[Int] = None,
flushInterval: Option[FiniteDuration] = None,
size: Option[ByteSizeValue] = None) {
def build(client: TcpClient): BulkProcessor = {
val builder = org.elasticsearch.action.bulk.BulkProcessor.builder(client.java, new Listener {
override def beforeBulk(executionId: Long, request: BulkRequest): Unit = ()
override def afterBulk(executionId: Long,
request: BulkRequest,
response: BulkResponse): Unit = ()
override def afterBulk(executionId: Long,
request: BulkRequest,
failure: Throwable): Unit = ()
})
backoffPolicy.foreach(builder.setBackoffPolicy)
concurrentRequests.foreach(builder.setConcurrentRequests)
count.foreach(builder.setBulkActions)
flushInterval.map(_.toNanos).map(TimeValue.timeValueNanos).foreach(builder.setFlushInterval)
name.foreach(builder.setName)
size.foreach(builder.setBulkSize)
new BulkProcessor(client.java, builder.build())
}
def name(name: String): BulkProcessorBuilder = copy(name = name.some)
def backoffPolicy(backoffPolicy: BackoffPolicy): BulkProcessorBuilder = copy(backoffPolicy = backoffPolicy.some)
def concurrentRequests(concurrentRequests: Int): BulkProcessorBuilder =
copy(concurrentRequests = concurrentRequests.some)
def flushInterval(flushInterval: FiniteDuration): BulkProcessorBuilder = copy(flushInterval = flushInterval.some)
def actionCount(count: Int): BulkProcessorBuilder = copy(count = count.some)
def actionSize(units: Int, unit: ByteSizeUnit): BulkProcessorBuilder =
copy(size = new ByteSizeValue(units, unit).some)
}
| FabienPennequin/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/bulk/BulkProcessorBuilder.scala | Scala | apache-2.0 | 2,472 |
/**
* Created on March 6, 2011
* Copyright (c) 2011, Wei-ju Wu
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Wei-ju Wu nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY WEI-JU WU ''AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL WEI-JU WU BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.dmpp.infoviewer
import java.io._
import javax.swing._
import java.awt._
import java.awt.event._
import java.awt.image._
import org.dmpp.infolib._
/**
* Simple viewer component that can display a single AmigaIcon instance.
* @constructor creates an InfoCanvas instance
* @param icon an AmigaIcon instance to display
*/
class InfoCanvas(icon: AmigaIcon)
extends JComponent {
setPreferredSize(new Dimension(640, 480))
override def paintComponent(g: Graphics) {
super.paintComponent(g)
g.setColor(new Color(0x14, 0x5a, 0xab))
g.fillRect(0, 0, 640, 480)
g.drawImage(icon.normalImage, 0, 20, null)
if (icon.highlightImage != None) {
g.drawImage(icon.highlightImage.get, 100, 20, null)
}
}
}
/**
* This is a very simple demo program that expects an Amiga .info
* file as its first parameter and attempts to display it in a frame.
*/
object InfoViewer {
def main(args: Array[String]) {
println("Info Reader Version 1.0")
if (args.length == 0) {
println("please provide the name of a valid Amiga .info file")
} else {
displayIcon(args(0))
}
}
private def displayIcon(path: String) {
val file = new File(path)
val data = new Array[Byte](file.length.asInstanceOf[Int])
var in: FileInputStream = null
val infoReader = new AmigaInfoReader(AmigaInfoReader.Palette_1_x)
try {
in = new FileInputStream(file)
in.read(data)
displayFrameWith(infoReader.createIcon(data))
} finally {
if (in != null) in.close
}
}
private def displayFrameWith(icon: AmigaIcon) {
val frame = new JFrame("Info Viewer")
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
val canvas = new InfoCanvas(icon)
frame.getContentPane.add(canvas, BorderLayout.CENTER)
frame.pack
frame.setVisible(true)
}
}
| weiju/adf-tools | infoviewer/src/main/scala/org/dmpp/infoviewer/InfoViewer.scala | Scala | bsd-3-clause | 3,380 |
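One caveat in displayIcon above: a single InputStream.read(data) call is allowed to return before filling the whole array, so large .info files could be read only partially. A more defensive loading sketch, reusing the AmigaInfoReader API exactly as shown (Files.readAllBytes requires Java 7+):

```scala
import java.nio.file.{Files, Paths}
import org.dmpp.infolib.AmigaInfoReader

object InfoLoadSketch {
  // Files.readAllBytes always returns the complete file, whereas a single
  // InputStream.read(buffer) call may legally fill only part of the buffer.
  def loadIcon(path: String) = {
    val data = Files.readAllBytes(Paths.get(path))
    new AmigaInfoReader(AmigaInfoReader.Palette_1_x).createIcon(data)
  }
}
```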
package exerciseThree
import akka.actor._
import akka.stream.actor._
import video.Frame
// ------------
// EXERCISE 3.1
// ------------
// Fill in the code necessary to produce random circles based on the requested demand.
// The properties of the circles should be retrieved from the CircleGenerator actor
// which will return the random properties of a circle. The random circles
// should then be drawn to a Buffered Image and used to create the Frame.
//
// When the Frame is ready it should be sent to the consumer using:
// onNext(Frame ... )
//
// See video.imageUtils.ImageUtils.createBufferedImage
class CirclePublisher extends ActorPublisher[Frame] {
override def receive: Receive = {
case ActorPublisherMessage.Request(elements) =>
// TODO IMPLEMENT ME
case ActorPublisherMessage.Cancel =>
// TODO IMPLEMENT ME
}
}
object CirclePublisher {
/**
* run:
* ./activator 'runMain exerciseThree.CircleProducer'
*
*/
def main(args: Array[String]): Unit = {
// ActorSystem represents the "engine" we run in, including threading configuration and concurrency semantics.
val system = ActorSystem()
// Fill in the code necessary to construct a UI to consume and display the Frames produced
// by the Circle producer.
val display = video.display(system)
val circleProducer = system.actorOf(Props[CirclePublisher], "circleProducer")
ActorPublisher(circleProducer).subscribe(display)
}
}
| ktoso/streams-workshop | src/exercises/exerciseThree/CirclePublisher.scala | Scala | cc0-1.0 | 1,478 |
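Exercise 3.1 in the snippet above is intentionally left unimplemented. For orientation only, one possible shape of a solution is sketched below; it ignores the CircleGenerator actor and the ImageUtils helper mentioned in the exercise text, draws the random circle inline, and assumes (without confirmation from this snippet) that video.Frame can be built directly from a BufferedImage.

```scala
import java.awt.Color
import java.awt.image.BufferedImage
import scala.util.Random
import akka.stream.actor.{ActorPublisher, ActorPublisherMessage}
import video.Frame

// Hypothetical sketch: it skips the CircleGenerator actor and draws circles inline,
// and Frame(image) is an assumed constructor for video.Frame, not a confirmed API.
class RandomCirclePublisher extends ActorPublisher[Frame] {
  private val rnd = new Random

  private def randomFrame(): Frame = {
    val image = new BufferedImage(640, 480, BufferedImage.TYPE_INT_RGB)
    val g = image.createGraphics()
    g.setColor(new Color(rnd.nextInt(256), rnd.nextInt(256), rnd.nextInt(256)))
    val diameter = 10 + rnd.nextInt(40)
    g.fillOval(rnd.nextInt(640 - diameter), rnd.nextInt(480 - diameter), diameter, diameter)
    g.dispose()
    Frame(image) // assumed Frame constructor taking a BufferedImage
  }

  override def receive: Receive = {
    case ActorPublisherMessage.Request(_) =>
      // Emit one frame per unit of outstanding demand.
      while (totalDemand > 0 && isActive) onNext(randomFrame())
    case ActorPublisherMessage.Cancel =>
      context.stop(self)
  }
}
```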
/*
* Copyright (c) 2013 original authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.eigengo.monitor.agent.akka
import akka.actor.Props
import akka.routing.RoundRobinPool
import org.eigengo.monitor.{TestCounterInterface, ContainsTag, TestCounter}
/**
* Checks that the ``ActorCellMonitoringAspect`` records the required information.
*
* Here, we check that we can successfully record the message counts, and that we can
* monitor the queue size.
*
* When running from your IDE, remember to include the -javaagent JVM parameter:
* -javaagent:$HOME/.ivy2/cache/org.aspectj/aspectjweaver/jars/aspectjweaver-1.7.3.jar
* in my case -javaagent:/Users/janmachacek/.ivy2/cache/org.aspectj/aspectjweaver/jars/aspectjweaver-1.7.3.jar
*/
class UnfilteredActorCellMonitoringAspectSpec extends ActorCellMonitoringAspectSpec(Some("Unfiltered.conf")) {
sequential
import Aspects._
import TestCounterInterface.takeLHS
"Non-routed actor monitoring" should {
// records the count of actors, grouped by simple class name
"Record the actor count" in {
TestCounterInterface.clear()
withActorOf(Props[SimpleActor]) { ca =>
TestCounterInterface.foldlByAspect(actorCount, ContainsTag(ca.pathTag))(takeLHS) must contain(TestCounter(actorCount, 1, ca.tags))
// stop(self)
ca.actor ! 'stop
Thread.sleep(500) // wait for the messages
// we're sending gauge values here. We want the latest (hence our fold takes the 'head')
TestCounterInterface.foldlByAspect(actorCount, ContainsTag(ca.pathTag))(takeLHS) must contain(TestCounter(actorCount, 0, ca.tags))
}
}
"Record the actor count using a creator" in {
TestCounterInterface.clear()
val props = Props.create(new SimpleActorCreator)
val simpleActor = system.actorOf(props, "xxx")
val tags = getTags(simpleActor, props)
TestCounterInterface.foldlByAspect(actorCount, ContainsTag(tags.head))(takeLHS) must contain(TestCounter(actorCount, 1, tags))
// stop(self)
simpleActor ! 'stop
Thread.sleep(500) // wait for the messages
// we're sending gauge values here. We want the latest (hence our fold takes the 'head')
TestCounterInterface.foldlByAspect(actorCount, ContainsTag(tags.head))(takeLHS) must contain(TestCounter(actorCount, 0, tags))
}
"Record the actor count of a named actor using a creator" in {
withActorOf(Props.create(new SimpleActorCreator)) { ca =>
TestCounterInterface.foldlByAspect(actorCount, ContainsTag(ca.pathTag))(takeLHS) must contain(TestCounter(actorCount, 1, ca.tags))
// stop(self)
ca.actor ! 'stop
Thread.sleep(500) // wait for the messages
// we're sending gauge values here. We want the latest (hence our fold takes the 'head')
TestCounterInterface.foldlByAspect(actorCount, ContainsTag(ca.pathTag))(takeLHS) must contain(TestCounter(actorCount, 0, ca.tags))
}
}
// records the count of messages received, grouped by message type
"Record the message sent to actor" in {
withActorOf(Props[SimpleActor]) { ca =>
ca.actor ! 1 // OK
ca.actor ! 1 // OK
ca.actor ! "Bantha Poodoo!" // OK
ca.actor ! 2.2 // original Actor.unhandled
ca.actor ! 'stop // OK. stop self
Thread.sleep(500) // wait for the messages
// we expect to see 2 integers, 1 string and 1 undelivered
TestCounterInterface.foldlByAspect(delivered(1: Int))(TestCounter.plus) must contain(TestCounter(delivered(1: Int), 2, ca.tags))
TestCounterInterface.foldlByAspect(delivered(""))(TestCounter.plus) must contain(TestCounter(delivered(""), 1, ca.tags))
// NB: undelivered does not include the actor class name
TestCounterInterface.foldlByAspect(undelivered)(TestCounter.plus) must contain(TestCounter(undelivered, 1, ca.pathTags ++ ca.systemTags))
}
}
// records the queue size at any given time
"Record the message queue size" in {
withActorOf(Props[SimpleActor]) { ca =>
        // because we are using the test ActorSystem, which uses a single-threaded dispatcher,
        // we expect the queue size to reach the message count. But we must
        // allow for some crafty threading, so we tolerate a slightly smaller queue
val count = 100
val tolerance = 3
for (i <- 0 to count) ca.actor ! 10
Thread.sleep(count * (10 + 2))
// fold by _max_ over the counters by the ``queueSizeAspect``, tagged with this actor's name
val counter = TestCounterInterface.foldlByAspect(queueSize, ContainsTag(ca.pathTag))(TestCounter.max)(0)
counter.value must beGreaterThan(count - tolerance)
counter.tags must containAllOf(ca.tags)
}
}
// keep track of the actor duration; that is the time the receive method takes
"Record the actor duration" in {
withActorOf(Props[SimpleActor]) { ca =>
ca.actor ! 1000
Thread.sleep(1100)
val counter = TestCounterInterface.foldlByAspect(actorDuration, ContainsTag(ca.pathTag))(TestCounter.max)(0)
counter.value must beGreaterThan(900)
counter.value must beLessThan(1100)
counter.tags must containAllOf(ca.tags)
}
}
"Record the errors" in {
withActorOf(Props[SimpleActor]) { ca =>
// match error in receive
ca.actor ! false
Thread.sleep(500) // wait for the messages
TestCounterInterface.foldlByAspect(actorError)(TestCounter.plus) must contain(TestCounter(actorError, 1, ca.tags))
}
}
}
// If we create actor "foo" with round-robin routing with x | x > 1 instances, then each instance's metrics
// should _also_ be contributed to the supervisor
//
// Nota bene the _also_ bit: we record the metrics for each instance _and_ add them to the parent. Put more
// plainly, the tags for routed actors should include the actor itself and its supervisor
"Routed actor monitoring" should {
"Record the message sent to actor" in {
val count = 10
withActorOf(Props[SimpleActor].withRouter(RoundRobinPool(nrOfInstances = count))) { ca =>
for (i <- 0 until count) ca.actor ! 100
Thread.sleep(3500)
// we expect to see 10 integers for the supervisor and 1 integer for each child
val supCounter = TestCounterInterface.foldlByAspect(delivered(1: Int), ContainsTag(ca.pathTag))(TestCounter.plus)(0)
// TODO: RoundRobinPool changes the naming of the routees!
//val c1Counter = TestCounterInterface.foldlByAspect(delivered(1: Int), ContainsTag(ca.pathTag + "/$a"))(TestCounter.plus)(0)
supCounter.value mustEqual 10
//c1Counter.value mustEqual 1
}
}
"Shutdown system" in {
system.shutdown()
success
}
}
}
|
eigengo/monitor
|
agent-akka/src/test/scala/org/eigengo/monitor/agent/akka/UnfilteredActorCellMonitoringAspectSpec.scala
|
Scala
|
apache-2.0
| 7,431 |
package colang.ast.raw.expression
import colang.MappedStrategy
import colang.ast.raw.ParserImpl.SingleTokenStrategy
import colang.tokens.ThisKeyword
/**
* Represents a 'this' expression referencing the current contextual object.
* @param thisKeyword 'this' keyword
*/
case class ThisReference(thisKeyword: ThisKeyword) extends Expression {
def source = thisKeyword.source
}
object ThisReference {
val strategy = MappedStrategy(SingleTokenStrategy(classOf[ThisKeyword]), ThisReference.apply)
}
|
psenchanka/colang
|
src/main/scala/colang/ast/raw/expression/ThisReference.scala
|
Scala
|
mit
| 506 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.examples.nnframes.finetune
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.nn.CrossEntropyCriterion
import com.intel.analytics.bigdl.optim.{Top1Accuracy, Trigger}
import com.intel.analytics.bigdl.utils.{LoggerFilter, Shape}
import com.intel.analytics.zoo.common.NNContext
import com.intel.analytics.zoo.pipeline.api.keras.layers._
import com.intel.analytics.zoo.pipeline.api.keras.models.Model
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericFloat
import com.intel.analytics.zoo.feature.image.{RowToImageFeature, _}
import com.intel.analytics.zoo.pipeline.api.Net
import com.intel.analytics.zoo.pipeline.nnframes.{NNClassifier, NNImageReader}
import org.apache.hadoop.fs.Path
import org.apache.spark.ml.Pipeline
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.SparkConf
import scopt.OptionParser
/**
* Scala example for image fine tuning with Inception model on Spark DataFrame.
* Please refer to the readme.md in the same folder for more details.
*/
object ImageFinetune {
LoggerFilter.redirectSparkInfoLogs()
def main(args: Array[String]): Unit = {
Utils.trainParser.parse(args, Utils.TrainParams()).foreach(param => {
val conf = new SparkConf().setAppName("Fine tuning Example")
val sc = NNContext.initNNContext(conf)
val createLabel = udf { row: Row =>
if (new Path(row.getString(0)).getName.contains("cat")) 1.0 else 2.0
}
val imagesDF = NNImageReader.readImages(param.imagePath, sc,
resizeH = 256, resizeW = 256, imageCodec = 1)
.withColumn("label", createLabel(col("image")))
val Array(validationDF, trainingDF) = imagesDF.randomSplit(Array(0.20, 0.80), seed = 1L)
val featureTransformer = RowToImageFeature() ->
ImageCenterCrop(224, 224) -> ImageChannelNormalize(123, 117, 104) ->
ImageMatToTensor() -> ImageFeatureToTensor()
val model = getTransferLearningModel(param.modelPath)
val classifier = NNClassifier(model, CrossEntropyCriterion[Float](), featureTransformer)
.setFeaturesCol("image")
.setLearningRate(0.003)
.setBatchSize(param.batchSize)
.setMaxEpoch(param.nEpochs)
.setCachingSample(false)
.setValidation(Trigger.everyEpoch, validationDF, Array(new Top1Accuracy()), param.batchSize)
val pipeline = new Pipeline().setStages(Array(classifier))
val pipelineModel = pipeline.fit(trainingDF)
val predictions = pipelineModel.transform(validationDF)
predictions.select("image", "label", "prediction").sample(false, 0.05).show(false)
sc.stop()
})
}
private def getTransferLearningModel(preTrainedPath: String): Module[Float] = {
    // you can use Net.loadBigDL[Float](preTrainedPath).saveGraphTopology(somePath)
    // and use tensorboard to visualize the model topology and decide which layers to remove or freeze
val inception = Net.loadBigDL[Float](preTrainedPath)
.newGraph(output = "pool5/drop_7x7_s1") // remove layers after pool5/drop_7x7_s1
.freezeUpTo("pool4/3x3_s2") // freeze layer pool4/3x3_s2 and the layers before it
.toKeras()
    // add a new classifier
val input = Input[Float](inputShape = Shape(3, 224, 224))
val feature = inception.inputs(input)
val flatten = Flatten[Float]().inputs(feature)
val logits = Dense[Float](2).inputs(flatten)
Model[Float](input, logits)
}
}
private object Utils {
case class TrainParams(
modelPath: String = "/tmp/zoo/bigdl_inception-v1_imagenet_0.4.0.model",
imagePath: String = "/tmp/zoo/dogs_cats/samples",
batchSize: Int = 32,
nEpochs: Int = 2)
val trainParser = new OptionParser[TrainParams]("BigDL ptbModel Train Example") {
opt[String]('m', "modelPath")
.text("pretrained model path")
.action((x, c) => c.copy(modelPath = x))
opt[String]('d', "imagePath")
.text("training data path")
.action((x, c) => c.copy(imagePath = x))
opt[Int]('b', "batchSize")
.text("batchSize")
.action((x, c) => c.copy(batchSize = x))
opt[Int]('e', "nEpochs")
.text("epoch numbers")
.action((x, c) => c.copy(nEpochs = x))
}
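  // Example invocation (illustrative only; the jar name and paths below are placeholders):
  //   spark-submit --class com.intel.analytics.zoo.examples.nnframes.finetune.ImageFinetune \
  //     analytics-zoo.jar -m /tmp/zoo/bigdl_inception-v1_imagenet_0.4.0.model \
  //     -d /tmp/zoo/dogs_cats/samples -b 32 -e 2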
}
|
intel-analytics/analytics-zoo
|
zoo/src/main/scala/com/intel/analytics/zoo/examples/nnframes/finetune/ImageFinetune.scala
|
Scala
|
apache-2.0
| 4,836 |
/*
* InterpreterViewImpl.scala
* (SysSon)
*
* Copyright (c) 2013-2017 Institute of Electronic Music and Acoustics, Graz.
* Copyright (c) 2014-2019 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package at.iem.sysson
package gui
package impl
import java.io.{File, FileInputStream, IOException}
import de.sciss.desktop
import de.sciss.desktop.WindowHandler
import de.sciss.scalainterpreter.{CodePane, Interpreter, InterpreterPane}
import scala.swing.Component
private[gui] object InterpreterViewImpl {
def apply(): InterpreterView = new Impl
private final class Impl extends InterpreterView {
impl =>
val intp: InterpreterPane = {
val codeCfg = CodePane.Config()
val file = new File( /* new File( "" ).getAbsoluteFile.getParentFile, */ "interpreter.txt" )
if (file.isFile) try {
val fis = new FileInputStream(file)
val txt = try {
val arr = new Array[Byte](fis.available())
fis.read(arr)
new String(arr, "UTF-8")
} finally {
fis.close()
}
codeCfg.text = txt
} catch {
case e: IOException => e.printStackTrace()
}
val intpCfg = Interpreter.Config()
intpCfg.imports = List(
"at.iem.sysson._",
"at.iem.sysson.Implicits._",
"de.sciss.file._",
"de.sciss.osc.Implicits._",
"de.sciss.synth._",
"de.sciss.synth.ugen._",
"de.sciss.synth.Ops._",
"de.sciss.synth.swing.Implicits._", // ScalaCollider swing extensions
"de.sciss.synth.swing.Plotting._", // ScalaCollider swing app extensions
// "scala.concurrent.duration._",
"at.iem.sysson.gui.InterpreterView.Bindings._"
)
InterpreterPane(interpreterConfig = intpCfg, codePaneConfig = codeCfg)
}
val component: Component = intp.component
val f = new desktop.impl.WindowImpl {
frame =>
// override def style = desktop.Window.Auxiliary
def handler: WindowHandler = SwingApplication.windowHandler
title = "Interpreter"
contents = impl.component
closeOperation = desktop.Window.CloseDispose
pack()
desktop.Util.centerOnScreen(this)
front()
}
}
}
|
iem-projects/sysson
|
src/main/scala/at/iem/sysson/gui/impl/InterpreterViewImpl.scala
|
Scala
|
gpl-3.0
| 2,391 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.serializers
import java.util.UUID
import com.websudos.phantom.builder.query.KeySpaceSuite
import com.websudos.phantom.dsl._
import com.websudos.phantom.tables.TestDatabase
import com.websudos.util.testing._
import org.scalatest.{FlatSpec, Matchers}
class QuerySerializationTest extends FlatSpec with Matchers with KeySpaceSuite {
val Articles = TestDatabase.articles
val Recipes = TestDatabase.recipes
val TableWithCompoundKey = TestDatabase.tableWithCompoundKey
it should "compile a full select query" in {
"Articles.select.where(_.id eqs gen[UUID])" should compile
}
it should "serialize a full select query" in {
val someId = gen[UUID]
Articles.select.where(_.id eqs someId).qb.queryString shouldBe s"SELECT * FROM phantom.articles WHERE id = $someId"
}
it should "compile a single column partial select query" in {
"Articles.select(_.id).where(_.id eqs gen[UUID])" should compile
}
it should "serialize a single column partial select query" in {
val someId = gen[UUID]
Articles.select(_.id).where(_.id eqs someId).qb.queryString shouldBe s"SELECT id FROM phantom.${Articles.tableName} WHERE id = $someId"
}
it should "compile a query to query condition clause" in {
"""Articles.update.where(_.id eqs gen[UUID]).modify(_.name setTo "test").onlyIf(_.name is "update")""" should compile
}
it should "serialize a condition query to a query condition" in {
val someId = gen[UUID]
val query = Articles.update.where(_.id eqs someId).modify(_.name setTo "test").onlyIf(_.name is "update").qb.queryString
query shouldEqual s"UPDATE phantom.articles SET name = 'test' WHERE id = $someId IF name = 'update'"
}
it should "serialize a 2 column partial select query" in {
val someId = gen[UUID]
Articles.select(_.id, _.name).where(_.id eqs someId).qb.queryString shouldBe s"SELECT id, name FROM phantom.articles WHERE id = $someId"
}
it should "serialize a 3 column partial select query" in {
val someId = gen[String]
Recipes.select(
_.url,
_.description,
_.ingredients
).where(_.url eqs someId)
.qb.queryString shouldBe s"SELECT url, description, ingredients FROM phantom.recipes WHERE url = '$someId'"
}
it should "serialise a conditional update query with a single List column based clause" in {
val qb = Recipes.update.where(_.url eqs "test")
.modify(_.description setTo Some("blabla"))
.onlyIf(_.ingredients is List("1", "2", "3"))
.qb.queryString
qb shouldEqual "UPDATE phantom.recipes SET description = 'blabla' WHERE url = 'test' IF ingredients = ['1', '2', '3']"
}
it should "serialise a multi-part conditional update query with a List column part" in {
val qb = Recipes.update.where(_.url eqs "test")
.modify(_.description setTo Some("blabla"))
.onlyIf(_.ingredients is List("1", "2", "3"))
.and(_.description is Some("test"))
.qb.queryString
qb shouldEqual "UPDATE phantom.recipes SET description = 'blabla' WHERE url = 'test' IF ingredients = ['1', '2', '3'] AND description = 'test'"
}
it should "serialize a simple count query" in {
Recipes.select.count.qb.queryString shouldEqual "SELECT COUNT(*) FROM phantom.recipes"
}
it should "serialize a count query with a where clause" in {
val key = gen[String]
Recipes.select.count.where(_.url eqs key).qb.queryString shouldEqual s"SELECT COUNT(*) FROM phantom.recipes WHERE url = '$key'"
}
it should "serialize a count query with a where-and clause" in {
val id = UUID.randomUUID()
val key = id.toString
TableWithCompoundKey.select.count.where(_.id eqs id).and(_.second eqs id).qb.queryString shouldEqual s"SELECT COUNT(*) FROM phantom.tableWithCompoundKey WHERE id = $key AND second = $key"
}
it should "allow setting a limit on a count query" in {
val id = UUID.randomUUID()
val key = id.toString
TableWithCompoundKey.select.count.where(_.id eqs id).and(_.second eqs id).limit(10).qb.queryString shouldEqual s"SELECT COUNT(*) FROM phantom.tableWithCompoundKey WHERE id = $key AND second = $key LIMIT 10"
}
it should "allow filtering on a count query" in {
val id = UUID.randomUUID()
val key = id.toString
TableWithCompoundKey.select.count
.where(_.id eqs id).and(_.second eqs id)
.limit(10)
.allowFiltering()
.qb.queryString shouldEqual s"SELECT COUNT(*) FROM phantom.tableWithCompoundKey WHERE id = $key AND second = $key LIMIT 10 ALLOW FILTERING"
}
}
|
levinson/phantom
|
phantom-dsl/src/test/scala/com/websudos/phantom/builder/serializers/QuerySerializationTest.scala
|
Scala
|
bsd-2-clause
| 6,038 |
package dzufferey.utils
import java.io._
object IO {
def storeInFile(file: File, data: Array[Byte]): Unit = {
val fileOut = new DataOutputStream( new FileOutputStream(file))
fileOut.write(data, 0, data.length)
fileOut.close
}
def storeInFile(file: String, data: Array[Byte]): Unit = storeInFile(new File(file), data)
def storeInTempFile(prefix: String, suffix: String, uploadDirectory: File, data: Array[Byte]) = {
val storage = java.io.File.createTempFile(prefix, suffix, uploadDirectory)
storeInFile(storage, data)
storage
}
def writeInFile(file: File, data: String): Unit = {
val fileOut = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file)))
fileOut.write(data)
fileOut.flush
fileOut.close
}
def writeInFile(file: String, data: String): Unit = writeInFile(new File(file), data)
def writeInFile(file: String, data: BufferedWriter => Unit): Unit = {
val fileOut = new BufferedWriter(new OutputStreamWriter(new FileOutputStream(file)))
data(fileOut)
fileOut.flush
fileOut.close
}
//TODO the append to file version
//...
def readTextFile(file: String): String = {
val fileIn = new BufferedReader(new FileReader(file))
val builder = new StringBuilder(1000)
while(fileIn.ready) {
builder ++= fileIn.readLine + "\\n"
}
fileIn.close
builder.toString
}
def readStdin: String = {
val read = new scala.collection.mutable.StringBuilder
var line = scala.Console.in.readLine
while (line != null) {
read ++= line
read ++= "\\n"
line = scala.Console.in.readLine
}
read.toString
}
}
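// A minimal usage sketch of the helpers above (the file path is illustrative):
//   IO.writeInFile("/tmp/example.txt", "hello world")
//   val text = IO.readTextFile("/tmp/example.txt")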
/** A Writer that prepends a prefix to every line written to the underlying output stream. */
class PrefixingWriter(prefix: String, base: OutputStream) extends Writer {
val out = new OutputStreamWriter(base)
var needPrefix = true
private def ifPrefixNeeded: Unit = {
if (needPrefix) {
out.write(prefix)
needPrefix = false
}
}
def write(cbuf: Array[Char], off: Int, len: Int): Unit = {
val max = math.min(cbuf.size - off, off + len)
var start = off
while (start < max) {
val idx = cbuf.indexOf('\\n', start)
if (idx == -1 || idx >= max) {
ifPrefixNeeded
out.write(cbuf, start, len + off - start)
start = max
} else {
ifPrefixNeeded
out.write(cbuf, start, idx - start + 1)
start = math.min(max, idx + 1)
if (start < max) {
needPrefix = true
}
}
}
}
def close: Unit = {
out.close
}
def flush: Unit = {
out.flush
}
}
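// A minimal usage sketch (stream and prefix are illustrative):
//   val writer = new PrefixingWriter("[worker] ", System.out)
//   writer.write("first line" + System.lineSeparator + "second line" + System.lineSeparator)
//   writer.flush   // both lines are printed with the "[worker] " prefix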
|
dzufferey/misc-scala-utils
|
src/main/scala/dzufferey/utils/IO.scala
|
Scala
|
apache-2.0
| 2,572 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.logical
import java.lang.reflect.Method
import java.util
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.core.{CorrelationId, JoinRelType}
import org.apache.calcite.rel.logical.LogicalTableFunctionScan
import org.apache.calcite.rex.{RexInputRef, RexNode}
import org.apache.calcite.tools.RelBuilder
import org.apache.flink.api.common.typeinfo.BasicTypeInfo._
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.operators.join.JoinType
import org.apache.flink.table.api.{StreamTableEnvironment, TableEnvironment, UnresolvedException}
import org.apache.flink.table.calcite.{FlinkRelBuilder, FlinkTypeFactory}
import org.apache.flink.table.expressions._
import org.apache.flink.table.functions.TableFunction
import org.apache.flink.table.functions.utils.TableSqlFunction
import org.apache.flink.table.functions.utils.UserDefinedFunctionUtils._
import org.apache.flink.table.plan.schema.FlinkTableFunctionImpl
import org.apache.flink.table.validate.{ValidationFailure, ValidationSuccess}
import scala.collection.JavaConverters._
import scala.collection.mutable
case class Project(projectList: Seq[NamedExpression], child: LogicalNode) extends UnaryNode {
override def output: Seq[Attribute] = projectList.map(_.toAttribute)
override def resolveExpressions(tableEnv: TableEnvironment): LogicalNode = {
val afterResolve = super.resolveExpressions(tableEnv).asInstanceOf[Project]
val newProjectList =
afterResolve.projectList.zipWithIndex.map { case (e, i) =>
e match {
case u @ UnresolvedAlias(c) => c match {
case ne: NamedExpression => ne
case expr if !expr.valid => u
case c @ Cast(ne: NamedExpression, tp) => Alias(c, s"${ne.name}-$tp")
case gcf: GetCompositeField => Alias(gcf, gcf.aliasName().getOrElse(s"_c$i"))
case other => Alias(other, s"_c$i")
}
case _ =>
throw new RuntimeException("This should never be called and probably points to a bug.")
}
}
Project(newProjectList, child)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
val resolvedProject = super.validate(tableEnv).asInstanceOf[Project]
val names: mutable.Set[String] = mutable.Set()
def checkName(name: String): Unit = {
if (names.contains(name)) {
failValidation(s"Duplicate field name $name.")
} else if (tableEnv.isInstanceOf[StreamTableEnvironment] && name == "rowtime") {
failValidation("'rowtime' cannot be used as field name in a streaming environment.")
} else {
names.add(name)
}
}
resolvedProject.projectList.foreach {
case n: Alias =>
// explicit name
checkName(n.name)
case r: ResolvedFieldReference =>
// simple field forwarding
checkName(r.name)
case _ => // Do nothing
}
resolvedProject
}
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
child.construct(relBuilder)
relBuilder.project(
projectList.map(_.toRexNode(relBuilder)).asJava,
projectList.map(_.name).asJava,
true)
}
}
case class AliasNode(aliasList: Seq[Expression], child: LogicalNode) extends UnaryNode {
override def output: Seq[Attribute] =
throw UnresolvedException("Invalid call to output on AliasNode")
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder =
throw UnresolvedException("Invalid call to toRelNode on AliasNode")
override def resolveExpressions(tableEnv: TableEnvironment): LogicalNode = {
if (aliasList.length > child.output.length) {
failValidation("Aliasing more fields than we actually have")
} else if (!aliasList.forall(_.isInstanceOf[UnresolvedFieldReference])) {
failValidation("Alias only accept name expressions as arguments")
} else if (!aliasList.forall(_.asInstanceOf[UnresolvedFieldReference].name != "*")) {
failValidation("Alias can not accept '*' as name")
} else if (tableEnv.isInstanceOf[StreamTableEnvironment] && !aliasList.forall {
case UnresolvedFieldReference(name) => name != "rowtime"
}) {
failValidation("'rowtime' cannot be used as field name in a streaming environment.")
} else {
val names = aliasList.map(_.asInstanceOf[UnresolvedFieldReference].name)
val input = child.output
Project(
names.zip(input).map { case (name, attr) =>
Alias(attr, name)} ++ input.drop(names.length), child)
}
}
}
case class Distinct(child: LogicalNode) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
child.construct(relBuilder)
relBuilder.distinct()
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment]) {
failValidation(s"Distinct on stream tables is currently not supported.")
}
this
}
}
case class Sort(order: Seq[Ordering], child: LogicalNode) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
child.construct(relBuilder)
relBuilder.sort(order.map(_.toRexNode(relBuilder)).asJava)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment]) {
failValidation(s"Sort on stream tables is currently not supported.")
}
super.validate(tableEnv)
}
}
case class Limit(offset: Int, fetch: Int = -1, child: LogicalNode) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
child.construct(relBuilder)
relBuilder.limit(offset, fetch)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment]) {
failValidation(s"Limit on stream tables is currently not supported.")
}
if (!child.validate(tableEnv).isInstanceOf[Sort]) {
failValidation(s"Limit operator must be preceded by an OrderBy operator.")
}
if (offset < 0) {
failValidation(s"Offset should be greater than or equal to zero.")
}
super.validate(tableEnv)
}
}
case class Filter(condition: Expression, child: LogicalNode) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
child.construct(relBuilder)
relBuilder.filter(condition.toRexNode(relBuilder))
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
val resolvedFilter = super.validate(tableEnv).asInstanceOf[Filter]
if (resolvedFilter.condition.resultType != BOOLEAN_TYPE_INFO) {
failValidation(s"Filter operator requires a boolean expression as input," +
s" but ${resolvedFilter.condition} is of type ${resolvedFilter.condition.resultType}")
}
resolvedFilter
}
}
case class Aggregate(
groupingExpressions: Seq[Expression],
aggregateExpressions: Seq[NamedExpression],
child: LogicalNode) extends UnaryNode {
override def output: Seq[Attribute] = {
(groupingExpressions ++ aggregateExpressions) map {
case ne: NamedExpression => ne.toAttribute
case e => Alias(e, e.toString).toAttribute
}
}
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
child.construct(relBuilder)
relBuilder.aggregate(
relBuilder.groupKey(groupingExpressions.map(_.toRexNode(relBuilder)).asJava),
aggregateExpressions.map {
case Alias(agg: Aggregation, name, _) => agg.toAggCall(name)(relBuilder)
case _ => throw new RuntimeException("This should never happen.")
}.asJava)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment]) {
failValidation(s"Aggregate on stream tables is currently not supported.")
}
val resolvedAggregate = super.validate(tableEnv).asInstanceOf[Aggregate]
val groupingExprs = resolvedAggregate.groupingExpressions
val aggregateExprs = resolvedAggregate.aggregateExpressions
aggregateExprs.foreach(validateAggregateExpression)
groupingExprs.foreach(validateGroupingExpression)
def validateAggregateExpression(expr: Expression): Unit = expr match {
// check no nested aggregation exists.
case aggExpr: Aggregation =>
aggExpr.children.foreach { child =>
child.preOrderVisit {
case agg: Aggregation =>
failValidation(
"It's not allowed to use an aggregate function as " +
"input of another aggregate function")
case _ => // OK
}
}
case a: Attribute if !groupingExprs.exists(_.checkEquals(a)) =>
failValidation(
s"expression '$a' is invalid because it is neither" +
" present in group by nor an aggregate function")
case e if groupingExprs.exists(_.checkEquals(e)) => // OK
case e => e.children.foreach(validateAggregateExpression)
}
def validateGroupingExpression(expr: Expression): Unit = {
if (!expr.resultType.isKeyType) {
failValidation(
s"expression $expr cannot be used as a grouping expression " +
"because it's not a valid key type which must be hashable and comparable")
}
}
resolvedAggregate
}
}
case class Minus(left: LogicalNode, right: LogicalNode, all: Boolean) extends BinaryNode {
override def output: Seq[Attribute] = left.output
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
left.construct(relBuilder)
right.construct(relBuilder)
relBuilder.minus(all)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment]) {
failValidation(s"Minus on stream tables is currently not supported.")
}
val resolvedMinus = super.validate(tableEnv).asInstanceOf[Minus]
if (left.output.length != right.output.length) {
failValidation(s"Minus two table of different column sizes:" +
s" ${left.output.size} and ${right.output.size}")
}
val sameSchema = left.output.zip(right.output).forall { case (l, r) =>
l.resultType == r.resultType
}
if (!sameSchema) {
failValidation(s"Minus two table of different schema:" +
s" [${left.output.map(a => (a.name, a.resultType)).mkString(", ")}] and" +
s" [${right.output.map(a => (a.name, a.resultType)).mkString(", ")}]")
}
resolvedMinus
}
}
case class Union(left: LogicalNode, right: LogicalNode, all: Boolean) extends BinaryNode {
override def output: Seq[Attribute] = left.output
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
left.construct(relBuilder)
right.construct(relBuilder)
relBuilder.union(all)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment] && !all) {
failValidation(s"Union on stream tables is currently not supported.")
}
val resolvedUnion = super.validate(tableEnv).asInstanceOf[Union]
if (left.output.length != right.output.length) {
failValidation(s"Union two tables of different column sizes:" +
s" ${left.output.size} and ${right.output.size}")
}
val sameSchema = left.output.zip(right.output).forall { case (l, r) =>
l.resultType == r.resultType
}
if (!sameSchema) {
failValidation(s"Union two tables of different schema:" +
s" [${left.output.map(a => (a.name, a.resultType)).mkString(", ")}] and" +
s" [${right.output.map(a => (a.name, a.resultType)).mkString(", ")}]")
}
resolvedUnion
}
}
case class Intersect(left: LogicalNode, right: LogicalNode, all: Boolean) extends BinaryNode {
override def output: Seq[Attribute] = left.output
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
left.construct(relBuilder)
right.construct(relBuilder)
relBuilder.intersect(all)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment]) {
failValidation(s"Intersect on stream tables is currently not supported.")
}
val resolvedIntersect = super.validate(tableEnv).asInstanceOf[Intersect]
if (left.output.length != right.output.length) {
failValidation(s"Intersect two tables of different column sizes:" +
s" ${left.output.size} and ${right.output.size}")
}
// allow different column names between tables
val sameSchema = left.output.zip(right.output).forall { case (l, r) =>
l.resultType == r.resultType
}
if (!sameSchema) {
failValidation(s"Intersect two tables of different schema:" +
s" [${left.output.map(a => (a.name, a.resultType)).mkString(", ")}] and" +
s" [${right.output.map(a => (a.name, a.resultType)).mkString(", ")}]")
}
resolvedIntersect
}
}
case class Join(
left: LogicalNode,
right: LogicalNode,
joinType: JoinType,
condition: Option[Expression],
correlated: Boolean) extends BinaryNode {
override def output: Seq[Attribute] = {
left.output ++ right.output
}
private case class JoinFieldReference(
name: String,
resultType: TypeInformation[_],
left: LogicalNode,
right: LogicalNode) extends Attribute {
val isFromLeftInput = left.output.map(_.name).contains(name)
val (indexInInput, indexInJoin) = if (isFromLeftInput) {
val indexInLeft = left.output.map(_.name).indexOf(name)
(indexInLeft, indexInLeft)
} else {
val indexInRight = right.output.map(_.name).indexOf(name)
(indexInRight, indexInRight + left.output.length)
}
override def toString = s"'$name"
override def toRexNode(implicit relBuilder: RelBuilder): RexNode = {
// look up type of field
val fieldType = relBuilder.field(2, if (isFromLeftInput) 0 else 1, name).getType
// create a new RexInputRef with index offset
new RexInputRef(indexInJoin, fieldType)
}
override def withName(newName: String): Attribute = {
if (newName == name) {
this
} else {
JoinFieldReference(newName, resultType, left, right)
}
}
}
override def resolveExpressions(tableEnv: TableEnvironment): LogicalNode = {
val node = super.resolveExpressions(tableEnv).asInstanceOf[Join]
val partialFunction: PartialFunction[Expression, Expression] = {
case field: ResolvedFieldReference => JoinFieldReference(
field.name,
field.resultType,
left,
right)
}
val resolvedCondition = node.condition.map(_.postOrderTransform(partialFunction))
Join(node.left, node.right, node.joinType, resolvedCondition, correlated)
}
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
left.construct(relBuilder)
right.construct(relBuilder)
val corSet = mutable.Set[CorrelationId]()
if (correlated) {
corSet += relBuilder.peek().getCluster.createCorrel()
}
relBuilder.join(
convertJoinType(joinType),
condition.map(_.toRexNode(relBuilder)).getOrElse(relBuilder.literal(true)),
corSet.asJava)
}
private def convertJoinType(joinType: JoinType) = joinType match {
case JoinType.INNER => JoinRelType.INNER
case JoinType.LEFT_OUTER => JoinRelType.LEFT
case JoinType.RIGHT_OUTER => JoinRelType.RIGHT
case JoinType.FULL_OUTER => JoinRelType.FULL
}
private def ambiguousName: Set[String] =
left.output.map(_.name).toSet.intersect(right.output.map(_.name).toSet)
override def validate(tableEnv: TableEnvironment): LogicalNode = {
if (tableEnv.isInstanceOf[StreamTableEnvironment]
&& !right.isInstanceOf[LogicalTableFunctionCall]) {
failValidation(s"Join on stream tables is currently not supported.")
}
val resolvedJoin = super.validate(tableEnv).asInstanceOf[Join]
if (!resolvedJoin.condition.forall(_.resultType == BOOLEAN_TYPE_INFO)) {
failValidation(s"Filter operator requires a boolean expression as input, " +
s"but ${resolvedJoin.condition} is of type ${resolvedJoin.joinType}")
} else if (ambiguousName.nonEmpty) {
failValidation(s"join relations with ambiguous names: ${ambiguousName.mkString(", ")}")
}
resolvedJoin.condition.foreach(testJoinCondition)
resolvedJoin
}
private def testJoinCondition(expression: Expression): Unit = {
def checkIfJoinCondition(exp: BinaryComparison) = exp.children match {
case (x: JoinFieldReference) :: (y: JoinFieldReference) :: Nil
if x.isFromLeftInput != y.isFromLeftInput => true
case _ => false
}
def checkIfFilterCondition(exp: BinaryComparison) = exp.children match {
case (x: JoinFieldReference) :: (y: JoinFieldReference) :: Nil => false
case (x: JoinFieldReference) :: (_) :: Nil => true
case (_) :: (y: JoinFieldReference) :: Nil => true
case _ => false
}
var equiJoinPredicateFound = false
var nonEquiJoinPredicateFound = false
var localPredicateFound = false
def validateConditions(exp: Expression, isAndBranch: Boolean): Unit = exp match {
case x: And => x.children.foreach(validateConditions(_, isAndBranch))
case x: Or => x.children.foreach(validateConditions(_, isAndBranch = false))
case x: EqualTo =>
if (isAndBranch && checkIfJoinCondition(x)) {
equiJoinPredicateFound = true
}
if (checkIfFilterCondition(x)) {
localPredicateFound = true
}
case x: BinaryComparison =>
if (checkIfFilterCondition(x)) {
localPredicateFound = true
} else {
nonEquiJoinPredicateFound = true
}
case x => failValidation(
s"Unsupported condition type: ${x.getClass.getSimpleName}. Condition: $x")
}
validateConditions(expression, isAndBranch = true)
if (!equiJoinPredicateFound) {
failValidation(
s"Invalid join condition: $expression. At least one equi-join predicate is " +
s"required.")
}
if (joinType != JoinType.INNER && (nonEquiJoinPredicateFound || localPredicateFound)) {
failValidation(
s"Invalid join condition: $expression. Non-equality join predicates or local" +
s" predicates are not supported in outer joins.")
}
}
}
case class CatalogNode(
tablePath: Array[String],
rowType: RelDataType) extends LeafNode {
val output: Seq[Attribute] = rowType.getFieldList.asScala.map { field =>
ResolvedFieldReference(field.getName, FlinkTypeFactory.toTypeInfo(field.getType))
}
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
relBuilder.scan(tablePath.toIterable.asJava)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = this
}
/**
* Wrapper for valid logical plans generated from SQL String.
*/
case class LogicalRelNode(
relNode: RelNode) extends LeafNode {
val output: Seq[Attribute] = relNode.getRowType.getFieldList.asScala.map { field =>
ResolvedFieldReference(field.getName, FlinkTypeFactory.toTypeInfo(field.getType))
}
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
relBuilder.push(relNode)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = this
}
case class WindowAggregate(
groupingExpressions: Seq[Expression],
window: LogicalWindow,
propertyExpressions: Seq[NamedExpression],
aggregateExpressions: Seq[NamedExpression],
child: LogicalNode)
extends UnaryNode {
override def output: Seq[Attribute] = {
(groupingExpressions ++ aggregateExpressions ++ propertyExpressions) map {
case ne: NamedExpression => ne.toAttribute
case e => Alias(e, e.toString).toAttribute
}
}
// resolve references of this operator's parameters
override def resolveReference(
tableEnv: TableEnvironment,
name: String)
: Option[NamedExpression] = tableEnv match {
// resolve reference to rowtime attribute in a streaming environment
case _: StreamTableEnvironment if name == "rowtime" =>
Some(RowtimeAttribute())
case _ =>
window.alias match {
// resolve reference to this window's alias
case UnresolvedFieldReference(alias) if name == alias =>
// check if reference can already be resolved by input fields
val found = super.resolveReference(tableEnv, name)
if (found.isDefined) {
failValidation(s"Reference $name is ambiguous.")
} else {
Some(WindowReference(name))
}
case _ =>
// resolve references as usual
super.resolveReference(tableEnv, name)
}
}
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
val flinkRelBuilder = relBuilder.asInstanceOf[FlinkRelBuilder]
child.construct(flinkRelBuilder)
flinkRelBuilder.aggregate(
window,
relBuilder.groupKey(groupingExpressions.map(_.toRexNode(relBuilder)).asJava),
propertyExpressions.map {
case Alias(prop: WindowProperty, name, _) => prop.toNamedWindowProperty(name)(relBuilder)
case _ => throw new RuntimeException("This should never happen.")
},
aggregateExpressions.map {
case Alias(agg: Aggregation, name, _) => agg.toAggCall(name)(relBuilder)
case _ => throw new RuntimeException("This should never happen.")
}.asJava)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
val resolvedWindowAggregate = super.validate(tableEnv).asInstanceOf[WindowAggregate]
val groupingExprs = resolvedWindowAggregate.groupingExpressions
val aggregateExprs = resolvedWindowAggregate.aggregateExpressions
aggregateExprs.foreach(validateAggregateExpression)
groupingExprs.foreach(validateGroupingExpression)
def validateAggregateExpression(expr: Expression): Unit = expr match {
// check no nested aggregation exists.
case aggExpr: Aggregation =>
aggExpr.children.foreach { child =>
child.preOrderVisit {
case agg: Aggregation =>
failValidation(
"It's not allowed to use an aggregate function as " +
"input of another aggregate function")
case _ => // ok
}
}
case a: Attribute if !groupingExprs.exists(_.checkEquals(a)) =>
failValidation(
s"Expression '$a' is invalid because it is neither" +
" present in group by nor an aggregate function")
case e if groupingExprs.exists(_.checkEquals(e)) => // ok
case e => e.children.foreach(validateAggregateExpression)
}
def validateGroupingExpression(expr: Expression): Unit = {
if (!expr.resultType.isKeyType) {
failValidation(
s"Expression $expr cannot be used as a grouping expression " +
"because it's not a valid key type which must be hashable and comparable")
}
}
// validate window
resolvedWindowAggregate.window.validate(tableEnv) match {
case ValidationFailure(msg) =>
failValidation(s"$window is invalid: $msg")
case ValidationSuccess => // ok
}
resolvedWindowAggregate
}
}
/**
* LogicalNode for calling a user-defined table functions.
*
* @param functionName function name
* @param tableFunction table function to be called (might be overloaded)
* @param parameters actual parameters
* @param fieldNames output field names
* @param child child logical node
*/
case class LogicalTableFunctionCall(
functionName: String,
tableFunction: TableFunction[_],
parameters: Seq[Expression],
resultType: TypeInformation[_],
fieldNames: Array[String],
child: LogicalNode)
extends UnaryNode {
private val (_, fieldIndexes, fieldTypes) = getFieldInfo(resultType)
private var evalMethod: Method = _
override def output: Seq[Attribute] = fieldNames.zip(fieldTypes).map {
case (n, t) => ResolvedFieldReference(n, t)
}
override def validate(tableEnv: TableEnvironment): LogicalNode = {
val node = super.validate(tableEnv).asInstanceOf[LogicalTableFunctionCall]
// check if not Scala object
checkNotSingleton(tableFunction.getClass)
// check if class could be instantiated
checkForInstantiation(tableFunction.getClass)
// look for a signature that matches the input types
val signature = node.parameters.map(_.resultType)
val foundMethod = getEvalMethod(tableFunction, signature)
if (foundMethod.isEmpty) {
failValidation(
s"Given parameters of function '$functionName' do not match any signature. \\n" +
s"Actual: ${signatureToString(signature)} \\n" +
s"Expected: ${signaturesToString(tableFunction)}")
} else {
node.evalMethod = foundMethod.get
}
node
}
override protected[logical] def construct(relBuilder: RelBuilder): RelBuilder = {
val fieldIndexes = getFieldInfo(resultType)._2
val function = new FlinkTableFunctionImpl(resultType, fieldIndexes, fieldNames, evalMethod)
val typeFactory = relBuilder.getTypeFactory.asInstanceOf[FlinkTypeFactory]
val sqlFunction = TableSqlFunction(
tableFunction.functionIdentifier,
tableFunction,
resultType,
typeFactory,
function)
val scan = LogicalTableFunctionScan.create(
relBuilder.peek().getCluster,
new util.ArrayList[RelNode](),
relBuilder.call(sqlFunction, parameters.map(_.toRexNode(relBuilder)).asJava),
function.getElementType(null),
function.getRowType(relBuilder.getTypeFactory, null),
null)
relBuilder.push(scan)
}
}
|
hwstreaming/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/logical/operators.scala
|
Scala
|
apache-2.0
| 26,917 |
import scala.io.Source._
object FacebookSettings {
/**
  TODO: implement later; this is slightly different from Twitter's config and may be removed entirely.
*/
}
|
t3g7/spark-streaming-facebook
|
src/main/scala/FacebookSettings.scala
|
Scala
|
apache-2.0
| 167 |
package peregin.gpv.gui.gauge
import java.awt.{Font, Graphics2D}
trait DigitalGauge extends GaugePainter with DigitalFont {
def valueText(): String
def unitText(): String
override def paint(g: Graphics2D, w: Int, h: Int) = {
super.paint(g, w, h)
val cy = h / 2
val box = math.min(w, h)
val fs = box / 2
// draw current speed
g.setFont(digitalFont.deriveFont(Font.BOLD, fs.toFloat))
val text = valueText()
val tb = g.getFontMetrics.getStringBounds(text, g)
textWidthShadow(g, text, (w - tb.getWidth) / 2, cy + box / 2 - tb.getHeight * 1.2)
// draw unit
g.setFont(digitalFont.deriveFont(Font.BOLD, fs / 4))
val utext = unitText()
val utb = g.getFontMetrics.getStringBounds(utext, g)
textWidthShadow(g, utext, (w - utb.getWidth) / 2, cy + box / 2 + utb.getHeight * 2 - tb.getHeight * 1.2)
}
}
|
peregin/gps-overlay-on-video
|
src/main/scala/peregin/gpv/gui/gauge/DigitalGauge.scala
|
Scala
|
mit
| 861 |
package models.daos
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import com.mohiva.play.silhouette.impl.providers.OAuth2Info
import models.daos.OAuth2InfoDAO._
import play.api.libs.concurrent.Execution.Implicits._
import scala.collection.mutable
import scala.concurrent.Future
/**
* The DAO to store the OAuth2 information.
*
* Note: Not thread safe, demo only.
*/
class OAuth2InfoDAO extends DelegableAuthInfoDAO[OAuth2Info] {
/**
* Finds the auth info which is linked with the specified login info.
*
* @param loginInfo The linked login info.
* @return The retrieved auth info or None if no auth info could be retrieved for the given login info.
*/
def find(loginInfo: LoginInfo): Future[Option[OAuth2Info]] = {
Future.successful(data.get(loginInfo))
}
/**
* Adds new auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be added.
* @param authInfo The auth info to add.
* @return The added auth info.
*/
def add(loginInfo: LoginInfo, authInfo: OAuth2Info): Future[OAuth2Info] = {
data += (loginInfo -> authInfo)
Future.successful(authInfo)
}
/**
* Updates the auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be updated.
* @param authInfo The auth info to update.
* @return The updated auth info.
*/
def update(loginInfo: LoginInfo, authInfo: OAuth2Info): Future[OAuth2Info] = {
data += (loginInfo -> authInfo)
Future.successful(authInfo)
}
/**
* Saves the auth info for the given login info.
*
   * This method either adds the auth info if it doesn't exist or updates the auth info
* if it already exists.
*
* @param loginInfo The login info for which the auth info should be saved.
* @param authInfo The auth info to save.
* @return The saved auth info.
*/
def save(loginInfo: LoginInfo, authInfo: OAuth2Info): Future[OAuth2Info] = {
find(loginInfo).flatMap {
case Some(_) => update(loginInfo, authInfo)
case None => add(loginInfo, authInfo)
case unknown => Future.failed(new RuntimeException(s"find(loginInfo) returned an unexpected type $unknown"))
}
}
/**
* Removes the auth info for the given login info.
*
* @param loginInfo The login info for which the auth info should be removed.
* @return A future to wait for the process to be completed.
*/
def remove(loginInfo: LoginInfo): Future[Unit] = {
data -= loginInfo
Future.successful(())
}
}
/**
* The companion object.
*/
object OAuth2InfoDAO {
/**
* The data store for the OAuth2 info.
*/
var data: mutable.HashMap[LoginInfo, OAuth2Info] = mutable.HashMap()
}
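// A minimal usage sketch (provider id, key and token values are illustrative):
//   val dao = new OAuth2InfoDAO
//   val login = LoginInfo("facebook", "some-user-id")
//   dao.save(login, OAuth2Info(accessToken = "some-access-token"))  // adds on first call, updates afterwards
//   dao.find(login)                                                 // Future[Option[OAuth2Info]]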
|
glidester/play-silhouette-seed
|
app/models/daos/OAuth2InfoDAO.scala
|
Scala
|
apache-2.0
| 2,797 |
package net.mauhiz.poustache
import scala.util.Try
case class MustacheContext(initialContext: Any, strict: Boolean = true) {
import net.mauhiz.poustache.MustacheContext._
var contextStack: Seq[Any] = Vector(initialContext)
def context(contextName: String, innerContent: => String): String = {
contextName.indexOf('.') match {
case dotIndex if dotIndex >= 0 && contextName.size > 1 => {
val headContextName = contextName.substring(0, dotIndex)
val tailContextName = contextName.substring(dotIndex + 1)
context(headContextName, context(tailContextName, innerContent))
}
case -1 =>
lookup(contextName) match {
case None | Some(false) => ""
case Some(true) => innerContent
case Some(trav: TraversableOnce[_]) => trav.map {
seqItem =>
contextStack = seqItem +: contextStack
val out = innerContent
contextStack = contextStack.tail
out
}.mkString("")
case Some(mb: Option[_]) => mb.map {
item =>
contextStack = item +: contextStack
val out = innerContent
contextStack = contextStack.tail
out
}.mkString("")
case Some(item: Any) => {
contextStack = item +: contextStack
val out = innerContent
contextStack = contextStack.tail
out
}
}
}
}
def notContext(contextName: String, innerContent: => String): String = {
contextName.indexOf('.') match {
case dotIndex if dotIndex >= 0 && contextName.size > 1 => {
val headContextName = contextName.substring(0, dotIndex)
val tailContextName = contextName.substring(dotIndex + 1)
// FIXME the logic is wrong here.
notContext(headContextName, notContext(tailContextName, innerContent))
}
case -1 =>
lookup(contextName) match {
case None | Some(false) => innerContent
case Some(trav: TraversableOnce[_]) if trav.isEmpty => innerContent
case Some(opt: Option[_]) if opt.isEmpty => innerContent
case _ => ""
}
}
}
def printEscape(pos: Int, key: String): String = escape(printNoEscape(pos, key))
def printNoEscape(pos: Int, key: String): String = {
lookup(key) match {
case None => if (strict) throw KeyNotFoundException(pos, key) else ""
case Some(Some(item)) => item.toString
case Some(item) => item.toString
}
}
private def lookup(key: String, withContext: Seq[Any] = contextStack): Option[Any] = {
if (key == ".") {
contextStack.headOption
} else if (key.isEmpty || key.endsWith(".")) {
throw new KeyNotFoundException(-1, "")
} else {
for (item <- contextStack) {
val lookedUp = singleLookup(key, item)
if (lookedUp.isDefined) return lookedUp
}
if (strict) throw KeyNotFoundException(-1, key) else None
}
}
private def singleLookup(key: String, context: Any): Option[Any] = {
context match {
case map: Map[String, _] => map.get(key)
case trav: TraversableOnce[_] => Some(trav)
case opt: Option[_] => Some(opt)
case boo: Boolean => Some(boo)
case any: AnyRef => {
Try {
any.getClass.getMethod(key).invoke(any)
}.toOption
}
case _ => None
}
}
}
object MustacheContext {
private def escape(v: String): String = v.foldLeft(StringBuilder.newBuilder)((acc, ch) => sanitize(ch, acc)).toString()
private def sanitize(ch: Char, buffer: StringBuilder): StringBuilder = {
escapeSingleChar(ch).fold(buffer.append(ch))(buffer.append)
}
private[poustache] val escapeSingleChar: (Char) => Option[String] = ({
    case '\\'' => "&#39;"
    case '"' => "&quot;"
    case '&' => "&amp;"
    case '<' => "&lt;"
    case '>' => "&gt;"
}: PartialFunction[Char, String]).lift
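  // For example (illustrative): escape("a < b & c") yields "a &lt; b &amp; c"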
}
|
mauhiz/poustache
|
src/main/scala/net/mauhiz/poustache/MustacheContext.scala
|
Scala
|
gpl-2.0
| 3,991 |
package purespark.examples
import purespark.GraphX._
import purespark.Prelude._
import scala.util.Random
object Types {
type Color = Int
type Palette = (Color, List[Double], Boolean, Random)
}
/**
 * A Pregel implementation of the Communication-Free Learning algorithm for graph coloring.
*
* See D. J. Leith and P. Clifford. Convergence of distributed learning algorithms for
* optimal wireless channel allocation. In IEEE CDC, pages 2980–2985, 2006.
*/
object RandomizedGraphColoring {
import Types._
private def sampleColor (dist: List[Double], rnd: Double): Int = {
foldl(dist)((1, 0.0)) {
case ((color, mass), weight) => {
val m = mass + weight
(if (m < rnd) color + 1 else color, m)
}
}._1
}
def run (graph: GraphRDD[Color, _], beta: Double, maxNumColors: Int): GraphRDD[Color, _] = {
val initColorDist = map((1 to maxNumColors).toList)(_ => 1.0 / maxNumColors)
val distGraph = GraphRDD(mapVertices(graph.vertexRDD) {
v => (v.attr, initColorDist, true, new Random(Random.nextLong))
}, graph.edgeRDD)
def sendMessage (edge: EdgeTriplet[Palette, _]): List[Vertex[Boolean]] = {
if (edge.srcAttr._1 == edge.dstAttr._1)
return List(Vertex(edge.srcId, true))
if (edge.srcAttr._3)
return List(Vertex(edge.srcId, false))
List.empty
}
def vprog (v: Vertex[Palette], active: Boolean): Palette = {
val color = v.attr._1
val dist = v.attr._2
val rng = v.attr._4
val new_dist = foldl(dist)((1, List[Double]())) {
case ((i, list), weight) => (i + 1,
if (active)
list :+ (weight * (1 - beta) + (if (color == i) 0.0 else beta / (maxNumColors - 1)))
else
list :+ (if (color == i) 1.0 else 0.0))
}._2
val new_color = if (active) sampleColor(new_dist, rng.nextDouble) else color
(new_color, new_dist, active, rng)
}
val resGraph = Pregel(distGraph)(true)(vprog)(sendMessage)(_ || _)
GraphRDD(mapVertices(resGraph.vertexRDD)(v => v.attr._1), resGraph.edgeRDD)
}
}
object GraphColoringExample extends App {
// convert an RDD of tuples to an EdgeRDD
private def fromTuplesToEdges (edges: RDD[(VertexId, VertexId)]): EdgeRDD[Null] =
map(edges)(map(_) { case (s, t) => Edge(s, t, null)})
// convert directed edges to undirected edges
private def fromDGToUG (edges: RDD[(VertexId, VertexId)]): EdgeRDD[Null] =
map(edges)(concatMap(_) {
case (s, t) => List(Edge(s, t, null), Edge(t, s, null))
})
// compute the maximum degree of an undirected graph
private def maxDegreeOf[A, B] (graph: GraphRDD[A, B]): Int =
foldl(concat(aggregateMessages(graph)(e => List(Vertex(e.dstId, 1)))(_ + _)))(0)({
case (d, v) => scala.math.max(d, v.attr)
})
def run = {
    // arbitrarily chosen
val beta = 0.5
/*
val edges = fromDGToUG(List(
// an edge RDD with three partitions
List((2, 1), (2, 3), (3, 1), (6, 7)),
List((5, 7), (4, 2), (4, 7), (5, 1)),
List((5, 6), (3, 6))
))
*/
// generate a complete graph
val numVertices = 9
val vids: List[VertexId] = (1 to numVertices).toList
val edges = fromTuplesToEdges(map(vids) {
i => foldl(vids)(List[(VertexId, VertexId)]()) {
(list, j) => if (j != i) list :+(i, j) else list
}
})
// all vertices have the same color at the beginning
val vertices = List((1 to numVertices).toList.map(Vertex(_, 1)))
// an example undirected graph
val graph = GraphRDD(vertices, edges)
val maxNumColors = maxDegreeOf(graph) + 1
val result = RandomizedGraphColoring.run(graph, beta, maxNumColors)
println("Number of available colors: " + maxNumColors)
println("A proper coloring of the provided graph:")
concat(result.vertexRDD).foreach(println)
}
GraphColoringExample.run
}
|
ericpony/scala-examples
|
PureSpark/examples/CFLVertexColoring.scala
|
Scala
|
mit
| 4,003 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.analysis
import javax.annotation.Nullable
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.types._
/**
* A collection of [[Rule Rules]] that can be used to coerce differing types that
* participate in operations into compatible ones. Most of these rules are based on Hive semantics,
* but they do not introduce any dependencies on the hive codebase. For this reason they remain in
* Catalyst until we have a more standard set of coercions.
*/
object HiveTypeCoercion {
val typeCoercionRules =
PropagateTypes ::
InConversion ::
WidenSetOperationTypes ::
PromoteStrings ::
DecimalPrecision ::
BooleanEquality ::
StringToIntegralCasts ::
FunctionArgumentConversion ::
CaseWhenCoercion ::
IfCoercion ::
Division ::
PropagateTypes ::
ImplicitTypeCasts ::
DateTimeOperations ::
Nil
// See https://cwiki.apache.org/confluence/display/Hive/LanguageManual+Types.
  // The conversions for integral and floating point types have a linear widening hierarchy:
private val numericPrecedence =
IndexedSeq(
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType)
/**
* Find the tightest common type of two types that might be used in a binary expression.
* This handles all numeric types except fixed-precision decimals interacting with each other or
* with primitive types, because in that case the precision and scale of the result depends on
* the operation. Those rules are implemented in [[HiveTypeCoercion.DecimalPrecision]].
*/
val findTightestCommonTypeOfTwo: (DataType, DataType) => Option[DataType] = {
case (t1, t2) if t1 == t2 => Some(t1)
case (NullType, t1) => Some(t1)
case (t1, NullType) => Some(t1)
case (t1: IntegralType, t2: DecimalType) if t2.isWiderThan(t1) =>
Some(t2)
case (t1: DecimalType, t2: IntegralType) if t1.isWiderThan(t2) =>
Some(t1)
// Promote numeric types to the highest of the two
case (t1, t2) if Seq(t1, t2).forall(numericPrecedence.contains) =>
val index = numericPrecedence.lastIndexWhere(t => t == t1 || t == t2)
Some(numericPrecedence(index))
case _ => None
}
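  // Worked examples (illustrative only, following the cases above):
  //   findTightestCommonTypeOfTwo(IntegerType, LongType)          == Some(LongType)
  //   findTightestCommonTypeOfTwo(NullType, StringType)           == Some(StringType)
  //   findTightestCommonTypeOfTwo(StringType, IntegerType)        == None  // see findTightestCommonTypeToString
  //   findTightestCommonTypeOfTwo(DoubleType, DecimalType(10, 2)) == None  // handled by DecimalPrecision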
/** Similar to [[findTightestCommonType]], but can promote all the way to StringType. */
private def findTightestCommonTypeToString(left: DataType, right: DataType): Option[DataType] = {
findTightestCommonTypeOfTwo(left, right).orElse((left, right) match {
case (StringType, t2: AtomicType) if t2 != BinaryType && t2 != BooleanType => Some(StringType)
case (t1: AtomicType, StringType) if t1 != BinaryType && t1 != BooleanType => Some(StringType)
case _ => None
})
}
/**
   * Similar to [[findTightestCommonType]], but if the tightest common type cannot be found,
   * falls back to [[findTightestCommonTypeToString]].
*/
private def findTightestCommonTypeAndPromoteToString(types: Seq[DataType]): Option[DataType] = {
types.foldLeft[Option[DataType]](Some(NullType))((r, c) => r match {
case None => None
case Some(d) =>
findTightestCommonTypeToString(d, c)
})
}
/**
* Find the tightest common type of a set of types by continuously applying
* `findTightestCommonTypeOfTwo` on these types.
*/
private def findTightestCommonType(types: Seq[DataType]): Option[DataType] = {
types.foldLeft[Option[DataType]](Some(NullType))((r, c) => r match {
case None => None
case Some(d) => findTightestCommonTypeOfTwo(d, c)
})
}
private def findWiderTypeForTwo(t1: DataType, t2: DataType): Option[DataType] = (t1, t2) match {
case (t1: DecimalType, t2: DecimalType) =>
Some(DecimalPrecision.widerDecimalType(t1, t2))
case (t: IntegralType, d: DecimalType) =>
Some(DecimalPrecision.widerDecimalType(DecimalType.forType(t), d))
case (d: DecimalType, t: IntegralType) =>
Some(DecimalPrecision.widerDecimalType(DecimalType.forType(t), d))
case (t: FractionalType, d: DecimalType) =>
Some(DoubleType)
case (d: DecimalType, t: FractionalType) =>
Some(DoubleType)
case _ =>
findTightestCommonTypeToString(t1, t2)
}
private def findWiderCommonType(types: Seq[DataType]) = {
types.foldLeft[Option[DataType]](Some(NullType))((r, c) => r match {
case Some(d) => findWiderTypeForTwo(d, c)
case None => None
})
}
/**
* Applies any changes to [[AttributeReference]] data types that are made by other rules to
* instances higher in the query tree.
*/
object PropagateTypes extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
// No propagation required for leaf nodes.
case q: LogicalPlan if q.children.isEmpty => q
// Don't propagate types from unresolved children.
case q: LogicalPlan if !q.childrenResolved => q
case q: LogicalPlan =>
val inputMap = q.inputSet.toSeq.map(a => (a.exprId, a)).toMap
q transformExpressions {
case a: AttributeReference =>
inputMap.get(a.exprId) match {
              // This can happen when an Attribute reference is born in a non-leaf node, for example
// due to a call to an external script like in the Transform operator.
// TODO: Perhaps those should actually be aliases?
case None => a
// Leave the same if the dataTypes match.
case Some(newType) if a.dataType == newType.dataType => a
case Some(newType) =>
logDebug(s"Promoting $a to $newType in ${q.simpleString}")
newType
}
}
}
}
/**
* Widens numeric types and converts strings to numbers when appropriate.
*
* Loosely based on rules from "Hadoop: The Definitive Guide" 2nd edition, by Tom White
*
* The implicit conversion rules can be summarized as follows:
* - Any integral numeric type can be implicitly converted to a wider type.
* - All the integral numeric types, FLOAT, and (perhaps surprisingly) STRING can be implicitly
* converted to DOUBLE.
* - TINYINT, SMALLINT, and INT can all be converted to FLOAT.
* - BOOLEAN types cannot be converted to any other type.
* - Any integral numeric type can be implicitly converted to decimal type.
   * - Two different decimal types will be converted into a decimal type wide enough for both.
   * - A decimal type will be converted into double if a float or double appears together with it.
*
* Additionally, all types when UNION-ed with strings will be promoted to strings.
* Other string conversions are handled by PromoteStrings.
*
* Widening types might result in loss of precision in the following cases:
* - IntegerType to FloatType
* - LongType to FloatType
* - LongType to DoubleType
* - DecimalType to Double
*
* This rule is only applied to Union/Except/Intersect
*/
object WidenSetOperationTypes extends Rule[LogicalPlan] {
private[this] def widenOutputTypes(
planName: String,
left: LogicalPlan,
right: LogicalPlan): (LogicalPlan, LogicalPlan) = {
require(left.output.length == right.output.length)
val castedTypes = left.output.zip(right.output).map {
case (lhs, rhs) if lhs.dataType != rhs.dataType =>
findWiderTypeForTwo(lhs.dataType, rhs.dataType)
case other => None
}
def castOutput(plan: LogicalPlan): LogicalPlan = {
val casted = plan.output.zip(castedTypes).map {
case (e, Some(dt)) if e.dataType != dt =>
Alias(Cast(e, dt), e.name)()
case (e, _) => e
}
Project(casted, plan)
}
if (castedTypes.exists(_.isDefined)) {
(castOutput(left), castOutput(right))
} else {
(left, right)
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case p if p.analyzed => p
case s @ SetOperation(left, right) if s.childrenResolved
&& left.output.length == right.output.length && !s.resolved =>
val (newLeft, newRight) = widenOutputTypes(s.nodeName, left, right)
s.makeCopy(Array(newLeft, newRight))
}
}
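  // Worked example (illustrative only): for `SELECT i FROM t1 UNION SELECT d FROM t2`
  // with i: IntegerType and d: DoubleType, findWiderTypeForTwo picks DoubleType, so both
  // children are wrapped in a Project and the narrower column is cast:
  //   Project(Seq(Alias(Cast(i, DoubleType), "i")()), t1)  UNION  Project(Seq(d), t2)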
/**
* Promotes strings that appear in arithmetic expressions.
*/
object PromoteStrings extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case a @ BinaryArithmetic(left @ StringType(), right @ DecimalType.Expression(_, _)) =>
a.makeCopy(Array(Cast(left, DecimalType.SYSTEM_DEFAULT), right))
case a @ BinaryArithmetic(left @ DecimalType.Expression(_, _), right @ StringType()) =>
a.makeCopy(Array(left, Cast(right, DecimalType.SYSTEM_DEFAULT)))
case a @ BinaryArithmetic(left @ StringType(), right) =>
a.makeCopy(Array(Cast(left, DoubleType), right))
case a @ BinaryArithmetic(left, right @ StringType()) =>
a.makeCopy(Array(left, Cast(right, DoubleType)))
// For equality between string and timestamp we cast the string to a timestamp
      // so that things like rounding of subsecond precision do not affect the comparison.
case p @ Equality(left @ StringType(), right @ TimestampType()) =>
p.makeCopy(Array(Cast(left, TimestampType), right))
case p @ Equality(left @ TimestampType(), right @ StringType()) =>
p.makeCopy(Array(left, Cast(right, TimestampType)))
      // We should cast all relative timestamp/date/string comparisons into string comparisons
// This behaves as a user would expect because timestamp strings sort lexicographically.
// i.e. TimeStamp(2013-01-01 00:00 ...) < "2014" = true
case p @ BinaryComparison(left @ StringType(), right @ DateType()) =>
p.makeCopy(Array(left, Cast(right, StringType)))
case p @ BinaryComparison(left @ DateType(), right @ StringType()) =>
p.makeCopy(Array(Cast(left, StringType), right))
case p @ BinaryComparison(left @ StringType(), right @ TimestampType()) =>
p.makeCopy(Array(left, Cast(right, StringType)))
case p @ BinaryComparison(left @ TimestampType(), right @ StringType()) =>
p.makeCopy(Array(Cast(left, StringType), right))
// Comparisons between dates and timestamps.
case p @ BinaryComparison(left @ TimestampType(), right @ DateType()) =>
p.makeCopy(Array(Cast(left, StringType), Cast(right, StringType)))
case p @ BinaryComparison(left @ DateType(), right @ TimestampType()) =>
p.makeCopy(Array(Cast(left, StringType), Cast(right, StringType)))
case p @ BinaryComparison(left @ StringType(), right) if right.dataType != StringType =>
p.makeCopy(Array(Cast(left, DoubleType), right))
case p @ BinaryComparison(left, right @ StringType()) if left.dataType != StringType =>
p.makeCopy(Array(left, Cast(right, DoubleType)))
case i @ In(a @ DateType(), b) if b.forall(_.dataType == StringType) =>
i.makeCopy(Array(Cast(a, StringType), b))
case i @ In(a @ TimestampType(), b) if b.forall(_.dataType == StringType) =>
i.makeCopy(Array(a, b.map(Cast(_, TimestampType))))
case i @ In(a @ DateType(), b) if b.forall(_.dataType == TimestampType) =>
i.makeCopy(Array(Cast(a, StringType), b.map(Cast(_, StringType))))
case i @ In(a @ TimestampType(), b) if b.forall(_.dataType == DateType) =>
i.makeCopy(Array(Cast(a, StringType), b.map(Cast(_, StringType))))
case Sum(e @ StringType()) => Sum(Cast(e, DoubleType))
case SumDistinct(e @ StringType()) => Sum(Cast(e, DoubleType))
case Average(e @ StringType()) => Average(Cast(e, DoubleType))
}
}
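  // Worked examples (illustrative only, following the cases above):
  //   '10' + 1                  =>  Cast('10', DoubleType) + 1
  //   stringCol = timestampCol  =>  Cast(stringCol, TimestampType) = timestampCol
  //   dateCol < stringCol       =>  Cast(dateCol, StringType) < stringCol
  //   Sum(stringCol)            =>  Sum(Cast(stringCol, DoubleType))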
/**
   * Convert all expressions in the in() list to the type of the left operand
*/
object InConversion extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case i @ In(a, b) if b.exists(_.dataType != a.dataType) =>
i.makeCopy(Array(a, b.map(Cast(_, a.dataType))))
}
}
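  // Worked example (illustrative only): with intCol: IntegerType,
  //   intCol IN ('1', '2')  =>  intCol IN (Cast('1', IntegerType), Cast('2', IntegerType))
  // i.e. every element of the value list is cast to the type of the left operand.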
// scalastyle:off
/**
* Calculates and propagates precision for fixed-precision decimals. Hive has a number of
* rules for this based on the SQL standard and MS SQL:
* https://cwiki.apache.org/confluence/download/attachments/27362075/Hive_Decimal_Precision_Scale_Support.pdf
* https://msdn.microsoft.com/en-us/library/ms190476.aspx
*
   * In particular, if we have expressions e1 and e2 with precision/scale p1/s1 and p2/s2
* respectively, then the following operations have the following precision / scale:
*
* Operation Result Precision Result Scale
* ------------------------------------------------------------------------
* e1 + e2 max(s1, s2) + max(p1-s1, p2-s2) + 1 max(s1, s2)
* e1 - e2 max(s1, s2) + max(p1-s1, p2-s2) + 1 max(s1, s2)
* e1 * e2 p1 + p2 + 1 s1 + s2
* e1 / e2 p1 - s1 + s2 + max(6, s1 + p2 + 1) max(6, s1 + p2 + 1)
* e1 % e2 min(p1-s1, p2-s2) + max(s1, s2) max(s1, s2)
* e1 union e2 max(s1, s2) + max(p1-s1, p2-s2) max(s1, s2)
* sum(e1) p1 + 10 s1
* avg(e1) p1 + 4 s1 + 4
*
* Catalyst also has unlimited-precision decimals. For those, all ops return unlimited precision.
*
* To implement the rules for fixed-precision types, we introduce casts to turn them to unlimited
* precision, do the math on unlimited-precision numbers, then introduce casts back to the
* required fixed precision. This allows us to do all rounding and overflow handling in the
* cast-to-fixed-precision operator.
*
* In addition, when mixing non-decimal types with decimals, we use the following rules:
* - BYTE gets turned into DECIMAL(3, 0)
* - SHORT gets turned into DECIMAL(5, 0)
* - INT gets turned into DECIMAL(10, 0)
* - LONG gets turned into DECIMAL(20, 0)
* - FLOAT and DOUBLE cause fixed-length decimals to turn into DOUBLE
*
   * Note: Union/Except/Intersect is handled by WidenSetOperationTypes
*/
// scalastyle:on
object DecimalPrecision extends Rule[LogicalPlan] {
import scala.math.{max, min}
private def isFloat(t: DataType): Boolean = t == FloatType || t == DoubleType
    // Returns a decimal type that is wide enough to hold both of the given types
def widerDecimalType(d1: DecimalType, d2: DecimalType): DecimalType = {
widerDecimalType(d1.precision, d1.scale, d2.precision, d2.scale)
}
// max(s1, s2) + max(p1-s1, p2-s2), max(s1, s2)
def widerDecimalType(p1: Int, s1: Int, p2: Int, s2: Int): DecimalType = {
val scale = max(s1, s2)
val range = max(p1 - s1, p2 - s2)
DecimalType.bounded(range + scale, scale)
}
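    // Worked example (illustrative only):
    //   widerDecimalType(p1 = 5, s1 = 2, p2 = 7, s2 = 1)
    //     scale = max(2, 1)         = 2
    //     range = max(5 - 2, 7 - 1) = 6
    //     => DecimalType.bounded(6 + 2, 2), i.e. DECIMAL(8, 2)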
private def promotePrecision(e: Expression, dataType: DataType): Expression = {
PromotePrecision(Cast(e, dataType))
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
// fix decimal precision for expressions
case q => q.transformExpressions {
// Skip nodes whose children have not been resolved yet
case e if !e.childrenResolved => e
        // Skip nodes that are already promoted
case e: BinaryArithmetic if e.left.isInstanceOf[PromotePrecision] => e
case Add(e1 @ DecimalType.Expression(p1, s1), e2 @ DecimalType.Expression(p2, s2)) =>
val dt = DecimalType.bounded(max(s1, s2) + max(p1 - s1, p2 - s2) + 1, max(s1, s2))
CheckOverflow(Add(promotePrecision(e1, dt), promotePrecision(e2, dt)), dt)
case Subtract(e1 @ DecimalType.Expression(p1, s1), e2 @ DecimalType.Expression(p2, s2)) =>
val dt = DecimalType.bounded(max(s1, s2) + max(p1 - s1, p2 - s2) + 1, max(s1, s2))
CheckOverflow(Subtract(promotePrecision(e1, dt), promotePrecision(e2, dt)), dt)
case Multiply(e1 @ DecimalType.Expression(p1, s1), e2 @ DecimalType.Expression(p2, s2)) =>
val resultType = DecimalType.bounded(p1 + p2 + 1, s1 + s2)
val widerType = widerDecimalType(p1, s1, p2, s2)
CheckOverflow(Multiply(promotePrecision(e1, widerType), promotePrecision(e2, widerType)),
resultType)
case Divide(e1 @ DecimalType.Expression(p1, s1), e2 @ DecimalType.Expression(p2, s2)) =>
var intDig = min(DecimalType.MAX_SCALE, p1 - s1 + s2)
var decDig = min(DecimalType.MAX_SCALE, max(6, s1 + p2 + 1))
val diff = (intDig + decDig) - DecimalType.MAX_SCALE
if (diff > 0) {
decDig -= diff / 2 + 1
intDig = DecimalType.MAX_SCALE - decDig
}
val resultType = DecimalType.bounded(intDig + decDig, decDig)
val widerType = widerDecimalType(p1, s1, p2, s2)
CheckOverflow(Divide(promotePrecision(e1, widerType), promotePrecision(e2, widerType)),
resultType)
case Remainder(e1 @ DecimalType.Expression(p1, s1), e2 @ DecimalType.Expression(p2, s2)) =>
val resultType = DecimalType.bounded(min(p1 - s1, p2 - s2) + max(s1, s2), max(s1, s2))
// resultType may have lower precision, so we cast them into wider type first.
val widerType = widerDecimalType(p1, s1, p2, s2)
CheckOverflow(Remainder(promotePrecision(e1, widerType), promotePrecision(e2, widerType)),
resultType)
case Pmod(e1 @ DecimalType.Expression(p1, s1), e2 @ DecimalType.Expression(p2, s2)) =>
val resultType = DecimalType.bounded(min(p1 - s1, p2 - s2) + max(s1, s2), max(s1, s2))
// resultType may have lower precision, so we cast them into wider type first.
val widerType = widerDecimalType(p1, s1, p2, s2)
CheckOverflow(Pmod(promotePrecision(e1, widerType), promotePrecision(e2, widerType)),
resultType)
case b @ BinaryComparison(e1 @ DecimalType.Expression(p1, s1),
e2 @ DecimalType.Expression(p2, s2)) if p1 != p2 || s1 != s2 =>
val resultType = widerDecimalType(p1, s1, p2, s2)
b.makeCopy(Array(Cast(e1, resultType), Cast(e2, resultType)))
// Promote integers inside a binary expression with fixed-precision decimals to decimals,
// and fixed-precision decimals in an expression with floats / doubles to doubles
case b @ BinaryOperator(left, right) if left.dataType != right.dataType =>
(left.dataType, right.dataType) match {
case (t: IntegralType, DecimalType.Fixed(p, s)) =>
b.makeCopy(Array(Cast(left, DecimalType.forType(t)), right))
case (DecimalType.Fixed(p, s), t: IntegralType) =>
b.makeCopy(Array(left, Cast(right, DecimalType.forType(t))))
case (t, DecimalType.Fixed(p, s)) if isFloat(t) =>
b.makeCopy(Array(left, Cast(right, DoubleType)))
case (DecimalType.Fixed(p, s), t) if isFloat(t) =>
b.makeCopy(Array(Cast(left, DoubleType), right))
case _ =>
b
}
// TODO: MaxOf, MinOf, etc might want other rules
// SUM and AVERAGE are handled by the implementations of those expressions
}
}
}
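  // Worked example (illustrative only) of the addition rule above:
  //   DECIMAL(5, 2) + DECIMAL(7, 1)
  //     result type = bounded(max(2, 1) + max(5 - 2, 7 - 1) + 1, max(2, 1)) = DECIMAL(9, 2)
  //   Both operands are promoted to DECIMAL(9, 2) and the Add is wrapped in
  //   CheckOverflow(_, DECIMAL(9, 2)).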
/**
* Changes numeric values to booleans so that expressions like true = 1 can be evaluated.
*/
object BooleanEquality extends Rule[LogicalPlan] {
private val trueValues = Seq(1.toByte, 1.toShort, 1, 1L, Decimal.ONE)
private val falseValues = Seq(0.toByte, 0.toShort, 0, 0L, Decimal.ZERO)
private def buildCaseKeyWhen(booleanExpr: Expression, numericExpr: Expression) = {
CaseKeyWhen(numericExpr, Seq(
Literal(trueValues.head), booleanExpr,
Literal(falseValues.head), Not(booleanExpr),
Literal(false)))
}
private def transform(booleanExpr: Expression, numericExpr: Expression) = {
If(Or(IsNull(booleanExpr), IsNull(numericExpr)),
Literal.create(null, BooleanType),
buildCaseKeyWhen(booleanExpr, numericExpr))
}
private def transformNullSafe(booleanExpr: Expression, numericExpr: Expression) = {
CaseWhen(Seq(
And(IsNull(booleanExpr), IsNull(numericExpr)), Literal(true),
Or(IsNull(booleanExpr), IsNull(numericExpr)), Literal(false),
buildCaseKeyWhen(booleanExpr, numericExpr)
))
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
// Hive treats (true = 1) as true and (false = 0) as true,
// all other cases are considered as false.
      // We may simplify the expression if one side is a literal numeric value
case EqualTo(bool @ BooleanType(), Literal(value, _: NumericType))
if trueValues.contains(value) => bool
case EqualTo(bool @ BooleanType(), Literal(value, _: NumericType))
if falseValues.contains(value) => Not(bool)
case EqualTo(Literal(value, _: NumericType), bool @ BooleanType())
if trueValues.contains(value) => bool
case EqualTo(Literal(value, _: NumericType), bool @ BooleanType())
if falseValues.contains(value) => Not(bool)
case EqualNullSafe(bool @ BooleanType(), Literal(value, _: NumericType))
if trueValues.contains(value) => And(IsNotNull(bool), bool)
case EqualNullSafe(bool @ BooleanType(), Literal(value, _: NumericType))
if falseValues.contains(value) => And(IsNotNull(bool), Not(bool))
case EqualNullSafe(Literal(value, _: NumericType), bool @ BooleanType())
if trueValues.contains(value) => And(IsNotNull(bool), bool)
case EqualNullSafe(Literal(value, _: NumericType), bool @ BooleanType())
if falseValues.contains(value) => And(IsNotNull(bool), Not(bool))
case EqualTo(left @ BooleanType(), right @ NumericType()) =>
transform(left , right)
case EqualTo(left @ NumericType(), right @ BooleanType()) =>
transform(right, left)
case EqualNullSafe(left @ BooleanType(), right @ NumericType()) =>
transformNullSafe(left, right)
case EqualNullSafe(left @ NumericType(), right @ BooleanType()) =>
transformNullSafe(right, left)
}
}
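  // Worked examples (illustrative only):
  //   boolCol = 1       =>  boolCol        (literal is in trueValues)
  //   boolCol = 0       =>  NOT boolCol    (literal is in falseValues)
  //   boolCol = intCol  =>  IF(boolCol IS NULL OR intCol IS NULL, NULL,
  //                            CASE intCol WHEN 1 THEN boolCol WHEN 0 THEN NOT boolCol ELSE false END)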
/**
   * When encountering a cast from a string representing a valid fractional number to an integral
   * type, the JVM will throw a `java.lang.NumberFormatException`. Hive, in contrast, returns the
* truncated version of this number.
*/
object StringToIntegralCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case Cast(e @ StringType(), t: IntegralType) =>
Cast(Cast(e, DecimalType.forType(LongType)), t)
}
}
/**
   * This ensures that the types for various functions are as expected.
*/
object FunctionArgumentConversion extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case a @ CreateArray(children) if children.map(_.dataType).distinct.size > 1 =>
val types = children.map(_.dataType)
findTightestCommonTypeAndPromoteToString(types) match {
case Some(finalDataType) => CreateArray(children.map(Cast(_, finalDataType)))
case None => a
}
// Promote SUM, SUM DISTINCT and AVERAGE to largest types to prevent overflows.
case s @ Sum(e @ DecimalType()) => s // Decimal is already the biggest.
case Sum(e @ IntegralType()) if e.dataType != LongType => Sum(Cast(e, LongType))
case Sum(e @ FractionalType()) if e.dataType != DoubleType => Sum(Cast(e, DoubleType))
case s @ SumDistinct(e @ DecimalType()) => s // Decimal is already the biggest.
case SumDistinct(e @ IntegralType()) if e.dataType != LongType =>
SumDistinct(Cast(e, LongType))
case SumDistinct(e @ FractionalType()) if e.dataType != DoubleType =>
SumDistinct(Cast(e, DoubleType))
case s @ Average(e @ DecimalType()) => s // Decimal is already the biggest.
case Average(e @ IntegralType()) if e.dataType != LongType =>
Average(Cast(e, LongType))
case Average(e @ FractionalType()) if e.dataType != DoubleType =>
Average(Cast(e, DoubleType))
// Hive lets you do aggregation of timestamps... for some reason
case Sum(e @ TimestampType()) => Sum(Cast(e, DoubleType))
case Average(e @ TimestampType()) => Average(Cast(e, DoubleType))
// Coalesce should return the first non-null value, which could be any column
// from the list. So we need to make sure the return type is deterministic and
// compatible with every child column.
case c @ Coalesce(es) if es.map(_.dataType).distinct.size > 1 =>
val types = es.map(_.dataType)
findWiderCommonType(types) match {
case Some(finalDataType) => Coalesce(es.map(Cast(_, finalDataType)))
case None => c
}
case NaNvl(l, r) if l.dataType == DoubleType && r.dataType == FloatType =>
NaNvl(l, Cast(r, DoubleType))
case NaNvl(l, r) if l.dataType == FloatType && r.dataType == DoubleType =>
NaNvl(Cast(l, DoubleType), r)
}
}
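  // Worked examples (illustrative only):
  //   Sum(intCol)                    =>  Sum(Cast(intCol, LongType))
  //   Average(floatCol)              =>  Average(Cast(floatCol, DoubleType))
  //   CreateArray(intCol, stringCol) =>  CreateArray(Cast(intCol, StringType), Cast(stringCol, StringType))
  //   Coalesce(intCol, decimalCol)   =>  both arguments cast to the type picked by findWiderCommonType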
/**
* Hive only performs integral division with the DIV operator. The arguments to / are always
* converted to fractional types.
*/
object Division extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes that have not been resolved yet,
// as this is an extra rule which should be applied at last.
case e if !e.resolved => e
// Decimal and Double remain the same
case d: Divide if d.dataType == DoubleType => d
case d: Divide if d.dataType.isInstanceOf[DecimalType] => d
case Divide(left, right) => Divide(Cast(left, DoubleType), Cast(right, DoubleType))
}
}
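  // Worked example (illustrative only): 7 / 2 with two IntegerType operands becomes
  //   Divide(Cast(7, DoubleType), Cast(2, DoubleType))
  // so the result is 3.5 rather than the truncated 3; decimal and double divisions
  // are left untouched by this rule.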
/**
* Coerces the type of different branches of a CASE WHEN statement to a common type.
*/
object CaseWhenCoercion extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case c: CaseWhenLike if c.childrenResolved && !c.valueTypesEqual =>
logDebug(s"Input values for null casting ${c.valueTypes.mkString(",")}")
val maybeCommonType = findWiderCommonType(c.valueTypes)
maybeCommonType.map { commonType =>
val castedBranches = c.branches.grouped(2).map {
case Seq(when, value) if value.dataType != commonType =>
Seq(when, Cast(value, commonType))
case Seq(elseVal) if elseVal.dataType != commonType =>
Seq(Cast(elseVal, commonType))
case other => other
}.reduce(_ ++ _)
c match {
case _: CaseWhen => CaseWhen(castedBranches)
case CaseKeyWhen(key, _) => CaseKeyWhen(key, castedBranches)
}
}.getOrElse(c)
case c: CaseKeyWhen if c.childrenResolved && !c.resolved =>
val maybeCommonType =
findWiderCommonType((c.key +: c.whenList).map(_.dataType))
maybeCommonType.map { commonType =>
val castedBranches = c.branches.grouped(2).map {
case Seq(whenExpr, thenExpr) if whenExpr.dataType != commonType =>
Seq(Cast(whenExpr, commonType), thenExpr)
case other => other
}.reduce(_ ++ _)
CaseKeyWhen(Cast(c.key, commonType), castedBranches)
}.getOrElse(c)
}
}
/**
   * Coerces the types of the different branches of an If statement to a common type.
*/
object IfCoercion extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
case e if !e.childrenResolved => e
// Find tightest common type for If, if the true value and false value have different types.
case i @ If(pred, left, right) if left.dataType != right.dataType =>
findTightestCommonTypeToString(left.dataType, right.dataType).map { widestType =>
val newLeft = if (left.dataType == widestType) left else Cast(left, widestType)
val newRight = if (right.dataType == widestType) right else Cast(right, widestType)
If(pred, newLeft, newRight)
}.getOrElse(i) // If there is no applicable conversion, leave expression unchanged.
// Convert If(null literal, _, _) into boolean type.
      // In the optimizer, we should short-circuit this directly into a false value.
case If(pred, left, right) if pred.dataType == NullType =>
If(Literal.create(null, BooleanType), left, right)
}
}
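  // Worked example (illustrative only): If(cond, intCol, doubleCol) has branches of
  // different types; the tightest common type is DoubleType, so the narrower branch is cast:
  //   If(cond, Cast(intCol, DoubleType), doubleCol)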
/**
* Turns Add/Subtract of DateType/TimestampType/StringType and CalendarIntervalType
* to TimeAdd/TimeSub
*/
object DateTimeOperations extends Rule[LogicalPlan] {
private val acceptedTypes = Seq(DateType, TimestampType, StringType)
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case Add(l @ CalendarIntervalType(), r) if acceptedTypes.contains(r.dataType) =>
Cast(TimeAdd(r, l), r.dataType)
case Add(l, r @ CalendarIntervalType()) if acceptedTypes.contains(l.dataType) =>
Cast(TimeAdd(l, r), l.dataType)
case Subtract(l, r @ CalendarIntervalType()) if acceptedTypes.contains(l.dataType) =>
Cast(TimeSub(l, r), l.dataType)
}
}
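  // Worked example (illustrative only): dateCol + interval becomes
  //   Cast(TimeAdd(dateCol, interval), DateType)
  // and dateCol - interval becomes Cast(TimeSub(dateCol, interval), DateType), so the
  // result keeps the original date/timestamp/string type.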
/**
* Casts types according to the expected input types for [[Expression]]s.
*/
object ImplicitTypeCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan resolveExpressions {
      // Skip nodes whose children have not been resolved yet.
case e if !e.childrenResolved => e
case b @ BinaryOperator(left, right) if left.dataType != right.dataType =>
findTightestCommonTypeOfTwo(left.dataType, right.dataType).map { commonType =>
if (b.inputType.acceptsType(commonType)) {
// If the expression accepts the tightest common type, cast to that.
val newLeft = if (left.dataType == commonType) left else Cast(left, commonType)
val newRight = if (right.dataType == commonType) right else Cast(right, commonType)
b.withNewChildren(Seq(newLeft, newRight))
} else {
// Otherwise, don't do anything with the expression.
b
}
}.getOrElse(b) // If there is no applicable conversion, leave expression unchanged.
case e: ImplicitCastInputTypes if e.inputTypes.nonEmpty =>
val children: Seq[Expression] = e.children.zip(e.inputTypes).map { case (in, expected) =>
// If we cannot do the implicit cast, just use the original input.
implicitCast(in, expected).getOrElse(in)
}
e.withNewChildren(children)
case e: ExpectsInputTypes if e.inputTypes.nonEmpty =>
// Convert NullType into some specific target type for ExpectsInputTypes that don't do
// general implicit casting.
val children: Seq[Expression] = e.children.zip(e.inputTypes).map { case (in, expected) =>
if (in.dataType == NullType && !expected.acceptsType(NullType)) {
Literal.create(null, expected.defaultConcreteType)
} else {
in
}
}
e.withNewChildren(children)
}
/**
* Given an expected data type, try to cast the expression and return the cast expression.
*
* If the expression already fits the input type, we simply return the expression itself.
* If the expression has an incompatible type that cannot be implicitly cast, return None.
*/
def implicitCast(e: Expression, expectedType: AbstractDataType): Option[Expression] = {
val inType = e.dataType
// Note that ret is nullable to avoid typing a lot of Some(...) in this local scope.
      // We immediately wrap it in an Option after this.
@Nullable val ret: Expression = (inType, expectedType) match {
// If the expected type is already a parent of the input type, no need to cast.
case _ if expectedType.acceptsType(inType) => e
// Cast null type (usually from null literals) into target types
case (NullType, target) => Cast(e, target.defaultConcreteType)
// If the function accepts any numeric type and the input is a string, we follow the hive
// convention and cast that input into a double
case (StringType, NumericType) => Cast(e, NumericType.defaultConcreteType)
// Implicit cast among numeric types. When we reach here, input type is not acceptable.
// If input is a numeric type but not decimal, and we expect a decimal type,
// cast the input to decimal.
case (d: NumericType, DecimalType) => Cast(e, DecimalType.forType(d))
// For any other numeric types, implicitly cast to each other, e.g. long -> int, int -> long
case (_: NumericType, target: NumericType) => Cast(e, target)
// Implicit cast between date time types
case (DateType, TimestampType) => Cast(e, TimestampType)
case (TimestampType, DateType) => Cast(e, DateType)
// Implicit cast from/to string
case (StringType, DecimalType) => Cast(e, DecimalType.SYSTEM_DEFAULT)
case (StringType, target: NumericType) => Cast(e, target)
case (StringType, DateType) => Cast(e, DateType)
case (StringType, TimestampType) => Cast(e, TimestampType)
case (StringType, BinaryType) => Cast(e, BinaryType)
// Cast any atomic type to string.
case (any: AtomicType, StringType) if any != StringType => Cast(e, StringType)
// When we reach here, input type is not acceptable for any types in this type collection,
// try to find the first one we can implicitly cast.
case (_, TypeCollection(types)) => types.flatMap(implicitCast(e, _)).headOption.orNull
// Else, just return the same input expression
case _ => null
}
Option(ret)
}
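    // Worked examples (illustrative only, following the cases above):
    //   implicitCast(stringExpr, NumericType)  == Some(Cast(stringExpr, DoubleType))
    //   implicitCast(intExpr, DecimalType)     == Some(Cast(intExpr, DecimalType(10, 0)))
    //   implicitCast(dateExpr, TimestampType)  == Some(Cast(dateExpr, TimestampType))
    //   implicitCast(binaryExpr, IntegerType)  == None  // no implicit conversion exists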
}
}
|
ArvinDevel/onlineAggregationOnSparkV2
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/HiveTypeCoercion.scala
|
Scala
|
apache-2.0
| 35,414 |
trait Foo[T <: Bar[T]#Elem] // error: illegal cyclic reference
trait Bar[T] {
type Elem = T
}
trait Foo2[T <: Bar2[T]#Elem] // error: illegal cyclic reference
trait Bar2[T] {
type Elem = T
}
|
densh/dotty
|
tests/neg/i974.scala
|
Scala
|
bsd-3-clause
| 195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.scheduler.rate
import org.apache.spark.SparkConf
import org.apache.spark.streaming.Duration
/**
* A component that estimates the rate at which an `InputDStream` should ingest
* records, based on updates at every batch completion.
*
* Please see `org.apache.spark.streaming.scheduler.RateController` for more details.
*/
private[streaming] trait RateEstimator extends Serializable {
/**
* Computes the number of records the stream attached to this `RateEstimator`
* should ingest per second, given an update on the size and completion
* times of the latest batch.
*
* @param time The timestamp of the current batch interval that just finished
* @param elements The number of records that were processed in this batch
   * @param processingDelay The time in ms that it took for the job to complete
* @param schedulingDelay The time in ms that the job spent in the scheduling queue
*/
def compute(
time: Long,
elements: Long,
processingDelay: Long,
schedulingDelay: Long): Option[Double]
}
object RateEstimator {
/**
* Return a new `RateEstimator` based on the value of
* `spark.streaming.backpressure.rateEstimator`.
*
* The only known and acceptable estimator right now is `pid`.
*
* @return An instance of RateEstimator
* @throws IllegalArgumentException if the configured RateEstimator is not `pid`.
*/
def create(conf: SparkConf, batchInterval: Duration): RateEstimator =
conf.get("spark.streaming.backpressure.rateEstimator", "pid") match {
case "pid" =>
val proportional = conf.getDouble("spark.streaming.backpressure.pid.proportional", 1.0)
val integral = conf.getDouble("spark.streaming.backpressure.pid.integral", 0.2)
val derived = conf.getDouble("spark.streaming.backpressure.pid.derived", 0.0)
val minRate = conf.getDouble("spark.streaming.backpressure.pid.minRate", 100)
new PIDRateEstimator(batchInterval.milliseconds, proportional, integral, derived, minRate)
case estimator =>
throw new IllegalArgumentException(s"Unknown rate estimator: $estimator")
}
}
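// Usage sketch (illustrative only; the calling code is hypothetical and must live in
// the org.apache.spark.streaming package because the trait is private[streaming]):
//
//   val conf = new SparkConf()
//     .set("spark.streaming.backpressure.rateEstimator", "pid")
//     .set("spark.streaming.backpressure.pid.minRate", "100")
//   val estimator = RateEstimator.create(conf, Duration(2000))  // 2 second batch interval
//   // After each completed batch:
//   //   estimator.compute(batchTime, numRecords, processingDelayMs, schedulingDelayMs)
//   //   returns Some(newRecordsPerSecond), or None if no update is warranted.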
|
saturday-shi/spark
|
streaming/src/main/scala/org/apache/spark/streaming/scheduler/rate/RateEstimator.scala
|
Scala
|
apache-2.0
| 2,962 |
/*
* Copyright 2001-2008 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.path
import org.scalatest._
import org.scalatest.path.{ FreeSpec => PathFreeSpec }
// elements
import org.scalatest.events._
class FreeSpecSpec extends org.scalatest.FunSpec with SharedHelpers with GivenWhenThen {
describe("A FreeSpec") {
describe("(when a nesting rule has been violated)") {
it("should, if they call a describe from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends PathFreeSpec {
"should blow up" in {
"in the wrong place, at the wrong time" - {
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a describe with a nested it from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends PathFreeSpec {
"should blow up" in {
"in the wrong place, at the wrong time" - {
"should never run" in {
assert(1 === 1)
}
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested it from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends PathFreeSpec {
"should blow up" in {
"should never run" in {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends PathFreeSpec {
"should blow up" in {
"should never run" taggedAs(mytags.SlowAsMolasses) in {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a describe with a nested ignore from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends PathFreeSpec {
"should blow up" in {
"in the wrong place, at the wrong time" - {
"should never run" ignore {
assert(1 === 1)
}
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends PathFreeSpec {
"should blow up" in {
"should never run" ignore {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
it("should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test") {
class MySpec extends PathFreeSpec {
"should blow up" in {
"should never run" taggedAs(mytags.SlowAsMolasses) ignore {
assert(1 === 1)
}
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
}
it("should return the test names in registration order from testNames") {
class AFreeSpec extends PathFreeSpec {
"it should test this" in {}
"it should test that" in {}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
expect(List("it should test this", "it should test that")) {
a.testNames.iterator.toList
}
class BFreeSpec extends PathFreeSpec
val b = new BFreeSpec
expect(List[String]()) {
b.testNames.iterator.toList
}
class CFreeSpec extends PathFreeSpec {
"it should test that" in {}
"it should test this" in {}
override def newInstance = new CFreeSpec
}
val c = new CFreeSpec
expect(List("it should test that", "it should test this")) {
c.testNames.iterator.toList
}
class DFreeSpec extends PathFreeSpec {
"A Tester" - {
"should test that" in {}
"should test this" in {}
}
override def newInstance = new DFreeSpec
}
val d = new DFreeSpec
expect(List("A Tester should test that", "A Tester should test this")) {
d.testNames.iterator.toList
}
class EFreeSpec extends PathFreeSpec {
"A Tester" - {
"should test this" in {}
"should test that" in {}
}
override def newInstance = new EFreeSpec
}
val e = new EFreeSpec
expect(List("A Tester should test this", "A Tester should test that")) {
e.testNames.iterator.toList
}
}
    // TODO: put a better message in the instantiation exception or probably wrap it in something that
    // has a better message, explaining the probable solution is to add an "override def newInstance"
it("should throw DuplicateTestNameException if a duplicate test name registration is attempted") {
intercept[DuplicateTestNameException] {
class AFreeSpec extends PathFreeSpec {
"should test this" in {}
"should test this" in {}
override def newInstance = new AFreeSpec
}
(new AFreeSpec).tags // Must call a method to get it to attempt to register the second test
}
intercept[DuplicateTestNameException] {
class AFreeSpec extends PathFreeSpec {
"should test this" in {}
"should test this" ignore {}
override def newInstance = new AFreeSpec
}
(new AFreeSpec).tags
}
intercept[DuplicateTestNameException] {
class AFreeSpec extends PathFreeSpec {
"should test this" ignore {}
"should test this" ignore {}
override def newInstance = new AFreeSpec
}
(new AFreeSpec).tags
}
intercept[DuplicateTestNameException] {
class AFreeSpec extends PathFreeSpec {
"should test this" ignore {}
"should test this" in {}
override def newInstance = new AFreeSpec
}
(new AFreeSpec).tags
}
}
describe("(with info calls)") {
class InfoInsideTestSpec extends PathFreeSpec {
val msg = "hi there, dude"
val testName = "test name"
testName in {
info(msg)
}
}
      // In a Spec, any InfoProvided's fired during the test should be cached and sent out after the test has
      // succeeded or failed. This makes the report look nicer, because the info is tucked under the "specifier"
      // text for that test.
it("should, when the info appears in the code of a successful test, report the info after the TestSucceeded") {
val spec = new InfoInsideTestSpec
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(testSucceededIndex < infoProvidedIndex)
}
class InfoBeforeTestSpec extends PathFreeSpec {
val msg = "hi there, dude"
val testName = "test name"
info(msg)
testName in {}
}
it("should, when the info appears in the body before a test, report the info before the test") {
val spec = new InfoBeforeTestSpec
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(infoProvidedIndex < testStartingIndex)
assert(testStartingIndex < testSucceededIndex)
}
it("should, when the info appears in the body after a test, report the info after the test runs") {
val msg = "hi there, dude"
val testName = "test name"
class MySpec extends PathFreeSpec {
testName in {}
info(msg)
}
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(new MySpec, testName, msg)
assert(testStartingIndex < testSucceededIndex)
assert(testSucceededIndex < infoProvidedIndex)
}
it("should throw an IllegalStateException when info is called by a method invoked after the suite has been executed") {
class MySpec extends PathFreeSpec {
callInfo() // This should work fine
def callInfo() {
info("howdy")
}
"howdy also" in {
callInfo() // This should work fine
}
override def newInstance = new MySpec
}
val spec = new MySpec
val myRep = new EventRecordingReporter
spec.run(None, myRep, new Stopper {}, Filter(), Map(), None, new Tracker)
intercept[IllegalStateException] {
spec.callInfo()
}
}
it("should send an InfoProvided with an IndentedText formatter with level 1 when called outside a test") {
val spec = new InfoBeforeTestSpec
val indentedText = getIndentedTextFromInfoProvided(spec)
assert(indentedText === IndentedText("+ " + spec.msg, spec.msg, 0))
}
it("should send an InfoProvided with an IndentedText formatter with level 2 when called within a test") {
val spec = new InfoInsideTestSpec
val indentedText = getIndentedTextFromInfoProvided(spec)
assert(indentedText === IndentedText(" + " + spec.msg, spec.msg, 1))
}
}
it("should throw NullPointerException if a null test tag is provided") {
// it
intercept[NullPointerException] {
new PathFreeSpec {
"hi" taggedAs(null) in {}
}
}
val caught = intercept[NullPointerException] {
new PathFreeSpec {
"hi" taggedAs(mytags.SlowAsMolasses, null) in {}
}
}
assert(caught.getMessage === "a test tag was null")
intercept[NullPointerException] {
new PathFreeSpec {
"hi" taggedAs(mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) in {}
}
}
// ignore
intercept[NullPointerException] {
new PathFreeSpec {
"hi" taggedAs(null) ignore {}
}
}
val caught2 = intercept[NullPointerException] {
new PathFreeSpec {
"hi" taggedAs(mytags.SlowAsMolasses, null) ignore {}
}
}
assert(caught2.getMessage === "a test tag was null")
intercept[NullPointerException] {
new PathFreeSpec {
"hi" taggedAs(mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) ignore {}
}
}
}
it("should return a correct tags map from the tags method using is (pending)") {
class AFreeSpec extends PathFreeSpec {
"test this" ignore {}
"test that" is (pending)
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
expect(Map("test this" -> Set("org.scalatest.Ignore"))) {
a.tags
}
class BFreeSpec extends PathFreeSpec {
"test this" is (pending)
"test that" ignore {}
override def newInstance = new BFreeSpec
}
val b = new BFreeSpec
expect(Map("test that" -> Set("org.scalatest.Ignore"))) {
b.tags
}
class CFreeSpec extends PathFreeSpec {
"test this" ignore {}
"test that" ignore {}
override def newInstance = new CFreeSpec
}
val c = new CFreeSpec
expect(Map("test this" -> Set("org.scalatest.Ignore"), "test that" -> Set("org.scalatest.Ignore"))) {
c.tags
}
class DFreeSpec extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses) is (pending)
"test that" taggedAs(mytags.SlowAsMolasses) ignore {}
override def newInstance = new DFreeSpec
}
val d = new DFreeSpec
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses"), "test that" -> Set("org.scalatest.Ignore", "org.scalatest.SlowAsMolasses"))) {
d.tags
}
class EFreeSpec extends PathFreeSpec {
"test this" is (pending)
"test that" is (pending)
override def newInstance = new EFreeSpec
}
val e = new EFreeSpec
expect(Map()) {
e.tags
}
class FFreeSpec extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.WeakAsAKitten) is (pending)
"test that" taggedAs(mytags.SlowAsMolasses) in {}
override def newInstance = new FFreeSpec
}
val f = new FFreeSpec
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), "test that" -> Set("org.scalatest.SlowAsMolasses"))) {
f.tags
}
class GFreeSpec extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.WeakAsAKitten) is (pending)
"test that" taggedAs(mytags.SlowAsMolasses) in {}
override def newInstance = new GFreeSpec
}
val g = new GFreeSpec
expect(Map("test this" -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), "test that" -> Set("org.scalatest.SlowAsMolasses"))) {
g.tags
}
}
case class TestWasCalledCounts(var theTestThisCalled: Boolean, var theTestThatCalled: Boolean)
class TestWasCalledSuite(val counts: TestWasCalledCounts) extends PathFreeSpec {
"run this" in { counts.theTestThisCalled = true }
"run that, maybe" in { counts.theTestThatCalled = true }
override def newInstance = new TestWasCalledSuite(counts)
}
it("should execute all tests when run is called with testName None") {
val b = new TestWasCalledSuite(TestWasCalledCounts(false, false))
b.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(b.counts.theTestThisCalled)
assert(b.counts.theTestThatCalled)
}
it("should execute one test when run is called with a defined testName") {
val a = new TestWasCalledSuite(TestWasCalledCounts(false, false))
val rep = new EventRecordingReporter
a.run(Some("run this"), rep, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(a.counts.theTestThisCalled)
assert(a.counts.theTestThatCalled)
val tse = rep.testSucceededEventsReceived
assert(tse.size === 1)
assert(tse(0).testName === "run this")
val tfe = rep.testFailedEventsReceived
assert(tfe.size === 0)
val tste = rep.testStartingEventsReceived
assert(tste.size === 1)
}
it("should report as ignored, and not run, tests marked ignored") {
class AFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" in { counts.theTestThisCalled = true }
"test that" in { counts.theTestThatCalled = true }
override def newInstance = new AFreeSpec(counts)
}
val a = new AFreeSpec(TestWasCalledCounts(false, false))
val repA = new TestIgnoredTrackingReporter
a.run(None, repA, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(!repA.testIgnoredReceived)
assert(a.counts.theTestThisCalled)
assert(a.counts.theTestThatCalled)
class BFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" ignore { counts.theTestThisCalled = true }
"test that" in { counts.theTestThatCalled = true }
override def newInstance = new BFreeSpec(counts)
}
val b = new BFreeSpec(TestWasCalledCounts(false, false))
val repB = new TestIgnoredTrackingReporter
b.run(None, repB, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!b.counts.theTestThisCalled)
assert(b.counts.theTestThatCalled)
class CFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" in { counts.theTestThisCalled = true }
"test that" ignore { counts.theTestThatCalled = true }
override def newInstance = new CFreeSpec(counts)
}
val c = new CFreeSpec(TestWasCalledCounts(false, false))
val repC = new TestIgnoredTrackingReporter
c.run(None, repC, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(c.counts.theTestThisCalled)
assert(!c.counts.theTestThatCalled)
// The order I want is order of appearance in the file.
// Will try and implement that tomorrow. Subtypes will be able to change the order.
class DFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" ignore { counts.theTestThisCalled = true }
"test that" ignore { counts.theTestThatCalled = true }
override def newInstance = new DFreeSpec(counts)
}
val d = new DFreeSpec(TestWasCalledCounts(false, false))
val repD = new TestIgnoredTrackingReporter
d.run(None, repD, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!d.counts.theTestThisCalled)
assert(!d.counts.theTestThatCalled)
}
it("should ignore a test marked as ignored if run is invoked with that testName") {
// If I provide a specific testName to run, then it should ignore an Ignore on that test
// method and actually invoke it.
class EFreeSpec extends PathFreeSpec {
var theTestThisCalled = false
var theTestThatCalled = false
"test this" ignore { theTestThisCalled = true }
"test that" in { theTestThatCalled = true }
override def newInstance = new EFreeSpec
}
val e = new EFreeSpec
val repE = new TestIgnoredTrackingReporter
e.run(Some("test this"), repE, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(!e.theTestThatCalled)
}
it("should run only those tests selected by the tags to include and exclude sets") {
// Nothing is excluded
class AFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThisCalled = true }
"test that" in { counts.theTestThatCalled = true }
override def newInstance = new AFreeSpec(counts)
}
val a = new AFreeSpec(TestWasCalledCounts(false, false))
val repA = new TestIgnoredTrackingReporter
a.run(None, repA, new Stopper {}, Filter(), Map(), None, new Tracker)
assert(!repA.testIgnoredReceived)
assert(a.counts.theTestThisCalled)
assert(a.counts.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
class BFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThisCalled = true }
"test that" in { counts.theTestThatCalled = true }
override def newInstance = new BFreeSpec(counts)
}
val b = new BFreeSpec(TestWasCalledCounts(false, false))
val repB = new EventRecordingReporter
b.run(None, repB, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(repB.testIgnoredEventsReceived.isEmpty)
assert(b.counts.theTestThisCalled)
assert(b.counts.theTestThatCalled)
assert(repB.testStartingEventsReceived.size === 1)
assert(repB.testStartingEventsReceived(0).testName == "test this")
// SlowAsMolasses is included, and both tests should be included
class CFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThatCalled = true }
override def newInstance = new CFreeSpec(counts)
}
val c = new CFreeSpec(TestWasCalledCounts(false, false))
val repC = new EventRecordingReporter
c.run(None, repC, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), Map(), None, new Tracker)
assert(repC.testIgnoredEventsReceived.isEmpty)
assert(c.counts.theTestThisCalled)
assert(c.counts.theTestThatCalled)
assert(repC.testStartingEventsReceived.size === 2)
// SlowAsMolasses is included. both tests should be included but one ignored
class DFreeSpec(val counts: TestWasCalledCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses) ignore { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThatCalled = true }
override def newInstance = new DFreeSpec(counts)
}
val d = new DFreeSpec(TestWasCalledCounts(false, false))
val repD = new EventRecordingReporter
d.run(None, repD, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), Map(), None, new Tracker)
assert(repD.testIgnoredEventsReceived.size === 1)
assert(!d.counts.theTestThisCalled)
assert(d.counts.theTestThatCalled)
assert(repD.testStartingEventsReceived.size === 1)
assert(repD.testStartingEventsReceived(0).testName === "test that")
case class ThreeCounts(var theTestThisCalled: Boolean, var theTestThatCalled: Boolean, var theTestTheOtherCalled: Boolean)
// SlowAsMolasses included, FastAsLight excluded
class EFreeSpec(val counts: ThreeCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThatCalled = true }
"test the other" in { counts.theTestTheOtherCalled = true }
override def newInstance = new EFreeSpec(counts)
}
val e = new EFreeSpec(ThreeCounts(false, false, false))
val repE = new EventRecordingReporter
e.run(None, repE, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(repE.testIgnoredEventsReceived.isEmpty)
assert(e.counts.theTestThisCalled)
assert(e.counts.theTestThatCalled)
assert(e.counts.theTestTheOtherCalled)
assert(repE.testStartingEventsReceived.size === 1)
assert(repE.testStartingEventsReceived(0).testName === "test that")
// An Ignored test that was both included and excluded should not generate a TestIgnored event
class FFreeSpec(val counts: ThreeCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) ignore { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThatCalled = true }
"test the other" in { counts.theTestTheOtherCalled = true }
override def newInstance = new FFreeSpec(counts)
}
val f = new FFreeSpec(ThreeCounts(false, false, false))
val repF = new EventRecordingReporter
f.run(None, repF, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(repE.testIgnoredEventsReceived.isEmpty)
assert(!f.counts.theTestThisCalled)
assert(f.counts.theTestThatCalled)
assert(f.counts.theTestTheOtherCalled)
assert(repE.testStartingEventsReceived.size === 1)
assert(repE.testStartingEventsReceived(0).testName === "test that")
// An Ignored test that was not included should not generate a TestIgnored event
class GFreeSpec(val counts: ThreeCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThatCalled = true }
"test the other" ignore { counts.theTestTheOtherCalled = true }
override def newInstance = new GFreeSpec(counts)
}
val g = new GFreeSpec(ThreeCounts(false, false, false))
val repG = new EventRecordingReporter
g.run(None, repG, new Stopper {}, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
Map(), None, new Tracker)
assert(repG.testIgnoredEventsReceived.isEmpty)
assert(g.counts.theTestThisCalled)
assert(g.counts.theTestThatCalled)
assert(!g.counts.theTestTheOtherCalled)
assert(repG.testStartingEventsReceived.size === 1)
assert(repG.testStartingEventsReceived(0).testName === "test that")
// No tagsToInclude set, FastAsLight excluded
class HFreeSpec(val counts: ThreeCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThatCalled = true }
"test the other" in { counts.theTestTheOtherCalled = true }
override def newInstance = new HFreeSpec(counts)
}
val h = new HFreeSpec(ThreeCounts(false, false, false))
val repH = new EventRecordingReporter
h.run(None, repH, new Stopper {}, Filter(None, Set("org.scalatest.FastAsLight")), Map(), None, new Tracker)
assert(repH.testIgnoredEventsReceived.isEmpty)
assert(h.counts.theTestThisCalled)
assert(h.counts.theTestThatCalled)
assert(h.counts.theTestTheOtherCalled)
assert(repH.testStartingEventsReceived.size === 2)
assert(repH.testStartingEventsReceived.exists(_.testName == "test that"))
assert(repH.testStartingEventsReceived.exists(_.testName == "test the other"))
// No tagsToInclude set, mytags.SlowAsMolasses excluded
class IFreeSpec(val counts: ThreeCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) in { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) in { counts.theTestThatCalled = true }
"test the other" in { counts.theTestTheOtherCalled = true }
override def newInstance = new IFreeSpec(counts)
}
val i = new IFreeSpec(ThreeCounts(false, false, false))
val repI = new EventRecordingReporter
i.run(None, repI, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker)
assert(repI.testIgnoredEventsReceived.isEmpty)
assert(i.counts.theTestThisCalled)
assert(i.counts.theTestThatCalled)
assert(i.counts.theTestTheOtherCalled)
assert(repI.testStartingEventsReceived.size === 1)
assert(repI.testStartingEventsReceived(0).testName === "test the other")
// No tagsToInclude set, mytags.SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
class JFreeSpec(val counts: ThreeCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) ignore { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) ignore { counts.theTestThatCalled = true }
"test the other" in { counts.theTestTheOtherCalled = true }
override def newInstance = new JFreeSpec(counts)
}
val j = new JFreeSpec(ThreeCounts(false, false, false))
val repJ = new TestIgnoredTrackingReporter
j.run(None, repJ, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses")), Map(), None, new Tracker)
assert(!repJ.testIgnoredReceived)
assert(!j.counts.theTestThisCalled)
assert(!j.counts.theTestThatCalled)
assert(j.counts.theTestTheOtherCalled)
// Same as previous, except Ignore specifically mentioned in excludes set
class KFreeSpec(val counts: ThreeCounts) extends PathFreeSpec {
"test this" taggedAs(mytags.SlowAsMolasses, mytags.FastAsLight) ignore { counts.theTestThisCalled = true }
"test that" taggedAs(mytags.SlowAsMolasses) ignore { counts.theTestThatCalled = true }
"test the other" ignore { counts.theTestTheOtherCalled = true }
override def newInstance = new KFreeSpec(counts)
}
val k = new KFreeSpec(ThreeCounts(false, false, false))
val repK = new TestIgnoredTrackingReporter
k.run(None, repK, new Stopper {}, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), Map(), None, new Tracker)
assert(repK.testIgnoredReceived)
assert(!k.counts.theTestThisCalled)
assert(!k.counts.theTestThatCalled)
assert(!k.counts.theTestTheOtherCalled)
}
it("should return the correct test count from its expectedTestCount method") {
class AFreeSpec extends PathFreeSpec {
"test this" in {}
"test that" in {}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
assert(a.expectedTestCount(Filter()) === 2)
class BFreeSpec extends PathFreeSpec {
"test this" ignore {}
"test that" in {}
override def newInstance = new BFreeSpec
}
val b = new BFreeSpec
assert(b.expectedTestCount(Filter()) === 1)
class CFreeSpec extends PathFreeSpec {
"test this" taggedAs(mytags.FastAsLight) in {}
"test that" in {}
override def newInstance = new CFreeSpec
}
val c = new CFreeSpec
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
class DFreeSpec extends PathFreeSpec {
"test this" taggedAs(mytags.FastAsLight, mytags.SlowAsMolasses) in {}
"test that" taggedAs(mytags.SlowAsMolasses) in {}
"test the other thing" in {}
override def newInstance = new DFreeSpec
}
val d = new DFreeSpec
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
assert(d.expectedTestCount(Filter()) === 3)
class EFreeSpec extends PathFreeSpec {
"test this" taggedAs(mytags.FastAsLight, mytags.SlowAsMolasses) in {}
"test that" taggedAs(mytags.SlowAsMolasses) in {}
"test the other thing" ignore {}
override def newInstance = new EFreeSpec
}
val e = new EFreeSpec
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 0)
assert(e.expectedTestCount(Filter()) === 2)
val f = new Suites(a, b, c, d, e)
assert(f.expectedTestCount(Filter()) === 10)
}
it("should generate a TestPending message when the test body is (pending)") {
class AFreeSpec extends PathFreeSpec {
"should do this" is (pending)
"should do that" in {
assert(2 + 2 === 4)
}
"should do something else" in {
assert(2 + 2 === 4)
pending
}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val tp = rep.testPendingEventsReceived
assert(tp.size === 2)
}
it("should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
"known in JDK 1.5, excluding AssertionError") {
class AFreeSpec extends PathFreeSpec {
"This FreeSpec" - {
"should throw AssertionError" in { throw new AssertionError }
"should throw plain old Error" in { throw new Error }
"should throw Throwable" in { throw new Throwable }
}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val tf = rep.testFailedEventsReceived
assert(tf.size === 3)
}
it("should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than " +
"AssertionError, causing Suites and Runs to abort.") {
class AFreeSpec extends PathFreeSpec {
"This FreeSpec" - {
"should throw AssertionError" in { throw new OutOfMemoryError }
}
override def newInstance = new AFreeSpec
}
// val a = new AFreeSpec
intercept[OutOfMemoryError] {
new AFreeSpec
// a.run(None, SilentReporter, new Stopper {}, Filter(), Map(), None, new Tracker())
}
}
it("should send InfoProvided events with aboutAPendingTest set to true for info " +
"calls made from a test that is pending") {
class AFreeSpec extends PathFreeSpec with GivenWhenThen {
"A FreeSpec" - {
"should do something" in {
given("two integers")
            when("one is subtracted from the other")
then("the result is the difference between the two numbers")
pending
}
}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val ip = rep.infoProvidedEventsReceived
val so = rep.scopeOpenedEventsReceived
val sc = rep.scopeClosedEventsReceived
assert(ip.size === 3)
assert(so.size === 1)
assert(sc.size === 1)
for (event <- ip) {
assert(event.message == "A FreeSpec" || event.aboutAPendingTest.isDefined && event.aboutAPendingTest.get)
}
}
it("should send InfoProvided events with aboutAPendingTest set to false for info " +
"calls made from a test that is not pending") {
class AFreeSpec extends PathFreeSpec with GivenWhenThen {
"A FreeSpec" - {
"should do something" in {
given("two integers")
            when("one is subtracted from the other")
then("the result is the difference between the two numbers")
assert(1 + 1 === 2)
}
}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val ip = rep.infoProvidedEventsReceived
val so = rep.scopeOpenedEventsReceived
val sc = rep.scopeClosedEventsReceived
assert(ip.size === 3)
assert(so.size === 1)
assert(sc.size === 1)
for (event <- ip) {
assert(event.message == "A FreeSpec" || event.aboutAPendingTest.isDefined && !event.aboutAPendingTest.get)
}
}
it("should not put parentheses around should clauses that follow when") {
class AFreeSpec extends PathFreeSpec {
"A Stack" - {
"when empty" - {
"should chill out" in {
assert(1 + 1 === 2)
}
}
}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val ts = rep.testSucceededEventsReceived
assert(ts.size === 1)
assert(ts.head.testName === "A Stack when empty should chill out")
}
it("should not put parentheses around should clauses that don't follow when") {
class AFreeSpec extends PathFreeSpec {
"A Stack" - {
"should chill out" in {
assert(1 + 1 === 2)
}
}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val ts = rep.testSucceededEventsReceived
assert(ts.size === 1)
assert(ts.head.testName === "A Stack should chill out")
}
    it("should report the duration of the actual running of the test, not the replaying of the test") {
class AFreeSpec extends PathFreeSpec {
"A Stack" - {
"should chill out" in {
Thread.sleep(100)
}
}
override def newInstance = new AFreeSpec
}
val a = new AFreeSpec
val rep = new EventRecordingReporter
a.run(None, rep, new Stopper {}, Filter(), Map(), None, new Tracker())
val ts = rep.testSucceededEventsReceived
assert(ts.size === 1)
import OptionValues._
val dura = ts.head.duration.value
assert(dura > 80, "duration was: " + dura)
}
}
}
|
yyuu/scalatest
|
src/test/scala/org/scalatest/path/FreeSpecSpec.scala
|
Scala
|
apache-2.0
| 38,190 |
package kvstore
import akka.testkit.TestKit
import akka.testkit.ImplicitSender
import org.scalatest.BeforeAndAfterAll
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.FunSuite
import akka.actor.ActorSystem
import scala.concurrent.duration._
import akka.testkit.TestProbe
import Arbiter._
import Persistence._
import Replicator._
class Step5_PrimaryPersistenceSpec extends TestKit(ActorSystem("Step5PrimaryPersistenceSpec"))
with FunSuite
with BeforeAndAfterAll
with ShouldMatchers
with ImplicitSender
with Tools {
override def afterAll(): Unit = {
system.shutdown()
}
test("case1: Primary does not acknowledge updates which have not been persisted") {
val arbiter = TestProbe()
val persistence = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, probeProps(persistence)), "case1-primary")
val client = session(primary)
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
val setId = client.set("foo", "bar")
val persistId = persistence.expectMsgPF() {
case Persist("foo", Some("bar"), id) => id
}
client.nothingHappens(100.milliseconds)
persistence.reply(Persisted("foo", persistId))
client.waitAck(setId)
}
test("case2: Primary retries persistence every 100 milliseconds") {
val arbiter = TestProbe()
val persistence = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, probeProps(persistence)), "case2-primary")
val client = session(primary)
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
val setId = client.set("foo", "bar")
val persistId = persistence.expectMsgPF() {
case Persist("foo", Some("bar"), id) => id
}
    // Retries from above
persistence.expectMsg(200.milliseconds, Persist("foo", Some("bar"), persistId))
persistence.expectMsg(200.milliseconds, Persist("foo", Some("bar"), persistId))
client.nothingHappens(100.milliseconds)
persistence.reply(Persisted("foo", persistId))
client.waitAck(setId)
}
test("case3: Primary generates failure after 1 second if persistence fails") {
val arbiter = TestProbe()
val persistence = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, probeProps(persistence)), "case3-primary")
val client = session(primary)
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
val setId = client.set("foo", "bar")
persistence.expectMsgType[Persist]
client.nothingHappens(800.milliseconds) // Should not fail too early
client.waitFailed(setId)
}
test("case4: Primary generates failure after 1 second if global acknowledgement fails") {
val arbiter = TestProbe()
val persistence = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case4-primary")
val secondary = TestProbe()
val client = session(primary)
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
arbiter.send(primary, Replicas(Set(primary, secondary.ref)))
val setId = client.set("foo", "bar")
secondary.expectMsgType[Snapshot]
client.nothingHappens(800.milliseconds) // Should not fail too early
client.waitFailed(setId)
}
test("case5: Primary acknowledges only after persistence and global acknowledgement") {
val arbiter = TestProbe()
val persistence = TestProbe()
val primary = system.actorOf(Replica.props(arbiter.ref, Persistence.props(flaky = false)), "case5-primary")
val secondaryA, secondaryB = TestProbe()
val client = session(primary)
arbiter.expectMsg(Join)
arbiter.send(primary, JoinedPrimary)
arbiter.send(primary, Replicas(Set(primary, secondaryA.ref, secondaryB.ref)))
val setId = client.set("foo", "bar")
val seqA = secondaryA.expectMsgType[Snapshot].seq
val seqB = secondaryB.expectMsgType[Snapshot].seq
client.nothingHappens(300.milliseconds)
secondaryA.reply(SnapshotAck("foo", seqA))
client.nothingHappens(300.milliseconds)
secondaryB.reply(SnapshotAck("foo", seqB))
client.waitAck(setId)
}
}
|
gempesaw/reactive-coursera
|
kvstore/src/test/scala/kvstore/Step5_PrimaryPersistenceSpec.scala
|
Scala
|
mit
| 4,101 |
package org.http4s
import java.net.URLEncoder
import org.http4s.Uri.{apply => _, unapply => _, Fragment => _, Path => _, _}
import org.http4s.UriTemplate._
import org.http4s.util.StringWriter
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.{Failure, Success, Try}
/**
 * Simple representation of a URI Template that can be rendered as an
 * RFC 6570 conformant string.
*
* This model reflects only a subset of RFC6570.
*
* Level 1 and Level 2 are completely modeled and
* Level 3 features are limited to:
* - Path segments, slash-prefixed
* - Form-style query, ampersand-separated
* - Fragment expansion
*/
final case class UriTemplate(
scheme: Option[Scheme] = None,
authority: Option[Authority] = None,
path: Path = Nil,
query: UriTemplate.Query = Nil,
fragment: Fragment = Nil) {
/**
* Replaces any expansion type that matches the given `name`. If no matching
* `expansion` could be found the same instance will be returned.
*/
def expandAny[T: QueryParamEncoder](name: String, value: T): UriTemplate =
expandPath(name, value).expandQuery(name, value).expandFragment(name, value)
/**
* Replaces any expansion type in `fragment` that matches the given `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandFragment[T: QueryParamEncoder](name: String, value: T): UriTemplate =
if (fragment.isEmpty) this
else copy(fragment = expandFragmentN(fragment, name, String.valueOf(value)))
/**
* Replaces any expansion type in `path` that matches the given `name`. If no
* matching `expansion` could be found the same instance will be returned.
*/
def expandPath[T: QueryParamEncoder](name: String, values: List[T]): UriTemplate =
copy(path = expandPathN(path, name, values.map(QueryParamEncoder[T].encode)))
/**
* Replaces any expansion type in `path` that matches the given `name`. If no
* matching `expansion` could be found the same instance will be returned.
*/
def expandPath[T: QueryParamEncoder](name: String, value: T): UriTemplate =
copy(path = expandPathN(path, name, QueryParamEncoder[T].encode(value) :: Nil))
/**
* Replaces any expansion type in `query` that matches the specified `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandQuery[T: QueryParamEncoder](name: String, values: List[T]): UriTemplate =
if (query.isEmpty) this
else copy(query = expandQueryN(query, name, values.map(QueryParamEncoder[T].encode(_).value)))
/**
* Replaces any expansion type in `query` that matches the specified `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandQuery(name: String): UriTemplate = expandQuery(name, List[String]())
/**
* Replaces any expansion type in `query` that matches the specified `name`.
* If no matching `expansion` could be found the same instance will be
* returned.
*/
def expandQuery[T: QueryParamEncoder](name: String, values: T*): UriTemplate =
expandQuery(name, values.toList)
override lazy val toString =
renderUriTemplate(this)
/**
   * If the template contains no unresolved expansions, an `Uri` wrapped in a
   * `Success` is returned; otherwise a `Failure` is returned.
*/
def toUriIfPossible: Try[Uri] =
if (containsExpansions(this))
Failure(
        new IllegalStateException(s"all expansions must be resolved to be convertible: $this"))
else Success(toUri(this))
}
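// A minimal usage sketch of the template API above (illustrative only; it
// assumes `import org.http4s.UriTemplate._` and the Int QueryParamEncoder that
// http4s provides; the names "orders", "id", "limit" and the values are made up):
//
//   val tpl = UriTemplate(
//     path = List(PathElm("orders"), PathExp("id")),
//     query = List(ParamExp("limit")))
//   tpl.toString                                    // "/orders{/id}{?limit}"
//   val expanded = tpl.expandPath("id", 42).expandQuery("limit", List(10))
//   expanded.toUriIfPossible                        // Success of /orders/42?limit=10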
object UriTemplate {
type Path = List[PathDef]
type Query = List[QueryDef]
type Fragment = List[FragmentDef]
protected val unreserved =
(('a' to 'z') ++ ('A' to 'Z') ++ ('0' to '9') :+ '-' :+ '.' :+ '_' :+ '~').toSet
// protected val genDelims = ':' :: '/' :: '?' :: '#' :: '[' :: ']' :: '@' :: Nil
// protected val subDelims = '!' :: '$' :: '&' :: '\\'' :: '(' :: ')' :: '*' :: '+' :: ',' :: ';' :: '=' :: Nil
// protected val reserved = genDelims ::: subDelims
def isUnreserved(s: String): Boolean = s.forall(unreserved.contains)
def isUnreservedOrEncoded(s: String): Boolean =
URLEncoder.encode(s, "UTF-8").forall(c => unreserved.contains(c) || c == '%')
protected def expandPathN(path: Path, name: String, values: List[QueryParameterValue]): Path = {
val acc = new ArrayBuffer[PathDef]()
def appendValues() = values.foreach { v =>
acc.append(PathElm(v.value))
}
path.foreach {
case p @ PathElm(_) => acc.append(p)
case p @ VarExp(Seq(n)) =>
if (n == name) appendValues()
else acc.append(p)
case p @ VarExp(ns) =>
if (ns.contains(name)) {
appendValues()
acc.append(VarExp(ns.filterNot(_ == name)))
} else acc.append(p)
case p @ ReservedExp(Seq(n)) =>
if (n == name) appendValues()
else acc.append(p)
case p @ ReservedExp(ns) =>
if (ns.contains(name)) {
appendValues()
acc.append(VarExp(ns.filterNot(_ == name)))
} else acc.append(p)
case p @ PathExp(Seq(n)) =>
if (n == name) appendValues()
else acc.append(p)
case p @ PathExp(ns) =>
if (ns.contains(name)) {
appendValues()
acc.append(PathExp(ns.filterNot(_ == name)))
} else acc.append(p)
}
acc.toList
}
protected def expandQueryN(query: Query, name: String, values: List[String]): Query = {
val acc = new ArrayBuffer[QueryDef]()
query.foreach {
case p @ ParamElm(_, _) => acc.append(p)
case p @ ParamVarExp(r, List(n)) =>
if (n == name) acc.append(ParamElm(r, values))
else acc.append(p)
case p @ ParamVarExp(r, ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(r, values))
acc.append(ParamVarExp(r, ns.filterNot(_ == name)))
} else acc.append(p)
case p @ ParamReservedExp(r, List(n)) =>
if (n == name) acc.append(ParamElm(r, values))
else acc.append(p)
case p @ ParamReservedExp(r, ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(r, values))
acc.append(ParamReservedExp(r, ns.filterNot(_ == name)))
} else acc.append(p)
case p @ ParamExp(Seq(n)) =>
if (n == name) acc.append(ParamElm(name, values))
else acc.append(p)
case p @ ParamExp(ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(name, values))
acc.append(ParamExp(ns.filterNot(_ == name)))
} else acc.append(p)
case p @ ParamContExp(Seq(n)) =>
if (n == name) acc.append(ParamElm(name, values))
else acc.append(p)
case p @ ParamContExp(ns) =>
if (ns.contains(name)) {
acc.append(ParamElm(name, values))
acc.append(ParamContExp(ns.filterNot(_ == name)))
} else acc.append(p)
}
acc.toList
}
protected def expandFragmentN(fragment: Fragment, name: String, value: String): Fragment = {
val acc = new ArrayBuffer[FragmentDef]()
fragment.foreach {
case p @ FragmentElm(_) => acc.append(p)
case p @ SimpleFragmentExp(n) =>
if (n == name) acc.append(FragmentElm(value)) else acc.append(p)
case p @ MultiFragmentExp(Seq(n)) =>
if (n == name) acc.append(FragmentElm(value)) else acc.append(p)
case p @ MultiFragmentExp(ns) =>
if (ns.contains(name)) {
acc.append(FragmentElm(value))
acc.append(MultiFragmentExp(ns.filterNot(_ == name)))
} else acc.append(p)
}
acc.toList
}
protected def renderAuthority(a: Authority): String = a match {
case Authority(Some(u), h, None) => u + "@" + renderHost(h)
case Authority(Some(u), h, Some(p)) => u + "@" + renderHost(h) + ":" + p
case Authority(None, h, Some(p)) => renderHost(h) + ":" + p
case Authority(_, h, _) => renderHost(h)
case _ => ""
}
protected def renderHost(h: Host): String = h match {
case RegName(n) => n.toString
case IPv4(a) => a.toString
case IPv6(a) => "[" + a.toString + "]"
case _ => ""
}
protected def renderScheme(s: Scheme): String =
(new StringWriter << s << ":").result
protected def renderSchemeAndAuthority(t: UriTemplate): String = t match {
case UriTemplate(None, None, _, _, _) => ""
case UriTemplate(Some(s), Some(a), _, _, _) => renderScheme(s) + "//" + renderAuthority(a)
case UriTemplate(Some(s), None, _, _, _) => renderScheme(s)
case UriTemplate(None, Some(a), _, _, _) => renderAuthority(a)
}
protected def renderQuery(ps: Query): String = {
val parted = ps.partition {
case ParamElm(_, _) => false
case ParamVarExp(_, _) => false
case ParamReservedExp(_, _) => false
case ParamExp(_) => true
case ParamContExp(_) => true
}
val elements = new ArrayBuffer[String]()
parted._2.foreach {
case ParamElm(n, Nil) => elements.append(n)
case ParamElm(n, List(v)) => elements.append(n + "=" + v)
case ParamElm(n, vs) => vs.foreach(v => elements.append(n + "=" + v))
case ParamVarExp(n, vs) => elements.append(n + "=" + "{" + vs.mkString(",") + "}")
case ParamReservedExp(n, vs) => elements.append(n + "=" + "{+" + vs.mkString(",") + "}")
case u => throw new IllegalStateException(s"type ${u.getClass.getName} not supported")
}
val exps = new ArrayBuffer[String]()
def separator = if (elements.isEmpty && exps.isEmpty) "?" else "&"
parted._1.foreach {
case ParamExp(ns) => exps.append("{" + separator + ns.mkString(",") + "}")
case ParamContExp(ns) => exps.append("{" + separator + ns.mkString(",") + "}")
case u => throw new IllegalStateException(s"type ${u.getClass.getName} not supported")
}
if (elements.isEmpty) exps.mkString
else "?" + elements.mkString("&") + exps.mkString
}
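  // Note on the rendering above (illustrative values): literal parameters render
  // first and expansions use "?" only when nothing precedes them, so
  // List(ParamElm("a", List("1")), ParamExp(List("x"))) renders as "?a=1{&x}"
  // while List(ParamExp(List("x"))) alone renders as "{?x}".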
protected def renderFragment(f: Fragment): String = {
val elements = new mutable.ArrayBuffer[String]()
val expansions = new mutable.ArrayBuffer[String]()
f.map {
case FragmentElm(v) => elements.append(v)
case SimpleFragmentExp(n) => expansions.append(n)
case MultiFragmentExp(ns) => expansions.append(ns.mkString(","))
}
if (elements.nonEmpty && expansions.nonEmpty) {
"#" + elements.mkString(",") + "{#" + expansions.mkString(",") + "}"
} else if (elements.nonEmpty) {
"#" + elements.mkString(",")
} else if (expansions.nonEmpty) {
"{#" + expansions.mkString(",") + "}"
} else {
"#"
}
}
protected def renderFragmentIdentifier(f: Fragment): String = {
val elements = new mutable.ArrayBuffer[String]()
f.map {
case FragmentElm(v) => elements.append(v)
case SimpleFragmentExp(_) =>
throw new IllegalStateException("SimpleFragmentExp cannot be converted to a Uri")
case MultiFragmentExp(_) =>
throw new IllegalStateException("MultiFragmentExp cannot be converted to a Uri")
}
if (elements.isEmpty) ""
else elements.mkString(",")
}
protected def buildQuery(q: Query): org.http4s.Query = {
val elements = Query.newBuilder
q.map {
case ParamElm(n, Nil) => elements += ((n, None))
case ParamElm(n, List(v)) => elements += ((n, Some(v)))
case ParamElm(n, vs) => vs.foreach(v => elements += ((n, Some(v))))
case u =>
throw new IllegalStateException(s"${u.getClass.getName} cannot be converted to a Uri")
}
elements.result()
}
protected def renderPath(p: Path): String = p match {
case Nil => "/"
case ps =>
val elements = new ArrayBuffer[String]()
ps.foreach {
case PathElm(n) => elements.append("/" + n)
case VarExp(ns) => elements.append("{" + ns.mkString(",") + "}")
case ReservedExp(ns) => elements.append("{+" + ns.mkString(",") + "}")
case PathExp(ns) => elements.append("{/" + ns.mkString(",") + "}")
case u => throw new IllegalStateException(s"type ${u.getClass.getName} not supported")
}
elements.mkString
}
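  // Note on the rendering above (illustrative values): Nil renders as "/",
  // List(PathElm("foo"), VarExp(List("id"))) renders as "/foo{id}", and
  // List(PathExp(List("a", "b"))) renders as "{/a,b}".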
protected def renderPathAndQueryAndFragment(t: UriTemplate): String = t match {
case UriTemplate(_, _, Nil, Nil, Nil) => "/"
case UriTemplate(_, _, Nil, Nil, f) => "/" + renderFragment(f)
case UriTemplate(_, _, Nil, query, Nil) => "/" + renderQuery(query)
case UriTemplate(_, _, Nil, query, f) => "/" + renderQuery(query) + renderFragment(f)
case UriTemplate(_, _, path, Nil, Nil) => renderPath(path)
case UriTemplate(_, _, path, query, Nil) => renderPath(path) + renderQuery(query)
case UriTemplate(_, _, path, Nil, f) => renderPath(path) + renderFragment(f)
case UriTemplate(_, _, path, query, f) =>
renderPath(path) + renderQuery(query) + renderFragment(f)
case _ => ""
}
protected def renderUriTemplate(t: UriTemplate): String = t match {
case UriTemplate(None, None, Nil, Nil, Nil) => "/"
case UriTemplate(Some(_), Some(_), Nil, Nil, Nil) => renderSchemeAndAuthority(t)
case UriTemplate(scheme @ _, authority @ _, path @ _, params @ _, fragment @ _) =>
renderSchemeAndAuthority(t) + renderPathAndQueryAndFragment(t)
case _ => ""
}
protected def fragmentExp(f: FragmentDef): Boolean = f match {
case FragmentElm(_) => false
case SimpleFragmentExp(_) => true
case MultiFragmentExp(_) => true
}
protected def pathExp(p: PathDef): Boolean = p match {
case PathElm(_) => false
case VarExp(_) => true
case ReservedExp(_) => true
case PathExp(_) => true
}
protected def queryExp(q: QueryDef): Boolean = q match {
case ParamElm(_, _) => false
case ParamVarExp(_, _) => true
case ParamReservedExp(_, _) => true
case ParamExp(_) => true
case ParamContExp(_) => true
}
protected def containsExpansions(t: UriTemplate): Boolean = t match {
case UriTemplate(_, _, Nil, Nil, Nil) => false
case UriTemplate(_, _, Nil, Nil, f) => f.exists(fragmentExp)
case UriTemplate(_, _, Nil, q, Nil) => q.exists(queryExp)
case UriTemplate(_, _, Nil, q, f) => (q.exists(queryExp)) || (f.exists(fragmentExp))
case UriTemplate(_, _, p, Nil, Nil) => p.exists(pathExp)
case UriTemplate(_, _, p, Nil, f) => (p.exists(pathExp)) || (f.exists(fragmentExp))
case UriTemplate(_, _, p, q, Nil) => (p.exists(pathExp)) || (q.exists(queryExp))
case UriTemplate(_, _, p, q, f) =>
(p.exists(pathExp)) || (q.exists(queryExp)) || (f.exists(fragmentExp))
}
protected def toUri(t: UriTemplate): Uri = t match {
case UriTemplate(s, a, Nil, Nil, Nil) => Uri(s, a)
case UriTemplate(s, a, Nil, Nil, f) => Uri(s, a, fragment = Some(renderFragmentIdentifier(f)))
case UriTemplate(s, a, Nil, q, Nil) => Uri(s, a, query = buildQuery(q))
case UriTemplate(s, a, Nil, q, f) =>
Uri(s, a, query = buildQuery(q), fragment = Some(renderFragmentIdentifier(f)))
case UriTemplate(s, a, p, Nil, Nil) => Uri(s, a, renderPath(p))
case UriTemplate(s, a, p, q, Nil) => Uri(s, a, renderPath(p), buildQuery(q))
case UriTemplate(s, a, p, Nil, f) =>
Uri(s, a, renderPath(p), fragment = Some(renderFragmentIdentifier(f)))
case UriTemplate(s, a, p, q, f) =>
Uri(s, a, renderPath(p), buildQuery(q), Some(renderFragmentIdentifier(f)))
}
sealed trait PathDef
/** Static path element */
final case class PathElm(value: String) extends PathDef
sealed trait QueryDef
sealed trait QueryExp extends QueryDef
/** Static query parameter element */
final case class ParamElm(name: String, values: List[String]) extends QueryDef
object ParamElm {
def apply(name: String): ParamElm = new ParamElm(name, Nil)
def apply(name: String, values: String*): ParamElm = new ParamElm(name, values.toList)
}
/**
* Simple string expansion for query parameter
*/
final case class ParamVarExp(name: String, variables: List[String]) extends QueryDef {
require(variables.forall(isUnreserved), "all variables must consist of unreserved characters")
}
object ParamVarExp {
def apply(name: String): ParamVarExp = new ParamVarExp(name, Nil)
def apply(name: String, variables: String*): ParamVarExp =
new ParamVarExp(name, variables.toList)
}
/**
* Reserved string expansion for query parameter
*/
final case class ParamReservedExp(name: String, variables: List[String]) extends QueryDef {
require(variables.forall(isUnreserved), "all variables must consist of unreserved characters")
}
object ParamReservedExp {
def apply(name: String): ParamReservedExp = new ParamReservedExp(name, Nil)
def apply(name: String, variables: String*): ParamReservedExp =
new ParamReservedExp(name, variables.toList)
}
/**
* URI Templates are similar to a macro language with a fixed set of macro
* definitions: the expression type determines the expansion process.
*
* The default expression type is simple string expansion (Level 1), wherein a
* single named variable is replaced by its value as a string after
* pct-encoding any characters not in the set of unreserved URI characters
* (<a href="http://tools.ietf.org/html/rfc6570#section-1.5">Section 1.5</a>).
*
* Level 2 templates add the plus ("+") operator, for expansion of values that
* are allowed to include reserved URI characters
* (<a href="http://tools.ietf.org/html/rfc6570#section-1.5">Section 1.5</a>),
* and the crosshatch ("#") operator for expansion of fragment identifiers.
*
* Level 3 templates allow multiple variables per expression, each
* separated by a comma, and add more complex operators for dot-prefixed
* labels, slash-prefixed path segments, semicolon-prefixed path
* parameters, and the form-style construction of a query syntax
* consisting of name=value pairs that are separated by an ampersand
* character.
*/
sealed trait ExpansionType
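  // Sketch of how the levels described above map onto this model (variable
  // names are hypothetical, shown only for illustration):
  //   Level 1  "{id}"     ~ VarExp(List("id"))
  //   Level 2  "{+path}"  ~ ReservedExp(List("path")), "{#frag}" ~ SimpleFragmentExp("frag")
  //   Level 3  "{/a,b}"   ~ PathExp(List("a", "b")),   "{?x,y}"  ~ ParamExp(List("x", "y"))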
sealed trait FragmentDef
/** Static fragment element */
final case class FragmentElm(value: String) extends FragmentDef
/**
* Fragment expansion, crosshatch-prefixed
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.4">Section 3.2.4</a>)
*/
final case class SimpleFragmentExp(name: String) extends FragmentDef {
require(name.nonEmpty, "at least one character must be set")
require(isUnreserved(name), "name must consist of unreserved characters")
}
/**
* Level 1 allows string expansion
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.2">Section 3.2.2</a>)
*
* Level 3 allows string expansion with multiple variables
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.2">Section 3.2.2</a>)
*/
final case class VarExp(names: List[String]) extends PathDef {
require(names.nonEmpty, "at least one name must be set")
require(names.forall(isUnreserved), "all names must consist of unreserved characters")
}
object VarExp {
def apply(names: String*): VarExp = new VarExp(names.toList)
}
/**
* Level 2 allows reserved string expansion
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.3">Section 3.2.3</a>)
*
* Level 3 allows reserved expansion with multiple variables
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.3">Section 3.2.3</a>)
*/
final case class ReservedExp(names: List[String]) extends PathDef {
require(names.nonEmpty, "at least one name must be set")
require(names.forall(isUnreserved), "all names must consist of unreserved characters")
}
object ReservedExp {
def apply(names: String*): ReservedExp = new ReservedExp(names.toList)
}
/**
* Fragment expansion with multiple variables, crosshatch-prefixed
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.4">Section 3.2.4</a>)
*/
final case class MultiFragmentExp(names: List[String]) extends FragmentDef {
require(names.nonEmpty, "at least one name must be set")
require(names.forall(isUnreserved), "all names must consist of unreserved characters")
}
object MultiFragmentExp {
def apply(names: String*): MultiFragmentExp = new MultiFragmentExp(names.toList)
}
/**
* Path segments, slash-prefixed
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.6">Section 3.2.6</a>)
*/
final case class PathExp(names: List[String]) extends PathDef {
require(names.nonEmpty, "at least one name must be set")
require(names.forall(isUnreserved), "all names must consist of unreserved characters")
}
object PathExp {
def apply(names: String*): PathExp = new PathExp(names.toList)
}
/**
* Form-style query, ampersand-separated
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.8">Section 3.2.8</a>)
*/
final case class ParamExp(names: List[String]) extends QueryExp {
require(names.nonEmpty, "at least one name must be set")
require(
names.forall(isUnreservedOrEncoded),
"all names must consist of unreserved characters or be encoded")
}
object ParamExp {
def apply(names: String*): ParamExp = new ParamExp(names.toList)
}
/**
* Form-style query continuation
* (<a href="http://tools.ietf.org/html/rfc6570#section-3.2.9">Section 3.2.9</a>)
*/
final case class ParamContExp(names: List[String]) extends QueryExp {
require(names.nonEmpty, "at least one name must be set")
require(names.forall(isUnreserved), "all names must consist of unreserved characters")
}
object ParamContExp {
def apply(names: String*): ParamContExp = new ParamContExp(names.toList)
}
}
|
reactormonk/http4s
|
core/src/main/scala/org/http4s/UriTemplate.scala
|
Scala
|
apache-2.0
| 21,600 |
args.foreach(arg => println(arg))
args.foreach((arg : String) => println(arg))
args.foreach(println)
|
DinoZhang/hello-scala
|
pa.scala
|
Scala
|
mit
| 101 |
package coursier.util
abstract class PlatformSyncCompanion
|
alexarchambault/coursier
|
modules/cache/js/src/main/scala/coursier/util/PlatformSyncCompanion.scala
|
Scala
|
apache-2.0
| 60 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.schema
import org.apache.flink.configuration.ReadableConfig
import org.apache.flink.table.api.TableColumn.ComputedColumn
import org.apache.flink.table.api.config.TableConfigOptions
import org.apache.flink.table.api.{TableException, ValidationException}
import org.apache.flink.table.catalog.CatalogTable
import org.apache.flink.table.factories.{TableFactoryUtil, TableSourceFactory, TableSourceFactoryContextImpl}
import org.apache.flink.table.planner.JMap
import org.apache.flink.table.planner.calcite.{FlinkContext, FlinkRelBuilder, FlinkTypeFactory}
import org.apache.flink.table.planner.catalog.CatalogSchemaTable
import org.apache.flink.table.planner.hint.FlinkHints
import org.apache.flink.table.sources.{StreamTableSource, TableSource, TableSourceValidation}
import org.apache.flink.table.types.logical.{LocalZonedTimestampType, TimestampKind, TimestampType}
import org.apache.calcite.plan.{RelOptSchema, RelOptTable}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.logical.LogicalTableScan
import java.util.{List => JList}
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* A legacy implementation of [[FlinkPreparingTableBase]] which defines the interfaces required
* to translate the Calcite [[RelOptTable]] to the Flink specific [[LegacyTableSourceTable]].
*
* <p>This table is only used to translate the catalog table into [[LegacyTableSourceTable]]
 * during the last phase of sql-to-rel conversion; it becomes obsolete once the sql node has been
 * converted to a relational expression.
*
* <p>Note: this class can be removed once legacy [[TableSource]] interface is removed.
*
* @param schemaTable Schema table which takes the variables needed to find the table source
*/
class LegacyCatalogSourceTable[T](
relOptSchema: RelOptSchema,
names: JList[String],
rowType: RelDataType,
val schemaTable: CatalogSchemaTable,
val catalogTable: CatalogTable)
extends FlinkPreparingTableBase(relOptSchema, rowType, names, schemaTable.getStatistic) {
lazy val columnExprs: Map[String, String] = {
catalogTable.getSchema
.getTableColumns
.flatMap {
case computedColumn: ComputedColumn =>
Some((computedColumn.getName, computedColumn.getExpression))
case _ =>
None
}
.toMap
}
override def toRel(context: RelOptTable.ToRelContext): RelNode = {
val cluster = context.getCluster
val flinkContext = cluster
.getPlanner
.getContext
.unwrap(classOf[FlinkContext])
val typeFactory = cluster.getTypeFactory.asInstanceOf[FlinkTypeFactory]
val conf = flinkContext.getTableConfig.getConfiguration
val hintedOptions = FlinkHints.getHintedOptions(context.getTableHints)
if (hintedOptions.nonEmpty
&& !conf.getBoolean(TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED)) {
throw new ValidationException(s"${FlinkHints.HINT_NAME_OPTIONS} hint is allowed only when "
+ s"${TableConfigOptions.TABLE_DYNAMIC_TABLE_OPTIONS_ENABLED.key} "
+ s"is set to true")
}
val tableSource = findAndCreateLegacyTableSource(
hintedOptions,
conf)
// erase time indicator types in the rowType
val actualRowType = eraseTimeIndicator(rowType, typeFactory, tableSource)
val tableSourceTable = new LegacyTableSourceTable[T](
relOptSchema,
schemaTable.getTableIdentifier,
actualRowType,
statistic,
tableSource,
schemaTable.isStreamingMode,
catalogTable,
hintedOptions)
// 1. push table scan
// Get row type of physical fields.
val physicalFields = getRowType
.getFieldList
.filter(f => !columnExprs.contains(f.getName))
.map(f => f.getIndex)
.toArray
// Copy this table with physical scan row type.
val newRelTable = tableSourceTable.copy(tableSource, physicalFields)
val scan = LogicalTableScan.create(cluster, newRelTable, context.getTableHints)
val relBuilder = FlinkRelBuilder.of(cluster, getRelOptSchema)
relBuilder.push(scan)
val toRexFactory = flinkContext.getSqlExprToRexConverterFactory
// 2. push computed column project
val fieldNames = actualRowType.getFieldNames.asScala
if (columnExprs.nonEmpty) {
val fieldExprs = fieldNames
.map { name =>
if (columnExprs.contains(name)) {
columnExprs(name)
} else {
s"`$name`"
}
}.toArray
val rexNodes = toRexFactory.create(newRelTable.getRowType, null).convertToRexNodes(fieldExprs)
relBuilder.projectNamed(rexNodes.toList, fieldNames, true)
}
// 3. push watermark assigner
val watermarkSpec = catalogTable
.getSchema
// we only support single watermark currently
.getWatermarkSpecs.asScala.headOption
if (schemaTable.isStreamingMode && watermarkSpec.nonEmpty) {
if (TableSourceValidation.hasRowtimeAttribute(tableSource)) {
throw new TableException(
"If watermark is specified in DDL, the underlying TableSource of connector" +
" shouldn't return an non-empty list of RowtimeAttributeDescriptor" +
" via DefinedRowtimeAttributes interface.")
}
val rowtime = watermarkSpec.get.getRowtimeAttribute
if (rowtime.contains(".")) {
throw new TableException(
s"Nested field '$rowtime' as rowtime attribute is not supported right now.")
}
val rowtimeIndex = fieldNames.indexOf(rowtime)
val watermarkRexNode = toRexFactory
.create(actualRowType, null)
.convertToRexNode(watermarkSpec.get.getWatermarkExpr)
relBuilder.watermark(rowtimeIndex, watermarkRexNode)
}
// 4. returns the final RelNode
relBuilder.build()
}
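  // Rough sketch (not from the Flink sources) of the plan built above for a
  // table with one computed column and a watermark; table and field names are
  // hypothetical and the node names assume Flink's usual Logical* naming:
  //
  //   LogicalWatermarkAssigner(rowtime=[ts], watermark=[...])
  //   +- LogicalProject(a, b, ts, <expr> AS c)
  //      +- LogicalTableScan(table=[[cat, db, t]])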
/** Create the legacy table source. */
private def findAndCreateLegacyTableSource(
hintedOptions: JMap[String, String],
conf: ReadableConfig): TableSource[T] = {
val tableToFind = if (hintedOptions.nonEmpty) {
catalogTable.copy(
FlinkHints.mergeTableOptions(
hintedOptions,
catalogTable.getOptions))
} else {
catalogTable
}
val tableSource = TableFactoryUtil.findAndCreateTableSource(
schemaTable.getCatalog.orElse(null),
schemaTable.getTableIdentifier,
tableToFind,
conf,
schemaTable.isTemporary)
// validation
val tableName = schemaTable.getTableIdentifier.asSummaryString
tableSource match {
case ts: StreamTableSource[_] =>
if (!schemaTable.isStreamingMode && !ts.isBounded) {
throw new ValidationException("Cannot query on an unbounded source in batch mode, " +
s"but '$tableName' is unbounded.")
}
case _ =>
throw new ValidationException("Catalog tables only support "
+ "StreamTableSource and InputFormatTableSource")
}
tableSource.asInstanceOf[TableSource[T]]
}
/**
* Erases time indicators, i.e. converts rowtime and proctime type into regular timestamp type.
* This is required before converting this [[CatalogSourceTable]] into multiple RelNodes,
   * otherwise the derived data types would not match.
*/
private def eraseTimeIndicator(
relDataType: RelDataType,
factory: FlinkTypeFactory,
tableSource: TableSource[_]): RelDataType = {
val hasLegacyTimeAttributes =
TableSourceValidation.hasRowtimeAttribute(tableSource) ||
TableSourceValidation.hasProctimeAttribute(tableSource)
// If the table source is defined by TableEnvironment.connect() and the time attributes are
// defined by legacy proctime and rowtime descriptors, we should not erase time indicator types
if (columnExprs.isEmpty
&& catalogTable.getSchema.getWatermarkSpecs.isEmpty
&& hasLegacyTimeAttributes) {
relDataType
} else {
val logicalRowType = FlinkTypeFactory.toLogicalRowType(relDataType)
val fieldNames = logicalRowType.getFieldNames
val fieldTypes = logicalRowType.getFields.map { f =>
if (FlinkTypeFactory.isTimeIndicatorType(f.getType)) {
f.getType match {
case ts: TimestampType =>
new TimestampType(
ts.isNullable,
TimestampKind.REGULAR,
ts.getPrecision)
case ltz: LocalZonedTimestampType =>
new LocalZonedTimestampType(
ltz.isNullable,
TimestampKind.REGULAR,
ltz.getPrecision)
case _ => throw new ValidationException("The supported time indicator type" +
" are TIMESTAMP and TIMESTAMP_LTZ, but is " + f.getType + ".")
}
} else {
f.getType
}
}
factory.buildRelNodeRowType(fieldNames.asScala, fieldTypes)
}
}
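  // For instance (illustrative), a field typed as TIMESTAMP(3) *ROWTIME* in the
  // scanned row type is returned here as a plain TIMESTAMP(3), so the projected
  // and watermarked RelNodes built in toRel agree on their row types.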
}
|
StephanEwen/incubator-flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/schema/LegacyCatalogSourceTable.scala
|
Scala
|
apache-2.0
| 9,707 |
/*
* ____ ____ _____ ____ ___ ____
* | _ \\ | _ \\ | ____| / ___| / _/ / ___| Precog (R)
* | |_) | | |_) | | _| | | | | /| | | _ Advanced Analytics Engine for NoSQL Data
* | __/ | _ < | |___ | |___ |/ _| | | |_| | Copyright (C) 2010 - 2013 SlamData, Inc.
* |_| |_| \\_\\ |_____| \\____| /__/ \\____| All Rights Reserved.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the
* GNU Affero General Public License as published by the Free Software Foundation, either version
* 3 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License along with this
* program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package com.precog
package mimir
import org.joda.time._
import util.NumericComparisons
import com.precog.common._
import bytecode._
import yggdrasil._
import yggdrasil.table._
import com.precog.util._
import scalaz._
import scalaz.std.anyVal._
import scalaz.std.option._
import scalaz.std.set._
import scalaz.std.tuple._
import scalaz.syntax.foldable._
import scalaz.syntax.std.option._
import scalaz.syntax.std.boolean._
import scala.annotation.tailrec
import scala.collection.mutable
class LongAdder {
var t = 0L
val ts = mutable.ArrayBuffer.empty[BigDecimal]
final def maxLongSqrt = 3037000499L
def add(x: BigDecimal): Unit = ts.append(x)
  // guard both signs so that x * x cannot overflow for large-magnitude inputs
  def addSquare(x: Long) = if (x > -maxLongSqrt && x < maxLongSqrt)
    add(x * x)
  else
    add(BigDecimal(x) pow 2)
def add(x: Long): Unit = {
val y = t + x
if ((~(x ^ t) & (x ^ y)) >= 0L) {
t = y
} else {
ts.append(BigDecimal(t))
t = x
}
}
def total(): BigDecimal = ts.sum + t
}
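// How the guard in `add(x: Long)` works, with a minimal sketch (illustrative
// values): (~(x ^ t) & (x ^ y)) is negative exactly when t and x share a sign
// but y = t + x does not, i.e. when the Long addition overflowed; in that case
// the running Long is spilled into the BigDecimal buffer and accumulation
// restarts at x, so total() stays exact.
//
//   val adder = new LongAdder
//   adder.add(Long.MaxValue)
//   adder.add(1L)
//   adder.total()   // BigDecimal(Long.MaxValue) + 1, no wrap-around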
trait ReductionLibModule[M[+_]] extends ColumnarTableLibModule[M] {
trait ReductionLib extends ColumnarTableLib {
import BigDecimalOperations._
val ReductionNamespace = Vector()
override def _libReduction = super._libReduction ++ Set(Count, Max, Min, MaxTime, MinTime, Sum, Mean, GeometricMean, SumSq, Variance, StdDev, Forall, Exists)
val CountMonoid = implicitly[Monoid[Count.Result]]
object Count extends Reduction(ReductionNamespace, "count") {
// limiting ourselves to 9.2e18 rows doesn't seem like a problem.
type Result = Long
implicit val monoid = CountMonoid
val tpe = UnaryOperationType(JType.JUniverseT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range) = {
val cx = schema.columns(JType.JUniverseT).toArray
var count = 0L
RangeUtil.loop(range) { i =>
if (Column.isDefinedAt(cx, i)) count += 1L
}
count
}
}
def extract(res: Result): Table = Table.constLong(Set(res))
def extractValue(res: Result) = Some(CNum(res))
}
object MaxTime extends Reduction(ReductionNamespace, "maxTime") {
type Result = Option[DateTime]
implicit val monoid = new Monoid[Result] {
def zero = None
def append(left: Result, right: => Result): Result = {
(for {
l <- left
r <- right
} yield {
val res = NumericComparisons.compare(l, r)
if (res > 0) l
else r
}) orElse left orElse right
}
}
val tpe = UnaryOperationType(JDateT, JDateT)
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range): Result = {
val maxs = schema.columns(JDateT) map {
case col: DateColumn =>
var zmax: DateTime = {
val init = new DateTime(0)
val min = -292275054 - 1970 //the smallest Int value jodatime accepts
init.plus(Period.years(min))
}
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (NumericComparisons.compare(z, zmax) > 0) zmax = z
}
if (seen) Some(zmax) else None
case _ => None
}
if (maxs.isEmpty) None else maxs.suml(monoid)
}
}
def extract(res: Result): Table =
res map { dt => Table.constDate(Set(dt)) } getOrElse Table.empty
def extractValue(res: Result) = res map { CDate(_) }
}
object MinTime extends Reduction(ReductionNamespace, "minTime") {
type Result = Option[DateTime]
implicit val monoid = new Monoid[Result] {
def zero = None
def append(left: Result, right: => Result): Result = {
(for {
l <- left
r <- right
} yield {
val res = NumericComparisons.compare(l, r)
if (res < 0) l
else r
}) orElse left orElse right
}
}
val tpe = UnaryOperationType(JDateT, JDateT)
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range): Result = {
val maxs = schema.columns(JDateT) map {
case col: DateColumn =>
var zmax: DateTime = {
val init = new DateTime(0)
val max = 292278993 - 1970 //the largest Int value jodatime accepts
init.plus(Period.years(max))
}
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (NumericComparisons.compare(z, zmax) < 0) zmax = z
}
if (seen) Some(zmax) else None
case _ => None
}
if (maxs.isEmpty) None else maxs.suml(monoid)
}
}
def extract(res: Result): Table =
res map { dt => Table.constDate(Set(dt)) } getOrElse Table.empty
def extractValue(res: Result) = res map { CDate(_) }
}
val MaxMonoid = implicitly[Monoid[Max.Result]]
object Max extends Reduction(ReductionNamespace, "max") {
type Result = Option[BigDecimal]
implicit val monoid = new Monoid[Result] {
def zero = None
def append(left: Result, right: => Result): Result = {
(for (l <- left; r <- right) yield l max r) orElse left orElse right
}
}
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range): Result = {
val maxs = schema.columns(JNumberT) map {
case col: LongColumn =>
// for longs, we'll use a Boolean to track whether zmax was really
// seen or not.
var zmax = Long.MinValue
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (z > zmax) zmax = z
}
if (seen) Some(BigDecimal(zmax)) else None
case col: DoubleColumn =>
// since -inf is not a legal value, it's a great starting point for
// finding the max because any legal value will be greater.
var zmax = Double.NegativeInfinity
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (z > zmax) zmax = z
}
if (zmax > Double.NegativeInfinity) Some(BigDecimal(zmax)) else None
case col: NumColumn =>
// we can just use a null BigDecimal to signal that we haven't
// found a value yet.
var zmax: BigDecimal = null
RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (zmax == null || z > zmax) zmax = z
}
if (zmax != null) Some(zmax) else None
case _ => None
}
// now we just find the max out of all of our column types
if (maxs.isEmpty) None else maxs.suml(monoid)
}
}
def extract(res: Result): Table =
res map { v => Table.constDecimal(Set(v)) } getOrElse Table.empty
def extractValue(res: Result) = res map { CNum(_) }
}
val MinMonoid = implicitly[Monoid[Min.Result]]
object Min extends Reduction(ReductionNamespace, "min") {
type Result = Option[BigDecimal]
implicit val monoid = new Monoid[Result] {
def zero = None
def append(left: Result, right: => Result): Result = {
(for (l <- left; r <- right) yield l min r) orElse left orElse right
}
}
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range): Result = {
val mins = schema.columns(JNumberT) map {
case col: LongColumn =>
// for longs, we'll use a Boolean to track whether zmin was really
// seen or not.
var zmin = Long.MaxValue
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (z < zmin) zmin = z
}
if (seen) Some(BigDecimal(zmin)) else None
case col: DoubleColumn =>
// since +inf is not a legal value, it's a great starting point for
// finding the min because any legal value will be less.
var zmin = Double.PositiveInfinity
RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (z < zmin) zmin = z
}
if (zmin < Double.PositiveInfinity) Some(BigDecimal(zmin)) else None
case col: NumColumn =>
// we can just use a null BigDecimal to signal that we haven't
// found a value yet.
var zmin: BigDecimal = null
RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
if (zmin == null || z < zmin) zmin = z
}
if (zmin != null) Some(zmin) else None
case _ => None
}
// now we just find the min out of all of our column types
if (mins.isEmpty) None else mins.suml(monoid)
}
}
def extract(res: Result): Table =
res map { v => Table.constDecimal(Set(v)) } getOrElse Table.empty
def extractValue(res: Result) = res map { CNum(_) }
}
val SumMonoid = implicitly[Monoid[Sum.Result]]
object Sum extends Reduction(ReductionNamespace, "sum") {
type Result = Option[BigDecimal]
implicit val monoid = SumMonoid
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range) = {
val sum = schema.columns(JNumberT) map {
case col: LongColumn =>
val ls = new LongAdder()
val seen = RangeUtil.loopDefined(range, col) { i => ls.add(col(i)) }
if (seen) Some(ls.total) else None
// TODO: exactness + overflow
case col: DoubleColumn =>
var t = 0.0
var seen = RangeUtil.loopDefined(range, col) { i => t += col(i) }
if (seen) Some(BigDecimal(t)) else None
case col: NumColumn =>
var t = BigDecimal(0)
val seen = RangeUtil.loopDefined(range, col) { i => t += col(i) }
if (seen) Some(t) else None
case _ => None
}
if (sum.isEmpty) None else sum.suml(monoid)
}
}
def extract(res: Result): Table =
res map { v => Table.constDecimal(Set(v)) } getOrElse Table.empty
def extractValue(res: Result) = res map { CNum(_) }
}
val MeanMonoid = implicitly[Monoid[Mean.Result]]
object Mean extends Reduction(ReductionNamespace, "mean") {
type Result = Option[InitialResult]
type InitialResult = (BigDecimal, Long) // (sum, count)
implicit val monoid = MeanMonoid
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new Reducer[Result] {
def reduce(schema: CSchema, range: Range): Result = {
val results = schema.columns(JNumberT) map {
case col: LongColumn =>
val ls = new LongAdder()
var count = 0L
RangeUtil.loopDefined(range, col) { i =>
ls.add(col(i))
count += 1L
}
if (count > 0L) Some((ls.total, count)) else None
case col: DoubleColumn =>
var count = 0L
var t = BigDecimal(0)
RangeUtil.loopDefined(range, col) { i =>
t += col(i)
count += 1L
}
if (count > 0L) Some((t, count)) else None
case col: NumColumn =>
var count = 0L
var t = BigDecimal(0)
RangeUtil.loopDefined(range, col) { i =>
t += col(i)
count += 1L
}
if (count > 0L) Some((t, count)) else None
case _ => None
}
if (results.isEmpty) None else results.suml(monoid)
}
}
def perform(res: Result): Option[BigDecimal] = res map {
case (sum, count) => sum / count
}
def extract(res: Result): Table = perform(res) map {
case v => Table.constDecimal(Set(v))
} getOrElse Table.empty
def extractValue(res: Result) = perform(res) map { CNum(_) }
}
object GeometricMean extends Reduction(ReductionNamespace, "geometricMean") {
type Result = Option[InitialResult]
type InitialResult = (BigDecimal, Long)
implicit val monoid = new Monoid[Result] {
def zero = None
def append(left: Result, right: => Result) = {
val both = for ((l1, l2) <- left; (r1, r2) <- right) yield (l1 * r1, l2 + r2)
both orElse left orElse right
}
}
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new Reducer[Option[(BigDecimal, Long)]] {
def reduce(schema: CSchema, range: Range): Result = {
val results = schema.columns(JNumberT) map {
case col: LongColumn =>
var prod = BigDecimal(1)
var count = 0L
RangeUtil.loopDefined(range, col) { i =>
prod *= col(i)
count += 1L
}
if (count > 0) Some((prod, count)) else None
case col: DoubleColumn =>
var prod = BigDecimal(1)
var count = 0L
RangeUtil.loopDefined(range, col) { i =>
prod *= col(i)
count += 1L
}
if (count > 0) Some((prod, count)) else None
case col: NumColumn =>
var prod = BigDecimal(1)
var count = 0L
RangeUtil.loopDefined(range, col) { i =>
prod *= col(i)
count += 1L
}
if (count > 0) Some((prod, count)) else None
case _ => None
}
if (results.isEmpty) None else results.suml(monoid)
}
}
private def perform(res: Result) = res map {
case (prod, count) => math.pow(prod.toDouble, 1 / count.toDouble)
} filter(StdLib.doubleIsDefined)
def extract(res: Result): Table = perform(res) map {
v => Table.constDouble(Set(v))
} getOrElse {
Table.empty
}
def extractValue(res: Result) = perform(res).map(CNum(_))
}
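    // Worked example for the geometric mean above (illustrative values): the
    // inputs 2 and 8 reduce to (prod, count) = (16, 2), so perform computes
    // math.pow(16, 1 / 2.0) = 4.0.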
val SumSqMonoid = implicitly[Monoid[SumSq.Result]]
object SumSq extends Reduction(ReductionNamespace, "sumSq") {
type Result = Option[BigDecimal]
implicit val monoid = SumSqMonoid
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new Reducer[Result] {
def reduce(schema: CSchema, range: Range): Result = {
val result = schema.columns(JNumberT) map {
case col: LongColumn =>
val ls = new LongAdder()
val seen = RangeUtil.loopDefined(range, col) { i =>
ls.addSquare(col(i))
}
if (seen) Some(ls.total) else None
case col: DoubleColumn =>
var t = BigDecimal(0)
val seen = RangeUtil.loopDefined(range, col) { i =>
t += BigDecimal(col(i)) pow 2
}
if (seen) Some(t) else None
case col: NumColumn =>
var t = BigDecimal(0)
val seen = RangeUtil.loopDefined(range, col) { i =>
t += col(i) pow 2
}
if (seen) Some(t) else None
case _ => None
}
if (result.isEmpty) None else result.suml(monoid)
}
}
def extract(res: Result): Table =
res map { v => Table.constDecimal(Set(v)) } getOrElse Table.empty
def extractValue(res: Result) = res map { CNum(_) }
}
class CountSumSumSqReducer extends Reducer[Option[(Long, BigDecimal, BigDecimal)]] {
def reduce(schema: CSchema, range: Range):
Option[(Long, BigDecimal, BigDecimal)] = {
val result = schema.columns(JNumberT) map {
case col: LongColumn =>
var count = 0L
var sum = new LongAdder()
var sumsq = new LongAdder()
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
count += 1
sum.add(z)
sumsq.addSquare(z)
}
if (seen) Some((count, sum.total, sumsq.total)) else None
case col: DoubleColumn =>
var count = 0L
var sum = BigDecimal(0)
var sumsq = BigDecimal(0)
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = BigDecimal(col(i))
count += 1
sum += z
sumsq += z pow 2
}
if (seen) Some((count, sum, sumsq)) else None
case col: NumColumn =>
var count = 0L
var sum = BigDecimal(0)
var sumsq = BigDecimal(0)
val seen = RangeUtil.loopDefined(range, col) { i =>
val z = col(i)
count += 1
sum += z
sumsq += z pow 2
}
if (seen) Some((count, sum, sumsq)) else None
case _ => None
}
if (result.isEmpty) None else result.suml
}
}
val VarianceMonoid = implicitly[Monoid[Variance.Result]]
object Variance extends Reduction(ReductionNamespace, "variance") {
type Result = Option[InitialResult]
type InitialResult = (Long, BigDecimal, BigDecimal)
implicit val monoid = VarianceMonoid
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new CountSumSumSqReducer()
def perform(res: Result) = res flatMap {
case (count, sum, sumsq) if count > 0 =>
val n = (sumsq - (sum * sum / count)) / count
Some(n)
case _ =>
None
}
def extract(res: Result): Table = perform(res) map { v =>
Table.constDecimal(Set(v))
} getOrElse Table.empty
def extractValue(res: Result) = perform(res) map { CNum(_) }
}
val StdDevMonoid = implicitly[Monoid[StdDev.Result]]
object StdDev extends Reduction(ReductionNamespace, "stdDev") {
type Result = Option[InitialResult]
type InitialResult = (Long, BigDecimal, BigDecimal) // (count, sum, sumsq)
implicit val monoid = StdDevMonoid
val tpe = UnaryOperationType(JNumberT, JNumberT)
def reducer(ctx: MorphContext): Reducer[Result] = new CountSumSumSqReducer()
def perform(res: Result) = res flatMap {
case (count, sum, sumsq) if count > 0 =>
val n = sqrt(count * sumsq - sum * sum) / count
Some(n)
case _ =>
None
}
def extract(res: Result): Table = perform(res) map { v =>
Table.constDecimal(Set(v))
} getOrElse Table.empty
// todo using toDouble is BAD
def extractValue(res: Result) = perform(res) map { CNum(_) }
}
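  // Both Variance and StdDev above derive their result from the (count, sum, sumsq) triple via
  // the identity Var(X) = E[X^2] - E[X]^2 = (sumsq - sum*sum/count) / count; StdDev computes the
  // equivalent sqrt(count*sumsq - sum*sum) / count. Worked example (not part of the library):
  // the values 1, 2, 3 give count = 3, sum = 6, sumsq = 14, hence variance (14 - 36/3) / 3 = 2/3
  // and standard deviation sqrt(3*14 - 36) / 3 = sqrt(6)/3.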
object Forall extends Reduction(ReductionNamespace, "forall") {
type Result = Option[Boolean]
val tpe = UnaryOperationType(JBooleanT, JBooleanT)
implicit val monoid = new Monoid[Option[Boolean]] {
def zero = None
def append(left: Option[Boolean], right: => Option[Boolean]) = {
val both = for (l <- left; r <- right) yield l && r
both orElse left orElse right
}
}
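    // The monoid treats None (no defined boolean column seen) as the identity: when both sides
    // are defined they combine with &&, and a lone defined side is kept as-is. Exists below
    // mirrors this with || instead of &&.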
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range) = {
if (range.isEmpty) {
None
} else {
var back = true
var defined = false
schema.columns(JBooleanT) foreach { c =>
val bc = c.asInstanceOf[BoolColumn]
var acc = back
val idef = RangeUtil.loopDefined(range, bc) { i =>
acc &&= bc(i)
}
back &&= acc
if (idef) {
defined = true
}
}
if (defined)
Some(back)
else
None
}
}
}
private val default = true
private def perform(res: Result) = res getOrElse default
def extract(res: Result): Table = Table.constBoolean(Set(perform(res)))
def extractValue(res: Result) = Some(CBoolean(perform(res)))
}
object Exists extends Reduction(ReductionNamespace, "exists") {
type Result = Option[Boolean]
val tpe = UnaryOperationType(JBooleanT, JBooleanT)
implicit val monoid = new Monoid[Option[Boolean]] {
def zero = None
def append(left: Option[Boolean], right: => Option[Boolean]) = {
val both = for (l <- left; r <- right) yield l || r
both orElse left orElse right
}
}
def reducer(ctx: MorphContext): Reducer[Result] = new CReducer[Result] {
def reduce(schema: CSchema, range: Range) = {
if (range.isEmpty) {
None
} else {
var back = false
var defined = false
schema.columns(JBooleanT) foreach { c =>
val bc = c.asInstanceOf[BoolColumn]
var acc = back
val idef = RangeUtil.loopDefined(range, bc) { i =>
acc ||= bc(i)
}
back ||= acc
if (idef) {
defined = true
}
}
if (defined)
Some(back)
else
None
}
}
}
private val default = false
private def perform(res: Result) = res getOrElse default
def extract(res: Result): Table = Table.constBoolean(Set(perform(res)))
def extractValue(res: Result) = Some(CBoolean(perform(res)))
}
}
}
|
precog/platform
|
mimir/src/main/scala/com/precog/mimir/ReductionLib.scala
|
Scala
|
agpl-3.0
| 23,997 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.{util => ju}
import java.io.File
import java.text.SimpleDateFormat
import java.util.{Calendar, Date, Locale}
import org.apache.commons.io.FileUtils
import org.scalatest.{BeforeAndAfter, Matchers}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{AnalysisException, Dataset}
import org.apache.spark.sql.catalyst.plans.logical.EventTimeWatermark
import org.apache.spark.sql.catalyst.util.DateTimeUtils
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.OutputMode._
import org.apache.spark.util.Utils
class EventTimeWatermarkSuite extends StreamTest with BeforeAndAfter with Matchers with Logging {
import testImplicits._
after {
sqlContext.streams.active.foreach(_.stop())
}
test("EventTimeStats") {
val epsilon = 10E-6
val stats = EventTimeStats(max = 100, min = 10, avg = 20.0, count = 5)
stats.add(80L)
stats.max should be (100)
stats.min should be (10)
stats.avg should be (30.0 +- epsilon)
stats.count should be (6)
val stats2 = EventTimeStats(80L, 5L, 15.0, 4)
stats.merge(stats2)
stats.max should be (100)
stats.min should be (5)
stats.avg should be (24.0 +- epsilon)
stats.count should be (10)
}
test("EventTimeStats: avg on large values") {
val epsilon = 10E-6
val largeValue = 10000000000L // 10B
// Make sure `largeValue` will cause overflow if we use a Long sum to calc avg.
assert(largeValue * largeValue != BigInt(largeValue) * BigInt(largeValue))
val stats =
EventTimeStats(max = largeValue, min = largeValue, avg = largeValue, count = largeValue - 1)
stats.add(largeValue)
stats.avg should be (largeValue.toDouble +- epsilon)
val stats2 = EventTimeStats(
max = largeValue + 1,
min = largeValue,
avg = largeValue + 1,
count = largeValue)
stats.merge(stats2)
stats.avg should be ((largeValue + 0.5) +- epsilon)
}
test("error on bad column") {
val inputData = MemoryStream[Int].toDF()
val e = intercept[AnalysisException] {
inputData.withWatermark("badColumn", "1 minute")
}
assert(e.getMessage contains "badColumn")
}
test("error on wrong type") {
val inputData = MemoryStream[Int].toDF()
val e = intercept[AnalysisException] {
inputData.withWatermark("value", "1 minute")
}
assert(e.getMessage contains "value")
assert(e.getMessage contains "int")
}
test("event time and watermark metrics") {
// No event time metrics when there is no watermarking
val inputData1 = MemoryStream[Int]
val aggWithoutWatermark = inputData1.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(aggWithoutWatermark, outputMode = Complete)(
AddData(inputData1, 15),
CheckAnswer((15, 1)),
assertEventStats { e => assert(e.isEmpty) },
AddData(inputData1, 10, 12, 14),
CheckAnswer((10, 3), (15, 1)),
assertEventStats { e => assert(e.isEmpty) }
)
// All event time metrics where watermarking is set
val inputData2 = MemoryStream[Int]
val aggWithWatermark = inputData2.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(aggWithWatermark)(
AddData(inputData2, 15),
CheckAnswer(),
assertEventStats(min = 15, max = 15, avg = 15, wtrmark = 0),
AddData(inputData2, 10, 12, 14),
CheckAnswer(),
assertEventStats(min = 10, max = 14, avg = 12, wtrmark = 5),
AddData(inputData2, 25),
CheckAnswer((10, 3)),
assertEventStats(min = 25, max = 25, avg = 25, wtrmark = 5)
)
}
test("event time and watermark metrics with Trigger.Once (SPARK-24699)") {
// All event time metrics where watermarking is set
val inputData = MemoryStream[Int]
val aggWithWatermark = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
    // Unlike the ProcessingTime trigger, Trigger.Once runs only a single trigger each time
    // the query is started and does not run no-data batches. Hence the answer produced by an
    // updated watermark only shows up the next time the query is started.
    // Also, the data to process in the next trigger is added *before* starting the stream in
    // Trigger.Once to ensure that the first and only trigger picks up the new data.
testStream(aggWithWatermark)(
StartStream(Trigger.Once), // to make sure the query is not running when adding data 1st time
awaitTermination(),
AddData(inputData, 15),
StartStream(Trigger.Once),
awaitTermination(),
CheckNewAnswer(),
assertEventStats(min = 15, max = 15, avg = 15, wtrmark = 0),
// watermark should be updated to 15 - 10 = 5
AddData(inputData, 10, 12, 14),
StartStream(Trigger.Once),
awaitTermination(),
CheckNewAnswer(),
assertEventStats(min = 10, max = 14, avg = 12, wtrmark = 5),
// watermark should stay at 5
AddData(inputData, 25),
StartStream(Trigger.Once),
awaitTermination(),
CheckNewAnswer(),
assertEventStats(min = 25, max = 25, avg = 25, wtrmark = 5),
// watermark should be updated to 25 - 10 = 15
AddData(inputData, 50),
StartStream(Trigger.Once),
awaitTermination(),
CheckNewAnswer((10, 3)), // watermark = 15 is used to generate this
assertEventStats(min = 50, max = 50, avg = 50, wtrmark = 15),
// watermark should be updated to 50 - 10 = 40
AddData(inputData, 50),
StartStream(Trigger.Once),
awaitTermination(),
CheckNewAnswer((15, 1), (25, 1)), // watermark = 40 is used to generate this
assertEventStats(min = 50, max = 50, avg = 50, wtrmark = 40))
}
test("recovery from Spark ver 2.3.1 commit log without commit metadata (SPARK-24699)") {
// All event time metrics where watermarking is set
val inputData = MemoryStream[Int]
val aggWithWatermark = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
val resourceUri = this.getClass.getResource(
"/structured-streaming/checkpoint-version-2.3.1-without-commit-log-metadata/").toURI
val checkpointDir = Utils.createTempDir().getCanonicalFile
// Copy the checkpoint to a temp dir to prevent changes to the original.
    // Not doing this will lead to the test passing on the first run, but failing on subsequent runs.
FileUtils.copyDirectory(new File(resourceUri), checkpointDir)
inputData.addData(15)
inputData.addData(10, 12, 14)
testStream(aggWithWatermark)(
/*
Note: The checkpoint was generated using the following input in Spark version 2.3.1
StartStream(checkpointLocation = "./sql/core/src/test/resources/structured-streaming/" +
"checkpoint-version-2.3.1-without-commit-log-metadata/")),
AddData(inputData, 15), // watermark should be updated to 15 - 10 = 5
CheckAnswer(),
AddData(inputData, 10, 12, 14), // watermark should stay at 5
CheckAnswer(),
StopStream,
// Offset log should have watermark recorded as 5.
*/
StartStream(Trigger.Once),
awaitTermination(),
AddData(inputData, 25),
StartStream(Trigger.Once, checkpointLocation = checkpointDir.getAbsolutePath),
awaitTermination(),
CheckNewAnswer(),
assertEventStats(min = 25, max = 25, avg = 25, wtrmark = 5),
// watermark should be updated to 25 - 10 = 15
AddData(inputData, 50),
StartStream(Trigger.Once, checkpointLocation = checkpointDir.getAbsolutePath),
awaitTermination(),
CheckNewAnswer((10, 3)), // watermark = 15 is used to generate this
assertEventStats(min = 50, max = 50, avg = 50, wtrmark = 15),
// watermark should be updated to 50 - 10 = 40
AddData(inputData, 50),
StartStream(Trigger.Once, checkpointLocation = checkpointDir.getAbsolutePath),
awaitTermination(),
CheckNewAnswer((15, 1), (25, 1)), // watermark = 40 is used to generate this
assertEventStats(min = 50, max = 50, avg = 50, wtrmark = 40))
}
test("append mode") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckNewAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckNewAnswer((10, 5)),
assertNumStateRows(2),
      AddData(inputData, 10), // Should not emit anything as the data is less than the watermark
CheckNewAnswer(),
assertNumStateRows(2)
)
}
test("update mode") {
val inputData = MemoryStream[Int]
spark.conf.set("spark.sql.shuffle.partitions", "10")
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation, OutputMode.Update)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckNewAnswer((10, 5), (15, 1)),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckNewAnswer((25, 1)),
assertNumStateRows(2),
      AddData(inputData, 10, 25), // Ignore 10 as it's less than the watermark
CheckNewAnswer((25, 2)),
assertNumStateRows(2),
      AddData(inputData, 10), // Should not emit anything as the data is less than the watermark
CheckNewAnswer(),
assertNumStateRows(2)
)
}
test("delay in months and years handled correctly") {
val currentTimeMs = System.currentTimeMillis
val currentTime = new Date(currentTimeMs)
val input = MemoryStream[Long]
val aggWithWatermark = input.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "2 years 5 months")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
def monthsSinceEpoch(date: Date): Int = {
val cal = Calendar.getInstance()
cal.setTime(date)
cal.get(Calendar.YEAR) * 12 + cal.get(Calendar.MONTH)
}
testStream(aggWithWatermark)(
AddData(input, currentTimeMs / 1000),
CheckAnswer(),
AddData(input, currentTimeMs / 1000),
CheckAnswer(),
assertEventStats { e =>
assert(timestampFormat.parse(e.get("max")).getTime === (currentTimeMs / 1000) * 1000)
val watermarkTime = timestampFormat.parse(e.get("watermark"))
val monthDiff = monthsSinceEpoch(currentTime) - monthsSinceEpoch(watermarkTime)
// monthsSinceEpoch is like `math.floor(num)`, so monthDiff has two possible values.
assert(monthDiff === 29 || monthDiff === 30,
s"currentTime: $currentTime, watermarkTime: $watermarkTime")
}
)
}
test("recovery") {
val inputData = MemoryStream[Int]
val df = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(df)(
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckAnswer((10, 5)),
StopStream,
AssertOnQuery { q => // purge commit and clear the sink
val commit = q.commitLog.getLatest().map(_._1).getOrElse(-1L)
q.commitLog.purge(commit)
q.sink.asInstanceOf[MemorySink].clear()
true
},
StartStream(),
AddData(inputData, 10, 27, 30), // Advance watermark to 20 seconds, 10 should be ignored
CheckAnswer((15, 1)),
StopStream,
StartStream(),
AddData(inputData, 17), // Watermark should still be 20 seconds, 17 should be ignored
CheckAnswer((15, 1)),
AddData(inputData, 40), // Advance watermark to 30 seconds, emit first data 25
CheckNewAnswer((25, 2))
)
}
test("watermark with 2 streams") {
import org.apache.spark.sql.functions.sum
val first = MemoryStream[Int]
val firstDf = first.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.select('value)
val second = MemoryStream[Int]
val secondDf = second.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "5 seconds")
.select('value)
withTempDir { checkpointDir =>
val unionWriter = firstDf.union(secondDf).agg(sum('value))
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("memory")
.outputMode("complete")
.queryName("test")
val union = unionWriter.start()
def getWatermarkAfterData(
firstData: Seq[Int] = Seq.empty,
secondData: Seq[Int] = Seq.empty,
query: StreamingQuery = union): Long = {
if (firstData.nonEmpty) first.addData(firstData)
if (secondData.nonEmpty) second.addData(secondData)
query.processAllAvailable()
// add a dummy batch so lastExecution has the new watermark
first.addData(0)
query.processAllAvailable()
// get last watermark
val lastExecution = query.asInstanceOf[StreamingQueryWrapper].streamingQuery.lastExecution
lastExecution.offsetSeqMetadata.batchWatermarkMs
}
// Global watermark starts at 0 until we get data from both sides
assert(getWatermarkAfterData(firstData = Seq(11)) == 0)
assert(getWatermarkAfterData(secondData = Seq(6)) == 1000)
// Global watermark stays at left watermark 1 when right watermark moves to 2
assert(getWatermarkAfterData(secondData = Seq(8)) == 1000)
// Global watermark switches to right side value 2 when left watermark goes higher
assert(getWatermarkAfterData(firstData = Seq(21)) == 3000)
// Global watermark goes back to left
assert(getWatermarkAfterData(secondData = Seq(17, 28, 39)) == 11000)
// Global watermark stays on left as long as it's below right
assert(getWatermarkAfterData(firstData = Seq(31)) == 21000)
assert(getWatermarkAfterData(firstData = Seq(41)) == 31000)
// Global watermark switches back to right again
assert(getWatermarkAfterData(firstData = Seq(51)) == 34000)
// Global watermark is updated correctly with simultaneous data from both sides
assert(getWatermarkAfterData(firstData = Seq(100), secondData = Seq(100)) == 90000)
assert(getWatermarkAfterData(firstData = Seq(120), secondData = Seq(110)) == 105000)
assert(getWatermarkAfterData(firstData = Seq(130), secondData = Seq(125)) == 120000)
// Global watermark doesn't decrement with simultaneous data
assert(getWatermarkAfterData(firstData = Seq(100), secondData = Seq(100)) == 120000)
assert(getWatermarkAfterData(firstData = Seq(140), secondData = Seq(100)) == 120000)
assert(getWatermarkAfterData(firstData = Seq(100), secondData = Seq(135)) == 130000)
// Global watermark recovers after restart, but left side watermark ahead of it does not.
assert(getWatermarkAfterData(firstData = Seq(200), secondData = Seq(190)) == 185000)
union.stop()
val union2 = unionWriter.start()
assert(getWatermarkAfterData(query = union2) == 185000)
// Even though the left side was ahead of 185000 in the last execution, the watermark won't
// increment until it gets past it in this execution.
assert(getWatermarkAfterData(secondData = Seq(200), query = union2) == 185000)
assert(getWatermarkAfterData(firstData = Seq(200), query = union2) == 190000)
}
}
test("complete mode") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
// No eviction when asked to compute complete results.
testStream(windowedAggregation, OutputMode.Complete)(
AddData(inputData, 10, 11, 12),
CheckAnswer((10, 3)),
AddData(inputData, 25),
CheckAnswer((10, 3), (25, 1)),
AddData(inputData, 25),
CheckAnswer((10, 3), (25, 2)),
AddData(inputData, 10),
CheckAnswer((10, 4), (25, 2)),
AddData(inputData, 25),
CheckAnswer((10, 4), (25, 3))
)
}
test("group by on raw timestamp") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy($"eventTime")
.agg(count("*") as 'count)
.select($"eventTime".cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
AddData(inputData, 10),
CheckAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
CheckAnswer((10, 1))
)
}
test("delay threshold should not be negative.") {
val inputData = MemoryStream[Int].toDF()
var e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "-1 year")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "1 year -13 months")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "1 month -40 days")
}
assert(e.getMessage contains "should not be negative.")
e = intercept[IllegalArgumentException] {
inputData.withWatermark("value", "-10 seconds")
}
assert(e.getMessage contains "should not be negative.")
}
test("the new watermark should override the old one") {
val df = MemoryStream[(Long, Long)].toDF()
.withColumn("first", $"_1".cast("timestamp"))
.withColumn("second", $"_2".cast("timestamp"))
.withWatermark("first", "1 minute")
.withWatermark("second", "2 minutes")
val eventTimeColumns = df.logicalPlan.output
.filter(_.metadata.contains(EventTimeWatermark.delayKey))
assert(eventTimeColumns.size === 1)
assert(eventTimeColumns(0).name === "second")
}
test("EventTime watermark should be ignored in batch query.") {
val df = testData
.withColumn("eventTime", $"key".cast("timestamp"))
.withWatermark("eventTime", "1 minute")
.select("eventTime")
.as[Long]
checkDataset[Long](df, 1L to 100L: _*)
}
test("SPARK-21565: watermark operator accepts attributes from replacement") {
withTempDir { dir =>
dir.delete()
val df = Seq(("a", 100.0, new java.sql.Timestamp(100L)))
.toDF("symbol", "price", "eventTime")
df.write.json(dir.getCanonicalPath)
val input = spark.readStream.schema(df.schema)
.json(dir.getCanonicalPath)
val groupEvents = input
.withWatermark("eventTime", "2 seconds")
.groupBy("symbol", "eventTime")
.agg(count("price") as 'count)
.select("symbol", "eventTime", "count")
val q = groupEvents.writeStream
.outputMode("append")
.format("console")
.start()
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
}
test("test no-data flag") {
val flagKey = SQLConf.STREAMING_NO_DATA_MICRO_BATCHES_ENABLED.key
def testWithFlag(flag: Boolean): Unit = withClue(s"with $flagKey = $flag") {
val inputData = MemoryStream[Int]
val windowedAggregation = inputData.toDF()
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
.groupBy(window($"eventTime", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start").cast("long").as[Long], $"count".as[Long])
testStream(windowedAggregation)(
StartStream(additionalConfs = Map(flagKey -> flag.toString)),
AddData(inputData, 10, 11, 12, 13, 14, 15),
CheckNewAnswer(),
AddData(inputData, 25), // Advance watermark to 15 seconds
// Check if there is new answer if flag is set, no new answer otherwise
if (flag) CheckNewAnswer((10, 5)) else CheckNewAnswer()
)
}
testWithFlag(true)
testWithFlag(false)
}
test("MultipleWatermarkPolicy: max") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
withSQLConf(SQLConf.STREAMING_MULTIPLE_WATERMARK_POLICY.key -> "max") {
testStream(dfWithMultipleWatermarks(input1, input2))(
MultiAddData(input1, 20)(input2, 30),
CheckLastBatch(20, 30),
checkWatermark(input1, 15), // max(20 - 10, 30 - 15) = 15
StopStream,
StartStream(),
checkWatermark(input1, 15), // watermark recovered correctly
MultiAddData(input1, 120)(input2, 130),
CheckLastBatch(120, 130),
checkWatermark(input1, 115), // max(120 - 10, 130 - 15) = 115, policy recovered correctly
AddData(input1, 150),
CheckLastBatch(150),
        checkWatermark(input1, 140) // should advance even if only one of the inputs has data
)
}
}
test("MultipleWatermarkPolicy: min") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
withSQLConf(SQLConf.STREAMING_MULTIPLE_WATERMARK_POLICY.key -> "min") {
testStream(dfWithMultipleWatermarks(input1, input2))(
MultiAddData(input1, 20)(input2, 30),
CheckLastBatch(20, 30),
checkWatermark(input1, 10), // min(20 - 10, 30 - 15) = 10
StopStream,
StartStream(),
checkWatermark(input1, 10), // watermark recovered correctly
MultiAddData(input1, 120)(input2, 130),
CheckLastBatch(120, 130),
checkWatermark(input2, 110), // min(120 - 10, 130 - 15) = 110, policy recovered correctly
AddData(input2, 150),
CheckLastBatch(150),
        checkWatermark(input2, 110) // does not advance when only one of the inputs has data
)
}
}
test("MultipleWatermarkPolicy: recovery from checkpoints ignores session conf") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val checkpointDir = Utils.createTempDir().getCanonicalFile
withSQLConf(SQLConf.STREAMING_MULTIPLE_WATERMARK_POLICY.key -> "max") {
testStream(dfWithMultipleWatermarks(input1, input2))(
StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
MultiAddData(input1, 20)(input2, 30),
CheckLastBatch(20, 30),
checkWatermark(input1, 15) // max(20 - 10, 30 - 15) = 15
)
}
withSQLConf(SQLConf.STREAMING_MULTIPLE_WATERMARK_POLICY.key -> "min") {
testStream(dfWithMultipleWatermarks(input1, input2))(
StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
checkWatermark(input1, 15), // watermark recovered correctly
MultiAddData(input1, 120)(input2, 130),
CheckLastBatch(120, 130),
checkWatermark(input1, 115), // max(120 - 10, 130 - 15) = 115, policy recovered correctly
AddData(input1, 150),
CheckLastBatch(150),
        checkWatermark(input1, 140) // should advance even if only one of the inputs has data
)
}
}
test("MultipleWatermarkPolicy: recovery from Spark ver 2.3.1 checkpoints ensures min policy") {
val input1 = MemoryStream[Int]
val input2 = MemoryStream[Int]
val resourceUri = this.getClass.getResource(
"/structured-streaming/checkpoint-version-2.3.1-for-multi-watermark-policy/").toURI
val checkpointDir = Utils.createTempDir().getCanonicalFile
// Copy the checkpoint to a temp dir to prevent changes to the original.
    // Not doing this will lead to the test passing on the first run, but failing on subsequent runs.
FileUtils.copyDirectory(new File(resourceUri), checkpointDir)
input1.addData(20)
input2.addData(30)
input1.addData(10)
withSQLConf(SQLConf.STREAMING_MULTIPLE_WATERMARK_POLICY.key -> "max") {
testStream(dfWithMultipleWatermarks(input1, input2))(
StartStream(checkpointLocation = checkpointDir.getAbsolutePath),
Execute { _.processAllAvailable() },
MultiAddData(input1, 120)(input2, 130),
CheckLastBatch(120, 130),
checkWatermark(input2, 110), // should calculate 'min' even if session conf has 'max' policy
AddData(input2, 150),
CheckLastBatch(150),
checkWatermark(input2, 110)
)
}
}
test("MultipleWatermarkPolicy: fail on incorrect conf values") {
val invalidValues = Seq("", "random")
invalidValues.foreach { value =>
val e = intercept[IllegalArgumentException] {
spark.conf.set(SQLConf.STREAMING_MULTIPLE_WATERMARK_POLICY.key, value)
}
assert(e.getMessage.toLowerCase(Locale.ROOT).contains("valid values are 'min' and 'max'"))
}
}
private def dfWithMultipleWatermarks(
input1: MemoryStream[Int],
input2: MemoryStream[Int]): Dataset[_] = {
val df1 = input1.toDF
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "10 seconds")
val df2 = input2.toDF
.withColumn("eventTime", $"value".cast("timestamp"))
.withWatermark("eventTime", "15 seconds")
df1.union(df2).select($"eventTime".cast("int"))
}
private def checkWatermark(input: MemoryStream[Int], watermark: Long) = Execute { q =>
input.addData(1)
q.processAllAvailable()
assert(q.lastProgress.eventTime.get("watermark") == formatTimestamp(watermark))
}
private def assertNumStateRows(numTotalRows: Long): AssertOnQuery = AssertOnQuery { q =>
q.processAllAvailable()
val progressWithData = q.recentProgress.lastOption.get
assert(progressWithData.stateOperators(0).numRowsTotal === numTotalRows)
true
}
/** Assert event stats generated on that last batch with data in it */
private def assertEventStats(body: ju.Map[String, String] => Unit): AssertOnQuery = {
Execute("AssertEventStats") { q =>
body(q.recentProgress.filter(_.numInputRows > 0).lastOption.get.eventTime)
}
}
/** Assert event stats generated on that last batch with data in it */
private def assertEventStats(min: Long, max: Long, avg: Double, wtrmark: Long): AssertOnQuery = {
assertEventStats { e =>
assert(e.get("min") === formatTimestamp(min), s"min value mismatch")
assert(e.get("max") === formatTimestamp(max), s"max value mismatch")
assert(e.get("avg") === formatTimestamp(avg.toLong), s"avg value mismatch")
assert(e.get("watermark") === formatTimestamp(wtrmark), s"watermark value mismatch")
}
}
private val timestampFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'") // ISO8601
timestampFormat.setTimeZone(ju.TimeZone.getTimeZone("UTC"))
private def formatTimestamp(sec: Long): String = {
timestampFormat.format(new ju.Date(sec * 1000))
}
private def awaitTermination(): AssertOnQuery = Execute("AwaitTermination") { q =>
q.awaitTermination()
}
}
|
ahnqirage/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/EventTimeWatermarkSuite.scala
|
Scala
|
apache-2.0
| 29,617 |
package aima.core.environment.map2d
/**
* Provides a general interface for two dimensional maps.
*
* @author Shawn Garner
*/
trait Map2D {
/**
*
* @return a list of all locations in the map.
*/
def locations: List[String]
/**
   * Answers the question: where can I get to by following one of the
   * connections starting at the specified location?
*
* @param fromLocation
   *          the location to start from.
* @return a list of the locations that are connected from the given
* location.
*/
def locationsLinkedTo(fromLocation: String): List[String]
/**
   * Get the travel distance between the two specified locations if they are
   * linked by a connection, and None otherwise.
*
* @param fromLocation
* the starting from location.
* @param toLocation
* the to location.
   * @return the travel distance between the two specified locations if they
   *         are linked by a connection, and None otherwise.
*/
def distance(fromLocation: String, toLocation: String): Option[Distance]
/**
* Get the position of the specified location.
*
* @param location
* the location whose position is to be returned.
* @return the position of the specified location in the two dimensional
* space.
*/
def position(location: String): Option[Point2D]
}
final case class Point2D(x: Double, y: Double)
final case class Distance(value: Double) extends AnyVal
object Point2D {
def distance(p1: Point2D, p2: Point2D): Distance = {
    // Squared difference of the x coordinates
    val x_distance: Double = (p1.x - p2.x) * (p1.x - p2.x)
    // Squared difference of the y coordinates
    val y_distance: Double = (p1.y - p2.y) * (p1.y - p2.y)
    // Euclidean distance between the two points
    val total_distance = math.sqrt(x_distance + y_distance)
Distance(total_distance)
}
}
import scala.collection.mutable
class ExtendableMap2D(
val links: LabeledGraph[String, Distance],
val locationPositions: mutable.LinkedHashMap[String, Point2D]
) extends Map2D {
def this() = this(new LabeledGraph[String, Distance], new mutable.LinkedHashMap[String, Point2D])
override def locations: List[String] = links.vertexLabels
override def locationsLinkedTo(fromLocation: String): List[String] = links.successors(fromLocation)
override def distance(fromLocation: String, toLocation: String): Option[Distance] =
links.get(fromLocation, toLocation)
override def position(location: String): Option[Point2D] = locationPositions.get(location)
def clear(): Unit = {
links.clear()
locationPositions.clear()
}
/**
* Add a one-way connection to the map.
*
* @param fromLocation
* the from location.
* @param toLocation
* the to location.
* @param distance
* the distance between the two given locations.
*/
def addUnidirectionalLink(fromLocation: String, toLocation: String, distance: Distance): Unit = {
links.set(fromLocation, toLocation, distance)
}
/**
   * Adds a connection which can be traveled in both directions. Internally,
* such a connection is represented as two one-way connections.
*
* @param fromLocation
* the from location.
* @param toLocation
* the to location.
* @param distance
* the distance between the two given locations.
*/
def addBidirectionalLink(fromLocation: String, toLocation: String, distance: Distance): Unit = {
links.set(fromLocation, toLocation, distance)
links.set(toLocation, fromLocation, distance)
}
/**
* Defines the position of a location within the map. Using this method, one
* location should be selected as a reference position (<code>dist=0</code>
* and <code>dir=0</code>) and all the other locations should be placed
* relative to it.
*
* @param loc
* location name
* @param dist
* distance to a reference position
* @param dir
* bearing (compass direction) in which the location is seen from
* the reference position
*/
def setDistAndDirToRefLocation(loc: String, dist: Distance, dir: Int): Unit = {
val coords = Point2D(-math.sin(dir * math.Pi / 180.0) * dist.value, math.cos(dir * math.Pi / 180.0) * dist.value)
links.addVertex(loc)
locationPositions.put(loc, coords)
()
}
}
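
// Hypothetical usage sketch (not part of the original file): exercises the Map2D interface
// through ExtendableMap2D. The location names, the 71.0 distance and the bearings are invented
// for illustration; treating +y as north follows the formula in setDistAndDirToRefLocation above.
object ExtendableMap2DUsageSketch {
  def main(args: Array[String]): Unit = {
    val map = new ExtendableMap2D()
    // A two-way road of length 71 between two invented towns (stored as two one-way links).
    map.addBidirectionalLink("Arad", "Zerind", Distance(71.0))
    // Pick "Arad" as the reference position and place "Zerind" 71 units away at bearing 0.
    map.setDistAndDirToRefLocation("Arad", Distance(0.0), 0)
    map.setDistAndDirToRefLocation("Zerind", Distance(71.0), 0)

    println(map.locationsLinkedTo("Arad"))  // expected: List(Zerind)
    println(map.distance("Arad", "Zerind")) // expected: Some(Distance(71.0))
    println(map.position("Zerind"))         // expected: roughly Some(Point2D(0.0, 71.0))
  }
}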
|
aimacode/aima-scala
|
core/src/main/scala/aima/core/environment/map2d/Map2D.scala
|
Scala
|
mit
| 4,267 |
object GenericTypedPlaceholder {
def foo[T](x: T => Int, y: T) = x(y)
foo[String](/*start*/_.length/*end*/, "")
}
//(String) => Int
|
ilinum/intellij-scala
|
testdata/typeInference/expected/placeholder/GenericTypedPlaceholder.scala
|
Scala
|
apache-2.0
| 136 |
package dazzle.waffle
import dazzle.waffle.adapter.Adapter
import java.nio.file.Path
import java.io.{File, ByteArrayInputStream, InputStream}
import scala.util.Try
/**
* Filesystem
*
* @param adapter an adapter
*/
class FileSystem(adapter: Adapter) {
/**
* Reads the content of the file
*
* @param key file path
* @return input stream
*/
def read(key: String): Try[InputStream] = adapter.read(key)
/**
* Writes the given content into the file
*
* @param key file path
* @param content the content
* @param length length of the content
*/
def write(key: String, content: InputStream, length: Long): Try[Long] = adapter.write(key, content, length)
/**
* Writes the given path(nio2) into the file
*
* @param key file path
* @param content the content
*/
def write(key: String, content: Path): Try[Long] = adapter.write(key, content)
/**
* Writes the given file(io) into the file
*
* @param key file path
* @param content the content
*/
def write(key: String, content: File): Try[Long] = adapter.write(key, content.toPath)
/**
* Writes the given string into the file
*
* @param key file path
* @param content the content
*/
  def write(key: String, content: String): Try[Long] = {
    val bytes = content.getBytes("utf-8")
    // use the encoded byte length, not the character count, so the declared length matches the stream contents
    adapter.write(key, new ByteArrayInputStream(bytes), bytes.length)
  }
/**
* Deletes the file
*
* @param key file path
*/
def delete(key: String): Try[Unit] = adapter.delete(key)
/**
* Renames the file
*
* @param sourceKey source file path
* @param targetKey target file path
*/
def rename(sourceKey: String, targetKey: String): Try[Unit] = adapter.move(sourceKey, targetKey)
/**
* Moves the file
*
* @param sourceKey source file path
* @param targetKey target file path
*/
def move(sourceKey: String, targetKey: String): Try[Unit] = adapter.move(sourceKey, targetKey)
/**
* Gets last modified time
*
* @param key file path
* @return last modified time
*/
def mtime(key: String): Try[Long] = adapter.mtime(key)
/**
* Indicates whether the file exists
*
* @param key file path
* @return boolean
*/
def exists(key: String): Boolean = adapter.exists(key)
}
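
// Hypothetical usage sketch (not part of the original file). It assumes a concrete Adapter
// implementation is available as `myAdapter`; no particular adapter from this library is
// implied, so treat that parameter as a placeholder. Keys and content are invented.
object FileSystemUsageSketch {
  def demo(myAdapter: Adapter): Unit = {
    val fs = new FileSystem(myAdapter)
    fs.write("notes/hello.txt", "hello, waffle") // Try[Long], presumably the number of bytes written
    if (fs.exists("notes/hello.txt")) {
      fs.read("notes/hello.txt").foreach { in =>
        // the caller consumes (and should close) the returned stream
        println(scala.io.Source.fromInputStream(in, "utf-8").mkString)
      }
      fs.rename("notes/hello.txt", "notes/hi.txt")
      fs.delete("notes/hi.txt")
    }
  }
}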
|
dazzle-lab/waffle
|
src/main/scala/dazzle/waffle/FileSystem.scala
|
Scala
|
mit
| 2,259 |
package scala.dao
import model.dao.ProductDAO
import org.scalatest.BeforeAndAfterEach
import org.scalatestplus.play.{OneAppPerTest, PlaySpec}
import play.api.db.DBApi
import play.api.db.evolutions.Evolutions
import play.api.inject.guice.GuiceApplicationBuilder
import scala.reflect.ClassTag
/**
* Created by lukasz on 13.11.16.
*/
class ProductDaoTest extends PlaySpec with OneAppPerTest with BeforeAndAfterEach {
val productRepo = Injector.inject[ProductDAO]
/* override def afterEach(): Unit = EvolutionHelper.clean()
"An item " should {
"be inserted during the first test case" in new WithApplication(FakeApplication()) {
val action = productRepo.insert(new Product(1, "A", "B"))
.flatMap(_ => productRepo.findAll())
val result = Await.result(action, Duration.Inf)
result mustBe List(Product(1, "A", "B"))
}
}*/
}
object Injector {
lazy val injector = (new GuiceApplicationBuilder).injector()
def inject[T: ClassTag]: T = injector.instanceOf[T]
}
object EvolutionHelper {
def clean() = {
val dbApi = Injector.inject[DBApi]
Evolutions.cleanupEvolutions(dbApi.database("default"))
}
}
|
lszku/ProductDatabase
|
test/scala/dao/ProductDaoTest.scala
|
Scala
|
bsd-3-clause
| 1,178 |
package com.sksamuel.elastic4s.searches.queries.geo
import com.sksamuel.elastic4s.searches.GeoPoint
import com.sksamuel.elastic4s.searches.queries.Query
import com.sksamuel.exts.OptionImplicits._
case class GeoPolygonQuery(field: String,
points: Seq[GeoPoint],
ignoreUnmapped: Option[Boolean] = None,
validationMethod: Option[GeoValidationMethod] = None,
boost: Option[Double] = None,
queryName: Option[String] = None)
extends Query {
def ignoreUnmapped(ignoreUnmapped: Boolean): GeoPolygonQuery = copy(ignoreUnmapped = ignoreUnmapped.some)
def validationMethod(method: String): GeoPolygonQuery =
validationMethod(GeoValidationMethod.valueOf(method))
def validationMethod(method: GeoValidationMethod): GeoPolygonQuery = copy(validationMethod = method.some)
def boost(boost: Double): GeoPolygonQuery = copy(boost = Option(boost))
def queryName(queryName: String): GeoPolygonQuery = copy(queryName = Some(queryName))
}
|
Tecsisa/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/geo/GeoPolygonQuery.scala
|
Scala
|
apache-2.0
| 1,083 |
package liang.don.dzviewer.tile
/**
* Stores information regarding the original image size.
*
* @constructor Create a new image size setting with a width and height value.
* @param width The image's width value.
* @param height The image's height value.
*
* @author Don Liang
 * @version 0.1, 14/09/2011
*/
class ImageSize(val width: Int, val height: Int) { }
|
dl2k84/DeepZoomViewer
|
src/liang/don/dzviewer/tile/ImageSize.scala
|
Scala
|
mit
| 369 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api
import _root_.java.util.{HashMap => JHashMap}
import _root_.java.util.{Map => JMap}
import _root_.java.sql.{Date, Time, Timestamp}
import org.apache.flink.api.common.typeinfo.{BasicTypeInfo, SqlTimeTypeInfo, TypeInformation}
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.api.scala._
import org.apache.flink.table.descriptors.{ConnectorDescriptor, Schema}
import org.apache.flink.table.descriptors.ConnectorDescriptorValidator.CONNECTOR
import org.apache.flink.table.expressions.utils._
import org.apache.flink.table.runtime.utils.CommonTestData
import org.apache.flink.table.sources.{CsvTableSource, TableSource}
import org.apache.flink.table.utils.TableTestUtil._
import org.apache.flink.table.utils.{TableTestBase, TestFilterableTableSource}
import org.apache.flink.types.Row
import org.junit.{Assert, Test}
class TableSourceTest extends TableTestBase {
private val projectedFields: Array[String] = Array("last", "id", "score")
private val noCalcFields: Array[String] = Array("id", "score", "first")
@Test
def testTableSourceScanToString(): Unit = {
val (tableSource1, _) = filterableTableSource
val (tableSource2, _) = filterableTableSource
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal("table1", tableSource1)
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal("table2", tableSource2)
val table1 = tableEnv.scan("table1").where($"amount" > 2)
val table2 = tableEnv.scan("table2").where($"amount" > 2)
val result = table1.unionAll(table2)
val expected = binaryNode(
"DataSetUnion",
batchFilterableSourceTableNode(
"table1",
Array("name", "id", "amount", "price"),
isPushedDown = true,
"'amount > 2"),
batchFilterableSourceTableNode(
"table2",
Array("name", "id", "amount", "price"),
isPushedDown = true,
"'amount > 2"),
term("all", "true"),
term("union", "name, id, amount, price")
)
util.verifyTable(result, expected)
}
// batch plan
@Test
def testBatchProjectableSourceScanPlanTableApi(): Unit = {
val (tableSource, tableName) = csvTable
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('last.upperCase(), 'id.floor(), 'score * 2)
val expected = unaryNode(
"DataSetCalc",
batchSourceTableNode(tableName, projectedFields),
term("select", "UPPER(last) AS _c0", "FLOOR(id) AS _c1", "*(score, 2) AS _c2")
)
util.verifyTable(result, expected)
}
@Test
def testBatchProjectableSourceScanPlanSQL(): Unit = {
val (tableSource, tableName) = csvTable
val util = batchTestUtil()
util.tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val sqlQuery = s"SELECT `last`, floor(id), score * 2 FROM $tableName"
val expected = unaryNode(
"DataSetCalc",
batchSourceTableNode(tableName, projectedFields),
term("select", "last", "FLOOR(id) AS EXPR$1", "*(score, 2) AS EXPR$2")
)
util.verifySql(sqlQuery, expected)
}
@Test
def testBatchProjectableSourceScanNoIdentityCalc(): Unit = {
val (tableSource, tableName) = csvTable
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('id, 'score, 'first)
val expected = batchSourceTableNode(tableName, noCalcFields)
util.verifyTable(result, expected)
}
@Test
def testBatchProjectableSourceFullProjection(): Unit = {
val (tableSource, tableName) = csvTable
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select(1)
val expected = unaryNode(
"DataSetCalc",
s"BatchTableSourceScan(table=[[default_catalog, default_database, $tableName]], " +
s"fields=[], " +
s"source=[CsvTableSource(read fields: )])",
term("select", "1 AS _c0")
)
util.verifyTable(result, expected)
}
@Test
def testBatchFilterableWithoutPushDown(): Unit = {
val (tableSource, tableName) = filterableTableSource
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('price, 'id, 'amount)
.where($"price" * 2 < 32)
val expected = unaryNode(
"DataSetCalc",
batchFilterableSourceTableNode(
tableName,
Array("price", "id", "amount"),
isPushedDown = true,
""),
term("select", "price", "id", "amount"),
term("where", "<(*(price, 2), 32)")
)
util.verifyTable(result, expected)
}
@Test
def testBatchFilterablePartialPushDown(): Unit = {
val (tableSource, tableName) = filterableTableSource
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.where($"amount" > 2 && $"price" * 2 < 32)
.select('price, 'name.lowerCase(), 'amount)
val expected = unaryNode(
"DataSetCalc",
batchFilterableSourceTableNode(
tableName,
Array("price", "name", "amount"),
isPushedDown = true,
"'amount > 2"),
term("select", "price", "LOWER(name) AS _c1", "amount"),
term("where", "<(*(price, 2), 32)")
)
util.verifyTable(result, expected)
}
@Test
def testBatchFilterableFullyPushedDown(): Unit = {
val (tableSource, tableName) = filterableTableSource
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('price, 'id, 'amount)
.where($"amount" > 2 && $"amount" < 32)
val expected = batchFilterableSourceTableNode(
tableName,
Array("price", "id", "amount"),
isPushedDown = true,
"'amount > 2 && 'amount < 32")
util.verifyTable(result, expected)
}
@Test
def testBatchFilterableWithUnconvertedExpression(): Unit = {
val (tableSource, tableName) = filterableTableSource
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('price, 'id, 'amount)
.where($"amount" > 2 && $"id" < 1.2 &&
($"amount" < 32 || $"amount".cast(Types.LONG) > 10)) // cast can not be converted
val expected = unaryNode(
"DataSetCalc",
batchFilterableSourceTableNode(
tableName,
Array("price", "id", "amount"),
isPushedDown = true,
"'amount > 2"),
term("select", "price", "id", "amount"),
term("where", "AND(<(id, 1.2E0:DOUBLE), OR(<(amount, 32), >(CAST(amount), 10)))")
)
util.verifyTable(result, expected)
}
@Test
def testBatchFilterableWithUDF(): Unit = {
val (tableSource, tableName) = filterableTableSource
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val func = Func0
tableEnv.registerFunction("func0", func)
val result = tableEnv
.scan(tableName)
.select('price, 'id, 'amount)
.where($"amount" > 2 && call("func0", $"amount") < 32)
val expected = unaryNode(
"DataSetCalc",
batchFilterableSourceTableNode(
tableName,
Array("price", "id", "amount"),
isPushedDown = true,
"'amount > 2"),
term("select", "price", "id", "amount"),
term("where", s"<(${Func0.getClass.getSimpleName}(amount), 32)")
)
util.verifyTable(result, expected)
}
// stream plan
@Test
def testStreamProjectableSourceScanPlanTableApi(): Unit = {
val (tableSource, tableName) = csvTable
val util = streamTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('last, 'id.floor(), 'score * 2)
val expected = unaryNode(
"DataStreamCalc",
streamSourceTableNode(tableName, projectedFields),
term("select", "last", "FLOOR(id) AS _c1", "*(score, 2) AS _c2")
)
util.verifyTable(result, expected)
}
@Test
def testStreamProjectableSourceScanPlanSQL(): Unit = {
val (tableSource, tableName) = csvTable
val util = streamTestUtil()
util.tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val sqlQuery = s"SELECT `last`, floor(id), score * 2 FROM $tableName"
val expected = unaryNode(
"DataStreamCalc",
streamSourceTableNode(tableName, projectedFields),
term("select", "last", "FLOOR(id) AS EXPR$1", "*(score, 2) AS EXPR$2")
)
util.verifySql(sqlQuery, expected)
}
@Test
def testStreamProjectableSourceScanNoIdentityCalc(): Unit = {
val (tableSource, tableName) = csvTable
val util = streamTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('id, 'score, 'first)
val expected = streamSourceTableNode(tableName, noCalcFields)
util.verifyTable(result, expected)
}
@Test
def testStreamFilterableSourceScanPlanTableApi(): Unit = {
val (tableSource, tableName) = filterableTableSource
val util = streamTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val result = tableEnv
.scan(tableName)
.select('price, 'id, 'amount)
.where($"amount" > 2 && $"price" * 2 < 32)
val expected = unaryNode(
"DataStreamCalc",
streamFilterableSourceTableNode(
tableName,
Array("price", "id", "amount"),
isPushedDown = true,
"'amount > 2"),
term("select", "price", "id", "amount"),
term("where", "<(*(price, 2), 32)")
)
util.verifyTable(result, expected)
}
@Test
def testConnectToTableWithProperties(): Unit = {
val util = streamTestUtil()
val tableEnv = util.tableEnv
val path = "cat.db.tab1"
tableEnv.connect(new ConnectorDescriptor("COLLECTION", 1, false) {
override protected def toConnectorProperties: JMap[String, String] = {
val context = new JHashMap[String, String]()
context.put(CONNECTOR, "COLLECTION")
context
}
}).withSchema(
new Schema()
.schema(TableSchema.builder()
.field("id", DataTypes.INT())
.field("name", DataTypes.STRING())
.build())
).createTemporaryTable(path)
val result = tableEnv.from(path)
val expected = "StreamTableSourceScan(table=[[cat, db, tab1]], fields=[id, name], " +
"source=[CollectionTableSource(id, name)])"
util.verifyTable(result, expected)
}
// csv builder
@Test
def testCsvTableSourceBuilder(): Unit = {
val source1 = CsvTableSource.builder()
.path("/path/to/csv")
.field("myfield", Types.STRING)
.field("myfield2", Types.INT)
.quoteCharacter(';')
.fieldDelimiter("#")
.lineDelimiter("\\r\\n")
.commentPrefix("%%")
.ignoreFirstLine()
.ignoreParseErrors()
.build()
val source2 = new CsvTableSource(
"/path/to/csv",
Array("myfield", "myfield2"),
Array(Types.STRING, Types.INT),
"#",
"\\r\\n",
';',
true,
"%%",
true)
Assert.assertEquals(source1, source2)
}
// TODO enable this test once we expose the feature through the table environment
// @Test
// def testCsvTableSourceDescriptor(): Unit = {
// val util = streamTestUtil()
// val source1 = util.tableEnv
// .from(
// FileSystem()
// .path("/path/to/csv"))
// .withFormat(
// Csv()
// .field("myfield", Types.STRING)
// .field("myfield2", Types.INT)
// .quoteCharacter(';')
// .fieldDelimiter("#")
// .lineDelimiter("\\r\\n")
// .commentPrefix("%%")
// .ignoreFirstLine()
// .ignoreParseErrors())
// .withSchema(
// Schema()
// .field("myfield", Types.STRING)
// .field("myfield2", Types.INT))
// .toTableSource
//
// val source2 = new CsvTableSource(
// "/path/to/csv",
// Array("myfield", "myfield2"),
// Array(Types.STRING, Types.INT),
// "#",
// "\\r\\n",
// ';',
// true,
// "%%",
// true)
//
// Assert.assertEquals(source1, source2)
// }
@Test
def testTimeLiteralExpressionPushdown(): Unit = {
val (tableSource, tableName) = filterableTableSourceTimeTypes
val util = batchTestUtil()
val tableEnv = util.tableEnv
tableEnv.asInstanceOf[TableEnvironmentInternal]
.registerTableSourceInternal(tableName, tableSource)
val sqlQuery =
s"""
|SELECT id from $tableName
|WHERE
| tv > TIME '14:25:02' AND
| dv > DATE '2017-02-03' AND
| tsv > TIMESTAMP '2017-02-03 14:25:02.000'
""".stripMargin
val result = tableEnv.sqlQuery(sqlQuery)
val expectedFilter =
"'tv > 14:25:02.toTime && " +
"'dv > 2017-02-03.toDate && " +
"'tsv > 2017-02-03 14:25:02.0.toTimestamp"
val expected = batchFilterableSourceTableNode(
tableName,
Array("id"),
isPushedDown = true,
expectedFilter
)
util.verifyTable(result, expected)
}
// utils
  def filterableTableSource: (TableSource[_], String) = {
val tableSource = TestFilterableTableSource()
(tableSource, "filterableTable")
}
  def filterableTableSourceTimeTypes: (TableSource[_], String) = {
val rowTypeInfo = new RowTypeInfo(
Array[TypeInformation[_]](
BasicTypeInfo.INT_TYPE_INFO,
SqlTimeTypeInfo.DATE,
SqlTimeTypeInfo.TIME,
SqlTimeTypeInfo.TIMESTAMP
),
Array("id", "dv", "tv", "tsv")
)
val row = new Row(4)
row.setField(0, 1)
row.setField(1, Date.valueOf("2017-01-23"))
row.setField(2, Time.valueOf("14:23:02"))
row.setField(3, Timestamp.valueOf("2017-01-24 12:45:01.234"))
val tableSource = TestFilterableTableSource(rowTypeInfo, Seq(row), Set("dv", "tv", "tsv"))
(tableSource, "filterableTable")
}
def csvTable: (CsvTableSource, String) = {
val csvTable = CommonTestData.getCsvTableSource
val tableName = "csvTable"
(csvTable, tableName)
}
def batchSourceTableNode(sourceName: String, fields: Array[String]): String = {
s"BatchTableSourceScan(table=[[default_catalog, default_database, $sourceName]], " +
s"fields=[${fields.mkString(", ")}], " +
s"source=[CsvTableSource(read fields: ${fields.mkString(", ")})])"
}
def streamSourceTableNode(sourceName: String, fields: Array[String] ): String = {
s"StreamTableSourceScan(table=[[default_catalog, default_database, $sourceName]], " +
s"fields=[${fields.mkString(", ")}], " +
s"source=[CsvTableSource(read fields: ${fields.mkString(", ")})])"
}
def batchFilterableSourceTableNode(
sourceName: String,
fields: Array[String],
isPushedDown: Boolean,
exp: String)
: String = {
"BatchTableSourceScan(" +
s"table=[[default_catalog, default_database, $sourceName]], fields=[${
fields
.mkString(", ")
}], source=[filterPushedDown=[$isPushedDown], filter=[$exp]])"
}
def streamFilterableSourceTableNode(
sourceName: String,
fields: Array[String],
isPushedDown: Boolean,
exp: String)
: String = {
"StreamTableSourceScan(" +
s"table=[[default_catalog, default_database, $sourceName]], fields=[${
fields
.mkString(", ")
}], source=[filterPushedDown=[$isPushedDown], filter=[$exp]])"
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/TableSourceTest.scala
|
Scala
|
apache-2.0
| 17,733 |
package org.jetbrains.plugins.scala
package codeInsight.unwrap
import java.util
import com.intellij.codeInsight.CodeInsightBundle
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.extensions.childOf
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScIfStmt
/**
* Nikolay.Tropin
* 2014-06-27
*/
class ScalaIfUnwrapper extends ScalaUnwrapper {
override def isApplicableTo(e: PsiElement): Boolean = e.getParent match {
case (_: ScIfStmt) childOf (_: ScIfStmt) => false
case ScIfStmt(_, Some(`e`), _) => true
case _ => false
}
override def doUnwrap(element: PsiElement, context: ScalaUnwrapContext): Unit = element.getParent match {
case ifSt @ ScIfStmt(_, Some(thenBr), _) =>
context.extractBlockOrSingleStatement(thenBr, ifSt)
context.delete(ifSt)
case _ =>
}
override def collectAffectedElements(e: PsiElement, toExtract: util.List[PsiElement]): PsiElement = e.getParent match {
case ifSt @ ScIfStmt(_, Some(`e`), _) =>
super.collectAffectedElements(e, toExtract)
ifSt
case _ => e
}
override def getDescription(e: PsiElement): String = CodeInsightBundle.message("unwrap.if")
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInsight/unwrap/ScalaIfUnwrapper.scala
|
Scala
|
apache-2.0
| 1,175 |
package controllers
import com.gu.googleauth.UserIdentity
import play.api.mvc.Security.AuthenticatedRequest
object Auth {
type AuthRequest[A] = AuthenticatedRequest[A, UserIdentity]
}
|
ovotech/comms-audit-log
|
app/controllers/Auth.scala
|
Scala
|
mit
| 190 |
package controllers
import play.api._
import play.api.mvc._
import play.api.libs.json._
import play.api.libs.iteratee._
import models._
import akka.actor._
import scala.concurrent.duration._
class Application extends Controller {
/**
* Just display the home page.
*/
def index = Action { implicit request =>
Ok(views.html.index())
}
/**
* Display the chat room page.
*/
def chatRoom(username: Option[String]) = Action { implicit request =>
username.filterNot(_.isEmpty).map { username =>
Ok(views.html.chatRoom(username))
}.getOrElse {
Redirect(routes.Application.index).flashing(
"error" -> "Please choose a valid username."
)
}
}
def chatRoomJs(username: String) = Action { implicit request =>
Ok(views.js.chatRoom(username))
}
/**
* Handles the chat websocket.
*/
def chat(username: String) = WebSocket.tryAccept[JsValue] { request =>
ChatRoom.join(username)
}
}
|
play2-maven-plugin/play2-maven-test-projects
|
play24/scala/websocket-chat/app/controllers/Application.scala
|
Scala
|
apache-2.0
| 981 |
package org.talkingpuffin.ui
import _root_.scala.swing.{ListView, Button, GridBagPanel, ScrollPane}
import org.talkingpuffin.filter.FilterSet
import javax.swing.event.{TableModelEvent, TableModelListener}
import javax.swing.BorderFactory
import java.awt.Dimension
import _root_.scala.swing.event.ButtonClicked
import _root_.scala.swing.GridBagPanel._
import twitter4j.User
/**
* A panel for unmuting any muted users
*/
class UnmutePane(title: String, tableModel: StatusTableModel, filterSet: FilterSet,
mutedList: scala.collection.mutable.Map[Long,User], unMute: (List[Long]) => Unit)
extends GridBagPanel with TableModelListener {
border = BorderFactory.createTitledBorder(title)
val view = new ListView(mutedList.values.toList)
add(new ScrollPane(view) {
val dim = new Dimension(150,130); preferredSize=dim; minimumSize=dim
}, new Constraints {grid=(0,0); anchor=Anchor.West; fill=Fill.Both; weighty=1})
tableModel.addTableModelListener(this)
def tableChanged(e: TableModelEvent) = view.listData = mutedList.values.toList
val removeButton = new Button("Remove")
add(removeButton, new Constraints {grid=(0,1)})
listenTo(removeButton)
reactions += {
case ButtonClicked(b) => unMute(view.selection.items.toList.map(_.getId.toLong))
}
}
|
dcbriccetti/talking-puffin
|
desktop/src/main/scala/org/talkingpuffin/ui/UnmutePane.scala
|
Scala
|
mit
| 1,286 |
package ro.redeul.katas.algos
import org.scalatest.{GivenWhenThen, Matchers, FeatureSpec}
class IslandProfileTest extends FeatureSpec with GivenWhenThen with Matchers {
scenario("Complicated") {
Given("simple tree")
val profile = Seq((0, 0), (1, 7), (2, 5), (3, 10), (4, 2), (5, 3), (6, 1), (7, 6), (8, 4), (9, 11), (10, 0))
Then("the value should be")
IslandProfile.waterVolume(profile) should be (34.971428f +- 0.000001f)
}
scenario("Original problem") {
Given("a profile")
val island = Seq((0, 0), (1, 10), (3, 1), (5, 5), (6, 20), (7, 0))
Then("the volume should be 25.5")
IslandProfile.waterVolume(island) should be (23.833332f +- 0.000001f)
}
scenario("Hill type island") {
Given("a hill like island")
val island = Seq((0, 0), (2, 10), (5, 0))
Then("the water volume should be 0")
IslandProfile.waterVolume(island) should be(0.0f)
}
scenario("One puddle island") {
Given("a one puddle island")
val island = Seq((0, 0), (1, 5), (2, 10), (3, 5), (4, 10), (5, 0))
Then("the water volume should be 0")
IslandProfile.waterVolume(island) should be(5.0f)
}
scenario("One puddle island with higher peak to the left") {
Given("a one puddle island")
val island = Seq((0, 0), (1, 5), (3, 15), (5, 5), (6, 10), (7, 0))
Then("the water volume should be 0")
IslandProfile.waterVolume(island) should be(5.0f)
}
scenario("One puddle island with higher peak to the right") {
Given("a one puddle island")
val island = Seq((0, 0), (1, 5), (2, 10), (3, 5), (5, 15), (7, 0))
Then("the water volume should be 0")
IslandProfile.waterVolume(island) should be(5.0f)
}
scenario("Two puddle island") {
Given("a two puddles island")
val island = Seq((0, 0), (1, 5), (2, 10), (3, 5), (4, 10), (5, 8), (6, 9), (7, 0))
Then("the water volume should be 0")
IslandProfile.waterVolume(island) should be(5.75f)
}
}
|
mtoader/katas
|
algos/src/test/scala/ro/redeul/katas/algos/IslandProfileTest.scala
|
Scala
|
mit
| 1,943 |
package scala.slick
/** JDBC-related code, including all facilities for <em>Plain SQL</em> queries
* and JDBC-specific driver components. */
package object jdbc
|
dvinokurov/slick
|
src/main/scala/scala/slick/jdbc/package.scala
|
Scala
|
bsd-2-clause
| 163 |
class DefaultCredentialsProvider extends CredentialsProvider {
private val provider = new com.amazonaws.auth.DefaultAWSCredentialsProviderChain
override def getCredentials: Credentials = {
provider.getCredentials match {
case sc: com.amazonaws.auth.AWSSessionCredentials => Credentials(sc.getAWSAccessKeyId, sc.getAWSSecretKey, sc.getSessionToken)
case c => Credentials(c.getAWSAccessKeyId, c.getAWSSecretKey)
}
}
override def refresh: Unit = provider.refresh
}
object DefaultCredentialsProvider {
def apply(): DefaultCredentialsProvider =
new DefaultCredentialsProvider
}
|
hirokikonishi/awscala
|
aws/core/src/main/scala/DefaultCredentialsProvider.scala
|
Scala
|
apache-2.0
| 608 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import java.util.Properties
import kafka.consumer.ConsumerTimeoutException
import kafka.integration.KafkaServerTestHarness
import kafka.server.KafkaConfig
import kafka.tools.MirrorMaker.{MirrorMakerNewConsumer, MirrorMakerProducer}
import kafka.utils.TestUtils
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer}
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer}
import org.junit.Test
class MirrorMakerIntegrationTest extends KafkaServerTestHarness {
override def generateConfigs(): Seq[KafkaConfig] = TestUtils.createBrokerConfigs(1, zkConnect)
.map(KafkaConfig.fromProps(_, new Properties()))
@Test
def testCommaSeparatedRegex(): Unit = {
val topic = "new-topic"
val msg = "a test message"
val brokerList = TestUtils.getBrokerListStrFromServers(servers)
val producerProps = new Properties
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer])
producerProps.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[ByteArraySerializer])
val producer = new MirrorMakerProducer(true, producerProps)
MirrorMaker.producer = producer
MirrorMaker.producer.send(new ProducerRecord(topic, msg.getBytes()))
MirrorMaker.producer.close()
val consumerProps = new Properties
consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "test-group")
consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
val consumer = new KafkaConsumer(consumerProps, new ByteArrayDeserializer, new ByteArrayDeserializer)
val mirrorMakerConsumer = new MirrorMakerNewConsumer(consumer, None, whitelistOpt = Some("another_topic,new.*,foo"))
mirrorMakerConsumer.init()
try {
TestUtils.waitUntilTrue(() => {
try {
val data = mirrorMakerConsumer.receive()
data.topic == topic && new String(data.value) == msg
} catch {
// this exception is thrown if no record is returned within a short timeout, so safe to ignore
case _: ConsumerTimeoutException => false
}
}, "MirrorMaker consumer should read the expected message from the expected topic within the timeout")
} finally consumer.close()
}
}
|
wangcy6/storm_app
|
frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/integration/kafka/tools/MirrorMakerIntegrationTest.scala
|
Scala
|
apache-2.0
| 3,292 |
/*
* Copyright (c) 2016, Innoave.com
* All rights reserved.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL INNOAVE.COM OR ITS CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
* AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.innoave.abacus.domain.model
import scalafx.util.Duration
trait Parameter {
def radix: Int
def beadHeight: Int
def beadWidth: Int
def rodDiameter: Int
def rodLengthAugment: Int
def beadMovingDuration: Duration
}
trait DefaultParameter extends Parameter {
override val radix = 10
override val beadHeight = 32
override val beadWidth = 64
override val rodDiameter = 10
override val rodLengthAugment = 1
override val beadMovingDuration: Duration = Duration(200)
}
object DefaultParameter extends DefaultParameter
|
haraldmaida/AbacusSFX
|
src/main/scala/com/innoave/abacus/domain/model/Parameter.scala
|
Scala
|
apache-2.0
| 1,409 |
package com.github.kmizu.scomb
import org.scalatest.{DiagrammedAssertions, FunSpec}
class JsonSpec extends FunSpec with DiagrammedAssertions {
sealed abstract class JValue
case class JObject(properties: (String, JValue)*) extends JValue
case class JArray(elements: JValue*) extends JValue
case class JString(value: String) extends JValue
case class JNumber(value: Double) extends JValue
case class JBoolean(value: Boolean) extends JValue
case object JNull extends JValue
object JsonParser extends SCombinator {
def root: Parser[JValue] = for{
_ <- DefaultSpace.*
v <- jvalue
} yield v
def escape(ch: Char): Char = ch match {
case ' ' => ' '
      case 't' => '\t'
      case 'f' => '\f'
      case 'b' => '\b'
      case 'r' => '\r'
      case 'n' => '\n'
      case '\\' => '\\'
      case '"' => '"'
      case '\'' => '\''
case otherwise => otherwise
}
lazy val LBRACKET = defaultToken("[")
lazy val RBRACKET = defaultToken("]")
lazy val LBRACE = defaultToken("{")
lazy val RBRACE = defaultToken("}")
lazy val COLON = defaultToken(":")
lazy val COMMA = defaultToken(",")
lazy val TRUE = defaultToken("true")
lazy val FALSE = defaultToken("false")
lazy val NULL = defaultToken("null")
lazy val jvalue: P[JValue] = rule(jobject | jarray | jstring | jnumber | jboolean | jnull)
lazy val jobject: P[JValue] = rule{for {
_ <- LBRACE
properties <- pair.repeat0By(COMMA)
_ <- RBRACE.l("RBRACE")
} yield JObject(properties:_*)}
lazy val pair: P[(String, JValue)] = rule{for {
key <- string
_ <- COLON.l("COLON")
value <- jvalue
} yield (key, value)}
lazy val jarray: P[JValue] = rule{for {
_ <- LBRACKET
elements <- jvalue.repeat0By(COMMA)
_ <- RBRACKET.l("rbracket")
} yield JArray(elements:_*)}
lazy val string: Parser[String] = rule{for {
_ <- $("\\"")
contents <- ($("\\\\") ~ any ^^ { case _ ~ ch => escape(ch).toString} | except('"')).*
_ <- $("\\"").l("double quote")
_ <- DefaultSpace.*
} yield contents.mkString}
lazy val jstring: Parser[JValue] = rule(string ^^ {v => JString(v)})
lazy val jnumber: Parser[JValue] = rule{for {
value <- (set('0'to'9').+) ^^ { case digits => JNumber(digits.mkString.toInt) }
_ <- DefaultSpace.*
} yield value}
lazy val jboolean: Parser[JValue] = rule(
TRUE ^^ {_ => JBoolean(true)}
| FALSE ^^ {_ => JBoolean(false)}
)
lazy val jnull: Parser[JValue] = rule(NULL ^^ {_ => JNull})
def parse(input: String): Result[JValue] = parse(root, input)
}
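  // Usage sketch (added note): parse returns a Result[JValue]; `.value`, used throughout the
  // specs below, exposes the parsed value as an Option. For example,
  //   JsonParser.parse("""{"xs": [1, 2]}""").value
  // yields Some(JObject("xs" -> JArray(JNumber(1), JNumber(2)))).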
import JsonParser._
describe("JSONParser can parse basic values") {
it("null") {
assert(Some(JNull) == parse("null").value)
assert(Some(JNull) == parse(" null").value)
assert(Some(JNull) == parse(" null ").value)
assert(Some(JNull) == parse("null ").value)
assert(Some(JNull) == parse(" null").value)
}
it("boolean") {
assert(Some(JBoolean(true)) == parse("true").value)
assert(Some(JBoolean(false)) == parse("false").value)
assert(Some(JBoolean(true)) == parse("true ").value)
assert(Some(JBoolean(true)) == parse(" true").value)
assert(Some(JBoolean(true)) == parse(" true ").value)
assert(Some(JBoolean(false)) == parse("false ").value)
assert(Some(JBoolean(false)) == parse(" false").value)
assert(Some(JBoolean(false)) == parse(" false ").value)
}
it("number") {
assert(Some(JNumber(0)) == parse("0").value)
assert(Some(JNumber(0)) == parse(" 0").value)
assert(Some(JNumber(0)) == parse("0 ").value)
assert(Some(JNumber(0)) == parse(" 0 ").value)
assert(Some(JNumber(200)) == parse("200").value)
assert(Some(JNumber(200)) == parse(" 200").value)
assert(Some(JNumber(200)) == parse("200 ").value)
assert(Some(JNumber(200)) == parse(" 200 ").value)
assert(Some(JNumber(300)) == parse("300").value)
assert(Some(JNumber(300)) == parse(" 300").value)
assert(Some(JNumber(300)) == parse("300 ").value)
assert(Some(JNumber(300)) == parse(" 300 ").value)
}
it("string") {
assert(Some(JString("")) == parse("\\"\\"").value)
}
}
describe("A JsonParser") {
it("should parse an object") {
assert(Some(JObject()) == parse("{}").value)
assert(Some(JObject("k" -> JObject())) == parse("{\\"k\\":{}}").value)
assert(Some(JObject("x" -> JNumber(100), "y" -> JNumber(200))) == parse("{\\"x\\": 100, \\"y\\": 200}").value)
}
it("should parse an array") {
assert(Some(JArray()) == parse("[]").value)
assert(Some(JArray(JArray())) == parse("[[]]").value)
assert(Some(JArray(JNumber(1), JNumber(2), JNumber(3))) == parse("[1, 2, 3]").value)
assert(Some(JArray(JObject())) == parse("[{}]").value)
}
}
describe("The JsonParser") {
it("cannot parse incorrect object") {
val failure = parse("{").asInstanceOf[Result.Failure]
assert(Location(1, 2) == failure.location)
assert("""expected:`}` actual:EOF in <RBRACE>""" == failure.message)
}
}
describe("The JsonParser") {
it("cannot parse incorrect array") {
val failure = parse("[1, 2, ]").asInstanceOf[Result.Failure]
assert(Location(1, 6) == failure.location)
assert("""expected:`]` actual:`,` in <rbracket>""" == failure.message)
}
}
}
|
kmizu/scomb
|
src/test/scala/com/github/kmizu/scomb/JsonSpec.scala
|
Scala
|
bsd-3-clause
| 5,430 |
// An object is a class with a single instance
object Upper {
def upper(strings:String*) = strings.map(_.toUpperCase())
}
println(Upper.upper("A","First","Scala","Program").mkString(" "))
|
M42/linguaggi
|
scala/hello.scala
|
Scala
|
gpl-2.0
| 191 |
import a.*
object B {
val foo = new A.Buf[Seq[Double]]
val bar = Seq.empty[Double]
foo.append(bar)
}
|
dotty-staging/dotty
|
sbt-test/tasty-compat/add-overload/b/B.scala
|
Scala
|
apache-2.0
| 108 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.io.{ObjectInputStream, ObjectOutputStream, IOException}
import org.apache.spark.TaskContext
/**
* A Task implementation that fails to serialize.
*/
private[spark] class NotSerializableFakeTask(myId: Int, stageId: Int)
extends Task[Array[Byte]](stageId, 0, 0, Seq.empty) {
override def runTask(context: TaskContext): Array[Byte] = Array.empty[Byte]
override def preferredLocations: Seq[TaskLocation] = Seq[TaskLocation]()
@throws(classOf[IOException])
private def writeObject(out: ObjectOutputStream): Unit = {
if (stageId == 0) {
throw new IllegalStateException("Cannot serialize")
}
}
@throws(classOf[IOException])
private def readObject(in: ObjectInputStream): Unit = {}
}
|
ArvinDevel/onlineAggregationOnSparkV2
|
core/src/test/scala/org/apache/spark/scheduler/NotSerializableFakeTask.scala
|
Scala
|
apache-2.0
| 1,562 |
import sbt._
import Keys._
object SoqlStdlib {
lazy val settings: Seq[Setting[_]] = BuildSettings.projectSettings() ++ Seq(
name := "soql-stdlib",
libraryDependencies ++= {
if(scalaVersion.value.startsWith("2.13")) Nil
else Seq(compilerPlugin("org.scalamacros" % "paradise" % "2.1.1" cross CrossVersion.full))
},
scalacOptions ++= {
if(scalaVersion.value.startsWith("2.13")) Seq("-Ymacro-annotations")
else Nil
}
)
}
|
socrata-platform/soql-reference
|
project/SoqlStdlib.scala
|
Scala
|
apache-2.0
| 466 |
package io.youi.component.feature
import io.youi.Color
import io.youi.component.types.{Prop, TextDecorationLine, TextDecorationStyle}
class TextDecorationFeature(override val parent: FeatureParent) extends Feature {
lazy val line: Prop[Set[TextDecorationLine]] = new Prop(
getter = Set.empty,
setter = set => parent.css.setProperty("text-decoration-line", set.map(TextDecorationLine.toString).mkString(" "))
)
lazy val color: Prop[Color] = Prop.stringify(parent.css.getPropertyValue("text-decoration-color"), parent.css.setProperty("text-decoration-color", _), Color, Color.Clear)
lazy val style: Prop[TextDecorationStyle] = Prop.stringify(parent.css.getPropertyValue("text-decoration-style"), parent.css.setProperty("text-decoration-style", _), TextDecorationStyle, TextDecorationStyle.Initial)
}
|
outr/youi
|
gui/src/main/scala/io/youi/component/feature/TextDecorationFeature.scala
|
Scala
|
mit
| 816 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import scala.concurrent.duration._
object DebounceFlattenSuite extends BaseOperatorSuite {
def createObservable(sourceCount: Int) = Some {
val o = Observable
.now(1L)
.delayOnComplete(1.day)
.debounceTo(1.second, (x: Long) => Observable.interval(1.second).map(_ => x))
.take(sourceCount.toLong)
val count = sourceCount
val sum = sourceCount
Sample(o, count, sum.toLong, 1.second, 1.second)
}
def observableInError(sourceCount: Int, ex: Throwable) = None
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = None
override def cancelableObservables(): Seq[Sample] = {
val sample = Observable
.now(1L)
.delayOnComplete(1.day)
.debounceTo(1.second, (x: Long) => Observable.interval(1.second).map(_ => 1L))
.take(10L)
Seq(Sample(sample, 2, 2, 2.seconds, 2.seconds))
}
}
|
monixio/monix
|
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DebounceFlattenSuite.scala
|
Scala
|
apache-2.0
| 1,616 |
package org.jetbrains.plugins.scala.codeInsight.template.macros
import com.intellij.codeInsight.lookup.LookupElement
import com.intellij.codeInsight.template.{Expression, ExpressionContext, Macro, Result}
import org.jetbrains.plugins.scala.project.ProjectExt
/**
* @author adkozlov
*/
trait ScalaMacro extends Macro {
override def calculateResult(params: Array[Expression], context: ExpressionContext): Result = {
innerCalculateResult(params, context)
}
override def calculateLookupItems(params: Array[Expression], context: ExpressionContext): Array[LookupElement] = {
innerCalculateLookupItems(params, context)
}
protected def innerCalculateResult(params: Array[Expression], context: ExpressionContext): Result
protected def innerCalculateLookupItems(params: Array[Expression], context: ExpressionContext): Array[LookupElement] = {
super.calculateLookupItems(params, context)
}
private def getTypeSystem(context: ExpressionContext) = context.getProject.typeSystem
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInsight/template/macros/ScalaMacro.scala
|
Scala
|
apache-2.0
| 1,005 |
/*
* Open Korean Text - Scala library to process Korean text
*
* Copyright 2014 Twitter, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.openkoreantext.processor.util
/**
* Korean Part-of-Speech
*
* N Noun: 명사 (Nouns, Pronouns, Company Names, Proper Noun, Person Names, Numerals, Standalone, Dependent)
* V Verb: 동사 (하, 먹, 자, 차)
* J Adjective: 형용사 (예쁘다, 크다, 작다)
* A Adverb: 부사 (잘, 매우, 빨리, 반드시, 과연)
* D Determiner: 관형사 (새, 헌, 참, 첫, 이, 그, 저)
* E Exclamation: 감탄사 (헐, ㅋㅋㅋ, 어머나, 얼씨구)
*
* C Conjunction: 접속사
*
* j SubstantiveJosa: 조사 (의, 에, 에서)
* l AdverbialJosa: 부사격 조사 (~인, ~의, ~일)
* e Eomi: 어말어미 (다, 요, 여, 하댘ㅋㅋ)
* r PreEomi: 선어말어미 (었)
*
* p NounPrefix: 접두사 ('초'대박)
* v VerbPrefix: 동사 접두어 ('쳐'먹어)
* s Suffix: 접미사 (~적)
*
* f Foreign: 한글이 아닌 문자들
*
 * Demonstratives (지시사) are represented by Determiner.
*
* Korean: Korean chunk (candidate for parsing)
* Foreign: Mixture of non-Korean strings
* Number: 숫자
* Emotion: Korean Single Character Emotions (ㅋㅋㅋㅋ, ㅎㅎㅎㅎ, ㅠㅜㅠㅜ)
* Alpha: Alphabets 알파벳
* Punctuation: 문장부호
* Hashtag: Twitter Hashtag 해쉬태그 #Korean
* ScreenName: Twitter username (@nlpenguin)
*
 * Unknown: Could not parse the string.
*/
object KoreanPos extends Enumeration {
type KoreanPos = Value
  // Word-level POS
val Noun, Verb, Adjective,
Adverb, Determiner, Exclamation,
Josa, Eomi, PreEomi, Conjunction,
Modifier, VerbPrefix, Suffix, Unknown,
// Chunk level POS
Korean, Foreign, Number, KoreanParticle, Alpha,
Punctuation, Hashtag, ScreenName,
Email, URL, CashTag,
// Functional POS
Space, Others,
ProperNoun = Value
val OtherPoses = Set(
Korean, Foreign, Number, KoreanParticle, Alpha,
Punctuation, Hashtag, ScreenName,
Email, URL, CashTag)
val shortCut = Map(
'N' -> Noun,
'V' -> Verb,
'J' -> Adjective,
'A' -> Adverb,
'D' -> Determiner,
'E' -> Exclamation,
'C' -> Conjunction,
'j' -> Josa,
'e' -> Eomi,
'r' -> PreEomi,
'm' -> Modifier,
'v' -> VerbPrefix,
's' -> Suffix,
'a' -> Alpha,
'n' -> Number,
'o' -> Others
)
case class KoreanPosTrie(curPos: KoreanPos, nextTrie: List[KoreanPosTrie], ending: Option[KoreanPos])
val selfNode = KoreanPosTrie(null, null, ending = None)
protected[processor] def buildTrie(s: String, ending_pos: KoreanPos): List[KoreanPosTrie] = {
def isFinal(rest: String): Boolean = {
val isNextOptional = rest.foldLeft(true) {
case (output: Boolean, c: Char) if c == '+' || c == '1' => false
case (output: Boolean, c: Char) => output
}
rest.length == 0 || isNextOptional
}
if (s.length < 2) {
return List()
}
val pos = shortCut(s.charAt(0))
val rule = s.charAt(1)
val rest = s.slice(2, s.length)
val end: Option[KoreanPos] = if (isFinal(rest)) Some(ending_pos) else None
rule match {
case '+' =>
List(KoreanPosTrie(pos, selfNode :: buildTrie(rest, ending_pos), end))
case '*' =>
List(KoreanPosTrie(pos, selfNode :: buildTrie(rest, ending_pos), end)) ++ buildTrie(rest, ending_pos)
case '1' =>
List(KoreanPosTrie(pos, buildTrie(rest, ending_pos), end))
case '0' =>
List(KoreanPosTrie(pos, buildTrie(rest, ending_pos), end)) ++ buildTrie(rest, ending_pos)
}
}
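  // Reading of the rule characters handled above (added note, inferred from the match):
  //   '+' = one or more, '*' = zero or more, '1' = exactly one, '0' = zero or one.
  // For example, buildTrie("N1j0", Noun) describes a single Noun optionally followed by a
  // Josa; the Noun node is marked as a valid ending because the trailing Josa is optional.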
protected[processor] def getTrie(sequences: Map[String, KoreanPos]): List[KoreanPosTrie] =
sequences.foldLeft(List[KoreanPosTrie]()) {
case (results: List[KoreanPosTrie], (s: String, ending_pos: KoreanPos)) =>
buildTrie(s, ending_pos) ::: results
}
val Predicates = Set(Verb, Adjective)
}
|
open-korean-text/open-korean-text
|
src/main/scala/org/openkoreantext/processor/util/KoreanPos.scala
|
Scala
|
apache-2.0
| 4,461 |
import java.sql.SQLException
import com.mysql.jdbc.exceptions.MySQLDataException
import slick.driver.MySQLDriver.api._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
case class User(id: Long, name: String, password: String, email: Option[String])
class UsersTable(tag: Tag) extends Table[User](tag, "users") {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def name = column[String]("name")
def password = column[String]("password")
def email = column[Option[String]]("email")
def uniqueName = index("unique_name", (name), unique = true)
def * = (id, name, password, email) <>(User.tupled, User.unapply)
}
object CaseClass {
val db = Database.forConfig("slickexamples")
val timeout = 20.seconds
val users = TableQuery[UsersTable]
def selectName(): Unit = {
val q = users.map(_.name)
Await.result(
db.run(q.result).map { res =>
// res is a Vector[String]
println(res)
}, timeout)
}
def selectByName(name: String): Unit = {
val q = users.filter(user => user.name === name)
Await.result(
db.run(q.result).map { res =>
// res is a Vector[User]
println(res)
}, timeout)
}
def selectWithCondition2(): Unit = {
// in slick, '===' means equal to
// likewise, '=!=' means not equal to
// also, you can use short form lambdas
val q = users.filter(_.name =!= "mary").map(_.email)
Await.result(
db.run(q.result).map { res =>
// res is a Vector[String]
println(res)
}, timeout)
}
def selectAll(): Unit = {
// this is equivalent to select * from users
val q = users
Await.result(
db.run(q.result).map { res =>
// res is a Vector[User]
println(res)
}, timeout)
}
def updateName(): Unit = {
// update users u set u.name='peter' where u.id=1
val q = users.filter(_.id === 1L).map(_.name).update("peter")
Await.result(
db.run(q).map { numAffectedRows =>
println(numAffectedRows)
}, timeout)
}
def deleteByName(name: String): Unit = {
val q = users.filter(_.name === name).delete
Await.result(
db.run(q).map { numAffectedRows =>
println(numAffectedRows)
}, timeout)
}
def transaction(name1: String, name2: String, sampleUsername: String): Unit = {
// here we will delete an user and create another two users
// but we will abort the transaction and recover the deleted used
// in case we fail to add the two next users
val qDelete = users.filter(_.name === "john").delete
val addSampleUser = (users returning users.map(_.id)) += User(0, name = sampleUsername, password = "passwd", email = None)
val q1 = (users returning users.map(_.id)) += User(0, name = name1, password = "passwd", email = None)
val q2 = (users returning users.map(_.id)) += User(0, name = name2, password = "passwd", email = None)
val actions = for {
affectedRows <- qDelete
// watch how inserting sampleUser to the database will be 'rolled back' if the preconditions fail
_ <- addSampleUser
// see how an exception is thrown if affectedRows is different from 1
// exceptions thrown here cause the transaction to be rolled back
newUserId1 <- if (affectedRows == 1) q1 else DBIO.failed(new SQLException(s"""precondition failed... aborting! Try running selectByName("$sampleUsername") to see how it's been rolled back"""))
newUserId2 <- if (affectedRows == 1) q2 else DBIO.failed(new SQLException(s"""precondition failed... aborting! Try running selectByName("$sampleUsername") to see how it's been rolled back"""))
    } yield (newUserId1, newUserId2)
Await.result(
db.run(actions.transactionally).map { res =>
println(s"Newly-added users had the following ids ${res._1} and ${res._2}")
}.recover {
case e: SQLException => println("Caught exception: " + e.getMessage)
}, timeout)
}
def insertUser(name: String, password: String): Unit = {
// use zero as the ID because the database will generate a new ID
val newUser = (users returning users.map(_.id)) += User(0, name = name, password = password, email = None)
// note that the newly-added id is returned instead of
// the number of affected rows
Await.result(db.run(newUser).map { newId =>
newId match {
case x: Long => println(s"last entry added had id $x")
}
}.recover {
case e: SQLException => println("Caught exception: " + e.getMessage)
}, timeout)
}
}
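// Usage sketch (added for illustration; not part of the original file). Exercises the
// queries defined above; it assumes the "slickexamples" config and the users table exist,
// and the user names below are placeholders.
object CaseClassUsageSketch {
  def run(): Unit = {
    CaseClass.insertUser("alice", "secret")   // prints the generated id via `returning`
    CaseClass.selectByName("alice")           // plain filter on the name column
    CaseClass.transaction("bob", "carol", "rollback-probe") // rolls back on a failed precondition
    CaseClass.deleteByName("alice")
  }
}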
|
queirozfcom/slick_sandbox
|
src/main/scala/CaseClass.scala
|
Scala
|
apache-2.0
| 4,629 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* This Scala source file was generated by the Gradle 'init' task.
*/
package io.truthencode.ddo.modeling
class Library {
def someLibraryMethod(): Boolean = true
}
|
adarro/ddo-calc
|
subprojects/common/ddo-modeling/src/main/scala/io/truthencode/ddo/modeling/Library.scala
|
Scala
|
apache-2.0
| 840 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.kernel.util
import net.lshift.diffa.kernel.differencing.FeedbackHandle
/**
* Simple implementation of a feedback handle that doesn't cancel.
* This is used for testing when no cancellation is required.
*/
class NonCancellingFeedbackHandle extends FeedbackHandle {
def isCancelled = false
def cancel() = {}
}
|
lshift/diffa
|
kernel/src/main/scala/net/lshift/diffa/kernel/util/NonCancellingFeedbackHandle.scala
|
Scala
|
apache-2.0
| 951 |
package nz.wicker.autoencoder.math.optimization
case class LimitNumberOfLineSearches(maxLineSearches: Int)
extends TerminationCriterion[Any, (Int, Int)] {
def apply(x: Any, lineSearchesEvals: (Int, Int)): Boolean = {
lineSearchesEvals._1 > maxLineSearches
}
}
|
joergwicker/autoencoder
|
src/main/scala/nz/wicker/autoencoder/math/optimization/LimitNumberOfLineSearches.scala
|
Scala
|
gpl-3.0
| 274 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.Since
import org.apache.spark.ml.PredictorParams
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param.{DoubleParam, Param, ParamMap, ParamValidators}
import org.apache.spark.ml.param.shared.HasWeightCol
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{Dataset, Row}
import org.apache.spark.sql.functions.{col, lit}
/**
* Params for Naive Bayes Classifiers.
*/
private[classification] trait NaiveBayesParams extends PredictorParams with HasWeightCol {
/**
* The smoothing parameter.
* (default = 1.0).
* @group param
*/
final val smoothing: DoubleParam = new DoubleParam(this, "smoothing", "The smoothing parameter.",
ParamValidators.gtEq(0))
/** @group getParam */
final def getSmoothing: Double = $(smoothing)
/**
* The model type which is a string (case-sensitive).
* Supported options: "multinomial" and "bernoulli".
* (default = multinomial)
* @group param
*/
final val modelType: Param[String] = new Param[String](this, "modelType", "The model type " +
"which is a string (case-sensitive). Supported options: multinomial (default) and bernoulli.",
ParamValidators.inArray[String](NaiveBayes.supportedModelTypes.toArray))
/** @group getParam */
final def getModelType: String = $(modelType)
}
// scalastyle:off line.size.limit
/**
* Naive Bayes Classifiers.
* It supports Multinomial NB
* (see <a href="http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html">
* here</a>)
* which can handle finitely supported discrete data. For example, by converting documents into
* TF-IDF vectors, it can be used for document classification. By making every vector a
* binary (0/1) data, it can also be used as Bernoulli NB
* (see <a href="http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html">
* here</a>).
* The input feature values must be nonnegative.
*/
// scalastyle:on line.size.limit
@Since("1.5.0")
class NaiveBayes @Since("1.5.0") (
@Since("1.5.0") override val uid: String)
extends ProbabilisticClassifier[Vector, NaiveBayes, NaiveBayesModel]
with NaiveBayesParams with DefaultParamsWritable {
import NaiveBayes._
@Since("1.5.0")
def this() = this(Identifiable.randomUID("nb"))
/**
* Set the smoothing parameter.
* Default is 1.0.
* @group setParam
*/
@Since("1.5.0")
def setSmoothing(value: Double): this.type = set(smoothing, value)
setDefault(smoothing -> 1.0)
/**
* Set the model type using a string (case-sensitive).
* Supported options: "multinomial" and "bernoulli".
* Default is "multinomial"
* @group setParam
*/
@Since("1.5.0")
def setModelType(value: String): this.type = set(modelType, value)
setDefault(modelType -> NaiveBayes.Multinomial)
/**
* Sets the value of param [[weightCol]].
* If this is not set or empty, we treat all instance weights as 1.0.
* Default is not set, so all instances have weight one.
*
* @group setParam
*/
@Since("2.1.0")
def setWeightCol(value: String): this.type = set(weightCol, value)
override protected def train(dataset: Dataset[_]): NaiveBayesModel = {
trainWithLabelCheck(dataset, positiveLabel = true)
}
/**
* ml assumes input labels in range [0, numClasses). But this implementation
* is also called by mllib NaiveBayes which allows other kinds of input labels
* such as {-1, +1}. `positiveLabel` is used to determine whether the label
* should be checked and it should be removed when we remove mllib NaiveBayes.
*/
private[spark] def trainWithLabelCheck(
dataset: Dataset[_],
positiveLabel: Boolean): NaiveBayesModel = instrumented { instr =>
instr.logPipelineStage(this)
instr.logDataset(dataset)
if (positiveLabel && isDefined(thresholds)) {
val numClasses = getNumClasses(dataset)
instr.logNumClasses(numClasses)
require($(thresholds).length == numClasses, this.getClass.getSimpleName +
".train() called with non-matching numClasses and thresholds.length." +
s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}")
}
val modelTypeValue = $(modelType)
val requireValues: Vector => Unit = {
modelTypeValue match {
case Multinomial =>
requireNonnegativeValues
case Bernoulli =>
requireZeroOneBernoulliValues
case _ =>
// This should never happen.
throw new UnknownError(s"Invalid modelType: ${$(modelType)}.")
}
}
instr.logParams(this, labelCol, featuresCol, weightCol, predictionCol, rawPredictionCol,
probabilityCol, modelType, smoothing, thresholds)
val numFeatures = dataset.select(col($(featuresCol))).head().getAs[Vector](0).size
instr.logNumFeatures(numFeatures)
val w = if (!isDefined(weightCol) || $(weightCol).isEmpty) lit(1.0) else col($(weightCol))
// Aggregates term frequencies per label.
// TODO: Calling aggregateByKey and collect creates two stages, we can implement something
// TODO: similar to reduceByKeyLocally to save one stage.
val aggregated = dataset.select(col($(labelCol)), w, col($(featuresCol))).rdd
.map { row => (row.getDouble(0), (row.getDouble(1), row.getAs[Vector](2)))
}.aggregateByKey[(Double, DenseVector, Long)]((0.0, Vectors.zeros(numFeatures).toDense, 0L))(
seqOp = {
case ((weightSum, featureSum, count), (weight, features)) =>
requireValues(features)
BLAS.axpy(weight, features, featureSum)
(weightSum + weight, featureSum, count + 1)
},
combOp = {
case ((weightSum1, featureSum1, count1), (weightSum2, featureSum2, count2)) =>
BLAS.axpy(1.0, featureSum2, featureSum1)
(weightSum1 + weightSum2, featureSum1, count1 + count2)
}).collect().sortBy(_._1)
val numSamples = aggregated.map(_._2._3).sum
instr.logNumExamples(numSamples)
val numLabels = aggregated.length
instr.logNumClasses(numLabels)
val numDocuments = aggregated.map(_._2._1).sum
val labelArray = new Array[Double](numLabels)
val piArray = new Array[Double](numLabels)
val thetaArray = new Array[Double](numLabels * numFeatures)
val lambda = $(smoothing)
val piLogDenom = math.log(numDocuments + numLabels * lambda)
var i = 0
aggregated.foreach { case (label, (n, sumTermFreqs, _)) =>
labelArray(i) = label
piArray(i) = math.log(n + lambda) - piLogDenom
val thetaLogDenom = $(modelType) match {
case Multinomial => math.log(sumTermFreqs.values.sum + numFeatures * lambda)
case Bernoulli => math.log(n + 2.0 * lambda)
case _ =>
// This should never happen.
throw new UnknownError(s"Invalid modelType: ${$(modelType)}.")
}
var j = 0
while (j < numFeatures) {
thetaArray(i * numFeatures + j) = math.log(sumTermFreqs(j) + lambda) - thetaLogDenom
j += 1
}
i += 1
}
val pi = Vectors.dense(piArray)
val theta = new DenseMatrix(numLabels, numFeatures, thetaArray, true)
new NaiveBayesModel(uid, pi, theta).setOldLabels(labelArray)
}
@Since("1.5.0")
override def copy(extra: ParamMap): NaiveBayes = defaultCopy(extra)
}
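// Usage sketch (added for illustration; not part of the original Spark source). `train` and
// `test` are caller-supplied Datasets with "label" and "features" columns.
object NaiveBayesUsageSketch {
  def run(train: Dataset[_], test: Dataset[_]): Unit = {
    val nb = new NaiveBayes()
      .setModelType("multinomial") // or "bernoulli" for 0/1-valued features
      .setSmoothing(1.0)
    val model = nb.fit(train)      // estimates pi (log class priors) and theta (log conditionals)
    model.transform(test)          // appends rawPrediction/probability/prediction columns
  }
}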
@Since("1.6.0")
object NaiveBayes extends DefaultParamsReadable[NaiveBayes] {
/** String name for multinomial model type. */
private[classification] val Multinomial: String = "multinomial"
/** String name for Bernoulli model type. */
private[classification] val Bernoulli: String = "bernoulli"
/* Set of modelTypes that NaiveBayes supports */
private[classification] val supportedModelTypes = Set(Multinomial, Bernoulli)
private[NaiveBayes] def requireNonnegativeValues(v: Vector): Unit = {
val values = v match {
case sv: SparseVector => sv.values
case dv: DenseVector => dv.values
}
require(values.forall(_ >= 0.0),
s"Naive Bayes requires nonnegative feature values but found $v.")
}
private[NaiveBayes] def requireZeroOneBernoulliValues(v: Vector): Unit = {
val values = v match {
case sv: SparseVector => sv.values
case dv: DenseVector => dv.values
}
require(values.forall(v => v == 0.0 || v == 1.0),
s"Bernoulli naive Bayes requires 0 or 1 feature values but found $v.")
}
@Since("1.6.0")
override def load(path: String): NaiveBayes = super.load(path)
}
/**
* Model produced by [[NaiveBayes]]
* @param pi log of class priors, whose dimension is C (number of classes)
* @param theta log of class conditional probabilities, whose dimension is C (number of classes)
* by D (number of features)
*/
@Since("1.5.0")
class NaiveBayesModel private[ml] (
@Since("1.5.0") override val uid: String,
@Since("2.0.0") val pi: Vector,
@Since("2.0.0") val theta: Matrix)
extends ProbabilisticClassificationModel[Vector, NaiveBayesModel]
with NaiveBayesParams with MLWritable {
import NaiveBayes.{Bernoulli, Multinomial}
/**
* mllib NaiveBayes is a wrapper of ml implementation currently.
* Input labels of mllib could be {-1, +1} and mllib NaiveBayesModel exposes labels,
* both of which are different from ml, so we should store the labels sequentially
* to be called by mllib. This should be removed when we remove mllib NaiveBayes.
*/
private[spark] var oldLabels: Array[Double] = null
private[spark] def setOldLabels(labels: Array[Double]): this.type = {
this.oldLabels = labels
this
}
/**
* Bernoulli scoring requires log(condprob) if 1, log(1-condprob) if 0.
* This precomputes log(1.0 - exp(theta)) and its sum which are used for the linear algebra
* application of this condition (in predict function).
*/
private lazy val (thetaMinusNegTheta, negThetaSum) = $(modelType) match {
case Multinomial => (None, None)
case Bernoulli =>
val negTheta = theta.map(value => math.log(1.0 - math.exp(value)))
val ones = new DenseVector(Array.fill(theta.numCols) {1.0})
val thetaMinusNegTheta = theta.map { value =>
value - math.log(1.0 - math.exp(value))
}
(Option(thetaMinusNegTheta), Option(negTheta.multiply(ones)))
case _ =>
// This should never happen.
throw new UnknownError(s"Invalid modelType: ${$(modelType)}.")
}
@Since("1.6.0")
override val numFeatures: Int = theta.numCols
@Since("1.5.0")
override val numClasses: Int = pi.size
private def multinomialCalculation(features: Vector) = {
val prob = theta.multiply(features)
BLAS.axpy(1.0, pi, prob)
prob
}
private def bernoulliCalculation(features: Vector) = {
features.foreachActive((_, value) =>
require(value == 0.0 || value == 1.0,
s"Bernoulli naive Bayes requires 0 or 1 feature values but found $features.")
)
val prob = thetaMinusNegTheta.get.multiply(features)
BLAS.axpy(1.0, pi, prob)
BLAS.axpy(1.0, negThetaSum.get, prob)
prob
}
override protected def predictRaw(features: Vector): Vector = {
$(modelType) match {
case Multinomial =>
multinomialCalculation(features)
case Bernoulli =>
bernoulliCalculation(features)
case _ =>
// This should never happen.
throw new UnknownError(s"Invalid modelType: ${$(modelType)}.")
}
}
override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
rawPrediction match {
case dv: DenseVector =>
var i = 0
val size = dv.size
val maxLog = dv.values.max
while (i < size) {
dv.values(i) = math.exp(dv.values(i) - maxLog)
i += 1
}
val probSum = dv.values.sum
i = 0
while (i < size) {
dv.values(i) = dv.values(i) / probSum
i += 1
}
dv
case sv: SparseVector =>
throw new RuntimeException("Unexpected error in NaiveBayesModel:" +
" raw2probabilityInPlace encountered SparseVector")
}
}
@Since("1.5.0")
override def copy(extra: ParamMap): NaiveBayesModel = {
copyValues(new NaiveBayesModel(uid, pi, theta).setParent(this.parent), extra)
}
@Since("1.5.0")
override def toString: String = {
s"NaiveBayesModel (uid=$uid) with ${pi.size} classes"
}
@Since("1.6.0")
override def write: MLWriter = new NaiveBayesModel.NaiveBayesModelWriter(this)
}
@Since("1.6.0")
object NaiveBayesModel extends MLReadable[NaiveBayesModel] {
@Since("1.6.0")
override def read: MLReader[NaiveBayesModel] = new NaiveBayesModelReader
@Since("1.6.0")
override def load(path: String): NaiveBayesModel = super.load(path)
/** [[MLWriter]] instance for [[NaiveBayesModel]] */
private[NaiveBayesModel] class NaiveBayesModelWriter(instance: NaiveBayesModel) extends MLWriter {
private case class Data(pi: Vector, theta: Matrix)
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: pi, theta
val data = Data(instance.pi, instance.theta)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class NaiveBayesModelReader extends MLReader[NaiveBayesModel] {
/** Checked against metadata when loading model */
private val className = classOf[NaiveBayesModel].getName
override def load(path: String): NaiveBayesModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val data = sparkSession.read.parquet(dataPath)
val vecConverted = MLUtils.convertVectorColumnsToML(data, "pi")
val Row(pi: Vector, theta: Matrix) = MLUtils.convertMatrixColumnsToML(vecConverted, "theta")
.select("pi", "theta")
.head()
val model = new NaiveBayesModel(metadata.uid, pi, theta)
metadata.getAndSetParams(model)
model
}
}
}
|
michalsenkyr/spark
|
mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala
|
Scala
|
apache-2.0
| 15,016 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.classify
import cc.factorie.variable._
import cc.factorie.infer._
import cc.factorie.la.{WeightsMapAccumulator, Tensor1, SingletonBinaryTensor1}
import cc.factorie.optimize._
import cc.factorie.app.classify.backend._
/** A record of the result of applying a Classifier to a variable. */
class Classification[V<:DiscreteVar](val _1:V, score:Tensor1) extends MulticlassClassification(score) with DiscreteMarginal1[V] {
def bestValue = _1.domain.apply(bestLabelIndex)
}
// Classifiers
/** Performs iid prediction of a DiscreteVar. */
trait Classifier[L<:DiscreteVar] {
// Get classification record without changing the value of the label
def classification(v:L): Classification[L]
def classifications(labels: Iterable[L]): Seq[Classification[L]] = labels.toSeq.map(label => classification(label))
// Get classification record and also set the label to its best scoring value
def classify[L2<:L with MutableDiscreteVar](v:L2): Classification[L] = { val c = classification(v); v := c.bestLabelIndex; c }
def classify(labels: Iterable[L with MutableDiscreteVar]): Seq[Classification[L]] = labels.toSeq.map(l => classify(l))
def bestLabelIndex(v:L): Int = classification(v).bestLabelIndex
// TODO It might be nice to have a weighted version of this. We could do this with a LabelList. :-) -akm
def accuracy(labels:Iterable[L with LabeledDiscreteVar]): Double = {
var correct = 0.0; var total = 0.0
labels.foreach(label => { total += 1.0; if (bestLabelIndex(label) == label.target.intValue) correct += 1.0 })
correct / total
}
}
/** A Classifier in which the "input, observed" object to be classified is a VectorVar (with value Tensor1). */
trait VectorClassifier[V<:DiscreteVar, Features<:VectorVar] extends Classifier[V] with MulticlassClassifier[Tensor1] {
def labelToFeatures: V=>Features
}
/** A VectorClassifier in which the score for each class is a dot-product between the observed feature vector and a vector of parameters.
Examples include NaiveBayes, MultivariateLogisticRegression, LinearSVM, and many others.
Counter-examples include KNearestNeighbor. */
class LinearVectorClassifier[L<:DiscreteVar,F<:VectorVar](numLabels:Int, numFeatures:Int, val labelToFeatures:L=>F) extends LinearMulticlassClassifier(numLabels, numFeatures) with VectorClassifier[L,F] {
def classification(v:L): Classification[L] = new Classification(v, predict(labelToFeatures(v).value))
override def bestLabelIndex(v:L): Int = predict(labelToFeatures(v).value).maxIndex
}
// Classifier trainers
/** An object that can create and train a VectorClassifier given labeled training data. */
trait VectorClassifierTrainer {
def train[L<:LabeledDiscreteVar,F<:VectorVar](labels:Iterable[L], l2f:L=>F): VectorClassifier[L,F]
}
/** An object that can create and train a LinearVectorClassifier (or train a pre-existing LinearVectorClassifier) given labeled training data. */
trait LinearVectorClassifierTrainer extends VectorClassifierTrainer {
/** Create a new LinearVectorClassifier, not yet trained. */
protected def newClassifier[L<:LabeledDiscreteVar,F<:VectorVar](labelDomainSize:Int, featureDomainSize:Int, l2f:L=>F): LinearVectorClassifier[L,F] = new LinearVectorClassifier(labelDomainSize, featureDomainSize, l2f)
/** Create, train and return a new LinearVectorClassifier */
def train[L<:LabeledDiscreteVar,F<:VectorVar](labels:Iterable[L], l2f:L=>F): LinearVectorClassifier[L,F] = train(newClassifier(labels.head.domain.size, l2f(labels.head).domain.dimensionSize, l2f), labels, l2f)
/** Train (and return) an already-created (perhaps already partially-trained) LinearVectorClassifier. */
def train[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:C, trainLabels:Iterable[L], l2f:L=>F): C
}
/** A LinearVectorClassifierTrainer that uses the cc.factorie.optimize package to estimate parameters. */
class OptimizingLinearVectorClassifierTrainer(
val optimizer: GradientOptimizer,
val useParallelTrainer: Boolean,
val useOnlineTrainer: Boolean,
val objective: OptimizableObjectives.Multiclass,
val maxIterations: Int,
val miniBatch: Int,
val nThreads: Int)(implicit random: scala.util.Random) extends LinearVectorClassifierTrainer
{
// TODO This is missing weights on Examples. I think passing a Seq[Double] is error prone, and am tempted to go back to LabelList. -akm
/** Create a sequence of Example instances for obtaining the gradients used for training. */
def examples[L<:LabeledDiscreteVar,F<:VectorVar](classifier:LinearVectorClassifier[L,F], labels:Iterable[L], l2f:L=>F, objective:OptimizableObjectives.Multiclass): Seq[Example] =
labels.toSeq.map(l => new PredictorExample(classifier, l2f(l).value, l.target.intValue, objective))
/** Train the classifier to convergence, calling the diagnostic function once after each iteration.
This is the base method called by the other simpler train methods. */
def train[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:C, trainLabels:Iterable[L], l2f:L=>F, diagnostic:C=>Unit): C = {
Trainer.train(parameters=classifier.parameters, examples=examples(classifier, trainLabels, l2f, objective), maxIterations=maxIterations, evaluate = ()=>diagnostic(classifier), optimizer=optimizer, useParallelTrainer=useParallelTrainer, useOnlineTrainer=useOnlineTrainer, miniBatch=miniBatch, nThreads=nThreads)
classifier
}
/** Return a function suitable for passing in as the diagnostic to train which prints the accuracy on the testLabels */
def defaultTestDiagnostic[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:LinearVectorClassifier[L,F], trainLabels:Iterable[L], testLabels:Iterable[L]): C=>Unit =
(c:C) => println(f"Test accuracy: ${classifier.accuracy(testLabels.asInstanceOf[Iterable[L with LabeledDiscreteVar]])}%1.4f")
/** Return a function suitable for passing in as the diagnostic to train which prints the accuracy on the trainLabels and the testLabels */
def defaultTrainAndTestDiagnostic[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:LinearVectorClassifier[L,F], trainLabels:Iterable[L], testLabels:Iterable[L]): C=>Unit =
    (c:LinearVectorClassifier[L,F]) => println(f"Train accuracy: ${classifier.accuracy(trainLabels.asInstanceOf[Iterable[L with LabeledDiscreteVar]])}%1.4f\nTest accuracy: ${classifier.accuracy(testLabels.asInstanceOf[Iterable[L with LabeledDiscreteVar]])}%1.4f")
/** Train the classifier to convergence, calling a test-accuracy-printing diagnostic function once after each iteration. */
def train[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:C, trainLabels:Iterable[L], testLabels:Iterable[L], l2f:L=>F): C =
train(classifier, trainLabels, l2f, defaultTestDiagnostic(classifier, trainLabels, testLabels))
/** Train the classifier to convergence, calling no diagnostic function. */
def train[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:C, trainLabels:Iterable[L], l2f:L=>F): C = {
train(classifier, trainLabels, l2f, (c:LinearVectorClassifier[L,F]) => ())
classifier
}
}
/** An OptimizingLinearVectorClassifierTrainer pre-tuned with default arguments well-suited to online training, operating on the gradient of one Example at a time. */
class OnlineOptimizingLinearVectorClassifierTrainer(
useParallel:Boolean = false,
optimizer: GradientOptimizer = new AdaGrad with ParameterAveraging,
objective: OptimizableObjectives.Multiclass = OptimizableObjectives.sparseLogMulticlass,
maxIterations: Int = 3,
miniBatch: Int = -1,
nThreads: Int = Runtime.getRuntime.availableProcessors())(implicit random: scala.util.Random)
extends OptimizingLinearVectorClassifierTrainer(optimizer, useParallel, useOnlineTrainer = true, objective, maxIterations, miniBatch, nThreads)
/** An OptimizingLinearVectorClassifierTrainer pre-tuned with default arguments well-suited to batch training, operating on all the gradients of the Examples together. */
class BatchOptimizingLinearVectorClassifierTrainer(useParallel:Boolean = true,
optimizer: GradientOptimizer = new LBFGS with L2Regularization,
objective: OptimizableObjectives.Multiclass = OptimizableObjectives.sparseLogMulticlass,
maxIterations: Int = 200,
nThreads: Int = Runtime.getRuntime.availableProcessors())(implicit random: scala.util.Random)
extends OptimizingLinearVectorClassifierTrainer(optimizer, useParallel, useOnlineTrainer = false, objective, maxIterations, -1, nThreads)
/** An OptimizingLinearVectorClassifierTrainer pre-tuned with default arguments well-suited to training an L2-regularized linear SVM. */
class SVMLinearVectorClassifierTrainer(nThreads: Int = 1, l2: Double = 0.1)(implicit random: scala.util.Random) extends OptimizingLinearVectorClassifierTrainer(optimizer=null, useParallelTrainer=false, useOnlineTrainer=false, objective=null, miniBatch= -1, maxIterations= -1, nThreads= -1) {
val baseTrainer = new backend.SVMMulticlassTrainer(nThreads, l2)
override def train[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:C, trainLabels:Iterable[L], l2f:L=>F, diagnostic:C=>Unit): C = {
baseTrainer.baseTrain(classifier, trainLabels.map(_.target.intValue).toSeq, trainLabels.map(l2f(_).value).toSeq, trainLabels.map(l => 1.0).toSeq, c => ())
classifier
}
}
/** Creates a trained naive Bayes classifier by counting feature occurrences, smoothed with pseudo-counts (m-Estimates).
Note that contrary to tradition, this naive Bayes classifier does not include a "bias" weight P(class); it only includes the feature weights, P(feature|class).
If you want a "bias" weight you must include in your data a feature that always has value 1.0. */
class NaiveBayesClassifierTrainer(pseudoCount:Double = 0.1) extends LinearVectorClassifierTrainer {
val baseTrainer = new backend.NaiveBayes(pseudoCount)
def train[C<:LinearVectorClassifier[L,F],L<:LabeledDiscreteVar,F<:VectorVar](classifier:C, trainLabels:Iterable[L], l2f:L=>F): C = {
baseTrainer.baseTrain(classifier, trainLabels.map(_.target.intValue).toSeq, trainLabels.map(l2f(_).value).toSeq, trainLabels.map(l => 1.0).toSeq, c => ())
classifier
}
}
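// Usage sketch (added for illustration; not part of the original file). Trains the naive
// Bayes classifier defined above; `labels` and `l2f` are caller-supplied.
object NaiveBayesTrainerUsageSketch {
  def run[L <: LabeledDiscreteVar, F <: VectorVar](labels: Iterable[L], l2f: L => F): Double = {
    val classifier = new NaiveBayesClassifierTrainer(pseudoCount = 0.1).train(labels, l2f)
    classifier.accuracy(labels) // fraction of labels whose best-scoring class matches the target
  }
}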
// Decision trees. Just one simple example so far. -akm
class DecisionTreeClassifier[L<:DiscreteVar,F<:VectorVar](val tree:DTree, val labelToFeatures:L=>F) extends VectorClassifier[L,F] {
def classification(label:L): Classification[L] = new Classification(label, predict(labelToFeatures(label).value))
def predict(features: Tensor1) = DTree.score(features, tree)
}
class ID3DecisionTreeClassifier(implicit random: scala.util.Random) extends VectorClassifierTrainer {
def train[L<:LabeledDiscreteVar,F<:VectorVar](labels:Iterable[L], l2f:L=>F): DecisionTreeClassifier[L,F] = {
val labelSize = labels.head.domain.size
val instances = labels.toSeq.map(label => DecisionTreeTrainer.Instance(l2f(label).value, new SingletonBinaryTensor1(labelSize, label.target.intValue), 1.0))
val treeTrainer = new ID3DecisionTreeTrainer // TODO We could make this a flexible choice later. -akm
val dtree = treeTrainer.train(instances)
new DecisionTreeClassifier(dtree, l2f)
}
}
|
iesl/fuse_ttl
|
src/factorie-factorie_2.11-1.1/src/main/scala/cc/factorie/app/classify/Classifier.scala
|
Scala
|
apache-2.0
| 12,013 |
package my.game.pkg.screens
// import my.game.pkg.screens.BaseScreen
import my.game.pkg.utils.Utils._
import my.game.pkg.Asteroidsexample
import my.game.pkg.Settings.font_gen
import com.badlogic.gdx.Game
import com.badlogic.gdx.scenes.scene2d.{Stage, Actor}
import com.badlogic.gdx.Gdx
import com.badlogic.gdx.utils.viewport.{Viewport, ScreenViewport}
import com.badlogic.gdx.scenes.scene2d.ui.{Table, Label, Skin, TextButton}
import com.badlogic.gdx.scenes.scene2d.ui.TextButton.TextButtonStyle
import com.badlogic.gdx.graphics.g2d.BitmapFont
import com.badlogic.gdx.scenes.scene2d.utils.ChangeListener
import com.badlogic.gdx.scenes.scene2d.utils.ChangeListener.ChangeEvent
import com.badlogic.gdx.graphics.{Color, Texture, Pixmap}
import com.badlogic.gdx.graphics.Pixmap.Format
import com.badlogic.gdx.graphics.g2d.freetype._
import scala.util.Random
import scala.math
import scala.language.implicitConversions._
import scala.collection.JavaConversions._
class MainMenu(
stage:Stage = new Stage(new ScreenViewport()),
game: Asteroidsexample
) extends BaseScreen(stage) {
val skin = new Skin()
skin.add("default", new BitmapFont())
val pixmap = new Pixmap(1, 1, Format.RGBA8888);
pixmap.setColor(Color.WHITE);
pixmap.fill();
skin.add("white", new Texture(pixmap));
// label = new Label("New game")
// label = new Label("New game")
// label = new Label("New game")
// label = new Label("New game")
val textButtonStyle = new TextButtonStyle()
// textButtonStyle.up = skin.newDrawable("white", Color.DARK_GRAY)
// textButtonStyle.down = skin.newDrawable("white", Color.DARK_GRAY)
textButtonStyle.checked = skin.newDrawable("white", Color.BLUE)
textButtonStyle.over = skin.newDrawable("white", Color.LIGHT_GRAY)
// textButtonStyle.font = skin.getFont("default")
textButtonStyle.font = font_gen(20)
skin.add("default", textButtonStyle)
val label_style = new Label.LabelStyle(font_gen(40), Color.WHITE)
skin.add("game_name", label_style)
val table = new Table()
table.setFillParent(true)
stage.addActor(table)
val game_name = new Label("ASTEROIDS", skin, "game_name")
val start_game = new TextButton("Start game", skin);
val hall_of_fame = new TextButton("Hall of Fame", skin);
table.add(game_name).spaceBottom(30).row()
table.add(start_game).spaceBottom(20);
table.row()
table.add(hall_of_fame)
start_game.addListener(new ChangeListener() {
def changed(event:ChangeEvent, actor:Actor) {
game.setGame()
}
})
hall_of_fame.addListener(new ChangeListener() {
def changed(event: ChangeEvent, actor:Actor) {
game.setHallOfFame()
}
})
}
|
raymank26/asteroids-scala
|
common/src/main/scala/screens/main_menu.scala
|
Scala
|
gpl-3.0
| 2,753 |
/*
* Created on 2010/04/22
* Copyright (c) 2010-2011, Wei-ju Wu.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* Neither the name of Wei-ju Wu nor the names of its contributors may
* be used to endorse or promote products derived from this software without
* specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package org.zmpp.glk.io
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import java.io._
@RunWith(classOf[JUnitRunner])
class GlkFileSystemSpec extends FlatSpec with ShouldMatchers {
"GlkFileSystem" should "be initialized" in {
val fileSystem = new GlkFileSystem
fileSystem.iterate(0) should be (null)
}
it should "create a fileref by name" in {
val fileSystem = new GlkFileSystem
fileSystem.createFileRefByName(0, "myfile", 0)
val fileRef = fileSystem.iterate(0)
fileRef should not be (null)
fileSystem.iterate(fileRef.id) should be (null)
}
}
|
logicmoo/zmpp2
|
zmpp-glk/src/test/scala/GlkFileSystemTest.scala
|
Scala
|
bsd-3-clause
| 2,251 |
package geostat
/**
* UndirectedLink class
*
* @param nodeA first node
* @param nodeB second node
*/
@SerialVersionUID(123L)
class UndirectedLink(nodeA: MapPoint, nodeB: MapPoint) extends Link(nodeA, nodeB) {
override def hashCode: Int = {
val prime = 31L;
var result = 1L;
var a = 0L;
var b = 0L;
if (nodeA.key < nodeB.key) {
a = nodeA.key
b = nodeB.key
} else {
a = nodeB.key
b = nodeA.key
}
result = prime * result + (a ^ (a >>> 32L))
result = prime * result + (b ^ (b >>> 32L))
result.toInt
}
  override def equals(that: Any): Boolean = that match {
    case tt: UndirectedLink =>
      (nodeA.equals(tt.nodeA) && nodeB.equals(tt.nodeB)) ||
        (nodeA.equals(tt.nodeB) && nodeB.equals(tt.nodeA))
    case _ => false // non-UndirectedLink values are never equal; the previous unchecked cast could throw
  }
}
|
alessandroadamo/geostat
|
src/main/scala/geostat/UndirectedLink.scala
|
Scala
|
lgpl-3.0
| 815 |
package redeyes.api.http
import redeyes.api._
import redeyes.parser._
import scalaz.std.list._
import scalaz.std.string._
/**
* This module describes segments of paths that appear in a request.
*/
trait Paths extends ApiModule {
import charParser._
/**
* Describes a prefix of a path -- that is, a path possibly followed by something else.
*
* path("/foo/bar/baz")
*/
def path(value: String): Api[String] = path(string(value))
/**
* Describes a path followed by the end of input.
*/
def path$(value: String): Api[String] = path(string(value) <~< char('/').maybe <~< charEnd)
/**
   * Describes a numeric identifier in the path.
*
* {{{
* // /stores/123
* path("/stores/") >~> pathId
* }}}
*/
val pathId: Api[BigInt] = path(integer)
/**
* Describes anything but slashes in a path element. Requires at least one character.
*/
val pathElement: Api[String] = path(satisfy(c => c != '/').some.string)
/**
* Describes the remainder of the path.
*/
val remainingPath: Api[String] = path(anyString <~< charEnd)
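  // Hedged usage sketch, not part of the original source; it only reuses combinators
  // shown in this file (`path`, `pathElement`, `remainingPath`, `>~>`). The value names
  // and routes are illustrative.
  //
  //   // /users/alice        -> "alice"
  //   val userName: Api[String] = path("/users/") >~> pathElement
  //
  //   // /static/css/app.css -> "css/app.css"
  //   val staticAsset: Api[String] = path("/static/") >~> remainingPath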
}
|
redeyes/redeyes
|
src/main/scala/redeyes/api/http/paths.scala
|
Scala
|
mit
| 1,096 |
package com.overviewdocs.models.tables
import com.overviewdocs.database.Slick.api._
import com.overviewdocs.models.File
class FilesImpl(tag: Tag) extends Table[File](tag, "file") {
def id = column[Long]("id", O.PrimaryKey)
def referenceCount = column[Int]("reference_count")
def name = column[String]("name")
def contentsLocation = column[String]("contents_location")
def contentsSize = column[Long]("contents_size")
def contentsSha1 = column[Array[Byte]]("contents_sha1")
def viewLocation = column[String]("view_location")
def viewSize = column[Long]("view_size")
def * = (
id,
referenceCount,
name,
contentsLocation,
contentsSize,
contentsSha1,
viewLocation,
viewSize
) <> (File.tupled, File.unapply)
}
object Files extends TableQuery(new FilesImpl(_))
|
overview/overview-server
|
common/src/main/scala/com/overviewdocs/models/tables/Files.scala
|
Scala
|
agpl-3.0
| 813 |
package pl.touk.nussknacker.engine.avro.schemaregistry.confluent.serialization.jsonpayload
import io.confluent.kafka.schemaregistry.avro.AvroSchema
import org.apache.avro.Schema
import org.apache.avro.io.{Encoder, NoWrappingJsonEncoder}
import org.apache.kafka.common.serialization.Serializer
import pl.touk.nussknacker.engine.avro.schema.{AvroSchemaEvolution, DefaultAvroSchemaEvolution}
import pl.touk.nussknacker.engine.avro.schemaregistry.confluent.ConfluentUtils
import pl.touk.nussknacker.engine.avro.schemaregistry.confluent.client.{ConfluentSchemaRegistryClient, ConfluentSchemaRegistryClientFactory}
import pl.touk.nussknacker.engine.avro.schemaregistry.confluent.serialization.ConfluentKafkaAvroSerializer
import pl.touk.nussknacker.engine.avro.serialization.KafkaAvroValueSerializationSchemaFactory
import pl.touk.nussknacker.engine.kafka.KafkaConfig
import java.io.OutputStream
//TODO: handle situation, where we have both json and avro payloads for one schema registry
class ConfluentJsonPayloadSerializerFactory(schemaRegistryClientFactory: ConfluentSchemaRegistryClientFactory)
extends KafkaAvroValueSerializationSchemaFactory {
override protected def createValueSerializer(schemaOpt: Option[Schema], version: Option[Int], kafkaConfig: KafkaConfig): Serializer[Any] = {
val schemaRegistryClient = schemaRegistryClientFactory.create(kafkaConfig)
val avroSchemaOpt = schemaOpt.map(ConfluentUtils.convertToAvroSchema(_, version))
new JsonPayloadKafkaSerializer(kafkaConfig, schemaRegistryClient, new DefaultAvroSchemaEvolution, avroSchemaOpt, isKey = false)
}
}
class JsonPayloadKafkaSerializer(kafkaConfig: KafkaConfig,
confluentSchemaRegistryClient: ConfluentSchemaRegistryClient,
schemaEvolutionHandler: AvroSchemaEvolution,
avroSchemaOpt: Option[AvroSchema], isKey: Boolean) extends ConfluentKafkaAvroSerializer(kafkaConfig, confluentSchemaRegistryClient, schemaEvolutionHandler, avroSchemaOpt, isKey = false) {
override protected def encoderToUse(schema: Schema, out: OutputStream): Encoder = new NoWrappingJsonEncoder(schema, out)
override protected def writeHeader(data: Any, avroSchema: Schema, schemaId: Int, out: OutputStream): Unit = {}
}
|
TouK/nussknacker
|
utils/avro-components-utils/src/main/scala/pl/touk/nussknacker/engine/avro/schemaregistry/confluent/serialization/jsonpayload/ConfluentJsonPayloadSerializerFactory.scala
|
Scala
|
apache-2.0
| 2,293 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.scaladsl.persistence
import akka.persistence.query.Offset
import akka.stream.scaladsl
import akka.{ Done, NotUsed }
import scala.concurrent.Future
import scala.concurrent.duration._
import scala.reflect.ClassTag
/**
* At system startup all [[PersistentEntity]] classes must be registered here
* with [[PersistentEntityRegistry#register]].
*
* Later, [[com.lightbend.lagom.scaladsl.persistence.PersistentEntityRef]] can be
* retrieved with [[PersistentEntityRegistry#refFor]].
* Commands are sent to a [[com.lightbend.lagom.scaladsl.persistence.PersistentEntity]]
* using a `PersistentEntityRef`.
*/
trait PersistentEntityRegistry {
/**
* At system startup all [[com.lightbend.lagom.scaladsl.persistence.PersistentEntity]]
* classes must be registered with this method.
*
* The `entityFactory` will be called when a new entity instance is to be created.
* That will happen in another thread, so the `entityFactory` must be thread-safe, e.g.
* not close over shared mutable state that is not thread-safe.
*/
def register(entityFactory: => PersistentEntity): Unit
/**
* Retrieve a [[com.lightbend.lagom.scaladsl.persistence.PersistentEntityRef]] for a
* given [[com.lightbend.lagom.scaladsl.persistence.PersistentEntity]] class
* and identifier. Commands are sent to a `PersistentEntity` using a `PersistentEntityRef`.
*/
def refFor[P <: PersistentEntity: ClassTag](entityId: String): PersistentEntityRef[P#Command]
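  /* Hedged usage sketch, not part of the original source. `OrderEntity`, `AddOrderItem`,
   * `orderId` and `itemId` are assumptions for illustration; only `register` and `refFor`
   * are defined by this trait.
   *
   *   registry.register(new OrderEntity)
   *   val ref = registry.refFor[OrderEntity](orderId)
   *   ref.ask(AddOrderItem(itemId))   // completes with the command's reply
   */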
/**
* A stream of the persistent events that have the given `aggregateTag`, e.g.
* all persistent events of all `Order` entities.
*
* The type of the offset is journal dependent, some journals use time-based
* UUID offsets, while others use sequence numbers. The passed in `fromOffset`
* must either be [[akka.persistence.query.NoOffset]], or an offset that has previously been produced
* by this journal.
*
* The stream will begin with events starting ''after'' `fromOffset`.
* To resume an event stream, store the `Offset` corresponding to the most
* recently processed `Event`, and pass that back as the value for
* `fromOffset` to start the stream from events following that one.
*
* @throws IllegalArgumentException If the `fromOffset` type is not supported
* by this journal.
*/
def eventStream[Event <: AggregateEvent[Event]](
aggregateTag: AggregateEventTag[Event],
fromOffset: Offset
): scaladsl.Source[EventStreamElement[Event], NotUsed]
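  /* Hedged usage sketch, not part of the original source. `ShoppingCartEvent.Tag`,
   * `storedOffset`, `handle` and `saveOffset` are assumptions for illustration; the
   * stream itself comes from `eventStream` above (an Akka Streams Source).
   *
   *   registry
   *     .eventStream(ShoppingCartEvent.Tag, storedOffset.getOrElse(NoOffset))
   *     .mapAsync(1)(element => handle(element.event).map(_ => element.offset))
   *     .runWith(Sink.foreach(saveOffset))
   */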
/**
* Gracefully stop the persistent entities and leave the cluster.
* The persistent entities will be started on another node when
* new messages are sent to them.
*
* @return the `Future` is completed when the node has been
* removed from the cluster
*/
def gracefulShutdown(timeout: FiniteDuration): Future[Done]
}
|
edouardKaiser/lagom
|
persistence/scaladsl/src/main/scala/com/lightbend/lagom/scaladsl/persistence/PersistentEntityRegistry.scala
|
Scala
|
apache-2.0
| 2,934 |
import java.io._
trait NotSerializableInterface { def apply(a: Any): Any }
abstract class NotSerializableClass { def apply(a: Any): Any }
// SAM type that supports lambdas-as-invoke-dynamic
trait IsSerializableInterface extends java.io.Serializable { def apply(a: Any): Any }
// SAM type that still requires lambdas-as-anonymous-classes
abstract class IsSerializableClass extends java.io.Serializable { def apply(a: Any): Any }
object Test {
def main(args: Array[String]): Unit = {
val nsi: NotSerializableInterface = x => x
val nsc: NotSerializableClass = x => x
import SerDes._
assertNotSerializable(nsi)
assertNotSerializable(nsc)
assert(serializeDeserialize[IsSerializableInterface](x => x).apply("foo") == "foo")
assert(serializeDeserialize[IsSerializableClass](x => x).apply("foo") == "foo")
assert(ObjectStreamClass.lookup(((x => x): IsSerializableClass).getClass).getSerialVersionUID == 0)
}
}
object SerDes {
def assertNotSerializable(a: AnyRef): Unit = {
try {
serialize(a)
assert(false)
} catch {
case _: NotSerializableException => // okay
}
}
def serialize(obj: AnyRef): Array[Byte] = {
val buffer = new ByteArrayOutputStream
val out = new ObjectOutputStream(buffer)
out.writeObject(obj)
buffer.toByteArray
}
def deserialize(a: Array[Byte]): AnyRef = {
val in = new ObjectInputStream(new ByteArrayInputStream(a))
in.readObject
}
def serializeDeserialize[T <: AnyRef](obj: T) = deserialize(serialize(obj)).asInstanceOf[T]
}
|
scala/scala
|
test/files/run/sammy_seriazable.scala
|
Scala
|
apache-2.0
| 1,547 |
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn.DistKLDivCriterion
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class DistKLDivCriterionSpec extends FlatSpec with BeforeAndAfter with Matchers{
before {
if (!TH.hasTorch()) {
cancel("Torch is not installed")
}
}
"A DistKLDivCriterion " should "generate correct output and grad" in {
val seed = 100
RNG.setSeed(seed)
val input = Tensor[Double](4, 10).apply1(e => Random.nextDouble())
val target = Tensor[Double](4, 10).apply1(e => Random.nextDouble())
    val code = "torch.manualSeed(" + seed + ")\n" +
      "module = nn.DistKLDivCriterion(true)\n" +
      "output = module:forward(input, target)\n" +
      "gradInput = module:backward(input, target)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "target" -> target),
Array("output", "gradInput"))
val luaOutput1 = torchResult("output").asInstanceOf[Double]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Tensor[Double]]
val module = new DistKLDivCriterion[Double]()
val start = System.nanoTime()
val output = module.forward(input, target)
val gradInput = module.backward(input, target)
val end = System.nanoTime()
val scalaTime = end - start
output should be(luaOutput1)
gradInput should be(luaOutput2)
println("Test case : DistKLDivCriterion, Torch : " + luaTime +
" s, Scala : " + scalaTime / 1e9 + " s")
}
}
|
SeaOfOcean/BigDL
|
dl/src/test/scala/com/intel/analytics/bigdl/torch/DistKLDivCriterionSpec.scala
|
Scala
|
apache-2.0
| 2,449 |
package gie.ggdrive
import gie.utils.prop.Configuration
import java.io.{FileNotFoundException, RandomAccessFile, File}
import gie.file_utils.ImplicitFileOps._
import gie.utils.{loan}
import gie.file_utils.file
import scala.collection.mutable
import scala.concurrent.{Promise, Future, ExecutionContext}
import com.typesafe.scalalogging.slf4j.LazyLogging
import scala.util.{Failure, Success, Try}
import java.util.concurrent.atomic.AtomicBoolean
package qrec {
trait QRec
case class Rec(resourceId: String, inner: QRec)
case class Noop() extends QRec
case class Download() extends QRec
}
class GDriveQueue(config: Configuration, store: FileStore, gdrive: GDrive)(implicit executor: ExecutionContext) extends ThreadLocks with LazyLogging { driveQueue=>
type ResourceId = String
private val queueRoot = file( config('queue_root) ) / "queue"
{
queueRoot.mkdirs()
}
private class ResourceQueue {
type QueueItem = (qrec.QRec, AtomicBoolean, Promise[Any], AtomicBoolean=>Any)
var current: QueueItem = _
val queue = new mutable.Queue[QueueItem]()
}
private val m_busyResources = new mutable.HashMap[ResourceId, ResourceQueue]()
private var m_currentQueueRecordId: Long = 1
private def impl_nextRecordNo() = {
val r = m_currentQueueRecordId
m_currentQueueRecordId += 1
r
}
private def impl_CreateRecordFile() = queueRoot / impl_nextRecordNo().toString create_!()
private def impl_writeRecord[T <: qrec.QRec](id: ResourceId, record: T): qrec.Rec = {
val tmpRec = qrec.Rec(id, record)
val tmpRecBin = checksum_serializer.sumFrom( tmpRec )
loan.acquire( impl_CreateRecordFile() ){ os =>
os.write(tmpRecBin)
os.flush()
os.getFD.sync()
}
tmpRec
}
//private def impl_sync_scheduleNext(resourceId: ResourceId):Unit = withWriteLock{ impl_ns_scheduleNext(resourceId) }
private def impl_ns_scheduleNext(resourceId: ResourceId): Unit = {
m_busyResources.get(resourceId).fold {
logger.debug(s"[${resourceId}]: no queue, no scheduling next.")
} {
resourceQueue =>
logger.debug(s"[${resourceId}]: scheduling next.")
if(resourceQueue.current ne null) {
logger.debug(s"[${resourceId}]: still running task, delaying.")
} else if (resourceQueue.queue.isEmpty) {
logger.debug(s"[${resourceId}]: queue is empty, deleting.")
val r = m_busyResources.remove(resourceId)
assume(r.isDefined)
} else {
val job = resourceQueue.queue.dequeue()
logger.debug(s"[${resourceId}]: scheduling (${job}).")
resourceQueue.current = job
val fun = job._4
val fut = Future {
fun(job._2)
}
job._3.completeWith( fut )
fut.onComplete{ r=>
logger.debug(s"[${resourceId}]: completed with: ${r}")
driveQueue.withWriteLock{
resourceQueue.current = null
impl_ns_scheduleNext(resourceId)
}
}
}
}
}
private def impl_ns_enqueue[T](resourceId: ResourceId, op: qrec.QRec)(fun: AtomicBoolean=>T): Future[T] = {
val resourceQueue = m_busyResources.getOrElseUpdate(resourceId, new ResourceQueue)
val prom = Promise[T]()
val job = ((op, new AtomicBoolean(false), prom.asInstanceOf[Promise[Any]], fun))
resourceQueue.queue.enqueue(job)
prom.future
}
private def impl_sync_scheduleOpForResource[T](resourceId: ResourceId, op: qrec.QRec)(fun: AtomicBoolean=>T): Future[T] = withWriteLock {
assume(resourceId ne null)
assume(resourceId.length>1)
val fut = impl_ns_enqueue(resourceId, op)(fun)
impl_ns_scheduleNext(resourceId)
fut
}
private def impl_checkInterrupted(interrupt: AtomicBoolean){
if(interrupt.get) throw new InterruptedException()
}
private def impl_DownloadFile(resourceId: ResourceId):Future[Unit] =
impl_sync_scheduleOpForResource(resourceId, qrec.Download()) {
interrupt=>
logger.debug(s"[${resourceId}]: downloading file.")
val file = gdrive.fileFromId(resourceId)
loan.acquire(gdrive.downloadFile(file), store.createFile(resourceId)){ (is, or) =>
gie.file_utils.copyEx()(is.read _)(or.write _){ impl_checkInterrupted(interrupt) }
}
}
def openFile(resourceId: ResourceId, downloadIfNoLocal: Boolean = true): Future[RandomAccessFile] = withWriteLock {
Future.fromTry( Try{ store.openFile(resourceId) } ).recoverWith{
case e:FileNotFoundException if(downloadIfNoLocal) => impl_DownloadFile(resourceId) flatMap { _ => openFile(resourceId, false) }
}
}
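  /* Hedged usage sketch, not part of the original source. The resource id and
   * `readFrom` are illustrative only; `openFile` is the method defined above and
   * downloads the file first when it is not yet in the local store.
   *
   *   queue.openFile("someDriveResourceId").foreach { raf =>
   *     try readFrom(raf) finally raf.close()
   *   }
   */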
}
|
gienanesobaka/ggdrive
|
src/main/scala/gdrive_queue.scala
|
Scala
|
gpl-2.0
| 4,649 |
package pureconfig.generic
import scala.language.experimental.macros
import pureconfig._
/** An object that, when imported, provides implicit `ConfigReader` and `ConfigWriter` instances for value classes,
* tuples, case classes and sealed traits.
*/
object auto {
implicit def exportReader[A]: Exported[ConfigReader[A]] = macro ExportMacros.exportDerivedReader[A]
implicit def exportWriter[A]: Exported[ConfigWriter[A]] = macro ExportMacros.exportDerivedWriter[A]
}
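// Hedged usage sketch, not part of the original source. `AppConfig` and the way the
// configuration is loaded are assumptions for illustration; the point is that the
// wildcard import below derives the needed ConfigReader automatically.
//
//   import pureconfig._
//   import pureconfig.generic.auto._
//
//   case class AppConfig(host: String, port: Int)
//   // With the import in scope, loading (via the entry point of the pureconfig
//   // version in use, e.g. loadConfig[AppConfig]) needs no hand-written reader.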
|
melrief/pureconfig
|
modules/generic/src/main/scala/pureconfig/generic/auto.scala
|
Scala
|
mpl-2.0
| 477 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.testingUtils
import java.util
import java.util.concurrent._
import java.util.{Collections, UUID}
import akka.actor.{ActorRef, ActorSystem, Kill, Props}
import akka.pattern.{Patterns, ask}
import com.typesafe.config.ConfigFactory
import grizzled.slf4j.Logger
import org.apache.flink.api.common.time.Time
import org.apache.flink.configuration._
import org.apache.flink.runtime.akka.AkkaUtils
import org.apache.flink.runtime.clusterframework.types.ResourceID
import org.apache.flink.runtime.concurrent.{ScheduledExecutor, ScheduledExecutorServiceAdapter}
import org.apache.flink.runtime.highavailability.HighAvailabilityServices
import org.apache.flink.runtime.instance.{ActorGateway, AkkaActorGateway}
import org.apache.flink.runtime.leaderretrieval.StandaloneLeaderRetrievalService
import org.apache.flink.runtime.messages.TaskManagerMessages.{NotifyWhenRegisteredAtJobManager, RegisteredAtJobManager}
import org.apache.flink.runtime.metrics.{MetricRegistryConfiguration, MetricRegistryImpl}
import org.apache.flink.runtime.taskmanager.TaskManager
import org.apache.flink.runtime.{FlinkActor, LeaderSessionMessageFilter, LogMessages}
import scala.concurrent.duration.{TimeUnit, _}
import scala.concurrent.{Await, ExecutionContext, ExecutionContextExecutor}
import scala.language.postfixOps
/**
* Convenience functions to test actor based components.
*/
object TestingUtils {
private var sharedExecutorInstance: ScheduledExecutorService = _
val testConfig = ConfigFactory.parseString(getDefaultTestingActorSystemConfigString)
val TESTING_DURATION = 2 minute
val TESTING_TIMEOUT = 1 minute
val TIMEOUT = Time.minutes(1L)
val DEFAULT_AKKA_ASK_TIMEOUT = "200 s"
def getDefaultTestingActorSystemConfigString: String = {
val logLevel = AkkaUtils.getLogLevel
s"""akka.daemonic = on
|akka.test.timefactor = 10
|akka.loggers = ["akka.event.slf4j.Slf4jLogger"]
|akka.loglevel = $logLevel
|akka.stdout-loglevel = OFF
|akka.jvm-exit-on-fatal-error = off
|akka.log-config-on-start = off
""".stripMargin
}
def getDefaultTestingActorSystemConfig = testConfig
def infiniteTime: Time = {
Time.milliseconds(Integer.MAX_VALUE);
}
/**
* Gets the shared global testing execution context
*/
def defaultExecutionContext: ExecutionContextExecutor = {
ExecutionContext.fromExecutor(defaultExecutor)
}
/**
* Gets the shared global testing scheduled executor
*/
def defaultExecutor: ScheduledExecutorService = {
synchronized {
if (sharedExecutorInstance == null || sharedExecutorInstance.isShutdown) {
sharedExecutorInstance = Executors.newSingleThreadScheduledExecutor();
}
sharedExecutorInstance
}
}
def defaultScheduledExecutor: ScheduledExecutor = {
val scheduledExecutorService = defaultExecutor
new ScheduledExecutorServiceAdapter(scheduledExecutorService)
}
/** Returns an [[ExecutionContext]] which uses the current thread to execute the runnable.
*
* @return Direct [[ExecutionContext]] which executes runnables directly
*/
def directExecutionContext = ExecutionContext
.fromExecutor(org.apache.flink.runtime.concurrent.Executors.directExecutor())
/** @return A new [[QueuedActionExecutionContext]] */
def queuedActionExecutionContext = {
new QueuedActionExecutionContext(new ActionQueue())
}
  /** [[ExecutionContext]] which queues up [[Runnable]]s in an [[ActionQueue]] instead of
    * executing them. If the automatic execution mode is activated, then the [[Runnable]]s are
    * executed immediately.
    */
class QueuedActionExecutionContext private[testingUtils] (val actionQueue: ActionQueue)
extends AbstractExecutorService with ExecutionContext with ScheduledExecutorService {
var automaticExecution = false
def toggleAutomaticExecution() = {
automaticExecution = !automaticExecution
}
override def execute(runnable: Runnable): Unit = {
if(automaticExecution){
runnable.run()
}else {
actionQueue.queueAction(runnable)
}
}
override def reportFailure(t: Throwable): Unit = {
t.printStackTrace()
}
override def scheduleAtFixedRate(
command: Runnable,
initialDelay: Long,
period: Long,
unit: TimeUnit): ScheduledFuture[_] = {
throw new UnsupportedOperationException()
}
override def schedule(command: Runnable, delay: Long, unit: TimeUnit): ScheduledFuture[_] = {
throw new UnsupportedOperationException()
}
override def schedule[V](callable: Callable[V], delay: Long, unit: TimeUnit)
: ScheduledFuture[V] = {
throw new UnsupportedOperationException()
}
override def scheduleWithFixedDelay(
command: Runnable,
initialDelay: Long,
delay: Long,
unit: TimeUnit): ScheduledFuture[_] = {
throw new UnsupportedOperationException()
}
override def shutdown(): Unit = ()
override def isTerminated: Boolean = false
override def awaitTermination(timeout: Long, unit: TimeUnit): Boolean = false
override def shutdownNow(): util.List[Runnable] = Collections.emptyList()
override def isShutdown: Boolean = false
}
/** Queue which stores [[Runnable]] */
class ActionQueue {
private val runnables = scala.collection.mutable.Queue[Runnable]()
def triggerNextAction() {
val r = runnables.dequeue
r.run()
}
def popNextAction(): Runnable = {
runnables.dequeue()
}
def queueAction(r: Runnable) {
runnables.enqueue(r)
}
def isEmpty: Boolean = {
runnables.isEmpty
}
}
/** Creates a local TaskManager in the given ActorSystem. It is given a
* [[StandaloneLeaderRetrievalService]] which returns the given jobManagerURL. After creating
* the TaskManager, waitForRegistration specifies whether one waits until the TaskManager has
* registered at the JobManager. An ActorGateway to the TaskManager is returned.
*
* @param actorSystem ActorSystem in which the TaskManager shall be started
* @param highAvailabilityServices Service factory for high availability
* @param configuration Configuration
* @param useLocalCommunication true if the network stack shall use exclusively local
* communication
* @param waitForRegistration true if the method will wait until the TaskManager has connected to
* the JobManager
* @return ActorGateway of the created TaskManager
*/
def createTaskManager(
actorSystem: ActorSystem,
highAvailabilityServices: HighAvailabilityServices,
configuration: Configuration,
useLocalCommunication: Boolean,
waitForRegistration: Boolean)
: ActorGateway = {
createTaskManager(
actorSystem,
highAvailabilityServices,
configuration,
useLocalCommunication,
waitForRegistration,
classOf[TestingTaskManager]
)
}
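  /* Hedged usage sketch, not part of the original source. `actorSystem`, `haServices`
   * and `config` are assumed to be provided by the surrounding test fixture.
   *
   *   val taskManager: ActorGateway = TestingUtils.createTaskManager(
   *     actorSystem,
   *     haServices,
   *     config,
   *     useLocalCommunication = true,
   *     waitForRegistration = true)
   */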
def createTaskManager(
actorSystem: ActorSystem,
highAvailabilityServices: HighAvailabilityServices,
configuration: Configuration,
useLocalCommunication: Boolean,
waitForRegistration: Boolean,
taskManagerClass: Class[_ <: TaskManager])
: ActorGateway = {
val resultingConfiguration = new Configuration()
resultingConfiguration.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "10m")
resultingConfiguration.addAll(configuration)
val metricRegistry = new MetricRegistryImpl(
MetricRegistryConfiguration.fromConfiguration(configuration))
val taskManagerResourceId = ResourceID.generate()
val taskManager = TaskManager.startTaskManagerComponentsAndActor(
resultingConfiguration,
taskManagerResourceId,
actorSystem,
highAvailabilityServices,
metricRegistry,
"localhost",
None,
useLocalCommunication,
taskManagerClass
)
val leaderId = if (waitForRegistration) {
val notificationResult = (taskManager ? NotifyWhenRegisteredAtJobManager)(TESTING_DURATION)
.mapTo[RegisteredAtJobManager]
Await.result(notificationResult, TESTING_DURATION).leaderId
} else {
HighAvailabilityServices.DEFAULT_LEADER_ID
}
new AkkaActorGateway(taskManager, leaderId)
}
/** Stops the given actor by sending it a Kill message
*
* @param actor
*/
def stopActor(actor: ActorRef): Unit = {
if (actor != null) {
actor ! Kill
}
}
/** Stops the given actor by sending it a Kill message
*
* @param actorGateway
*/
def stopActor(actorGateway: ActorGateway): Unit = {
if (actorGateway != null) {
stopActor(actorGateway.actor())
}
}
def stopActorGracefully(actor: ActorRef): Unit = {
val gracefulStopFuture = Patterns.gracefulStop(actor, TestingUtils.TESTING_TIMEOUT)
Await.result(gracefulStopFuture, TestingUtils.TESTING_TIMEOUT)
}
def stopActorGracefully(actorGateway: ActorGateway): Unit = {
stopActorGracefully(actorGateway.actor())
}
def stopActorsGracefully(actors: ActorRef*): Unit = {
val gracefulStopFutures = actors.flatMap{
actor =>
Option(actor) match {
case Some(actorRef) => Some(Patterns.gracefulStop(actorRef, TestingUtils.TESTING_TIMEOUT))
case None => None
}
}
implicit val executionContext = defaultExecutionContext
val globalStopFuture = scala.concurrent.Future.sequence(gracefulStopFutures)
Await.result(globalStopFuture, TestingUtils.TESTING_TIMEOUT)
}
def stopActorsGracefully(actors: java.util.List[ActorRef]): Unit = {
import scala.collection.JavaConverters._
stopActorsGracefully(actors.asScala: _*)
}
def stopActorGatewaysGracefully(actorGateways: ActorGateway*): Unit = {
val actors = actorGateways.flatMap {
actorGateway =>
Option(actorGateway) match {
case Some(actorGateway) => Some(actorGateway.actor())
case None => None
}
}
stopActorsGracefully(actors: _*)
}
def stopActorGatewaysGracefully(actorGateways: java.util.List[ActorGateway]): Unit = {
import scala.collection.JavaConverters._
stopActorGatewaysGracefully(actorGateways.asScala: _*)
}
  /** Creates a forwarding JobManager which sends all received messages to the forwarding target.
*
* @param actorSystem The actor system to start the actor in.
* @param forwardingTarget Target to forward to.
* @param leaderId leader id for the actor gateway
* @param actorName Name for forwarding Actor
* @return
*/
def createForwardingActor(
actorSystem: ActorSystem,
forwardingTarget: ActorRef,
leaderId: UUID,
actorName: Option[String] = None)
: ActorGateway = {
val actor = actorName match {
case Some(name) =>
actorSystem.actorOf(
Props(
classOf[ForwardingActor],
forwardingTarget,
Option(leaderId)),
name
)
case None =>
actorSystem.actorOf(
Props(
classOf[ForwardingActor],
forwardingTarget,
Option(leaderId))
)
}
new AkkaActorGateway(actor, leaderId)
}
class ForwardingActor(val target: ActorRef, val leaderSessionID: Option[UUID])
extends FlinkActor with LeaderSessionMessageFilter with LogMessages {
/** Handle incoming messages
*
* @return
*/
override def handleMessage: Receive = {
case msg => target.forward(msg)
}
override val log: Logger = Logger(getClass)
}
}
|
ueshin/apache-flink
|
flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingUtils.scala
|
Scala
|
apache-2.0
| 12,448 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.examples.similarproduct
import org.apache.predictionio.controller.P2LAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.data.storage.BiMap
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import grizzled.slf4j.Logger
import scala.collection.mutable.PriorityQueue
case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
lambda: Double,
seed: Option[Long]) extends Params
class ALSModel(
val productFeatures: Map[Int, Array[Double]],
val itemStringIntMap: BiMap[String, Int],
val items: Map[Int, Item]
) extends Serializable {
@transient lazy val itemIntStringMap = itemStringIntMap.inverse
override def toString = {
s" productFeatures: [${productFeatures.size}]" +
s"(${productFeatures.take(2).toList}...)" +
s" itemStringIntMap: [${itemStringIntMap.size}]" +
s"(${itemStringIntMap.take(2).toString}...)]" +
s" items: [${items.size}]" +
s"(${items.take(2).toString}...)]"
}
}
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends P2LAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
override
def train(sc: SparkContext, data: PreparedData): ALSModel = {
    require(!data.viewEvents.take(1).isEmpty,
      s"viewEvents in PreparedData cannot be empty." +
      " Please check if DataSource generates TrainingData" +
      " and Preparator generates PreparedData correctly.")
    require(!data.items.take(1).isEmpty,
      s"items in PreparedData cannot be empty." +
      " Please check if DataSource generates TrainingData" +
      " and Preparator generates PreparedData correctly.")
// create User and item's String ID to integer index BiMap
val userStringIntMap = BiMap.stringInt(data.viewEvents.map(_.user)) // MODIFIED
val itemStringIntMap = BiMap.stringInt(data.items.keys)
// collect Item as Map and convert ID to Int index
val items: Map[Int, Item] = data.items.map { case (id, item) =>
(itemStringIntMap(id), item)
}.collectAsMap.toMap
val mllibRatings = data.viewEvents
.map { r =>
// Convert user and item String IDs to Int index for MLlib
val uindex = userStringIntMap.getOrElse(r.user, -1)
val iindex = itemStringIntMap.getOrElse(r.item, -1)
if (uindex == -1)
logger.info(s"Couldn't convert nonexistent user ID ${r.user}"
+ " to Int index.")
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
((uindex, iindex), 1)
}.filter { case ((u, i), v) =>
// keep events with valid user and item index
(u != -1) && (i != -1)
}.reduceByKey(_ + _) // aggregate all view events of same user-item pair
.map { case ((u, i), v) =>
// MLlibRating requires integer index for user and item
MLlibRating(u, i, v)
}
.cache()
// MLLib ALS cannot handle empty training data.
require(!mllibRatings.take(1).isEmpty,
s"mllibRatings cannot be empty." +
" Please check if your events contain valid user and item ID.")
// seed for MLlib ALS
val seed = ap.seed.getOrElse(System.nanoTime)
val m = ALS.trainImplicit(
ratings = mllibRatings,
rank = ap.rank,
iterations = ap.numIterations,
lambda = ap.lambda,
blocks = -1,
alpha = 1.0,
seed = seed)
new ALSModel(
productFeatures = m.productFeatures.collectAsMap.toMap,
itemStringIntMap = itemStringIntMap,
items = items
)
}
override
def predict(model: ALSModel, query: Query): PredictedResult = {
val productFeatures = model.productFeatures
// convert items to Int index
val queryList: Set[Int] = query.items.map(model.itemStringIntMap.get(_))
.flatten.toSet
val queryFeatures: Vector[Array[Double]] = queryList.toVector
// productFeatures may not contain the requested item
.map { item => productFeatures.get(item) }
.flatten
val whiteList: Option[Set[Int]] = query.whiteList.map( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val blackList: Option[Set[Int]] = query.blackList.map ( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val ord = Ordering.by[(Int, Double), Double](_._2).reverse
val indexScores: Array[(Int, Double)] = if (queryFeatures.isEmpty) {
logger.info(s"No productFeatures vector for query items ${query.items}.")
Array[(Int, Double)]()
} else {
productFeatures.par // convert to parallel collection
.mapValues { f =>
queryFeatures.map{ qf =>
cosine(qf, f)
}.reduce(_ + _)
}
.filter(_._2 > 0) // keep items with score > 0
.seq // convert back to sequential collection
.toArray
}
val filteredScore = indexScores.view.filter { case (i, v) =>
isCandidateItem(
i = i,
items = model.items,
categories = query.categories,
categoryBlackList = query.categoryBlackList,
queryList = queryList,
whiteList = whiteList,
blackList = blackList
)
}
val topScores = getTopN(filteredScore, query.num)(ord).toArray
val itemScores = topScores.map { case (i, s) =>
ItemScore(
item = model.itemIntStringMap(i),
score = s
)
}
PredictedResult(itemScores)
}
private
def getTopN[T](s: Seq[T], n: Int)(implicit ord: Ordering[T]): Seq[T] = {
val q = PriorityQueue()
for (x <- s) {
if (q.size < n)
q.enqueue(x)
else {
// q is full
if (ord.compare(x, q.head) < 0) {
q.dequeue()
q.enqueue(x)
}
}
}
q.dequeueAll.toSeq.reverse
}
private
def cosine(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var n1: Double = 0
var n2: Double = 0
var d: Double = 0
while (i < size) {
n1 += v1(i) * v1(i)
n2 += v2(i) * v2(i)
d += v1(i) * v2(i)
i += 1
}
val n1n2 = (math.sqrt(n1) * math.sqrt(n2))
if (n1n2 == 0) 0 else (d / n1n2)
}
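  /* Worked example, not part of the original source: for v1 = (1.0, 0.0) and
   * v2 = (1.0, 1.0) the dot product d is 1.0, |v1| = 1.0 and |v2| = sqrt(2),
   * so cosine(v1, v2) = 1.0 / sqrt(2) ~= 0.707; a zero-length vector yields 0
   * through the guard above.
   */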
private
def isCandidateItem(
i: Int,
items: Map[Int, Item],
categories: Option[Set[String]],
categoryBlackList: Option[Set[String]],
queryList: Set[Int],
whiteList: Option[Set[Int]],
blackList: Option[Set[Int]]
): Boolean = {
whiteList.map(_.contains(i)).getOrElse(true) &&
blackList.map(!_.contains(i)).getOrElse(true) &&
// discard items in query as well
(!queryList.contains(i)) &&
// filter categories
categories.map { cat =>
items(i).categories.map { itemCat =>
          // keep this item if it has overlapping categories with the query
!(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(false) // discard this item if it has no categories
}.getOrElse(true) &&
categoryBlackList.map { cat =>
items(i).categories.map { itemCat =>
          // discard this item if it has overlapping categories with the query
(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(true) // keep this item if it has no categories
}.getOrElse(true)
}
}
|
takezoe/incubator-predictionio
|
examples/scala-parallel-similarproduct/rid-user-set-event/src/main/scala/ALSAlgorithm.scala
|
Scala
|
apache-2.0
| 8,234 |
package tech.sourced.berserker.normalizer.model
import org.apache.spark.sql.types.{BinaryType, StringType, StructField, StructType}
object Schema {
val files = StructType(
StructField("repoId", StringType, nullable = false) ::
StructField("repoUrl", StringType, nullable = false) ::
StructField("hash", StringType, nullable = false) ::
StructField("path", StringType, nullable = false) ::
StructField("lang", StringType, nullable = true) ::
StructField("uast", BinaryType, nullable = true) ::
Nil
)
}
|
bzz/berserker
|
normalizer/src/main/scala/tech/sourced/berserker/normalizer/model/Schema.scala
|
Scala
|
gpl-3.0
| 547 |
package guide
import scala.tools.nsc.Global
import scala.tools.nsc.transform.TypingTransformers
object _21_ParamAlias extends App {
def run(g: Global)(unit: g.CompilationUnit): Unit = {
object transformFields extends TypingTransformers {
override val global: g.type = g
import global._
object trans extends TypingTransformer(unit) {
override def transform(tree: Tree): Tree = tree match {
case vd: ValDef if vd.symbol.owner.isClass =>
val getter = vd.symbol.getterIn(vd.symbol.owner)
if (getter.isParamAccessor) {
if (vd.symbol.alias != NoSymbol) {
println(s"Eliding related method for ${vd.symbol.fullLocationString}, as it is an alias for ${vd.symbol.alias.fullLocationString} / ${vd.symbol.alias.getterIn(vd.symbol.alias.owner).defString}")
super.transform(vd)
} else {
val newMethodTree = {
val sym = currentOwner.newMethodSymbol(TermName(s"${vd.name.dropLocal}$$extraMethod"), vd.symbol.pos, Flag.SYNTHETIC | Flag.STABLE).setInfo(NullaryMethodType(vd.symbol.info))
DefDef(sym, gen.mkAttributedRef(vd.symbol))
}
Block(super.transform(vd) :: localTyper.typedPos(vd.pos)(newMethodTree) :: Nil, EmptyTree)
}
} else super.transform(vd)
case _ => super.transform (tree)
}
override def transformStats(stats: List[Tree], exprOwner: Symbol): List[Tree] = {
super.transformStats(stats, exprOwner).flatMap {
case Block(stats, EmptyTree) => stats
case x => x :: Nil
}
}
}
}
unit.body = transformFields.trans.transform(unit.body)
}
val g1 = newGlobal("-Xprint:all", extraPhases = g2 => newSubcomponent(g2, "typer")((g3, unit) => run(g3)(unit)) :: Nil)
val tree = compile("class C(val i: Int); class D(override val i: Int) extends C(i)", g1).assertNoErrors().tree
import g1._
print(show(tree, printIds = true))
/*
package <empty> {
class C extends Object {
<paramaccessor> private[this] val i: Int = _;
<synthetic> <stable> def i$info(): Int = C.this.i;
<stable> <accessor> <paramaccessor> def i(): Int = C.this.i;
def <init>(i: Int): C = {
C.this.i = i;
C.super.<init>();
()
}
};
class D extends C {
override <stable> <accessor> <paramaccessor> def i(): Int = (D.super.i(): Int);
def <init>(i: Int): D = {
D.super.<init>(i);
()
}
}
}
*/
}
|
retronym/scalac-survival-guide
|
src/main/scala/guide/_21_ParamAlias.scala
|
Scala
|
bsd-3-clause
| 2,551 |
package org.coroutines
import org.coroutines.common._
import scala.annotation.tailrec
import scala.collection._
import scala.language.experimental.macros
import scala.reflect.macros.whitebox.Context
/** Synthesizes all coroutine-related functionality.
*/
private[coroutines] class Synthesizer[C <: Context](val c: C)
extends Analyzer[C]
with CfgGenerator[C]
with AstCanonicalization[C] {
import c.universe._
val NUM_PREDEFINED_ENTRY_STUBS = 30
private def genEntryPoint(cfg: Cfg, subgraph: SubCfg)(
implicit t: Table
): Tree = {
val body = subgraph.emit(cfg)
val defname = TermName(s"$$ep${subgraph.uid}")
val defdef = if (subgraph.uid < NUM_PREDEFINED_ENTRY_STUBS) q"""
override def $defname(
${t.names.coroutineParam}:
_root_.org.coroutines.Coroutine.Instance[${t.yieldType}, ${t.returnType}]
): _root_.scala.Unit = {
$body
}
""" else q"""
def $defname(
${t.names.coroutineParam}:
_root_.org.coroutines.Coroutine.Instance[${t.yieldType}, ${t.returnType}]
): _root_.scala.Unit = {
$body
}
"""
defdef
}
private def genEntryPoints(cfg: Cfg)(implicit table: Table): Map[Long, Tree] = {
val entrypoints = for ((orignode, subgraph) <- cfg.subgraphs) yield {
(subgraph.uid, genEntryPoint(cfg, subgraph))
}
mutable.LinkedHashMap() ++= entrypoints.toSeq.sortBy(_._1)
}
private def genEnterMethod(entrypoints: Map[Long, Tree])(
implicit table: Table
): Tree = {
val rettpt = table.returnType
val yldtpt = table.yieldType
if (entrypoints.size == 1) {
val q"$_ def $ep0($_): _root_.scala.Unit = $_" = entrypoints(0)
q"""
def $$enter(
c: _root_.org.coroutines.Coroutine.Instance[$yldtpt, $rettpt]
): _root_.scala.Unit = $ep0(c)
"""
} else if (entrypoints.size == 2) {
val q"$_ def $ep0($_): _root_.scala.Unit = $_" = entrypoints(0)
val q"$_ def $ep1($_): _root_.scala.Unit = $_" = entrypoints(1)
q"""
def $$enter(
c: _root_.org.coroutines.Coroutine.Instance[$yldtpt, $rettpt]
): _root_.scala.Unit = {
val pc = _root_.org.coroutines.common.Stack.top(c.$$pcstack)
if (pc == 0) $ep0(c) else $ep1(c)
}
"""
} else {
val cases = for ((index, defdef) <- entrypoints) yield {
val q"$_ def $ep($_): _root_.scala.Unit = $rhs" = defdef
cq"${index.toShort} => $ep(c)"
}
q"""
def $$enter(
c: _root_.org.coroutines.Coroutine.Instance[$yldtpt, $rettpt]
): _root_.scala.Unit = {
val pc: Short = _root_.org.coroutines.common.Stack.top(c.$$pcstack)
(pc: @_root_.scala.annotation.switch) match {
case ..$cases
}
}
"""
}
}
private def genReturnValueMethods(cfg: Cfg)(implicit table: Table): List[Tree] = {
List(
genReturnValueMethod(cfg, typeOf[Boolean]),
genReturnValueMethod(cfg, typeOf[Byte]),
genReturnValueMethod(cfg, typeOf[Short]),
genReturnValueMethod(cfg, typeOf[Char]),
genReturnValueMethod(cfg, typeOf[Int]),
genReturnValueMethod(cfg, typeOf[Float]),
genReturnValueMethod(cfg, typeOf[Long]),
genReturnValueMethod(cfg, typeOf[Double]),
genReturnValueMethod(cfg, typeOf[Any])
)
}
private def genReturnValueMethod(cfg: Cfg, tpe: Type)(implicit table: Table): Tree = {
def genReturnValueStore(n: Node) = {
val sub = cfg.subgraphs(n.successors.head)
val pcvalue = sub.uid
val info = table(n.tree.symbol)
val eligible =
(isValType(info.tpe) && (info.tpe =:= tpe)) ||
(tpe =:= typeOf[Any])
if (eligible) {
if (info.tpe =:= typeOf[Unit]) {
(pcvalue, q"()")
} else {
val valuetree =
if (tpe =:= typeOf[Any]) q"v.asInstanceOf[${info.tpe}]" else q"v"
val rvset = info.storeTree(q"c", valuetree)
(pcvalue, q"$rvset")
}
} else {
(pcvalue,
q"""_root_.scala.sys.error("Return method called for incorrect type.")""")
}
}
val returnstores = cfg.start.dfs.collect {
case n @ Node.ApplyCoroutine(_, _, _) => genReturnValueStore(n)
}
val returnvaluemethod = returnValueMethodName(tpe)
val body = {
if (returnstores.size == 0) {
q"()"
} else if (returnstores.size == 1) {
returnstores(0)._2
} else if (returnstores.size == 2) {
q"""
val pc = _root_.org.coroutines.common.Stack.top(c.$$pcstack)
if (pc == ${returnstores(0)._1.toShort}) {
${returnstores(0)._2}
} else {
${returnstores(1)._2}
}
"""
} else {
val cases = for ((pcvalue, rvset) <- returnstores) yield {
cq"${pcvalue.toShort} => $rvset"
}
q"""
val pc = _root_.org.coroutines.common.Stack.top(c.$$pcstack)
(pc: @_root_.scala.annotation.switch) match {
case ..$cases
}
"""
}
}
q"""
def $returnvaluemethod(
c: _root_.org.coroutines.Coroutine.Instance[
${table.yieldType}, ${table.returnType}],
v: $tpe
): _root_.scala.Unit = {
$body
}
"""
}
def genVarPushesAndPops(cfg: Cfg)(implicit table: Table): (List[Tree], List[Tree]) = {
val stackVars = cfg.stackVars
val storedValVars = cfg.storedValVars
val storedRefVars = cfg.storedRefVars
def stackSize(vs: Map[Symbol, VarInfo]) = vs.map(_._2.stackpos._2).sum
def genVarPushes(allvars: Map[Symbol, VarInfo], stack: Tree): List[Tree] = {
val vars = allvars.filter(kv => stackVars.contains(kv._1))
val varsize = stackSize(vars)
val stacksize = math.max(table.initialStackSize, varsize)
val bulkpushes = if (vars.size == 0) Nil else List(q"""
_root_.org.coroutines.common.Stack.bulkPush($stack, $varsize, $stacksize)
""")
val args = vars.values.filter(_.isArg).toList
val argstores = for (a <- args) yield a.storeTree(q"$$c", q"${a.name}")
bulkpushes ::: argstores
}
val varpushes = {
genVarPushes(storedRefVars, q"$$c.$$refstack") ++
genVarPushes(storedValVars, q"$$c.$$valstack")
}
val varpops = (for ((sym, info) <- storedRefVars.toList) yield {
info.popTree
}) ++ (if (storedValVars.size == 0) Nil else List(
q"""
_root_.org.coroutines.common.Stack.bulkPop(
$$c.$$valstack, ${stackSize(storedValVars)})
"""
))
(varpushes, varpops)
}
def specArity1(
argtpts: List[Tree], yldtpt: Tree, rettpt: Tree
): (Tree, List[Tree]) = {
val tpe = argtpts(0).tpe
if (tpe == typeOf[scala.Boolean]) {
(tq"org.coroutines.Coroutine._1", argtpts :+ yldtpt :+ rettpt)
} else if (tpe == typeOf[scala.Byte]) {
(tq"org.coroutines.Coroutine._1", argtpts :+ yldtpt :+ rettpt)
} else if (tpe == typeOf[scala.Short]) {
val nme = TypeName(s"_1$$spec$$S")
(tq"org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tpe == typeOf[scala.Char]) {
val nme = TypeName(s"_1$$spec$$C")
(tq"org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tpe == typeOf[scala.Int]) {
val nme = TypeName(s"_1$$spec$$I")
(tq"org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tpe == typeOf[scala.Float]) {
val nme = TypeName(s"_1$$spec$$F")
(tq"org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tpe == typeOf[scala.Long]) {
val nme = TypeName(s"_1$$spec$$J")
(tq"org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tpe == typeOf[scala.Double]) {
val nme = TypeName(s"_1$$spec$$D")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else {
val nme = TypeName(s"_1$$spec$$L")
(tq"_root_.org.coroutines.$nme", argtpts :+ yldtpt :+ rettpt)
}
}
def specArity2(
argtpts: List[Tree], yldtpt: Tree, rettpt: Tree
): (Tree, List[Tree]) = {
val (tp0, tp1) = (argtpts(0).tpe, argtpts(1).tpe)
if (tp0 == typeOf[scala.Int] && tp1 == typeOf[scala.Int]) {
val nme = TypeName(s"_2$$spec$$II")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Long] && tp1 == typeOf[Int]) {
val nme = TypeName(s"_2$$spec$$JI")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Double] && tp1 == typeOf[Int]) {
val nme = TypeName(s"_2$$spec$$DI")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp1 == typeOf[Int]) {
val nme = TypeName(s"_2$$spec$$LI")
(tq"_root_.org.coroutines.$nme", argtpts(0) :: yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Int] && tp1 == typeOf[Long]) {
val nme = TypeName(s"_2$$spec$$IJ")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Long] && tp1 == typeOf[Long]) {
val nme = TypeName(s"_2$$spec$$JJ")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Double] && tp1 == typeOf[Long]) {
val nme = TypeName(s"_2$$spec$$DJ")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp1 == typeOf[Long]) {
val nme = TypeName(s"_2$$spec$$LJ")
(tq"_root_.org.coroutines.$nme", argtpts(0) :: yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Int] && tp1 == typeOf[Double]) {
val nme = TypeName(s"_2$$spec$$ID")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Long] && tp1 == typeOf[Double]) {
val nme = TypeName(s"_2$$spec$$JD")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Double] && tp1 == typeOf[Double]) {
val nme = TypeName(s"_2$$spec$$DD")
(tq"_root_.org.coroutines.$nme", yldtpt :: rettpt :: Nil)
} else if (tp1 == typeOf[Double]) {
val nme = TypeName(s"_2$$spec$$LD")
(tq"_root_.org.coroutines.$nme", argtpts(0) :: yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Int]) {
val nme = TypeName(s"_2$$spec$$IL")
(tq"_root_.org.coroutines.$nme", argtpts(1) :: yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Long]) {
val nme = TypeName(s"_2$$spec$$JL")
(tq"_root_.org.coroutines.$nme", argtpts(1) :: yldtpt :: rettpt :: Nil)
} else if (tp0 == typeOf[Double]) {
val nme = TypeName(s"_2$$spec$$DL")
(tq"_root_.org.coroutines.$nme", argtpts(1) :: yldtpt :: rettpt :: Nil)
} else {
val nme = TypeName(s"_2$$spec$$LL")
val tpes = argtpts(0) :: argtpts(1) :: yldtpt :: rettpt :: Nil
(tq"_root_.org.coroutines.$nme", tpes)
}
}
def genCoroutineTpe(
argtpts: List[Tree], yldtpt: Tree, rettpt: Tree
): (Tree, List[Tree]) = {
if (argtpts.length == 1) {
specArity1(argtpts, yldtpt, rettpt)
} else if (argtpts.length == 2) {
specArity2(argtpts, yldtpt, rettpt)
} else if (argtpts.length == 0 || argtpts.length > 2) {
val nme = TypeName(s"_${argtpts.size}")
(tq"_root_.org.coroutines.Coroutine.$nme", argtpts :+ yldtpt :+ rettpt)
} else sys.error("Unreachable case.")
}
def synthesize(rawlambda: Tree): Tree = {
// transform to two operand assignment form
val typedtaflambda = canonicalizeTree(rawlambda)
// println(typedtaflambda)
// println(typedtaflambda.tpe)
implicit val table = new Table(typedtaflambda)
// ensure that argument is a function literal
val q"(..$args) => $body" = typedtaflambda
val argidents = for (arg <- args) yield {
val q"$_ val $argname: $_ = $_" = arg
q"$argname"
}
// extract argument names and types
val (argnames, argtpts) = (for (arg <- args) yield {
val q"$_ val $name: $tpt = $_" = arg
(name, tpt)
}).unzip
// infer coroutine return type
val rettpt = table.returnType
val yldtpt = table.yieldType
// generate control flow graph
val cfg = genControlFlowGraph(args, body, rettpt)
// generate entry points from yields and coroutine applications
val entrypoints = genEntryPoints(cfg)
// generate entry method
val entermethod = genEnterMethod(entrypoints)
// generate return value method
val returnvaluemethods = genReturnValueMethods(cfg)
// generate variable pushes and pops for stack variables
val (varpushes, varpops) = genVarPushesAndPops(cfg)
// emit coroutine instantiation
val (coroutinequal, tparams) = genCoroutineTpe(argtpts, yldtpt, rettpt)
val entrypointmethods = entrypoints.map(_._2)
val valnme = TermName(c.freshName("c"))
val co = q"""
new $coroutinequal[..$tparams] {
def $$call(
..$args
): _root_.org.coroutines.Coroutine.Instance[$yldtpt, $rettpt] = {
val $valnme = new _root_.org.coroutines.Coroutine.Instance[$yldtpt, $rettpt]
$$push($valnme, ..$argidents)
$valnme
}
def apply(..$args): $rettpt = {
_root_.scala.sys.error(
_root_.org.coroutines.COROUTINE_DIRECT_APPLY_ERROR_MESSAGE)
}
def $$push(
$$c: _root_.org.coroutines.Coroutine.Instance[$yldtpt, $rettpt], ..$args
): _root_.scala.Unit = {
_root_.org.coroutines.common.Stack.push($$c.$$costack, this, -1)
_root_.org.coroutines.common.Stack.push($$c.$$pcstack, 0.toShort, -1)
..$varpushes
}
def $$pop(
$$c: _root_.org.coroutines.Coroutine.Instance[$yldtpt, $rettpt]
): _root_.scala.Unit = {
_root_.org.coroutines.common.Stack.pop($$c.$$pcstack)
_root_.org.coroutines.common.Stack.pop($$c.$$costack)
..$varpops
}
$entermethod
..$entrypointmethods
..$returnvaluemethods
}
"""
// println(co)
co
}
def call[R: WeakTypeTag](tree: Tree): Tree = {
val (receiver, args) = tree match {
case q"$r.apply(..$args)" =>
if (!isCoroutineDefMarker(r.tpe))
          c.abort(r.pos,
            s"Receiver must be a coroutine.\n" +
            s"required: Coroutine[_, ${implicitly[WeakTypeTag[R]]}]\n" +
s"found: ${r.tpe} (with underlying type ${r.tpe.widen})")
(r, args)
case q"$r.apply[..$_](..$args)(..$_)" =>
if (!isCoroutineDefSugar(r.tpe))
          c.abort(r.pos,
            s"Receiver must be a coroutine.\n" +
            s"required: Coroutine[_, ${implicitly[WeakTypeTag[R]]}]\n" +
s"found: ${r.tpe} (with underlying type ${r.tpe.widen})")
(r, args)
case _ =>
        c.abort(
          tree.pos,
          "The call statement must take a coroutine invocation expression:\n" +
" call(<coroutine>.apply(<arg0>, ..., <argN>))")
}
val tpargs = coroutineMethodArgs(receiver.tpe)
val t = q"""
$receiver.$$call[..$tpargs](..$args)
"""
t
}
}
|
storm-enroute/coroutines
|
src/main/scala/org/coroutines/Synthesizer.scala
|
Scala
|
bsd-3-clause
| 14,974 |
package example
import example.Hello.User
import org.scalatest._
class HelloSpec extends FlatSpec with Matchers {
it should "sort with Null First" in {
val arr: Seq[User] = List(User(Some(1), 3), User(None, 1), User(Some(2), 2))
assert(Hello.sortWithNullFirst(arr) === arr.sortBy(_.order))
}
}
|
rysh/my-scala-playground
|
sort-example/src/test/scala/example/HelloSpec.scala
|
Scala
|
mit
| 312 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.r
import org.apache.hadoop.fs.Path
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.jackson.JsonMethods._
import org.apache.spark.SparkException
import org.apache.spark.ml.{Pipeline, PipelineModel}
import org.apache.spark.ml.attribute.AttributeGroup
import org.apache.spark.ml.feature.RFormula
import org.apache.spark.ml.regression.{AFTSurvivalRegression, AFTSurvivalRegressionModel}
import org.apache.spark.ml.util._
import org.apache.spark.sql.{DataFrame, Dataset}
private[r] class AFTSurvivalRegressionWrapper private (
val pipeline: PipelineModel,
val features: Array[String]) extends MLWritable {
private val aftModel: AFTSurvivalRegressionModel =
pipeline.stages(1).asInstanceOf[AFTSurvivalRegressionModel]
lazy val rCoefficients: Array[Double] = if (aftModel.getFitIntercept) {
Array(aftModel.intercept) ++ aftModel.coefficients.toArray ++ Array(math.log(aftModel.scale))
} else {
aftModel.coefficients.toArray ++ Array(math.log(aftModel.scale))
}
lazy val rFeatures: Array[String] = if (aftModel.getFitIntercept) {
Array("(Intercept)") ++ features ++ Array("Log(scale)")
} else {
features ++ Array("Log(scale)")
}
def transform(dataset: Dataset[_]): DataFrame = {
pipeline.transform(dataset).drop(aftModel.getFeaturesCol)
}
override def write: MLWriter =
new AFTSurvivalRegressionWrapper.AFTSurvivalRegressionWrapperWriter(this)
}
private[r] object AFTSurvivalRegressionWrapper extends MLReadable[AFTSurvivalRegressionWrapper] {
private def formulaRewrite(formula: String): (String, String) = {
var rewritedFormula: String = null
var censorCol: String = null
val regex = """Surv\\(([^,]+), ([^,]+)\\) ~ (.+)""".r
try {
val regex(label, censor, features) = formula
// TODO: Support dot operator.
if (features.contains(".")) {
throw new UnsupportedOperationException(
"Terms of survreg formula can not support dot operator.")
}
rewritedFormula = label.trim + "~" + features.trim
censorCol = censor.trim
} catch {
case e: MatchError =>
throw new SparkException(s"Could not parse formula: $formula")
}
(rewritedFormula, censorCol)
}
def fit(
formula: String,
data: DataFrame,
aggregationDepth: Int): AFTSurvivalRegressionWrapper = {
val (rewritedFormula, censorCol) = formulaRewrite(formula)
val rFormula = new RFormula().setFormula(rewritedFormula)
RWrapperUtils.checkDataColumns(rFormula, data)
val rFormulaModel = rFormula.fit(data)
// get feature names from output schema
val schema = rFormulaModel.transform(data).schema
val featureAttrs = AttributeGroup.fromStructField(schema(rFormula.getFeaturesCol))
.attributes.get
val features = featureAttrs.map(_.name.get)
val aft = new AFTSurvivalRegression()
.setCensorCol(censorCol)
.setFitIntercept(rFormula.hasIntercept)
.setFeaturesCol(rFormula.getFeaturesCol)
.setAggregationDepth(aggregationDepth)
val pipeline = new Pipeline()
.setStages(Array(rFormulaModel, aft))
.fit(data)
new AFTSurvivalRegressionWrapper(pipeline, features)
}
override def read: MLReader[AFTSurvivalRegressionWrapper] = new AFTSurvivalRegressionWrapperReader
override def load(path: String): AFTSurvivalRegressionWrapper = super.load(path)
class AFTSurvivalRegressionWrapperWriter(instance: AFTSurvivalRegressionWrapper)
extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val rMetadataPath = new Path(path, "rMetadata").toString
val pipelinePath = new Path(path, "pipeline").toString
val rMetadata = ("class" -> instance.getClass.getName) ~
("features" -> instance.features.toSeq)
val rMetadataJson: String = compact(render(rMetadata))
sc.parallelize(Seq(rMetadataJson), 1).saveAsTextFile(rMetadataPath)
instance.pipeline.save(pipelinePath)
}
}
class AFTSurvivalRegressionWrapperReader extends MLReader[AFTSurvivalRegressionWrapper] {
override def load(path: String): AFTSurvivalRegressionWrapper = {
implicit val format = DefaultFormats
val rMetadataPath = new Path(path, "rMetadata").toString
val pipelinePath = new Path(path, "pipeline").toString
val rMetadataStr = sc.textFile(rMetadataPath, 1).first()
val rMetadata = parse(rMetadataStr)
val features = (rMetadata \\ "features").extract[Array[String]]
val pipeline = PipelineModel.load(pipelinePath)
new AFTSurvivalRegressionWrapper(pipeline, features)
}
}
}
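// A standalone sketch of the Surv-formula rewrite performed by formulaRewrite
// above, re-implemented here because that method is private. The sample formula
// is hypothetical and only illustrates the expected shape.
object FormulaRewriteExample {
  def main(args: Array[String]): Unit = {
    val regex = """Surv\\(([^,]+), ([^,]+)\\) ~ (.+)""".r
    "Surv(futime, fustat) ~ ecog_ps + rx" match {
      case regex(label, censor, features) =>
        println(label.trim + "~" + features.trim) // prints: futime~ecog_ps + rx
        println(censor.trim)                      // prints: fustat
    }
  }
}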
|
bOOm-X/spark
|
mllib/src/main/scala/org/apache/spark/ml/r/AFTSurvivalRegressionWrapper.scala
|
Scala
|
apache-2.0
| 5,430 |
package ui.shader.builder
import ui.shader.builder.types.GlType
import ui.shader.builder.value.GlValue
import scala.collection.mutable.ListBuffer
class GlModule[T <: GlType](val result: GlValue[T],
val commands: ListBuffer[GlCommand],
val functions: ListBuffer[GlFunction[GlType]]) {
}
|
gvatn/play-scalajs-webgl-spark
|
client/src/main/scala/ui/shader/builder/GlModule.scala
|
Scala
|
mit
| 358 |
package bibcanon
import bibtex.{ Name, TexChars }
import java.nio.charset.{Charset, CharsetEncoder}
import java.util.regex.Pattern
object BibtexFormatter {
val ws = Pattern.compile("\\\\s")
def author2Bibtex(n: Name) = {
// There are three possible formats for a name in BibTeX, but we only
// use two, since the third is redundant. If a name has a "jr" part,
// we write
// von Last, First, jr
// If it has no "jr" part, we simply omit the second comma.
// The only complicated part is encoding "von Last", because there is
// no explicit way to delimit them. The BibTex rule is that the "von"
// part is the longest sequence of words whose last word starts
// with a lower case. This means that if we have lower-case words that
// we want to include in "Last," then we can do so by making them uncased --
// i.e. by enclosing their first letter in braces. On the other hand,
// if we want "von" to end with a non-lower-case word then we're stuck.
// Make any lower-cased words uncased.
def uncase(s: String) = {
if (!s.isEmpty && s(0).isLower) "{" + s + "}"
else s
}
// Make sure no words in the given name are "and." We replace any "and" with "a{nd}",
// which will have the same effect, but won't be recognized by BibTex as a name separator.
def escapeOneAnd(w: String) = if (w == "and") "a{nd}" else w
def escapeAnd(s: String) = {
ws.split(s) map escapeOneAnd mkString " "
}
val lasts = ws.split(n.last) map escapeOneAnd map uncase
if (lasts.isEmpty) {
println("warning: last name should be non-empty")
}
val last = lasts mkString " "
if (!n.von.isEmpty) {
val lastVon = ws.split(n.von).last
if (!lastVon.isEmpty && !lastVon(0).isLower)
println("warning: last word in von is not lower case: bibtex will be unhappy")
}
val von = escapeAnd(n.von)
val first = escapeAnd(n.first)
val jr = if (!n.jr.trim.isEmpty) escapeAnd(n.jr) + ", " else ""
s"$von $last, $jr$first".trim
}
def authors2Bibtex(ns: List[Name]) = (ns map author2Bibtex) mkString " and "
}
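// A small worked example of the escaping rules described in the comments above.
// It restates the two helpers so it does not depend on the bibtex.Name
// constructor (whose exact signature is not shown here).
object BibtexFormatterExample {
  def main(args: Array[String]): Unit = {
    def uncase(s: String) = if (!s.isEmpty && s(0).isLower) "{" + s + "}" else s
    def escapeOneAnd(w: String) = if (w == "and") "a{nd}" else w
    // Lower-case words in a last name are braced so BibTeX does not read them as a "von" part:
    println("de la Cruz".split(" ").map(escapeOneAnd).map(uncase).mkString(" ")) // {de} {la} Cruz
    // A literal "and" is neutralised so BibTeX does not treat it as a name separator:
    println("Simon and Garfunkel".split(" ").map(escapeOneAnd).mkString(" "))    // Simon a{nd} Garfunkel
  }
}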
|
jneem/bibcleaner
|
src/bibcanon/BibtexFormatter.scala
|
Scala
|
gpl-3.0
| 2,158 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
import scopt.OptionParser
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.examples.mllib.AbstractParams
import org.apache.spark.ml.recommendation.ALS
import org.apache.spark.sql.{Row, SQLContext}
/**
* An example app for ALS on MovieLens data (http://grouplens.org/datasets/movielens/).
* Run with
* {{{
* bin/run-example ml.MovieLensALS
* }}}
*/
object MovieLensALS {
case class Rating(userId: Int, movieId: Int, rating: Float, timestamp: Long)
object Rating {
def parseRating(str: String): Rating = {
val fields = str.split("::")
assert(fields.size == 4)
Rating(fields(0).toInt, fields(1).toInt, fields(2).toFloat, fields(3).toLong)
}
}
case class Movie(movieId: Int, title: String, genres: Seq[String])
object Movie {
def parseMovie(str: String): Movie = {
val fields = str.split("::")
assert(fields.size == 3)
      // Split genres on a literal '|' character; String.split("|") would treat
      // the argument as a regex and split the string into single characters.
      Movie(fields(0).toInt, fields(1), fields(2).split('|'))
}
}
case class Params(
ratings: String = null,
movies: String = null,
maxIter: Int = 10,
regParam: Double = 0.1,
rank: Int = 10,
numBlocks: Int = 10) extends AbstractParams[Params]
def main(args: Array[String]) {
val defaultParams = Params()
val parser = new OptionParser[Params]("MovieLensALS") {
head("MovieLensALS: an example app for ALS on MovieLens data.")
opt[String]("ratings")
.required()
.text("path to a MovieLens dataset of ratings")
.action((x, c) => c.copy(ratings = x))
opt[String]("movies")
.required()
.text("path to a MovieLens dataset of movies")
.action((x, c) => c.copy(movies = x))
opt[Int]("rank")
.text(s"rank, default: ${defaultParams.rank}")
.action((x, c) => c.copy(rank = x))
opt[Int]("maxIter")
.text(s"max number of iterations, default: ${defaultParams.maxIter}")
.action((x, c) => c.copy(maxIter = x))
opt[Double]("regParam")
.text(s"regularization parameter, default: ${defaultParams.regParam}")
.action((x, c) => c.copy(regParam = x))
opt[Int]("numBlocks")
.text(s"number of blocks, default: ${defaultParams.numBlocks}")
.action((x, c) => c.copy(numBlocks = x))
note(
"""
|Example command line to run this app:
|
| bin/spark-submit --class org.apache.spark.examples.ml.MovieLensALS \\
| examples/target/scala-*/spark-examples-*.jar \\
| --rank 10 --maxIter 15 --regParam 0.1 \\
| --movies data/mllib/als/sample_movielens_movies.txt \\
| --ratings data/mllib/als/sample_movielens_ratings.txt
""".stripMargin)
}
parser.parse(args, defaultParams).map { params =>
run(params)
} getOrElse {
System.exit(1)
}
}
def run(params: Params) {
val conf = new SparkConf().setAppName(s"MovieLensALS with $params")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
val ratings = sc.textFile(params.ratings).map(Rating.parseRating).cache()
val numRatings = ratings.count()
val numUsers = ratings.map(_.userId).distinct().count()
val numMovies = ratings.map(_.movieId).distinct().count()
println(s"Got $numRatings ratings from $numUsers users on $numMovies movies.")
val splits = ratings.randomSplit(Array(0.8, 0.2), 0L)
val training = splits(0).cache()
val test = splits(1).cache()
val numTraining = training.count()
val numTest = test.count()
println(s"Training: $numTraining, test: $numTest.")
ratings.unpersist(blocking = false)
val als = new ALS()
.setUserCol("userId")
.setItemCol("movieId")
.setRank(params.rank)
.setMaxIter(params.maxIter)
.setRegParam(params.regParam)
.setNumBlocks(params.numBlocks)
val model = als.fit(training.toDF())
val predictions = model.transform(test.toDF()).cache()
// Evaluate the model.
// TODO: Create an evaluator to compute RMSE.
val mse = predictions.select("rating", "prediction").rdd
.flatMap { case Row(rating: Float, prediction: Float) =>
val err = rating.toDouble - prediction
val err2 = err * err
if (err2.isNaN) {
None
} else {
Some(err2)
}
}.mean()
val rmse = math.sqrt(mse)
println(s"Test RMSE = $rmse.")
// Inspect false positives.
// Note: We reference columns in 2 ways:
// (1) predictions("movieId") lets us specify the movieId column in the predictions
// DataFrame, rather than the movieId column in the movies DataFrame.
// (2) $"userId" specifies the userId column in the predictions DataFrame.
// We could also write predictions("userId") but do not have to since
// the movies DataFrame does not have a column "userId."
val movies = sc.textFile(params.movies).map(Movie.parseMovie).toDF()
val falsePositives = predictions.join(movies)
.where((predictions("movieId") === movies("movieId"))
&& ($"rating" <= 1) && ($"prediction" >= 4))
.select($"userId", predictions("movieId"), $"title", $"rating", $"prediction")
val numFalsePositives = falsePositives.count()
println(s"Found $numFalsePositives false positives")
if (numFalsePositives > 0) {
println(s"Example false positives:")
falsePositives.limit(100).collect().foreach(println)
}
sc.stop()
}
}
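// A quick, self-contained check of the "::"-separated MovieLens record format
// parsed above. The object name and sample record are hypothetical, added only
// for illustration.
object MovieLensParsingExample {
  def main(args: Array[String]): Unit = {
    val r = MovieLensALS.Rating.parseRating("1::1193::5.0::978300760")
    assert(r == MovieLensALS.Rating(1, 1193, 5.0f, 978300760L))
    println(r) // Rating(1,1193,5.0,978300760)
  }
}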
// scalastyle:on println
|
pronix/spark
|
examples/src/main/scala/org/apache/spark/examples/ml/MovieLensALS.scala
|
Scala
|
apache-2.0
| 6,388 |
package org.bowlerframework.model
class RequestMapperException(cause: String) extends Exception(cause)
|
rkpandey/Bowler
|
core/src/main/scala/org/bowlerframework/model/RequestMapperException.scala
|
Scala
|
bsd-3-clause
| 104 |
// Copyright (C) 2016 IBM Corp. All Rights Reserved.
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.ibm.watson.developer_cloud.concept_insights.v2.model
import com.ibm.watson.developer_cloud.service.GenericModel
import org.joda.time.DateTime
/**
* Created by Martin Harvan on 11/04/16.
*/
case class Corpora(corpora: List[Corpus])
case class Corpus(access: String, accountPermissions: List[AccountPermission], id: String, name: String,
ttlHours: Int, expiresOn: String) extends GenericModel
case class CorpusProcessingStatus(buildStatus: BuildStatus, documents: Int, id: String, lastUpdated: DateTime) extends GenericModel
case class CorpusStats(id: String, lastUpdated: DateTime, topTags: TopTags) extends GenericModel
|
kane77/watson-scala-wrapper
|
src/main/scala/com/ibm/watson/developer_cloud/concept_insights/v2/model/Corpora.scala
|
Scala
|
apache-2.0
| 1,383 |
package scalaxy.streams
package test
import org.junit._
import org.junit.Assert._
class ArrayOpsTest extends StreamComponentsTestBase with StreamTransforms {
import global._
@Test
def testExtraction {
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"genericArrayOps(Array[Any]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"refArrayOps(Array[AnyRef]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"intArrayOps(Array[Int]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"longArrayOps(Array[Long]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"byteArrayOps(Array[Byte]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"shortArrayOps(Array[Short]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"charArrayOps(Array[Char]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"booleanArrayOps(Array[Boolean]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"floatArrayOps(Array[Float]())")
val SomeArrayOpsOp(_, ArrayOpsOp) = typecheck(q"doubleArrayOps(Array[Double]())")
}
@Test
def testJsExtraction {
val SomeJsArrayOpsOp(_, JsArrayOpsOp) = typecheck(
q"scala.scalajs.js.Any.jsArrayOps[Int](scala.scalajs.js.Array[Int]())")
}
}
|
nativelibs4java/scalaxy-streams
|
src/test/scala/ops/ArrayOpsTest.scala
|
Scala
|
bsd-3-clause
| 1,222 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.sparkcore.fs.elastic
import slamdata.Predef._
import quasar.{Data, DataCodec}
import quasar.connector._
import quasar.contrib.pathy._
import quasar.contrib.scalaz._
import quasar.effect._
import quasar.fp.ski._
import quasar.fp.free._
import quasar.fp.TaskRef
import quasar.fs._,
QueryFile.ResultHandle,
ReadFile.ReadHandle,
WriteFile.WriteHandle,
FileSystemError._,
PathError._
import quasar.fs.mount._, BackendDef._
import quasar.physical.sparkcore.fs._
import quasar.physical.sparkcore.fs.SparkCore
import quasar.physical.sparkcore.fs.{SparkCore, SparkConnectorDetails}, SparkConnectorDetails._
import org.apache.spark._
import org.apache.spark.rdd._
import org.http4s.{ParseFailure, Uri}
import pathy.Path._
import scalaz.{Failure => _, _}, Scalaz._
import scalaz.concurrent.Task
final case class ElasticConfig(sparkConf: SparkConf, host: String, port: Int)
object SparkElastic extends SparkCore with ManagedWriteFile[AFile] with ChrootedInterpreter {
// TODO[scalaz]: Shadow the scalaz.Monad.monadMTMAB SI-2712 workaround
import EitherT.eitherTMonad
def rootPrefix(cfg: ElasticConfig): ADir = rootDir
val Type = FileSystemType("spark-elastic")
type Eff0[A] = Coproduct[Task, PhysErr, A]
type Eff1[A] = Coproduct[Read[SparkContext, ?], Eff0, A]
type Eff2[A] = Coproduct[ElasticCall, Eff1, A]
type Eff3[A] = Coproduct[MonotonicSeq, Eff2, A]
type Eff4[A] = Coproduct[KeyValueStore[ResultHandle, SparkCursor, ?], Eff3, A]
type Eff5[A] = Coproduct[KeyValueStore[ReadHandle, SparkCursor, ?], Eff4, A]
type Eff6[A] = Coproduct[KeyValueStore[WriteHandle, AFile, ?], Eff5, A]
type Eff[A] = Coproduct[SparkConnectorDetails, Eff6, A]
def ReadSparkContextInj = Inject[Read[SparkContext, ?], Eff]
def RFKeyValueStoreInj = Inject[KeyValueStore[ReadFile.ReadHandle, SparkCursor, ?], Eff]
def MonotonicSeqInj = Inject[MonotonicSeq, Eff]
def TaskInj = Inject[Task, Eff]
def SparkConnectorDetailsInj = Inject[SparkConnectorDetails, Eff]
def QFKeyValueStoreInj = Inject[KeyValueStore[QueryFile.ResultHandle, SparkCursor, ?], Eff]
def MonoSeqM = MonoSeq[M]
def WriteKvsM = Kvs[M, WriteFile.WriteHandle, AFile]
val elasticCallOps: ElasticCall.Ops[Eff] = ElasticCall.Ops[Eff]
val separator = "__"
def file2ES(afile: AFile): IndexType = {
val folder = posixCodec.unsafePrintPath(fileParent(afile))
val typ = fileName(afile).value
val index = folder.substring(1, folder.length - 1).replace("/", separator)
IndexType(index, typ)
}
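  // For example, a file at /foo/bar/users maps to IndexType("foo__bar", "users"):
  // the parent directories form the Elasticsearch index name (joined with the
  // "__" separator above) and the file name becomes the document type.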
def dirPath2Index(dirPath: String): String =
dirPath.substring(1).replace("/", separator)
def dir2Index(adir: ADir): String = dirPath2Index(posixCodec.unsafePrintPath(adir))
def toFile(indexType: IndexType): AFile = {
val adir: ADir = indexType.index.split(separator).foldLeft(rootDir){
case (acc, dirName) => acc </> dir(dirName)
}
adir </> file(indexType.typ)
}
def toLowerLevel[S[_]](sc: SparkContext, config: ElasticConfig)(implicit
S0: Task :<: S, S1: PhysErr :<: S
): Task[Free[Eff, ?] ~> Free[S, ?]] =
( TaskRef(0L) |@|
TaskRef(Map.empty[ResultHandle, SparkCursor]) |@|
TaskRef(Map.empty[ReadHandle, SparkCursor]) |@|
TaskRef(Map.empty[WriteHandle, AFile])) {
(genState, rddStates, readCursors, writeCursors) => {
val read = Read.constant[Task, SparkContext](sc)
type Temp1[A] = Coproduct[Task, Read[SparkContext, ?], A]
type Temp[A] = Coproduct[ElasticCall, Temp1, A]
val elasticInterpreter = ElasticCall.interpreter(config.host, config.port)
def temp: Free[Temp, ?] ~> Task =
foldMapNT(elasticInterpreter :+: injectNT[Task, Task] :+: read)
val interpreter: Eff ~> S =
(details.interpreter[Temp] andThen temp andThen injectNT[Task, S]) :+:
(KeyValueStore.impl.fromTaskRef[WriteHandle, AFile](writeCursors) andThen injectNT[Task, S]) :+:
(KeyValueStore.impl.fromTaskRef[ReadHandle, SparkCursor](readCursors) andThen injectNT[Task, S]) :+:
(KeyValueStore.impl.fromTaskRef[ResultHandle, SparkCursor](rddStates) andThen injectNT[Task, S]) :+:
(MonotonicSeq.fromTaskRef(genState) andThen injectNT[Task, S]) :+:
(elasticInterpreter andThen injectNT[Task, S]) :+:
(read andThen injectNT[Task, S]) :+:
injectNT[Task, S] :+:
injectNT[PhysErr, S]
mapSNT[Eff, S](interpreter)
}
}
type Config = ElasticConfig
def parseConfig(connUri: ConnectionUri): DefErrT[Task, Config] = {
def liftErr(msg: String): DefinitionError = NonEmptyList(msg).left[EnvironmentError]
def master(host: String, port: Int): State[SparkConf, Unit] =
State.modify(_.setMaster(s"spark://$host:$port"))
def indexAuto: State[SparkConf, Unit] = State.modify(_.set("es.index.auto.create", "true"))
def appName: State[SparkConf, Unit] = State.modify(_.setAppName("quasar"))
def config(name: String, uri: Uri): State[SparkConf, Unit] =
State.modify(c => uri.params.get(name).fold(c)(c.set(name, _)))
val uriOrErr: DefErrT[Task, Uri] =
EitherT(Uri.fromString(connUri.value).leftMap((pf: ParseFailure) => liftErr(pf.toString)).point[Task])
val sparkConfOrErr: DefErrT[Task, SparkConf] = for {
uri <- uriOrErr
host <- EitherT(uri.host.fold(liftErr("host not provided").left[Uri.Host])(_.right[DefinitionError]).point[Task])
port <- EitherT(uri.port.fold(liftErr("port not provided").left[Int])(_.right[DefinitionError]).point[Task])
} yield {
(master(host.value, port) *>
appName *>
indexAuto *>
config("spark.executor.memory", uri) *>
config("spark.executor.cores", uri) *>
config("spark.executor.extraJavaOptions", uri) *>
config("spark.default.parallelism", uri) *>
config("spark.files.maxPartitionBytes", uri) *>
config("spark.driver.cores", uri) *>
config("spark.driver.maxResultSize", uri) *>
config("spark.driver.memory", uri) *>
config("spark.local.dir", uri) *>
config("spark.reducer.maxSizeInFlight", uri) *>
config("spark.reducer.maxReqsInFlight", uri) *>
config("spark.shuffle.file.buffer", uri) *>
config("spark.shuffle.io.retryWait", uri) *>
config("spark.memory.fraction", uri) *>
config("spark.memory.storageFraction", uri) *>
config("spark.cores.max", uri) *>
config("spark.speculation", uri) *>
config("spark.task.cpus", uri)
).exec(new SparkConf())
}
def fetchParameter(name: String): DefErrT[Task, String] = uriOrErr.flatMap(uri =>
uri.params.get(name).fold(
EitherT(liftErr(s"'$name' parameter not provided").left[String].point[Task]))(_.point[DefErrT[Task, ?]])
)
for {
sparkConf <- sparkConfOrErr
elasticHostAndPort <- fetchParameter("elasticHost").tuple(fetchParameter("elasticPort"))
(elasticHost, elasticPort) = elasticHostAndPort
} yield ElasticConfig(sparkConf, elasticHost, elasticPort.toInt)
}
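  // For example (hypothetical hosts and values, shown only to illustrate the
  // expected shape), a connection URI such as
  //   spark://10.0.0.4:7077?elasticHost=127.0.0.1&elasticPort=9200&spark.executor.memory=4g
  // yields a SparkConf whose master is spark://10.0.0.4:7077 plus an
  // ElasticConfig pointing at 127.0.0.1:9200.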
def getSparkConf: Config => SparkConf = _.sparkConf
def generateSC: (APath, Config) => DefErrT[Task, SparkContext] =
(jar, conf) => initSC(conf).map { sc =>
sc.addJar(posixCodec.printPath(jar))
sc
}
object details {
import org.elasticsearch.spark._
private def parseIndex(adir: ADir) = posixCodec.unsafePrintPath(adir).replace("/", "") // TODO_ES handle invalid paths
private def fromFile(sc: SparkContext, file: AFile): Task[RDD[Data]] = Task.delay {
sc
.esJsonRDD(file2ES(file).shows)
.map(_._2)
.map(raw => DataCodec.parse(raw)(DataCodec.Precise).fold(error => Data.NA, ι))
}
def rddFrom[S[_]](f: AFile)(implicit
read: Read.Ops[SparkContext, S],
E: ElasticCall :<: S,
S: Task :<: S
): Free[S, RDD[Data]] = for {
sc <- read.ask
rdd <- lift(fromFile(sc, f)).into[S]
} yield rdd
def store[S[_]](rdd: RDD[Data], out: AFile)(implicit
S: Task :<: S
): Free[S, Unit] = lift(Task.delay {
rdd.flatMap(DataCodec.render(_)(DataCodec.Precise).toList)
.saveJsonToEs(file2ES(out).shows)
}).into[S]
def fileExists[S[_]](f: AFile)(implicit
E: ElasticCall.Ops[S]
): Free[S, Boolean] = E.typeExists(file2ES(f))
def listContents[S[_]](adir: ADir)(implicit
E: ElasticCall.Ops[S]
): FileSystemErrT[Free[S, ?], Set[PathSegment]] = {
val toDirName: String => PathSegment = t => DirName(t).left[FileName]
val toFileName: String => PathSegment = t => FileName(t).right[DirName]
val rootFolder: String => String = _.split(separator).head
val segments = if(adir === rootDir)
E.listIndices.map(_.map(rootFolder).toSet.map(toDirName))
else {
val prefix = dir2Index(adir)
val folders = E.listIndices.map(indices =>
indices
.filter(_.startsWith(prefix))
.map(s => s.substring(s.indexOf(prefix) + prefix.length))
.map {
case s if s.contains(separator) => s.substring(0, s.indexOf(separator))
case s => s
}
.toSet
.map(toDirName))
val index = if(prefix.endsWith(separator)) prefix.substring(0, prefix.length - separator.length) else prefix
val files = E.listTypes(index).map(_.map(toFileName).toSet)
(folders |@| files)(_ ++ _)
}
EitherT(segments.map(_.right[FileSystemError]))
}
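    // For example, with indices List("foo__bar", "foo__baz", "qux"), listing the
    // root directory yields the directory segments {foo, qux}, while listing
    // /foo/ yields the directory segments {bar, baz} plus one file segment per
    // Elasticsearch type found in the "foo" index.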
def readChunkSize: Int = 5000
def interpreter[S[_]](implicit
read: Read.Ops[SparkContext, S],
E: ElasticCall :<: S,
S: Task :<: S
): SparkConnectorDetails ~> Free[S, ?] = new (SparkConnectorDetails ~> Free[S, ?]) {
def apply[A](from: SparkConnectorDetails[A]) = from match {
case FileExists(f) => ElasticCall.Ops[S].typeExists(file2ES(f))
case ReadChunkSize => 5000.point[Free[S, ?]]
case StoreData(rdd, out) => lift(Task.delay {
rdd.flatMap(DataCodec.render(_)(DataCodec.Precise).toList)
.saveJsonToEs(file2ES(out).shows)
}).into[S]
case ListContents(d) => listContents[S](d).run
case RDDFrom(f) => rddFrom(f)
}
}
}
object ManagedWriteFileModule extends ManagedWriteFileModule {
import org.elasticsearch.spark._
def writeCursor(file: AFile): Backend[AFile] =
file.point[Backend]
def writeChunk(f: AFile, chunk: Vector[Data]): Configured[Vector[FileSystemError]] = {
implicit val codec: DataCodec = DataCodec.Precise
val lines = chunk.map(data => DataCodec.render(data)).toList.map(_.toList).join
(for {
sc <- readScOps.ask
result <- lift(Task.delay {
sc.makeRDD(lines).saveJsonToEs(file2ES(f).shows)
}).into[Eff].as(Vector.empty[FileSystemError])
} yield result).liftM[ConfiguredT]
}
def closeCursor(f: AFile): Configured[Unit] =
().point[Configured]
}
object ElasticManageFileModule extends SparkCoreManageFileModule {
def moveFile(sf: AFile, df: AFile): Free[Eff, Unit] = for {
src <- file2ES(sf).point[Free[Eff, ?]]
dst <- file2ES(df).point[Free[Eff, ?]]
dstIndexExists <- elasticCallOps.indexExists(dst.index)
dstTypeExists <- elasticCallOps.typeExists(dst)
_ <- if(dstTypeExists) elasticCallOps.deleteType(dst)
else if(!dstIndexExists) elasticCallOps.createIndex(dst.index)
else ().point[Free[Eff, ?]]
_ <- elasticCallOps.copyType(src, dst)
_ <- elasticCallOps.deleteType(src)
} yield ()
def moveDir(sd: ADir, dd: ADir): Free[Eff, Unit] = {
def calculateDestinationIndex(index: String): String = {
val destinationPath = posixCodec.unsafePrintPath(dd) ++ index.diff(posixCodec.unsafePrintPath(sd))
dirPath2Index(destinationPath)
}
for {
src <- dir2Index(sd).point[Free[Eff, ?]]
toMove <- elasticCallOps.listIndices.map(_.filter(i => i.startsWith(src)))
_ <- toMove.map(i => elasticCallOps.copyIndex(i, calculateDestinationIndex(i))).sequence
_ <- toMove.map(elasticCallOps.deleteIndex(_)).sequence
} yield ()
}
def doesPathExist: APath => Free[Eff, Boolean] = (path: APath) => refineType(path).fold(
d => elasticCallOps.listIndices.map(_.contains(dir2Index(d))),
f => elasticCallOps.typeExists(file2ES(f))
)
def delete(path: APath): Backend[Unit] = refineType(path).fold(d => deleteDir(d),f => deleteFile(f)).liftB.unattempt
private def deleteFile(file: AFile): Free[Eff, FileSystemError \\/ Unit] = for {
indexType <- file2ES(file).point[Free[Eff, ?]]
exists <- elasticCallOps.typeExists(indexType)
res <- if(exists) elasticCallOps.deleteType(indexType).map(_.right) else pathErr(pathNotFound(file)).left[Unit].point[Free[Eff, ?]]
} yield res
private def deleteDir(dir: ADir): Free[Eff, FileSystemError \\/ Unit] = for {
indices <- elasticCallOps.listIndices
index = dir2Index(dir)
result <- if(indices.isEmpty) pathErr(pathNotFound(dir)).left[Unit].point[Free[Eff, ?]] else {
indices.filter(_.startsWith(index)).map(elasticCallOps.deleteIndex(_)).sequence.as(().right[FileSystemError])
}
} yield result
}
def ManageFileModule: ManageFileModule = ElasticManageFileModule
}
|
jedesah/Quasar
|
sparkcore/src/main/scala/quasar/physical/sparkcore/fs/elastic/SparkElastic.scala
|
Scala
|
apache-2.0
| 14,437 |
/**
*
* No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
*
* OpenAPI spec version:
*
*
* NOTE: This class is auto generated by the swagger code generator program.
* https://github.com/swagger-api/swagger-codegen.git
* Do not edit the class manually.
*/
package io.swagger.client.model
case class AddActorsToCombatParameters (
actorIds: List[String]
)
|
CucumisSativus/rpgRollerBackend
|
functionalTest/src/main/scala/io/swagger/client/model/AddActorsToCombatParameters.scala
|
Scala
|
mit
| 423 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package docs.home.scaladsl.persistence
import com.lightbend.lagom.scaladsl.persistence.AggregateEventShards
import com.lightbend.lagom.scaladsl.persistence.AggregateEvent
import com.lightbend.lagom.scaladsl.persistence.AggregateEventTag
class ShardedBlogEventTag {
//#sharded-tags
object BlogEvent {
val NumShards = 20
val Tag = AggregateEventTag.sharded[BlogEvent](NumShards)
}
sealed trait BlogEvent extends AggregateEvent[BlogEvent] {
override def aggregateTag: AggregateEventShards[BlogEvent] = BlogEvent.Tag
}
//#sharded-tags
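  // With NumShards = 20, the sharded tag above produces 20 numbered tags; every
  // entity's events are consistently hashed onto one of them, so read-side
  // processors can be run in parallel, one instance per shard.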
}
|
rcavalcanti/lagom
|
docs/manual/scala/guide/cluster/code/docs/home/scaladsl/persistence/ShardedBlogEventTag.scala
|
Scala
|
apache-2.0
| 645 |
/**
* Copyright (C) 2019 Inera AB (http://www.inera.se)
*
* This file is part of statistik (https://github.com/sklintyg/statistik).
*
* statistik is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* statistik is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package se.inera.statistics.gatling
import io.gatling.core.Predef._
object NationellSjukskrivningPerKonPerLan {
def exec = RestCall.get(
s"getSjukfallPerSexStatistics",
s"${Conf.uri}/api/getSjukfallPerSexStatistics")
}
|
sklintyg/statistik
|
gatling/src/test/scala/se/inera/statistics/gatling/NationellSjukskrivningPerKonPerLan.scala
|
Scala
|
lgpl-3.0
| 1,017 |