| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5 to 1M | stringlengths 5 to 109 | stringlengths 6 to 208 | stringclasses (1 value) | stringclasses (15 values) | int64 5 to 1M |
package scalaguide.sql
import javax.inject.Inject
import play.api.db.{ Database, NamedDatabase }
import play.api.mvc.Controller
// inject "orders" database instead of "default"
class ScalaInjectNamed @Inject()(
@NamedDatabase("orders") db: Database) extends Controller {
// do whatever you need with the db
}
| ktoso/playframework | documentation/manual/working/scalaGuide/main/sql/code/ScalaInjectNamed.scala | Scala | apache-2.0 | 315 |
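The controller above expects a second database configured under the name "orders". As a hedged sketch (the driver and URL values are illustrative, not taken from the original guide), the matching Play configuration uses the standard `db.<name>.*` keys, written here as a Typesafe Config string so the example stays in Scala:
import com.typesafe.config.ConfigFactory
object OrdersDbConfigExample {
  // Standard Play "db.<name>.*" keys for the extra "orders" database;
  // the H2 driver and in-memory URL are placeholder values.
  val ordersDbConfig = ConfigFactory.parseString(
    """
      |db.orders.driver = org.h2.Driver
      |db.orders.url    = "jdbc:h2:mem:orders"
    """.stripMargin)
}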
/**
* Copyright 2019 Gianluca Amato <[email protected]>
*
* This file is part of ScalaFix.
* ScalaFix is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ScalaFix is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ScalaFix. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.scalafix.assignments
import it.unich.scalafix.Assignment
/**
* An input assignment is an assignment used as the initial assignment for fixpoint solvers. It has a method
* which converts the assignment into a mutable assignment. Actual implementations of InputAssignment should provide
* a toString method which gives detailed information, to be used for tracing and debugging purposes.
*
* @tparam U type for unknowns
* @tparam V type for values
*/
trait InputAssignment[U, V] extends Assignment[U, V] {
/**
* Returns an IOAssignment with the same initial content as the current assignment. Changes to the IOAssignment
* are not reflected to the current assignment.
*/
def toIOAssignment: IOAssignment[U, V]
}
/**
* The companion object for the class `it.unich.scalafix.assignments.InputAssignment`.
*/
object InputAssignment {
protected[assignments] def hashToString[U, V](m: collection.Map[U, V]): String = {
val sb = new StringBuffer()
var first: Boolean = true
sb.append(" [ ")
for ((u, v) <- m) {
if (!first) sb.append(" , ")
first = false
sb.append(u).append(" -> ").append(v)
}
sb.append(" ]")
sb.toString
}
/**
* An assignment that always returns the same default value.
*
* @param v the default value returned by the assignment
*/
implicit class Default[U, V](v: V) extends InputAssignment[U, V] {
def apply(u: U): V = v
def toIOAssignment: IOAssignment[U, V] = new IOAssignment.HashBasedIOAssignment({ _: U => v })
override def toString: String = s"constant value $v"
}
/**
* An input assignment obtained from another input assignment by modifying the value of an unknown with
* a given binding.
*
* @param special the unknown whose value we want to change
* @param value the new value of the unknown special
* @param default the initial input assignment
*/
class Conditional[U, V](special: U, value: V, default: InputAssignment[U, V]) extends InputAssignment[U, V] {
def apply(u: U): V = if (u == special) value else default(u)
def toIOAssignment: IOAssignment[U, V] = new IOAssignment.HashBasedIOAssignment(this)
override def toString: String = s"[ $special -> $value ] else $default"
}
/**
* An implicit class converting a map into an input assignment.
*
* @param m hash map
*/
implicit class FromMap[U, V](m: Map[U, V]) extends InputAssignment[U, V] {
def apply(u: U): V = m(u)
def toIOAssignment: IOAssignment[U, V] = new IOAssignment.HashBasedIOAssignment(this)
override def toString: String = hashToString(m)
}
/**
* An implicit class converting an assignment into an input assignment.
*
* @param a the original assignment
*/
implicit class FromAssignment[U, V](a: Assignment[U, V]) extends InputAssignment[U, V] {
def apply(u: U): V = a(u)
def toIOAssignment: IOAssignment[U, V] = new IOAssignment.HashBasedIOAssignment(a)
override def toString: String = a.toString
}
/**
* Returns the input assignment which behaves like `otherwise`, with the exception of the unknown
* `special` for which it returns `value`.
*/
def conditional[U, V](special: U, value: V, otherwise: InputAssignment[U, V]) = new Conditional(special, value, otherwise)
}
| jandom-devel/ScalaFix | core/src/main/scala/it/unich/scalafix/assignments/InputAssignment.scala | Scala | gpl-3.0 | 4,097 |
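A minimal usage sketch of the API shown above, relying only on members defined in this file (the implicit conversions `Default` and `FromMap`, the `conditional` factory, and `toIOAssignment`):
import it.unich.scalafix.assignments.InputAssignment
object InputAssignmentExample extends App {
  // The implicit classes in the companion object act as conversions: a constant
  // value or a Map can be used directly where an InputAssignment is expected.
  val zero: InputAssignment[String, Int] = 0
  val fromMap: InputAssignment[String, Int] = Map("x" -> 1, "y" -> 2)
  // Override a single unknown on top of the constant assignment.
  val patched = InputAssignment.conditional("x", 10, zero)
  assert(patched("x") == 10 && patched("y") == 0)
  assert(fromMap("y") == 2)
  // The IOAssignment copy starts from the same content but is independent afterwards.
  val mutable = patched.toIOAssignment
  println(mutable)
}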
package com.artclod.markup
import com.artclod.mathml.TextToHtmlGraph
import laika.api.Transform
import laika.parse.markdown.Markdown
import laika.render.HTML
import play.twirl.api.Html
import scala.collection.mutable
import scala.util.Try
object LaikaParser {
val gt = TextToHtmlGraph.graphIndicators
val mt = """$$"""
def parity(parity : Int, index: Int ) = (index % 2) == parity
/**
* Takes a string in which sections are bounded by a special delimiter string and returns
* (1) the text with those sections replaced by id strings (e.g. "$$3$$" for the section with id 3 when the delimiter is "$$")
* (2) a map from those ids back to their original text
*
* @param text the text to process
* @param specialString the delimiter that bounds the special sections
*/
def replaceSpecials(text: String, specialString: String) = {
val startsWithSpecial = text.startsWith(specialString)
val endsWithSpecial = text.endsWith(specialString)
val split = text.split(java.util.regex.Pattern.quote(specialString))
val specialMap = mutable.HashMap[String, String]()
val processed = for(i <- 0 until split.size) yield {
if(i % 2 == 0) {
split(i)
} else {
val id = specialString + (i/2) + specialString
specialMap.put(id, specialString + split(i) + specialString)
id
}
}
(processed.mkString(""), specialMap.toMap)
}
def inlineSpecial(text: String, inline: Map[String, String]) = {
var temp = text
for(entry <- inline) {
temp = temp.replace(entry._1, entry._2)
}
temp
}
def apply(textIn: String) = {
val (textGt, mapGt) = replaceSpecials(textIn, gt)
val (text, mapMt) = replaceSpecials(textGt, mt)
Try(Html( {
val start = (Transform from Markdown.strict to HTML fromString text toString)
inlineSpecial(inlineSpecial(start, mapMt), mapGt)
}))
// Try(Html(Transform from Markdown.strict to HTML fromString text toString()))
}
}
| kristiankime/calc-tutor | app/com/artclod/markup/LaikaParser.scala | Scala | mit | 1,911 |
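A worked example of the `replaceSpecials`/`inlineSpecial` round trip defined above, using the `$$` math delimiter; the expected values follow directly from the splitting logic in the file:
import com.artclod.markup.LaikaParser
object LaikaParserExample extends App {
  val text = "Euler: $$e^{i\\pi} + 1 = 0$$ done"
  // Sections bounded by "$$" are swapped for ids so the Markdown parser never sees them.
  val (processed, specials) = LaikaParser.replaceSpecials(text, "$$")
  assert(processed == "Euler: $$0$$ done")
  assert(specials == Map("$$0$$" -> "$$e^{i\\pi} + 1 = 0$$"))
  // inlineSpecial restores the original sections after Markdown rendering.
  assert(LaikaParser.inlineSpecial(processed, specials) == text)
}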
package lila.video
import akka.actor.{ ActorSelection, ActorSystem }
import com.typesafe.config.Config
import scala.concurrent.duration._
import lila.common.PimpedConfig._
final class Env(
config: Config,
scheduler: lila.common.Scheduler,
db: lila.db.Env,
isDev: Boolean) {
private val settings = new {
val CollectionVideo = config getString "collection.video"
val CollectionView = config getString "collection.view"
val SheetUrl = config getString "sheet.url"
val SheetDelay = config duration "sheet.delay"
val YoutubeUrl = config getString "youtube.url"
val YoutubeApiKey = config getString "youtube.api_key"
val YoutubeMax = config getInt "youtube.max"
val YoutubeDelay = config duration "youtube.delay"
}
import settings._
lazy val api = new VideoApi(
videoColl = videoColl,
viewColl = viewColl)
private lazy val sheet = new Sheet(
url = SheetUrl,
api = api)
private lazy val youtube = new Youtube(
url = YoutubeUrl,
apiKey = YoutubeApiKey,
max = YoutubeMax,
api = api)
if (!isDev) {
scheduler.effect(SheetDelay, "video update from sheet") {
sheet.fetchAll logFailure "video sheet"
}
scheduler.effect(YoutubeDelay, "video update from youtube") {
youtube.updateAll logFailure "video youtube"
}
}
private[video] lazy val videoColl = db(CollectionVideo)
private[video] lazy val viewColl = db(CollectionView)
}
object Env {
lazy val current: Env = "video" boot new Env(
config = lila.common.PlayApp loadConfig "video",
scheduler = lila.common.PlayApp.scheduler,
isDev = lila.common.PlayApp.isDev,
db = lila.db.Env.current)
}
| r0k3/lila | modules/video/src/main/Env.scala | Scala | mit | 1,683 |
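The `settings` block above pulls its values from the "video" configuration scope. A hedged sketch of what that configuration might contain (all values are illustrative; `config duration` comes from lila's `PimpedConfig` helpers):
import com.typesafe.config.ConfigFactory
object VideoConfigSketch {
  // Illustrative keys matching the settings read in Env above.
  val videoConfig = ConfigFactory.parseString(
    """
      |collection.video = video
      |collection.view  = video_view
      |sheet.url   = "https://example.com/videos.csv"
      |sheet.delay = 3 hours
      |youtube.url     = "https://www.googleapis.com/youtube/v3/videos"
      |youtube.api_key = "********"
      |youtube.max     = 50
      |youtube.delay   = 1 day
    """.stripMargin)
}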
package org.jetbrains.plugins.scala.compiler
import java.io.File
import org.jetbrains.jps.incremental.messages.CustomBuilderMessage
import org.jetbrains.jps.incremental.scala.Client
import org.jetbrains.plugins.scala.compiler.CompilerEventType.CompilerEventType
import org.jetbrains.plugins.scala.util.{CompilationId, ObjectSerialization}
import scala.util.Try
sealed trait CompilerEvent {
def eventType: CompilerEventType
def compilationId: CompilationId
def compilationUnitId: Option[CompilationUnitId]
final def toCustomMessage: CustomBuilderMessage = new CustomBuilderMessage(
CompilerEvent.BuilderId,
eventType.toString,
ObjectSerialization.toBase64(this)
)
}
object CompilerEvent {
// can be sent multiple times for different modules by jps compiler
case class CompilationStarted(override val compilationId: CompilationId,
override val compilationUnitId: Option[CompilationUnitId])
extends CompilerEvent {
override def eventType: CompilerEventType = CompilerEventType.CompilationStarted
}
case class CompilationPhase(override val compilationId: CompilationId,
override val compilationUnitId: Option[CompilationUnitId],
phase: String)
extends CompilerEvent {
override def eventType: CompilerEventType = CompilerEventType.CompilationPhase
}
case class CompilationUnit(override val compilationId: CompilationId,
override val compilationUnitId: Option[CompilationUnitId],
path: String)
extends CompilerEvent {
override def eventType: CompilerEventType = CompilerEventType.CompilationUnit
}
case class MessageEmitted(override val compilationId: CompilationId,
override val compilationUnitId: Option[CompilationUnitId],
msg: Client.ClientMsg)
extends CompilerEvent {
override def eventType: CompilerEventType = CompilerEventType.MessageEmitted
}
case class ProgressEmitted(override val compilationId: CompilationId,
override val compilationUnitId: Option[CompilationUnitId],
progress: Double)
extends CompilerEvent {
override def eventType: CompilerEventType = CompilerEventType.ProgressEmitted
}
// can be sent multiple times for different modules by jps compiler
case class CompilationFinished(override val compilationId: CompilationId,
override val compilationUnitId: Option[CompilationUnitId],
sources: Set[File])
extends CompilerEvent {
override def eventType: CompilerEventType = CompilerEventType.CompilationFinished
}
def fromCustomMessage(customMessage: CustomBuilderMessage): Option[CompilerEvent] = {
val text = customMessage.getMessageText
Option(customMessage)
.filter(_.getBuilderId == BuilderId)
.flatMap { msg => Try(CompilerEventType.withName(msg.getMessageType)).toOption }
.map { _ => ObjectSerialization.fromBase64[CompilerEvent](text) }
}
final val BuilderId = "compiler-event"
}
| JetBrains/intellij-scala | scala/compiler-shared/src/org/jetbrains/plugins/scala/compiler/CompilerEvent.scala | Scala | apache-2.0 | 3,190 |
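A sketch of the intended `toCustomMessage`/`fromCustomMessage` round trip; how a `CompilationId` is obtained is not shown in this file, so it is taken as a parameter here:
import org.jetbrains.plugins.scala.compiler.CompilerEvent
import org.jetbrains.plugins.scala.util.CompilationId
object CompilerEventRoundTripExample {
  // `compilationId` is taken as given; its factory is not part of the file above.
  def roundTrip(compilationId: CompilationId): Unit = {
    val event: CompilerEvent = CompilerEvent.CompilationStarted(compilationId, None)
    val message = event.toCustomMessage                    // CustomBuilderMessage tagged with BuilderId
    val decoded = CompilerEvent.fromCustomMessage(message) // Some(event) if serialization round-trips
    assert(decoded.contains(event))
  }
}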
// scalastyle:off println
package wordcountscala
import scala.math.random
import org.apache.spark._
object WordCount {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("Word Count")
val sc = new SparkContext(conf)
// create a SparkContext
//val sc = new SparkContext("local[*]", "WordCount")
// Comment from David
// create an RDD with the contents of the file don-quijote.txt.gz
//val docs = sc.textFile("wordcountscala/data/donquijote.txt")
//val docs = sc.textFile("hdfs://hadoopMaster:9000/bigfile.txt")
//val docs = sc.textFile("wordcountscala/data/bigfile.txt")
val docs = sc.textFile(args(0))
// Convert the text of each line to lowercase
val lower = docs.map(line => line.toLowerCase)
// Split each line of the text into words (strings separated by whitespace)
// flatMap flattens the arrays produced by split
val words = lower.flatMap(line => line.split("\\s+"))
// Create a tuple (word, frequency)
// the initial frequency of each word is 1
val counts = words.map(word => (word, 1))
// Group by word and sum the frequencies
val freq = counts.reduceByKey(_ + _)
// Invert the tuple to (frequency, word)
val invFreq = freq.map(_.swap)
// Take the 20 largest and print them
invFreq.top(20).foreach(println)
}
}
// scalastyle:on println
| DavidContrerasICAI/sparkCode | wordcountscalaSBT/src/main/scala/wordcountscala/WordCount.scala | Scala | mit | 1,399 |
package net.truerss.services
import org.specs2.mutable.Specification
import truerss.plugins_discrovery.Discovery
import truerss.util.TaskImplicits
class DiscoveryTests extends Specification {
import TaskImplicits._
private val url = "https://github.com/truerss/plugins/releases/tag/1.0.0"
"fetch" in {
Discovery.fetch(url).materialize must have size 5
}
}
| truerss/truerss | src/real/scala/net/truerss/services/DiscoveryTests.scala | Scala | mit | 373 |
package examples
import weightedroundrobin.RoundRobinUtils.{divByFivePenalizer, plusOneRewarder}
import weightedroundrobin.{RoundRobin, WeightedResource}
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.concurrent.Future
import scala.language.higherKinds
import scala.concurrent.ExecutionContext.Implicits.global
object SimpleRoundRobin {
private val resourcePool = mutable.ListBuffer(WeightedResource("nr1", "nr1", 100, 75), WeightedResource("nr2", "nr2", 40, 39), WeightedResource("nr3", "nr3", 50, 0))
private val rr = new RoundRobin[WeightedResource, Seq] {}
def forResource[A](fn: WeightedResource => A ) : Either[Throwable, A] = {
val (_, result): (Seq[WeightedResource], Either[Throwable, A]) = rr.forResource(fn)(plusOneRewarder, divByFivePenalizer, resourcePool)(syncSafelyWithResourcePool(resourcePool))
result
}
def forResourceAsync[A](fn: WeightedResource => Future[A] ) : Future[Either[Throwable, A]] = {
val (_, result): (Future[Seq[WeightedResource]], Future[Either[Throwable, A]]) = rr.forResourceAsync(fn)(plusOneRewarder, divByFivePenalizer, resourcePool)(syncSafelyWithResourcePool(resourcePool))
result
}
private def syncSafelyWithResourcePool[A, F[B] <: mutable.Buffer[B], G[C] <: Seq[C]](global: F[A])(updated: G[A]): F[A] = {
synchronized {
global.indices.foreach(i => {
val e = updated(i)
global.update(i, e)
})
}
global
}
def addResource(weightedResource: WeightedResource) : ListBuffer[WeightedResource] = {
resourcePool.append(weightedResource)
resourcePool
}
}
| TharinduDG/weightedroundrobin | src/main/scala/examples/SimpleRoundRobin.scala | Scala | apache-2.0 | 1,620 |
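A short usage sketch of `SimpleRoundRobin` above, using only the members defined in this file; the work function simply renders the resource it was handed:
import examples.SimpleRoundRobin
import weightedroundrobin.WeightedResource
object SimpleRoundRobinExample extends App {
  // Register one more resource, then route a unit of work through the round robin.
  SimpleRoundRobin.addResource(WeightedResource("nr4", "nr4", 60, 10))
  val result: Either[Throwable, String] =
    SimpleRoundRobin.forResource(resource => s"handled by $resource")
  println(result)
}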
/*
* Copyright 2013 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.plugin.wms
import java.awt.image.BufferedImage
import java.awt.{AlphaComposite, Color, Graphics2D, Rectangle}
import java.util.{Date, List => JList}
import com.typesafe.scalalogging.slf4j.Logging
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.client.{IteratorSetting, Scanner, ZooKeeperInstance}
import org.apache.accumulo.core.iterators.user.VersioningIterator
import org.apache.accumulo.core.security.Authorizations
import org.apache.hadoop.io.Text
import org.geotools.coverage.CoverageFactoryFinder
import org.geotools.coverage.grid.io.{AbstractGridCoverage2DReader, AbstractGridFormat}
import org.geotools.coverage.grid.{GridCoverage2D, GridEnvelope2D, GridGeometry2D}
import org.geotools.geometry.GeneralEnvelope
import org.geotools.parameter.Parameter
import org.geotools.util.{DateRange, Utilities}
import org.joda.time.format.DateTimeFormat
import org.joda.time.{DateTime, DateTimeZone}
import org.locationtech.geomesa.core.iterators.{AggregatingKeyIterator, SurfaceAggregatingIterator, TimestampRangeIterator, TimestampSetIterator}
import org.locationtech.geomesa.core.util.{BoundingBoxUtil, SelfClosingBatchScanner}
import org.locationtech.geomesa.utils.geohash.{BoundingBox, Bounds, GeoHash, TwoGeoHashBoundingBox}
import org.opengis.geometry.Envelope
import org.opengis.parameter.{GeneralParameterValue, InvalidParameterValueException}
import scala.collection.JavaConversions._
import scala.util.Random
object CoverageReader {
val GeoServerDateFormat = DateTimeFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'")
val DefaultDateString = GeoServerDateFormat.print(new DateTime(DateTimeZone.forID("UTC")))
}
import org.locationtech.geomesa.plugin.wms.CoverageReader._
class CoverageReader(val url: String) extends AbstractGridCoverage2DReader() with Logging {
logger.debug(s"""creating coverage reader for url "${url.replaceAll(":.*@", ":********@").replaceAll("#auths=.*","#auths=********")}"""")
val FORMAT = """accumulo://(.*):(.*)@(.*)/(.*)/(.*)/(.*)#resolution=([0-9]*)#zookeepers=([^#]*)(?:#auths=)?(.*)$""".r
val FORMAT(user, password, instanceId, table, columnFamily, columnQualifier, resolutionStr, zookeepers, authtokens) = url
logger.debug(s"extracted user $user, password ********, instance id $instanceId, table $table, column family $columnFamily, " +
s"column qualifier $columnQualifier, resolution $resolutionStr, zookeepers $zookeepers, auths ********")
coverageName = table + ":" + columnFamily + ":" + columnQualifier
val metaRow = new Text("~" + columnFamily + "~" + columnQualifier)
this.crs = AbstractGridFormat.getDefaultCRS
this.originalEnvelope = new GeneralEnvelope(Array(-180.0, -90.0), Array(180.0, 90.0))
this.originalEnvelope.setCoordinateReferenceSystem(this.crs)
this.originalGridRange = new GridEnvelope2D(new Rectangle(0, 0, 1024, 512))
this.coverageFactory = CoverageFactoryFinder.getGridCoverageFactory(this.hints)
val zkInstance = new ZooKeeperInstance(instanceId, zookeepers)
val connector = zkInstance.getConnector(user, new PasswordToken(password.getBytes))
// When parsing an old-form Accumulo layer URI the authtokens field matches the empty string, requesting no authorizations
val auths = new Authorizations(authtokens.split(","): _*)
val aggPrefix = AggregatingKeyIterator.aggOpt
lazy val metaData: Map[String,String] = {
val scanner: Scanner = connector.createScanner(table, auths)
scanner.setRange(new org.apache.accumulo.core.data.Range(metaRow))
scanner.iterator()
.map(entry => (entry.getKey.getColumnFamily.toString, entry.getKey.getColumnQualifier.toString))
.toMap
}
/**
* The default implementation does not allow a non-default coverage name; this override accepts any non-null name.
*/
override protected def checkName(coverageName: String) = {
Utilities.ensureNonNull("coverageName", coverageName)
true
}
override def getFormat = new CoverageFormat
def toTimestampString(date: Date) = java.lang.Long.toString(date.getTime/1000)
def getGeohashPrecision = resolutionStr.toInt
def read(parameters: Array[GeneralParameterValue]): GridCoverage2D = {
val paramsMap = parameters.map(gpv => (gpv.getDescriptor.getName.getCode, gpv)).toMap
val gg = paramsMap(AbstractGridFormat.READ_GRIDGEOMETRY2D.getName.toString).asInstanceOf[Parameter[GridGeometry2D]].getValue
val env = gg.getEnvelope
val timeParam = parameters.find(_.getDescriptor.getName.getCode == "TIME")
.flatMap(_.asInstanceOf[Parameter[JList[AnyRef]]].getValue match {
case null => None
case c => c.get(0) match {
case null => None
case date: Date => Some(date)
case dateRange: DateRange => Some(dateRange)
case x =>
throw new InvalidParameterValueException(s"Invalid value for parameter TIME: ${x.toString}", "TIME", x)
}}
)
val tile = getImage(timeParam, env, gg.getGridRange2D.getSpan(0), gg.getGridRange2D.getSpan(1))
this.coverageFactory.create(coverageName, tile, env)
}
def getImage(timeParam: Any, env: Envelope, xDim:Int, yDim:Int) = {
val min = Array(Math.max(env.getMinimum(0), -180) + .00000001, Math.max(env.getMinimum(1), -90) + .00000001)
val max = Array(Math.min(env.getMaximum(0), 180) - .00000001, Math.min(env.getMaximum(1), 90) - .00000001)
val bbox = BoundingBox(Bounds(min(0), max(0)), Bounds(min(1), max(1)))
val ghBbox = TwoGeoHashBoundingBox(bbox,getGeohashPrecision)
val xdim = math.max(1, math.min(xDim, math.round(ghBbox.bbox.longitudeSize /
ghBbox.ur.bbox.longitudeSize - 1)
.asInstanceOf[Int]))
val ydim = math.max(1, math.min(yDim, math.round(ghBbox.bbox.latitudeSize /
ghBbox.ur.bbox.latitudeSize - 1)
.asInstanceOf[Int]))
val bufferList: List[Array[Byte]] =
getScanBuffers(bbox, timeParam, xdim, ydim).map(_.getValue.get()).toList ++ List(Array.ofDim[Byte](xdim*ydim))
val buffer = bufferList.reduce((a, b) => {
for (i <- 0 to a.length - 1) {
a(i) = math.max(a(i) & 0xff, b(i) & 0xff).asInstanceOf[Byte]
}
a
})
ImageUtils.drawImage(Array(buffer),xdim, ydim)
}
def getScanBuffers(bbox: BoundingBox, timeParam: Any, xDim:Int, yDim:Int) = {
val scanner = connector.createBatchScanner(table, auths, 10)
scanner.fetchColumn(new Text(columnFamily), new Text(columnQualifier))
val ranges = BoundingBoxUtil.getRangesByRow(BoundingBox.getGeoHashesFromBoundingBox(bbox))
scanner.setRanges(ranges)
timeParam match {
case date: Date => {
TimestampSetIterator.setupIterator(scanner, date.getTime/1000)
}
case dateRange: DateRange => {
val startDate = dateRange.getMinValue
val endDate = dateRange.getMaxValue
TimestampRangeIterator.setupIterator(scanner, startDate, endDate)
}
case _ => {
val name = "version-" + Random.alphanumeric.take(5)
val cfg = new IteratorSetting(2, name, classOf[VersioningIterator])
VersioningIterator.setMaxVersions(cfg, 1)
scanner.addScanIterator(cfg)
}
}
AggregatingKeyIterator.setupAggregatingKeyIterator(scanner,
1000,
classOf[SurfaceAggregatingIterator],
Map[String,String](aggPrefix + "bottomLeft" -> GeoHash(bbox.ll, getGeohashPrecision).hash,
aggPrefix + "topRight" -> GeoHash(bbox.ur,getGeohashPrecision).hash,
aggPrefix + "precision" -> getGeohashPrecision.toString,
aggPrefix + "dims" -> (xDim +","+yDim)))
SelfClosingBatchScanner(scanner)
}
def getEmptyImage = {
val emptyImage = new BufferedImage(256, 256, BufferedImage.TYPE_4BYTE_ABGR)
val g2D = emptyImage.getGraphics.asInstanceOf[Graphics2D]
val save = g2D.getColor
g2D.setColor(Color.WHITE)
g2D.setComposite(AlphaComposite.Clear)
g2D.fillRect(0, 0, emptyImage.getWidth, emptyImage.getHeight)
g2D.setColor(save)
emptyImage
}
val LOG180 = math.log(180.0)
val LOG2 = math.log(2)
def fromBoundingBox(minY: Double, maxY: Double) =
math.round((LOG180 - math.log(maxY - minY)) / LOG2).intValue()
import org.geotools.coverage.grid.io.GridCoverage2DReader._
override def getMetadataNames: Array[String] = Array[String](TIME_DOMAIN, HAS_TIME_DOMAIN)
override def getMetadataValue(name: String): String = name match{
case TIME_DOMAIN => {
// fetch the list, formatted for GeoServer, of all of the date/times
// for which the current Accumulo surface is available
// (NB: that this should be a list is dictated by the code that
// originally registered the surface with GeoServer)
// short-cut: each of the surface-dates will have a separate "count"
// entry among the metadata; this provides a single list of contiguous
// entries to scan for timestamps
val scanner: Scanner = connector.createScanner(table, auths)
scanner.setRange(new org.apache.accumulo.core.data.Range("~METADATA"))
scanner.fetchColumn(new Text(columnFamily), new Text("count"))
val dtListString = scanner.iterator()
.map(entry => entry.getKey.getTimestamp * 1000L)
.map(millis => new DateTime(millis, DateTimeZone.forID("UTC")))
.map(dt => GeoServerDateFormat.print(dt))
.toList.distinct.mkString(",")
// ensure that at least one (albeit, dummy) date is returned
if (dtListString.trim.length < 1) DefaultDateString else dtListString
}
case HAS_TIME_DOMAIN => "true"
case _ => null
}
}
| jwkessi/geomesa | geomesa-plugin/src/main/scala/org/locationtech/geomesa/plugin/wms/CoverageReader.scala | Scala | apache-2.0 | 10,729 |
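An illustrative URL for the `FORMAT` extractor above, with made-up credentials and hosts, showing how the capture groups are bound:
object CoverageReaderUrlExample {
  // Matches the FORMAT regex in CoverageReader; every value here is a placeholder.
  val exampleUrl: String =
    "accumulo://user:secret@myInstance/myTable/myCF/myCQ" +
      "#resolution=8#zookeepers=zoo1:2181,zoo2:2181#auths=admin"
  // The extractor binds: user = "user", password = "secret", instanceId = "myInstance",
  // table = "myTable", columnFamily = "myCF", columnQualifier = "myCQ",
  // resolutionStr = "8", zookeepers = "zoo1:2181,zoo2:2181", authtokens = "admin".
  // An old-form URI without "#auths=..." leaves authtokens as the empty string.
}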
/*
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.cassandra.lucene.mapping
import java.nio.ByteBuffer
import com.stratio.cassandra.lucene.column.{Column, Columns}
import com.stratio.cassandra.lucene.schema.Schema
import org.apache.cassandra.config.{CFMetaData, ColumnDefinition}
import org.apache.cassandra.db.marshal._
import org.apache.cassandra.db.rows.{Cell, ComplexColumnData, Row}
import org.apache.cassandra.db.{Clustering, DecoratedKey}
import org.apache.cassandra.serializers.CollectionSerializer
import org.apache.cassandra.transport.Server._
import org.apache.cassandra.utils.ByteBufferUtil
import scala.collection.JavaConverters._
/** Maps Cassandra rows to [[Columns]].
*
* @param schema a schema
* @param metadata a table metadata
* @author Andres de la Pena `[email protected]`
*/
class ColumnsMapper(schema: Schema, metadata: CFMetaData) {
val mappedCells: Set[String] = schema.mappedCells().asScala.toSet
val keyColumns: List[ColumnDefinition] = metadata.partitionKeyColumns.asScala
.filter(definition => mappedCells.contains(definition.name.toString)).toList
val clusteringColumns: List[ColumnDefinition] = metadata.clusteringColumns.asScala
.filter(definition => mappedCells.contains(definition.name.toString)).toList
/** Returns the mapped, not deleted at the specified time in seconds and not null [[Columns]]
* contained in the specified row.
*
* @param key the partition key
* @param row the row
* @param now now in seconds
*/
def columns(key: DecoratedKey, row: Row, now: Int): Columns = {
columns(key) ++ columns(row.clustering()) ++ columns(row, now)
}
/** Returns the mapped [[Columns]] contained in the specified partition key. */
private[mapping] def columns(key: DecoratedKey): Columns = {
val components = metadata.getKeyValidator match {
case c: CompositeType => c.split(key.getKey)
case _ => Array[ByteBuffer](key.getKey)
}
(keyColumns :\ Columns()) ((definition, columns) => {
val name = definition.name.toString
val value = components(definition.position)
val valueType = definition.cellValueType
Column(name).withValue(value, valueType) :: columns
})
}
/** Returns the mapped [[Columns]] contained in the specified clustering key. */
private[mapping] def columns(clustering: Clustering): Columns = {
(clusteringColumns :\ Columns()) ((definition, columns_s) => {
columns_s ++ ColumnsMapper.columns(Column(definition.name.toString), definition.`type`, clustering.get(definition.position))
})
}
/** Returns the mapped, not deleted at the specified time in seconds and not null [[Columns]]
* contained in the regular columns of the specified row.
*
* @param row a row
* @param now now in seconds
*/
private[mapping] def columns(row: Row, now: Int): Columns = {
(row.columns.asScala :\ Columns()) ((definition, columns) =>
if (definition.isComplex) {
this.columns(row.getComplexColumnData(definition), now) ++ columns
} else {
this.columns(row.getCell(definition), now) ++ columns
}
)
}
/** Returns the mapped, not deleted at the specified time in seconds and not null [[Columns]]
* contained in the specified complex column data.
*
* @param complexColumnData a complex column data
* @param now now in seconds
*/
private[mapping] def columns(complexColumnData: ComplexColumnData, now: Int): Columns = {
(complexColumnData.asScala :\ Columns()) ((cell, columns) => {
this.columns(cell, now) ++ columns
})
}
/** Returns the mapped, not deleted at the specified time in seconds and not null [[Columns]]
* contained in the specified cell.
*
* @param cell a cell
* @param now now in seconds
*/
private[mapping] def columns(cell: Cell, now: Int): Columns =
if (cell.isTombstone
|| cell.localDeletionTime <= now
|| !mappedCells.contains(cell.column.name.toString))
Columns.empty
else ColumnsMapper.columns(cell)
}
/** Companion object for [[ColumnsMapper]]. */
object ColumnsMapper {
/** Returns [[Columns]] contained in the specified cell.
*
* @param cell a cell
*/
private[mapping] def columns(cell: Cell): Columns = {
if (cell == null) return Columns()
val name = cell.column.name.toString
val comparator = cell.column.`type`
val value = cell.value
val column = Column(name)
comparator match {
case setType: SetType[_] if !setType.isFrozenCollection =>
val itemComparator = setType.nameComparator
val itemValue = cell.path.get(0)
columns(column, itemComparator, itemValue)
case listType: ListType[_] if !listType.isFrozenCollection =>
val itemComparator = listType.valueComparator
columns(column, itemComparator, value)
case mapType: MapType[_, _] if !mapType.isFrozenCollection =>
val valueType = mapType.valueComparator
val keyValue = cell.path.get(0)
val keyType = mapType.nameComparator
val keyName = keyType.compose(keyValue).toString
val keyColumn = column.withUDTName(Column.MAP_KEY_SUFFIX).withValue(keyValue, keyType)
val valueColumn = columns(column.withUDTName(Column.MAP_VALUE_SUFFIX), valueType, value)
val entryColumn = columns(column.withMapName(keyName), valueType, value)
keyColumn + valueColumn ++ entryColumn
case userType: UserType =>
val cellPath = cell.path
if (cellPath == null) {
columns(column, comparator, value)
} else {
val position = ByteBufferUtil.toShort(cellPath.get(0))
val name = userType.fieldNameAsString(position)
val typo = userType.`type`(position)
columns(column.withUDTName(name), typo, value)
}
case _ =>
columns(column, comparator, value)
}
}
private[mapping] def columns(column: Column, serializer: AbstractType[_], value: ByteBuffer)
: Columns = serializer match {
case t: SetType[_] => columns(column, t, value)
case t: ListType[_] => columns(column, t, value)
case t: MapType[_, _] => columns(column, t, value)
case t: UserType => columns(column, t, value)
case t: TupleType => columns(column, t, value)
case _ => Columns(column.withValue(value, serializer))
}
private[mapping] def columns(column: Column, set: SetType[_], value: ByteBuffer): Columns = {
val nameType = set.nameComparator
val bb = ByteBufferUtil.clone(value) // CollectionSerializer read functions are impure
((0 until frozenCollectionSize(bb)) :\ Columns()) (
(_, columns) => {
val itemValue = frozenCollectionValue(bb)
this.columns(column, nameType, itemValue) ++ columns
})
}
private[mapping] def columns(column: Column, list: ListType[_], value: ByteBuffer): Columns = {
val valueType = list.valueComparator
val bb = ByteBufferUtil.clone(value) // CollectionSerializer read functions are impure
((0 until frozenCollectionSize(bb)) :\ Columns()) ((_, columns) => {
val itemValue = frozenCollectionValue(bb)
this.columns(column, valueType, itemValue) ++ columns
})
}
private[mapping] def columns(column: Column, map: MapType[_, _], value: ByteBuffer): Columns = {
val itemKeysType = map.nameComparator
val itemValuesType = map.valueComparator
val bb = ByteBufferUtil.clone(value) // CollectionSerializer read functions are impure
((0 until frozenCollectionSize(bb)) :\ Columns()) ((_, columns) => {
val itemKey = frozenCollectionValue(bb)
val itemValue = frozenCollectionValue(bb)
val itemName = itemKeysType.compose(itemKey).toString
val keyColumn = column.withUDTName(Column.MAP_KEY_SUFFIX).withValue(itemKey, itemKeysType)
val valueColumn = this.columns(column.withUDTName(Column.MAP_VALUE_SUFFIX), itemValuesType, itemValue)
val entryColumn = this.columns(column.withMapName(itemName), itemValuesType, itemValue)
keyColumn + valueColumn ++ entryColumn ++ columns
})
}
private[mapping] def columns(column: Column, udt: UserType, value: ByteBuffer): Columns = {
val itemValues = udt.split(value)
((0 until udt.fieldNames.size) :\ Columns()) ((i, columns) => {
val itemValue = itemValues(i)
if (itemValue == null) {
columns
} else {
val itemName = udt.fieldNameAsString(i)
val itemType = udt.fieldType(i)
val itemColumn = column.withUDTName(itemName)
this.columns(itemColumn, itemType, itemValue) ++ columns
}
})
}
private[mapping] def columns(column: Column, tuple: TupleType, value: ByteBuffer): Columns = {
val itemValues = tuple.split(value)
((0 until tuple.size) :\ Columns()) ((i, columns) => {
val itemValue = itemValues(i)
if (itemValue == null) {
columns
} else {
val itemName = i.toString
val itemType = tuple.`type`(i)
val itemColumn = column.withUDTName(itemName)
this.columns(itemColumn, itemType, itemValue) ++ columns
}
})
}
private[this] def frozenCollectionSize(bb: ByteBuffer): Int =
CollectionSerializer.readCollectionSize(bb, CURRENT_VERSION)
private[this] def frozenCollectionValue(bb: ByteBuffer): ByteBuffer =
CollectionSerializer.readValue(bb, CURRENT_VERSION)
}
| Stratio/cassandra-lucene-index | plugin/src/main/scala/com/stratio/cassandra/lucene/mapping/ColumnsMapper.scala | Scala | apache-2.0 | 9,987 |
package extruder.instances
import cats.Eq
import cats.instances.either._
import cats.instances.int._
import cats.instances.string._
import cats.laws.discipline.AlternativeTests
import extruder.core.{Parser, Show}
import org.scalacheck.Arbitrary
import org.scalatest.funsuite.AnyFunSuite
import org.scalatestplus.scalacheck.ScalaCheckDrivenPropertyChecks
import org.typelevel.discipline.scalatest.Discipline
class ParserInstancesSuite extends AnyFunSuite with ScalaCheckDrivenPropertyChecks with Discipline {
import ParserInstancesSuite._
checkAll("Parser", AlternativeTests[Parser].alternative[Int, Int, Int])
test("Can flatMap the parse result") {
forAll { a: Int =>
assert(Parser[Int].flatMapResult(i => Right(i.toLong)).parse(Show[Int].show(a)) === Right(a.toLong))
}
}
}
object ParserInstancesSuite {
implicit def faArb[A](implicit arb: Arbitrary[A]): Arbitrary[Parser[A]] =
Arbitrary(arb.arbitrary.map(a => Parser(_ => Right(a))))
implicit def fabcEq: Eq[Parser[(Int, Int, Int)]] = new Eq[Parser[(Int, Int, Int)]] {
override def eqv(x: Parser[(Int, Int, Int)], y: Parser[(Int, Int, Int)]): Boolean =
x.parse("") == y.parse("")
}
implicit def faEq[A](implicit eq: Eq[Either[String, A]]): Eq[Parser[A]] = Eq.by(_.parse(""))
}
| janstenpickle/extruder | core/src/test/scala/extruder/instances/ParserInstancesSuite.scala | Scala | mit | 1,280 |
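A small sketch of the `Parser` behaviour exercised by the suite above, assuming extruder's `Parser[Int]` instance parses integers from strings (as the `flatMapResult` test implies):
import extruder.core.Parser
object ParserExample extends App {
  // Widen a parsed Int to Long, mirroring the flatMapResult test above.
  val longParser: Parser[Long] = Parser[Int].flatMapResult(i => Right(i.toLong))
  println(longParser.parse("42"))   // expected Right(42L), assuming the Int parser accepts "42"
  println(longParser.parse("oops")) // expected Left(...) carrying the Int parser's error message
}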
/*
* Copyright (C) 2015 Cotiviti Labs ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.signalcollect.triplerush.vertices.query
import scala.concurrent.Promise
import scala.util.Success
import scala.concurrent.Future
import scala.concurrent.CanAwait
import scala.concurrent.duration.Duration
/**
* Tickets are used for synchronizing asynchronous operations on the TripleRush index.
* An operation starts out with an initial number of tickets. The tickets are split up between sub-operations on the index
* and returned by each sub-operation upon completion of that operation.
*
* If the initial tickets are not enough, the sub-operation is not executed and it returns its ticket count multiplied by -1.
* This means that the minus prefix indicates that the asynchronous operation could not be completed, for example because the
* number of tickets was too low or the branching factor of the index operation too high.
*
* The initial ticket values can only ever be positive, so it is impossible for a correctly working component to return
* Long.MinValue as the number of tickets.
*/
class TicketSynchronization(
name: String,
val expectedTickets: Long = Long.MaxValue,
outOfTicketsCause: String = "Ran out of tickets: branching factor too high or ticket count too low.",
onFailure: Option[Exception => Unit] = Some(e => throw e)) {
def receivedTickets: Long = _receivedTickets
private[this] var _receivedTickets: Long = 0L
private[this] var ranOutOfTickets: Boolean = false
private[this] var onSuccessHandlers: List[Function0[Unit]] = Nil
private[this] var onFailureHandlers: List[Exception => Unit] = onFailure.toList
private[this] var onCompleteHandlers: List[Boolean => Unit] = Nil
def receive(t: Long): Unit = {
_receivedTickets += {
if (t < 0) {
-t
} else {
t
}
}
if (t < 0) {
ranOutOfTickets = true
}
if (_receivedTickets == expectedTickets) {
if (ranOutOfTickets) {
reportFailure(new OutOfTicketsException(outOfTicketsCause))
} else {
reportSuccess
}
} else if (_receivedTickets > expectedTickets) {
val e = new TooManyTicketsReceivedException
reportFailure(e)
}
}
private[this] def reportSuccess(): Unit = {
reportComplete(success = true)
onSuccessHandlers.foreach(_())
}
private[this] def reportFailure(e: Exception): Unit = {
reportComplete(success = false)
onFailureHandlers.foreach(_(e))
}
private[this] def reportComplete(success: Boolean): Unit = {
onCompleteHandlers.foreach(_(success))
}
private[this] def clearHandlers(): Unit = {
onSuccessHandlers = Nil
onFailureHandlers = Nil
onCompleteHandlers = Nil
}
def onSuccess(f: => Unit): Unit = {
onSuccessHandlers ::= f _
}
def onFailure(f: Exception => Unit): Unit = {
onFailureHandlers ::= f
}
def onComplete(f: Boolean => Unit): Unit = {
onCompleteHandlers ::= f
}
}
class OutOfTicketsException(m: String) extends Exception(m)
class TooManyTicketsReceivedException extends Exception
| uzh/triplerush | src/main/scala/com/signalcollect/triplerush/vertices/query/TicketSynchronization.scala | Scala | apache-2.0 | 3,651 |
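A minimal usage sketch of `TicketSynchronization` as documented above: the operation starts with an expected ticket total, and the success handlers fire once all tickets have been returned (the name and ticket numbers are illustrative):
import com.signalcollect.triplerush.vertices.query.TicketSynchronization
object TicketSynchronizationExample extends App {
  val sync = new TicketSynchronization(
    name = "example-index-query",
    expectedTickets = 100L,
    onFailure = Some(e => println(s"failed: ${e.getMessage}")))
  sync.onSuccess { println("all tickets returned, operation complete") }
  sync.onComplete { succeeded => println(s"completed, success = $succeeded") }
  // Two sub-operations return their ticket shares; a negative value would signal
  // that a sub-operation ran out of tickets.
  sync.receive(60L)
  sync.receive(40L) // total reaches expectedTickets => success handlers fire
}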
package nestapp.websocketserver
import akka.actor.{Actor, ActorLogging, Props}
import spray.can.Http
/**
* Actor which acts as a simple WebSocket server.
* For documentation, see spray.io and spray-websocket.
*/
class WebSocketServer extends Actor with ActorLogging {
def receive = {
// when a new connection comes in we register a WebSocketConnection actor as the per connection handler
case Http.Connected(remoteAddress, localAddress) =>
val serverConnection = sender()
val conn = context.actorOf(Props(classOf[WebSocketWorker], serverConnection))
serverConnection ! Http.Register(conn)
case PushToChildren(msg: String) =>
val children = context.children
println("pushing to all children : " + msg)
children.foreach(ref => ref ! Push(msg))
}
}
| rafaelkyrdan/nest-app | src/main/scala/nestapp/websocketserver/WebSocketServer.scala | Scala | mit | 801 |
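A hedged sketch of wiring the actor above into a running server with spray-can/spray-websocket; `PushToChildren`, `Push`, `WebSocketWorker`, and the `spray.can.server.UHttp` extension are assumed from the surrounding project and the spray-websocket library, and are not defined in this file:
import akka.actor.{ActorSystem, Props}
import akka.io.IO
import spray.can.Http
import spray.can.server.UHttp
import nestapp.websocketserver.{PushToChildren, WebSocketServer}
object WebSocketServerMain extends App {
  implicit val system = ActorSystem("websocket-system")
  // Register the WebSocketServer actor as the HTTP listener via spray-websocket's UHttp extension.
  val server = system.actorOf(Props[WebSocketServer], "websocket-server")
  IO(UHttp) ! Http.Bind(server, interface = "0.0.0.0", port = 8080)
  // Broadcast a message to every connected WebSocketWorker child.
  server ! PushToChildren("hello, connected clients")
}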
/*
* Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com>
* Copyright (C) 2017-2018 Alexis Seigneurin.
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.streams.scala
import java.util.Properties
import java.util.regex.Pattern
import org.junit.Assert._
import org.junit._
import org.junit.rules.TemporaryFolder
import org.apache.kafka.streams.KeyValue
import org.apache.kafka.streams._
import org.apache.kafka.streams.scala.kstream._
import org.apache.kafka.streams.integration.utils.{EmbeddedKafkaCluster, IntegrationTestUtils}
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization._
import org.apache.kafka.common.utils.MockTime
import org.apache.kafka.test.{IntegrationTest, TestUtils}
import ImplicitConversions._
import org.junit.experimental.categories.Category
/**
* Test suite that does a classic word count example.
* <p>
* The suite contains the test case using Scala APIs `testShouldCountWords` and the same test case using the
* Java APIs `testShouldCountWordsJava`. The idea is to demonstrate that both generate the same result.
* <p>
* Note: In the current project settings SAM type conversion is turned off as it's experimental in Scala 2.11.
* Hence the native Java API based version is more verbose.
*/
@Category(Array(classOf[IntegrationTest]))
class WordCountTest extends WordCountTestData {
private val privateCluster: EmbeddedKafkaCluster = new EmbeddedKafkaCluster(1)
@Rule def cluster: EmbeddedKafkaCluster = privateCluster
final val alignedTime = (System.currentTimeMillis() / 1000 + 1) * 1000
val mockTime: MockTime = cluster.time
mockTime.setCurrentTimeMs(alignedTime)
val tFolder: TemporaryFolder = new TemporaryFolder(TestUtils.tempDirectory())
@Rule def testFolder: TemporaryFolder = tFolder
@Before
def startKafkaCluster(): Unit = {
cluster.createTopic(inputTopic)
cluster.createTopic(outputTopic)
cluster.createTopic(inputTopicJ)
cluster.createTopic(outputTopicJ)
}
@Test def testShouldCountWords(): Unit = {
import Serdes._
val streamsConfiguration = getStreamsConfiguration()
val streamBuilder = new StreamsBuilder
val textLines = streamBuilder.stream[String, String](inputTopic)
val pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS)
// generate word counts
val wordCounts: KTable[String, Long] =
textLines
.flatMapValues(v => pattern.split(v.toLowerCase))
.groupBy((_, v) => v)
.count()
// write to output topic
wordCounts.toStream.to(outputTopic)
val streams: KafkaStreams = new KafkaStreams(streamBuilder.build(), streamsConfiguration)
streams.start()
// produce and consume synchronously
val actualWordCounts: java.util.List[KeyValue[String, Long]] = produceNConsume(inputTopic, outputTopic)
streams.close()
import collection.JavaConverters._
assertEquals(actualWordCounts.asScala.take(expectedWordCounts.size).sortBy(_.key), expectedWordCounts.sortBy(_.key))
}
@Test def testShouldCountWordsMaterialized(): Unit = {
import Serdes._
val streamsConfiguration = getStreamsConfiguration()
val streamBuilder = new StreamsBuilder
val textLines = streamBuilder.stream[String, String](inputTopic)
val pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS)
// generate word counts
val wordCounts: KTable[String, Long] =
textLines
.flatMapValues(v => pattern.split(v.toLowerCase))
.groupBy((k, v) => v)
.count()(Materialized.as("word-count"))
// write to output topic
wordCounts.toStream.to(outputTopic)
val streams: KafkaStreams = new KafkaStreams(streamBuilder.build(), streamsConfiguration)
streams.start()
// produce and consume synchronously
val actualWordCounts: java.util.List[KeyValue[String, Long]] = produceNConsume(inputTopic, outputTopic)
streams.close()
import collection.JavaConverters._
assertEquals(actualWordCounts.asScala.take(expectedWordCounts.size).sortBy(_.key), expectedWordCounts.sortBy(_.key))
}
@Test def testShouldCountWordsJava(): Unit = {
import org.apache.kafka.streams.{KafkaStreams => KafkaStreamsJ, StreamsBuilder => StreamsBuilderJ}
import org.apache.kafka.streams.kstream.{
KTable => KTableJ,
KStream => KStreamJ,
KGroupedStream => KGroupedStreamJ,
_
}
import collection.JavaConverters._
val streamsConfiguration = getStreamsConfiguration()
streamsConfiguration.put(StreamsConfig.DEFAULT_KEY_SERDE_CLASS_CONFIG, Serdes.String.getClass.getName)
streamsConfiguration.put(StreamsConfig.DEFAULT_VALUE_SERDE_CLASS_CONFIG, Serdes.String.getClass.getName)
val streamBuilder = new StreamsBuilderJ
val textLines: KStreamJ[String, String] = streamBuilder.stream[String, String](inputTopicJ)
val pattern = Pattern.compile("\\W+", Pattern.UNICODE_CHARACTER_CLASS)
val splits: KStreamJ[String, String] = textLines.flatMapValues {
new ValueMapper[String, java.lang.Iterable[String]] {
def apply(s: String): java.lang.Iterable[String] = pattern.split(s.toLowerCase).toIterable.asJava
}
}
val grouped: KGroupedStreamJ[String, String] = splits.groupBy {
new KeyValueMapper[String, String, String] {
def apply(k: String, v: String): String = v
}
}
val wordCounts: KTableJ[String, java.lang.Long] = grouped.count()
wordCounts.toStream.to(outputTopicJ, Produced.`with`(Serdes.String, Serdes.JavaLong))
val streams: KafkaStreamsJ = new KafkaStreamsJ(streamBuilder.build(), streamsConfiguration)
streams.start()
val actualWordCounts: java.util.List[KeyValue[String, Long]] = produceNConsume(inputTopicJ, outputTopicJ)
streams.close()
assertEquals(actualWordCounts.asScala.take(expectedWordCounts.size).sortBy(_.key), expectedWordCounts.sortBy(_.key))
}
private def getStreamsConfiguration(): Properties = {
val streamsConfiguration: Properties = new Properties()
streamsConfiguration.put(StreamsConfig.APPLICATION_ID_CONFIG, "wordcount-test")
streamsConfiguration.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers())
streamsConfiguration.put(StreamsConfig.COMMIT_INTERVAL_MS_CONFIG, "10000")
streamsConfiguration.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
streamsConfiguration.put(StreamsConfig.STATE_DIR_CONFIG, testFolder.getRoot.getPath)
streamsConfiguration
}
private def getProducerConfig(): Properties = {
val p = new Properties()
p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers())
p.put(ProducerConfig.ACKS_CONFIG, "all")
p.put(ProducerConfig.RETRIES_CONFIG, "0")
p.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
p.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer])
p
}
private def getConsumerConfig(): Properties = {
val p = new Properties()
p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers())
p.put(ConsumerConfig.GROUP_ID_CONFIG, "wordcount-scala-integration-test-standard-consumer")
p.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[StringDeserializer])
p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[LongDeserializer])
p
}
private def produceNConsume(inputTopic: String, outputTopic: String): java.util.List[KeyValue[String, Long]] = {
val linesProducerConfig: Properties = getProducerConfig()
import collection.JavaConverters._
IntegrationTestUtils.produceValuesSynchronously(inputTopic, inputValues.asJava, linesProducerConfig, mockTime)
val consumerConfig = getConsumerConfig()
IntegrationTestUtils.waitUntilMinKeyValueRecordsReceived(consumerConfig, outputTopic, expectedWordCounts.size)
}
}
trait WordCountTestData {
val inputTopic = s"inputTopic"
val outputTopic = s"outputTopic"
val inputTopicJ = s"inputTopicJ"
val outputTopicJ = s"outputTopicJ"
val inputValues = List(
"Hello Kafka Streams",
"All streams lead to Kafka",
"Join Kafka Summit",
"И теперь пошли русские слова"
)
val expectedWordCounts: List[KeyValue[String, Long]] = List(
new KeyValue("hello", 1L),
new KeyValue("all", 1L),
new KeyValue("streams", 2L),
new KeyValue("lead", 1L),
new KeyValue("to", 1L),
new KeyValue("join", 1L),
new KeyValue("kafka", 3L),
new KeyValue("summit", 1L),
new KeyValue("и", 1L),
new KeyValue("теперь", 1L),
new KeyValue("пошли", 1L),
new KeyValue("русские", 1L),
new KeyValue("слова", 1L)
)
}
| KevinLiLu/kafka | streams/streams-scala/src/test/scala/org/apache/kafka/streams/scala/WordCountTest.scala | Scala | apache-2.0 | 9,577 |
package outwatch
import cats.effect.IO
import monix.reactive.subjects.PublishSubject
import monix.reactive.Observable
import org.scalajs.dom.{html, _}
import outwatch.dsl._
import colibri.ext.monix._
import org.scalajs.dom.raw.EventInit
import outwatch.reactive.handlers.monix._
import outwatch.util.LocalStorage
import scala.scalajs.js
class DomEventSpec extends JSDomAsyncSpec {
"EventStreams" should "emit and receive events correctly" in {
val vtree = Handler.createF[IO, MouseEvent].map { handler =>
val buttonDisabled = handler.map(_ => true).startWith(Seq(false))
div(idAttr := "click", onClick --> handler,
button(idAttr := "btn", disabled <-- buttonDisabled)
)
}
for {
vtree <- vtree
_ <- OutWatch.renderInto[IO]("#app", vtree)
hasD <- IO(document.getElementById("btn").hasAttribute("disabled"))
_ <- IO(hasD shouldBe false)
event <- IO {
new Event("click", new EventInit {
bubbles = true
cancelable = false
})
}
_ <- IO(document.getElementById("click").dispatchEvent(event))
d <- IO(document.getElementById("btn").getAttribute("disabled"))
_ <- IO(d shouldBe "")
} yield succeed
}
it should "be converted to a generic emitter correctly" in {
val message = "ad"
val vtree = Handler.createF[IO, String].map { handler =>
div(idAttr := "click", onClick.use(message) --> handler,
span(idAttr := "child", handler)
)
}
for {
vtree <- vtree
_ <- OutWatch.renderInto[IO]("#app", vtree)
} yield {
document.getElementById("child").innerHTML shouldBe ""
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("click").dispatchEvent(event)
document.getElementById("child").innerHTML shouldBe message
//dispatch another event
document.getElementById("click").dispatchEvent(event)
document.getElementById("child").innerHTML shouldBe message
}
}
it should "be converted to a generic stream emitter correctly" in {
Handler.createF[IO, String].flatMap { messages =>
val vtree = Handler.createF[IO, String].map { stream =>
div(idAttr := "click", onClick.useLatest(messages) --> stream,
span(idAttr := "child", stream)
)
}
for {
vtree <- vtree
_ <- OutWatch.renderInto[IO]("#app", vtree)
} yield {
document.getElementById("child").innerHTML shouldBe ""
val firstMessage = "First"
messages.onNext(firstMessage)
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("click").dispatchEvent(event)
document.getElementById("child").innerHTML shouldBe firstMessage
//dispatch another event
document.getElementById("click").dispatchEvent(event)
document.getElementById("child").innerHTML shouldBe firstMessage
val secondMessage = "Second"
messages.onNext(secondMessage)
document.getElementById("click").dispatchEvent(event)
document.getElementById("child").innerHTML shouldBe secondMessage
}
}
}
it should "be able to set the value of a text field" in {
val values = PublishSubject[String]()
val vtree = input(idAttr := "input", attributes.value <-- values)
OutWatch.renderInto[IO]("#app", vtree).map {_ =>
val patched = document.getElementById("input").asInstanceOf[html.Input]
patched.value shouldBe ""
val value1 = "Hello"
values.onNext(value1)
patched.value shouldBe value1
val value2 = "World"
values.onNext(value2)
patched.value shouldBe value2
values.onNext("")
patched.value shouldBe ""
}
}
it should "preserve user input after setting defaultValue" in {
val defaultValues = PublishSubject[String]()
val vtree = input(idAttr := "input", attributes.defaultValue <-- defaultValues)
OutWatch.renderInto[IO]("#app", vtree).map { _ =>
val patched = document.getElementById("input").asInstanceOf[html.Input]
patched.value shouldBe ""
val value1 = "Hello"
defaultValues.onNext(value1)
patched.value shouldBe value1
val userInput = "user input"
patched.value = userInput
defaultValues.onNext("GoodByte")
patched.value shouldBe userInput
}
}
it should "set input value to the same value after user change" in {
val values = PublishSubject[String]()
val vtree = input(idAttr := "input", attributes.value <-- values)
OutWatch.renderInto[IO]("#app", vtree).map { _ =>
val patched = document.getElementById("input").asInstanceOf[html.Input]
patched.value shouldBe ""
val value1 = "Hello"
values.onNext(value1)
patched.value shouldBe value1
patched.value = "user input"
values.onNext("Hello")
patched.value shouldBe value1
}
}
it should "be bindable to a list of children" in {
val state = PublishSubject[Seq[VNode]]()
val vtree = div(
ul(idAttr := "list", state)
)
OutWatch.renderInto[IO]("#app", vtree).map { _ =>
val list = document.getElementById("list")
list.childElementCount shouldBe 0
val first = "Test"
state.onNext(Seq(span(first)))
list.childElementCount shouldBe 1
list.innerHTML.contains(first) shouldBe true
val second = "Hello"
state.onNext(Seq(span(first), span(second)))
list.childElementCount shouldBe 2
list.innerHTML.contains(first) shouldBe true
list.innerHTML.contains(second) shouldBe true
val third = "World"
state.onNext(Seq(span(first), span(second), span(third)))
list.childElementCount shouldBe 3
list.innerHTML.contains(first) shouldBe true
list.innerHTML.contains(second) shouldBe true
list.innerHTML.contains(third) shouldBe true
state.onNext(Seq(span(first), span(third)))
list.childElementCount shouldBe 2
list.innerHTML.contains(first) shouldBe true
list.innerHTML.contains(third) shouldBe true
}
}
it should "be able to handle two events of the same type" in {
val messages = ("Hello", "World")
val node = Handler.createF[IO, String].flatMap { first =>
Handler.createF[IO, String].map { second =>
div(
button(idAttr := "click", onClick.use(messages._1) --> first, onClick.use(messages._2) --> second),
span(idAttr := "first", first),
span(idAttr := "second", second)
)
}
}
for {
node <- node
_ <- OutWatch.renderInto[IO]("#app", node)
} yield {
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("click").dispatchEvent(event)
document.getElementById("first").innerHTML shouldBe messages._1
document.getElementById("second").innerHTML shouldBe messages._2
}
}
it should "be able to be transformed by a function in place" in {
val number = 42
val toTuple = (e: MouseEvent) => (e, number)
val node = Handler.createF[IO, (MouseEvent, Int)].map { stream =>
div(
button(idAttr := "click", onClick.map(toTuple) --> stream),
span(idAttr := "num", stream.map(_._2))
)
}
for {
node <- node
_ <- OutWatch.renderInto[IO]("#app", node)
} yield {
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("click").dispatchEvent(event)
document.getElementById("num").innerHTML shouldBe number.toString
}
}
it should ".transform should work as expected" in {
val numbers = Observable(1, 2)
val transformer = (e: Observable[MouseEvent]) => e.concatMap(_ => numbers)
val node = Handler.createF[IO, Int].map { stream =>
val state = stream.scan(List.empty[Int])((l, s) => l :+ s)
div(
button(idAttr := "click", onClick.transformLifted(transformer) --> stream),
span(idAttr := "num", state.map(nums => nums.map(num => span(num.toString))))
)
}
for {
node <- node
_ <- OutWatch.renderInto[IO]("#app", node)
} yield {
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("click").dispatchEvent(event)
document.getElementById("num").innerHTML shouldBe "<span>1</span><span>2</span>"
}
}
it should "be able to be transformed from strings" in {
val number = 42
val onInputValue = onInput.value
val node = Handler.createF[IO, Int].map { stream =>
div(
input(idAttr := "input", onInputValue.use(number) --> stream),
span(idAttr := "num", stream)
)
}
for {
node <- node
_ <- OutWatch.renderInto[IO]("#app", node)
} yield {
val inputEvt = new Event("input", new EventInit {
bubbles = false
cancelable = true
})
document.getElementById("input").dispatchEvent(inputEvt)
document.getElementById("num").innerHTML shouldBe number.toString
}
}
it should "handler can trigger side-effecting functions" in {
var triggeredEventFunction = 0
var triggeredIntFunction = 0
var triggeredFunction = 0
var triggeredFunction2 = 0
val stream = PublishSubject[String]()
val node = {
div(
button(idAttr := "button",
onClick foreach (_ => triggeredEventFunction += 1),
onClick.use(1) foreach (triggeredIntFunction += _),
onClick foreach { triggeredFunction += 1 },
onSnabbdomUpdate foreach { triggeredFunction2 += 1 },
stream
)
)
}
OutWatch.renderInto[IO]("#app", node).map {_ =>
val inputEvt = new Event("click", new EventInit {
bubbles = false
cancelable = true
})
document.getElementById("button").dispatchEvent(inputEvt)
stream.onNext("woop")
triggeredEventFunction shouldBe 1
triggeredIntFunction shouldBe 1
triggeredFunction shouldBe 1
triggeredFunction2 shouldBe 1
document.getElementById("button").dispatchEvent(inputEvt)
stream.onNext("waap")
triggeredEventFunction shouldBe 2
triggeredIntFunction shouldBe 2
triggeredFunction shouldBe 2
triggeredFunction2 shouldBe 2
}
}
it should "correctly be transformed from latest in observable" in {
val node = Handler.createF[IO, String].flatMap { submit =>
val state = submit.scan(List.empty[String])((l, s) => l :+ s)
Handler.createF[IO, String].map { stream =>
div(
input(idAttr := "input", tpe := "text", onInput.value --> stream),
button(idAttr := "submit", onClick.useLatest(stream) --> submit),
ul(idAttr := "items",
state.map(items => items.map(it => li(it)))
)
)
}
}
for {
node <- node
_ <- OutWatch.renderInto[IO]("#app", node)
} yield {
val inputElement = document.getElementById("input").asInstanceOf[html.Input]
val submitButton = document.getElementById("submit")
val inputEvt = new Event("input", new EventInit {
bubbles = false
cancelable = true
})
val clickEvt = new Event("click", new EventInit {
bubbles = true
cancelable = true
})
inputElement.value = "item 1"
inputElement.dispatchEvent(inputEvt)
inputElement.value = "item 2"
inputElement.dispatchEvent(inputEvt)
inputElement.value = "item 3"
inputElement.dispatchEvent(inputEvt)
submitButton.dispatchEvent(clickEvt)
document.getElementById("items").childNodes.length shouldBe 1
}
}
"Boolean Props" should "be handled corectly" in {
val node = Handler.createF[IO, Boolean].map { checkValue =>
div(
input(idAttr := "checkbox", `type` := "Checkbox", checked <-- checkValue),
button(idAttr := "on_button", onClick.use(true) --> checkValue, "On"),
button(idAttr := "off_button", onClick.use(false) --> checkValue, "Off")
)
}
for {
node <- node
_ <- OutWatch.renderInto[IO]("#app", node)
} yield {
val checkbox = document.getElementById("checkbox").asInstanceOf[html.Input]
val onButton = document.getElementById("on_button")
val offButton = document.getElementById("off_button")
checkbox.checked shouldBe false
val clickEvt = new Event("click", new EventInit {
bubbles = true
cancelable = true
})
onButton.dispatchEvent(clickEvt)
checkbox.checked shouldBe true
offButton.dispatchEvent(clickEvt)
checkbox.checked shouldBe false
}
}
"DomWindowEvents and DomDocumentEvents" should "trigger correctly" in {
var docClicked = false
var winClicked = false
events.window.onClick.foreach(_ => winClicked = true)
events.document.onClick.foreach(_ => docClicked = true)
val node = div(button(idAttr := "input", tpe := "checkbox"))
OutWatch.renderInto[IO]("#app", node).map { _ =>
val inputEvt = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("input").dispatchEvent(inputEvt)
winClicked shouldBe true
docClicked shouldBe true
}
}
"EmitterOps" should "correctly work on events" in {
val node = Handler.createF[IO, String].flatMap { _ =>
for {
stringStream <- Handler.createF[IO, String]
doubleStream <- Handler.createF[IO, Double]
boolStream <- Handler.createF[IO, Boolean]
htmlElementStream <- Handler.createF[IO, html.Element]
svgElementTupleStream <- Handler.createF[IO, (org.scalajs.dom.svg.Element, org.scalajs.dom.svg.Element)]
elem = div(
input(
idAttr := "input", tpe := "text",
onSearch.target.value --> stringStream,
onSearch.target.valueAsNumber --> doubleStream,
onSearch.target.checked --> boolStream,
onClick.target.value --> stringStream,
// uses currentTarget and assumes html.Input type by default
onClick.value --> stringStream,
onClick.valueAsNumber --> doubleStream,
onChange.checked --> boolStream,
onClick.filter(_ => true).value --> stringStream,
onSnabbdomInsert.asHtml --> htmlElementStream,
onSnabbdomUpdate.asSvg --> svgElementTupleStream
),
ul(idAttr := "items")
)
} yield elem
}
for {
node <- node
_ <- OutWatch.renderInto[IO]("#app", node)
} yield {
document.getElementById("input") should not be null
}
}
it should "correctly be compiled with currentTarget" in {
Handler.createF[IO, String].flatMap { stringHandler =>
def modifier: VDomModifier = onDrag.value --> stringHandler
Handler.createF[IO, String].flatMap { _ =>
for {
stream <- Handler.createF[IO, String]
eventStream <- Handler.createF[IO, MouseEvent]
elem = div(
input(
idAttr := "input", tpe := "text",
onSearch.target.value --> stream,
onClick.value --> stream,
modifier
),
ul(idAttr := "items"))
_ <- OutWatch.renderInto[IO]("#app", elem)
} yield {
document.getElementById("input") should not be null
}
}
}
}
"Children stream" should "work for string sequences" in {
val myStrings: Observable[Seq[String]] = Observable(Seq("a", "b"))
val node = div(idAttr := "strings",
myStrings
)
OutWatch.renderInto[IO]("#app", node).map( _ =>
document.getElementById("strings").innerHTML shouldBe "ab"
)
}
"LocalStorage" should "have handler with proper events" in {
var option: Option[Option[String]] = None
LocalStorage.handler[IO]("hans").map { handler =>
handler.foreach { o => option = Some(o) }
option shouldBe Some(None)
handler.onNext(Some("gisela"))
option shouldBe Some(Some("gisela"))
handler.onNext(None)
option shouldBe Some(None)
}
}
it should "have handlerWithEventsOnly with proper events" in {
var option: Option[Option[String]] = None
LocalStorage.handlerWithEventsOnly[IO]("hans").map {handler =>
handler.foreach { o => option = Some(o) }
option shouldBe Some(None)
handler.onNext(Some("gisela"))
option shouldBe Some(None)
handler.onNext(None)
option shouldBe Some(None)
}
}
it should "have handlerWithEventsOnly with initial value" in {
import org.scalajs.dom.window.localStorage
localStorage.setItem("hans", "wurst")
var option: Option[Option[String]] = None
LocalStorage.handlerWithEventsOnly[IO]("hans").map { handler =>
handler.foreach { o => option = Some(o) }
option shouldBe Some(Some("wurst"))
}
}
it should "have handlerWithoutEvents with proper events" in {
var option: Option[Option[String]] = None
LocalStorage.handlerWithoutEvents[IO]("hans").map { handler =>
handler.foreach { o => option = Some(o) }
option shouldBe Some(None)
handler.onNext(Some("gisela"))
option shouldBe Some(Some("gisela"))
handler.onNext(None)
option shouldBe Some(None)
}
}
"Emitterbuilder" should "preventDefault (compile only)" in {
val node = div(
idAttr := "click",
onClick.filter(_ => true).preventDefault.map(_ => 4) foreach {()},
onClick.preventDefault.map(_ => 3) foreach {()}
)
val test = for {
_ <- OutWatch.renderInto[IO]("#app", node)
_ <- IO {
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("click").dispatchEvent(event)
}
} yield {
succeed
}
test
}
it should "stopPropagation" in {
var triggeredFirst = false
var triggeredSecond = false
val node = div(
onClick foreach {triggeredSecond = true},
div(
idAttr := "click",
onClick.map(x => x).stopPropagation foreach {triggeredFirst = true}
)
)
OutWatch.renderInto[IO]("#app", node).map { _ =>
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
document.getElementById("click").dispatchEvent(event)
triggeredFirst shouldBe true
triggeredSecond shouldBe false
}
}
"Global dom events" should "return an observable" in {
var clicked = 0
val sub = events.window.onClick.foreach { _ =>
clicked += 1
}
def newEvent() = {
new Event("click", new EventInit {
bubbles = true
cancelable = false
})
}
clicked shouldBe 0
window.dispatchEvent(newEvent())
clicked shouldBe 1
window.dispatchEvent(newEvent())
clicked shouldBe 2
sub.cancel()
window.dispatchEvent(newEvent())
clicked shouldBe 2
}
it should "have sync operations" in {
var clicked = List.empty[String]
val sub = events.window.onClick.stopPropagation.foreach { ev =>
clicked ++= ev.asInstanceOf[js.Dynamic].testtoken.asInstanceOf[String] :: Nil
}
def newEvent(token: String) = {
val event = new Event("click", new EventInit {
bubbles = true
cancelable = false
})
event.asInstanceOf[js.Dynamic].stopPropagation = { () =>
event.asInstanceOf[js.Dynamic].testtoken = token
()
}: js.Function0[Unit]
event
}
clicked shouldBe Nil
window.dispatchEvent(newEvent("a"))
clicked shouldBe List("a")
window.dispatchEvent(newEvent("b"))
clicked shouldBe List("a", "b")
sub.cancel()
window.dispatchEvent(newEvent("c"))
clicked shouldBe List("a", "b")
}
}
|
OutWatch/outwatch
|
tests/src/test/scala/outwatch/DomEventSpec.scala
|
Scala
|
apache-2.0
| 20,253 |
package org.brandonhaynes.iterators
import scala.language.{implicitConversions, reflectiveCalls}
import org.apache.hadoop.mapreduce.ReduceContext
import org.brandonhaynes.support.Implicits._
/** Implicit helper for obtaining an iterator over the pairs resulting from the intermediate shuffle
*
* Usage: {{{ context.intermediatePairs }}}
*/
object IntermediatePairIterator {
/** Implicit conversion between a Hadoop reducer context and an object exposing the iteration of intermediate pairs */
implicit def toIntermediateIterator[Key, Value, OutputKey, OutputValue](
context:ReduceContext[Key, Value, OutputKey, OutputValue]) =
new { def intermediatePairs:Iterator[(Key, Value)] = new IntermediatePairIterator(context) }
}
/** A class that iterates over the intermediate pairs drawn from a Hadoop reducer context
*
* @param context Hadoop context over which to iterate
* @tparam Key Key used in the Hadoop job
* @tparam Value Value used in the Hadoop job
* @tparam OutputKey Output key type used in the Hadoop job
* @tparam OutputValue Output value type used in the Hadoop job
*/
class IntermediatePairIterator[Key, Value, OutputKey, OutputValue](
context:ReduceContext[Key, Value, OutputKey, OutputValue])
extends Iterator[(Key, Value)] {
// Only iterate if we have a valid initial key
private var _hasNext = context.getCurrentKey match {
case reference:AnyRef => reference != null
case _ => true
}
def hasNext = _hasNext
def next() = {
if(hasNext) {
// Save our result, iterate to the next Hadoop key/value pair, and return the saved result
val result = (context.getCurrentKey.cloneIfWritable(context), context.getCurrentValue)
_hasNext = context.nextKeyValue()
result
} else
throw new NoSuchElementException
}
}
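// Illustrative usage sketch (not part of the original file): it shows how the
// `intermediatePairs` implicit conversion might be used from a reducer's `run`
// method to stream every shuffled (key, value) pair instead of relying on the
// grouped `reduce(key, values)` callbacks. The `WordPassThroughReducer` name
// and the Text/IntWritable types are assumptions chosen only for this example.
object IntermediatePairIteratorUsageSketch {
  import org.apache.hadoop.io.{IntWritable, Text}
  import org.apache.hadoop.mapreduce.Reducer
  import org.brandonhaynes.iterators.IntermediatePairIterator._

  class WordPassThroughReducer extends Reducer[Text, IntWritable, Text, IntWritable] {
    override def run(context: Reducer[Text, IntWritable, Text, IntWritable]#Context): Unit = {
      // Position the context on the first pair; the iterator starts from the
      // current key and advances the context itself via nextKeyValue().
      if (context.nextKeyValue()) {
        // Because run() is overridden, reduce() is never invoked; every pair is
        // simply written back out as it arrives from the shuffle.
        context.intermediatePairs.foreach { case (key, value) =>
          context.write(key, value)
        }
      }
    }
  }
}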
|
BrandonHaynes/timr
|
src/org/brandonhaynes/iterators/IntermediatePairIterator.scala
|
Scala
|
mit
| 1,724 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.ari.graphql
import com.linkedin.data.DataMap
import com.linkedin.data.schema.RecordDataSchema
import com.typesafe.scalalogging.StrictLogging
import org.coursera.naptime.ResourceName
import org.coursera.naptime.ari.graphql.schema.NaptimePaginatedResourceField
import org.coursera.naptime.ari.graphql.schema.NaptimePaginationField
import org.coursera.naptime.ari.graphql.schema.NaptimeResourceField
import org.coursera.naptime.ari.graphql.schema.SchemaMetadata
import org.coursera.naptime.schema.ArbitraryValue
import org.coursera.naptime.schema.Handler
import org.coursera.naptime.schema.HandlerKind
import org.coursera.naptime.schema.Resource
import sangria.marshalling.FromInput
import sangria.schema.Argument
import sangria.schema.BigDecimalType
import sangria.schema.BooleanType
import sangria.schema.Context
import sangria.schema.FloatType
import sangria.schema.InputType
import sangria.schema.IntType
import sangria.schema.ListInputType
import sangria.schema.LongType
import sangria.schema.Schema
import sangria.schema.StringType
import sangria.schema.Value
import sangria.marshalling.FromInput._
import sangria.schema.Field
import sangria.schema.ObjectType
import sangria.schema.OptionInputType
import sangria.schema.OptionType
class SangriaGraphQlSchemaBuilder(
resources: Set[Resource],
schemas: Map[String, RecordDataSchema])
extends StrictLogging {
val schemaMetadata = SchemaMetadata(resources, schemas)
/**
* Generates a GraphQL schema for the set of resources provided to this class.
* Returns a "root" object that has one field available for each Naptime Resource provided.
*
* @return a Sangria GraphQL Schema with all resources defined
*/
def generateSchema(): Schema[SangriaGraphQlContext, DataMap] = {
val topLevelResourceObjects = for {
resource <- resources
resourceObject <- (try {
val resourceName = ResourceName(
resource.name, resource.version.getOrElse(0L).toInt).identifier
generateLookupTypeForResource(resourceName)
} catch {
case e: Throwable => None
}).toList if resourceObject.fields.nonEmpty
} yield {
Field.apply[SangriaGraphQlContext, DataMap, DataMap, Any](
formatResourceTopLevelName(resource),
resourceObject,
resolve = (context: Context[SangriaGraphQlContext, DataMap]) => {
Value(new DataMap())
})
}
val dedupedResources = topLevelResourceObjects.groupBy(_.name).map(_._2.head).toList
val rootObject = ObjectType[SangriaGraphQlContext, DataMap](
name = "root",
description = "Top-level accessor for Naptime resources",
fields = dedupedResources)
Schema(rootObject)
}
/**
* Generates an object-type for a given resource name, with each field on the merged output
* schema available on this object-type.
*
* @param resourceName String name of the resource (i.e. 'courses.v1')
* @return ObjectType for the resource
*/
def generateLookupTypeForResource(resourceName: String): Option[ObjectType[SangriaGraphQlContext, DataMap]] = {
try {
val resource = schemaMetadata.getResource(resourceName)
val fields = resource.handlers.flatMap { handler =>
handler.kind match {
// We want to make sure that if a resource has a GET handler, it also has a MULTI_GET handler
case HandlerKind.GET if resource.handlers.exists(_.kind == HandlerKind.MULTI_GET) =>
generateGetHandler(resource, handler)
case HandlerKind.GET_ALL | HandlerKind.MULTI_GET | HandlerKind.FINDER =>
generateListHandler(resource, handler)
case _ => None
}
}.toList
if (fields.nonEmpty) {
val resourceObjectType = ObjectType[SangriaGraphQlContext, DataMap](
name = formatResourceTopLevelName(resource),
fieldsFn = () => fields)
Some(resourceObjectType)
} else {
logger.warn(s"No handlers available for resource $resourceName")
None
}
} catch {
case e: Throwable =>
logger.error(s"Unknown error when generating resource: ${e.getMessage}")
None
}
}
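  // Illustrative example (derived from the handler matching above, names are
  // hypothetical): a resource named 'courses.v1' yields an ObjectType called
  // "CoursesV1Resource" whose fields are "get" (only when a MULTI_GET handler
  // also exists), "multiGet", "getAll", and one field per finder handler.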
def generateGetHandler(
resource: Resource,
handler: Handler): Option[Field[SangriaGraphQlContext, DataMap]] = {
val arguments = SangriaGraphQlSchemaBuilder.generateHandlerArguments(handler)
val resourceName = ResourceName(resource.name, resource.version.getOrElse(0L).toInt)
val idExtractor = (context: Context[SangriaGraphQlContext, DataMap]) => {
val id = context.arg[AnyRef]("id")
id match {
case idOpt: Option[Any] => idOpt.orNull
case _ => id
}
}
NaptimeResourceField.build(
schemaMetadata = schemaMetadata,
resourceName = resourceName.identifier,
fieldName = "get",
idExtractor = Some(idExtractor))
.map { field =>
field.copy(arguments = arguments ++ field.arguments)
}
}
def generateListHandler(
resource: Resource,
handler: Handler): Option[Field[SangriaGraphQlContext, DataMap]] = {
val resourceName = ResourceName(resource.name, resource.version.getOrElse(0L).toInt)
val arguments = SangriaGraphQlSchemaBuilder.generateHandlerArguments(handler)
val fieldName = handler.kind match {
case HandlerKind.FINDER => handler.name
case HandlerKind.GET_ALL => "getAll"
case HandlerKind.MULTI_GET => "multiGet"
case _ => "error"
}
NaptimePaginatedResourceField.build(
schemaMetadata = schemaMetadata,
resourceName = resourceName.identifier,
fieldName = fieldName,
handlerOverride = Some(handler),
fieldRelation = None).map { field =>
val mergedArguments = (field.arguments ++ arguments)
.groupBy(_.name)
.map(_._2.head)
.map(_.asInstanceOf[Argument[Any]])
.toList
field.copy(arguments = mergedArguments)
}
}
/**
* Converts a resource name to a GraphQL compatible name. (i.e. 'courses.v1' to 'CoursesV1')
*
* @param resource Naptime resource
* @return GraphQL-safe resource name
*/
def formatResourceName(resource: Resource): String = {
s"${resource.name.capitalize}V${resource.version.getOrElse(0)}"
}
/**
* Converts a resource to a GraphQL top-level name. (i.e. 'courses.v1' to 'CoursesV1Resource')
*
* @param resource Naptime resource
* @return GraphQL-safe top-level resource name
*/
def formatResourceTopLevelName(resource: Resource): String = {
s"${formatResourceName(resource)}Resource"
}
}
object SangriaGraphQlSchemaBuilder extends StrictLogging {
val PAGINATION_ARGUMENT_NAMES = NaptimePaginationField.paginationArguments.map(_.name)
def generateHandlerArguments(handler: Handler, includePagination: Boolean = false): List[Argument[Any]] = {
val baseParameters = handler.parameters
.filterNot(parameter => PAGINATION_ARGUMENT_NAMES.contains(parameter.name))
.map { parameter =>
val tpe = parameter.`type`
val inputType = scalaTypeToSangria(tpe)
val fromInputType = scalaTypeToFromInput(tpe)
val (optionalInputType, optionalFromInputType: FromInput[Any]) = (inputType, parameter.required) match {
case (_: OptionInputType[Any], _) => (inputType, fromInputType)
case (_, false) => (OptionInputType(inputType), FromInput.optionInput(fromInputType))
case (_, true) => (inputType, fromInputType)
}
Argument(
name = parameter.name,
argumentType = optionalInputType)(optionalFromInputType, implicitly).asInstanceOf[Argument[Any]]
}.toList
val paginationParameters = if (includePagination) {
NaptimePaginationField.paginationArguments
} else {
List.empty
}
(baseParameters ++ paginationParameters)
.groupBy(_.name)
.map(_._2.head.asInstanceOf[Argument[Any]])
.toList
}
def scalaTypeToSangria(typeName: String): InputType[Any] = {
import sangria.marshalling.FromInput.seqInput
import sangria.marshalling.FromInput.coercedScalaInput
val listPattern = "(Set|List|Seq|immutable.Seq)\\[(.*)\\]".r
val optionPattern = "(Option)\\[(.*)\\]".r
// TODO(bryan): Fill in the missing types here
typeName match {
case listPattern(_, innerType) => ListInputType(scalaTypeToSangria(innerType))
case optionPattern(_, innerType) => OptionInputType(scalaTypeToSangria(innerType))
case "string" | "String" => StringType
case "int" | "Int" => IntType
case "long" | "Long" => LongType
case "float" | "Float" => FloatType
case "decimal" | "Decimal" => BigDecimalType
case "boolean" | "Boolean" => BooleanType
case _ => {
logger.warn(s"could not parse type from $typeName")
StringType
}
}
}
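  // A few illustrative mappings (assumed examples, not an exhaustive list):
  //   scalaTypeToSangria("Int")            => IntType
  //   scalaTypeToSangria("Option[String]") => OptionInputType(StringType)
  //   scalaTypeToSangria("List[Long]")     => ListInputType(LongType)
  //   scalaTypeToSangria("some.Unknown")   => StringType (fallback, logged as a warning)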
def scalaTypeToFromInput(typeName: String): FromInput[Any] = {
import sangria.marshalling.FromInput.seqInput
import sangria.marshalling.FromInput.coercedScalaInput
val listPattern = "(set|list|seq|immutable.Seq)\\[(.*)\\]".r
val optionPattern = "(Option)\\[(.*)\\]".r
// TODO(bryan): Fix all of this :)
typeName.toLowerCase match {
case listPattern(outerType, innerType) =>
val listType = scalaTypeToFromInput(innerType)
sangria.marshalling.FromInput.seqInput(listType).asInstanceOf[FromInput[Any]]
case "string" | "int" | "long" | "float" | "decimal" | "boolean" =>
sangria.marshalling.FromInput.coercedScalaInput.asInstanceOf[FromInput[Any]]
case _ =>
sangria.marshalling.FromInput.coercedScalaInput.asInstanceOf[FromInput[Any]]
}
}
}
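// Illustrative wiring sketch (not part of the original file): it shows how the
// builder might be instantiated and turned into a Sangria schema. The
// `fetchedResources` and `fetchedSchemas` parameters are placeholders for
// whatever resource metadata the surrounding application has already loaded.
object SangriaGraphQlSchemaBuilderUsageSketch {
  def buildRootSchema(
      fetchedResources: Set[Resource],
      fetchedSchemas: Map[String, RecordDataSchema]): Schema[SangriaGraphQlContext, DataMap] = {
    // One field per eligible resource handler ends up under the generated "root" query type.
    new SangriaGraphQlSchemaBuilder(fetchedResources, fetchedSchemas).generateSchema()
  }
}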
|
vkuo-coursera/naptime
|
naptime-graphql/src/main/scala/org/coursera/naptime/ari/graphql/SangriaGraphQlSchemaBuilder.scala
|
Scala
|
apache-2.0
| 10,286 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import java.net.URI
import java.util.Locale
import java.util.concurrent.Callable
import javax.annotation.concurrent.GuardedBy
import scala.collection.mutable
import scala.util.{Failure, Success, Try}
import com.google.common.cache.{Cache, CacheBuilder}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.internal.Logging
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst._
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.analysis.FunctionRegistry.FunctionBuilder
import org.apache.spark.sql.catalyst.expressions.{Expression, ExpressionInfo, ImplicitCastInputTypes}
import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParserInterface}
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, SubqueryAlias, View}
import org.apache.spark.sql.catalyst.util.StringUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
object SessionCatalog {
val DEFAULT_DATABASE = "default"
}
/**
* An internal catalog that is used by a Spark Session. This internal catalog serves as a
* proxy to the underlying metastore (e.g. Hive Metastore) and it also manages temporary
* views and functions of the Spark Session that it belongs to.
*
* This class must be thread-safe.
*/
class SessionCatalog(
externalCatalogBuilder: () => ExternalCatalog,
globalTempViewManagerBuilder: () => GlobalTempViewManager,
functionRegistry: FunctionRegistry,
conf: SQLConf,
hadoopConf: Configuration,
parser: ParserInterface,
functionResourceLoader: FunctionResourceLoader) extends Logging {
import SessionCatalog._
import CatalogTypes.TablePartitionSpec
// For testing only.
def this(
externalCatalog: ExternalCatalog,
functionRegistry: FunctionRegistry,
conf: SQLConf) {
this(
() => externalCatalog,
() => new GlobalTempViewManager("global_temp"),
functionRegistry,
conf,
new Configuration(),
new CatalystSqlParser(conf),
DummyFunctionResourceLoader)
}
// For testing only.
def this(externalCatalog: ExternalCatalog) {
this(
externalCatalog,
new SimpleFunctionRegistry,
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true))
}
lazy val externalCatalog = externalCatalogBuilder()
lazy val globalTempViewManager = globalTempViewManagerBuilder()
/** List of temporary views, mapping from table name to their logical plan. */
@GuardedBy("this")
protected val tempViews = new mutable.HashMap[String, LogicalPlan]
// Note: we track current database here because certain operations do not explicitly
// specify the database (e.g. DROP TABLE my_table). In these cases we must first
// check whether the temporary view or function exists, then, if not, operate on
// the corresponding item in the current database.
@GuardedBy("this")
protected var currentDb: String = formatDatabaseName(DEFAULT_DATABASE)
/**
* Checks if the given name conforms to the Hive standard ("[a-zA-Z_0-9]+"),
* i.e. if this name only contains letters, numbers, and _.
*
* This method is intended to have the same behavior as
* org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName.
*/
private def validateName(name: String): Unit = {
val validNameFormat = "([\\w_]+)".r
if (!validNameFormat.pattern.matcher(name).matches()) {
throw new AnalysisException(s"`$name` is not a valid name for tables/databases. " +
"Valid names only contain alphabet characters, numbers and _.")
}
}
/**
* Format table name, taking into account case sensitivity.
*/
protected[this] def formatTableName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
/**
* Format database name, taking into account case sensitivity.
*/
protected[this] def formatDatabaseName(name: String): String = {
if (conf.caseSensitiveAnalysis) name else name.toLowerCase(Locale.ROOT)
}
private val tableRelationCache: Cache[QualifiedTableName, LogicalPlan] = {
val cacheSize = conf.tableRelationCacheSize
CacheBuilder.newBuilder().maximumSize(cacheSize).build[QualifiedTableName, LogicalPlan]()
}
/** This method provides a way to get a cached plan. */
def getCachedPlan(t: QualifiedTableName, c: Callable[LogicalPlan]): LogicalPlan = {
tableRelationCache.get(t, c)
}
/** This method provides a way to get a cached plan if the key exists. */
def getCachedTable(key: QualifiedTableName): LogicalPlan = {
tableRelationCache.getIfPresent(key)
}
/** This method provides a way to cache a plan. */
def cacheTable(t: QualifiedTableName, l: LogicalPlan): Unit = {
tableRelationCache.put(t, l)
}
/** This method provides a way to invalidate a cached plan. */
def invalidateCachedTable(key: QualifiedTableName): Unit = {
tableRelationCache.invalidate(key)
}
/** This method provides a way to invalidate all the cached plans. */
def invalidateAllCachedTables(): Unit = {
tableRelationCache.invalidateAll()
}
/**
* This method qualifies the given path before we store it in the underlying
* external catalog, so that a path without a scheme is not affected if the
* default FileSystem is later changed.
*/
private def makeQualifiedPath(path: URI): URI = {
val hadoopPath = new Path(path)
val fs = hadoopPath.getFileSystem(hadoopConf)
fs.makeQualified(hadoopPath).toUri
}
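  // For example (hypothetical values): with the default FileSystem set to
  // hdfs://namenode:8020, an unqualified URI such as /user/warehouse/db1.db
  // would be stored as hdfs://namenode:8020/user/warehouse/db1.db.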
private def requireDbExists(db: String): Unit = {
if (!databaseExists(db)) {
throw new NoSuchDatabaseException(db)
}
}
private def requireTableExists(name: TableIdentifier): Unit = {
if (!tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new NoSuchTableException(db = db, table = name.table)
}
}
private def requireTableNotExists(name: TableIdentifier): Unit = {
if (tableExists(name)) {
val db = name.database.getOrElse(currentDb)
throw new TableAlreadyExistsException(db = db, table = name.table)
}
}
// ----------------------------------------------------------------------------
// Databases
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// ----------------------------------------------------------------------------
def createDatabase(dbDefinition: CatalogDatabase, ignoreIfExists: Boolean): Unit = {
val dbName = formatDatabaseName(dbDefinition.name)
if (dbName == globalTempViewManager.database) {
throw new AnalysisException(
s"${globalTempViewManager.database} is a system preserved database, " +
"you cannot create a database with this name.")
}
validateName(dbName)
val qualifiedPath = makeQualifiedPath(dbDefinition.locationUri)
externalCatalog.createDatabase(
dbDefinition.copy(name = dbName, locationUri = qualifiedPath),
ignoreIfExists)
}
def dropDatabase(db: String, ignoreIfNotExists: Boolean, cascade: Boolean): Unit = {
val dbName = formatDatabaseName(db)
if (dbName == DEFAULT_DATABASE) {
throw new AnalysisException(s"Can not drop default database")
}
externalCatalog.dropDatabase(dbName, ignoreIfNotExists, cascade)
}
def alterDatabase(dbDefinition: CatalogDatabase): Unit = {
val dbName = formatDatabaseName(dbDefinition.name)
requireDbExists(dbName)
externalCatalog.alterDatabase(dbDefinition.copy(name = dbName))
}
def getDatabaseMetadata(db: String): CatalogDatabase = {
val dbName = formatDatabaseName(db)
requireDbExists(dbName)
externalCatalog.getDatabase(dbName)
}
def databaseExists(db: String): Boolean = {
val dbName = formatDatabaseName(db)
externalCatalog.databaseExists(dbName)
}
def listDatabases(): Seq[String] = {
externalCatalog.listDatabases()
}
def listDatabases(pattern: String): Seq[String] = {
externalCatalog.listDatabases(pattern)
}
def getCurrentDatabase: String = synchronized { currentDb }
def setCurrentDatabase(db: String): Unit = {
val dbName = formatDatabaseName(db)
if (dbName == globalTempViewManager.database) {
throw new AnalysisException(
s"${globalTempViewManager.database} is a system preserved database, " +
"you cannot use it as current database. To access global temporary views, you should " +
"use qualified name with the GLOBAL_TEMP_DATABASE, e.g. SELECT * FROM " +
s"${globalTempViewManager.database}.viewName.")
}
requireDbExists(dbName)
synchronized { currentDb = dbName }
}
/**
* Get the path for creating a non-default database when database location is not provided
* by users.
*/
def getDefaultDBPath(db: String): URI = {
val database = formatDatabaseName(db)
new Path(new Path(conf.warehousePath), database + ".db").toUri
}
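  // For example (hypothetical warehouse location): with spark.sql.warehouse.dir
  // set to /user/hive/warehouse, getDefaultDBPath("sales") resolves to
  // /user/hive/warehouse/sales.db.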
// ----------------------------------------------------------------------------
// Tables
// ----------------------------------------------------------------------------
// There are two kinds of tables, temporary views and metastore tables.
// Temporary views are isolated across sessions and do not belong to any
// particular database. Metastore tables can be used across multiple
// sessions as their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// ----------------------------------------------------
// | Methods that interact with metastore tables only |
// ----------------------------------------------------
/**
* Create a metastore table in the database specified in `tableDefinition`.
* If no such database is specified, create it in the current database.
*/
def createTable(
tableDefinition: CatalogTable,
ignoreIfExists: Boolean,
validateLocation: Boolean = true): Unit = {
val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableDefinition.identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
validateName(table)
val newTableDefinition = if (tableDefinition.storage.locationUri.isDefined
&& !tableDefinition.storage.locationUri.get.isAbsolute) {
// make the location of the table qualified.
val qualifiedTableLocation =
makeQualifiedPath(tableDefinition.storage.locationUri.get)
tableDefinition.copy(
storage = tableDefinition.storage.copy(locationUri = Some(qualifiedTableLocation)),
identifier = tableIdentifier)
} else {
tableDefinition.copy(identifier = tableIdentifier)
}
requireDbExists(db)
if (tableExists(newTableDefinition.identifier)) {
if (!ignoreIfExists) {
throw new TableAlreadyExistsException(db = db, table = table)
}
} else if (validateLocation) {
validateTableLocation(newTableDefinition)
}
externalCatalog.createTable(newTableDefinition, ignoreIfExists)
}
def validateTableLocation(table: CatalogTable): Unit = {
// SPARK-19724: the default location of a managed table should be non-existent or empty.
if (table.tableType == CatalogTableType.MANAGED &&
!conf.allowCreatingManagedTableUsingNonemptyLocation) {
val tableLocation =
new Path(table.storage.locationUri.getOrElse(defaultTablePath(table.identifier)))
val fs = tableLocation.getFileSystem(hadoopConf)
if (fs.exists(tableLocation) && fs.listStatus(tableLocation).nonEmpty) {
throw new AnalysisException(s"Can not create the managed table('${table.identifier}')" +
s". The associated location('${tableLocation.toString}') already exists.")
}
}
}
/**
* Alter the metadata of an existing metastore table identified by `tableDefinition`.
*
* If no database is specified in `tableDefinition`, assume the table is in the
* current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterTable(tableDefinition: CatalogTable): Unit = {
val db = formatDatabaseName(tableDefinition.identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableDefinition.identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
val newTableDefinition = tableDefinition.copy(identifier = tableIdentifier)
requireDbExists(db)
requireTableExists(tableIdentifier)
externalCatalog.alterTable(newTableDefinition)
}
/**
* Alter the data schema of a table identified by the provided table identifier. The new data
* schema should not have conflict column names with the existing partition columns, and should
* still contain all the existing data columns.
*
* @param identifier TableIdentifier
* @param newDataSchema Updated data schema to be used for the table
*/
def alterTableDataSchema(
identifier: TableIdentifier,
newDataSchema: StructType): Unit = {
val db = formatDatabaseName(identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
requireDbExists(db)
requireTableExists(tableIdentifier)
val catalogTable = externalCatalog.getTable(db, table)
val oldDataSchema = catalogTable.dataSchema
// not supporting dropping columns yet
val nonExistentColumnNames =
oldDataSchema.map(_.name).filterNot(columnNameResolved(newDataSchema, _))
if (nonExistentColumnNames.nonEmpty) {
throw new AnalysisException(
s"""
|Some existing schema fields (${nonExistentColumnNames.mkString("[", ",", "]")}) are
|not present in the new schema. We don't support dropping columns yet.
""".stripMargin)
}
externalCatalog.alterTableDataSchema(db, table, newDataSchema)
}
private def columnNameResolved(schema: StructType, colName: String): Boolean = {
schema.fields.map(_.name).exists(conf.resolver(_, colName))
}
/**
* Alter Spark's statistics of an existing metastore table identified by the provided table
* identifier.
*/
def alterTableStats(identifier: TableIdentifier, newStats: Option[CatalogStatistics]): Unit = {
val db = formatDatabaseName(identifier.database.getOrElse(getCurrentDatabase))
val table = formatTableName(identifier.table)
val tableIdentifier = TableIdentifier(table, Some(db))
requireDbExists(db)
requireTableExists(tableIdentifier)
externalCatalog.alterTableStats(db, table, newStats)
// Invalidate the table relation cache
refreshTable(identifier)
}
/**
* Return whether a table/view with the specified name exists. If no database is specified, check
* with the current database.
*/
def tableExists(name: TableIdentifier): Boolean = synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
externalCatalog.tableExists(db, table)
}
/**
* Retrieve the metadata of an existing permanent table/view. If no database is specified,
* assume the table/view is in the current database.
*/
@throws[NoSuchDatabaseException]
@throws[NoSuchTableException]
def getTableMetadata(name: TableIdentifier): CatalogTable = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.getTable(db, table)
}
/**
* Load files stored in given path into an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
def loadTable(
name: TableIdentifier,
loadPath: String,
isOverwrite: Boolean,
isSrcLocal: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
externalCatalog.loadTable(db, table, loadPath, isOverwrite, isSrcLocal)
}
/**
* Load files stored in given path into the partition of an existing metastore table.
* If no database is specified, assume the table is in the current database.
* If the specified table is not found in the database then a [[NoSuchTableException]] is thrown.
*/
def loadPartition(
name: TableIdentifier,
loadPath: String,
spec: TablePartitionSpec,
isOverwrite: Boolean,
inheritTableSpecs: Boolean,
isSrcLocal: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val table = formatTableName(name.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Some(db)))
requireNonEmptyValueInPartitionSpec(Seq(spec))
externalCatalog.loadPartition(
db, table, loadPath, spec, isOverwrite, inheritTableSpecs, isSrcLocal)
}
def defaultTablePath(tableIdent: TableIdentifier): URI = {
val dbName = formatDatabaseName(tableIdent.database.getOrElse(getCurrentDatabase))
val dbLocation = getDatabaseMetadata(dbName).locationUri
new Path(new Path(dbLocation), formatTableName(tableIdent.table)).toUri
}
// ----------------------------------------------
// | Methods that interact with temp views only |
// ----------------------------------------------
/**
* Create a local temporary view.
*/
def createTempView(
name: String,
tableDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = synchronized {
val table = formatTableName(name)
if (tempViews.contains(table) && !overrideIfExists) {
throw new TempTableAlreadyExistsException(name)
}
tempViews.put(table, tableDefinition)
}
/**
* Create a global temporary view.
*/
def createGlobalTempView(
name: String,
viewDefinition: LogicalPlan,
overrideIfExists: Boolean): Unit = {
globalTempViewManager.create(formatTableName(name), viewDefinition, overrideIfExists)
}
/**
* Alter the definition of a local/global temp view matching the given name, returns true if a
* temp view is matched and altered, false otherwise.
*/
def alterTempViewDefinition(
name: TableIdentifier,
viewDefinition: LogicalPlan): Boolean = synchronized {
val viewName = formatTableName(name.table)
if (name.database.isEmpty) {
if (tempViews.contains(viewName)) {
createTempView(viewName, viewDefinition, overrideIfExists = true)
true
} else {
false
}
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.update(viewName, viewDefinition)
} else {
false
}
}
/**
* Return a local temporary view exactly as it was stored.
*/
def getTempView(name: String): Option[LogicalPlan] = synchronized {
tempViews.get(formatTableName(name))
}
/**
* Return a global temporary view exactly as it was stored.
*/
def getGlobalTempView(name: String): Option[LogicalPlan] = {
globalTempViewManager.get(formatTableName(name))
}
/**
* Drop a local temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
def dropTempView(name: String): Boolean = synchronized {
tempViews.remove(formatTableName(name)).isDefined
}
/**
* Drop a global temporary view.
*
* Returns true if this view is dropped successfully, false otherwise.
*/
def dropGlobalTempView(name: String): Boolean = {
globalTempViewManager.remove(formatTableName(name))
}
// -------------------------------------------------------------
// | Methods that interact with temporary and metastore tables |
// -------------------------------------------------------------
/**
* Retrieve the metadata of an existing temporary view or permanent table/view.
*
* If a database is specified in `name`, this will return the metadata of table/view in that
* database.
* If no database is specified, this will first attempt to get the metadata of a temporary view
* with the same name, then, if that does not exist, return the metadata of table/view in the
* current database.
*/
def getTempViewOrPermanentTableMetadata(name: TableIdentifier): CatalogTable = synchronized {
val table = formatTableName(name.table)
if (name.database.isEmpty) {
getTempView(table).map { plan =>
CatalogTable(
identifier = TableIdentifier(table),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = plan.output.toStructType)
}.getOrElse(getTableMetadata(name))
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.get(table).map { plan =>
CatalogTable(
identifier = TableIdentifier(table, Some(globalTempViewManager.database)),
tableType = CatalogTableType.VIEW,
storage = CatalogStorageFormat.empty,
schema = plan.output.toStructType)
}.getOrElse(throw new NoSuchTableException(globalTempViewManager.database, table))
} else {
getTableMetadata(name)
}
}
/**
* Rename a table.
*
* If a database is specified in `oldName`, this will rename the table in that database.
* If no database is specified, this will first attempt to rename a temporary view with
* the same name, then, if that does not exist, rename the table in the current database.
*
* This assumes the database specified in `newName` matches the one in `oldName`.
*/
def renameTable(oldName: TableIdentifier, newName: TableIdentifier): Unit = synchronized {
val db = formatDatabaseName(oldName.database.getOrElse(currentDb))
newName.database.map(formatDatabaseName).foreach { newDb =>
if (db != newDb) {
throw new AnalysisException(
s"RENAME TABLE source and destination databases do not match: '$db' != '$newDb'")
}
}
val oldTableName = formatTableName(oldName.table)
val newTableName = formatTableName(newName.table)
if (db == globalTempViewManager.database) {
globalTempViewManager.rename(oldTableName, newTableName)
} else {
requireDbExists(db)
if (oldName.database.isDefined || !tempViews.contains(oldTableName)) {
requireTableExists(TableIdentifier(oldTableName, Some(db)))
requireTableNotExists(TableIdentifier(newTableName, Some(db)))
validateName(newTableName)
validateNewLocationOfRename(oldName, newName)
externalCatalog.renameTable(db, oldTableName, newTableName)
} else {
if (newName.database.isDefined) {
throw new AnalysisException(
s"RENAME TEMPORARY VIEW from '$oldName' to '$newName': cannot specify database " +
s"name '${newName.database.get}' in the destination table")
}
if (tempViews.contains(newTableName)) {
throw new AnalysisException(s"RENAME TEMPORARY VIEW from '$oldName' to '$newName': " +
"destination table already exists")
}
val table = tempViews(oldTableName)
tempViews.remove(oldTableName)
tempViews.put(newTableName, table)
}
}
}
/**
* Drop a table.
*
* If a database is specified in `name`, this will drop the table from that database.
* If no database is specified, this will first attempt to drop a temporary view with
* the same name, then, if that does not exist, drop the table from the current database.
*/
def dropTable(
name: TableIdentifier,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
if (db == globalTempViewManager.database) {
val viewExists = globalTempViewManager.remove(table)
if (!viewExists && !ignoreIfNotExists) {
throw new NoSuchTableException(globalTempViewManager.database, table)
}
} else {
if (name.database.isDefined || !tempViews.contains(table)) {
requireDbExists(db)
      // The existence check below lets the external catalog call safely pass
      // ignoreIfNotExists = true; if the table does not exist and ignoreIfNotExists
      // is false, a NoSuchTableException is thrown instead.
if (tableExists(TableIdentifier(table, Option(db)))) {
externalCatalog.dropTable(db, table, ignoreIfNotExists = true, purge = purge)
} else if (!ignoreIfNotExists) {
throw new NoSuchTableException(db = db, table = table)
}
} else {
tempViews.remove(table)
}
}
}
/**
* Return a [[LogicalPlan]] that represents the given table or view.
*
* If a database is specified in `name`, this will return the table/view from that database.
* If no database is specified, this will first attempt to return a temporary view with
* the same name, then, if that does not exist, return the table/view from the current database.
*
* Note that, the global temp view database is also valid here, this will return the global temp
* view matching the given name.
*
* If the relation is a view, we generate a [[View]] operator from the view description, and
* wrap the logical plan in a [[SubqueryAlias]] which will track the name of the view.
* [[SubqueryAlias]] will also keep track of the name and the (optional) database of the table/view.
*
* @param name The name of the table/view that we look up.
*/
def lookupRelation(name: TableIdentifier): LogicalPlan = {
synchronized {
val db = formatDatabaseName(name.database.getOrElse(currentDb))
val table = formatTableName(name.table)
if (db == globalTempViewManager.database) {
globalTempViewManager.get(table).map { viewDef =>
SubqueryAlias(table, db, viewDef)
}.getOrElse(throw new NoSuchTableException(db, table))
} else if (name.database.isDefined || !tempViews.contains(table)) {
val metadata = externalCatalog.getTable(db, table)
if (metadata.tableType == CatalogTableType.VIEW) {
val viewText = metadata.viewText.getOrElse(sys.error("Invalid view without text."))
// The relation is a view, so we wrap the relation by:
// 1. Add a [[View]] operator over the relation to keep track of the view desc;
// 2. Wrap the logical plan in a [[SubqueryAlias]] which tracks the name of the view.
val child = View(
desc = metadata,
output = metadata.schema.toAttributes,
child = parser.parsePlan(viewText))
SubqueryAlias(table, db, child)
} else {
SubqueryAlias(table, db, UnresolvedCatalogRelation(metadata))
}
} else {
SubqueryAlias(table, tempViews(table))
}
}
}
/**
* Return whether a table with the specified name is a temporary view.
*
* Note: The temporary view cache is checked only when database is not
* explicitly specified.
*/
def isTemporaryTable(name: TableIdentifier): Boolean = synchronized {
val table = formatTableName(name.table)
if (name.database.isEmpty) {
tempViews.contains(table)
} else if (formatDatabaseName(name.database.get) == globalTempViewManager.database) {
globalTempViewManager.get(table).isDefined
} else {
false
}
}
/**
* List all tables in the specified database, including local temporary views.
*
* Note that, if the specified database is global temporary view database, we will list global
* temporary views.
*/
def listTables(db: String): Seq[TableIdentifier] = listTables(db, "*")
/**
* List all matching tables in the specified database, including local temporary views.
*
* Note that, if the specified database is global temporary view database, we will list global
* temporary views.
*/
def listTables(db: String, pattern: String): Seq[TableIdentifier] = {
val dbName = formatDatabaseName(db)
val dbTables = if (dbName == globalTempViewManager.database) {
globalTempViewManager.listViewNames(pattern).map { name =>
TableIdentifier(name, Some(globalTempViewManager.database))
}
} else {
requireDbExists(dbName)
externalCatalog.listTables(dbName, pattern).map { name =>
TableIdentifier(name, Some(dbName))
}
}
val localTempViews = synchronized {
StringUtils.filterPattern(tempViews.keys.toSeq, pattern).map { name =>
TableIdentifier(name)
}
}
dbTables ++ localTempViews
}
/**
* Refresh the cache entry for a metastore table, if any.
*/
def refreshTable(name: TableIdentifier): Unit = synchronized {
val dbName = formatDatabaseName(name.database.getOrElse(currentDb))
val tableName = formatTableName(name.table)
// Go through temporary views and invalidate them.
// If the database is defined, this may be a global temporary view.
// If the database is not defined, there is a good chance this is a temp view.
if (name.database.isEmpty) {
tempViews.get(tableName).foreach(_.refresh())
} else if (dbName == globalTempViewManager.database) {
globalTempViewManager.get(tableName).foreach(_.refresh())
}
// Also invalidate the table relation cache.
val qualifiedTableName = QualifiedTableName(dbName, tableName)
tableRelationCache.invalidate(qualifiedTableName)
}
/**
* Drop all existing temporary views.
* For testing only.
*/
def clearTempTables(): Unit = synchronized {
tempViews.clear()
}
// ----------------------------------------------------------------------------
// Partitions
// ----------------------------------------------------------------------------
// All methods in this category interact directly with the underlying catalog.
// These methods are concerned with only metastore tables.
// ----------------------------------------------------------------------------
// TODO: We need to figure out how these methods interact with our data source
// tables. For such tables, we do not store values of partitioning columns in
// the metastore. For now, partition values of a data source table will be
// automatically discovered when we load the table.
/**
* Create partitions in an existing table, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def createPartitions(
tableName: TableIdentifier,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.createPartitions(db, table, parts, ignoreIfExists)
}
/**
* Drop partitions from a table, assuming they exist.
* If no database is specified, assume the table is in the current database.
*/
def dropPartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
ignoreIfNotExists: Boolean,
purge: Boolean,
retainData: Boolean): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requirePartialMatchedPartitionSpec(specs, getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(specs)
externalCatalog.dropPartitions(db, table, specs, ignoreIfNotExists, purge, retainData)
}
/**
* Override the specs of one or many existing table partitions, assuming they exist.
*
* This assumes index i of `specs` corresponds to index i of `newSpecs`.
* If no database is specified, assume the table is in the current database.
*/
def renamePartitions(
tableName: TableIdentifier,
specs: Seq[TablePartitionSpec],
newSpecs: Seq[TablePartitionSpec]): Unit = {
val tableMetadata = getTableMetadata(tableName)
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(specs, tableMetadata)
requireExactMatchedPartitionSpec(newSpecs, tableMetadata)
requireNonEmptyValueInPartitionSpec(specs)
requireNonEmptyValueInPartitionSpec(newSpecs)
externalCatalog.renamePartitions(db, table, specs, newSpecs)
}
/**
* Alter one or many table partitions whose specs that match those specified in `parts`,
* assuming the partitions exist.
*
* If no database is specified, assume the table is in the current database.
*
* Note: If the underlying implementation does not support altering a certain field,
* this becomes a no-op.
*/
def alterPartitions(tableName: TableIdentifier, parts: Seq[CatalogTablePartition]): Unit = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(parts.map(_.spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(parts.map(_.spec))
externalCatalog.alterPartitions(db, table, parts)
}
/**
* Retrieve the metadata of a table partition, assuming it exists.
* If no database is specified, assume the table is in the current database.
*/
def getPartition(tableName: TableIdentifier, spec: TablePartitionSpec): CatalogTablePartition = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
requireExactMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
externalCatalog.getPartition(db, table, spec)
}
/**
* List the names of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitionNames(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[String] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitionNames(db, table, partialSpec)
}
/**
* List the metadata of all partitions that belong to the specified table, assuming it exists.
*
* A partial partition spec may optionally be provided to filter the partitions returned.
* For instance, if there exist partitions (a='1', b='2'), (a='1', b='3') and (a='2', b='4'),
* then a partial spec of (a='1') will return the first two only.
*/
def listPartitions(
tableName: TableIdentifier,
partialSpec: Option[TablePartitionSpec] = None): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
partialSpec.foreach { spec =>
requirePartialMatchedPartitionSpec(Seq(spec), getTableMetadata(tableName))
requireNonEmptyValueInPartitionSpec(Seq(spec))
}
externalCatalog.listPartitions(db, table, partialSpec)
}
/**
* List the metadata of partitions that belong to the specified table, assuming it exists, that
* satisfy the given partition-pruning predicate expressions.
*/
def listPartitionsByFilter(
tableName: TableIdentifier,
predicates: Seq[Expression]): Seq[CatalogTablePartition] = {
val db = formatDatabaseName(tableName.database.getOrElse(getCurrentDatabase))
val table = formatTableName(tableName.table)
requireDbExists(db)
requireTableExists(TableIdentifier(table, Option(db)))
externalCatalog.listPartitionsByFilter(db, table, predicates, conf.sessionLocalTimeZone)
}
/**
* Verify if the input partition spec has any empty value.
*/
private def requireNonEmptyValueInPartitionSpec(specs: Seq[TablePartitionSpec]): Unit = {
specs.foreach { s =>
if (s.values.exists(_.isEmpty)) {
val spec = s.map(p => p._1 + "=" + p._2).mkString("[", ", ", "]")
throw new AnalysisException(
s"Partition spec is invalid. The spec ($spec) contains an empty partition column value")
}
}
}
/**
* Verify if the input partition spec exactly matches the existing defined partition spec.
* The columns must be the same but the order could be different.
*/
private def requireExactMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames.sorted
specs.foreach { s =>
if (s.keys.toSeq.sorted != defined) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must match " +
s"the partition spec (${table.partitionColumnNames.mkString(", ")}) defined in " +
s"table '${table.identifier}'")
}
}
}
/**
* Verify if the input partition spec partially matches the existing defined partition spec
* That is, the columns of partition spec should be part of the defined partition spec.
*/
private def requirePartialMatchedPartitionSpec(
specs: Seq[TablePartitionSpec],
table: CatalogTable): Unit = {
val defined = table.partitionColumnNames
specs.foreach { s =>
if (!s.keys.forall(defined.contains)) {
throw new AnalysisException(
s"Partition spec is invalid. The spec (${s.keys.mkString(", ")}) must be contained " +
s"within the partition spec (${table.partitionColumnNames.mkString(", ")}) defined " +
s"in table '${table.identifier}'")
}
}
}
// ----------------------------------------------------------------------------
// Functions
// ----------------------------------------------------------------------------
// There are two kinds of functions, temporary functions and metastore
// functions (permanent UDFs). Temporary functions are isolated across
// sessions. Metastore functions can be used across multiple sessions as
// their metadata is persisted in the underlying catalog.
// ----------------------------------------------------------------------------
// -------------------------------------------------------
// | Methods that interact with metastore functions only |
// -------------------------------------------------------
/**
* Create a function in the database specified in `funcDefinition`.
* If no such database is specified, create it in the current database.
*
* @param ignoreIfExists: When true, ignore if the function with the specified name exists
* in the specified database.
*/
def createFunction(funcDefinition: CatalogFunction, ignoreIfExists: Boolean): Unit = {
val db = formatDatabaseName(funcDefinition.identifier.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = FunctionIdentifier(funcDefinition.identifier.funcName, Some(db))
val newFuncDefinition = funcDefinition.copy(identifier = identifier)
if (!functionExists(identifier)) {
externalCatalog.createFunction(db, newFuncDefinition)
} else if (!ignoreIfExists) {
throw new FunctionAlreadyExistsException(db = db, func = identifier.toString)
}
}
/**
* Drop a metastore function.
* If no database is specified, assume the function is in the current database.
*/
def dropFunction(name: FunctionIdentifier, ignoreIfNotExists: Boolean): Unit = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = name.copy(database = Some(db))
if (functionExists(identifier)) {
if (functionRegistry.functionExists(identifier)) {
// If we have loaded this function into the FunctionRegistry,
// also drop it from there.
// For a permanent function, because we loaded it to the FunctionRegistry
// when it's first used, we also need to drop it from the FunctionRegistry.
functionRegistry.dropFunction(identifier)
}
externalCatalog.dropFunction(db, name.funcName)
} else if (!ignoreIfNotExists) {
throw new NoSuchFunctionException(db = db, func = identifier.toString)
}
}
/**
   * Overwrite a metastore function in the database specified in `funcDefinition`.
* If no database is specified, assume the function is in the current database.
*/
def alterFunction(funcDefinition: CatalogFunction): Unit = {
val db = formatDatabaseName(funcDefinition.identifier.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
val identifier = FunctionIdentifier(funcDefinition.identifier.funcName, Some(db))
val newFuncDefinition = funcDefinition.copy(identifier = identifier)
if (functionExists(identifier)) {
if (functionRegistry.functionExists(identifier)) {
// If we have loaded this function into the FunctionRegistry,
// also drop it from there.
// For a permanent function, because we loaded it to the FunctionRegistry
// when it's first used, we also need to drop it from the FunctionRegistry.
functionRegistry.dropFunction(identifier)
}
externalCatalog.alterFunction(db, newFuncDefinition)
} else {
throw new NoSuchFunctionException(db = db, func = identifier.toString)
}
}
/**
* Retrieve the metadata of a metastore function.
*
* If a database is specified in `name`, this will return the function in that database.
* If no database is specified, this will return the function in the current database.
*/
def getFunctionMetadata(name: FunctionIdentifier): CatalogFunction = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
externalCatalog.getFunction(db, name.funcName)
}
/**
* Check if the function with the specified name exists
*/
def functionExists(name: FunctionIdentifier): Boolean = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
requireDbExists(db)
functionRegistry.functionExists(name) ||
externalCatalog.functionExists(db, name.funcName)
}
// ----------------------------------------------------------------
// | Methods that interact with temporary and metastore functions |
// ----------------------------------------------------------------
/**
* Constructs a [[FunctionBuilder]] based on the provided class that represents a function.
*/
private def makeFunctionBuilder(name: String, functionClassName: String): FunctionBuilder = {
val clazz = Utils.classForName(functionClassName)
(input: Seq[Expression]) => makeFunctionExpression(name, clazz, input)
}
/**
* Constructs a [[Expression]] based on the provided class that represents a function.
*
* This performs reflection to decide what type of [[Expression]] to return in the builder.
*/
protected def makeFunctionExpression(
name: String,
clazz: Class[_],
input: Seq[Expression]): Expression = {
// Unfortunately we need to use reflection here because UserDefinedAggregateFunction
// and ScalaUDAF are defined in sql/core module.
val clsForUDAF =
Utils.classForName("org.apache.spark.sql.expressions.UserDefinedAggregateFunction")
if (clsForUDAF.isAssignableFrom(clazz)) {
val cls = Utils.classForName("org.apache.spark.sql.execution.aggregate.ScalaUDAF")
val e = cls.getConstructor(classOf[Seq[Expression]], clsForUDAF, classOf[Int], classOf[Int])
.newInstance(input, clazz.newInstance().asInstanceOf[Object], Int.box(1), Int.box(1))
.asInstanceOf[ImplicitCastInputTypes]
// Check input argument size
if (e.inputTypes.size != input.size) {
throw new AnalysisException(s"Invalid number of arguments for function $name. " +
s"Expected: ${e.inputTypes.size}; Found: ${input.size}")
}
e
} else {
throw new AnalysisException(s"No handler for UDAF '${clazz.getCanonicalName}'. " +
s"Use sparkSession.udf.register(...) instead.")
}
}
/**
* Loads resources such as JARs and Files for a function. Every resource is represented
* by a tuple (resource type, resource uri).
*/
def loadFunctionResources(resources: Seq[FunctionResource]): Unit = {
resources.foreach(functionResourceLoader.loadResource)
}
/**
* Registers a temporary or permanent function into a session-specific [[FunctionRegistry]]
*/
def registerFunction(
funcDefinition: CatalogFunction,
overrideIfExists: Boolean,
functionBuilder: Option[FunctionBuilder] = None): Unit = {
val func = funcDefinition.identifier
if (functionRegistry.functionExists(func) && !overrideIfExists) {
throw new AnalysisException(s"Function $func already exists")
}
val info = new ExpressionInfo(funcDefinition.className, func.database.orNull, func.funcName)
val builder =
functionBuilder.getOrElse {
val className = funcDefinition.className
if (!Utils.classIsLoadable(className)) {
throw new AnalysisException(s"Can not load class '$className' when registering " +
s"the function '$func', please make sure it is on the classpath")
}
makeFunctionBuilder(func.unquotedString, className)
}
functionRegistry.registerFunction(func, info, builder)
}
/**
* Drop a temporary function.
*/
def dropTempFunction(name: String, ignoreIfNotExists: Boolean): Unit = {
if (!functionRegistry.dropFunction(FunctionIdentifier(name)) && !ignoreIfNotExists) {
throw new NoSuchTempFunctionException(name)
}
}
/**
   * Returns whether the given function is a temporary function.
   * If it does not exist, returns false.
*/
def isTemporaryFunction(name: FunctionIdentifier): Boolean = {
// copied from HiveSessionCatalog
val hiveFunctions = Seq("histogram_numeric")
// A temporary function is a function that has been registered in functionRegistry
// without a database name, and is neither a built-in function nor a Hive function
name.database.isEmpty &&
functionRegistry.functionExists(name) &&
!FunctionRegistry.builtin.functionExists(name) &&
!hiveFunctions.contains(name.funcName.toLowerCase(Locale.ROOT))
}
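  // Illustrative note (not part of the original source): FunctionIdentifier("myTempUdf") with no
  // database component, registered through registerFunction, satisfies this check, whereas
  // FunctionIdentifier("abs") does not because "abs" is a built-in function.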
/**
   * Returns whether this function has been registered in the function registry of the current
   * session. If it does not exist, returns false.
*/
def isRegisteredFunction(name: FunctionIdentifier): Boolean = {
functionRegistry.functionExists(name)
}
/**
   * Returns whether the given function is a persistent function.
   * If it does not exist, returns false.
*/
def isPersistentFunction(name: FunctionIdentifier): Boolean = {
val db = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
databaseExists(db) && externalCatalog.functionExists(db, name.funcName)
}
protected def failFunctionLookup(name: FunctionIdentifier): Nothing = {
throw new NoSuchFunctionException(
db = name.database.getOrElse(getCurrentDatabase), func = name.funcName)
}
/**
* Look up the [[ExpressionInfo]] associated with the specified function, assuming it exists.
*/
def lookupFunctionInfo(name: FunctionIdentifier): ExpressionInfo = synchronized {
// TODO: just make function registry take in FunctionIdentifier instead of duplicating this
val database = name.database.orElse(Some(currentDb)).map(formatDatabaseName)
val qualifiedName = name.copy(database = database)
functionRegistry.lookupFunction(name)
.orElse(functionRegistry.lookupFunction(qualifiedName))
.getOrElse {
val db = qualifiedName.database.get
requireDbExists(db)
if (externalCatalog.functionExists(db, name.funcName)) {
val metadata = externalCatalog.getFunction(db, name.funcName)
new ExpressionInfo(
metadata.className,
qualifiedName.database.orNull,
qualifiedName.identifier)
} else {
failFunctionLookup(name)
}
}
}
/**
* Return an [[Expression]] that represents the specified function, assuming it exists.
*
* For a temporary function or a permanent function that has been loaded,
* this method will simply lookup the function through the
* FunctionRegistry and create an expression based on the builder.
*
* For a permanent function that has not been loaded, we will first fetch its metadata
* from the underlying external catalog. Then, we will load all resources associated
* with this function (i.e. jars and files). Finally, we create a function builder
* based on the function class and put the builder into the FunctionRegistry.
* The name of this function in the FunctionRegistry will be `databaseName.functionName`.
*/
def lookupFunction(
name: FunctionIdentifier,
children: Seq[Expression]): Expression = synchronized {
// Note: the implementation of this function is a little bit convoluted.
// We probably shouldn't use a single FunctionRegistry to register all three kinds of functions
// (built-in, temp, and external).
if (name.database.isEmpty && functionRegistry.functionExists(name)) {
// This function has been already loaded into the function registry.
return functionRegistry.lookupFunction(name, children)
}
// If the name itself is not qualified, add the current database to it.
val database = formatDatabaseName(name.database.getOrElse(getCurrentDatabase))
val qualifiedName = name.copy(database = Some(database))
if (functionRegistry.functionExists(qualifiedName)) {
// This function has been already loaded into the function registry.
// Unlike the above block, we find this function by using the qualified name.
return functionRegistry.lookupFunction(qualifiedName, children)
}
// The function has not been loaded to the function registry, which means
// that the function is a permanent function (if it actually has been registered
// in the metastore). We need to first put the function in the FunctionRegistry.
// TODO: why not just check whether the function exists first?
val catalogFunction = try {
externalCatalog.getFunction(database, name.funcName)
} catch {
case _: AnalysisException => failFunctionLookup(name)
case _: NoSuchPermanentFunctionException => failFunctionLookup(name)
}
loadFunctionResources(catalogFunction.resources)
// Please note that qualifiedName is provided by the user. However,
// catalogFunction.identifier.unquotedString is returned by the underlying
// catalog. So, it is possible that qualifiedName is not exactly the same as
// catalogFunction.identifier.unquotedString (difference is on case-sensitivity).
    // Here, we preserve the input from the user.
registerFunction(catalogFunction.copy(identifier = qualifiedName), overrideIfExists = false)
// Now, we need to create the Expression.
functionRegistry.lookupFunction(qualifiedName, children)
}
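  // Hedged usage sketch (not part of the original source; `catalog`, `resolvedArgs` and the class
  // name are illustrative placeholders):
  //
  //   val ident = FunctionIdentifier("my_udf", Some("analytics"))
  //   if (!catalog.functionExists(ident)) {
  //     catalog.createFunction(
  //       CatalogFunction(ident, "com.example.MyUdf", Seq.empty), ignoreIfExists = false)
  //   }
  //   // The first lookup of a permanent function loads its resources and caches a builder in the
  //   // FunctionRegistry under the qualified name; later lookups hit the registry directly.
  //   val expr: Expression = catalog.lookupFunction(ident, resolvedArgs)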
/**
* List all functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
def listFunctions(db: String): Seq[(FunctionIdentifier, String)] = listFunctions(db, "*")
/**
* List all matching functions in the specified database, including temporary functions. This
* returns the function identifier and the scope in which it was defined (system or user
* defined).
*/
def listFunctions(db: String, pattern: String): Seq[(FunctionIdentifier, String)] = {
val dbName = formatDatabaseName(db)
requireDbExists(dbName)
val dbFunctions = externalCatalog.listFunctions(dbName, pattern).map { f =>
FunctionIdentifier(f, Some(dbName)) }
val loadedFunctions = StringUtils
.filterPattern(functionRegistry.listFunction().map(_.unquotedString), pattern).map { f =>
        // In functionRegistry, function names are stored in unquoted format.
Try(parser.parseFunctionIdentifier(f)) match {
case Success(e) => e
case Failure(_) =>
// The names of some built-in functions are not parsable by our parser, e.g., %
FunctionIdentifier(f)
}
}
val functions = dbFunctions ++ loadedFunctions
// The session catalog caches some persistent functions in the FunctionRegistry
// so there can be duplicates.
functions.map {
case f if FunctionRegistry.functionSet.contains(f) => (f, "SYSTEM")
case f => (f, "USER")
}.distinct
}
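  // Illustrative note (not part of the original source): listFunctions("default", "max*") would
  // return pairs such as (FunctionIdentifier("max"), "SYSTEM") for built-ins matching the pattern,
  // plus (FunctionIdentifier("max_price_udf", Some("default")), "USER") for a matching
  // user-defined function persisted in that database.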
// -----------------
// | Other methods |
// -----------------
/**
* Drop all existing databases (except "default"), tables, partitions and functions,
* and set the current database to "default".
*
* This is mainly used for tests.
*/
def reset(): Unit = synchronized {
setCurrentDatabase(DEFAULT_DATABASE)
externalCatalog.setCurrentDatabase(DEFAULT_DATABASE)
listDatabases().filter(_ != DEFAULT_DATABASE).foreach { db =>
dropDatabase(db, ignoreIfNotExists = false, cascade = true)
}
listTables(DEFAULT_DATABASE).foreach { table =>
dropTable(table, ignoreIfNotExists = false, purge = false)
}
listFunctions(DEFAULT_DATABASE).map(_._1).foreach { func =>
if (func.database.isDefined) {
dropFunction(func, ignoreIfNotExists = false)
} else {
dropTempFunction(func.funcName, ignoreIfNotExists = false)
}
}
clearTempTables()
globalTempViewManager.clear()
functionRegistry.clear()
tableRelationCache.invalidateAll()
// restore built-in functions
FunctionRegistry.builtin.listFunction().foreach { f =>
val expressionInfo = FunctionRegistry.builtin.lookupFunction(f)
val functionBuilder = FunctionRegistry.builtin.lookupFunctionBuilder(f)
require(expressionInfo.isDefined, s"built-in function '$f' is missing expression info")
require(functionBuilder.isDefined, s"built-in function '$f' is missing function builder")
functionRegistry.registerFunction(f, expressionInfo.get, functionBuilder.get)
}
}
/**
* Copy the current state of the catalog to another catalog.
*
* This function is synchronized on this [[SessionCatalog]] (the source) to make sure the copied
* state is consistent. The target [[SessionCatalog]] is not synchronized, and should not be
* because the target [[SessionCatalog]] should not be published at this point. The caller must
* synchronize on the target if this assumption does not hold.
*/
private[sql] def copyStateTo(target: SessionCatalog): Unit = synchronized {
target.currentDb = currentDb
// copy over temporary views
tempViews.foreach(kv => target.tempViews.put(kv._1, kv._2))
}
/**
   * Validate the new location before renaming a managed table; the new location must not
   * already exist.
*/
private def validateNewLocationOfRename(
oldName: TableIdentifier,
newName: TableIdentifier): Unit = {
val oldTable = getTableMetadata(oldName)
if (oldTable.tableType == CatalogTableType.MANAGED) {
val databaseLocation =
externalCatalog.getDatabase(oldName.database.getOrElse(currentDb)).locationUri
val newTableLocation = new Path(new Path(databaseLocation), formatTableName(newName.table))
val fs = newTableLocation.getFileSystem(hadoopConf)
if (fs.exists(newTableLocation)) {
throw new AnalysisException(s"Can not rename the managed table('$oldName')" +
s". The associated location('$newTableLocation') already exists.")
}
}
}
}
|
rikima/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
|
Scala
|
apache-2.0
| 57,961 |
package org.scalawiki.dto.cmd.email
import org.scalawiki.dto.cmd._
case class EmailUser(override val params: EmailParam[Any]*)
extends EnumArgument[ActionArg]("emailuser", "Email a user.")
with ActionArg
with ArgWithParams[EmailParam[Any], ActionArg]
trait EmailParam[+T] extends Parameter[T]
case class Target(override val arg: String) extends StringParameter("target", "User to send email to.") with EmailParam[String]
case class Subject(override val arg: String) extends StringParameter("subject", "The subject of the message.") with EmailParam[String]
case class Text(override val arg: String) extends StringParameter("text", "The message.") with EmailParam[String]
case class Token(override val arg: String) extends StringParameter("token", "A 'csrf' token authorizing the request.") with EmailParam[String]
case class CcMe(override val arg: Boolean) extends BooleanParameter("ccme", "If set, a copy of the email will be sent to you.") with EmailParam[Boolean]
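// Hedged usage sketch (not part of the original source; the parameter values are illustrative and
// the token would normally be a csrf token obtained from the API):
//   val cmd = EmailUser(
//     Target("SomeUser"),
//     Subject("Hello"),
//     Text("Message body"),
//     Token("csrf-token-value"),
//     CcMe(true))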
|
intracer/scalawiki
|
scalawiki-core/src/main/scala/org/scalawiki/dto/cmd/email/EmailUser.scala
|
Scala
|
apache-2.0
| 975 |
/*******************************************************************************
* Copyright (c) 2016 Logimethods
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the MIT License (MIT)
* which accompanies this distribution, and is available at
* http://opensource.org/licenses/MIT
*******************************************************************************/
// @see https://www.trivento.io/write-custom-protocol-for-gatling/
package com.logimethods.connector.gatling.to_nats
import akka.actor.{ ActorSystem, Props }
import io.gatling.commons.stats.{ KO, OK }
import io.gatling.core.CoreComponents
import io.gatling.commons.stats.{ KO, OK }
import io.gatling.core.CoreComponents
import io.gatling.core.action.builder.ActionBuilder
import io.gatling.core.action.{ Action, ActionActor, ExitableActorDelegatingAction }
import io.gatling.core.config.GatlingConfiguration
import io.gatling.core.protocol._
import io.gatling.core.session.Session
import io.gatling.core.stats.StatsEngine
import io.gatling.core.stats.message.ResponseTimings
import io.gatling.core.structure.ScenarioContext
///- import io.gatling.jms.action.RequestReply._
import io.gatling.commons.util.Clock
import io.gatling.core.util.NameGen
import scala.concurrent.{ Future, Promise }
import java.util.Properties;
import io.nats.client.Connection;
import io.nats.client.Nats;
import io.nats.client.Options
import io.nats.client.Message;
import io.gatling.core.protocol.ProtocolKey;
import com.typesafe.scalalogging.StrictLogging
object NatsProtocol {
// @See http://leaks.wanari.com/2017/02/10/write-custom-protocol-gatling/
val NatsProtocolKey = new ProtocolKey[NatsProtocol, NatsComponents] {
///- type Protocol = NatsProtocol
///- type Components = NatsComponents
def protocolClass: Class[io.gatling.core.protocol.Protocol] = classOf[NatsProtocol].asInstanceOf[Class[io.gatling.core.protocol.Protocol]]
def defaultProtocolValue(configuration: GatlingConfiguration): NatsProtocol = throw new IllegalStateException("Can't provide a default value for NatsProtocol")
def newComponents(coreComponents: CoreComponents): NatsProtocol ⇒ NatsComponents = {
natsProtocol ⇒ NatsComponents(natsProtocol)
}
}
}
/** A Gatling Protocol to inject messages into NATS.
*
* @see [[https://www.trivento.io/write-custom-protocol-for-gatling/ Write a Custom Protocol for Gatling]]
* @see [[https://github.com/nats-io/jnats/blob/jnats-0.4.1/src/main/java/io/nats/client/ConnectionFactory.java ConnectionFactory.java]]
* @see [[http://nats-io.github.io/jnats/io/nats/client/ConnectionFactory.html ConnectionFactory API]]
*
* @constructor create a new Protocol defined by connection to a NATS server and a subject.
 * @param properties defining the parameters of the NATS server to connect to. The connection is established through `Nats.connect(new Options.Builder(properties).build())`
* @param subject the subject on which the messages will be pushed to NATS
*/
case class NatsProtocol(properties: Properties, subject: String, serializer: Object => Array[Byte] = (_.toString().getBytes()) )
extends Protocol with StrictLogging {
val connection = Nats.connect(new Options.Builder(properties).build())
logger.info(s"Connection to the NATS Server defined by '${properties}' with '$subject' Subject")
}
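// Hedged usage sketch (not part of the original source; the URL, subject and injection profile are
// illustrative and follow the pattern shown in the NatsBuilder scaladoc below; the Gatling DSL
// calls assume the usual `io.gatling.core.Predef._` and `scala.concurrent.duration._` imports):
//   val natsProperties = new Properties()
//   natsProperties.setProperty(Options.PROP_URL, "nats://localhost:4222")
//   val natsProtocol = NatsProtocol(natsProperties, "gatling.subject")
//   val natsScn = scenario("NATS call").exec(NatsBuilder("static message"))
//   setUp(natsScn.inject(constantUsersPerSec(10) during (30 seconds))).protocols(natsProtocol)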
case class NatsComponents(natsProtocol: NatsProtocol) extends ProtocolComponents {
override def onStart: Session => Session = ProtocolComponents.NoopOnStart
override def onExit: Session => Unit = ProtocolComponents.NoopOnExit
}
object NatsCall extends NameGen {
def apply(messageProvider: Object, protocol: NatsProtocol, system: ActorSystem, statsEngine: StatsEngine, clock: Clock, next: Action) = {
val actor = system.actorOf(Props(new NatsCall(messageProvider, protocol, next, statsEngine)))
new ExitableActorDelegatingAction(genName("natsCall"), statsEngine, clock, next, actor)
}
}
class NatsCall(messageProvider: Object, protocol: NatsProtocol, val next: Action, statsEngine: StatsEngine) extends ActionActor {
override def execute(session: Session): Unit = {
import com.logimethods.connector.gatling.to_nats.NatsMessage
messageProvider match {
case m: NatsMessage => protocol.connection.publish(protocol.subject + m.getSubject(), m.getPayload())
      case _ => protocol.connection.publish(protocol.subject, protocol.serializer(messageProvider))
}
next ! session
}
}
/** A Gatling ActionBuilder to inject messages into NATS.
*
* Possible usage:
* {{{
* val natsScn = scenario("NATS call").exec(NatsBuilder(new ValueProvider()))
* }}}
* {{{
* class ValueProvider {
* val incr = 10
* val basedValue = 100 -incr
* val maxIncr = 50
* var actualIncr = 0
*
* override def toString(): String = {
* actualIncr = (actualIncr % (maxIncr + incr)) + incr
* (basedValue + actualIncr).toString()
* }
* }
* }}}
*
* @see [[https://www.trivento.io/write-custom-protocol-for-gatling/ Write a Custom Protocol for Gatling]]
* @constructor create a new NatsBuilder that will emit messages into NATS.
* @param messageProvider the provider of the messages to emit. The actual message will be the output of the toString() method applied to this object
* (which could be a simple String if the message doesn't have to change over time).
*/
case class NatsBuilder(messageProvider: Object) extends ActionBuilder {
def natsProtocol(protocols: Protocols) = protocols.protocol[NatsProtocol].getOrElse(throw new UnsupportedOperationException("NatsProtocol Protocol wasn't registered"))
private def components(protocolComponentsRegistry: ProtocolComponentsRegistry): NatsComponents =
protocolComponentsRegistry.components(NatsProtocol.NatsProtocolKey)
override def build(ctx: ScenarioContext, next: Action): Action = {
import ctx._
val statsEngine = coreComponents.statsEngine
val system = ctx.coreComponents.actorSystem
val clock = ctx.coreComponents.clock
val natsComponents = components(protocolComponentsRegistry)
NatsCall(messageProvider, natsComponents.natsProtocol, system, statsEngine, clock, next)
}
}
|
Logimethods/nats-connector-gatling
|
src/main/scala/com/logimethods/connector/gatling/to_nats/NatsAction.scala
|
Scala
|
mit
| 6,209 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.SparkFunSuite
class TryEvalSuite extends SparkFunSuite with ExpressionEvalHelper {
test("try_add") {
Seq(
(1, 1, 2),
(Int.MaxValue, 1, null),
(Int.MinValue, -1, null)
).foreach { case (a, b, expected) =>
val left = Literal(a)
val right = Literal(b)
val input = TryEval(Add(left, right, failOnError = true))
checkEvaluation(input, expected)
}
}
test("try_divide") {
Seq(
(3.0, 2.0, 1.5),
(1.0, 0.0, null),
(-1.0, 0.0, null)
).foreach { case (a, b, expected) =>
val left = Literal(a)
val right = Literal(b)
val input = TryEval(Divide(left, right, failOnError = true))
checkEvaluation(input, expected)
}
}
test("try_subtract") {
Seq(
(1, 1, 0),
(Int.MaxValue, -1, null),
(Int.MinValue, 1, null)
).foreach { case (a, b, expected) =>
val left = Literal(a)
val right = Literal(b)
val input = TryEval(Subtract(left, right, failOnError = true))
checkEvaluation(input, expected)
}
}
test("try_multiply") {
Seq(
(2, 3, 6),
(Int.MaxValue, -10, null),
(Int.MinValue, 10, null)
).foreach { case (a, b, expected) =>
val left = Literal(a)
val right = Literal(b)
val input = TryEval(Multiply(left, right, failOnError = true))
checkEvaluation(input, expected)
}
}
}
|
mahak/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/TryEvalSuite.scala
|
Scala
|
apache-2.0
| 2,262 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils.MockTime
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.message.FetchResponseData
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.CompressionType
import org.apache.kafka.common.record.MemoryRecords
import org.apache.kafka.common.record.SimpleRecord
import org.apache.kafka.common.requests.FetchMetadata.{FINAL_EPOCH, INVALID_SESSION_ID}
import org.apache.kafka.common.requests.{FetchRequest, FetchMetadata => JFetchMetadata}
import org.apache.kafka.common.utils.Utils
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{Test, Timeout}
import java.util
import java.util.{Collections, Optional}
import scala.collection.mutable.ArrayBuffer
@Timeout(120)
class FetchSessionTest {
@Test
def testNewSessionId(): Unit = {
val cache = new FetchSessionCache(3, 100)
for (_ <- 0 to 10000) {
val id = cache.newSessionId()
assertTrue(id > 0)
}
}
def assertCacheContains(cache: FetchSessionCache, sessionIds: Int*) = {
var i = 0
for (sessionId <- sessionIds) {
i = i + 1
assertTrue(cache.get(sessionId).isDefined,
"Missing session " + i + " out of " + sessionIds.size + "(" + sessionId + ")")
}
assertEquals(sessionIds.size, cache.size)
}
private def dummyCreate(size: Int): FetchSession.CACHE_MAP = {
val cacheMap = new FetchSession.CACHE_MAP(size)
for (i <- 0 until size) {
cacheMap.add(new CachedPartition("test", i))
}
cacheMap
}
@Test
def testSessionCache(): Unit = {
val cache = new FetchSessionCache(3, 100)
assertEquals(0, cache.size)
val id1 = cache.maybeCreateSession(0, false, 10, () => dummyCreate(10))
val id2 = cache.maybeCreateSession(10, false, 20, () => dummyCreate(20))
val id3 = cache.maybeCreateSession(20, false, 30, () => dummyCreate(30))
assertEquals(INVALID_SESSION_ID, cache.maybeCreateSession(30, false, 40, () => dummyCreate(40)))
assertEquals(INVALID_SESSION_ID, cache.maybeCreateSession(40, false, 5, () => dummyCreate(5)))
assertCacheContains(cache, id1, id2, id3)
cache.touch(cache.get(id1).get, 200)
val id4 = cache.maybeCreateSession(210, false, 11, () => dummyCreate(11))
assertCacheContains(cache, id1, id3, id4)
cache.touch(cache.get(id1).get, 400)
cache.touch(cache.get(id3).get, 390)
cache.touch(cache.get(id4).get, 400)
val id5 = cache.maybeCreateSession(410, false, 50, () => dummyCreate(50))
assertCacheContains(cache, id3, id4, id5)
assertEquals(INVALID_SESSION_ID, cache.maybeCreateSession(410, false, 5, () => dummyCreate(5)))
val id6 = cache.maybeCreateSession(410, true, 5, () => dummyCreate(5))
assertCacheContains(cache, id3, id5, id6)
}
@Test
def testResizeCachedSessions(): Unit = {
val cache = new FetchSessionCache(2, 100)
assertEquals(0, cache.totalPartitions)
assertEquals(0, cache.size)
assertEquals(0, cache.evictionsMeter.count)
val id1 = cache.maybeCreateSession(0, false, 2, () => dummyCreate(2))
assertTrue(id1 > 0)
assertCacheContains(cache, id1)
val session1 = cache.get(id1).get
assertEquals(2, session1.size)
assertEquals(2, cache.totalPartitions)
assertEquals(1, cache.size)
assertEquals(0, cache.evictionsMeter.count)
val id2 = cache.maybeCreateSession(0, false, 4, () => dummyCreate(4))
val session2 = cache.get(id2).get
assertTrue(id2 > 0)
assertCacheContains(cache, id1, id2)
assertEquals(6, cache.totalPartitions)
assertEquals(2, cache.size)
assertEquals(0, cache.evictionsMeter.count)
cache.touch(session1, 200)
cache.touch(session2, 200)
val id3 = cache.maybeCreateSession(200, false, 5, () => dummyCreate(5))
assertTrue(id3 > 0)
assertCacheContains(cache, id2, id3)
assertEquals(9, cache.totalPartitions)
assertEquals(2, cache.size)
assertEquals(1, cache.evictionsMeter.count)
cache.remove(id3)
assertCacheContains(cache, id2)
assertEquals(1, cache.size)
assertEquals(1, cache.evictionsMeter.count)
assertEquals(4, cache.totalPartitions)
val iter = session2.partitionMap.iterator
iter.next()
iter.remove()
assertEquals(3, session2.size)
assertEquals(4, session2.cachedSize)
cache.touch(session2, session2.lastUsedMs)
assertEquals(3, cache.totalPartitions)
}
private val EMPTY_PART_LIST = Collections.unmodifiableList(new util.ArrayList[TopicPartition]())
@Test
def testCachedLeaderEpoch(): Unit = {
val time = new MockTime()
val cache = new FetchSessionCache(10, 1000)
val fetchManager = new FetchManager(time, cache)
val tp0 = new TopicPartition("foo", 0)
val tp1 = new TopicPartition("foo", 1)
val tp2 = new TopicPartition("bar", 1)
def cachedLeaderEpochs(context: FetchContext): Map[TopicPartition, Optional[Integer]] = {
val mapBuilder = Map.newBuilder[TopicPartition, Optional[Integer]]
context.foreachPartition((tp, data) => mapBuilder += tp -> data.currentLeaderEpoch)
mapBuilder.result()
}
val request1 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
request1.put(tp0, new FetchRequest.PartitionData(0, 0, 100, Optional.empty()))
request1.put(tp1, new FetchRequest.PartitionData(10, 0, 100, Optional.of(1)))
request1.put(tp2, new FetchRequest.PartitionData(10, 0, 100, Optional.of(2)))
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, request1, EMPTY_PART_LIST, false)
val epochs1 = cachedLeaderEpochs(context1)
assertEquals(Optional.empty(), epochs1(tp0))
assertEquals(Optional.of(1), epochs1(tp1))
assertEquals(Optional.of(2), epochs1(tp2))
val response = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
response.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
response.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
response.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(5)
.setLastStableOffset(5)
.setLogStartOffset(5))
val sessionId = context1.updateAndGenerateResponseData(response).sessionId()
// With no changes, the cached epochs should remain the same
val request2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
val context2 = fetchManager.newContext(new JFetchMetadata(sessionId, 1), request2, EMPTY_PART_LIST, false)
val epochs2 = cachedLeaderEpochs(context2)
    assertEquals(Optional.empty(), epochs2(tp0))
assertEquals(Optional.of(1), epochs2(tp1))
assertEquals(Optional.of(2), epochs2(tp2))
context2.updateAndGenerateResponseData(response).sessionId()
// Now verify we can change the leader epoch and the context is updated
val request3 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
request3.put(tp0, new FetchRequest.PartitionData(0, 0, 100, Optional.of(6)))
request3.put(tp1, new FetchRequest.PartitionData(10, 0, 100, Optional.empty()))
request3.put(tp2, new FetchRequest.PartitionData(10, 0, 100, Optional.of(3)))
val context3 = fetchManager.newContext(new JFetchMetadata(sessionId, 2), request3, EMPTY_PART_LIST, false)
val epochs3 = cachedLeaderEpochs(context3)
assertEquals(Optional.of(6), epochs3(tp0))
assertEquals(Optional.empty(), epochs3(tp1))
assertEquals(Optional.of(3), epochs3(tp2))
}
@Test
def testLastFetchedEpoch(): Unit = {
val time = new MockTime()
val cache = new FetchSessionCache(10, 1000)
val fetchManager = new FetchManager(time, cache)
val tp0 = new TopicPartition("foo", 0)
val tp1 = new TopicPartition("foo", 1)
val tp2 = new TopicPartition("bar", 1)
def cachedLeaderEpochs(context: FetchContext): Map[TopicPartition, Optional[Integer]] = {
val mapBuilder = Map.newBuilder[TopicPartition, Optional[Integer]]
context.foreachPartition((tp, data) => mapBuilder += tp -> data.currentLeaderEpoch)
mapBuilder.result()
}
def cachedLastFetchedEpochs(context: FetchContext): Map[TopicPartition, Optional[Integer]] = {
val mapBuilder = Map.newBuilder[TopicPartition, Optional[Integer]]
context.foreachPartition((tp, data) => mapBuilder += tp -> data.lastFetchedEpoch)
mapBuilder.result()
}
val request1 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
request1.put(tp0, new FetchRequest.PartitionData(0, 0, 100, Optional.empty[Integer], Optional.empty[Integer]))
request1.put(tp1, new FetchRequest.PartitionData(10, 0, 100, Optional.of(1), Optional.empty[Integer]))
request1.put(tp2, new FetchRequest.PartitionData(10, 0, 100, Optional.of(2), Optional.of(1)))
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, request1, EMPTY_PART_LIST, false)
assertEquals(Map(tp0 -> Optional.empty, tp1 -> Optional.of(1), tp2 -> Optional.of(2)),
cachedLeaderEpochs(context1))
assertEquals(Map(tp0 -> Optional.empty, tp1 -> Optional.empty, tp2 -> Optional.of(1)),
cachedLastFetchedEpochs(context1))
val response = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
response.put(tp0, new FetchResponseData.PartitionData()
.setPartitionIndex(tp0.partition)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
response.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
response.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(5)
.setLastStableOffset(5)
.setLogStartOffset(5))
val sessionId = context1.updateAndGenerateResponseData(response).sessionId()
// With no changes, the cached epochs should remain the same
val request2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
val context2 = fetchManager.newContext(new JFetchMetadata(sessionId, 1), request2, EMPTY_PART_LIST, false)
assertEquals(Map(tp0 -> Optional.empty, tp1 -> Optional.of(1), tp2 -> Optional.of(2)), cachedLeaderEpochs(context2))
assertEquals(Map(tp0 -> Optional.empty, tp1 -> Optional.empty, tp2 -> Optional.of(1)),
cachedLastFetchedEpochs(context2))
context2.updateAndGenerateResponseData(response).sessionId()
// Now verify we can change the leader epoch and the context is updated
val request3 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
request3.put(tp0, new FetchRequest.PartitionData(0, 0, 100, Optional.of(6), Optional.of(5)))
request3.put(tp1, new FetchRequest.PartitionData(10, 0, 100, Optional.empty[Integer], Optional.empty[Integer]))
request3.put(tp2, new FetchRequest.PartitionData(10, 0, 100, Optional.of(3), Optional.of(3)))
val context3 = fetchManager.newContext(new JFetchMetadata(sessionId, 2), request3, EMPTY_PART_LIST, false)
assertEquals(Map(tp0 -> Optional.of(6), tp1 -> Optional.empty, tp2 -> Optional.of(3)),
cachedLeaderEpochs(context3))
assertEquals(Map(tp0 -> Optional.of(5), tp1 -> Optional.empty, tp2 -> Optional.of(3)),
      cachedLastFetchedEpochs(context3))
}
@Test
def testFetchRequests(): Unit = {
val time = new MockTime()
val cache = new FetchSessionCache(10, 1000)
val fetchManager = new FetchManager(time, cache)
// Verify that SESSIONLESS requests get a SessionlessFetchContext
val context = fetchManager.newContext(JFetchMetadata.LEGACY,
new util.HashMap[TopicPartition, FetchRequest.PartitionData](), EMPTY_PART_LIST, true)
assertEquals(classOf[SessionlessFetchContext], context.getClass)
// Create a new fetch session with a FULL fetch request
val reqData2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
reqData2.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
reqData2.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
val context2 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData2, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], context2.getClass)
val reqData2Iter = reqData2.entrySet().iterator()
context2.foreachPartition((topicPart, data) => {
val entry = reqData2Iter.next()
assertEquals(entry.getKey, topicPart)
assertEquals(entry.getValue, data)
})
assertEquals(0, context2.getFetchOffset(new TopicPartition("foo", 0)).get)
assertEquals(10, context2.getFetchOffset(new TopicPartition("foo", 1)).get)
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData2.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData2.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp2 = context2.updateAndGenerateResponseData(respData2)
assertEquals(Errors.NONE, resp2.error())
assertTrue(resp2.sessionId() != INVALID_SESSION_ID)
assertEquals(respData2, resp2.responseData)
// Test trying to create a new session with an invalid epoch
val context3 = fetchManager.newContext(
new JFetchMetadata(resp2.sessionId(), 5), reqData2, EMPTY_PART_LIST, false)
assertEquals(classOf[SessionErrorContext], context3.getClass)
assertEquals(Errors.INVALID_FETCH_SESSION_EPOCH,
context3.updateAndGenerateResponseData(respData2).error())
// Test trying to create a new session with a non-existent session id
val context4 = fetchManager.newContext(
new JFetchMetadata(resp2.sessionId() + 1, 1), reqData2, EMPTY_PART_LIST, false)
assertEquals(classOf[SessionErrorContext], context4.getClass)
assertEquals(Errors.FETCH_SESSION_ID_NOT_FOUND,
context4.updateAndGenerateResponseData(respData2).error())
// Continue the first fetch session we created.
val reqData5 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
val context5 = fetchManager.newContext(
new JFetchMetadata(resp2.sessionId(), 1), reqData5, EMPTY_PART_LIST, false)
assertEquals(classOf[IncrementalFetchContext], context5.getClass)
val reqData5Iter = reqData2.entrySet().iterator()
context5.foreachPartition((topicPart, data) => {
val entry = reqData5Iter.next()
assertEquals(entry.getKey, topicPart)
assertEquals(entry.getValue, data)
})
assertEquals(10, context5.getFetchOffset(new TopicPartition("foo", 1)).get)
val resp5 = context5.updateAndGenerateResponseData(respData2)
assertEquals(Errors.NONE, resp5.error())
assertEquals(resp2.sessionId(), resp5.sessionId())
assertEquals(0, resp5.responseData.size())
// Test setting an invalid fetch session epoch.
val context6 = fetchManager.newContext(
new JFetchMetadata(resp2.sessionId(), 5), reqData2, EMPTY_PART_LIST, false)
assertEquals(classOf[SessionErrorContext], context6.getClass)
assertEquals(Errors.INVALID_FETCH_SESSION_EPOCH,
context6.updateAndGenerateResponseData(respData2).error())
// Test generating a throttled response for the incremental fetch session
val reqData7 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
val context7 = fetchManager.newContext(
new JFetchMetadata(resp2.sessionId(), 2), reqData7, EMPTY_PART_LIST, false)
val resp7 = context7.getThrottledResponse(100)
assertEquals(Errors.NONE, resp7.error())
assertEquals(resp2.sessionId(), resp7.sessionId())
assertEquals(100, resp7.throttleTimeMs())
// Close the incremental fetch session.
val prevSessionId = resp5.sessionId
var nextSessionId = prevSessionId
do {
val reqData8 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
reqData8.put(new TopicPartition("bar", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
reqData8.put(new TopicPartition("bar", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
val context8 = fetchManager.newContext(
new JFetchMetadata(prevSessionId, FINAL_EPOCH), reqData8, EMPTY_PART_LIST, false)
assertEquals(classOf[SessionlessFetchContext], context8.getClass)
assertEquals(0, cache.size)
val respData8 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData8.put(new TopicPartition("bar", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData8.put(new TopicPartition("bar", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
val resp8 = context8.updateAndGenerateResponseData(respData8)
assertEquals(Errors.NONE, resp8.error)
nextSessionId = resp8.sessionId
} while (nextSessionId == prevSessionId)
}
@Test
def testIncrementalFetchSession(): Unit = {
val time = new MockTime()
val cache = new FetchSessionCache(10, 1000)
val fetchManager = new FetchManager(time, cache)
// Create a new fetch session with foo-0 and foo-1
val reqData1 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
reqData1.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
reqData1.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData1, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], context1.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp1 = context1.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, resp1.error())
assertTrue(resp1.sessionId() != INVALID_SESSION_ID)
assertEquals(2, resp1.responseData.size())
// Create an incremental fetch request that removes foo-0 and adds bar-0
val reqData2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
reqData2.put(new TopicPartition("bar", 0), new FetchRequest.PartitionData(15, 0, 0,
Optional.empty()))
val removed2 = new util.ArrayList[TopicPartition]
removed2.add(new TopicPartition("foo", 0))
val context2 = fetchManager.newContext(
new JFetchMetadata(resp1.sessionId(), 1), reqData2, removed2, false)
assertEquals(classOf[IncrementalFetchContext], context2.getClass)
val parts2 = Set(new TopicPartition("foo", 1), new TopicPartition("bar", 0))
val reqData2Iter = parts2.iterator
context2.foreachPartition((topicPart, _) => {
assertEquals(reqData2Iter.next(), topicPart)
})
assertEquals(None, context2.getFetchOffset(new TopicPartition("foo", 0)))
assertEquals(10, context2.getFetchOffset(new TopicPartition("foo", 1)).get)
assertEquals(15, context2.getFetchOffset(new TopicPartition("bar", 0)).get)
assertEquals(None, context2.getFetchOffset(new TopicPartition("bar", 2)))
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData2.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
respData2.put(new TopicPartition("bar", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp2 = context2.updateAndGenerateResponseData(respData2)
assertEquals(Errors.NONE, resp2.error)
assertEquals(1, resp2.responseData.size)
assertTrue(resp2.sessionId > 0)
}
@Test
def testFetchSessionExpiration(): Unit = {
val time = new MockTime()
// set maximum entries to 2 to allow for eviction later
val cache = new FetchSessionCache(2, 1000)
val fetchManager = new FetchManager(time, cache)
// Create a new fetch session, session 1
val session1req = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
session1req.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
session1req.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
val session1context1 = fetchManager.newContext(JFetchMetadata.INITIAL, session1req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session1context1.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session1resp = session1context1.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, session1resp.error())
assertTrue(session1resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session1resp.responseData.size)
    // check that the session was entered into the cache
assertTrue(cache.get(session1resp.sessionId()).isDefined)
time.sleep(500)
// Create a second new fetch session
val session2req = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
session2req.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
session2req.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
    val session2context = fetchManager.newContext(JFetchMetadata.INITIAL, session2req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session2context.getClass)
val session2RespData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
session2RespData.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
session2RespData.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
    val session2resp = session2context.updateAndGenerateResponseData(session2RespData)
assertEquals(Errors.NONE, session2resp.error())
assertTrue(session2resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session2resp.responseData.size)
// both newly created entries are present in cache
assertTrue(cache.get(session1resp.sessionId()).isDefined)
assertTrue(cache.get(session2resp.sessionId()).isDefined)
time.sleep(500)
// Create an incremental fetch request for session 1
val context1v2 = fetchManager.newContext(
new JFetchMetadata(session1resp.sessionId(), 1),
new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData],
new util.ArrayList[TopicPartition], false)
assertEquals(classOf[IncrementalFetchContext], context1v2.getClass)
// total sleep time will now be large enough that fetch session 1 will be evicted if not correctly touched
time.sleep(501)
// create one final session to test that the least recently used entry is evicted
// the second session should be evicted because the first session was incrementally fetched
// more recently than the second session was created
val session3req = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
session3req.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
session3req.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
val session3context = fetchManager.newContext(JFetchMetadata.INITIAL, session3req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session3context.getClass)
val respData3 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData3.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData3.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session3resp = session3context.updateAndGenerateResponseData(respData3)
assertEquals(Errors.NONE, session3resp.error())
assertTrue(session3resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session3resp.responseData.size)
assertTrue(cache.get(session1resp.sessionId()).isDefined)
assertFalse(cache.get(session2resp.sessionId()).isDefined, "session 2 should have been evicted by latest session, as session 1 was used more recently")
assertTrue(cache.get(session3resp.sessionId()).isDefined)
}
@Test
def testPrivilegedSessionHandling(): Unit = {
val time = new MockTime()
// set maximum entries to 2 to allow for eviction later
val cache = new FetchSessionCache(2, 1000)
val fetchManager = new FetchManager(time, cache)
// Create a new fetch session, session 1
val session1req = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
session1req.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
session1req.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
val session1context = fetchManager.newContext(JFetchMetadata.INITIAL, session1req, EMPTY_PART_LIST, true)
assertEquals(classOf[FullFetchContext], session1context.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session1resp = session1context.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, session1resp.error())
assertTrue(session1resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session1resp.responseData.size)
assertEquals(1, cache.size)
// move time forward to age session 1 a little compared to session 2
time.sleep(500)
// Create a second new fetch session, unprivileged
val session2req = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
session2req.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
session2req.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
    val session2context = fetchManager.newContext(JFetchMetadata.INITIAL, session2req, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], session2context.getClass)
val session2RespData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
session2RespData.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
session2RespData.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
    val session2resp = session2context.updateAndGenerateResponseData(session2RespData)
assertEquals(Errors.NONE, session2resp.error())
assertTrue(session2resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session2resp.responseData.size)
// both newly created entries are present in cache
assertTrue(cache.get(session1resp.sessionId()).isDefined)
assertTrue(cache.get(session2resp.sessionId()).isDefined)
assertEquals(2, cache.size)
time.sleep(500)
    // create a session to test that session 1's privileges mean it is retained and session 2 is evicted
val session3req = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
session3req.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
session3req.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
val session3context = fetchManager.newContext(JFetchMetadata.INITIAL, session3req, EMPTY_PART_LIST, true)
assertEquals(classOf[FullFetchContext], session3context.getClass)
val respData3 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData3.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData3.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val session3resp = session3context.updateAndGenerateResponseData(respData3)
assertEquals(Errors.NONE, session3resp.error())
assertTrue(session3resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session3resp.responseData.size)
assertTrue(cache.get(session1resp.sessionId()).isDefined)
    // even though session 2 is more recent than session 1, and has not reached expiry time, it is less
    // privileged than session 1, and thus session 3 should be entered and session 2 evicted.
assertFalse(cache.get(session2resp.sessionId()).isDefined, "session 2 should have been evicted by session 3")
assertTrue(cache.get(session3resp.sessionId()).isDefined)
assertEquals(2, cache.size)
time.sleep(501)
// create a final session to test whether session1 can be evicted due to age even though it is privileged
val session4req = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
session4req.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
session4req.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
val session4context = fetchManager.newContext(JFetchMetadata.INITIAL, session4req, EMPTY_PART_LIST, true)
assertEquals(classOf[FullFetchContext], session4context.getClass)
val respData4 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData4.put(new TopicPartition("foo", 0),
new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData4.put(new TopicPartition("foo", 1),
new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
    val session4resp = session4context.updateAndGenerateResponseData(respData4)
assertEquals(Errors.NONE, session4resp.error())
assertTrue(session4resp.sessionId() != INVALID_SESSION_ID)
assertEquals(2, session4resp.responseData.size)
assertFalse(cache.get(session1resp.sessionId()).isDefined, "session 1 should have been evicted by session 4 even though it is privileged as it has hit eviction time")
assertTrue(cache.get(session3resp.sessionId()).isDefined)
assertTrue(cache.get(session4resp.sessionId()).isDefined)
assertEquals(2, cache.size)
}
@Test
def testZeroSizeFetchSession(): Unit = {
val time = new MockTime()
val cache = new FetchSessionCache(10, 1000)
val fetchManager = new FetchManager(time, cache)
// Create a new fetch session with foo-0 and foo-1
val reqData1 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
reqData1.put(new TopicPartition("foo", 0), new FetchRequest.PartitionData(0, 0, 100,
Optional.empty()))
reqData1.put(new TopicPartition("foo", 1), new FetchRequest.PartitionData(10, 0, 100,
Optional.empty()))
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData1, EMPTY_PART_LIST, false)
assertEquals(classOf[FullFetchContext], context1.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(new TopicPartition("foo", 0), new FetchResponseData.PartitionData()
.setPartitionIndex(0)
.setHighWatermark(100)
.setLastStableOffset(100)
.setLogStartOffset(100))
respData1.put(new TopicPartition("foo", 1), new FetchResponseData.PartitionData()
.setPartitionIndex(1)
.setHighWatermark(10)
.setLastStableOffset(10)
.setLogStartOffset(10))
val resp1 = context1.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, resp1.error)
assertTrue(resp1.sessionId() != INVALID_SESSION_ID)
assertEquals(2, resp1.responseData.size)
// Create an incremental fetch request that removes foo-0 and foo-1
// Verify that the previous fetch session was closed.
val reqData2 = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
val removed2 = new util.ArrayList[TopicPartition]
removed2.add(new TopicPartition("foo", 0))
removed2.add(new TopicPartition("foo", 1))
val context2 = fetchManager.newContext(
new JFetchMetadata(resp1.sessionId, 1), reqData2, removed2, false)
assertEquals(classOf[SessionlessFetchContext], context2.getClass)
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
val resp2 = context2.updateAndGenerateResponseData(respData2)
assertEquals(INVALID_SESSION_ID, resp2.sessionId)
assertTrue(resp2.responseData.isEmpty)
assertEquals(0, cache.size)
}
@Test
def testDivergingEpoch(): Unit = {
val time = new MockTime()
val cache = new FetchSessionCache(10, 1000)
val fetchManager = new FetchManager(time, cache)
val tp1 = new TopicPartition("foo", 1)
val tp2 = new TopicPartition("bar", 2)
val reqData = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
reqData.put(tp1, new FetchRequest.PartitionData(100, 0, 1000, Optional.of(5), Optional.of(4)))
reqData.put(tp2, new FetchRequest.PartitionData(100, 0, 1000, Optional.of(5), Optional.of(4)))
// Full fetch context returns all partitions in the response
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData, EMPTY_PART_LIST, isFollower = false)
assertEquals(classOf[FullFetchContext], context1.getClass)
val respData = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(105)
.setLastStableOffset(105)
.setLogStartOffset(0))
val divergingEpoch = new FetchResponseData.EpochEndOffset().setEpoch(3).setEndOffset(90)
respData.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(105)
.setLastStableOffset(105)
.setLogStartOffset(0)
.setDivergingEpoch(divergingEpoch))
val resp1 = context1.updateAndGenerateResponseData(respData)
assertEquals(Errors.NONE, resp1.error)
assertNotEquals(INVALID_SESSION_ID, resp1.sessionId)
assertEquals(Utils.mkSet(tp1, tp2), resp1.responseData.keySet)
// Incremental fetch context returns partitions with divergent epoch even if none
// of the other conditions for return are met.
val context2 = fetchManager.newContext(new JFetchMetadata(resp1.sessionId, 1), reqData, EMPTY_PART_LIST, isFollower = false)
assertEquals(classOf[IncrementalFetchContext], context2.getClass)
val resp2 = context2.updateAndGenerateResponseData(respData)
assertEquals(Errors.NONE, resp2.error)
assertEquals(resp1.sessionId, resp2.sessionId)
assertEquals(Collections.singleton(tp2), resp2.responseData.keySet)
// All partitions with divergent epoch should be returned.
respData.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(105)
.setLastStableOffset(105)
.setLogStartOffset(0)
.setDivergingEpoch(divergingEpoch))
val resp3 = context2.updateAndGenerateResponseData(respData)
assertEquals(Errors.NONE, resp3.error)
assertEquals(resp1.sessionId, resp3.sessionId)
assertEquals(Utils.mkSet(tp1, tp2), resp3.responseData.keySet)
// Partitions that meet other conditions should be returned regardless of whether
// divergingEpoch is set or not.
respData.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(110)
.setLastStableOffset(110)
.setLogStartOffset(0))
val resp4 = context2.updateAndGenerateResponseData(respData)
assertEquals(Errors.NONE, resp4.error)
assertEquals(resp1.sessionId, resp4.sessionId)
assertEquals(Utils.mkSet(tp1, tp2), resp4.responseData.keySet)
}
@Test
def testDeprioritizesPartitionsWithRecordsOnly(): Unit = {
val time = new MockTime()
val cache = new FetchSessionCache(10, 1000)
val fetchManager = new FetchManager(time, cache)
val tp1 = new TopicPartition("foo", 1)
val tp2 = new TopicPartition("bar", 2)
val tp3 = new TopicPartition("zar", 3)
val reqData = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData]
reqData.put(tp1, new FetchRequest.PartitionData(100, 0, 1000, Optional.of(5), Optional.of(4)))
reqData.put(tp2, new FetchRequest.PartitionData(100, 0, 1000, Optional.of(5), Optional.of(4)))
reqData.put(tp3, new FetchRequest.PartitionData(100, 0, 1000, Optional.of(5), Optional.of(4)))
// Full fetch context returns all partitions in the response
val context1 = fetchManager.newContext(JFetchMetadata.INITIAL, reqData, EMPTY_PART_LIST, isFollower = false)
assertEquals(classOf[FullFetchContext], context1.getClass)
val respData1 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData1.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(50)
.setLastStableOffset(50)
.setLogStartOffset(0))
respData1.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(50)
.setLastStableOffset(50)
.setLogStartOffset(0))
respData1.put(tp3, new FetchResponseData.PartitionData()
.setPartitionIndex(tp3.partition)
.setHighWatermark(50)
.setLastStableOffset(50)
.setLogStartOffset(0))
val resp1 = context1.updateAndGenerateResponseData(respData1)
assertEquals(Errors.NONE, resp1.error)
assertNotEquals(INVALID_SESSION_ID, resp1.sessionId)
assertEquals(Utils.mkSet(tp1, tp2, tp3), resp1.responseData.keySet())
// Incremental fetch context returns partitions with changes but only deprioritizes
// the partitions with records
val context2 = fetchManager.newContext(new JFetchMetadata(resp1.sessionId, 1), reqData, EMPTY_PART_LIST, isFollower = false)
assertEquals(classOf[IncrementalFetchContext], context2.getClass)
// Partitions are ordered in the session as per last response
assertPartitionsOrder(context2, Seq(tp1, tp2, tp3))
// Response is empty
val respData2 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
val resp2 = context2.updateAndGenerateResponseData(respData2)
assertEquals(Errors.NONE, resp2.error)
assertEquals(resp1.sessionId, resp2.sessionId)
assertEquals(Collections.emptySet(), resp2.responseData.keySet)
// All partitions with changes should be returned.
val respData3 = new util.LinkedHashMap[TopicPartition, FetchResponseData.PartitionData]
respData3.put(tp1, new FetchResponseData.PartitionData()
.setPartitionIndex(tp1.partition)
.setHighWatermark(60)
.setLastStableOffset(50)
.setLogStartOffset(0))
respData3.put(tp2, new FetchResponseData.PartitionData()
.setPartitionIndex(tp2.partition)
.setHighWatermark(60)
.setLastStableOffset(50)
.setLogStartOffset(0)
.setRecords(MemoryRecords.withRecords(CompressionType.NONE,
new SimpleRecord(100, null))))
respData3.put(tp3, new FetchResponseData.PartitionData()
.setPartitionIndex(tp3.partition)
.setHighWatermark(50)
.setLastStableOffset(50)
.setLogStartOffset(0))
val resp3 = context2.updateAndGenerateResponseData(respData3)
assertEquals(Errors.NONE, resp3.error)
assertEquals(resp1.sessionId, resp3.sessionId)
assertEquals(Utils.mkSet(tp1, tp2), resp3.responseData.keySet)
    // Only the partitions that returned records in the last response
    // are deprioritized in the session's partition order.
assertPartitionsOrder(context2, Seq(tp1, tp3, tp2))
}
private def assertPartitionsOrder(context: FetchContext, partitions: Seq[TopicPartition]): Unit = {
val partitionsInContext = ArrayBuffer.empty[TopicPartition]
context.foreachPartition { (tp, _) =>
partitionsInContext += tp
}
assertEquals(partitions, partitionsInContext.toSeq)
}
}
|
Chasego/kafka
|
core/src/test/scala/unit/kafka/server/FetchSessionTest.scala
|
Scala
|
apache-2.0
| 43,796 |
package org.workcraft.plugins.petri.tools
import java.awt.Component
import java.awt.event.ActionEvent
import java.awt.event.ActionListener
import javax.swing.JButton
import javax.swing.JSlider
import javax.swing.Timer
import javax.swing.event.ChangeEvent
import javax.swing.event.ChangeListener
import org.workcraft.swing.Swing
import org.workcraft.swing.Swing._
import SimulationControlPanel._
import org.workcraft.gui.GUI
import scalaz.Scalaz._
import org.workcraft.scala.effects.IO
import org.workcraft.scala.effects.IO._
object SimulationControlPanel {
type SimulationControl[M[_], State] = SimulationModel[M, Unit, State] {
}
}
// unsafe class -- all the methods including the constructor are side-effectful
class SimulationControlPanel[State](simControl : SimulationControl[Swing, State]) {
val resetButton = new JButton("Reset")
val speedSlider = new JSlider(-1000, 1000, 0)
val autoPlayButton = (GUI.createIconFromSvg("images/icons/svg/start.svg", 16, 16, None) >>= (GUI.createIconButton(_, "Automatic simulation"))).unsafePerformIO
val stopButton = new JButton("Stop")
val backButton = new JButton("Step <")
val stepButton = new JButton("Step >")
val loadTraceButton = new JButton("Load trace")
val saveMarkingButton = new JButton("Save marking")
val loadMarkingButton = new JButton("Load marking")
speedSlider.addChangeListener(new ChangeListener {
override def stateChanged(e : ChangeEvent) {
timer match {
case Some(t) => {
t.stop
t.setInitialDelay(getAnimationDelay)
t.setDelay(getAnimationDelay)
t.start
}
case _ => {}
}
update
}
})
var initialState : State = simControl.saveState.unsafeRun.unsafePerformIO
resetButton.addActionListener(
new ActionListener { override def actionPerformed(e : ActionEvent) = reset })
autoPlayButton.addActionListener(new ActionListener{
override def actionPerformed(e : ActionEvent) {
timer = Some(new Timer(getAnimationDelay, new ActionListener {
override def actionPerformed(e : ActionEvent) {
simControl.fire(()).unsafeRun.unsafePerformIO
}
}))
timer.map(_.start)
update
}
})
stopButton.addActionListener(new ActionListener {
override def actionPerformed(e : ActionEvent) {
timer.map(_.stop)
timer = None
update
}
});
backButton.addActionListener(new ActionListener {
override def actionPerformed(e : ActionEvent) {
simControl.unfire(()).unsafeRun.unsafePerformIO
}
})
stepButton.addActionListener(new ActionListener {
override def actionPerformed(e : ActionEvent) {
simControl.fire(()).unsafeRun.unsafePerformIO
}
})
saveMarkingButton.addActionListener(new ActionListener {
override def actionPerformed(e : ActionEvent) {
savedState = Some(simControl.saveState.unsafeRun.unsafePerformIO)
}
})
loadMarkingButton.addActionListener(new ActionListener {
override def actionPerformed(e : ActionEvent) {
savedState.map(s => simControl.loadState(s).unsafeRun.unsafePerformIO)
}
})
var timer : Option[Timer] = None
var savedState : Option[State] = None
  val DEFAULT_SIMULATION_DELAY = 0.3
  val EDGE_SPEED_MULTIPLIER = 10
  def getAnimationDelay: Int =
    (1000.0 * DEFAULT_SIMULATION_DELAY * scala.math.pow(EDGE_SPEED_MULTIPLIER, -speedSlider.getValue() / 1000.0)).toInt
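  // Editorial note (not in the original file): with DEFAULT_SIMULATION_DELAY = 0.3 and
  // EDGE_SPEED_MULTIPLIER = 10, the slider maps exponentially onto the firing delay:
  //   speedSlider = -1000  ->  1000 * 0.3 * 10^( 1) = 3000 ms
  //   speedSlider =     0  ->  1000 * 0.3 * 10^( 0) =  300 ms
  //   speedSlider =  1000  ->  1000 * 0.3 * 10^(-1) =   30 ms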
def components : List[Component] =
List( resetButton
, speedSlider
, autoPlayButton
, stopButton
, backButton
, stepButton
, loadTraceButton
, saveMarkingButton
, loadMarkingButton)
def reset = simControl.loadState(initialState)
def rememberInitialState = { initialState = simControl.saveState.unsafeRun.unsafePerformIO }
def update = {
val atEnd = !(simControl.canFire(()).unsafeRun.unsafePerformIO)
val atStart = !(simControl.canUnfire(()).unsafeRun.unsafePerformIO)
timer match {
case Some(t) => {
if(atEnd) {
t.stop
timer = None
} else {
t.setDelay(getAnimationDelay)
}
}
case None => {}
}
resetButton.setEnabled(!atStart)
autoPlayButton.setEnabled(!atEnd)
    stopButton.setEnabled(timer.isDefined)
backButton.setEnabled(!atStart)
stepButton.setEnabled(!atEnd)
loadTraceButton.setEnabled(true)
saveMarkingButton.setEnabled(true)
loadMarkingButton.setEnabled(savedState.isDefined)
}
def asStateControl : SimStateControl[Swing] = new SimStateControl[Swing] {
override def reset = unsafeToSwing(SimulationControlPanel.this.reset)
override def rememberInitialState = unsafeToSwing(SimulationControlPanel.this.rememberInitialState)
};
}
|
tuura/workcraft-2.2
|
ScalaGraphEditorUtil/src/main/scala/org/workcraft/plugins/petri/tools/SimulationControlPanel.scala
|
Scala
|
gpl-3.0
| 4,805 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import Types._
import scala.reflect.Manifest
// T must be invariant to work properly.
// Because it is sealed and the only instances go through AttributeKey.apply,
// a single AttributeKey instance cannot conform to AttributeKey[T] for different Ts
sealed trait AttributeKey[T] {
def manifest: Manifest[T]
def label: String
def description: Option[String]
def extend: Seq[AttributeKey[_]]
def isLocal: Boolean
def rank: Int
}
private[sbt] abstract class SharedAttributeKey[T] extends AttributeKey[T] {
override final def toString = label
override final def hashCode = label.hashCode
override final def equals(o: Any) = (this eq o.asInstanceOf[AnyRef]) || (o match {
case a: SharedAttributeKey[t] => a.label == this.label && a.manifest == this.manifest
case _ => false
})
final def isLocal: Boolean = false
}
object AttributeKey
{
def apply[T](name: String)(implicit mf: Manifest[T]): AttributeKey[T] =
make(name, None, Nil, Int.MaxValue)
def apply[T](name: String, rank: Int)(implicit mf: Manifest[T]): AttributeKey[T] =
make(name, None, Nil, rank)
def apply[T](name: String, description: String)(implicit mf: Manifest[T]): AttributeKey[T] =
apply(name, description, Nil)
def apply[T](name: String, description: String, rank: Int)(implicit mf: Manifest[T]): AttributeKey[T] =
apply(name, description, Nil, rank)
def apply[T](name: String, description: String, extend: Seq[AttributeKey[_]])(implicit mf: Manifest[T]): AttributeKey[T] =
apply(name, description, extend, Int.MaxValue)
def apply[T](name: String, description: String, extend: Seq[AttributeKey[_]], rank: Int)(implicit mf: Manifest[T]): AttributeKey[T] =
make(name, Some(description), extend, rank)
private[this] def make[T](name: String, description0: Option[String], extend0: Seq[AttributeKey[_]], rank0: Int)(implicit mf: Manifest[T]): AttributeKey[T] = new SharedAttributeKey[T] {
def manifest = mf
def label = name
def description = description0
def extend = extend0
def rank = rank0
}
private[sbt] def local[T](implicit mf: Manifest[T]): AttributeKey[T] = new AttributeKey[T] {
def manifest = mf
def label = LocalLabel
def description = None
def extend = Nil
override def toString = label
def isLocal: Boolean = true
def rank = Int.MaxValue
}
private[sbt] final val LocalLabel = "$local"
}
trait AttributeMap
{
def apply[T](k: AttributeKey[T]): T
def get[T](k: AttributeKey[T]): Option[T]
def remove[T](k: AttributeKey[T]): AttributeMap
def contains[T](k: AttributeKey[T]): Boolean
def put[T](k: AttributeKey[T], value: T): AttributeMap
def keys: Iterable[AttributeKey[_]]
def ++(o: Iterable[AttributeEntry[_]]): AttributeMap
def ++(o: AttributeMap): AttributeMap
def entries: Iterable[AttributeEntry[_]]
def isEmpty: Boolean
}
object AttributeMap
{
val empty: AttributeMap = new BasicAttributeMap(Map.empty)
def apply(entries: Iterable[AttributeEntry[_]]): AttributeMap = empty ++ entries
def apply(entries: AttributeEntry[_]*): AttributeMap = empty ++ entries
implicit def toNatTrans(map: AttributeMap): AttributeKey ~> Id = new (AttributeKey ~> Id) {
def apply[T](key: AttributeKey[T]): T = map(key)
}
}
private class BasicAttributeMap(private val backing: Map[AttributeKey[_], Any]) extends AttributeMap
{
def isEmpty: Boolean = backing.isEmpty
def apply[T](k: AttributeKey[T]) = backing(k).asInstanceOf[T]
def get[T](k: AttributeKey[T]) = backing.get(k).asInstanceOf[Option[T]]
def remove[T](k: AttributeKey[T]): AttributeMap = new BasicAttributeMap( backing - k )
def contains[T](k: AttributeKey[T]) = backing.contains(k)
def put[T](k: AttributeKey[T], value: T): AttributeMap = new BasicAttributeMap( backing.updated(k, value) )
def keys: Iterable[AttributeKey[_]] = backing.keys
def ++(o: Iterable[AttributeEntry[_]]): AttributeMap =
{
val newBacking = (backing /: o) { case (b, AttributeEntry(key, value)) => b.updated(key, value) }
new BasicAttributeMap(newBacking)
}
def ++(o: AttributeMap): AttributeMap =
o match {
case bam: BasicAttributeMap => new BasicAttributeMap(backing ++ bam.backing)
case _ => o ++ this
}
def entries: Iterable[AttributeEntry[_]] =
for( (k: AttributeKey[kt], v) <- backing) yield AttributeEntry(k, v.asInstanceOf[kt])
override def toString = entries.mkString("(", ", ", ")")
}
// type inference required less generality
final case class AttributeEntry[T](key: AttributeKey[T], value: T)
{
override def toString = key.label + ": " + value
}
final case class Attributed[D](data: D)(val metadata: AttributeMap)
{
def get[T](key: AttributeKey[T]): Option[T] = metadata.get(key)
def put[T](key: AttributeKey[T], value: T): Attributed[D] = Attributed(data)(metadata.put(key, value))
def map[T](f: D => T): Attributed[T] = Attributed(f(data))(metadata)
}
object Attributed
{
def blankSeq[T](in: Seq[T]): Seq[Attributed[T]] = in map blank
def blank[T](data: T): Attributed[T] = Attributed(data)(AttributeMap.empty)
}
|
jamesward/xsbt
|
util/collection/Attributes.scala
|
Scala
|
bsd-3-clause
| 5,014 |
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.soteradefense.dga.graphx.louvain
import com.esotericsoftware.kryo.io.{Input, Output}
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
/**
* Louvain vertex state
* Contains all information needed for louvain community detection
*/
class LouvainData(var community: Long, var communitySigmaTot: Long, var internalWeight: Long, var nodeWeight: Long, var changed: Boolean) extends Serializable with KryoSerializable {
def this() = this(-1L, 0L, 0L, 0L, false)
override def toString: String = s"{community:$community,communitySigmaTot:$communitySigmaTot,internalWeight:$internalWeight,nodeWeight:$nodeWeight}"
override def write(kryo: Kryo, output: Output): Unit = {
kryo.writeObject(output, this.community)
kryo.writeObject(output, this.communitySigmaTot)
kryo.writeObject(output, this.internalWeight)
kryo.writeObject(output, this.nodeWeight)
kryo.writeObject(output, this.changed)
}
override def read(kryo: Kryo, input: Input): Unit = {
this.community = kryo.readObject(input, classOf[Long])
this.communitySigmaTot = kryo.readObject(input, classOf[Long])
this.internalWeight = kryo.readObject(input, classOf[Long])
this.nodeWeight = kryo.readObject(input, classOf[Long])
this.changed = kryo.readObject(input, classOf[Boolean])
}
}
|
atomicjets/distributed-graph-analytics
|
dga-graphx/src/main/scala/com/soteradefense/dga/graphx/louvain/LouvainData.scala
|
Scala
|
apache-2.0
| 2,131 |
package org.tribbloid.ispark
import org.tribbloid.ispark.Util.{debug, getpid, log}
import org.tribbloid.ispark.display.{Data, IScala}
import org.tribbloid.ispark.interpreters.{Results, SparkInterpreter}
import org.tribbloid.ispark.json.JsonUtil._
import org.tribbloid.ispark.msg._
import org.zeromq.ZMQ
import sun.misc.{Signal, SignalHandler}
import scala.collection.mutable
import scalax.file.Path
trait Parent {
val profile: Profile
val ipy: Communication
val interpreter: SparkInterpreter //TODO: support multiple interpreter
var in: mutable.Map[Int, String] = mutable.Map()
var out: mutable.Map[Int, Any] = mutable.Map()
val session: Session = new Session
var n: Int = 0
/**
* Public API
*/
def nextInput(): Int = {
n += 1
n
}
/**
* Public API
*/
def storeInput(input: String) {
in(n) = input
session.addHistory(n, input)
}
/**
* Public API
*/
def storeOutput(result: Results.Value, output: String) {
out(n) = result.value
session.addOutputHistory(n, output)
interpreter.bind("_" + n, result.tpe, result.value)
}
}
class Main(options: Options) extends Parent {
val profile = options.profile match {
case Some(path) => Path(path).string.as[Profile]
case None =>
val file = Path(s"profile-${getpid()}.json")
log(s"connect ipython with --existing ${file.toAbsolute.path}")
val profile = Profile.default
file.write(toJSON(profile))
profile
}
override lazy val interpreter = new SparkInterpreter()
interpreter.init(options.tail.toArray)
val zmq = new Sockets(profile)
val ipy = new Communication(zmq, profile)
def welcome() {
import scala.util.Properties._
log(s"Welcome to Scala $versionNumberString ($javaVmName, Java $javaVersion)")
}
Runtime.getRuntime.addShutdownHook(
new Thread() {
override def run() {
debug("Terminating Main")
interpreter.closeAll()
session.endSession(n)
}
}
)
Signal.handle(new Signal("INT"), new SignalHandler {
private var previously = System.currentTimeMillis
def handle(signal: Signal) {
if (!options.parent) {
val now = System.currentTimeMillis
if (now - previously < 500) sys.exit() else previously = now
}
interpreter.cancel()
}
})
class HeartBeat extends Thread {
override def run() {
ZMQ.proxy(zmq.heartbeat, zmq.heartbeat, null)
}
}
(options.profile, options.parent) match {
case (Some(file), true) =>
// This setup means that this kernel was started by IPython. Currently
// IPython is unable to terminate Main without explicitly killing it
// or sending shutdown_request. To fix that, Main watches the profile
// file whether it exists or not. When the file is removed, Main is
// terminated.
class FileWatcher(file: java.io.File, interval: Int) extends Thread {
override def run() {
while (true) {
if (file.exists) Thread.sleep(interval)
else sys.exit()
}
}
}
val fileWatcher = new FileWatcher(file, 1000)
fileWatcher.setName(s"FileWatcher(${file.getPath})")
fileWatcher.start()
case _ =>
}
val ExecuteHandler = new ExecuteHandler(this)
val CompleteHandler = new CompleteHandler(this)
val KernelInfoHandler = new KernelInfoHandler(this)
val ObjectInfoHandler = new ObjectInfoHandler(this)
val ConnectHandler = new ConnectHandler(this)
val ShutdownHandler = new ShutdownHandler(this)
val HistoryHandler = new HistoryHandler(this)
val CommOpenHandler = new CommOpenHandler(this)
val CommMsgHandler = new CommMsgHandler(this)
val CommCloseHandler = new CommCloseHandler(this)
class Conn(msg: Msg[_]) extends display.Conn {
def display_data(data: Data) {
ipy.send_display_data(msg, data)
}
}
class EventLoop(socket: ZMQ.Socket) extends Thread {
def dispatch[T <: FromIPython](msg: Msg[T]) {
IScala.withConn(new Conn(msg)) {
msg.header.msg_type match {
case MsgTypes.execute_request => ExecuteHandler(socket, msg.asInstanceOf[Msg[execute_request]])
case MsgTypes.complete_request => CompleteHandler(socket, msg.asInstanceOf[Msg[complete_request]])
case MsgTypes.kernel_info_request => KernelInfoHandler(socket, msg.asInstanceOf[Msg[kernel_info_request]])
case MsgTypes.object_info_request => ObjectInfoHandler(socket, msg.asInstanceOf[Msg[object_info_request]])
case MsgTypes.connect_request => ConnectHandler(socket, msg.asInstanceOf[Msg[connect_request]])
case MsgTypes.shutdown_request => ShutdownHandler(socket, msg.asInstanceOf[Msg[shutdown_request]])
case MsgTypes.history_request => HistoryHandler(socket, msg.asInstanceOf[Msg[history_request]])
case MsgTypes.comm_open => CommOpenHandler(socket, msg.asInstanceOf[Msg[comm_open]])
case MsgTypes.comm_msg => CommMsgHandler(socket, msg.asInstanceOf[Msg[comm_msg]])
case MsgTypes.comm_close => CommCloseHandler(socket, msg.asInstanceOf[Msg[comm_close]])
case _ =>
}
}
}
override def run() {
try {
while (true) {
ipy.recv(socket).foreach(dispatch)
}
} catch {
case exc: Exception =>
zmq.terminate() // this will gracefully terminate heartbeat
throw exc
}
}
}
val heartBeat = new HeartBeat
heartBeat.setName("HeartBeat")
heartBeat.start()
debug("Starting kernel event loop")
ipy.send_status(ExecutionStates.starting)
val requestsLoop = new EventLoop(zmq.requests)
requestsLoop.setName("RequestsEventLoop")
requestsLoop.start()
welcome()
}
object Main {
def main (args: Array[String]) {
Util.options = new Options(args)
Util.daemon = new Main(Util.options)
Util.daemon.heartBeat.join()
}
}
|
tribbloid/ISpark
|
core/src/main/scala/org/tribbloid/ispark/Main.scala
|
Scala
|
apache-2.0
| 5,990 |
// Project: angulate2 (https://github.com/jokade/angulate2)
// Description:
// Copyright (c) 2016 Johannes.Kastner <[email protected]>
// Distributed under the MIT License (see included LICENSE file)
package angulate2.router
import rxjs.{Observable, RxPromise}
import scala.scalajs.js
import scala.scalajs.js.annotation.JSImport
@js.native
@JSImport("@angular/router","Router")
class Router extends js.Object {
def errorHandler: js.Dynamic = js.native
def navigated: Boolean = js.native
def urlHandlingStrategy: js.Dynamic = js.native
def config: Routes = js.native
def initialNavigation(): Unit = js.native
def setUpLocationChangeListener(): Unit = js.native
def routerState: js.Dynamic = js.native
def url: String = js.native
def events: Observable[js.Dynamic] = js.native
def resetConfig(config: Routes): Unit = js.native
def dispose(): Unit = js.native
def createUrlTree(commands: js.Array[js.Any], extras: js.UndefOr[js.Object] = js.undefined): UrlTree = js.native
def navigateByUrl(url: js.|[String,UrlTree], extras: js.UndefOr[js.Object] = js.undefined): RxPromise[Boolean] = js.native
def navigate(commands: js.Array[js.Any], extras: js.UndefOr[js.Object] = js.undefined): RxPromise[Boolean] = js.native
def serializeUrl(urlTree: UrlTree): String = js.native
def parseUrl(url: String): UrlTree = js.native
def isActive(url: js.|[String,UrlTree], exact: Boolean): Boolean = js.native
}
object Router {
implicit final class RichRouter(val r: Router) extends AnyVal {
import scalajs.js.JSConverters._
import js.Dynamic.literal
/**
* Use `navigateTo("/foo",bar.id)`
* in place of `navigate(js.Array("/foo",bar.id))`.
*
* @param commands routing commands
*/
@inline
def navigateTo(commands: js.Any*): RxPromise[Boolean] = r.navigate(commands.toJSArray)
@inline
def navigateTo(extras: NavigationExtras)(commands: js.Any*): RxPromise[Boolean] = r.navigate(commands.toJSArray,extras)
/**
* Use `navigateRelativeTo(route,bar.id)`
* in place of `navigate(js.Array(bar.id), js.Dynamic.literal(relativeTo = route))`
*/
@inline
def navigateRelativeTo(route: ActivatedRoute, commands: js.Any*): RxPromise[Boolean] =
r.navigate(commands.toJSArray,literal(relativeTo = route))
}
}
|
jokade/angulate2
|
bindings/src/main/scala/angulate2/router/Router.scala
|
Scala
|
mit
| 2,325 |
package com.sksamuel.elastic4s.searches.queries
import com.sksamuel.elastic4s.searches.QueryDefinition
import org.elasticsearch.common.geo.GeoDistance
import org.elasticsearch.common.unit.DistanceUnit
import org.elasticsearch.common.unit.DistanceUnit.Distance
import org.elasticsearch.index.query.QueryBuilders
case class GeoDistanceQueryDefinition(field: String) extends QueryDefinition {
val builder = QueryBuilders.geoDistanceQuery(field)
val _builder = builder
def geoDistance(geoDistance: GeoDistance): GeoDistanceQueryDefinition = {
builder.geoDistance(geoDistance)
this
}
def geohash(geohash: String): GeoDistanceQueryDefinition = {
builder.geohash(geohash)
this
}
def queryName(name: String): GeoDistanceQueryDefinition = {
builder.queryName(name)
this
}
def distance(distance: String): GeoDistanceQueryDefinition = {
builder.distance(distance)
this
}
def distance(distance: Double, unit: DistanceUnit): GeoDistanceQueryDefinition = {
builder.distance(distance, unit)
this
}
def distance(distance: Distance): GeoDistanceQueryDefinition = {
builder.distance(distance.value, distance.unit)
this
}
def point(lat: Double, long: Double): GeoDistanceQueryDefinition = {
builder.point(lat, long)
this
}
}
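// Editorial usage sketch, not part of the original elastic4s sources: each setter mutates
// the underlying Elasticsearch builder and returns `this`, so calls can be chained. The
// field name and coordinates below are illustrative assumptions, not values from elastic4s.
private object GeoDistanceQueryDefinitionExample {
  val nearLondon: GeoDistanceQueryDefinition =
    GeoDistanceQueryDefinition("location")
      .point(51.5074, -0.1278)
      .distance(10, DistanceUnit.KILOMETERS)
      .queryName("near-london")
}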
|
ulric260/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/queries/GeoDistanceQueryDefinition.scala
|
Scala
|
apache-2.0
| 1,304 |
package com.wlangiewicz.bitcoin4s
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.stream.ActorMaterializer
import scala.concurrent.Future
class AkkaHttpClient(user: String, password: String, host: String, port: Int)(implicit system: ActorSystem, materializer: ActorMaterializer)
extends HttpClient(user, password, host, port) {
override def performRequest(request: HttpRequest): Future[HttpResponse] = {
Http().singleRequest(request)
}
}
|
wlk/bitcoin4s
|
src/main/scala/com/wlangiewicz/bitcoin4s/AkkaHttpClient.scala
|
Scala
|
apache-2.0
| 510 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.kafka08.consumer
import java.util.Properties
import kafka.common.{OffsetAndMetadata, TopicAndPartition}
import kafka.consumer.{ConsumerConfig, ConsumerTimeoutException}
import kafka.message.Message
import kafka.producer.{KeyedMessage, Producer, ProducerConfig}
import kafka.serializer.StringDecoder
import org.junit.runner.RunWith
import org.locationtech.geomesa.kafka08.{HasEmbeddedKafka, KafkaUtils08}
import org.locationtech.geomesa.kafka08.consumer.offsets._
import org.specs2.mutable.Specification
import org.specs2.runner
import scala.collection.mutable.ArrayBuffer
@RunWith(classOf[runner.JUnitRunner])
class KafkaConsumerTest extends Specification with HasEmbeddedKafka {
sequential
// skip embedded kafka tests unless explicitly enabled, they often fail randomly
skipAllUnless(sys.props.get(SYS_PROP_RUN_TESTS).exists(_.toBoolean))
def getConsumerConfig(group: String, threads: Int = 1) = {
val consumerProps = new Properties
consumerProps.put("group.id", group)
consumerProps.put(KafkaUtils08.brokerParam, brokerConnect)
consumerProps.put("zookeeper.connect", zkConnect)
consumerProps.put("num.consumer.fetchers", threads.toString)
consumerProps.put("auto.commit.enable", "false")
consumerProps.put("consumer.timeout.ms", "1000")
consumerProps.put("rebalance.max.retries", "100")
consumerProps.put("auto.offset.reset", "smallest")
new ConsumerConfig(consumerProps)
}
"KafkaConsumer" should {
val producerProps = new Properties()
producerProps.put(KafkaUtils08.brokerParam, brokerConnect)
producerProps.put("retry.backoff.ms", "100")
producerProps.put("message.send.max.retries", "20") // we have to bump this up as zk is pretty flaky
producerProps.put("serializer.class", "kafka.serializer.DefaultEncoder")
def produceMessages(topic: String) = {
val producer = new Producer[Array[Byte], Array[Byte]](new ProducerConfig(producerProps))
for (i <- 0 until 10) {
producer.send(new KeyedMessage(topic, i.toString.getBytes("UTF-8"), s"test $i".getBytes("UTF-8")))
}
producer.close()
}
"read messages and shutdown appropriately" >> {
val topic = "read-1"
val config = getConsumerConfig(topic)
produceMessages(topic)
val consumer = new KafkaConsumer(topic, config, new StringDecoder, new StringDecoder)
val stream = consumer.createMessageStreams(1, EarliestOffset).head
val messages = stream.iterator.take(10).toList
messages must haveLength(10)
stream.iterator.hasNext must throwA[ConsumerTimeoutException]
consumer.shutdown()
stream.iterator.hasNext must beFalse
for (i <- 0 until 10) {
messages(i).key() mustEqual i.toString
messages(i).message() mustEqual s"test $i"
}
success
}
"balance consumers across threads" >> {
val topic = "balance"
val config = getConsumerConfig(topic)
produceMessages(topic)
val consumer1 = new KafkaConsumer(topic, config, new StringDecoder, new StringDecoder)
val consumer2 = new KafkaConsumer(topic, config, new StringDecoder, new StringDecoder)
val messages: ArrayBuffer[String] = ArrayBuffer.empty[String]
val stream1 = consumer1.createMessageStreams(1, EarliestOffset).head
val stream2 = consumer2.createMessageStreams(1, EarliestOffset).head
try {
while(stream1.iterator.hasNext) {
messages.append(stream1.iterator.next.key())
}
} catch {
case e: ConsumerTimeoutException => // end of stream
}
try {
while(stream2.iterator.hasNext) {
messages.append(stream2.iterator.next.key())
}
} catch {
case e: ConsumerTimeoutException => // end of stream
}
messages must haveLength(10)
consumer1.shutdown()
consumer2.shutdown()
stream1.iterator.hasNext must beFalse
stream2.iterator.hasNext must beFalse
for (i <- 0 until 10) {
messages(i) mustEqual i.toString
}
success
}
"read meZNRecordSerializerssages from various offsets" >> {
"by group" >> {
val topic = "group"
val config = getConsumerConfig(topic)
produceMessages(topic)
// set up the initial group offset
val offsetManager = new OffsetManager(config)
offsetManager.commitOffsets(Map(TopicAndPartition(topic, 0) -> OffsetAndMetadata(3)))
val consumer = new KafkaConsumer(topic, config, new StringDecoder, new StringDecoder)
val stream = consumer.createMessageStreams(1, GroupOffset).head
stream.iterator.hasNext must beTrue
val message = stream.iterator.next()
consumer.shutdown()
message.key() mustEqual "3"
}
"by earliest" >> {
val topic = "earliest"
val config = getConsumerConfig(topic)
produceMessages(topic)
// set up the initial group offset
val offsetManager = new OffsetManager(config)
offsetManager.commitOffsets(Map(TopicAndPartition(topic, 0) -> OffsetAndMetadata(3)))
val consumer = new KafkaConsumer(topic, config, new StringDecoder, new StringDecoder)
val stream = consumer.createMessageStreams(1, EarliestOffset).head
stream.iterator.hasNext must beTrue
val message = stream.iterator.next()
consumer.shutdown()
message.key() mustEqual "0"
}
"by latest" >> {
val topic = "latest"
val config = getConsumerConfig(topic)
produceMessages(topic)
val consumer = new KafkaConsumer(topic, config, new StringDecoder, new StringDecoder)
val stream = consumer.createMessageStreams(1, LatestOffset).head
stream.iterator.hasNext must throwA[ConsumerTimeoutException]
consumer.shutdown()
stream.iterator.hasNext must beFalse
}
"by binary search" >> {
val topic = "search"
val config = getConsumerConfig(topic)
produceMessages(topic)
val decoder = new StringDecoder()
val offset = FindOffset((m: Message) => {
val bb = Array.ofDim[Byte](m.payload.remaining())
m.payload.get(bb)
decoder.fromBytes(bb).substring(5).toInt.compareTo(7)
})
val consumer = new KafkaConsumer(topic, config, new StringDecoder, new StringDecoder)
val stream = consumer.createMessageStreams(1, offset).head
stream.iterator.hasNext must beTrue
val message = stream.iterator.next()
consumer.shutdown()
message.key() mustEqual "7"
}
}
}
step { shutdown() }
}
|
tkunicki/geomesa
|
geomesa-kafka/geomesa-kafka-datastore/geomesa-kafka-08-datastore/src/test/scala/org/locationtech/geomesa/kafka08/consumer/KafkaConsumerTest.scala
|
Scala
|
apache-2.0
| 7,118 |
package org.vs.puzzle
import org.scalatest.{FlatSpec, Matchers}
class PuzzleTableSpec extends FlatSpec with Matchers {
}
|
VlasShatokhin/puzzle-app
|
src/test/scala/org/vs/puzzle/PuzzleTableSpec.scala
|
Scala
|
apache-2.0
| 124 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.util
/**
* the following is taken (with enhancements) from:
* https://github.com/nummulus/boite
*
* (it is unmaintained and not published against new scala versions)
*
*/
sealed abstract class Box[+A] {
self =>
/**
* Returns {@code true} if the box contains no value (Empty or Failure),
* {@code false} otherwise.
*/
def isEmpty: Boolean
/**
* Returns {@code true} if the box is Failure,
* {@code false} otherwise.
*/
def isFailure: Boolean = false
/**
* Returns {@code true} if the box contains a value, {@code false} otherwise.
*/
def isDefined: Boolean = !isEmpty
/**
* Returns the value of the box.
*
* @throws NoSuchElementException if the box is empty, or original exception if the box is failure
*/
def get: A
/**
* Returns the exception if this is failure.
*
* @throws NoSuchElementException if the box is empty, or full
*/
def getFailure: Throwable = throw new NoSuchElementException("Not a BoxedFailure")
/**
* Returns the value of the box if it's full, else the specified default.
*/
def getOrElse[B >: A](default: => B): B
/**
* Applies a function to the value of the box if it's full and returns a
* new box containing the result. Returns empty otherwise.
* <p>
* Differs from flatMap in that the given function is not expected to wrap
* the result in a box.
*
* @see flatMap
*/
def map[B](f: A => B): Box[B] = EmptyBox
/**
* Applies a function to the value of the box if it's full and returns a
* new box containing the result. Returns empty otherwise.
* <p>
* Differs from map in that the given function is expected to return a box.
*
* @see map
*/
def flatMap[B](f: A => Box[B]): Box[B] = EmptyBox
/**
* Applies a function to the value of the box if it's full, otherwise do
* nothing.
*/
def foreach[U](f: A => U) {}
/**
* Returns a List of one element if the box is full or an empty list
* otherwise.
*/
def toList: List[A] = List.empty[A]
/**
* Returns an Option. `Some` if the box is full or `None` otherwise.
*/
def toOption: Option[A] = if (isEmpty) None else Some(this.get)
/**
* Returns {@code true} if both objects are equal based on the contents of
* the box. For failures, equality is based on equivalence of failure
* causes.
*/
override def equals(other: Any): Boolean = (this, other) match {
case (FullBox(x), FullBox(y)) => x == y
case (x, y: AnyRef) => x eq y
case _ => false
}
override def hashCode: Int = this match {
case FullBox(x) => x.##
case _ => super.hashCode
}
}
object Box {
import scala.language.implicitConversions
/**
* Implicitly converts a Box to an Iterable.
* This is needed, for instance to be able to flatten a List[Box[_]].
*/
implicit def box2Iterable[A](b: Box[A]): Iterable[A] = b.toList
/**
* A Box factory which converts a scala.Option to a Box.
*/
def apply[A](o: Option[A]): Box[A] = o match {
case Some(value) => FullBox(value)
case None => EmptyBox
}
/**
* A Box factory which returns a Full(f) if f is not null, Empty if it is,
* and a Failure if f throws an exception.
*/
def wrap[A](f: => A): Box[A] =
try {
val value = f
if (value == null) EmptyBox else FullBox(value)
} catch {
case e: Exception => BoxedFailure(e)
}
def empty[A]: Box[A] = EmptyBox
}
final case class FullBox[+A](value: A) extends Box[A] {
override def isEmpty = false
override def get: A = value
override def getOrElse[B >: A](default: => B): B = value
override def map[B](f: A => B): Box[B] = FullBox(f(value))
override def flatMap[B](f: A => Box[B]): Box[B] = f(value)
override def foreach[U](f: A => U) { f(value) }
override def toList: List[A] = List(value)
}
private[util] sealed abstract class BoxWithNothing extends Box[Nothing] {
override def isEmpty = true
override def getOrElse[B >: Nothing](default: => B): B = default
}
case object EmptyBox extends BoxWithNothing {
override def get: Nothing = throw new NoSuchElementException("Box does not contain a value")
}
final case class BoxedFailure(exception: Throwable) extends BoxWithNothing {
type A = Nothing
override def get: Nothing = throw exception
override def getFailure: Throwable = exception
override def isFailure: Boolean = true
override def map[B](f: A => B): Box[B] = this
override def flatMap[B](f: A => Box[B]): Box[B] = this
override final def equals(other: Any): Boolean = (this, other) match {
    case (BoxedFailure(x), BoxedFailure(y)) => x == y
case _ => false
}
override final def hashCode: Int = exception.##
}
object BoxedFailure {
def apply(message: String) = new BoxedFailure(new Exception(message))
}
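// Editorial usage sketch, not part of the original CM-Well sources: illustrates the three
// Box states produced by Box.wrap and the basic combinators. All values are arbitrary.
private[util] object BoxUsageExample {
  val full: Box[Int]     = Box.wrap(21).map(_ * 2)                              // FullBox(42)
  val empty: Box[String] = Box.wrap[String](null)                               // EmptyBox
  val failed: Box[Int]   = Box.wrap[Int](throw new RuntimeException("boom"))    // BoxedFailure
  val sum: Int           = full.getOrElse(0) + empty.map(_.length).getOrElse(0) // 42
}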
|
e-orz/CM-Well
|
server/cmwell-util/src/main/scala/cmwell/util/Box.scala
|
Scala
|
apache-2.0
| 5,585 |
package com.blogspot.ramannanda.scala.algorithms.cp3.adhoc.rl
import scala.collection.mutable.ListBuffer
import scala.io.StdIn
import scala.util.control.Breaks._
//uva 00161
object TrafficLights {
def solveFirstGreenAfterOrange(cycles: Seq[Int]): Int = {
val time = Array.ofDim[Int](cycles.size)
var s = cycles.reduce(Math.min)
while (s <= 18000) {
var allOverlap = true
breakable {
for (i <- time.indices) {
          /*
           initially this leads to multiplying by 2; afterwards only the lights
           whose green window has passed (time + cycle - 5 <= s) are advanced
          */
if (time(i) + cycles(i) - 5 <= s) {
time(i) += cycles(i) << 1
}
if (!(time(i) <= s && s < time(i) + cycles(i) - 5)) {
s = time(i) - 1
allOverlap = false
break
}
}
}
if (allOverlap) return s
s += 1
}
s
}
def main(args: Array[String]): Unit = {
var lb = ListBuffer[Int]()
while (true) {
val inputLine = StdIn.readLine().trim
if (inputLine.equals("0 0 0")) return
      val cycles = inputLine.split("\\s+").map(_.toInt)
if (cycles(cycles.length - 1) == 0) {
lb ++= cycles.slice(0, cycles.length - 1)
val result = solveFirstGreenAfterOrange(lb)
if (result > 18000) {
println(s"Signals fail to synchronise in 5 hours")
}
else {
val hours = result / (60 * 60)
val mins = (result % 3600) / 60
val seconds = result % 60
println(s"$hours:$mins:$seconds")
}
lb = new ListBuffer[Int]
}
else {
lb ++= cycles
}
}
}
}
|
ramannanda9/algorithms-in-scala
|
src/main/scala/com/blogspot/ramannanda/scala/algorithms/cp3/adhoc/rl/TrafficLights.scala
|
Scala
|
gpl-3.0
| 1,699 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import scala.xml.{Node, Unparsed}
/**
* A data source that provides data for a page.
* @param pageSize the number of rows in a page
*/
private[ui] abstract class PagedDataSource[T](val pageSize: Int) {
if (pageSize <= 0) {
throw new IllegalArgumentException("Page size must be positive")
}
/**
* Return the size of all data.
*/
protected def dataSize: Int
/**
* Slice a range of data.
*/
protected def sliceData(from: Int, to: Int): Seq[T]
/**
* Slice the data for this page
*/
def pageData(page: Int): PageData[T] = {
val totalPages = (dataSize + pageSize - 1) / pageSize
if (page <= 0 || page > totalPages) {
throw new IndexOutOfBoundsException(
s"Page $page is out of range. Please select a page number between 1 and $totalPages.")
}
val from = (page - 1) * pageSize
val to = dataSize.min(page * pageSize)
PageData(totalPages, sliceData(from, to))
}
}
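/**
 * Editorial sketch, not part of the original Spark sources: a minimal in-memory data source
 * backed by a Seq, showing how `dataSize` and `sliceData` drive `pageData`. The class name
 * is an assumption made for this example only.
 */
private[ui] class SeqPagedDataSource[T](data: Seq[T], pageSize: Int)
  extends PagedDataSource[T](pageSize) {
  override protected def dataSize: Int = data.size
  override protected def sliceData(from: Int, to: Int): Seq[T] = data.slice(from, to)
  // e.g. new SeqPagedDataSource(1 to 95, 10).pageData(10) == PageData(10, Seq(91, 92, 93, 94, 95))
}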
/**
* The data returned by `PagedDataSource.pageData`, including the page number, the number of total
* pages and the data in this page.
*/
private[ui] case class PageData[T](totalPage: Int, data: Seq[T])
/**
 * A paged table that will generate an HTML table for a specified page and also the page navigation.
*/
private[ui] trait PagedTable[T] {
def tableId: String
def tableCssClass: String
def dataSource: PagedDataSource[T]
def headers: Seq[Node]
def row(t: T): Seq[Node]
def table(page: Int): Seq[Node] = {
val _dataSource = dataSource
try {
val PageData(totalPages, data) = _dataSource.pageData(page)
<div>
{pageNavigation(page, _dataSource.pageSize, totalPages)}
<table class={tableCssClass} id={tableId}>
{headers}
<tbody>
{data.map(row)}
</tbody>
</table>
</div>
} catch {
case e: IndexOutOfBoundsException =>
val PageData(totalPages, _) = _dataSource.pageData(1)
<div>
{pageNavigation(1, _dataSource.pageSize, totalPages)}
<div class="alert alert-error">{e.getMessage}</div>
</div>
}
}
/**
   * Return a page navigation.
* <ul>
   * <li>If the totalPages is 1, the page navigation will be empty</li>
* <li>
* If the totalPages is more than 1, it will create a page navigation including a group of
* page numbers and a form to submit the page number.
* </li>
* </ul>
*
* Here are some examples of the page navigation:
* {{{
* << < 11 12 13* 14 15 16 17 18 19 20 > >>
*
* This is the first group, so "<<" is hidden.
* < 1 2* 3 4 5 6 7 8 9 10 > >>
*
* This is the first group and the first page, so "<<" and "<" are hidden.
* 1* 2 3 4 5 6 7 8 9 10 > >>
*
* Assume totalPages is 19. This is the last group, so ">>" is hidden.
* << < 11 12 13* 14 15 16 17 18 19 >
*
* Assume totalPages is 19. This is the last group and the last page, so ">>" and ">" are hidden.
* << < 11 12 13 14 15 16 17 18 19*
*
* * means the current page number
* << means jumping to the first page of the previous group.
* < means jumping to the previous page.
* >> means jumping to the first page of the next group.
* > means jumping to the next page.
* }}}
*/
private[ui] def pageNavigation(page: Int, pageSize: Int, totalPages: Int): Seq[Node] = {
if (totalPages == 1) {
Nil
} else {
      // A group includes all the page numbers that will be shown in the page navigation.
      // A group size of 10 means 10 page numbers will be shown at a time.
      // The first group is pages 1 to 10, the second is 11 to 20, and so on.
val groupSize = 10
val firstGroup = 0
val lastGroup = (totalPages - 1) / groupSize
val currentGroup = (page - 1) / groupSize
val startPage = currentGroup * groupSize + 1
val endPage = totalPages.min(startPage + groupSize - 1)
val pageTags = (startPage to endPage).map { p =>
if (p == page) {
// The current page should be disabled so that it cannot be clicked.
<li class="disabled"><a href="#">{p}</a></li>
} else {
<li><a href={pageLink(p)}>{p}</a></li>
}
}
val (goButtonJsFuncName, goButtonJsFunc) = goButtonJavascriptFunction
// When clicking the "Go" button, it will call this javascript method and then call
// "goButtonJsFuncName"
val formJs =
s"""$$(function(){
| $$( "#form-$tableId-page" ).submit(function(event) {
| var page = $$("#form-$tableId-page-no").val()
| var pageSize = $$("#form-$tableId-page-size").val()
| pageSize = pageSize ? pageSize: 100;
| if (page != "") {
| ${goButtonJsFuncName}(page, pageSize);
| }
| event.preventDefault();
| });
|});
""".stripMargin
<div>
<div>
<form id={s"form-$tableId-page"}
class="form-inline pull-right" style="margin-bottom: 0px;">
<label>{totalPages} Pages. Jump to</label>
<input type="text" id={s"form-$tableId-page-no"} value={page.toString} class="span1" />
<label>. Show </label>
<input type="text"
id={s"form-$tableId-page-size"} value={pageSize.toString} class="span1" />
<label>items in a page.</label>
<button type="submit" class="btn">Go</button>
</form>
</div>
<div class="pagination" style="margin-bottom: 0px;">
<span style="float: left; padding-top: 4px; padding-right: 4px;">Page: </span>
<ul>
{if (currentGroup > firstGroup) {
<li>
<a href={pageLink(startPage - groupSize)} aria-label="Previous Group">
<span aria-hidden="true">
<<
</span>
</a>
</li>
}}
{if (page > 1) {
<li>
<a href={pageLink(page - 1)} aria-label="Previous">
<span aria-hidden="true">
<
</span>
</a>
</li>
}}
{pageTags}
{if (page < totalPages) {
<li>
<a href={pageLink(page + 1)} aria-label="Next">
<span aria-hidden="true">></span>
</a>
</li>
}}
{if (currentGroup < lastGroup) {
<li>
<a href={pageLink(startPage + groupSize)} aria-label="Next Group">
<span aria-hidden="true">
>>
</span>
</a>
</li>
}}
</ul>
</div>
<script>
{Unparsed(goButtonJsFunc)}
{Unparsed(formJs)}
</script>
</div>
}
}
/**
* Return a link to jump to a page.
*/
def pageLink(page: Int): String
/**
* Only the implementation knows how to create the url with a page number and the page size, so we
* leave this one to the implementation. The implementation should create a JavaScript method that
* accepts a page number along with the page size and jumps to the page. The return value is this
* method name and its JavaScript codes.
*/
def goButtonJavascriptFunction: (String, String)
}
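/**
 * Editorial sketch, not part of the original Spark sources: a minimal concrete PagedTable
 * wired to the SeqPagedDataSource example above. The table id, CSS classes, URL parameter
 * and javascript function name are illustrative assumptions.
 */
private[ui] class SeqPagedTable(data: Seq[String], size: Int) extends PagedTable[String] {
  override def tableId: String = "example-table"
  override def tableCssClass: String = "table table-bordered"
  override def dataSource: PagedDataSource[String] = new SeqPagedDataSource(data, size)
  override def headers: Seq[Node] = <thead><tr><th>Value</th></tr></thead>
  override def row(t: String): Seq[Node] = <tr><td>{t}</td></tr>
  override def pageLink(page: Int): String = s"?example.page=$page"
  override def goButtonJavascriptFunction: (String, String) =
    ("goToExamplePage",
      "function goToExamplePage(page, pageSize) { window.location.href = '?example.page=' + page + '&example.pageSize=' + pageSize; }")
}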
|
tophua/spark1.52
|
core/src/main/scala/org/apache/spark/ui/PagedTable.scala
|
Scala
|
apache-2.0
| 9,056 |
package katas.scala.techstock
import org.scalatest.Matchers
import org.junit.Test
import scala.annotation.tailrec
// rbs techstock 2013
class SumOfPrimes extends Matchers {
@Test def `find sum of primes below 2 million`() {
// too slow for 2M
def from(n: Int): Stream[Int] = Stream.cons(n, from(n + 1))
def sieve(s: Stream[Int]): Stream[Int] =
Stream.cons(s.head, sieve(s.tail filter { _ % s.head != 0}))
def primes = sieve(from(2))
primes.take(100).sum should equal(24133)
// too slow for 2M
@tailrec def primes2(amount: Int = 100, n: Int = 2, result: Seq[Int] = Seq()): Seq[Int] = {
if (n >= amount) result
else if (result.isEmpty || result.forall{ n % _ != 0 }) primes2(amount, n + 1, n +: result)
else primes2(amount, n + 1, result)
}
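// Editorial sketch, not part of the original DGA sources: because LouvainData implements
// KryoSerializable, registering the class is enough for Kryo to use the read/write methods
// above. The object and method names here are assumptions for illustration only.
object LouvainDataKryoExample {
  def register(kryo: Kryo): Unit = {
    kryo.register(classOf[LouvainData])
  }
}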
primes2(100).sum should equal(1060)
}
}
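// Editorial alternative, not part of the original kata: an array-based Sieve of Eratosthenes
// that is fast enough for the 2-million bound mentioned above. The class name is an assumption.
class SumOfPrimesSieve {
  def sumOfPrimesBelow(limit: Int): Long = {
    val composite = new Array[Boolean](limit max 2)
    var sum = 0L
    var i = 2
    while (i < limit) {
      if (!composite(i)) {
        sum += i
        var j = i.toLong * i
        while (j < limit) {
          composite(j.toInt) = true
          j += i
        }
      }
      i += 1
    }
    sum
  }
}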
|
dkandalov/katas
|
scala/src/katas/scala/techstock/SumOfPrimes.scala
|
Scala
|
unlicense
| 813 |
package wow.common.codecs
import wow.utils.BigIntExtensions._
import scodec.bits.{BitVector, ByteVector}
import scodec.{Attempt, Codec, DecodeResult, Err, SizeBound}
/**
 * Encodes an unsigned big integer in a fixed number of bytes
*/
private[codecs] final class FixedUnsignedLBigIntCodec(sizeInBytes: Long) extends Codec[BigInt] {
  require(sizeInBytes > 0, "size must be positive")
assert(sizeInBytes.toInt == sizeInBytes)
/**
* Size in bits
*/
private val sizeInBits = sizeInBytes * 8L
override def sizeBound: SizeBound = SizeBound.exact(sizeInBits)
override def encode(value: BigInt): Attempt[BitVector] = {
try {
val valueBytes = value.toUnsignedLBytes(sizeInBytes.toInt)
val valueBits = ByteVector.view(valueBytes).bits
Attempt.successful(valueBits)
} catch {
case e: IllegalArgumentException => Attempt.failure(Err(e.toString))
}
}
override def decode(bits: BitVector): Attempt[DecodeResult[BigInt]] = {
bits.acquire(sizeInBits) match {
case Left(err) => Attempt.failure(Err(err))
case Right(usableBits) =>
val bigInt = BigInt.fromUnsignedLBytes(usableBits.toByteArray)
Attempt.successful(DecodeResult(bigInt, bits.drop(sizeInBits)))
}
}
override def toString = s"BigIntCodec"
}
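// Editorial round-trip sketch, not part of the original sources: encodes a BigInt into a
// fixed 4-byte unsigned little-endian form and decodes it back. It assumes the
// BigIntExtensions helpers used above implement the conversion their names suggest.
private[codecs] object FixedUnsignedLBigIntCodecExample {
  private val codec = new FixedUnsignedLBigIntCodec(4)
  val roundTrip: Attempt[BigInt] =
    codec.encode(BigInt(51966)).flatMap(bits => codec.decode(bits).map(_.value))
  // roundTrip == Attempt.successful(BigInt(51966))
}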
|
SKNZ/SpinaciCore
|
wow/core/src/main/scala/wow/common/codecs/FixedUnsignedLBigIntCodec.scala
|
Scala
|
mit
| 1,296 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.jdbc.syntax
trait AllSyntax extends ScioContextSyntax with SCollectionSyntax
|
spotify/scio
|
scio-jdbc/src/main/scala/com/spotify/scio/jdbc/syntax/AllSyntax.scala
|
Scala
|
apache-2.0
| 703 |
package org.denigma.kappa.notebook.views.editor
import org.denigma.binding.binders.GeneralBinder
import org.denigma.binding.views.CollectionMapView
import org.denigma.controls.code.CodeBinder
import org.denigma.kappa.messages.KappaSourceFile
import org.denigma.kappa.messages.WebSimMessages.WebSimError
import org.denigma.kappa.notebook.circuits.{ErrorsCircuit, KappaEditorCircuit}
import org.denigma.kappa.notebook.views.common.{FileTabHeaders, TabHeaders}
import org.denigma.kappa.notebook.views.errors.SyntaxErrorsView
import org.scalajs.dom.raw.Element
import rx.Ctx.Owner.Unsafe.Unsafe
import rx._
import scala.collection.immutable._
/**
* View for kappa editor
* @param elem Element to bind to
* @param editorCircuit circuit to deal with code editing messages
* @param errorsCircuit circuit to deal with errors
*/
class KappaCodeEditor(val elem: Element,
val editorCircuit: KappaEditorCircuit,
val errorsCircuit: ErrorsCircuit
) extends CollectionMapView
{
type Key = String
type Value = KappaSourceFile
val headers = editorCircuit.openOrder
val items = editorCircuit.items
val isEmpty = items.map(its=>its.isEmpty)
val selected: Var[String] = Var("")
override type ItemView = KappaCodeTab
override def onRemove(item: Item):Unit = {
val sel = selected.now
super.onRemove(item)
if(sel == item && items.now.nonEmpty) {
selected() = headers.now.last
}
}
override lazy val injector = defaultInjector
.register("headers")((el, args) => new FileTabHeaders(el, headers, editorCircuit.input, selected)(TabHeaders.path2name).withBinder(new GeneralBinder(_)))
.register("SyntaxErrors")((el, args) => new SyntaxErrorsView(el, errorsCircuit).withBinder(new GeneralBinder(_)))
override def updateView(view: KappaCodeTab, key: String, old: KappaSourceFile, current: KappaSourceFile): Unit = {
view.source() = current
}
override def newItemView(key: String, value: KappaSourceFile): KappaCodeTab = this.constructItemView(key) {
case (el, _) =>
println(s"new file ${value.path}")
val itemErrors = errorsCircuit.groupedErrors.map(gp => gp.getOrElse(key, List.empty[WebSimError]))
val view: ItemView = new KappaCodeTab(el,
Var(value),
selected,
editorCircuit.input,
editorCircuit.output,
editorCircuit.editorsUpdates,
editorCircuit.kappaCursor,
itemErrors).withBinder(v => new CodeBinder(v)
)
selected() = key
view
}
}
|
antonkulaga/kappa-notebook
|
app/js/src/main/scala/org/denigma/kappa/notebook/views/editor/KappaCodeEditor.scala
|
Scala
|
mpl-2.0
| 2,553 |
package com.tam.cobol_interpreter.parser.schema.expressions
import com.tam.cobol_interpreter.parser.exceptions.SchemaException
import com.tam.cobol_interpreter.parser.schema.expressions.ExpressionMatcher._
/**
* Created by tamu on 1/4/15.
*/
object ExpressionGenerator {
def recGenerateCases(schemaLines: Array[String]): (Array[Case], Array[String]) = {
if (schemaLines.length <= 0)
(new Array[Case](0), schemaLines)
else
schemaLines.head match {
case CaseMatcher(switchVal) =>
val (caseExpressions, schemaLinesCutCase) = recGenerateExpressionTree(schemaLines.tail)
val (caseList, schemaLinesCut) = recGenerateCases(schemaLinesCutCase)
(new Case(switchVal.toCharArray.map(_.toByte).toArray, caseExpressions) +: caseList, schemaLinesCut)
case CommentMatcher() =>
recGenerateCases(schemaLines.tail)
case EmptySpaceMatcher() =>
recGenerateCases(schemaLines.tail)
case _ =>
(new Array[Case](0), schemaLines)
}
}
def recGenerateExpressionTree(schemaLines: Array[String]): (Array[ParserSchemaExpression], Array[String]) = {
if (schemaLines.length <= 0)
(new Array[ParserSchemaExpression](0), schemaLines)
else
schemaLines.head match {
case ColumnMatcher(name, typ, bytes) =>
val (tree, schemaLinesCut) = recGenerateExpressionTree(schemaLines.tail)
(new Column(name, typ, bytes) +: tree, schemaLinesCut)
case FillerMatcher(bytes) =>
val (tree, schemaLinesCut) = recGenerateExpressionTree(schemaLines.tail)
(new Filler(bytes) +: tree, schemaLinesCut)
case SwitchMatcher(name, typ, bytes) =>
val (cases: Array[Case], schemaLinesCaseCut: Array[String]) = recGenerateCases(schemaLines.tail)
val (tree, schemaLinesCut) = recGenerateExpressionTree(schemaLinesCaseCut)
(new Switch(new Column(name, typ, bytes), cases) +: tree, schemaLinesCut)
case OccursMatcher(bytes) =>
val (occursExpressions: Array[ParserSchemaExpression], schemaLinesOccursCut: Array[String]) = recGenerateExpressionTree(schemaLines.tail)
val (tree, schemaLinesCut) = recGenerateExpressionTree(schemaLinesOccursCut)
(new Occurs(bytes, occursExpressions) +: tree, schemaLinesCut)
case EndOccursMatcher() =>
(new Array[ParserSchemaExpression](0), schemaLines.tail)
case EndCaseMatcher() =>
(new Array[ParserSchemaExpression](0), schemaLines.tail)
case CommentMatcher() =>
recGenerateExpressionTree(schemaLines.tail)
case EmptySpaceMatcher() =>
recGenerateExpressionTree(schemaLines.tail)
case x =>
throw new SchemaException(s"Unrecognized ParserSchemaExpression: '$x'")
}
}
def getTableName(schemaLines:Array[String]): (Option[TableName], Array[String]) = {
if(schemaLines.length <= 0)
(None, schemaLines)
else
schemaLines.head match {
case CommentMatcher() =>
getTableName(schemaLines.tail)
case EmptySpaceMatcher() =>
getTableName(schemaLines.tail)
case TableNameMatcher(x) =>
(Some(TableName(x)), schemaLines.tail)
case _ =>
(None, schemaLines)
}
}
def generateExpressionTree(schemaString: String): Array[ParserSchemaExpression] = {
//TODO: Consolidate this duplicated function
val schemaLines = schemaString.trim().
split('\n').
filter({case CommentMatcher() => false case _ => true}).mkString("\n").
split('|').
filter({case CommentMatcher() => false case _ => true}).mkString("|").
split(Array('|','\n'))
val (tableName, schemaLinesTableCut) = getTableName(schemaLines)
val rowBytes = schemaLinesTableCut.last match {
case RowBytesMatcher(b) => new RowBytes(b)
case _ => throw new Exception("Row Bytes Unspecified. Must be last.")
}
val (tree, _) = recGenerateExpressionTree(schemaLinesTableCut.dropRight(1))
tableName match {
case Some(t) => t +: tree :+ rowBytes
case None => tree :+ rowBytes
}
}
}
|
tamsanh/scala-cobol-interpreter
|
src/main/scala/com/tam/cobol_interpreter/parser/schema/expressions/ExpressionGenerator.scala
|
Scala
|
apache-2.0
| 4,302 |
/*
* Copyright 1998-2017 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.search
import java.nio.file.Files
import com.sksamuel.elastic4s.ElasticDsl._
import com.sksamuel.elastic4s.TcpClient
import com.sksamuel.elastic4s.embedded.LocalNode
import net.tanesha.recaptcha.ReCaptcha
import org.mockito.Mockito
import org.specs2.mutable.SpecificationWithJUnit
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.annotation._
import org.springframework.stereotype.{Repository, Service}
import org.springframework.test.context.{ContextConfiguration, TestContextManager}
import ru.org.linux.auth.FloodProtector
import ru.org.linux.search.ElasticsearchIndexService.MessageIndex
@ContextConfiguration(classes = Array(classOf[SearchIntegrationTestConfiguration]))
class ElasticsearchIndexServiceIntegrationSpec extends SpecificationWithJUnit {
new TestContextManager(this.getClass).prepareTestInstance(this)
@Autowired
var indexService: ElasticsearchIndexService = _
@Autowired
var elastic: TcpClient = _
"ElasticsearchIndexService" should {
"create index" in {
indexService.createIndexIfNeeded()
val exists = elastic execute { indexExists(MessageIndex) } await
exists.isExists must beTrue
}
}
}
@Configuration
@ImportResource(Array("classpath:common.xml", "classpath:database.xml"))
@ComponentScan(
basePackages = Array("ru.org.linux"),
lazyInit = true,
useDefaultFilters = false,
includeFilters = Array(
new ComponentScan.Filter(
`type` = FilterType.ANNOTATION,
value = Array(classOf[Service], classOf[Repository])))
)
class SearchIntegrationTestConfiguration {
class LocalNodeProvider {
val node = LocalNode("test-elastic", Files.createTempDirectory("test-elastic").toFile.getAbsolutePath)
def close(): Unit = node.stop(true)
}
@Bean(destroyMethod="close")
def elasticNode: LocalNodeProvider = new LocalNodeProvider()
@Bean
def elasticClient(node: LocalNodeProvider): TcpClient = {
node.node.elastic4sclient(shutdownNodeOnClose = false)
}
@Bean
def reCaptcha: ReCaptcha = Mockito.mock(classOf[ReCaptcha])
@Bean
def floodProtector: FloodProtector = Mockito.mock(classOf[FloodProtector])
}
|
kloun/lorsource
|
src/test/scala/ru/org/linux/search/ElasticsearchIndexServiceIntegrationSpec.scala
|
Scala
|
apache-2.0
| 2,808 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils
import org.apache.curator.framework.CuratorFramework
import org.apache.zookeeper.KeeperException.NodeExistsException
import org.json4s.JsonAST._
import org.slf4j.LoggerFactory
/**
* Borrowed from kafka 0.8.1.1, adapted to use curator framework
* https://git-wip-us.apache.org/repos/asf?p=kafka.git;a=blob;f=core/src/main/scala/kafka/admin/PreferredReplicaLeaderElectionCommand.scala
*/
object PreferredReplicaLeaderElectionCommand {
private[this] val logger = LoggerFactory.getLogger(this.getClass)
def parsePreferredReplicaElectionData(jsonString: String): Set[TopicAndPartition] = {
parseJson(jsonString).findField(_._1 == "partitions") match {
case Some((_, arr)) =>
val result: List[TopicAndPartition] = for {
JArray(elements) <- arr
JObject(children) <- elements
JField("topic", JString(t)) <- children
JField("partition", JInt(p)) <- children
} yield TopicAndPartition(t, p.toInt)
checkCondition(result.nonEmpty, PreferredLeaderElectionErrors.ElectionSetEmptyOnRead(jsonString))
result.toSet
case None =>
throwError(PreferredLeaderElectionErrors.ElectionSetEmptyOnRead(jsonString))
}
}
def writePreferredReplicaElectionData(curator: CuratorFramework,
partitionsUndergoingPreferredReplicaElection: Set[TopicAndPartition]) {
checkCondition(partitionsUndergoingPreferredReplicaElection.nonEmpty,PreferredLeaderElectionErrors.ElectionSetEmptyOnWrite)
val zkPath = ZkUtils.PreferredReplicaLeaderElectionPath
val partitionsList : Set[Map[String,Any]] =
partitionsUndergoingPreferredReplicaElection.map(e => Map[String,Any]("topic" -> e.topic, "partition" -> e.partition))
val jsonData = toJson(Map("version" -> 1, "partitions" -> partitionsList))
try {
ZkUtils.createPersistentPath(curator, zkPath, jsonData)
logger.info("Created preferred replica election path with %s".format(jsonData))
} catch {
case nee: NodeExistsException =>
val partitionsUndergoingPreferredReplicaElection =
PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(ZkUtils.readData(curator, zkPath)._1)
throwError(PreferredLeaderElectionErrors.ElectionAlreadyInProgress(partitionsUndergoingPreferredReplicaElection))
case e2: Throwable =>
throwError(PreferredLeaderElectionErrors.UnhandledException)
}
}
}
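// Illustrative only (added for clarity, not in the upstream file): the JSON shape read by
// parsePreferredReplicaElectionData and written by writePreferredReplicaElectionData, as implied
// by the field names handled above, looks like:
//
//   {"version":1,"partitions":[{"topic":"my-topic","partition":0},{"topic":"my-topic","partition":1}]}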
object PreferredLeaderElectionErrors {
class ElectionSetEmptyOnWrite private[PreferredLeaderElectionErrors] extends UtilError("Preferred replica election data is empty")
class ElectionSetEmptyOnRead private[PreferredLeaderElectionErrors] (json: String) extends UtilError(s"Preferred replica election data is empty on read : $json")
class ElectionAlreadyInProgress private[PreferredLeaderElectionErrors] (partitionsUndergoingPreferredReplicaElection: Set[TopicAndPartition]) extends UtilError(
"Preferred replica leader election currently in progress for " +
"%s. Aborting operation".format(partitionsUndergoingPreferredReplicaElection))
class UnhandledException private[PreferredLeaderElectionErrors] extends UtilError("Unhandled exception")
def ElectionSetEmptyOnRead(json: String) = new ElectionSetEmptyOnRead(json)
val ElectionSetEmptyOnWrite = new ElectionSetEmptyOnWrite
def ElectionAlreadyInProgress(set: Set[TopicAndPartition]) = new ElectionAlreadyInProgress(set)
val UnhandledException = new UnhandledException
}
|
patricklucas/kafka-manager
|
app/kafka/manager/utils/PreferredReplicaLeaderElectionCommand.scala
|
Scala
|
apache-2.0
| 4,325 |
package be.wegenenverkeer.atomium.server
import be.wegenenverkeer.atomium.api.FeedPage
import be.wegenenverkeer.atomium.format.Url
/**
* A feed store is responsible for the persistence of feeds.
* This abstract class serves as a base class for more specific FeedStore implementations.
*
* @tparam E type of the elements in the feed
*/
abstract class AbstractFeedStore[E, C <: Context](feedName: String,
title: Option[String],
url: Url) extends FeedStore[E, C] with FeedStoreSupport[E] {
implicit val feedParams = FeedParams(feedName, url, title)
/**
* Retrieves a page of the feed.
*
* @param start the starting entry (exclusive), should not be returned in the feed page
* @param pageSize the number of entries
* @param forward if true navigate to 'previous' elements in feed (towards head of feed)
* else navigate to 'next' elements in feed (towards last page of feed)
* @return the feed page or `None` if the page is not found
*/
override def getFeed(start: Long, pageSize: Int, forward: Boolean)(implicit context: C): Option[FeedPage[E]] = {
require(pageSize > 0, "page size must be greater than 0")
val allowed =
if (forward)
start == 0 || (start < maxId && getNumberOfEntriesLowerThan(start) % pageSize == 0)
else
start > 1 && start <= maxId && getNumberOfEntriesLowerThan(start, inclusive = false) % pageSize == 0
if (allowed) {
// retrieve two entries more than requested; start is inclusive in the next call
// this is done to determine if there is a next and/or previous entry relative to the requested page
Some(processFeedEntries(start, minId, pageSize, forward, getFeedEntries(start, pageSize + 2, forward)))
} else {
None
}
}
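// Worked example of the `allowed` guard above (added for clarity, derived from the code itself):
// with pageSize = 10, forward navigation is permitted from start = 0, or from any start < maxId for
// which the number of entries with sequence number <= start is a multiple of 10; backward navigation
// requires 1 < start <= maxId and the number of entries with sequence number strictly below start to
// be a multiple of 10. In both cases the requested start must fall on a page boundary.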
/**
* Retrieves the head of the feed. This is the first page containing the most recent entries
* @param pageSize the maximum number of feed entries to return. The page could contain less entries
* @return the head of the feed
*/
override def getHeadOfFeed(pageSize: Int)(implicit context: C): FeedPage[E] = {
require(pageSize > 0, "page size must be greater than 0")
// fetch the most recent entries from the feed; we ask for one more than pageSize to check if we are on the last page
val entries = getMostRecentFeedEntries(pageSize + 1)
if (entries.nonEmpty) {
processHeadFeedEntries(getNumberOfEntriesLowerThan(entries.head.sequenceNr), minId, pageSize, entries)
} else {
processHeadFeedEntries(0, minId, pageSize, entries)
}
}
/**
* @return one less than the minimum sequence number used in this feed
*/
def minId(implicit context: C): Long
/**
* @return the maximum sequence number used in this feed or minId if feed is empty
*/
def maxId(implicit context: C): Long
/**
* @param sequenceNr sequence number to match
* @param inclusive if true include the specified sequence number
* @return the number of entries in the feed with sequence number lower than specified
*/
def getNumberOfEntriesLowerThan(sequenceNr: Long, inclusive: Boolean = true)(implicit context: C): Long
/**
* Retrieves the most recent entries from the `FeedStore` sorted in descending order
* @param count the amount of recent entries to return
* @return a list of FeedEntries. a FeedEntry is a sequence number and its corresponding entry
* and sorted by descending sequence number
*/
def getMostRecentFeedEntries(count: Int)(implicit context: C): List[FeedEntry]
/**
* Retrieves entries with their sequence numbers from the feed
*
* @param start the starting entry (inclusive), MUST be returned in the entries
* @param count the number of entries to return
* @param ascending if true return entries with sequence numbers >= start in ascending order
* else return entries with sequence numbers <= start in descending order
* @return the corresponding entries sorted accordingly
*/
def getFeedEntries(start: Long, count: Int, ascending: Boolean)(implicit context: C): List[FeedEntry]
}
|
kwark/atomium
|
modules/server/src/main/scala/be/wegenenverkeer/atomium/server/AbstractFeedStore.scala
|
Scala
|
mit
| 4,194 |
package org.jetbrains.plugins.scala.project.template
import java.awt.event.{ActionEvent, ActionListener}
import javax.swing.JComponent
import com.intellij.openapi.ui.Messages
import org.jetbrains.plugins.scala.extensions
import org.jetbrains.plugins.scala.project.{Platform, Version, Versions}
/**
* @author Pavel Fatin
*/
class VersionDialog(parent: JComponent) extends VersionDialogBase(parent) {
init()
setTitle("Download")
myPlatform.setItems(Platform.Values)
myPlatform.addActionListener(new ActionListener() {
def actionPerformed(e: ActionEvent) {
updateVersions()
}
})
myVersion.setTextRenderer(Version.abbreviate)
updateVersions()
override def createCenterPanel(): JComponent = myContent
private def updateVersions() {
val platform = myPlatform.getSelectedItem.asInstanceOf[Platform]
val versions = extensions.withProgressSynchronously(s"Fetching available ${platform.name} versions") { _ =>
Versions.loadScalaVersions(platform)
}
if (versions.length == 0) {
Messages.showErrorDialog(myContent, "No versions available for download", s"Error Downloading ${platform.name} libraries")
} else {
myVersion.setItems(versions)
}
}
def selectedPlatform: Platform = myPlatform.getSelectedItem.asInstanceOf[Platform]
def selectedVersion: String = myVersion.getSelectedItem.asInstanceOf[String]
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/project/template/VersionDialog.scala
|
Scala
|
apache-2.0
| 1,392 |
import com.github.acrisci.commander.Program
val program = new Program()
.version("0.0.1")
.usage("./run-example.sh math.scala [options]")
.description("A program that can sum or multiply a list of numbers")
.option("-o, --operation [operation]", "The operation to perform on the numbers [sum|multiply]", default="sum")
.option("-n, --numbers <numbers>", "Comma-separated list of numbers", fn=_.split(",").map(_.toInt))
.parse(args)
if (args.isEmpty)
program.help()
if (program.operation.equals("sum")) {
val sum = program.numbers[Array[Int]].sum
println(s"the sum is $sum")
} else if (program.operation.equals("multiply")) {
val product = program.numbers[Array[Int]].product
println(s"the product is $product")
} else {
println("Operation must be either 'sum' or 'multiply'")
System.exit(1)
}
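// Example invocations (illustrative; they assume the run-example.sh wrapper named in the usage string above):
//   ./run-example.sh math.scala -n 1,2,3,4              => "the sum is 10"
//   ./run-example.sh math.scala -o multiply -n 1,2,3,4  => "the product is 24"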
|
acrisci/commander-scala
|
examples/math.scala
|
Scala
|
mit
| 822 |
package lila.pref
import play.api.data._
import play.api.data.Forms._
import lila.user.User
private[pref] final class DataForm {
val pref = Form(mapping(
"autoQueen" -> number.verifying(Pref.AutoQueen.choices.toMap contains _),
"autoThreefold" -> number.verifying(Pref.AutoThreefold.choices.toMap contains _),
"takeback" -> number.verifying(Pref.Takeback.choices.toMap contains _),
"clockTenths" -> number.verifying(Pref.ClockTenths.choices.toMap contains _),
"clockBar" -> number.verifying(Set(0, 1) contains _),
"clockSound" -> number.verifying(Set(0, 1) contains _),
"follow" -> number.verifying(Set(0, 1) contains _),
"highlight" -> number.verifying(Set(0, 1) contains _),
"destination" -> number.verifying(Set(0, 1) contains _),
"coords" -> number.verifying(Pref.Coords.choices.toMap contains _),
"replay" -> number.verifying(Pref.Replay.choices.toMap contains _),
"blindfold" -> number.verifying(Pref.Blindfold.choices.toMap contains _),
"challenge" -> number.verifying(Pref.Challenge.choices.toMap contains _),
"premove" -> number.verifying(Set(0, 1) contains _),
"animation" -> number.verifying(Set(0, 1, 2, 3) contains _),
"submitMove" -> number.verifying(Pref.SubmitMove.choices.toMap contains _),
"coachShare" -> number.verifying(Set(0, 1, 2) contains _),
"captured" -> number.verifying(Set(0, 1) contains _)
)(PrefData.apply)(PrefData.unapply))
case class PrefData(
autoQueen: Int,
autoThreefold: Int,
takeback: Int,
clockTenths: Int,
clockBar: Int,
clockSound: Int,
follow: Int,
highlight: Int,
destination: Int,
coords: Int,
replay: Int,
blindfold: Int,
challenge: Int,
premove: Int,
animation: Int,
submitMove: Int,
coachShare: Int,
captured: Int) {
def apply(pref: Pref) = pref.copy(
autoQueen = autoQueen,
autoThreefold = autoThreefold,
takeback = takeback,
clockTenths = clockTenths,
clockBar = clockBar == 1,
clockSound = clockSound == 1,
follow = follow == 1,
highlight = highlight == 1,
destination = destination == 1,
coords = coords,
replay = replay,
blindfold = blindfold,
challenge = challenge,
premove = premove == 1,
animation = animation,
submitMove = submitMove,
coachShare = coachShare,
captured = captured == 1)
}
object PrefData {
def apply(pref: Pref): PrefData = PrefData(
autoQueen = pref.autoQueen,
autoThreefold = pref.autoThreefold,
takeback = pref.takeback,
clockTenths = pref.clockTenths,
clockBar = pref.clockBar.fold(1, 0),
clockSound = pref.clockSound.fold(1, 0),
follow = pref.follow.fold(1, 0),
highlight = pref.highlight.fold(1, 0),
destination = pref.destination.fold(1, 0),
coords = pref.coords,
replay = pref.replay,
blindfold = pref.blindfold,
challenge = pref.challenge,
premove = pref.premove.fold(1, 0),
animation = pref.animation,
submitMove = pref.submitMove,
coachShare = pref.coachShare,
captured = pref.captured.fold(1, 0))
}
def prefOf(p: Pref): Form[PrefData] = pref fill PrefData(p)
val miniPref = Form(mapping(
"autoQueen" -> number.verifying(Pref.AutoQueen.choices.toMap contains _),
"blindfold" -> number.verifying(Pref.Blindfold.choices.toMap contains _),
"clockTenths" -> number.verifying(Pref.ClockTenths.choices.toMap contains _),
"submitMove" -> number.verifying(Pref.SubmitMove.choices.toMap contains _)
)(MiniPrefData.apply)(MiniPrefData.unapply))
case class MiniPrefData(
autoQueen: Int,
blindfold: Int,
clockTenths: Int,
submitMove: Int) {
def apply(pref: Pref) = pref.copy(
autoQueen = autoQueen,
blindfold = blindfold,
clockTenths = clockTenths,
submitMove = submitMove)
}
object MiniPrefData {
def apply(pref: Pref): MiniPrefData = MiniPrefData(
autoQueen = pref.autoQueen,
blindfold = pref.blindfold,
clockTenths = pref.clockTenths,
submitMove = pref.submitMove)
}
def miniPrefOf(p: Pref): Form[MiniPrefData] = miniPref fill MiniPrefData(p)
val theme = Form(single(
"theme" -> nonEmptyText.verifying(Theme contains _)
))
val pieceSet = Form(single(
"set" -> nonEmptyText.verifying(PieceSet contains _)
))
val theme3d = Form(single(
"theme" -> nonEmptyText.verifying(Theme3d contains _)
))
val pieceSet3d = Form(single(
"set" -> nonEmptyText.verifying(PieceSet3d contains _)
))
val soundSet = Form(single(
"set" -> nonEmptyText.verifying(SoundSet contains _)
))
val bg = Form(single(
"bg" -> text.verifying(List("light", "dark", "transp") contains _)
))
val bgImg = Form(single(
"bgImg" -> nonEmptyText
))
val is3d = Form(single(
"is3d" -> text.verifying(List("true", "false") contains _)
))
}
|
ccampo133/lila
|
modules/pref/src/main/DataForm.scala
|
Scala
|
mit
| 4,982 |
package org.zapto.jablo.myml
case object True extends Const {
override def ==(c: Const): Const = c match {
case True => True
case False => False
}
override def !=(c: Const): Const = c match {
case True => False
case False => True
}
override def &&(c: Const): Const = c match {
case True => True
case False => False
}
override def ||(c: Const): Const = c match {
case True => True
case False => True
}
override def unary_! : Const = False
override def infix: String = "true"
}
case object False extends Const {
override def ==(c: Const): Const = c match {
case False => True
case True => False
}
override def !=(c: Const): Const = c match {
case False => False
case True => True
}
override def &&(c: Const): Const = c match {
case False => False
case True => False
}
override def ||(c: Const): Const = c match {
case True => True
case False => False
}
override def unary_! : Const = True
override def infix: String = "false"
}
|
jablo/myml
|
src/main/scala/org/zapto/jablo/myml/Bool.scala
|
Scala
|
artistic-2.0
| 1,041 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.graphx.lib
import scala.reflect.ClassTag
import org.apache.spark.graphx._
/**
* Compute the number of triangles passing through each vertex.
*
* The algorithm is relatively straightforward and can be computed in three steps:
*
* <ul>
* <li> Compute the set of neighbors for each vertex</li>
* <li> For each edge compute the intersection of the sets and send the count to both vertices.</li>
* <li> Compute the sum at each vertex and divide by two since each triangle is counted twice.</li>
* </ul>
*
* There are two implementations. The default `TriangleCount.run` implementation first removes
* self cycles and canonicalizes the graph to ensure that the following conditions hold:
* <ul>
* <li> There are no self edges</li>
* <li> All edges are oriented src > dst</li>
* <li> There are no duplicate edges</li>
* </ul>
* However, the canonicalization procedure is costly as it requires repartitioning the graph.
* If the input data is already in "canonical form" with self cycles removed then the
* `TriangleCount.runPreCanonicalized` should be used instead.
*
* {{{
* val canonicalGraph = graph.mapEdges(e => 1).removeSelfEdges().canonicalizeEdges()
* val counts = TriangleCount.runPreCanonicalized(canonicalGraph).vertices
* }}}
*
*/
object TriangleCount {
def run[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]): Graph[Int, ED] = {
// Transform the edge data to something cheap to shuffle and then canonicalize
val canonicalGraph = graph.mapEdges(e => true).removeSelfEdges().convertToCanonicalEdges()
// Get the triangle counts
val counters = runPreCanonicalized(canonicalGraph).vertices
// Join them back with the original graph
graph.outerJoinVertices(counters) { (vid, _, optCounter: Option[Int]) =>
optCounter.getOrElse(0)
}
}
def runPreCanonicalized[VD: ClassTag, ED: ClassTag](graph: Graph[VD, ED]): Graph[Int, ED] = {
// Construct set representations of the neighborhoods
val nbrSets: VertexRDD[VertexSet] =
graph.collectNeighborIds(EdgeDirection.Either).mapValues { (vid, nbrs) =>
val set = new VertexSet(nbrs.length)
var i = 0
while (i < nbrs.length) {
// prevent self cycle
if (nbrs(i) != vid) {
set.add(nbrs(i))
}
i += 1
}
set
}
// join the sets with the graph
val setGraph: Graph[VertexSet, ED] = graph.outerJoinVertices(nbrSets) {
(vid, _, optSet) => optSet.getOrElse(null)
}
// Edge function computes intersection of smaller vertex with larger vertex
def edgeFunc(ctx: EdgeContext[VertexSet, ED, Int]) {
val (smallSet, largeSet) = if (ctx.srcAttr.size < ctx.dstAttr.size) {
(ctx.srcAttr, ctx.dstAttr)
} else {
(ctx.dstAttr, ctx.srcAttr)
}
val iter = smallSet.iterator
var counter: Int = 0
while (iter.hasNext) {
val vid = iter.next()
if (vid != ctx.srcId && vid != ctx.dstId && largeSet.contains(vid)) {
counter += 1
}
}
ctx.sendToSrc(counter)
ctx.sendToDst(counter)
}
// compute the intersection along edges
val counters: VertexRDD[Int] = setGraph.aggregateMessages(edgeFunc, _ + _)
// Merge counters with the graph and divide by two since each triangle is counted twice
graph.outerJoinVertices(counters) { (_, _, optCounter: Option[Int]) =>
val dblCount = optCounter.getOrElse(0)
// This algorithm double counts each triangle so the final count should be even
require(dblCount % 2 == 0, "Triangle count resulted in an invalid number of triangles.")
dblCount / 2
}
}
}
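// A hedged, illustrative driver (not part of the upstream Spark source): it runs the default
// TriangleCount.run on a tiny local graph in which vertices 1, 2 and 3 form a single triangle.
// The object name and the local[*] master are assumptions made only for this sketch.
object TriangleCountExample {
  import org.apache.spark.{SparkConf, SparkContext}

  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setAppName("triangle-count-example").setMaster("local[*]"))
    // Edges of one triangle (1-2-3) plus a dangling edge to vertex 4.
    val rawEdges = sc.parallelize(Seq((1L, 2L), (2L, 3L), (3L, 1L), (3L, 4L)))
    val graph = Graph.fromEdgeTuples(rawEdges, defaultValue = 1)
    // Expected output: vertices 1, 2 and 3 count one triangle each, vertex 4 counts zero.
    TriangleCount.run(graph).vertices.collect().foreach(println)
    sc.stop()
  }
}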
|
gioenn/xSpark
|
graphx/src/main/scala/org/apache/spark/graphx/lib/TriangleCount.scala
|
Scala
|
apache-2.0
| 4,483 |
package org.bitcoins.core.script.flag
/** Created by chris on 4/6/16.
*/
trait ScriptFlagUtil {
/** Checks if the strict encoding is required in the set of flags
* given to us
* https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#DER_encoding
* @param flags
* @return
*/
def requiresStrictDerEncoding(flags: Seq[ScriptFlag]): Boolean = {
flags.contains(ScriptVerifyDerSig) || flags.contains(ScriptVerifyStrictEnc)
}
/** Checks if we are required to check for strict encoding
* @param flags
* @return
*/
def requireStrictEncoding(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyStrictEnc)
/** Checks if the script flag for checklocktimeverify is enabled
* @param flags
* @return
*/
def checkLockTimeVerifyEnabled(flags: Seq[ScriptFlag]): Boolean = {
flags.contains(ScriptVerifyCheckLocktimeVerify)
}
/** Checks if the p2sh flag is enabled
* @param flags
* @return
*/
def p2shEnabled(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyP2SH)
/** Checks if the script flag for checksequenceverify is enabled
* @param flags
* @return
*/
def checkSequenceVerifyEnabled(flags: Seq[ScriptFlag]): Boolean = {
flags.contains(ScriptVerifyCheckSequenceVerify)
}
/** Checks to see if the script flag is set to discourage NOPs that are not in use
* NOPs are used by soft forks to repurpose NOPs to actual functionality such as checklocktimeverify
* See BIP65 for an example of this
* @param flags
* @return
*/
def discourageUpgradableNOPs(flags: Seq[ScriptFlag]): Boolean = {
flags.contains(ScriptVerifyDiscourageUpgradableNOPs)
}
/** Checks to see if the script flag is set to require minimal push operations
* https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#Push_operators
* @param flags
* @return
*/
def requireMinimalData(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyMinimalData)
/** Checks to see if the script flag is set to require low s values in digital signatures
* https://github.com/bitcoin/bips/blob/master/bip-0062.mediawiki#low-s-values-in-signatures
* @param flags
* @return
*/
def requireLowSValue(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyLowS)
/** Checks to see if the script flag is set to require we only have push operations inside of a scriptSig
* @param flags
* @return
*/
def requirePushOnly(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifySigPushOnly)
/** Checks to see if the script flag is set to require that we need a NULLDUMMY to be OP_0 for
* OP_CHECKMULTISIG operations
* @param flags
* @return
*/
def requireNullDummy(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyNullDummy)
/** Checks to see if we have segwit enabled */
def segWitEnabled(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyWitness)
def discourageUpgradableWitnessProgram(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyDiscourageUpgradableWitnessProgram)
def requireScriptVerifyNullFail(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyNullFail)
def requireScriptVerifyWitnessPubKeyType(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyWitnessPubKeyType)
/** Requires that the argument to OP_IF/OP_NOTIF be minimally encoded
* See: https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2016-August/013014.html
*/
def minimalIfEnabled(flags: Seq[ScriptFlag]): Boolean =
flags.contains(ScriptVerifyMinimalIf)
}
object ScriptFlagUtil extends ScriptFlagUtil
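// Hedged usage sketch (added for illustration, not part of the upstream file): the flag case
// objects used here (ScriptVerifyP2SH, ScriptVerifyStrictEnc) are the ones already referenced by
// the trait above and are assumed to live in this package.
object ScriptFlagUtilExample {
  val flags: Seq[ScriptFlag] = Seq(ScriptVerifyP2SH, ScriptVerifyStrictEnc)

  // Evaluates to (true, true, false) for the flag set above.
  def demo(): (Boolean, Boolean, Boolean) =
    (ScriptFlagUtil.p2shEnabled(flags),
      ScriptFlagUtil.requireStrictEncoding(flags),
      ScriptFlagUtil.requireLowSValue(flags))
}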
|
bitcoin-s/bitcoin-s
|
core/src/main/scala/org/bitcoins/core/script/flag/ScriptFlagUtil.scala
|
Scala
|
mit
| 3,714 |
package sw.streaming
import org.apache.spark._
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.StreamingContext._
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.Duration
import org.apache.spark.streaming.Seconds
object SimpleStreaming extends App {
val sparkConf = new SparkConf()
.setAppName(this.getClass.getName)
.setMaster("local[*]")
val sc = new SparkContext(sparkConf)
val ssc = new StreamingContext(sc, Seconds(10))
val lines = ssc.socketTextStream("localhost", 9999)
lines.print()
ssc.start()
ssc.awaitTermination()
}
object SimpleStreaming2 extends App {
val sparkConf = new SparkConf()
.setAppName(this.getClass.getName)
.setMaster("local[*]")
val sc = new SparkContext(sparkConf)
val ssc = new StreamingContext(sc, Seconds(10))
val lines = ssc.socketTextStream("localhost", 9999)
lines.count().print()
ssc.start()
ssc.awaitTermination()
}
object SimpleStreaming3 extends App {
val sparkConf = new SparkConf()
.setAppName(this.getClass.getName)
.setMaster("local[*]")
val sc = new SparkContext(sparkConf)
val ssc = new StreamingContext(sc, Seconds(10))
val lines = ssc.socketTextStream("localhost", 9999)
val upperW = lines.map(_.toUpperCase()).filter(_.startsWith("W"))
upperW.print()
ssc.start()
ssc.awaitTermination()
}
|
rabbitonweb/spark-workshop
|
src/main/scala/sw/streaming/SimpleStreaming.scala
|
Scala
|
apache-2.0
| 1,397 |
package com.artclod.mathml.scalar.apply
import com.artclod.mathml._
import com.artclod.mathml.scalar._
import com.artclod.mathml.scalar.concept.Constant
import scala.util._
case class ApplyPlus(val values: MathMLElem*)
extends MathMLElem(MathML.h.prefix, "apply", MathML.h.attributes, MathML.h.scope, false, (Seq[MathMLElem](Plus) ++ values): _*) with SomeMathMLChildren {
def eval(boundVariables: Map[String, Double]) = Try(values.map(_.eval(boundVariables).get).reduceLeft(_ + _))
def constant: Option[Constant] =
if (values.forall(_.c.nonEmpty)) {
Some(values.map(_.c.get).reduce(_ + _))
} else {
None
}
def simplifyStep() =
(cns, flattenedMathMLElems) match {
case (Seq(cns @ _*), Seq()) => cns.reduce(_ + _)
case (Seq(), Seq(elem)) => elem
case (Seq(), Seq(elems @ _*)) => ApplyPlus(elems: _*)
case (Seq(cns @ _*), Seq(elems @ _*)) => ApplyPlus(elems ++ Seq(cns.reduce(_ + _)).filterNot(_.isZero): _*)
}
private def cns = values.map(_.c).filter(_.nonEmpty).map(_.get)
private def flattenedMathMLElems: Seq[MathMLElem] = values.filter(_.c.isEmpty).map(_.s)
.flatMap(_ match {
case v: ApplyPlus => v.values
case v: MathMLElem => Seq(v)
})
def variables: Set[String] = values.foldLeft(Set[String]())(_ ++ _.variables)
def derivative(x: String) = ApplyPlus(values.map(_.d(x)): _*).s
def toMathJS = values.map(_.toMathJS).mkString("(", " + " ,")")
def mathMLChildren = values
def copy(children: MathMLElem*) = ApplyPlus(children:_*)
}
|
kristiankime/calc-tutor
|
app/com/artclod/mathml/scalar/apply/ApplyPlus.scala
|
Scala
|
mit
| 1,497 |
package com.krux.hyperion.aws
import com.github.nscala_time.time.Imports.DateTime
/**
* Defines the timing of a scheduled event, such as when an activity runs.
*
* ref: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-object-schedule.html
*
*/
case class AdpOnDemandSchedule(
id: String,
name: Option[String]
) extends AdpDataPipelineObject {
val `type`: String = "Schedule"
}
/**
* Defines the timing of a scheduled event, such as when an activity runs.
*
* ref: http://docs.aws.amazon.com/datapipeline/latest/DeveloperGuide/dp-object-schedule.html
*
* @param period How often the pipeline should run. The format is "N [minutes|hours|days|weeks|months]",
* where N is a number followed by one of the time specifiers. For example,
* "15 minutes", runs the pipeline every 15 minutes. The minimum period is 15 minutes
* and the maximum period is 3 years.
* @param startAt The date and time at which to start the scheduled pipeline runs. Valid value is
* FIRST_ACTIVATION_DATE_TIME. FIRST_ACTIVATION_DATE_TIME is assumed to be the
* current date and time.
* @param startDateTime The date and time to start the scheduled runs. You must use either
* startDateTime or startAt but not both.
* @param occurrences The number of times to execute the pipeline after it's activated. You can't use
* occurrences with endDateTime.
* @param endDateTime The date and time to end the scheduled runs. Must be a date and time later than
* the value of startDateTime or startAt. The default behavior is to schedule runs
* until the pipeline is shut down.
*/
case class AdpRecurringSchedule(
id: String,
name: Option[String],
period: String,
startAt: Option[String],
startDateTime: Option[DateTime],
endDateTime: Option[DateTime],
occurrences: Option[String]
) extends AdpDataPipelineObject {
val `type`: String = "Schedule"
}
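// Hedged, illustrative instantiation (not part of the upstream source; the object name and field
// values are assumptions for this sketch): a schedule that runs daily from pipeline activation for
// 10 occurrences, following the parameter documentation above (startAt instead of startDateTime,
// and no endDateTime alongside occurrences).
object AdpScheduleExample {
  val daily = AdpRecurringSchedule(
    id = "DailySchedule",
    name = Some("DailySchedule"),
    period = "1 days",
    startAt = Some("FIRST_ACTIVATION_DATE_TIME"),
    startDateTime = None,
    endDateTime = None,
    occurrences = Some("10")
  )
}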
|
hoangelos/hyperion
|
core/src/main/scala/com/krux/hyperion/aws/AdpSchedule.scala
|
Scala
|
apache-2.0
| 2,046 |
package org.apache.mesos.chronos.scheduler.jobs
import com.codahale.metrics.{Counter, Histogram, MetricRegistry}
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.module.SimpleModule
import com.google.inject.Inject
import org.apache.mesos.chronos.scheduler.api.HistogramSerializer
import scala.collection.mutable
/**
* Author: @andykram
*/
class JobMetrics @Inject()(registry: MetricRegistry) {
protected val stats = new mutable.HashMap[String, Histogram]()
protected val statuses = new mutable.HashMap[String, Map[String, Counter]]()
protected val objectMapper = new ObjectMapper
protected val mod = new SimpleModule("JobModule")
mod.addSerializer(classOf[Histogram], new HistogramSerializer)
objectMapper.registerModule(mod)
def updateJobStat(jobName: String, timeMs: Long) {
// Uses a Uniform Histogram by default for long term metrics.
val stat: Histogram = stats.getOrElseUpdate(jobName, mkStat(jobName))
stat.update(timeMs)
}
def getJsonStats(jobName: String): String = {
val snapshot = getJobHistogramStats(jobName)
objectMapper.writeValueAsString(snapshot)
}
def getJobHistogramStats(jobName: String): Histogram = {
stats.getOrElseUpdate(jobName, mkStat(jobName))
}
protected def mkStat(jobName: String, name: String = "time") = {
registry.histogram(MetricRegistry.name("jobs", "run", name, jobName))
}
def updateJobStatus(jobName: String, success: Boolean) {
val statusCounters: Map[String, Counter] = statuses.getOrElseUpdate(jobName,
Map("success" -> mkCounter(jobName, "success"),
"failure" -> mkCounter(jobName, "failure")))
val counter: Counter = if (success) {
statusCounters.get("success").get
} else {
statusCounters.get("failure").get
}
counter.inc()
}
protected def mkCounter(jobName: String, name: String) = {
registry.counter(MetricRegistry.name("jobs", "run", name, jobName))
}
}
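// Hedged usage sketch (added for illustration, not part of the upstream project): exercises the
// helper directly instead of through Guice injection; the job name is a made-up example.
object JobMetricsExample extends App {
  val metrics = new JobMetrics(new MetricRegistry)
  metrics.updateJobStat("nightly-etl", timeMs = 1500L)
  metrics.updateJobStatus("nightly-etl", success = true)
  // Prints the JSON-serialized histogram snapshot for the job.
  println(metrics.getJsonStats("nightly-etl"))
}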
|
vixns/chronos
|
src/main/scala/org/apache/mesos/chronos/scheduler/jobs/JobMetrics.scala
|
Scala
|
apache-2.0
| 1,974 |
package djinni
import djinni.ast._
import djinni.generatorTools._
import djinni.meta._
class JNIMarshal(spec: Spec) extends Marshal(spec) {
// For JNI, typename() is always fully qualified and describes the mangled Java type to be used in field/method signatures
override def typename(tm: MExpr): String = javaTypeSignature(tm)
def typename(name: String, ty: TypeDef) = s"L${undecoratedTypename(name, ty)};"
override def fqTypename(tm: MExpr): String = typename(tm)
def fqTypename(name: String, ty: TypeDef): String = typename(name, ty)
override def paramType(tm: MExpr): String = toJniType(tm, false)
override def fqParamType(tm: MExpr): String = paramType(tm)
override def returnType(ret: Option[TypeRef]): String = ret.fold("void")(toJniType)
override def fqReturnType(ret: Option[TypeRef]): String = returnType(ret)
override def fieldType(tm: MExpr): String = paramType(tm)
override def fqFieldType(tm: MExpr): String = fqParamType(tm)
override def toCpp(tm: MExpr, expr: String): String = {
s"${helperClass(tm)}::toCpp(jniEnv, $expr)"
}
override def fromCpp(tm: MExpr, expr: String): String = {
s"${helperClass(tm)}::fromCpp(jniEnv, $expr)"
}
// Name for the autogenerated class containing field/method IDs and toJava()/fromJava() methods
def helperClass(name: String) = spec.jniClassIdentStyle(name)
private def helperClass(tm: MExpr): String = helperName(tm) + helperTemplates(tm)
def references(m: Meta, exclude: String = ""): Seq[SymbolReference] = m match {
case o: MOpaque => List(ImportRef(q(spec.jniBaseLibIncludePrefix + "Marshal.hpp")))
case d: MDef => List(ImportRef(include(d.name)))
case e: MExtern => List(ImportRef(e.jni.header))
case _ => List()
}
def include(ident: String) = q(spec.jniIncludePrefix + spec.jniFileIdentStyle(ident) + "." + spec.cppHeaderExt)
def toJniType(ty: TypeRef): String = toJniType(ty.resolved, false)
def toJniType(m: MExpr, needRef: Boolean): String = m.base match {
case p: MPrimitive => if (needRef) "jobject" else p.jniName
case MString => "jstring"
case MOptional => toJniType(m.args.head, true)
case MBinary => "jbyteArray"
case tp: MParam => helperClass(tp.name) + "::JniType"
case e: MExtern => helperClass(m) + (if(needRef) "::Boxed" else "") + "::JniType"
case _ => "jobject"
}
// The mangled Java typename without the "L...;" decoration, useful only for class reflection on our own type
def undecoratedTypename(name: String, ty: TypeDef): String = {
val javaClassName = idJava.ty(name)
spec.javaPackage.fold(javaClassName)(p => p.replaceAllLiterally(".", "/") + "/" + javaClassName)
}
private def javaTypeSignature(tm: MExpr): String = tm.base match {
case o: MOpaque => o match {
case p: MPrimitive => p.jSig
case MString => "Ljava/lang/String;"
case MDate => "Ljava/util/Date;"
case MBinary => "[B"
case MOptional => tm.args.head.base match {
case p: MPrimitive => s"Ljava/lang/${p.jBoxed};"
case MOptional => throw new AssertionError("nested optional?")
case m => javaTypeSignature(tm.args.head)
}
case MList => "Ljava/util/ArrayList;"
case MSet => "Ljava/util/HashSet;"
case MMap => "Ljava/util/HashMap;"
}
case e: MExtern => e.jni.typeSignature
case MParam(_) => "Ljava/lang/Object;"
case d: MDef => d.body match {
case e: Enum if e.flags => "Ljava/util/EnumSet;"
case _ => s"L${undecoratedTypename(d.name, d.body)};"
}
}
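// Illustrative mappings produced by javaTypeSignature above (added for clarity, derived from the
// cases in this file rather than from upstream documentation):
//   string        -> "Ljava/lang/String;"
//   binary        -> "[B"
//   optional<i32> -> the boxed signature, e.g. "Ljava/lang/Integer;"
//   list<string>  -> "Ljava/util/ArrayList;"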
def javaMethodSignature(params: Iterable[Field], ret: Option[TypeRef]) = {
params.map(f => typename(f.ty)).mkString("(", "", ")") + ret.fold("V")(typename)
}
def helperName(tm: MExpr): String = tm.base match {
case d: MDef => withNs(Some(spec.jniNamespace), helperClass(d.name))
case e: MExtern => e.jni.translator
case o => withNs(Some("djinni"), o match {
case p: MPrimitive => p.idlName match {
case "i8" => "I8"
case "i16" => "I16"
case "i32" => "I32"
case "i64" => "I64"
case "f32" => "F32"
case "f64" => "F64"
case "bool" => "Bool"
}
case MOptional => "Optional"
case MBinary => "Binary"
case MString => if (spec.cppUseWideStrings) "WString" else "String"
case MDate => "Date"
case MList => "List"
case MSet => "Set"
case MMap => "Map"
case d: MDef => throw new AssertionError("unreachable")
case e: MExtern => throw new AssertionError("unreachable")
case p: MParam => throw new AssertionError("not applicable")
})
}
private def helperTemplates(tm: MExpr): String = {
def f() = if(tm.args.isEmpty) "" else tm.args.map(helperClass).mkString("<", ", ", ">")
tm.base match {
case MOptional =>
assert(tm.args.size == 1)
//assert(!isInterface(tm.args.head))
val argHelperClass = helperClass(tm.args.head)
s"<${spec.cppOptionalTemplate}, $argHelperClass>"
case MList | MSet =>
assert(tm.args.size == 1)
f
case MMap =>
assert(tm.args.size == 2)
f
case _ => f
}
}
def isJavaHeapObject(ty: TypeRef): Boolean = isJavaHeapObject(ty.resolved.base)
def isJavaHeapObject(m: Meta): Boolean = m match {
case _: MPrimitive => false
case _ => true
}
}
|
happybits/djinni
|
src/source/JNIMarshal.scala
|
Scala
|
apache-2.0
| 5,349 |
package com.spike.giantdataanalysis.flink.example.streamingledger
import java.nio.file.Paths
import java.util.SplittableRandom
import com.dataartisans.streamingledger.sdk.api.AccessType.READ_WRITE
import com.dataartisans.streamingledger.sdk.api.{StreamingLedger, TransactionProcessFunction}
import com.spike.giantdataanalysis.flink.example.model.streamingledger2._
import org.apache.flink.api.java.functions.KeySelector
import org.apache.flink.runtime.state.StateBackend
import org.apache.flink.runtime.state.filesystem.FsStateBackend
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment
import org.apache.flink.streaming.api.functions.source.{RichParallelSourceFunction, SourceFunction}
import org.apache.flink.util.Preconditions.checkArgument // assumed origin of the bare checkArgument calls below
object ExampleStreamingLedger {
def main(args: Array[String]): Unit = {
// set up the execution environment and the configuration
val env = StreamExecutionEnvironment.getExecutionEnvironment
// configure Flink
env.setParallelism(4)
env.getConfig.enableObjectReuse
// enable checkpoints once a minute
env.enableCheckpointing(60000)
val uri = Paths.get("./checkpoints").toAbsolutePath.normalize.toUri
val backend = new FsStateBackend(uri, true)
env.setStateBackend(backend)
// start building the transactional streams
val tradeLedger = StreamingLedger.create("simple trade example")
// define the transactional states
// accounts table
val accounts: StreamingLedger.State[String, Long] =
tradeLedger.declareState("accounts").withKeyType(classOf[String]).withValueType(classOf[Long])
// book entries table
val books: StreamingLedger.State[String, Long] =
tradeLedger.declareState("bookEntries").withKeyType(classOf[String]).withValueType(classOf[Long])
// produce the deposits transaction stream
// deposit event stream
val deposits = env.addSource(new DepositsGenerator(1))
// define transactors on states
tradeLedger.usingStream[DepositEvent](deposits, "deposits")
.apply(new DepositHandler().asInstanceOf[TransactionProcessFunction[DepositEvent, Void]])
.on(accounts, new KeySelector[DepositEvent, String]() {
override def getKey(value: DepositEvent): String = value.getAccountId
}, "account", READ_WRITE)
.on(books, new KeySelector[DepositEvent, String]() {
override def getKey(value: DepositEvent): String = value.getBookEntryId
}, "asset", READ_WRITE)
// produce transactions stream
// transfer event stream
val transfers = env.addSource(new TransactionsGenerator(1))
val transactionResults = tradeLedger.usingStream(transfers, "transactions")
.apply(new TxnHandler().asInstanceOf[TransactionProcessFunction[TransactionEvent, TransactionResult]])
.on(accounts, new KeySelector[TransactionEvent, String]() {
override def getKey(value: TransactionEvent): String = value.getSourceAccountId
}, "source-account", READ_WRITE)
.on(accounts, new KeySelector[TransactionEvent, String]() {
override def getKey(value: TransactionEvent): String = value.getTargetAccountId
}, "target-account", READ_WRITE)
.on(books, new KeySelector[TransactionEvent, String]() {
override def getKey(value: TransactionEvent): String = value.getSourceBookEntryId
}, "source-asset", READ_WRITE)
.on(books, new KeySelector[TransactionEvent, String]() {
override def getKey(value: TransactionEvent): String = value.getTargetBookEntryId
}, "target-asset", READ_WRITE).output
// compute the resulting streams.
val resultsStreams = tradeLedger.resultStreams
// output to the console
resultsStreams.getResultStream(transactionResults).print
// trigger program execution
env.execute
}
}
//---------------------------------------------------------------------------
// Txn process
//---------------------------------------------------------------------------
//
// so the 'TransactionProcessFunction' cannot be implemented in Scala??? - 20180917
//
//@SerialVersionUID(1)
//class DepositHandler extends TransactionProcessFunction[DepositEvent, Unit] {
//
// import com.dataartisans.streamingledger.sdk.api.TransactionProcessFunction.State
//
// @ProcessTransaction
// def process(event: DepositEvent,
// ctx: TransactionProcessFunction.Context[Unit],
// @State("account") account: StateAccess[Long],
// @State("asset") asset: StateAccess[Long]): Unit = {
// val newAccountValue = account.readOr(Constant.ZERO) + event.accountTransfer
// account.write(newAccountValue)
// val newAssetValue = asset.readOr(Constant.ZERO) + event.bookEntryTransfer
// asset.write(newAssetValue)
// }
//}
//
//@SerialVersionUID(1)
//class TxnHandler extends TransactionProcessFunction[TransactionEvent, TransactionResult] {
//
// import com.dataartisans.streamingledger.sdk.api.TransactionProcessFunction.State
//
// @ProcessTransaction
// def process(txn: TransactionEvent,
// ctx: TransactionProcessFunction.Context[TransactionResult],
// @State("source-account") sourceAccount: StateAccess[Long],
// @State("target-account") targetAccount: StateAccess[Long],
// @State("source-asset") sourceAsset: StateAccess[Long],
// @State("target-asset") targetAsset: StateAccess[Long]): Unit = {
// val sourceAccountBalance = sourceAccount.readOr(Constant.ZERO)
// val sourceAssetValue = sourceAsset.readOr(Constant.ZERO)
// val targetAccountBalance = targetAccount.readOr(Constant.ZERO)
// val targetAssetValue = targetAsset.readOr(Constant.ZERO)
// // check the preconditions
// if (sourceAccountBalance > txn.minAccountBalance
// && sourceAccountBalance > txn.accountTransfer
// && sourceAssetValue > txn.bookEntryTransfer) { // compute the new balances
// val newSourceBalance = sourceAccountBalance - txn.accountTransfer
// val newTargetBalance = targetAccountBalance + txn.accountTransfer
// val newSourceAssets = sourceAssetValue - txn.bookEntryTransfer
// val newTargetAssets = targetAssetValue + txn.bookEntryTransfer
// // write back the updated values
// sourceAccount.write(newSourceBalance)
// targetAccount.write(newTargetBalance)
// sourceAsset.write(newSourceAssets)
// targetAsset.write(newTargetAssets)
// // emit result event with updated balances and flag to mark transaction as processed
// ctx.emit(new TransactionResult(txn, true, newSourceBalance, newTargetBalance))
// } else { // emit result with unchanged balances and a flag to mark transaction as rejected
// ctx.emit(new TransactionResult(txn, false, sourceAccountBalance, targetAccountBalance))
// }
// }
//}
//---------------------------------------------------------------------------
// Domain
//---------------------------------------------------------------------------
//// Event: deposit
//case class DepositEvent(var accountId: String,
// var bookEntryId: String,
// var accountTransfer: Long,
// var bookEntryTransfer: Long) {
// def this() = this("", "", 0L, 0L)
//}
//
//
//// Event: transfer
//case class TransactionEvent(var sourceAccountId: String,
// var targetAccountId: String,
// var sourceBookEntryId: String,
// var targetBookEntryId: String,
// var accountTransfer: Long,
// var bookEntryTransfer: Long,
// var minAccountBalance: Long) {
// def this() = this("", "", "", "", 0L, 0L, 0L)
//}
//
//// Query: transfer result
//case class TransactionResult(var transaction: TransactionEvent,
// var success: Boolean,
// var newSourceAccountBalance: Long,
// var newTargetAccountBalance: Long) {
// def this() = this(null, false, 0L, 0L)
//}
object Constant {
val NUM_ACCOUNTS: Int = 1000000
val NUM_BOOK_ENTRIES: Int = 1000000
val ACCOUNT_ID_PREFIX = "ACCT-"
val BOOK_ENTRY_ID_PREFIX = "BOOK-"
val MAX_ACCOUNT_TRANSFER: Long = 10000
val MAX_BOOK_TRANSFER: Long = 1000
val MIN_BALANCE = 0
val ZERO = new java.util.function.Supplier[Long]() {
override def get(): Long = 0L
}
}
//---------------------------------------------------------------------------
// Data Generator
//---------------------------------------------------------------------------
abstract class BaseGenerator[T](maxRecordsPerSecond: Int) extends RichParallelSourceFunction[T] {
checkArgument(maxRecordsPerSecond == -1 || maxRecordsPerSecond > 0,
"maxRecordsPerSecond must be positive or -1 (infinite)": String)
// must start as true, otherwise run() would exit immediately; volatile because cancel() is called from another thread
@volatile var running: Boolean = true
override def run(ctx: SourceFunction.SourceContext[T]): Unit = {
import java.util.SplittableRandom
val numberOfParallelSubtasks = getRuntimeContext.getNumberOfParallelSubtasks
val throttler = new Throttler(maxRecordsPerSecond, numberOfParallelSubtasks)
val rnd = new SplittableRandom
while (running) {
val event = randomEvent(rnd)
ctx.collect(event)
throttler.throttle
}
}
override def cancel(): Unit = {
running = false
}
def randomEvent(rnd: SplittableRandom): T
}
class DepositsGenerator(maxRecordsPerSecond: Int) extends BaseGenerator[DepositEvent](maxRecordsPerSecond) {
override def randomEvent(rnd: SplittableRandom): DepositEvent = {
val account = rnd.nextInt(Constant.NUM_ACCOUNTS)
val book = rnd.nextInt(Constant.NUM_BOOK_ENTRIES)
val accountsDeposit = rnd.nextLong(Constant.MAX_ACCOUNT_TRANSFER)
val deposit = rnd.nextLong(Constant.MAX_BOOK_TRANSFER)
new DepositEvent(Constant.ACCOUNT_ID_PREFIX + account,
Constant.BOOK_ENTRY_ID_PREFIX + book,
accountsDeposit,
deposit)
}
}
class TransactionsGenerator(maxRecordsPerSecond: Int) extends BaseGenerator[TransactionEvent](maxRecordsPerSecond) {
override def randomEvent(rnd: SplittableRandom): TransactionEvent = {
def inner(): TransactionEvent = {
val accountsTransfer = rnd.nextLong(Constant.MAX_ACCOUNT_TRANSFER)
val transfer = rnd.nextLong(Constant.MAX_BOOK_TRANSFER)
val sourceAcct = rnd.nextInt(Constant.NUM_ACCOUNTS)
val targetAcct = rnd.nextInt(Constant.NUM_ACCOUNTS)
val sourceBook = rnd.nextInt(Constant.NUM_BOOK_ENTRIES)
val targetBook = rnd.nextInt(Constant.NUM_BOOK_ENTRIES)
if (sourceAcct != targetAcct && sourceBook != targetBook) {
new TransactionEvent(Constant.ACCOUNT_ID_PREFIX + sourceAcct,
Constant.ACCOUNT_ID_PREFIX + targetAcct,
Constant.BOOK_ENTRY_ID_PREFIX + sourceBook,
Constant.BOOK_ENTRY_ID_PREFIX + targetBook,
accountsTransfer, transfer, Constant.MIN_BALANCE)
} else {
null
}
}
var result = inner()
while (result == null) {
result = inner()
}
result
}
}
class Throttler(maxRecordsPerSecond: Long, numberOfParallelSubtasks: Int) {
final private var throttleBatchSize = 0L
final private var nanosPerBatch = 0L
private var endOfNextBatchNanos = 0L
private var currentBatch = 0
checkArgument(maxRecordsPerSecond == -1 || maxRecordsPerSecond > 0, "maxRecordsPerSecond must be positive or -1 (infinite)")
checkArgument(numberOfParallelSubtasks > 0, "numberOfParallelSubtasks must be greater than 0")
if (maxRecordsPerSecond == -1) { // unlimited speed
throttleBatchSize = -1
nanosPerBatch = 0
endOfNextBatchNanos = System.nanoTime + nanosPerBatch
currentBatch = 0
} else {
val ratePerSubtask: Float = maxRecordsPerSecond.toFloat / numberOfParallelSubtasks
if (ratePerSubtask >= 10000) { // high rates: all throttling in intervals of 2ms
throttleBatchSize = ratePerSubtask.toInt / 500
nanosPerBatch = 2000000L
} else {
throttleBatchSize = (ratePerSubtask / 20).toInt + 1
nanosPerBatch = (1000000000L / ratePerSubtask).toInt * throttleBatchSize
}
this.endOfNextBatchNanos = System.nanoTime + nanosPerBatch
this.currentBatch = 0
}
@throws[InterruptedException]
def throttle(): Unit = {
if (throttleBatchSize == -1) return
currentBatch += 1
if (currentBatch != throttleBatchSize) return
currentBatch = 0
val now = System.nanoTime
val millisRemaining = ((endOfNextBatchNanos - now) / 1000000).toInt
if (millisRemaining > 0) {
endOfNextBatchNanos += nanosPerBatch
Thread.sleep(millisRemaining)
}
else endOfNextBatchNanos = now + nanosPerBatch
}
}
|
zhoujiagen/giant-data-analysis
|
temporal-data-and-realtime-algorithm/scala-temporal-apache-flink/src/main/scala/com/spike/giantdataanalysis/flink/example/streamingledger/ExampleStreamingLedger.scala
|
Scala
|
mit
| 12,637 |
import cats.instances.future._
import cats.syntax.functor._
import com.bot4s.telegram.api.declarative.Commands
import com.bot4s.telegram.future.Polling
import com.bot4s.telegram.models.Message
import scala.concurrent.Future
/**
* Simple extension for having stateful Telegram Bots (per chat).
* The main issue is locking/synchronization, actors (FSM) are a better alternative.
* This can be easily adapted to handle per-user or per-user+chat state.
*/
trait PerChatState[S] {
private val chatState = collection.mutable.Map[Long, S]()
def setChatState(value: S)(implicit msg: Message): Unit = atomic {
chatState.update(msg.chat.id, value)
}
def clearChatState(implicit msg: Message): Unit = atomic {
chatState.remove(msg.chat.id)
}
private def atomic[T](f: => T): T = chatState.synchronized {
f
}
def withChatState(f: Option[S] => Future[Unit])(implicit msg: Message) = f(getChatState)
def getChatState(implicit msg: Message): Option[S] = atomic {
chatState.get(msg.chat.id)
}
}
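/**
 * Hedged sketch of the per-user adaptation mentioned in PerChatState's documentation (illustrative,
 * not part of the original example): the only real change is keying the map on the message sender
 * rather than the chat. It assumes `Message.from: Option[User]` from the bot4s models imported above.
 */
trait PerUserState[S] {
  private val userState = collection.mutable.Map[Long, S]()

  private def atomic[T](f: => T): T = userState.synchronized(f)

  def setUserState(value: S)(implicit msg: Message): Unit = atomic {
    msg.from.foreach(user => userState.update(user.id.toLong, value))
  }

  def clearUserState(implicit msg: Message): Unit = atomic {
    msg.from.foreach(user => userState.remove(user.id.toLong))
  }

  def getUserState(implicit msg: Message): Option[S] = atomic {
    msg.from.flatMap(user => userState.get(user.id.toLong))
  }
}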
/**
* Per-chat counter.
*
* @param token Bot's token.
*/
class StatefulBot(token: String) extends ExampleBot(token) with Polling with Commands[Future] with PerChatState[Int] {
onCommand("/inc") { implicit msg =>
withChatState { s =>
val n = s.getOrElse(0)
setChatState(n + 1)
reply(s"Counter: $n").void
}
}
}
|
mukel/telegrambot4s
|
examples/src/StatefulBot.scala
|
Scala
|
apache-2.0
| 1,370 |
/*
* Copyright (c) 2015-2018 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.scalatracker
import io.circe.syntax._
import Emitter.{BufferConfig, Request}
/** Represents an emitter's internal state as it buffers events.
*
* Emitters typically send events in larger batches, depending on the buffering configuration
*/
private[scalatracker] trait Buffer {
import Buffer._
/* Resets the internal state to an empty buffer, i.e. after sending the pending batch in a post request */
def reset: Buffer
/** Update the buffered state with a new payload */
def add(payload: Payload): Buffer
/** Is the buffer full - i.e. is it time to send the buffered batch to the collector */
def isFull: Boolean
/** Convert the pending batch to a Request.
*
* Can return None if the batch is empty
*/
def toRequest: Option[Request]
/** Handle an event to update the state and possibly create a request which should be sent to the collector */
def handle(action: Action): (Buffer, Option[Request]) =
action match {
case Action.Terminate | Action.Flush =>
reset -> toRequest
case Action.Enqueue(payload) =>
val next = add(payload)
if (next.isFull)
reset -> next.toRequest
else
next -> None
}
}
private[scalatracker] object Buffer {
def apply(config: BufferConfig): Buffer =
BufferImpl(Nil, 0, 0, config)
final private case class BufferImpl(toList: List[Payload], count: Int, bytes: Int, config: BufferConfig)
extends Buffer {
override def reset: Buffer = Buffer(config)
override def add(payload: Payload): Buffer = {
val newBytes =
if (toList.isEmpty) {
Payload.postPayload(Seq(payload)).getBytes.length
} else {
payload.asJson.noSpaces.getBytes.length + bytes + 1
}
BufferImpl(payload :: toList, count + 1, newBytes, config)
}
override def isFull: Boolean =
Buffer.isFull(toList, count, bytes, config)
override def toRequest: Option[Request] =
toList match {
case Nil => None
case single :: Nil if config == BufferConfig.NoBuffering =>
Some(Request(single))
case more =>
Some(Request(more))
}
}
/** ADT of actions the emitter needs to handle */
sealed trait Action
object Action {
final case class Enqueue(payload: Payload) extends Action
case object Flush extends Action
case object Terminate extends Action
}
private def isFull(payloads: List[Payload], count: Int, bytes: Int, config: BufferConfig): Boolean =
config match {
case BufferConfig.NoBuffering =>
payloads.nonEmpty
case BufferConfig.EventsCardinality(max) =>
count >= max
case BufferConfig.PayloadSize(max) =>
bytes >= max
case BufferConfig.OneOf(left, right) =>
isFull(payloads, count, bytes, left) || isFull(payloads, count, bytes, right)
}
}
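// Hedged usage sketch (comments only, since building a real Payload is out of scope here; not part
// of the upstream tracker). With BufferConfig.EventsCardinality(2), the second enqueue fills the
// buffer and yields a request containing both payloads:
//
//   val empty             = Buffer(BufferConfig.EventsCardinality(2))
//   val (one, r1)         = empty.handle(Buffer.Action.Enqueue(payload1)) // r1 == None
//   val (resetBuffer, r2) = one.handle(Buffer.Action.Enqueue(payload2))   // r2 == Some(Request(List(payload2, payload1)))
//
// `payload1` and `payload2` stand in for already-built tracker payloads.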
|
snowplow/snowplow-scala-tracker
|
modules/core/src/main/scala/com.snowplowanalytics.snowplow/scalatracker/Buffer.scala
|
Scala
|
apache-2.0
| 3,624 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.IOException
import java.net.URI
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.collection.parallel.ForkJoinTaskSupport
import scala.concurrent.forkjoin.ForkJoinPool
import scala.util.{Failure, Try}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.mapreduce._
import org.apache.hadoop.mapreduce.lib.input.FileSplit
import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl
import org.apache.parquet.filter2.compat.FilterCompat
import org.apache.parquet.filter2.predicate.FilterApi
import org.apache.parquet.format.converter.ParquetMetadataConverter.SKIP_ROW_GROUPS
import org.apache.parquet.hadoop._
import org.apache.parquet.hadoop.codec.CodecConfig
import org.apache.parquet.hadoop.util.ContextUtil
import org.apache.parquet.schema.MessageType
import org.apache.spark.{SparkException, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.GenerateUnsafeProjection
import org.apache.spark.sql.catalyst.parser.LegacyTypeStringParser
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.{SerializableConfiguration, ThreadUtils}
class ParquetFileFormat
extends FileFormat
with DataSourceRegister
with Logging
with Serializable {
// Hold a reference to the (serializable) singleton instance of ParquetLogRedirector. This
// ensures the ParquetLogRedirector class is initialized whether an instance of ParquetFileFormat
// is constructed or deserialized. Do not heed the Scala compiler's warning about an unused field
// here.
private val parquetLogRedirector = ParquetLogRedirector.INSTANCE
override def shortName(): String = "parquet"
override def toString: String = "Parquet"
override def hashCode(): Int = getClass.hashCode()
override def equals(other: Any): Boolean = other.isInstanceOf[ParquetFileFormat]
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
val parquetOptions = new ParquetOptions(options, sparkSession.sessionState.conf)
val conf = ContextUtil.getConfiguration(job)
val committerClass =
conf.getClass(
SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key,
classOf[ParquetOutputCommitter],
classOf[ParquetOutputCommitter])
if (conf.get(SQLConf.PARQUET_OUTPUT_COMMITTER_CLASS.key) == null) {
logInfo("Using default output committer for Parquet: " +
classOf[ParquetOutputCommitter].getCanonicalName)
} else {
logInfo("Using user defined output committer for Parquet: " + committerClass.getCanonicalName)
}
conf.setClass(
SQLConf.OUTPUT_COMMITTER_CLASS.key,
committerClass,
classOf[ParquetOutputCommitter])
// We're not really using `ParquetOutputFormat[Row]` for writing data here, because we override
// it in `ParquetOutputWriter` to support appending and dynamic partitioning. The reason why
// we set it here is to set up the output committer class to `ParquetOutputCommitter`, which is
// bundled with `ParquetOutputFormat[Row]`.
job.setOutputFormatClass(classOf[ParquetOutputFormat[Row]])
ParquetOutputFormat.setWriteSupportClass(job, classOf[ParquetWriteSupport])
// We want to clear this temporary metadata before it is saved into the Parquet file.
// This metadata is only useful for detecting optional columns when pushing down filters.
ParquetWriteSupport.setSchema(dataSchema, conf)
// Sets flags for `CatalystSchemaConverter` (which converts Catalyst schema to Parquet schema)
// and `CatalystWriteSupport` (writing actual rows to Parquet files).
conf.set(
SQLConf.PARQUET_BINARY_AS_STRING.key,
sparkSession.sessionState.conf.isParquetBinaryAsString.toString)
conf.set(
SQLConf.PARQUET_INT96_AS_TIMESTAMP.key,
sparkSession.sessionState.conf.isParquetINT96AsTimestamp.toString)
conf.set(
SQLConf.PARQUET_WRITE_LEGACY_FORMAT.key,
sparkSession.sessionState.conf.writeLegacyParquetFormat.toString)
conf.set(
SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS.key,
sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis.toString)
// Sets compression scheme
conf.set(ParquetOutputFormat.COMPRESSION, parquetOptions.compressionCodecClassName)
// SPARK-15719: Disables writing Parquet summary files by default.
if (conf.get(ParquetOutputFormat.ENABLE_JOB_SUMMARY) == null) {
conf.setBoolean(ParquetOutputFormat.ENABLE_JOB_SUMMARY, false)
}
new OutputWriterFactory {
// This OutputWriterFactory instance is deserialized when writing Parquet files on the
// executor side without constructing or deserializing ParquetFileFormat. Therefore, we hold
// another reference to ParquetLogRedirector.INSTANCE here to ensure the latter class is
// initialized.
private val parquetLogRedirector = ParquetLogRedirector.INSTANCE
override def newInstance(
path: String,
dataSchema: StructType,
context: TaskAttemptContext): OutputWriter = {
new ParquetOutputWriter(path, context)
}
override def getFileExtension(context: TaskAttemptContext): String = {
CodecConfig.from(context).getCodec.getExtension + ".parquet"
}
}
}
override def inferSchema(
sparkSession: SparkSession,
parameters: Map[String, String],
files: Seq[FileStatus]): Option[StructType] = {
val parquetOptions = new ParquetOptions(parameters, sparkSession.sessionState.conf)
// Should we merge schemas from all Parquet part-files?
val shouldMergeSchemas = parquetOptions.mergeSchema
val mergeRespectSummaries = sparkSession.sessionState.conf.isParquetSchemaRespectSummaries
val filesByType = splitFiles(files)
// Sees which file(s) we need to touch in order to figure out the schema.
//
// Always tries the summary files first if users don't require a merged schema. In this case,
// "_common_metadata" is more preferable than "_metadata" because it doesn't contain row
// groups information, and could be much smaller for large Parquet files with lots of row
// groups. If no summary file is available, falls back to some random part-file.
//
// NOTE: Metadata stored in the summary files are merged from all part-files. However, for
// user defined key-value metadata (in which we store Spark SQL schema), Parquet doesn't know
// how to merge them correctly if some key is associated with different values in different
// part-files. When this happens, Parquet simply gives up generating the summary file. This
// implies that if a summary file is present, then:
//
// 1. Either all part-files have exactly the same Spark SQL schema, or
// 2. Some part-files don't contain Spark SQL schema in the key-value metadata at all (thus
// their schemas may differ from each other).
//
// Here we tend to be pessimistic and take the second case into account. Basically this means
// we can't trust the summary files if users require a merged schema, and must touch all part-
// files to do the merge.
val filesToTouch =
if (shouldMergeSchemas) {
// Also includes summary files, because there might be empty partition directories.
// If the mergeRespectSummaries config is true, we assume that the schemas of all
// part-files agree with their summary files, so we skip the part-files and only merge
// the schemas contained in the summary files. If the config is disabled, which is the
// default, we merge all part-files as well. You should enable this configuration only
// if you are very sure that the parquet part-files to be read have corresponding
// summary files containing the correct schema.
// As filed in SPARK-11500, the order of files to touch matters, since it might affect
// the ordering of the output columns. There are several things to mention here.
//
// 1. If mergeRespectSummaries config is false, then it merges schemas by reducing from
// the first part-file so that the columns of the lexicographically first file show
// first.
//
// 2. If mergeRespectSummaries config is true, then there should be, at least,
// "_metadata"s for all given files, so that we can ensure the columns of
// the lexicographically first file show first.
//
// 3. If shouldMergeSchemas is false, but when multiple files are given, there is
// no guarantee of the output order, since there might not be a summary file for the
// lexicographically first file, which ends up putting the columns of the other
// files first. However, this should be okay since not enabling
// shouldMergeSchemas means (assumes) all the files have the same schemas.
val needMerged: Seq[FileStatus] =
if (mergeRespectSummaries) {
Seq.empty
} else {
filesByType.data
}
needMerged ++ filesByType.metadata ++ filesByType.commonMetadata
} else {
// Tries any "_common_metadata" first. Parquet files written by old versions or Parquet
// don't have this.
filesByType.commonMetadata.headOption
// Falls back to "_metadata"
.orElse(filesByType.metadata.headOption)
// Summary file(s) not found, the Parquet file is either corrupted, or different part-
// files contain conflicting user defined metadata (two or more values are associated
// with the same key in different files). In either case, we fall back to the
// first part-file and just assume all schemas are consistent.
.orElse(filesByType.data.headOption)
.toSeq
}
ParquetFileFormat.mergeSchemasInParallel(filesToTouch, sparkSession)
}
case class FileTypes(
data: Seq[FileStatus],
metadata: Seq[FileStatus],
commonMetadata: Seq[FileStatus])
private def splitFiles(allFiles: Seq[FileStatus]): FileTypes = {
val leaves = allFiles.toArray.sortBy(_.getPath.toString)
FileTypes(
data = leaves.filterNot(f => isSummaryFile(f.getPath)),
metadata =
leaves.filter(_.getPath.getName == ParquetFileWriter.PARQUET_METADATA_FILE),
commonMetadata =
leaves.filter(_.getPath.getName == ParquetFileWriter.PARQUET_COMMON_METADATA_FILE))
}
private def isSummaryFile(file: Path): Boolean = {
file.getName == ParquetFileWriter.PARQUET_COMMON_METADATA_FILE ||
file.getName == ParquetFileWriter.PARQUET_METADATA_FILE
}
/**
* Returns whether the reader will return the rows as batch or not.
*/
override def supportBatch(sparkSession: SparkSession, schema: StructType): Boolean = {
val conf = sparkSession.sessionState.conf
conf.parquetVectorizedReaderEnabled && conf.wholeStageEnabled &&
schema.length <= conf.wholeStageMaxNumFields &&
schema.forall(_.dataType.isInstanceOf[AtomicType])
}
override def isSplitable(
sparkSession: SparkSession,
options: Map[String, String],
path: Path): Boolean = {
true
}
override def buildReaderWithPartitionValues(
sparkSession: SparkSession,
dataSchema: StructType,
partitionSchema: StructType,
requiredSchema: StructType,
filters: Seq[Filter],
options: Map[String, String],
hadoopConf: Configuration): (PartitionedFile) => Iterator[InternalRow] = {
hadoopConf.set(ParquetInputFormat.READ_SUPPORT_CLASS, classOf[ParquetReadSupport].getName)
hadoopConf.set(
ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA,
ParquetSchemaConverter.checkFieldNames(requiredSchema).json)
hadoopConf.set(
ParquetWriteSupport.SPARK_ROW_SCHEMA,
ParquetSchemaConverter.checkFieldNames(requiredSchema).json)
ParquetWriteSupport.setSchema(requiredSchema, hadoopConf)
// Sets flags for `CatalystSchemaConverter`
hadoopConf.setBoolean(
SQLConf.PARQUET_BINARY_AS_STRING.key,
sparkSession.sessionState.conf.isParquetBinaryAsString)
hadoopConf.setBoolean(
SQLConf.PARQUET_INT96_AS_TIMESTAMP.key,
sparkSession.sessionState.conf.isParquetINT96AsTimestamp)
hadoopConf.setBoolean(
SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS.key,
sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis)
// Try to push down filters when filter push-down is enabled.
val pushed =
if (sparkSession.sessionState.conf.parquetFilterPushDown) {
filters
// Collects all converted Parquet filter predicates. Notice that not all predicates can be
// converted (`ParquetFilters.createFilter` returns an `Option`). That's why a `flatMap`
// is used here.
.flatMap(ParquetFilters.createFilter(requiredSchema, _))
.reduceOption(FilterApi.and)
} else {
None
}
val broadcastedHadoopConf =
sparkSession.sparkContext.broadcast(new SerializableConfiguration(hadoopConf))
// TODO: if you move this into the closure it reverts to the default values.
// If true, enable using the custom RecordReader for parquet. This only works for
// a subset of the types (no complex types).
val resultSchema = StructType(partitionSchema.fields ++ requiredSchema.fields)
val enableVectorizedReader: Boolean =
sparkSession.sessionState.conf.parquetVectorizedReaderEnabled &&
resultSchema.forall(_.dataType.isInstanceOf[AtomicType])
// Whole stage codegen (PhysicalRDD) is able to deal with batches directly
val returningBatch = supportBatch(sparkSession, resultSchema)
(file: PartitionedFile) => {
assert(file.partitionValues.numFields == partitionSchema.size)
val fileSplit =
new FileSplit(new Path(new URI(file.filePath)), file.start, file.length, Array.empty)
val split =
new org.apache.parquet.hadoop.ParquetInputSplit(
fileSplit.getPath,
fileSplit.getStart,
fileSplit.getStart + fileSplit.getLength,
fileSplit.getLength,
fileSplit.getLocations,
null)
val attemptId = new TaskAttemptID(new TaskID(new JobID(), TaskType.MAP, 0), 0)
val hadoopAttemptContext =
new TaskAttemptContextImpl(broadcastedHadoopConf.value.value, attemptId)
// Try to push down filters when filter push-down is enabled.
// Notice: This push-down is RowGroups level, not individual records.
if (pushed.isDefined) {
ParquetInputFormat.setFilterPredicate(hadoopAttemptContext.getConfiguration, pushed.get)
}
val parquetReader = if (enableVectorizedReader) {
val vectorizedReader = new VectorizedParquetRecordReader()
vectorizedReader.initialize(split, hadoopAttemptContext)
logDebug(s"Appending $partitionSchema ${file.partitionValues}")
vectorizedReader.initBatch(partitionSchema, file.partitionValues)
if (returningBatch) {
vectorizedReader.enableReturningBatches()
}
vectorizedReader
} else {
logDebug(s"Falling back to parquet-mr")
// ParquetRecordReader returns UnsafeRow
val reader = pushed match {
case Some(filter) =>
new ParquetRecordReader[UnsafeRow](
new ParquetReadSupport,
FilterCompat.get(filter, null))
case _ =>
new ParquetRecordReader[UnsafeRow](new ParquetReadSupport)
}
reader.initialize(split, hadoopAttemptContext)
reader
}
val iter = new RecordReaderIterator(parquetReader)
Option(TaskContext.get()).foreach(_.addTaskCompletionListener(_ => iter.close()))
// UnsafeRowParquetRecordReader appends the columns internally to avoid another copy.
if (parquetReader.isInstanceOf[VectorizedParquetRecordReader] &&
enableVectorizedReader) {
iter.asInstanceOf[Iterator[InternalRow]]
} else {
val fullSchema = requiredSchema.toAttributes ++ partitionSchema.toAttributes
val joinedRow = new JoinedRow()
val appendPartitionColumns = GenerateUnsafeProjection.generate(fullSchema, fullSchema)
// This is a horrible erasure hack... if we type the iterator above, then it actually checks
// the type in next() and we get a class cast exception. If we make that function return
// Object, then we can defer the cast until later!
if (partitionSchema.length == 0) {
// There are no partition columns
iter.asInstanceOf[Iterator[InternalRow]]
} else {
iter.asInstanceOf[Iterator[InternalRow]]
.map(d => appendPartitionColumns(joinedRow(d, file.partitionValues)))
}
}
}
}
}
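
// Illustrative, self-contained sketch (added for clarity; not part of Spark) of the predicate
// push-down combination pattern used in buildReaderWithPartitionValues above: each source filter
// is converted by a function that may fail (it returns an Option), the successes are collected
// with flatMap, and the surviving predicates are combined with reduceOption so that "nothing
// could be converted" yields None rather than an error. The Pred ADT is a stand-in, not Parquet's
// FilterPredicate.
object FilterPushDownSketch {
  sealed trait Pred
  final case class Gt(column: String, value: Int) extends Pred
  final case class And(left: Pred, right: Pred) extends Pred

  // Pretend that only filters on non-empty column names can be converted.
  def convert(filter: (String, Int)): Option[Pred] = filter match {
    case (column, value) if column.nonEmpty => Some(Gt(column, value))
    case _ => None
  }

  def combine(filters: Seq[(String, Int)]): Option[Pred] =
    filters.flatMap(convert).reduceOption(And)
}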
object ParquetFileFormat extends Logging {
private[parquet] def readSchema(
footers: Seq[Footer], sparkSession: SparkSession): Option[StructType] = {
def parseParquetSchema(schema: MessageType): StructType = {
val converter = new ParquetSchemaConverter(
sparkSession.sessionState.conf.isParquetBinaryAsString,
sparkSession.sessionState.conf.isParquetINT96AsTimestamp,
sparkSession.sessionState.conf.writeLegacyParquetFormat,
sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis)
converter.convert(schema)
}
val seen = mutable.HashSet[String]()
val finalSchemas: Seq[StructType] = footers.flatMap { footer =>
val metadata = footer.getParquetMetadata.getFileMetaData
val serializedSchema = metadata
.getKeyValueMetaData
.asScala.toMap
.get(ParquetReadSupport.SPARK_METADATA_KEY)
if (serializedSchema.isEmpty) {
// Falls back to Parquet schema if no Spark SQL schema found.
Some(parseParquetSchema(metadata.getSchema))
} else if (!seen.contains(serializedSchema.get)) {
seen += serializedSchema.get
// Don't throw even if we failed to parse the serialized Spark schema. Just fallback to
// whatever is available.
Some(Try(DataType.fromJson(serializedSchema.get))
.recover { case _: Throwable =>
logInfo(
"Serialized Spark schema in Parquet key-value metadata is not in JSON format, " +
"falling back to the deprecated DataType.fromCaseClassString parser.")
LegacyTypeStringParser.parse(serializedSchema.get)
}
.recover { case cause: Throwable =>
logWarning(
s"""Failed to parse serialized Spark schema in Parquet key-value metadata:
|\t$serializedSchema
""".stripMargin,
cause)
}
.map(_.asInstanceOf[StructType])
.getOrElse {
// Falls back to Parquet schema if Spark SQL schema can't be parsed.
parseParquetSchema(metadata.getSchema)
})
} else {
None
}
}
finalSchemas.reduceOption { (left, right) =>
try left.merge(right) catch { case e: Throwable =>
throw new SparkException(s"Failed to merge incompatible schemas $left and $right", e)
}
}
}
/**
* Reads Parquet footers in a multi-threaded manner.
* If the config "spark.sql.files.ignoreCorruptFiles" is set to true, we will ignore the corrupted
* files when reading footers.
*/
private[parquet] def readParquetFootersInParallel(
conf: Configuration,
partFiles: Seq[FileStatus],
ignoreCorruptFiles: Boolean): Seq[Footer] = {
val parFiles = partFiles.par
val pool = ThreadUtils.newForkJoinPool("readingParquetFooters", 8)
parFiles.tasksupport = new ForkJoinTaskSupport(pool)
try {
parFiles.flatMap { currentFile =>
try {
// Skips row group information since we only need the schema.
// ParquetFileReader.readFooter throws RuntimeException, instead of IOException,
// when it can't read the footer.
Some(new Footer(currentFile.getPath(),
ParquetFileReader.readFooter(
conf, currentFile, SKIP_ROW_GROUPS)))
} catch { case e: RuntimeException =>
if (ignoreCorruptFiles) {
logWarning(s"Skipped the footer in the corrupted file: $currentFile", e)
None
} else {
throw new IOException(s"Could not read footer for file: $currentFile", e)
}
}
}.seq
} finally {
pool.shutdown()
}
}
/**
* Figures out a merged Parquet schema with a distributed Spark job.
*
* Note that locality is not taken into consideration here because:
*
* 1. For a single Parquet part-file, in most cases the footer only resides in the last block of
* that file. Thus we only need to retrieve the location of the last block. However, Hadoop
* `FileSystem` only provides API to retrieve locations of all blocks, which can be
* potentially expensive.
*
* 2. This optimization is mainly useful for S3, where file metadata operations can be pretty
* slow. And basically locality is not available when using S3 (you can't run computation on
* S3 nodes).
*/
def mergeSchemasInParallel(
filesToTouch: Seq[FileStatus],
sparkSession: SparkSession): Option[StructType] = {
val assumeBinaryIsString = sparkSession.sessionState.conf.isParquetBinaryAsString
val assumeInt96IsTimestamp = sparkSession.sessionState.conf.isParquetINT96AsTimestamp
val writeTimestampInMillis = sparkSession.sessionState.conf.isParquetINT64AsTimestampMillis
val writeLegacyParquetFormat = sparkSession.sessionState.conf.writeLegacyParquetFormat
val serializedConf = new SerializableConfiguration(sparkSession.sessionState.newHadoopConf())
// !! HACK ALERT !!
//
// Parquet requires `FileStatus`es to read footers. Here we try to send cached `FileStatus`es
// to executor side to avoid fetching them again. However, `FileStatus` is not `Serializable`
// but only `Writable`. What makes it worse, for some reason, `FileStatus` doesn't play well
// with `SerializableWritable[T]` and always causes a weird `IllegalStateException`. These
// facts virtually prevent us from serializing `FileStatus`es.
//
// Since Parquet only relies on path and length information of those `FileStatus`es to read
// footers, here we just extract them (which can be easily serialized), send them to executor
// side, and reconstruct fake `FileStatus`es there.
val partialFileStatusInfo = filesToTouch.map(f => (f.getPath.toString, f.getLen))
// Set the number of partitions to prevent following schema reads from generating many tasks
// in case of a small number of parquet files.
val numParallelism = Math.min(Math.max(partialFileStatusInfo.size, 1),
sparkSession.sparkContext.defaultParallelism)
val ignoreCorruptFiles = sparkSession.sessionState.conf.ignoreCorruptFiles
// Issues a Spark job to read Parquet schema in parallel.
val partiallyMergedSchemas =
sparkSession
.sparkContext
.parallelize(partialFileStatusInfo, numParallelism)
.mapPartitions { iterator =>
// Reconstructs fake `FileStatus`es from the serialized path and length information.
val fakeFileStatuses = iterator.map { case (path, length) =>
new FileStatus(length, false, 0, 0, 0, 0, null, null, null, new Path(path))
}.toSeq
// Reads footers in a multi-threaded manner within each task
val footers =
ParquetFileFormat.readParquetFootersInParallel(
serializedConf.value, fakeFileStatuses, ignoreCorruptFiles)
// Converter used to convert Parquet `MessageType` to Spark SQL `StructType`
val converter =
new ParquetSchemaConverter(
assumeBinaryIsString = assumeBinaryIsString,
assumeInt96IsTimestamp = assumeInt96IsTimestamp,
writeLegacyParquetFormat = writeLegacyParquetFormat,
writeTimestampInMillis = writeTimestampInMillis)
if (footers.isEmpty) {
Iterator.empty
} else {
var mergedSchema = ParquetFileFormat.readSchemaFromFooter(footers.head, converter)
footers.tail.foreach { footer =>
val schema = ParquetFileFormat.readSchemaFromFooter(footer, converter)
try {
mergedSchema = mergedSchema.merge(schema)
} catch { case cause: SparkException =>
throw new SparkException(
s"Failed merging schema of file ${footer.getFile}:\n${schema.treeString}", cause)
}
}
Iterator.single(mergedSchema)
}
}.collect()
if (partiallyMergedSchemas.isEmpty) {
None
} else {
var finalSchema = partiallyMergedSchemas.head
partiallyMergedSchemas.tail.foreach { schema =>
try {
finalSchema = finalSchema.merge(schema)
} catch { case cause: SparkException =>
throw new SparkException(
s"Failed merging schema:\n${schema.treeString}", cause)
}
}
Some(finalSchema)
}
}
/**
* Reads Spark SQL schema from a Parquet footer. If a valid serialized Spark SQL schema string
* can be found in the file metadata, returns the deserialized [[StructType]], otherwise, returns
* a [[StructType]] converted from the [[MessageType]] stored in this footer.
*/
def readSchemaFromFooter(
footer: Footer, converter: ParquetSchemaConverter): StructType = {
val fileMetaData = footer.getParquetMetadata.getFileMetaData
fileMetaData
.getKeyValueMetaData
.asScala.toMap
.get(ParquetReadSupport.SPARK_METADATA_KEY)
.flatMap(deserializeSchemaString)
.getOrElse(converter.convert(fileMetaData.getSchema))
}
private def deserializeSchemaString(schemaString: String): Option[StructType] = {
// Tries to deserialize the schema string as JSON first, then falls back to the case class
// string parser (data generated by older versions of Spark SQL uses this format).
Try(DataType.fromJson(schemaString).asInstanceOf[StructType]).recover {
case _: Throwable =>
logInfo(
"Serialized Spark schema in Parquet key-value metadata is not in JSON format, " +
"falling back to the deprecated DataType.fromCaseClassString parser.")
LegacyTypeStringParser.parse(schemaString).asInstanceOf[StructType]
}.recoverWith {
case cause: Throwable =>
logWarning(
"Failed to parse and ignored serialized Spark schema in " +
s"Parquet key-value metadata:\n\t$schemaString", cause)
Failure(cause)
}.toOption
}
}
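
// Illustrative, self-contained sketch (added for clarity; not part of Spark) of the pattern used
// by readParquetFootersInParallel above: drive a parallel collection from a dedicated
// ForkJoinPool via its tasksupport, either skip or rethrow per-element failures, and always shut
// the pool down in a finally block. Parsing integers stands in for reading footers.
object ParallelReadSketch {
  import scala.collection.parallel.ForkJoinTaskSupport
  import scala.concurrent.forkjoin.ForkJoinPool

  def parseAll(inputs: Seq[String], ignoreCorrupt: Boolean): Seq[Int] = {
    val parInputs = inputs.par
    val pool = new ForkJoinPool(8)
    parInputs.tasksupport = new ForkJoinTaskSupport(pool)
    try {
      parInputs.flatMap { s =>
        try Some(s.trim.toInt)
        catch { case e: NumberFormatException =>
          if (ignoreCorrupt) None
          else throw new IllegalArgumentException(s"Could not parse: $s", e)
        }
      }.seq
    } finally {
      pool.shutdown()
    }
  }
}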
|
SHASHANKB/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetFileFormat.scala
|
Scala
|
apache-2.0
| 28,178 |
package com.seanshubin.builder.domain
import java.nio.file.Path
class ProcessLoggerFactoryImpl(files: FilesContract,
directory: Path,
rootLogger: String => Unit) extends ProcessLoggerFactory {
override def createAction(action: String): ProcessLogger = {
val path = directory.resolve("action").resolve(action)
new ProcessLoggerImpl(files, path, rootLogger)
}
override def createProjectCommand(project: String, command: String): ProcessLogger = {
val path = directory.resolve("command").resolve(project).resolve(command)
new ProcessLoggerImpl(files, path, rootLogger)
}
}
|
SeanShubin/builder
|
domain/src/main/scala/com/seanshubin/builder/domain/ProcessLoggerFactoryImpl.scala
|
Scala
|
unlicense
| 659 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.streaming
import java.io.OutputStream
import scala.collection.mutable
import scala.reflect.ClassTag
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.util.RateLimitedOutputStream
/**
* Some utility methods for streaming operations.
*/
object StreamUtils {
private[this] val generatedRDDsMethod = classOf[DStream[_]]
.getMethod("generatedRDDs")
/** Invoke <code>DStream.getOrCompute</code> */
def getOrCompute[T: ClassTag](dStream: DStream[T],
time: Time): Option[RDD[T]] = dStream.getOrCompute(time)
def getGeneratedRDDs[T: ClassTag](dStream: DStream[T]): mutable.Map[Time,
RDD[T]] = {
// using reflection here since Spark's object is a HashMap while it is
// a ConcurrentHashMap in snappydata's version of Spark
// [TODO SPARK PR] it should be a concurrent map in Apache Spark too
generatedRDDsMethod.invoke(dStream).asInstanceOf[mutable.Map[Time, RDD[T]]]
}
def getRateLimitedOutputStream(out: OutputStream,
desiredBytesPerSec: Int): RateLimitedOutputStream = {
new RateLimitedOutputStream(out, desiredBytesPerSec)
}
}
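
// Illustrative, self-contained sketch (added for clarity; not part of the original file) of the
// reflection pattern used by getGeneratedRDDs above: look the method up once, then invoke it and
// cast the result to a type that every runtime variant satisfies.
object ReflectionSketch {
  private[this] val sizeMethod = classOf[java.util.Map[_, _]].getMethod("size")

  // Works for any java.util.Map implementation, regardless of its concrete runtime class.
  def sizeOf(map: java.util.Map[_, _]): Int =
    sizeMethod.invoke(map).asInstanceOf[Int]
}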
|
vjr/snappydata
|
core/src/main/scala/org/apache/spark/streaming/StreamUtils.scala
|
Scala
|
apache-2.0
| 1,835 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.monitoring
import com.typesafe.config.ConfigFactory
import com.webtrends.harness.component.metrics.monitoring.MonitoringSettings
import com.webtrends.harness.utils.ConfigUtil
import org.specs2.mutable.SpecificationWithJUnit
class MonitoringSettingsSpec extends SpecificationWithJUnit {
"MonitoringSettings" should {
"load properly from the reference file reference.conf" in {
val settings = MonitoringSettings(ConfigUtil.prepareSubConfig(ConfigFactory.load("reference.conf"), "wookiee-metrics"))
settings.ApplicationName must not beEmpty
}
"load properly from parsing a configuration string" in {
val settings = MonitoringSettings(ConfigUtil.prepareSubConfig(ConfigFactory.parseString( """
wookiee-metrics {
application-name = "Webtrends Harness"
metric-prefix = workstations
jmx {
enabled = false
port = 9999
}
graphite {
enabled = false
host = ""
port = 2003
interval = 5
vmmetrics=true
regex=""
}
}
"""), "wookiee-metrics"))
settings.ApplicationName must not beEmpty
}
"throw an error with an invalid configuration" in {
val config = ConfigUtil.prepareSubConfig(ConfigFactory.parseString( """
wookiee-metrics {
application-name = ""
metric-prefix = workstations
jmx {
enabled = false
port = 9999
}
graphite {
enabled = false
host = ""
port = 2003
interval = 5
vmmetrics=true
regex=""
}
}
"""), "wookiee-metrics")
MonitoringSettings(config) must throwA[IllegalArgumentException]
}
}
}
|
Webtrends/wookiee-metrics
|
src/test/scala/com/webtrends/harness/component/monitoring/MonitoringSettingsSpec.scala
|
Scala
|
apache-2.0
| 2,620 |
package akka.s3
import akka.http.scaladsl.model.HttpRequest
import scala.util.Try
case class AuthV2Presigned(req: HttpRequest, getSecretKey: String => String) extends Auth {
val hl = HeaderList.Aggregate(Seq(req.listFromQueryParams, req.listFromHeaders))
override def run = Try {
val accessKey = hl.get("AWSAccessKeyId").get
val expires = hl.get("Expires").get
val signature = hl.get("Signature").get
val alg = AuthV2Common(req, hl, getSecretKey)
val stringToSign = alg.stringToSign(expires)
val computed = alg.computeSignature(stringToSign, getSecretKey(accessKey))
require(computed == signature)
accessKey
}.toOption
}
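
// Illustrative, self-contained sketch (added for clarity; not part of this project) of the
// verification pattern above: recompute the signature from the string-to-sign and the caller's
// secret key, require that it matches the presented one, and turn any failure into None via
// Try(...).toOption. HMAC-SHA1 with Base64 encoding is an assumption about what AuthV2Common
// actually computes.
object PresignedVerificationSketch {
  import java.util.Base64
  import javax.crypto.Mac
  import javax.crypto.spec.SecretKeySpec
  import scala.util.Try

  private def sign(stringToSign: String, secretKey: String): String = {
    val mac = Mac.getInstance("HmacSHA1")
    mac.init(new SecretKeySpec(secretKey.getBytes("UTF-8"), "HmacSHA1"))
    Base64.getEncoder.encodeToString(mac.doFinal(stringToSign.getBytes("UTF-8")))
  }

  def verify(stringToSign: String,
             presentedSignature: String,
             accessKey: String,
             getSecretKey: String => String): Option[String] =
    Try {
      require(sign(stringToSign, getSecretKey(accessKey)) == presentedSignature)
      accessKey
    }.toOption
}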
|
akiradeveloper/akka-s3
|
src/main/scala/akka/s3/auth/AuthV2Presigned.scala
|
Scala
|
apache-2.0
| 664 |
package scala.meta.tests
package ast
import org.scalatest._
import java.io._
import scala.meta._
import scala.meta.dialects.Scala211
import scala.meta.parsers._
// TODO: find a way to move this test back to trees
class SerializationSuite extends FunSuite {
private def tryRoundtrip(x: Any): Unit = {
val baos = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(baos)
oos.writeObject(x)
oos.close()
baos.close()
// TODO: commented out because of an exception described in:
// https://groups.google.com/forum/#!topic/scala-user/6aLchfkzEH4
//
// val bais = new ByteArrayInputStream(baos.toByteArray)
// val ois = new ObjectInputStream(bais)
// ois.readObject.asInstanceOf[Source]
// ois.close()
// bais.close()
}
test("Input.String-based trees are serializable") {
val source = "class C".parse[Source]
tryRoundtrip(source)
}
test("Input.File-based trees are serializable") {
val file = File.createTempFile("dummy", ".scala")
file.deleteOnExit()
val writer = new BufferedWriter(new FileWriter(file))
writer.write("class C")
writer.close()
val source = file.parse[Source]
tryRoundtrip(source)
}
}
|
beni55/scalameta
|
scalameta/parsers/src/test/scala/scala/meta/tests/parsers/SerializationSuite.scala
|
Scala
|
bsd-3-clause
| 1,207 |
package sativum
import org.joda.time.LocalDate
abstract class DatedDag(_dt: String) extends Dag {
val dt = new LocalDate(_dt)
def runDated() {
endpoints.map(sativum(_))
while (!ready()) {
Thread.sleep(waitTime)
}
endpoints.flatMap(_.parents).foreach {
case d: DatedTask => d.delete()
case _ =>
}
endpoints.par.map(sativum(_).get())
}
}
|
mindfulmachines/sativum
|
src/main/scala/sativum/DatedDag.scala
|
Scala
|
mit
| 386 |
/*
* Copyright 2021 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair.db
import akka.actor.{Actor, DiagnosticActorLogging, Props}
import akka.event.Logging.MDC
import fr.acinq.bitcoin.Crypto.PublicKey
import fr.acinq.bitcoin.{ByteVector32, Satoshi}
import fr.acinq.eclair.channel.Helpers.Closing._
import fr.acinq.eclair.channel.Monitoring.{Metrics => ChannelMetrics, Tags => ChannelTags}
import fr.acinq.eclair.channel._
import fr.acinq.eclair.db.DbEventHandler.ChannelEvent
import fr.acinq.eclair.payment.Monitoring.{Metrics => PaymentMetrics, Tags => PaymentTags}
import fr.acinq.eclair.payment._
import fr.acinq.eclair.{Logs, NodeParams}
/**
* This actor sits at the interface between our event stream and the database.
*/
class DbEventHandler(nodeParams: NodeParams) extends Actor with DiagnosticActorLogging {
val auditDb: AuditDb = nodeParams.db.audit
val channelsDb: ChannelsDb = nodeParams.db.channels
context.system.eventStream.subscribe(self, classOf[PaymentSent])
context.system.eventStream.subscribe(self, classOf[PaymentFailed])
context.system.eventStream.subscribe(self, classOf[PaymentReceived])
context.system.eventStream.subscribe(self, classOf[PaymentRelayed])
context.system.eventStream.subscribe(self, classOf[TransactionPublished])
context.system.eventStream.subscribe(self, classOf[TransactionConfirmed])
context.system.eventStream.subscribe(self, classOf[ChannelErrorOccurred])
context.system.eventStream.subscribe(self, classOf[ChannelStateChanged])
context.system.eventStream.subscribe(self, classOf[ChannelClosed])
context.system.eventStream.subscribe(self, classOf[ChannelUpdateParametersChanged])
context.system.eventStream.subscribe(self, classOf[PathFindingExperimentMetrics])
override def receive: Receive = {
case e: PaymentSent =>
PaymentMetrics.PaymentAmount.withTag(PaymentTags.Direction, PaymentTags.Directions.Sent).record(e.recipientAmount.truncateToSatoshi.toLong)
PaymentMetrics.PaymentFees.withTag(PaymentTags.Direction, PaymentTags.Directions.Sent).record(e.feesPaid.truncateToSatoshi.toLong)
PaymentMetrics.PaymentParts.withTag(PaymentTags.Direction, PaymentTags.Directions.Sent).record(e.parts.length)
auditDb.add(e)
e.parts.foreach(p => channelsDb.updateChannelMeta(p.toChannelId, ChannelEvent.EventType.PaymentSent))
case _: PaymentFailed =>
PaymentMetrics.PaymentFailed.withTag(PaymentTags.Direction, PaymentTags.Directions.Sent).increment()
case e: PaymentReceived =>
PaymentMetrics.PaymentAmount.withTag(PaymentTags.Direction, PaymentTags.Directions.Received).record(e.amount.truncateToSatoshi.toLong)
PaymentMetrics.PaymentParts.withTag(PaymentTags.Direction, PaymentTags.Directions.Received).record(e.parts.length)
auditDb.add(e)
e.parts.foreach(p => channelsDb.updateChannelMeta(p.fromChannelId, ChannelEvent.EventType.PaymentReceived))
case e: PaymentRelayed =>
PaymentMetrics.PaymentAmount
.withTag(PaymentTags.Direction, PaymentTags.Directions.Relayed)
.withTag(PaymentTags.Relay, PaymentTags.RelayType(e))
.record(e.amountIn.truncateToSatoshi.toLong)
PaymentMetrics.PaymentFees
.withTag(PaymentTags.Direction, PaymentTags.Directions.Relayed)
.withTag(PaymentTags.Relay, PaymentTags.RelayType(e))
.record((e.amountIn - e.amountOut).truncateToSatoshi.toLong)
e match {
case TrampolinePaymentRelayed(_, incoming, outgoing, _, _, _) =>
PaymentMetrics.PaymentParts.withTag(PaymentTags.Direction, PaymentTags.Directions.Received).record(incoming.length)
PaymentMetrics.PaymentParts.withTag(PaymentTags.Direction, PaymentTags.Directions.Sent).record(outgoing.length)
incoming.foreach(p => channelsDb.updateChannelMeta(p.channelId, ChannelEvent.EventType.PaymentReceived))
outgoing.foreach(p => channelsDb.updateChannelMeta(p.channelId, ChannelEvent.EventType.PaymentSent))
case ChannelPaymentRelayed(_, _, _, fromChannelId, toChannelId, _) =>
channelsDb.updateChannelMeta(fromChannelId, ChannelEvent.EventType.PaymentReceived)
channelsDb.updateChannelMeta(toChannelId, ChannelEvent.EventType.PaymentSent)
}
auditDb.add(e)
case e: TransactionPublished =>
log.info(s"paying mining fee=${e.miningFee} for txid=${e.tx.txid} desc=${e.desc}")
auditDb.add(e)
case e: TransactionConfirmed => auditDb.add(e)
case e: ChannelErrorOccurred =>
// the first pattern-matching level ignores some errors; the second distinguishes between different kinds of errors
e.error match {
case LocalError(_: CannotAffordFees) => () // will be thrown at each new block if our balance is too low to update the commitment fee
case _ =>
e.error match {
case LocalError(_) => ChannelMetrics.ChannelErrors.withTag(ChannelTags.Origin, ChannelTags.Origins.Local).withTag(ChannelTags.Fatal, value = e.isFatal).increment()
case RemoteError(_) => ChannelMetrics.ChannelErrors.withTag(ChannelTags.Origin, ChannelTags.Origins.Remote).increment()
}
auditDb.add(e)
}
case e: ChannelStateChanged =>
// NB: order matters!
e match {
case ChannelStateChanged(_, channelId, _, remoteNodeId, WAIT_FOR_FUNDING_LOCKED, NORMAL, Some(commitments: Commitments)) =>
ChannelMetrics.ChannelLifecycleEvents.withTag(ChannelTags.Event, ChannelTags.Events.Created).increment()
val event = ChannelEvent.EventType.Created
auditDb.add(ChannelEvent(channelId, remoteNodeId, commitments.capacity, commitments.localParams.isFunder, !commitments.announceChannel, event))
channelsDb.updateChannelMeta(channelId, event)
case ChannelStateChanged(_, _, _, _, WAIT_FOR_INIT_INTERNAL, _, _) =>
case ChannelStateChanged(_, channelId, _, _, OFFLINE, SYNCING, _) =>
channelsDb.updateChannelMeta(channelId, ChannelEvent.EventType.Connected)
case ChannelStateChanged(_, _, _, _, _, CLOSING, _) =>
ChannelMetrics.ChannelLifecycleEvents.withTag(ChannelTags.Event, ChannelTags.Events.Closing).increment()
case _ => ()
}
case e: ChannelClosed =>
ChannelMetrics.ChannelLifecycleEvents.withTag(ChannelTags.Event, ChannelTags.Events.Closed).increment()
val event = ChannelEvent.EventType.Closed(e.closingType)
auditDb.add(ChannelEvent(e.channelId, e.commitments.remoteParams.nodeId, e.commitments.commitInput.txOut.amount, e.commitments.localParams.isFunder, !e.commitments.announceChannel, event))
channelsDb.updateChannelMeta(e.channelId, event)
case u: ChannelUpdateParametersChanged =>
auditDb.addChannelUpdate(u)
case m: PathFindingExperimentMetrics =>
auditDb.addPathFindingExperimentMetrics(m)
}
override def unhandled(message: Any): Unit = log.warning(s"unhandled msg=$message")
override def mdc(currentMessage: Any): MDC = {
currentMessage match {
case msg: TransactionPublished => Logs.mdc(remoteNodeId_opt = Some(msg.remoteNodeId), channelId_opt = Some(msg.channelId))
case msg: TransactionConfirmed => Logs.mdc(remoteNodeId_opt = Some(msg.remoteNodeId), channelId_opt = Some(msg.channelId))
case _ => Logs.mdc()
}
}
}
object DbEventHandler {
def props(nodeParams: NodeParams): Props = Props(new DbEventHandler(nodeParams))
// @formatter:off
case class ChannelEvent(channelId: ByteVector32, remoteNodeId: PublicKey, capacity: Satoshi, isFunder: Boolean, isPrivate: Boolean, event: ChannelEvent.EventType)
object ChannelEvent {
sealed trait EventType { def label: String }
object EventType {
object Created extends EventType { override def label: String = "created" }
object Connected extends EventType { override def label: String = "connected" }
object PaymentSent extends EventType { override def label: String = "sent" }
object PaymentReceived extends EventType { override def label: String = "received" }
case class Closed(closingType: ClosingType) extends EventType {
override def label: String = closingType match {
case _: MutualClose => "mutual"
case _: LocalClose => "local"
case _: CurrentRemoteClose => "remote"
case _: NextRemoteClose => "remote"
case _: RecoveryClose => "recovery"
case _: RevokedClose => "revoked"
}
}
}
}
// @formatter:on
}
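
// Illustrative, self-contained sketch (added for clarity; not part of eclair) of the mechanism
// DbEventHandler relies on: subscribe to the actor system's event stream in the constructor and
// react to published events in receive. The Ping event and the actor names are hypothetical.
object EventStreamSketch {
  import akka.actor.{Actor, ActorSystem, Props}

  final case class Ping(n: Int)

  class PingLogger extends Actor {
    context.system.eventStream.subscribe(self, classOf[Ping])
    override def receive: Receive = { case Ping(n) => println(s"saw ping $n") }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("event-stream-sketch")
    system.actorOf(Props(new PingLogger), "ping-logger")
    system.eventStream.publish(Ping(1)) // delivered asynchronously to every subscriber of Ping
    Thread.sleep(100)                   // crude: give the asynchronous delivery a moment
    system.terminate()
  }
}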
|
ACINQ/eclair
|
eclair-core/src/main/scala/fr/acinq/eclair/db/DbEventHandler.scala
|
Scala
|
apache-2.0
| 9,019 |
/*
* This file is part of EasyForger which is released under GPLv3 License.
* See file LICENSE.txt or go to http://www.gnu.org/licenses/gpl-3.0.en.html for full license details.
*/
package com.easyforger.samples.items
import com.easyforger.items.EFItemSword
import net.minecraft.entity.EntityLivingBase
import net.minecraft.init.MobEffects
import net.minecraft.item.Item.ToolMaterial
import net.minecraft.item.ItemStack
import net.minecraft.potion.PotionEffect
class ItemVenomSword(modId: String) extends EFItemSword(modId, "venomsword", ToolMaterial.IRON) {
val poisonDuration = 3 * 20
val poisonLevel = 1
override def hitEntity(stack: ItemStack, target: EntityLivingBase, attacker: EntityLivingBase): Boolean = {
// important: this won't work against Undead monsters - see `EntityLivingBase.isPotionApplicable`
target.addPotionEffect(new PotionEffect(MobEffects.POISON, poisonDuration, poisonLevel, false, true))
super.hitEntity(stack, target, attacker)
}
}
|
easyforger/easyforger
|
mods/src/main/scala/com/easyforger/samples/items/ItemVenomSword.scala
|
Scala
|
gpl-3.0
| 987 |
package objektwerks.app
import java.time.LocalTime
import java.util.concurrent.Executors
import cats.data.Kleisli
import cats.effect._
import io.circe.generic.auto._
import io.circe.syntax._
import org.http4s._
import org.http4s.circe._
import org.http4s.dsl.impl.Root
import org.http4s.dsl.io._
import org.http4s.implicits._
import org.http4s.server.Router
import org.http4s.server.blaze._
import org.http4s.Status.Successful
import scala.concurrent.ExecutionContext
case class Now(time: String = LocalTime.now.toString)
object Now {
implicit val nowDecoder = jsonOf[IO, Now]
}
object Headers {
val noCacheHeader = Header("Cache-Control", "no-cache, no-store, must-revalidate")
def addHeader(route: HttpRoutes[IO], header: Header): HttpRoutes[IO] = Kleisli { request: Request[IO] =>
route(request).map {
case Successful(response) => response.putHeaders(header)
case response => response
}
}
}
object Routes {
import Headers._
val blockingExecutionContext = Blocker.liftExecutionContext( ExecutionContext.fromExecutorService( Executors.newFixedThreadPool(1) ) )
implicit val contextShift: ContextShift[IO] = IO.contextShift( ExecutionContext.global )
val indexRoute = HttpRoutes.of[IO] {
case request @ GET -> Root => StaticFile.fromResource("/index.html", blockingExecutionContext, Some(request))
.getOrElseF( NotFound() )
}
val indexRouteWithNoCacheHeader = addHeader(indexRoute, noCacheHeader)
val resourceRoute = HttpRoutes.of[IO] {
case request @ GET -> Root / path if List(".ico", ".css", ".js")
.exists(path.endsWith) => StaticFile.fromResource("/" + path, blockingExecutionContext, Some(request))
.getOrElseF( NotFound() )
}
val resourceRouteWithNoCacheHeader = addHeader(resourceRoute, noCacheHeader)
val nowRoute = HttpRoutes.of[IO] {
case GET -> Root / "now" => Ok( Now().asJson )
}
val routes = Router("" -> indexRouteWithNoCacheHeader,
"/" -> resourceRouteWithNoCacheHeader,
"/api/v1" -> nowRoute).orNotFound
}
object Http4sApp extends IOApp {
def run(args: List[String]): IO[ExitCode] =
BlazeServerBuilder[IO]( executionContext )
.bindHttp(7777, "localhost")
.withHttpApp( Routes.routes )
.serve
.compile
.drain
.as( ExitCode.Success )
}
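
// Illustrative smoke test (added for clarity; it assumes the http4s 0.21 / cats-effect 2 APIs
// already used above). It exercises the router directly, without binding a server. Note that
// Routes eagerly creates a small thread pool, so a standalone run may need sys.exit(0) to stop.
object Http4sAppSketch {
  def main(args: Array[String]): Unit = {
    val response = Routes.routes.run(Request[IO](Method.GET, uri"/api/v1/now")).unsafeRunSync()
    println(response.status) // expected: 200 OK, with a JSON body such as {"time":"12:34:56.789"}
  }
}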
|
objektwerks/typelevel
|
src/main/scala/objektwerks/app/Http4sApp.scala
|
Scala
|
apache-2.0
| 2,373 |
package com.prezi.haskell.gradle.extension.impl
import java.io.File
import com.prezi.haskell.gradle.ApiHelper._
import com.prezi.haskell.gradle.Names
import com.prezi.haskell.gradle.extension._
import com.prezi.haskell.gradle.external.{Git, HaskellTools}
import com.prezi.haskell.gradle.io.packers.GradleZipPacker
import com.prezi.haskell.gradle.model.{GHC802, StackYamlWriter}
import com.prezi.haskell.gradle.model.sandboxstore.ProjectSandboxStore
import org.gradle.api.internal.file.FileResolver
import org.gradle.internal.reflect.Instantiator
import resource._
trait HaskellProjectImpl {
this: ProjectExtender =>
protected def instantiator: Instantiator
protected def fileResolver: FileResolver
val sandFixPath: File = project.getBuildDir </> "sandfix"
val stackToolPath: File = project.getBuildDir </> "stack-tooling"
// Helpers
protected def addFields(): Unit = {
val tools = new HaskellTools(project.exec, getStackToolPath())
val unpacker = new GradleZipPacker(project)
val sandboxStore = new ProjectSandboxStore(project.getRootProject, Some(sandFixPath), unpacker, getField[HaskellExtension]("haskell"), tools)
addField("haskellTools", tools)
addField("sandboxStore", sandboxStore)
val git = new Git(project.exec)
addField("git", git)
}
protected def addConfigurations(): Unit = {
val mainConfig = addConfiguration(Names.mainConfiguration)
val testConfig = addConfiguration(Names.testConfiguration)
testConfig.extendsFrom(mainConfig)
}
protected def addSandboxTasks(): Unit = {
new SandboxSupport(project, sandFixPath)
}
protected def addCompilation(): Unit = {
new HaskellCompilationSupport(project, instantiator, fileResolver)
}
protected def addArtifacts(): Unit = {
new ZippedSandboxArtifactSupport(project)
}
protected def addStackSupport(): Unit = {
new StackSupport(project)
}
protected def registerExtension(): Unit = {
createField[HaskellExtension]("haskell", instantiator, project)
}
private def getStackToolPath(): File = {
if (stackToolPath.exists() && (stackToolPath </> "stack.yaml").exists()) {
stackToolPath
} else {
stackToolPath.mkdirs()
for (yaml <- managed(new StackYamlWriter(stackToolPath </> "stack.yaml"))) {
yaml.ghcVersion(haskellExtension.parsedGHCVersion)
}
stackToolPath
}
}
}
|
prezi/gradle-haskell-plugin
|
src/main/scala/com/prezi/haskell/gradle/extension/impl/HaskellProjectImpl.scala
|
Scala
|
apache-2.0
| 2,385 |
/*
* Copyright 2017 David Schmitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.uport.recipe.model
sealed trait Difficulty
object Difficulty {
case object Easy extends Difficulty
case object Medium extends Difficulty
case object Hard extends Difficulty
}
|
dschmitz/recipe-service
|
src/main/scala/io/uport/recipe/model/Difficulty.scala
|
Scala
|
apache-2.0
| 796 |
package com.github.rgafiyatullin.creek_xml.dom_query
import com.github.rgafiyatullin.creek_xml.dom.Node
import scala.collection.immutable.Queue
case class Path(predicates: Queue[Predicate]) {
def headOption: Option[Predicate] = predicates.headOption
def isLast: Boolean = predicates.nonEmpty && predicates.tail.isEmpty
def matches(node: Node): Boolean = predicates.head(node)
def next: Path = Path(predicates.tail)
def /(predicate: Predicate): Path =
Path(predicates = predicates.enqueue(predicate))
}
|
RGafiyatullin/creek-xml
|
src/main/scala/com/github/rgafiyatullin/creek_xml/dom_query/Path.scala
|
Scala
|
mit
| 520 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.exec
import org.apache.flink.runtime.operators.DamBehavior
import org.apache.flink.table.api.BatchTableEnvironment
import org.apache.flink.table.util.Logging
/**
* Base class for batch ExecNode.
*/
trait BatchExecNode[T] extends ExecNode[BatchTableEnvironment, T] with Logging {
/**
* Returns [[DamBehavior]] of this node.
*/
def getDamBehavior: DamBehavior
}
|
shaoxuan-wang/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/exec/BatchExecNode.scala
|
Scala
|
apache-2.0
| 1,229 |
package spire
package math
import org.scalatest.FunSuite
import org.scalatest.Matchers
import org.scalacheck.Arbitrary._
import org.scalatest._
import prop._
class NumberPropertiesTest extends PropSpec with Matchers with GeneratorDrivenPropertyChecks {
property("Number.apply(Long)") {
forAll { (n: Long) => Number(n) shouldBe n }
forAll { (n: Long) => Number(n) shouldBe SafeLong(n) }
// we need to do (n - 1).abs to ensure we don't get a negative number
forAll { (n: Long) => Number((n - 1).abs) shouldBe Natural((n - 1).abs) }
}
property("Number.apply(BigInt)") {
forAll { (n: BigInt) => Number(n) shouldBe n }
forAll { (n: BigInt) => Number(n) shouldBe SafeLong(n) }
forAll { (n: BigInt) => Number(n.abs) shouldBe Natural(n.abs) }
}
property("Number.apply(BigDecimal)") {
forAll { (n: BigDecimal) => Number(n) shouldBe n }
}
property("Number.apply(Rational)") {
forAll { (n: BigInt, d0: BigInt) =>
val d = if (d0 == 0) BigInt(1) else d0
val r = Rational(n, d)
Number(r) shouldBe r
}
}
def bothEq[A, B](a: A, b: B) = {
a shouldBe b
b shouldBe a
}
property("RationalNumber == Int") {
forAll { (n: Int) => bothEq(Number(Rational(n)), n) }
}
property("RationalNumber == Long") {
forAll { (n: Long) => bothEq(Number(Rational(n)), n) }
}
property("RationalNumber == Double") {
forAll { (n: Double) => bothEq(Number(Rational(n)), n) }
}
property("RationalNumber == BigInt") {
forAll { (n: BigInt) => Number(Rational(n)) shouldBe n }
}
property("Long + Long") {
forAll { (x: Long, y: Long) =>
val lx = Number(x)
val ly = Number(y)
val lz = lx + ly
val bx = Number(BigInt(x))
val by = Number(BigInt(y))
lz shouldBe BigInt(x) + BigInt(y)
bx + by shouldBe lz
lx + by shouldBe lz
bx + ly shouldBe lz
}
}
}
// These tests are mostly about weird interplay between Long/Double/Number.
class NumberTest extends FunSuite {
test("create numbers") {
Number(3.toByte)
Number(3.toShort)
Number(3)
Number(3L)
Number(3.0F)
Number(3.0)
Number("333333333333333333333333333333")
Number("99.253895895395839583958953895389538958395839583958958953")
}
test("doesn't allow sentinel values") {
intercept[IllegalArgumentException] { Number(Double.NaN) }
intercept[IllegalArgumentException] { Number(Double.PositiveInfinity) }
intercept[IllegalArgumentException] { Number(Double.NegativeInfinity) }
}
test("operations") {
assert(Number(3) + Number(4) === Number(7))
// since 30.0 can be represented as a SafeLong, we get an IntNumber
assert(Number(4) ** Number(30.0) === Number("1152921504606846976"))
// since 30.5 can't, we get a DoubleNumber
assert(Number(4) ** Number(30.5) === FloatNumber(2.305843009213694e18))
assert(Number(100) ** Number(200.0) === Number(100) ** Number(200))
assert(Number(100) ** Number(200) === Number("10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"))
// DecimalNumber is honest when its roots aren't perfect
val z1 = Number("81") ** Number("0.5") - Number("9.0")
assert(z1 != 0)
assert(z1.abs < 0.00000000000001)
//assert(Number("81").sqrt - Number("9") == Number(0))
// FloatNumber lies like a fox
val z2 = Number(81.0) ** Number(0.5) - Number(9.0)
assert(z2 == 0)
// tests near Long.MaxValue are nice because the floating point coverage is
// sparser and fewer of the Long values can be exactly represented.
val m = Long.MaxValue
val d = m.toDouble
// we do want to be able to see if Long values are exactly equal to
// floating point values. this will only be possible when the Long value
// can be exactly represented as a floating-point value (e.g. when
// converting from a Double into a Long).
assert(Number(d.toLong) === Number(d))
// we don't want to coerce Long into Double when doing this test. Long
// values will remain exact unless explicitly combined with inexact
// floating-point values.
val n1 = Number(m - 1L)
val n2 = Number(m.toDouble - 1.0)
assert(n1 != n2)
}
}
|
tixxit/spire
|
tests/src/test/scala/spire/math/NumberTest.scala
|
Scala
|
mit
| 4,522 |
package doodle
package golden
import doodle.algebra.{Algebra, Picture}
import doodle.java2d._
import doodle.effect.Writer._
import java.awt.image.BufferedImage
import javax.imageio.ImageIO
import munit._
import doodle.effect.Writer
trait Golden { self: FunSuite =>
val goldenDir = "golden/src/test/golden"
def pixelAbsoluteError(a: Int, b: Int): Int = {
var error = 0
var i = 0
while (i < 4) {
val shift = i * 8
val mask = 0x000000FF << shift
// Use a logical shift so the alpha channel (the top byte) is not sign-extended
val aValue = (a & mask) >>> shift
val bValue = (b & mask) >>> shift
error = error + Math.abs(aValue - bValue)
i = i + 1
}
error
}
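// Worked example (added for clarity): for a = 0xFF102030 and b = 0xFF112233 the per-channel
// absolute differences are 0 (alpha), 1 (red), 2 (green) and 3 (blue), so the result is 6.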
def absoluteError(
actual: BufferedImage,
golden: BufferedImage
): (Double, BufferedImage) = {
val diff = new BufferedImage(
actual.getWidth(),
actual.getHeight(),
BufferedImage.TYPE_INT_ARGB
)
// Sum of absolute error over all pixels and channels
var error = 0.0
var x = 0
while (x < actual.getWidth()) {
var y = 0
while (y < actual.getHeight()) {
val pixelError = pixelAbsoluteError(
actual.getRGB(x, y),
golden.getRGB(x, y)
)
// Convert pixelError to black and white value for easier rendering
val err =
(256 * ((pixelError.toDouble) / (Int.MaxValue.toDouble))).toInt
val pixel = (err << 16) | (err << 8) | err
diff.setRGB(x, y, pixel)
error = error + pixelError
y = y + 1
}
x = x + 1
}
(error, diff)
}
}
trait GoldenImage extends Golden { self: FunSuite =>
import doodle.image._
import doodle.image.syntax._
import doodle.syntax._
def assertGoldenImage(name: String, image: Image)(implicit loc: Location) = {
import java.io.File
val file = new File(s"${goldenDir}/${name}.png")
if (file.exists()) {
val temp = new File(s"${goldenDir}/${name}.tmp.png")
try {
image.write[Png](temp)
val actual = ImageIO.read(temp)
val expected = ImageIO.read(file)
assertEquals(
actual.getHeight(),
expected.getHeight(),
s"Heights differ"
)
assertEquals(actual.getWidth(), expected.getWidth(), s"Widths differ")
// Fairly arbitrary threshold allowing a 4-bit difference in each component of each pixel
val threshold = actual.getHeight() * actual.getWidth() * 4 * 16
val (error, diff) = absoluteError(actual, expected)
val (_, diff64) = diff.toPicture[Algebra, Drawing].base64[Png]()
assert(clue(error) < clue(threshold), diff64)
} finally {
if (temp.exists()) temp.delete()
()
}
} else {
println(s"Golden: ${file} does not exist. Creating golden image.")
image.write[Png](file)
}
}
def testImage(name: String)(image: Image)(implicit loc: Location) =
test(name) {
assertGoldenImage(name, image)
}
}
trait GoldenPicture extends Golden { self: FunSuite =>
import doodle.syntax._
def assertGoldenPicture[Alg[x[_]] <: Algebra[x], F[_]](
name: String,
picture: Picture[Alg, F, Unit],
frame: Frame = Frame.fitToPicture()
)(implicit loc: Location, w: Writer[Alg, F, Frame, Png]) = {
import java.io.File
val file = new File(s"${goldenDir}/${name}.png")
if (file.exists()) {
val temp = new File(s"${goldenDir}/${name}.tmp.png")
try {
picture.write[Png](temp, frame)
val actual = ImageIO.read(temp)
val expected = ImageIO.read(file)
assertEquals(
actual.getHeight(),
expected.getHeight(),
s"Heights differ"
)
assertEquals(actual.getWidth(), expected.getWidth(), s"Widths differ")
// Fairly arbitrary threshold, 16 times looser than the image threshold above, allowing an average 8-bit difference in each component of each pixel
val threshold = actual.getHeight() * actual.getWidth() * 4 * 16 * 16
val (error, diff) = absoluteError(actual, expected)
val (_, diff64) = diff.toPicture[Algebra, Drawing].base64[Png]()
assert(clue(error) < clue(threshold), diff64)
} finally {
if (temp.exists()) temp.delete()
()
}
} else {
println(s"Golden: ${file} does not exist. Creating golden image.")
picture.write[Png](file, frame)
}
}
def testPicture[Alg[x[_]] <: Algebra[x], F[_], A](name: String)(
picture: Picture[Alg, F, Unit]
)(implicit loc: Location, w: Writer[Alg, F, Frame, Png]) =
test(name) {
assertGoldenPicture(name, picture)
}
def testPictureWithFrame[Alg[x[_]] <: Algebra[x], F[_], A](name: String)(frame: Frame)(
picture: Picture[Alg, F, Unit]
)(implicit loc: Location, w: Writer[Alg, F, Frame, Png]) =
test(name) {
assertGoldenPicture(name, picture, frame)
}
}
|
underscoreio/doodle
|
golden/src/test/scala/doodle/golden/Golden.scala
|
Scala
|
apache-2.0
| 4,759 |
package dates
import io.circe.{Decoder, Encoder}
/**
* A typeclass around dates that allow dates to be handled in the same way in the JVM and in JS.
**/
trait DateLike[D] {
/**
* Serialise this date.
* @param d The date to serialise.
* @return An ISO 8601 representation of this date.
*/
def serialise(d: D): String
/**
* Try to deserialise a date.
* @param s An string representation of this date.
* @return Left if the string is not in ISO 8601 date format, or the date the string represents otherwise.
*/
def deserialise(s: String): Either[String, D]
/**
* Compare two dates.
* @param d1 The first date.
* @param d2 The second date.
* @return True if d1 < d2, false otherwise.
*/
def lt(d1: D, d2: D): Boolean
/**
* Format a date.
* @param d The date to format.
* @param fmt The format string to use.
* @return The date formatted using the format string.
*/
def format(d: D, fmt: String): String
}
/**
* Implicits for DateLike.
*/
object DateLike {
implicit def dateLikeEncoder[D](implicit ev: DateLike[D]): Encoder[D] =
Encoder.encodeString.contramap(d => ev.serialise(d))
implicit def dateLikeDecoder[D](implicit ev: DateLike[D]): Decoder[D] =
Decoder.decodeString.emap(d => ev.deserialise(d))
implicit def dateLikeOrdering[D](implicit ev: DateLike[D]): Ordering[D] = Ordering.fromLessThan(ev.lt)
}
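
// Illustrative sketch (added for clarity; not part of the original file): one possible JVM-side
// instance backed by java.time.LocalDate. The error message wording is an assumption.
object LocalDateLikeSketch {
  import java.time.LocalDate
  import java.time.format.DateTimeFormatter
  import scala.util.Try

  implicit val localDateLike: DateLike[LocalDate] = new DateLike[LocalDate] {
    def serialise(d: LocalDate): String = d.toString // already ISO 8601, e.g. "2021-06-01"
    def deserialise(s: String): Either[String, LocalDate] =
      Try(LocalDate.parse(s)).toOption.toRight(s"Not an ISO 8601 date: $s")
    def lt(d1: LocalDate, d2: LocalDate): Boolean = d1.isBefore(d2)
    def format(d: LocalDate, fmt: String): String = d.format(DateTimeFormatter.ofPattern(fmt))
  }
}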
|
unclealex72/west-ham-calendar
|
shared/src/main/scala/dates/DateLike.scala
|
Scala
|
apache-2.0
| 1,423 |
package main.scala.HackerRank.WeekOfCode36
import java.io.{ByteArrayInputStream, IOException, InputStream, PrintWriter}
import java.util.InputMismatchException
import scala.collection.generic.CanBuildFrom
import scala.language.higherKinds
import scala.reflect.ClassTag
/**
* Copyright (c) 2017 A. Roberto Fischer
*
* @author A. Roberto Fischer <[email protected]> on 7/21/2017
*/
private[this] object RevisedRussianRoulette {
import Reader._
import Writer._
private[this] val TEST_INPUT: Option[String] = None
//------------------------------------------------------------------------------------------//
// Solution
//------------------------------------------------------------------------------------------//
private[this] def solve(): Unit = {
val n = next[Int]()
val doors = next[Int, Array](n)
var max = 0
var min = 0
for (i <- doors.indices) {
if (doors(i) == 1) {
max += 1
min += 1
doors(i) = 0
if (i < doors.indices.last && doors(i + 1) == 1) {
max += 1
doors(i + 1) = 0
}
}
}
println(s"$min $max")
}
//------------------------------------------------------------------------------------------//
// Run
//------------------------------------------------------------------------------------------//
@throws[Exception]
def main(args: Array[String]): Unit = {
val s = System.currentTimeMillis
solve()
flush()
if (TEST_INPUT.isDefined) System.out.println(System.currentTimeMillis - s + "ms")
}
//------------------------------------------------------------------------------------------//
// Input
//------------------------------------------------------------------------------------------//
private[this] final object Reader {
private[this] implicit val in: InputStream = TEST_INPUT.fold(System.in)(s => new ByteArrayInputStream(s.getBytes))
def next[T: ClassTag](): T = {
implicitly[ClassTag[T]].runtimeClass match {
case java.lang.Integer.TYPE => nextInt().asInstanceOf[T]
case java.lang.Long.TYPE => nextLong().asInstanceOf[T]
case java.lang.Double.TYPE => nextDouble().asInstanceOf[T]
case java.lang.Character.TYPE => nextChar().asInstanceOf[T]
case s if Class.forName("java.lang.String") == s => nextString().asInstanceOf[T]
case b if Class.forName("scala.math.BigInt") == b => BigInt(nextString()).asInstanceOf[T]
case b if Class.forName("scala.math.BigDecimal") == b => BigDecimal(nextString()).asInstanceOf[T]
case _ => throw new RuntimeException("Unsupported input type.")
}
}
def next[T, Coll[_]](reader: => T, n: Int)
(implicit cbf: CanBuildFrom[Coll[T], T, Coll[T]]): Coll[T] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder += reader
}
builder.result()
}
def nextWithIndex[T, Coll[_]](reader: => T, n: Int)
(implicit cbf: CanBuildFrom[Coll[(T, Int)], (T, Int), Coll[(T, Int)]]): Coll[(T, Int)] = {
val builder = cbf()
builder.sizeHint(n)
for (i <- 0 until n) {
builder += ((reader, i))
}
builder.result()
}
def next[T: ClassTag, Coll[_]](n: Int)
(implicit cbf: CanBuildFrom[Coll[T], T, Coll[T]]): Coll[T] = {
val builder = cbf()
builder.sizeHint(n)
for (_ <- 0 until n) {
builder += next[T]()
}
builder.result()
}
def nextWithIndex[T: ClassTag, Coll[_]](n: Int)
(implicit cbf: CanBuildFrom[Coll[(T, Int)], (T, Int), Coll[(T, Int)]]): Coll[(T, Int)] = {
val builder = cbf()
builder.sizeHint(n)
for (i <- 0 until n) {
builder += ((next[T](), i))
}
builder.result()
}
def nextMultiLine[T: ClassTag](n: Int, m: Int): Seq[Seq[T]] = {
val map = Vector.newBuilder[Vector[T]]
var i = 0
while (i < n) {
map += next[T, Vector](m)
i += 1
}
map.result()
}
private[this] def nextDouble(): Double = nextString().toDouble
private[this] def nextChar(): Char = skip.toChar
private[this] def nextString(): String = {
var b = skip
val sb = new java.lang.StringBuilder
while (!isSpaceChar(b)) {
sb.appendCodePoint(b)
b = readByte().toInt
}
sb.toString
}
private[this] def nextInt(): Int = {
var num = 0
var b = 0
var minus = false
while ( {
b = readByte().toInt
b != -1 && !((b >= '0' && b <= '9') || b == '-')
}) {}
if (b == '-') {
minus = true
b = readByte().toInt
}
while (true) {
if (b >= '0' && b <= '9') {
num = num * 10 + (b - '0')
} else {
if (minus) return -num else return num
}
b = readByte().toInt
}
throw new IOException("Read Int")
}
private[this] def nextLong(): Long = {
var num = 0L
var b = 0
var minus = false
while ( {
b = readByte().toInt
b != -1 && !((b >= '0' && b <= '9') || b == '-')
}) {}
if (b == '-') {
minus = true
b = readByte().toInt
}
while (true) {
if (b >= '0' && b <= '9') {
num = num * 10 + (b - '0')
} else {
if (minus) return -num else return num
}
b = readByte().toInt
}
throw new IOException("Read Long")
}
private[this] val inputBuffer = new Array[Byte](1024)
private[this] var lenBuffer = 0
private[this] var ptrBuffer = 0
private[this] def readByte()(implicit in: java.io.InputStream): Byte = {
if (lenBuffer == -1) throw new InputMismatchException
if (ptrBuffer >= lenBuffer) {
ptrBuffer = 0
try {
lenBuffer = in.read(inputBuffer)
} catch {
case _: IOException =>
throw new InputMismatchException
}
if (lenBuffer <= 0) return -1
}
inputBuffer({
ptrBuffer += 1
ptrBuffer - 1
})
}
private[this] def isSpaceChar(c: Int) = !(c >= 33 && c <= 126)
private[this] def skip = {
var b = 0
while ( {
b = readByte().toInt
b != -1 && isSpaceChar(b)
}) {}
b
}
}
//------------------------------------------------------------------------------------------//
// Output
//------------------------------------------------------------------------------------------//
private[this] final object Writer {
private[this] val out = new PrintWriter(System.out)
def flush(): Unit = out.flush()
def println(x: Any): Unit = out.println(x)
def print(x: Any): Unit = out.print(x)
}
}
|
robertoFischer/hackerrank
|
src/main/scala/HackerRank/WeekOfCode36/RevisedRussianRoulette.scala
|
Scala
|
mit
| 6,867 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.dataload
import java.io.{BufferedWriter, File, FileWriter, FilenameFilter}
import org.apache.spark.sql.Row
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.test.util.QueryTest
class TestBatchSortDataLoad extends QueryTest with BeforeAndAfterAll {
var filePath: String = _
def buildTestData() = {
filePath = s"${integrationPath}/spark-common-test/target/big.csv"
val file = new File(filePath)
val writer = new BufferedWriter(new FileWriter(file))
writer.write("c1,c2,c3, c4, c5, c6, c7, c8, c9, c10")
writer.newLine()
for(i <- 0 until 100000) {
writer.write("a" + i%1000 + "," +
"b" + i%1000 + "," +
"c" + i%1000 + "," +
"d" + i%1000 + "," +
"e" + i%1000 + "," +
"f" + i%1000 + "," +
i%1000 + "," +
i%1000 + "," +
i%1000 + "," +
i%1000 + "\\n")
if ( i % 10000 == 0) {
writer.flush()
}
}
writer.close()
}
def dropTable() = {
sql("DROP TABLE IF EXISTS carbon_load1")
sql("DROP TABLE IF EXISTS carbon_load2")
sql("DROP TABLE IF EXISTS carbon_load3")
sql("DROP TABLE IF EXISTS carbon_load4")
sql("DROP TABLE IF EXISTS carbon_load5")
sql("DROP TABLE IF EXISTS carbon_load6")
}
override def beforeAll {
dropTable
buildTestData
}
test("test batch sort load by passing option to load command") {
sql(
"""
| CREATE TABLE carbon_load1(c1 string, c2 string, c3 string, c4 string, c5 string,
| c6 string, c7 int, c8 int, c9 int, c10 int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('dictionary_include'='c1,c2,c3,c4,c5,c6')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(100000)))
assert(getIndexfileCount("carbon_load1") == 5, "Something wrong in batch sort")
}
test("test batch sort load by passing option to load command and compare with normal load") {
sql(
"""
| CREATE TABLE carbon_load2(c1 string, c2 string, c3 string, c4 string, c5 string,
| c6 string, c7 int, c8 int, c9 int, c10 int)
| STORED BY 'org.apache.carbondata.format'
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load2 ")
checkAnswer(sql("select * from carbon_load1 where c1='a1' order by c1"),
sql("select * from carbon_load2 where c1='a1' order by c1"))
}
test("test batch sort load by passing option and compaction") {
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load1 " +
s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
sql("alter table carbon_load1 compact 'major'")
Thread.sleep(4000)
checkAnswer(sql("select count(*) from carbon_load1"), Seq(Row(400000)))
assert(getIndexfileCount("carbon_load1", "0.1") == 1, "Something wrong in compaction after batch sort")
}
test("test batch sort load by passing option in one load and with out option in other load and then do compaction") {
sql(
"""
| CREATE TABLE carbon_load5(c1 string, c2 string, c3 string, c4 string, c5 string,
| c6 string, c7 int, c8 int, c9 int, c10 int)
| STORED BY 'org.apache.carbondata.format'
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 " +
s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 ")
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 " +
s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1')")
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load5 ")
checkAnswer(sql("select count(*) from carbon_load5"), Seq(Row(400000)))
checkAnswer(sql("select * from carbon_load1 where c1='a1' order by c1"),
sql("select * from carbon_load5 where c1='a1' order by c1"))
sql("alter table carbon_load5 compact 'major'")
Thread.sleep(4000)
assert(getIndexfileCount("carbon_load5", "0.1") == 1,
"Something wrong in compaction after batch sort")
checkAnswer(sql("select * from carbon_load1 where c1='a1' order by c1"),
sql("select * from carbon_load5 where c1='a1' order by c1"))
}
test("test batch sort load by passing option with single pass") {
sql(
"""
| CREATE TABLE carbon_load3(c1 string, c2 string, c3 string, c4 string, c5 string,
| c6 string, c7 int, c8 int, c9 int, c10 int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('dictionary_include'='c1,c2,c3,c4,c5,c6')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load3 " +
s"OPTIONS('sort_scope'='batch_sort', 'batch_sort_size_inmb'='1', 'single_pass'='true')")
checkAnswer(sql("select count(*) from carbon_load3"), Seq(Row(100000)))
assert(getIndexfileCount("carbon_load3") == 5, "Something wrong in batch sort")
checkAnswer(sql("select * from carbon_load3 where c1='a1' order by c1"),
sql("select * from carbon_load2 where c1='a1' order by c1"))
}
test("test batch sort load by with out passing option but through carbon properties") {
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "BATCH_SORT")
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB, "1")
sql(
"""
| CREATE TABLE carbon_load4(c1 string, c2 string, c3 string, c4 string, c5 string,
| c6 string, c7 int, c8 int, c9 int, c10 int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('dictionary_include'='c1,c2,c3,c4,c5,c6')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load4 " )
checkAnswer(sql("select count(*) from carbon_load4"), Seq(Row(100000)))
assert(getIndexfileCount("carbon_load4") == 5, "Something wrong in batch sort")
CarbonProperties.getInstance().
addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB, "0")
}
test("test batch sort load by with out passing option but through carbon properties with default size") {
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE, "BATCH_SORT")
sql(
"""
| CREATE TABLE carbon_load6(c1 string, c2 string, c3 string, c4 string, c5 string,
| c6 string, c7 int, c8 int, c9 int, c10 int)
| STORED BY 'org.apache.carbondata.format'
| TBLPROPERTIES('dictionary_include'='c1,c2,c3,c4,c5,c6')
""".stripMargin)
sql(s"LOAD DATA LOCAL INPATH '$filePath' into table carbon_load6 " )
checkAnswer(sql("select count(*) from carbon_load6"), Seq(Row(100000)))
assert(getIndexfileCount("carbon_load6") == 1, "Something wrong in batch sort")
CarbonProperties.getInstance().
addProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT)
}
def getIndexfileCount(tableName: String, segmentNo: String = "0"): Int = {
val store = storeLocation +"/default/"+ tableName + "/Fact/Part0/Segment_"+segmentNo
val list = new File(store).list(new FilenameFilter {
override def accept(dir: File, name: String) = name.endsWith(".carbonindex")
})
list.size
}
override def afterAll {
dropTable
new File(filePath).delete()
}
}
|
shivangi1015/incubator-carbondata
|
integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/dataload/TestBatchSortDataLoad.scala
|
Scala
|
apache-2.0
| 9,038 |
import scala.collection.{immutable, mutable}
import java.nio.file.Paths
class repeatedArgs {
def bar(xs: String*): Int = xs.length
def test(xs: immutable.Seq[String], ys: collection.Seq[String], zs: Array[String]): Unit = {
bar("a", "b", "c")
bar(xs: _*)
bar(ys: _*) // error: immutable.Seq expected, found Seq
bar(zs: _*) // old-error: Remove (compiler generated) Array to Seq conversion in 2.13?
Paths.get("Hello", "World")
Paths.get("Hello", xs: _*)
Paths.get("Hello", ys: _*) // error: immutable.Seq expected, found Seq
Paths.get("Hello", zs: _*)
}
}
|
som-snytt/dotty
|
tests/neg/repeatedArgs213.scala
|
Scala
|
apache-2.0
| 596 |
package services
import models.{ MovieReservationDetail, ReservationCounter, ReservationCreate, ReservationCreateResult }
import utils.CacheConstants
import scala.concurrent.{ ExecutionContext, Future }
import scala.language.postfixOps
trait ReservationService {
def makeReservation(reservation: ReservationCreate): Future[ReservationCreateResult]
def getReservationDetail(imdbId: String, screenId: String): Future[Option[MovieReservationDetail]]
}
class ReservationServiceImpl(
val moviesService: MoviesService,
val cacheService: CachingService
)(implicit executionContext: ExecutionContext)
extends ReservationService with CacheConstants with utils.Messages.Reservation {
import cacheService._
import models.ModelCodecs.reservationCounter._
import moviesService._
//TODO: Implement distributed lock mechanism (redis, zookeeper etc..)
//INFO: https://redis.io/topics/distlock
def makeReservation(reservationOption: Option[ReservationCounter]): Future[(String, Boolean)] = {
reservationOption match {
case Some(value) => {
//ACQUIRE LOCK
if (value.availableSeats > value.reservedSeats) {
value.makeReservation()
Future {
(RESERVATION_CREATED, true)
}
} else {
Future {
(RESERVATION_CREATE_NO_AVAILABLE_SEAT, false)
}
}
//RELEASE LOCK
}
case None => Future {
(RESERVATION_CREATE_NO_AVAILABLE_MOVIE, false)
}
}
}
def processReservation(key: String): Future[ReservationCreateResult] = {
for {
cachedReservationOpt <- getFromCache[ReservationCounter](key)(decodeReservationCounter)
(message, ok) <- makeReservation(cachedReservationOpt)
added <- addToCache[ReservationCounter](key, cachedReservationOpt.get)(encodeReservationCounter)
} yield ReservationCreateResult(message = message, success = added & ok)
}
override def makeReservation(reservation: ReservationCreate): Future[ReservationCreateResult] = {
val key = RESERVATION_TRACK_KEY_TPL.format(reservation.imdbId, reservation.screenId)
val message = Future {
ReservationCreateResult(message = RESERVATION_CREATE_NO_AVAILABLE_MOVIE, success = false)
}
for {
exists: Boolean <- existsInCache(key)
result <- if (exists) message else processReservation(key)
} yield result
}
override def getReservationDetail(imdbId: String, screenId: String): Future[Option[MovieReservationDetail]] = {
    //INFO: Implement caching so we can hit it first
val key = RESERVATION_TRACK_KEY_TPL.format(imdbId, screenId)
for {
movieOption <- findMovieByImdbId(imdbId)
cachedReservationOption <- getFromCache[ReservationCounter](key)(decodeReservationCounter)
} yield {
movieOption match {
case Some(movie) => {
cachedReservationOption match {
case Some(cachedReservation) => {
Some(MovieReservationDetail(
imdbId = imdbId,
movieTitle = movie.movieTitle,
screenId = screenId,
availableSeats = cachedReservation.availableSeats,
reservedSeats = cachedReservation.reservedSeats
))
}
case None => None
}
}
case None => None
}
}
}
}
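// --- Hedged sketch (not part of the original file) ----------------------------------
// One way the TODO above could be addressed: wrap the read-modify-write cycle in
// processReservation with a distributed lock. `DistributedLock` is hypothetical (it
// does not exist in this project as shown); a concrete implementation could be backed
// by Redis Redlock or a ZooKeeper recipe, per the linked article.
trait DistributedLock {
  def withLock[A](key: String)(body: => Future[A]): Future[A]
}
// Usage sketch inside ReservationServiceImpl: lock.withLock(key) { processReservation(key) }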
|
ziyasal/Reserveon
|
src/main/scala/services/ReservationService.scala
|
Scala
|
mit
| 3,351 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.connector.evaluate
import slamdata.Predef.Option
import quasar.contrib.pathy.AFile
import quasar.qscript.QScriptEducated
import cats.{Functor, Show}
import monocle.macros.Lenses
/** A QScript query over possibly many sources.
*
* NB: This encoding exists due to the cost incurred when extending
* `QScriptTotal`, both in compilation time and boilerplate.
*
* If we're ever able to get rid of `QScriptTotal`, we could just use
* a variant of QScript containing `Const[Read[Source[S]], ?]` instead.
*/
@Lenses
final case class FederatedQuery[T[_[_]], S](
query: T[QScriptEducated[T, ?]],
sources: AFile => Option[Source[S]])
object FederatedQuery extends FederatedQueryInstances
sealed abstract class FederatedQueryInstances {
implicit def federatedQueryFunctor[T[_[_]]]: Functor[FederatedQuery[T, ?]] =
new Functor[FederatedQuery[T, ?]] {
def map[A, B](fa: FederatedQuery[T, A])(f: A => B) =
FederatedQuery(fa.query, p => fa.sources(p).map(_.map(f)))
}
implicit def federatedQueryShow[T[_[_]], A]: Show[FederatedQuery[T, A]] =
Show.show(_ => "FederatedQuery")
}
|
slamdata/quasar
|
connector/src/main/scala/quasar/connector/evaluate/FederatedQuery.scala
|
Scala
|
apache-2.0
| 1,752 |
package server
import database.DocumentTag
/**
* Created by jannis on 12/6/15.
*
* Contains classes which are just for communication to fulfil the API
*/
object Protocol {
case class MessagePost(message: String)
case class TagPost(name: String, language: String)
case class AddTagsPost(tags: Seq[DocumentTag])
case class QuestionPost(data: String, topic: String)
case class SendableDocument(id: Int, url: String, typ: String, tags: Seq[DocumentTag], tagged: Boolean)
}
|
dadarakt/hack4good
|
hack2godly/src/main/scala/server/Protocol.scala
|
Scala
|
apache-2.0
| 485 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager
import java.util.Properties
import java.util.concurrent.atomic.AtomicBoolean
import com.typesafe.config.{Config, ConfigFactory}
import kafka.manager.features.KMDeleteTopicFeature
import kafka.manager.model.{Kafka_0_8_1_1, ActorModel}
import kafka.manager.utils.CuratorAwareTest
import kafka.manager.model.ActorModel.{KafkaManagedConsumer, ZKManagedConsumer, TopicList}
import kafka.test.{NewKafkaManagedConsumer, SimpleProducer, HighLevelConsumer, SeededBroker}
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.util.Try
/**
* @author hiral
*/
class TestKafkaManager extends CuratorAwareTest with BaseTest {
private[this] val seededTopic = "km-api-test"
private[this] val broker = new SeededBroker(seededTopic,4)
private[this] val kafkaServerZkPath = broker.getZookeeperConnectionString
private[this] val akkaConfig: Properties = new Properties()
akkaConfig.setProperty("pinned-dispatcher.type","PinnedDispatcher")
akkaConfig.setProperty("pinned-dispatcher.executor","thread-pool-executor")
akkaConfig.setProperty(KafkaManager.ZkHosts,testServer.getConnectString)
akkaConfig.setProperty(KafkaManager.BrokerViewUpdateSeconds,"1")
akkaConfig.setProperty(KafkaManager.KafkaManagerUpdateSeconds,"1")
akkaConfig.setProperty(KafkaManager.DeleteClusterUpdateSeconds,"1")
akkaConfig.setProperty(KafkaManager.ConsumerPropertiesFile,"conf/consumer.properties")
private[this] val config : Config = ConfigFactory.parseProperties(akkaConfig)
private[this] val kafkaManager : KafkaManager = new KafkaManager(config)
private[this] val duration = FiniteDuration(10,SECONDS)
private[this] val createTopicNameA = "km-unit-test-a"
private[this] val createTopicNameB = "km-unit-test-b"
private[this] val createLogkafkaLogkafkaId = "km-unit-test-logkafka-logkafka_id"
private[this] val createLogkafkaLogPath = "/km-unit-test-logkafka-logpath"
private[this] val createLogkafkaTopic = "km-unit-test-logkafka-topic"
private[this] var hlConsumer : Option[HighLevelConsumer] = None
private[this] var hlConsumerThread : Option[Thread] = None
private[this] val hlShutdown = new AtomicBoolean(false)
private[this] var newConsumer : Option[NewKafkaManagedConsumer] = None
private[this] var newConsumerThread : Option[Thread] = None
private[this] val newShutdown = new AtomicBoolean(false)
private[this] var simpleProducer : Option[SimpleProducer] = None
private[this] var simpleProducerThread : Option[Thread] = None
override protected def beforeAll() : Unit = {
super.beforeAll()
Thread.sleep(2000)
hlConsumer = Option(broker.getHighLevelConsumer)
hlConsumerThread = Option(new Thread() {
override def run(): Unit = {
while(!hlShutdown.get()) {
hlConsumer.map(_.read { ba =>
Option(ba).map(asString).foreach( s => println(s"hl consumer read message : $s"))
})
Thread.sleep(500)
}
}
})
hlConsumerThread.foreach(_.start())
newConsumer = Option(broker.getNewConsumer)
newConsumerThread = Option(new Thread() {
override def run(): Unit = {
while(!newShutdown.get()) {
newConsumer.map(_.read { message =>
Option(message).foreach( s => println(s"new consumer read message : $s"))
})
Thread.sleep(500)
}
}
})
newConsumerThread.foreach(_.start())
simpleProducer = Option(broker.getSimpleProducer)
simpleProducerThread = Option(new Thread() {
override def run(): Unit = {
var count = 0
while(!hlShutdown.get()) {
simpleProducer.foreach { p =>
p.send(s"simple message $count", null)
count+=1
Thread.sleep(500)
}
}
}
})
simpleProducerThread.foreach(_.start())
Thread.sleep(1000)
}
override protected def afterAll(): Unit = {
Try(newShutdown.set(true))
Try(hlShutdown.set(true))
Try(simpleProducerThread.foreach(_.interrupt()))
Try(hlConsumerThread.foreach(_.interrupt()))
Try(hlConsumer.foreach(_.close()))
Try(newConsumerThread.foreach(_.interrupt()))
Try(newConsumer.foreach(_.close()))
if(kafkaManager!=null) {
kafkaManager.shutdown()
}
Try(broker.shutdown())
super.afterAll()
}
private[this] def getTopicList() : TopicList = {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
result.toOption.get
}
test("add cluster") {
val future = kafkaManager.addCluster("dev","0.8.2.0",kafkaServerZkPath, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(kafkaManager.defaultTuning), securityProtocol="PLAINTEXT")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("create topic") {
val futureA = kafkaManager.createTopic("dev",createTopicNameA,4,1)
val resultA = Await.result(futureA,duration)
val futureB = kafkaManager.createTopic("dev",createTopicNameB,4,1)
val resultB = Await.result(futureB,duration)
assert(resultA.isRight === true)
assert(resultB.isRight === true)
Thread.sleep(2000)
}
test("fail to create topic again") {
val future = kafkaManager.createTopic("dev",createTopicNameA,4,1)
val result = Await.result(future,duration)
assert(result.isLeft === true)
Thread.sleep(2000)
}
test("get topic list") {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
}
test("query request for invalid cluster") {
val future = kafkaManager.getTopicList("blah")
val result = Await.result(future,duration)
assert(result.isLeft === true)
assert(result.swap.toOption.get.msg.contains("blah") === true)
}
test("get broker list") {
val future = kafkaManager.getBrokerList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.nonEmpty === true)
}
test("get topic identity") {
val future = kafkaManager.getTopicList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
result.toOption.get.list.foreach { topic =>
val future2 = kafkaManager.getTopicIdentity("dev",topic)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
}
//seeded topic should have offsets
val future2 = kafkaManager.getTopicIdentity("dev",seededTopic)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
assert(result2.toOption.get.summedTopicOffsets > 0)
}
test("get cluster list") {
val future = kafkaManager.getClusterList
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.active.nonEmpty === true)
}
test("get cluster view") {
val future = kafkaManager.getClusterView("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("get cluster config") {
val future = kafkaManager.getClusterConfig("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("get cluster context") {
val future = kafkaManager.getClusterContext("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterFeatures.features(KMDeleteTopicFeature))
}
test("get consumer list passive mode") {
val future = kafkaManager.getConsumerListExtended("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.list.map(_._1).contains((newConsumer.get.groupId, KafkaManagedConsumer)), s"Failed : ${result}")
assert(result.toOption.get.list.map(_._1).contains((hlConsumer.get.groupId, ZKManagedConsumer)), s"Failed : ${result}")
}
test("get consumer identity passive mode for old consumer") {
val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId, "ZK")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}
test("get consumer identity passive mode for new consumer") {
val future = kafkaManager.getConsumerIdentity("dev", newConsumer.get.groupId, "KF")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}
test("run preferred leader election") {
val topicList = getTopicList()
val future = kafkaManager.runPreferredLeaderElection("dev",topicList.list.toSet)
val result = Await.result(future,duration)
//TODO: this is a failure since there is nothing to do, need a better test
assert(result.isLeft === true)
Thread.sleep(2000)
}
test("get preferred leader election") {
val future = kafkaManager.getPreferredLeaderElection("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
println(result.toOption.get)
}
test("generate partition assignments") {
val topicList = getTopicList()
val future = kafkaManager.generatePartitionAssignments("dev",topicList.list.toSet,Set(0))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("run reassign partitions") {
val topicList = getTopicList()
val future = kafkaManager.runReassignPartitions("dev",topicList.list.toSet)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("get reassign partitions") {
val future = kafkaManager.getReassignPartitions("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
}
test("add topic partitions") {
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
val future = kafkaManager.addTopicPartitions("dev",createTopicNameA,Seq(0),ti.partitions + 1,ti.readVersion)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
//check new partition num
{
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
assert(ti.partitions === 5)
}
}
test("add multiple topics partitions") {
val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
val tiOrErrorA = Await.result(tiFutureA,duration)
val tiOrErrorB = Await.result(tiFutureB,duration)
assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
val tiA = tiOrErrorA.toOption.get
val tiB = tiOrErrorB.toOption.get
val newPartitionNum = tiA.partitions + 1
val future = kafkaManager.addMultipleTopicsPartitions("dev",Seq(createTopicNameA, createTopicNameB),Set(0),newPartitionNum,Map(createTopicNameA->tiA.readVersion,createTopicNameB->tiB.readVersion))
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
{
val tiFutureA = kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiFutureB = kafkaManager.getTopicIdentity("dev",createTopicNameB)
val tiOrErrorA = Await.result(tiFutureA,duration)
val tiOrErrorB = Await.result(tiFutureB,duration)
assert(tiOrErrorA.isRight, "Failed to get topic identity for topic A!")
assert(tiOrErrorB.isRight, "Failed to get topic identity for topic B!")
val tiA = tiOrErrorA.toOption.get
val tiB = tiOrErrorB.toOption.get
assert(tiA.partitions === newPartitionNum)
assert(tiB.partitions === newPartitionNum)
}
}
test("update topic config") {
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
val config = new Properties()
config.put(kafka.manager.utils.zero82.LogConfig.RententionMsProp,"1800000")
val configReadVersion = ti.configReadVersion
val future = kafkaManager.updateTopicConfig("dev",createTopicNameA,config,configReadVersion)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
//check new topic config
{
val tiFuture= kafkaManager.getTopicIdentity("dev",createTopicNameA)
val tiOrError = Await.result(tiFuture, duration)
assert(tiOrError.isRight, "Failed to get topic identity!")
val ti = tiOrError.toOption.get
assert(ti.configReadVersion > configReadVersion)
assert(ti.config.toMap.apply(kafka.manager.utils.zero82.LogConfig.RententionMsProp) === "1800000")
}
}
test("delete topic") {
val futureA = kafkaManager.deleteTopic("dev",createTopicNameA)
val resultA = Await.result(futureA,duration)
assert(resultA.isRight === true, resultA)
Thread.sleep(2000)
val futureA2 = kafkaManager.getTopicList("dev")
val resultA2 = Await.result(futureA2,duration)
assert(resultA2.isRight === true, resultA2)
assert(resultA2.toOption.get.deleteSet(createTopicNameA),"Topic not in delete set")
val futureB = kafkaManager.deleteTopic("dev",createTopicNameB)
val resultB = Await.result(futureB,duration)
assert(resultB.isRight === true, resultB)
Thread.sleep(2000)
val futureB2 = kafkaManager.getTopicList("dev")
val resultB2 = Await.result(futureB2,duration)
assert(resultB2.isRight === true, resultB2)
assert(resultB2.toOption.get.deleteSet(createTopicNameB),"Topic not in delete set")
}
test("fail to delete non-existent topic") {
val future = kafkaManager.deleteTopic("dev","delete_me")
val result = Await.result(future,duration)
assert(result.isLeft === true)
}
test("update cluster zkhost") {
val future = kafkaManager.updateCluster("dev","0.8.2.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxSsl = false, jmxPass = None, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.curatorConfig.zkConnect === testServer.getConnectString))
Thread.sleep(2000)
}
test("disable cluster") {
val future = kafkaManager.disableCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.enabled === false))
Thread.sleep(2000)
}
test("enable cluster") {
val future = kafkaManager.enableCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("update cluster version") {
val future = kafkaManager.updateCluster("dev","0.8.1.1",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.pending.nonEmpty === true) ||
(result2.toOption.get.active.find(c => c.name == "dev").get.version === Kafka_0_8_1_1))
Thread.sleep(2000)
}
test("delete topic not supported prior to 0.8.2.0") {
val future = kafkaManager.deleteTopic("dev",createTopicNameA)
val result = Await.result(future,duration)
assert(result.isLeft === true, result)
assert(result.swap.toOption.get.msg.contains("not supported"))
Thread.sleep(2000)
}
test("update cluster logkafka enabled and activeOffsetCache enabled") {
val future = kafkaManager.updateCluster("dev","0.8.2.0",testServer.getConnectString, jmxEnabled = false, pollConsumers = true, filterConsumers = true, logkafkaEnabled = true, activeOffsetCacheEnabled = true, jmxUser = None, jmxPass = None, jmxSsl = false, tuning = Option(defaultTuning), securityProtocol="PLAINTEXT")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert((result2.toOption.get.active.find(c => c.name == "dev").get.logkafkaEnabled === true) &&
(result2.toOption.get.active.find(c => c.name == "dev").get.activeOffsetCacheEnabled === true))
Thread.sleep(2000)
}
/*
test("get consumer list active mode") {
val future = kafkaManager.getConsumerListExtended("dev")
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.list.head._1 === hlConsumer.get.groupId, s"Failed : ${result}")
}
test("get consumer identity active mode") {
val future = kafkaManager.getConsumerIdentity("dev", hlConsumer.get.groupId)
val result = Await.result(future,duration)
assert(result.isRight === true, s"Failed : ${result}")
assert(result.toOption.get.clusterContext.config.activeOffsetCacheEnabled === false, s"Failed : ${result}")
assert(result.toOption.get.topicMap.head._1 === seededTopic, s"Failed : ${result}")
}*/
test("create logkafka") {
val config = new Properties()
config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
val future = kafkaManager.createLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
test("get logkafka identity") {
val future = kafkaManager.getLogkafkaLogkafkaIdList("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
assert(result.toOption.get.list.nonEmpty === true)
result.toOption.get.list.foreach { logkafka_id =>
val future2 = kafkaManager.getLogkafkaIdentity("dev",logkafka_id)
val result2 = Await.result(future2, duration)
assert(result2.isRight === true)
}
}
test("update logkafka config") {
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
val config = new Properties()
config.put(kafka.manager.utils.logkafka82.LogConfig.TopicProp,createLogkafkaTopic)
config.put(kafka.manager.utils.logkafka82.LogConfig.PartitionProp,"1")
val future = kafkaManager.updateLogkafkaConfig("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath,config)
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(1000)
//check new logkafka config
{
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
assert(li.identityMap.get(createLogkafkaLogPath).get._1.get.apply(kafka.manager.utils.logkafka82.LogConfig.PartitionProp) === "1")
}
}
test("delete logkafka") {
val future = kafkaManager.deleteLogkafka("dev",createLogkafkaLogkafkaId,createLogkafkaLogPath)
val result = Await.result(future,duration)
assert(result.isRight === true, result)
Thread.sleep(2000)
val liFuture= kafkaManager.getLogkafkaIdentity("dev",createLogkafkaLogkafkaId)
val liOrError = Await.result(liFuture, duration)
assert(liOrError.isRight, "Failed to get logkafka identity!")
val li = liOrError.toOption.get
assert(li.identityMap.get(createLogkafkaLogPath) === None)
Thread.sleep(2000)
}
test("delete cluster") {
//first have to disable in order to delete
{
val future = kafkaManager.disableCluster("dev")
val result = Await.result(future, duration)
assert(result.isRight === true)
Thread.sleep(2000)
}
val future = kafkaManager.deleteCluster("dev")
val result = Await.result(future,duration)
assert(result.isRight === true)
Thread.sleep(2000)
val future2 = kafkaManager.getClusterList
val result2 = Await.result(future2,duration)
assert(result2.isRight === true)
assert(result2.toOption.get.pending.isEmpty === true)
assert(result2.toOption.get.active.isEmpty === true)
}
}
|
radicalbit/kafka-manager
|
test/kafka/manager/TestKafkaManager.scala
|
Scala
|
apache-2.0
| 22,433 |
import org.specs2._
trait MockOutput extends Output {
var messages: Seq[String] = Seq()
override def print(s: String) = messages = messages :+ s
}
class QuickStartSpec extends Specification {
def is = s2"""
Testing hw
contains print | as ground $e1
"""
  // `ground` must be a val so that show() below and the expectation in e1 observe the same instance
  val ground = new Ground with MockOutput
  ground.show()
def e1 = ground.messages should contain("|")
}
|
chuajiesheng/scala-dino
|
src/test/scala/test_ground.scala
|
Scala
|
gpl-2.0
| 383 |
/*
* Copyright 2016 okumin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package influent.internal.msgpack
import org.msgpack.value._
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.{Arbitrary, Gen}
import scala.collection.JavaConverters._
object MsgpackUnpackerArbitrary {
implicit lazy val arbValue: Arbitrary[ImmutableValue] = Arbitrary(genValue(0))
private[this] def genValue(level: Int): Gen[ImmutableValue] = {
def genScalar: Gen[ImmutableValue] = Gen.oneOf(
arbitrary[ImmutableBinaryValue],
arbitrary[ImmutableBooleanValue],
arbitrary[ImmutableIntegerValue],
arbitrary[ImmutableFloatValue],
arbitrary[ImmutableNilValue],
arbitrary[ImmutableStringValue],
arbitrary[ImmutableExtensionValue]
)
def genCollection(level: Int): Gen[ImmutableValue] = Gen.oneOf(
genArray(level),
genMap(level)
)
level match {
case 2 => genScalar
case x => Gen.frequency(50 -> genScalar, 1 -> genCollection(x + 1))
}
}
implicit lazy val arbBinary: Arbitrary[ImmutableBinaryValue] = Arbitrary {
Gen.listOf(Arbitrary.arbByte.arbitrary).map(_.toArray).map(ValueFactory.newBinary)
}
implicit lazy val arbBoolean: Arbitrary[ImmutableBooleanValue] = Arbitrary {
arbitrary[Boolean].map(ValueFactory.newBoolean)
}
implicit lazy val arbInteger: Arbitrary[ImmutableIntegerValue] = Arbitrary(Gen.oneOf(
arbitrary[Long].map(ValueFactory.newInteger),
arbitrary[BigInt].filter { value =>
value.bitLength <= 63 || value.bitLength == 64 && value.signum == 1
}.map(_.bigInteger).map(ValueFactory.newInteger)
))
implicit lazy val arbFloat: Arbitrary[ImmutableFloatValue] = Arbitrary(Gen.oneOf(
arbitrary[Float].map(ValueFactory.newFloat),
arbitrary[Double].map(ValueFactory.newFloat)
))
implicit lazy val arbNil: Arbitrary[ImmutableNilValue] = Arbitrary(Gen.const(ValueFactory.newNil()))
implicit lazy val arbString: Arbitrary[ImmutableStringValue] = Arbitrary {
Gen.alphaStr.map(ValueFactory.newString)
}
implicit lazy val arbExtension: Arbitrary[ImmutableExtensionValue] = Arbitrary {
for {
extType <- Arbitrary.arbByte.arbitrary
data <- Gen.listOf(Arbitrary.arbByte.arbitrary).map(_.toArray)
} yield ValueFactory.newExtension(extType, data)
}
private[this] def genArray(level: Int): Gen[ImmutableArrayValue] = {
Gen.listOf(genValue(level)).map(_.asJava).map(ValueFactory.newArray)
}
implicit lazy val arbArray: Arbitrary[ImmutableArrayValue] = Arbitrary(genArray(0))
private[this] def genMap(level: Int): Gen[ImmutableMapValue] = {
val genKV = for {
k <- genValue(level)
v <- genValue(level)
} yield (k, v)
Gen.mapOf(genKV).flatMap { kvs =>
kvs.map { case (k, v) => (k: Value, v: Value) }
}.map(_.asJava).map { x => ValueFactory.newMap(x) }
}
implicit lazy val arbMap: Arbitrary[ImmutableMapValue] = Arbitrary(genMap(0))
}
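// --- Hedged usage sketch (not part of the original file) ----------------------------
// Shows how the implicit Arbitrary[ImmutableValue] above is picked up by a ScalaCheck
// property; the property itself is deliberately trivial.
object MsgpackUnpackerArbitraryUsage {
  import org.scalacheck.Prop
  import MsgpackUnpackerArbitrary._

  val generatesValues: Prop = Prop.forAll { (v: ImmutableValue) => v != null }
}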
|
okumin/influent
|
influent-java/src/test/scala/influent/internal/msgpack/MsgpackUnpackerArbitrary.scala
|
Scala
|
apache-2.0
| 3,468 |
package types
import Types._
/**
* Created by Truji on 09/06/2016.
*/
object SemanticTableau {
/**
   * Indicates whether a formula is a double negation
   *
   * @param prop the formula
*/
def isDoubleNeg(prop: Prop) = prop match {
case Neg(Neg(_)) => true
case _ => false
}
/**
   * Indicates whether a proposition is an alpha formula
   * @param prop the formula to analyse
* @return
*/
def isAlfa(prop: Prop) = prop match {
case Conj(_, _) => true
case Neg(Impl(_, _)) => true
case Neg(Disj(_, _)) => true
case _ => false
}
/**
   * Indicates whether a proposition is a beta formula
   * @param prop the formula to analyse
* @return
*/
def isBeta(prop: Prop) = prop match {
case Disj(_, _) => true
case Impl(_, _) => true
case Neg(Conj(_, _)) => true
case Equi(_, _) => true
case Neg(Equi(_, _)) => true
case _ => false
}
/**
   * Returns a Set containing the components of the formula prop
* @param prop
* @return
*/
def components(prop: Prop): Set[Prop] = prop match {
case Neg(Neg(f)) => Set(f)
case Conj(f, g) => Set(f, g)
case Neg(Impl(f, g)) => Set(f, Neg(g))
case Neg(Disj(f, g)) => Set(Neg(f), Neg(g))
case Disj(f, g) => Set(f, g)
case Impl(f, g) => Set(Neg(f), g)
case Neg(Conj(f, g)) => Set(Neg(f), Neg(g))
case Equi(f, g) => Set(Conj(f, g), Conj(no(f), no(g)))
case Neg(Equi(f, g)) => Set(Conj(f, Neg(g)), Conj(Neg(f), g))
}
/**
   * Indicates whether all the formulas in a set are literals
* @param props
* @return
*/
def allLiterals(props: Iterable[Prop]) = props.forall(_.isLiteral)
def hasContradiction(props: Iterable[Prop]) = props.exists(p => props.exists(_ == Neg(p)))
/**
   * Double-negation expansion of a set of formulas
*
*/
def expDN(props: Iterable[Prop], prop: Prop) =
Set(components(prop) ++ props.filterNot(_ == prop))
/**
   * Alpha expansion of a set of formulas
*
*/
def expAlfa(props: Iterable[Prop], prop: Prop) =
Set(components(prop) ++ props.filterNot(_ == prop))
/**
   * Beta expansion of a set of formulas
*
*/
def expBeta(props: Iterable[Prop], prop: Prop) = {
val _props = props.filterNot(_ == prop)
components(prop) map (p => Set(p) ++ _props )
}
  //@TODO ??? what happens if we reach the else branch and betas.isEmpty??
/**
   * Applies the first applicable expansion rule (double negation, then alpha, then beta)
   * to the set of formulas and returns the resulting successor sets
* @param props
* @return
*/
def successors(props: Iterable[Prop]) = {
lazy val doubleNegs = props.find(isDoubleNeg)
lazy val alfas = props.find(isAlfa)
lazy val betas = props.find(isBeta)
if (doubleNegs.nonEmpty) expDN(props, doubleNegs.get)
else if(alfas.nonEmpty) expAlfa(props, alfas.get)
else expBeta(props, betas.get)
}
/**
   * Obtains the models (as sets of propositions) of a set of formulas
   * using the semantic tableaux method
* @param props
* @return
*/
def modelsByTableaux(props: Iterable[Prop]): Set[Set[Prop]] =
if (hasContradiction(props)) Set.empty
else if (allLiterals(props)) Set(props.toSet)
else successors(props) flatMap modelsByTableaux
/**
   * Set of general models of props, computed via the semantic tableaux method
* @param props
* @return
*/
def generalModels(props: Iterable[Prop]) = {
val models = modelsByTableaux(props)
models.filter(
m => !(models - m).exists(_.subsetOf(m))
)
}
/**
   * Indicates whether a formula is a theorem, using the semantic tableaux method
* @param prop
* @return
*/
def isTheoremByTableaux(prop: Prop) = modelsByTableaux(Set(Neg(prop))) isEmpty
/**
   * Checks whether the formula prop is a logical consequence of the set of formulas props via semantic tableaux
* @param props
* @param prop
* @return
*/
def isDeductibleByTableaux(props: Iterable[Prop], prop: Prop) =
modelsByTableaux(Set(Neg(prop)) ++ props).isEmpty
}
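// --- Hedged usage sketch (not part of the original file) ----------------------------
// Shows the intended use of the tableaux functions above. `p` and `q` stand for
// whatever atomic propositions Types provides; no atom constructor is assumed here.
object SemanticTableauExample {
  import SemanticTableau._

  // (p /\ q) -> p is a tautology, and q follows from {p, p -> q} (modus ponens),
  // so both checks should return true for any propositions p and q.
  def demo(p: Prop, q: Prop): Boolean =
    isTheoremByTableaux(Impl(Conj(p, q), p)) &&
      isDeductibleByTableaux(Set(p, Impl(p, q)), q)
}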
|
Truji92/Logic
|
src/main/scala/types/SemanticTableau.scala
|
Scala
|
mit
| 4,113 |
/*
* Copyright 2014 - 2015 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package slamdata.engine
import slamdata.Predef._
import scalaz._
package object analysis {
object -> {
def unapply[A, B](value: (A, B)) = Some(value)
}
type -> [A, B] = (A, B)
type Analyzer[N, A, E] = (N => A, N) => Validation[E, A]
type AnalysisResult[N, A, E] = Validation[E, AnnotatedTree[N, A]]
type Analysis[N, A, B, E] = AnnotatedTree[N, A] => AnalysisResult[N, B, E]
implicit def AnalysisArrow[N, E] = new Arrow[Analysis[N, ?, ?, E]] {
def arr[A, B](f: (A) => B): Analysis[N, A, B, E] = tree => Validation.success(tree.annotate(n => f(tree.attr(n))))
def compose[A, B, C](f: Analysis[N, B, C, E], g: Analysis[N, A, B, E]): Analysis[N, A, C, E] =
tree => g(tree).fold(
Validation.failure,
tree2 => f(tree2)
)
def first[A, B, C](f: Analysis[N, A, B, E]): Analysis[N, (A, C), (B, C), E] = treeAC =>
f(treeAC.annotate(n => treeAC.attr(n)._1)).map(treeB => treeB.annotate(n => (treeB.attr(n), treeAC.attr(n)._2)))
def id[A]: Analysis[N, A, A, E] = tree => Validation.success(tree)
}
implicit def AnalysisFunctor[N, A, E] = new Functor[Analysis[N, A, ?, E]] {
def map[B, C](fa: Analysis[N, A, B, E])(f: (B) => C): Analysis[N, A, C, E] = {
(tree: AnnotatedTree[N, A]) => fa(tree).map(tree => tree.annotate(n => f(tree.attr(n))))
}
}
implicit class AnalysisW[N, A, B, E](self: Analysis[N, A, B, E]) {
def >>> [C](that: Analysis[N, B, C, E]) = AnalysisArrow[N, E].compose(that, self)
def <<< [C](that: Analysis[N, C, A, E]) = AnalysisArrow[N, E].compose(self, that)
final def first[C]: Analysis[N, (A, C), (B, C), E] = AnalysisArrow[N, E].first(self)
final def second[C]: Analysis[N, (C, A), (C, B), E] = AnalysisArrow[N, E].second(self)
final def *** [C, D](k: Analysis[N, C, D, E]): Analysis[N, (A, C), (B, D), E] = AnalysisArrow[N, E].splitA(self, k)
final def &&& [C](k: Analysis[N, A, C, E]): Analysis[N, A, (B, C), E] = AnalysisArrow[N, E].combine(self, k)
final def product: Analysis[N, (A, A), (B, B), E] = AnalysisArrow[N, E].product(self)
final def push[C](c: C): Analysis[N, A, (B, C), E] = AnalysisFunctor[N, A, E].map(self)(b => (b, c))
}
implicit class AnalysisW1To2[N, A, B, C, E](self: Analysis[N, A, (C, B), E]) {
final def dup2: Analysis[N, A, (((C, B), B), B), E] = AnalysisFunctor.map(self) { case (t, h) => (((t, h), h), h) }
}
implicit class AnalysisW2To1[N, A, B, C, E](self: Analysis[N, (B, A), C, E]) {
final def pop2[D]: Analysis[N, ((D, B), A), (D, C), E] = {
val c = AnalysisArrow[N, E].second[(B, A), C, D](self)
AnalysisArrow.mapfst[(D, (B, A)), (D, C), ((D, B), A)](c) { case ((d, b), a) => (d, (b, a)) }
}
}
}
|
wemrysi/quasar
|
core/src/main/scala/slamdata/engine/analysis/package.scala
|
Scala
|
apache-2.0
| 3,323 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.integration.torch
import com.intel.analytics.bigdl.dllib.nn.MultiLabelMarginCriterion
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class MultiLabelMarginCriterionSpec extends TorchSpec {
"A MultiLabelMarginCriterion " should "generate correct output and grad whith one dimension" in {
torchCheck()
val layer = new MultiLabelMarginCriterion[Double]()
val input = Tensor[Double](4).apply1(e => Random.nextDouble())
val target = Tensor[Double](4)
target(Array(1)) = 3
target(Array(2)) = 2
target(Array(3)) = 1
target(Array(4)) = 0
val start = System.nanoTime()
val output = layer.forward(input, target)
val gradInput = layer.backward(input, target)
val end = System.nanoTime()
val scalaTime = end - start
val code = "module = nn.MultiLabelMarginCriterion()\\n" +
"output = module:forward(input, target)\\n" +
"gradInput = module:backward(input, target)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "target" -> target),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Double]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be(luaOutput)
gradInput should be(luaGradInput)
println("Test case : MultiLabelMarginCriterion, Torch : " + luaTime +
" s, Scala : " + scalaTime / 1e9 + " s")
}
"A MultiLabelMarginCriterion " should "generate correct output and grad with two dimensions" in {
torchCheck()
val layer = new MultiLabelMarginCriterion[Double]()
val input = Tensor[Double](2, 4).apply1(e => Random.nextDouble())
val target = Tensor[Double](2, 4)
target(Array(1, 1)) = 1
target(Array(1, 2)) = 0
target(Array(1, 3)) = 3
target(Array(1, 4)) = 0
target(Array(2, 1)) = 4
target(Array(2, 2)) = 0
target(Array(2, 3)) = 1
target(Array(2, 4)) = 0
val start = System.nanoTime()
val output = layer.forward(input, target)
val gradInput = layer.backward(input, target)
val end = System.nanoTime()
val scalaTime = end - start
val code = "module = nn.MultiLabelMarginCriterion()\\n" +
"output = module:forward(input, target)\\n" +
"gradInput = module:backward(input, target)"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "target" -> target),
Array("output", "gradInput"))
val luaOutput = torchResult("output").asInstanceOf[Double]
val luaGradInput = torchResult("gradInput").asInstanceOf[Tensor[Double]]
output should be (luaOutput)
gradInput should be (luaGradInput)
println("Test case : MultiLabelMarginCriterion, Torch : " + luaTime +
" s, Scala : " + scalaTime / 1e9 + " s")
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/integration/torch/MultiLabelMarginCriterionSpec.scala
|
Scala
|
apache-2.0
| 3,436 |
package io.youi.paint
import io.youi.Color
case class GradientStop(color: Color, offset: Double)
|
outr/youi
|
gui/src/main/scala/io/youi/paint/GradientStop.scala
|
Scala
|
mit
| 99 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fiware.cosmos.orion.flink.connector
import java.net.InetSocketAddress
import java.util.concurrent.atomic.AtomicBoolean
import io.netty.bootstrap.ServerBootstrap
import io.netty.channel.nio.NioEventLoopGroup
import io.netty.channel.socket.SocketChannel
import io.netty.channel.socket.nio.NioServerSocketChannel
import io.netty.channel.{Channel, ChannelInitializer, ChannelOption}
import io.netty.handler.codec.http.{HttpObjectAggregator, HttpServerCodec}
import io.netty.handler.logging.{LogLevel, LoggingHandler}
import org.apache.flink.streaming.api.functions.source.SourceFunction.SourceContext
import org.slf4j.LoggerFactory
/**
* Netty HTTP server
*
* @param ctx Flink source context for collect received message
* @param threadNum cpu number used by netty epoll
* @param logLevel netty log level
*/
class OrionHttpServerLD(
ctx: SourceContext[NgsiEventLD],
threadNum: Int = Runtime.getRuntime.availableProcessors(),
logLevel: LogLevel = LogLevel.INFO
) extends ServerTrait {
private lazy val logger = LoggerFactory.getLogger(getClass)
private lazy val bossGroup = new NioEventLoopGroup(threadNum)
private lazy val workerGroup = new NioEventLoopGroup
private lazy val isRunning = new AtomicBoolean(false)
private final val CHANNEL_OPTION = 1024
private final val HOA = 1048576
private var currentAddr: InetSocketAddress = _
override def close(): Unit = {
bossGroup.shutdownGracefully()
workerGroup.shutdownGracefully()
logger.info("successfully close netty server source")
}
def startNettyServer(
portNotInUse: Int,
callbackUrl: Option[String]
): InetSocketAddress = synchronized {
if (!isRunning.get()) {
val b: ServerBootstrap = new ServerBootstrap
b
.option[java.lang.Integer](ChannelOption.SO_BACKLOG, CHANNEL_OPTION)
.group(bossGroup, workerGroup)
.channel(classOf[NioServerSocketChannel])
.handler(new LoggingHandler(logLevel))
.childHandler(new ChannelInitializer[SocketChannel] {
override def initChannel(ch: SocketChannel): Unit = {
val p = ch.pipeline()
p.addLast(new HttpServerCodec)
p.addLast(new HttpObjectAggregator(HOA))
p.addLast(new OrionHttpHandlerLD(ctx))
}
})
val f = b.bind(portNotInUse)
f.syncUninterruptibly()
val ch: Channel = f.channel()
isRunning.set(true)
currentAddr = ch.localAddress().asInstanceOf[InetSocketAddress]
register(currentAddr, callbackUrl)
ch.closeFuture().sync()
currentAddr
} else {
currentAddr
}
}
}
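// Hedged usage sketch (not part of the original file): one way this server might be driven
// from a Flink SourceFunction. The class name `NgsiLDSourceSketch`, the port 9001 and the
// absent callback URL are illustrative assumptions.
class NgsiLDSourceSketch
  extends org.apache.flink.streaming.api.functions.source.SourceFunction[NgsiEventLD] {
  @volatile private var server: OrionHttpServerLD = _
  override def run(ctx: SourceContext[NgsiEventLD]): Unit = {
    server = new OrionHttpServerLD(ctx) // defaults: one boss thread per CPU, LogLevel.INFO
    server.startNettyServer(9001, None) // blocks until the server channel is closed
  }
  override def cancel(): Unit = if (server != null) server.close()
}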
|
Fiware/context.Cosmos
|
src/main/scala/org/fiware/cosmos/orion/flink/connector/OrionHttpServerLD.scala
|
Scala
|
agpl-3.0
| 3,425 |
package com.daxin
object TestPerson {
  def main(args: Array[String]): Unit = {
    val p1 = new Person
    println("Gender in Person is private, so it cannot be accessed from other files")
    println(p1.id + " " + p1.name)
    // println(p1.country) // private[this] var country: String = _
    println(p1.getCountry())
    // Cat is declared as: private[daxin] class Cat private
    // val cat = new Cat // "constructor Cat in class Cat cannot be accessed in object TestPerson",
    //                   // i.e. the private constructor is not accessible from TestPerson
}
}
|
Dax1n/Scala
|
ObjectOrientedDemo/src/com/daxin/TestPerson.scala
|
Scala
|
apache-2.0
| 548 |
package org.sofi.deadman.test.event
import akka.actor._
import com.rbmhtechnology.eventuate.EventsourcedView
import org.sofi.deadman.messages.command._
import org.sofi.deadman.messages.event._
import org.sofi.deadman.test.TestSystem
import scala.concurrent.duration._
final class TaskWarningTest extends TestSystem {
// Helper view that forwards a `TaskWarning` event back to the test actor for assertion
final class TaskWarningForwarder(val id: String, val eventLog: ActorRef) extends EventsourcedView {
def onCommand = { case _ ⇒ }
def onEvent = {
case event: TaskWarning ⇒
testActor ! event
}
}
"A task actor" must {
"Successfully persist a task warning event" in {
system.actorOf(Props(new TaskWarningForwarder(aggregate, eventLog)))
taskActor ! ScheduleTask("test", aggregate, "0", 10.days.toMillis, Seq(1.second.toMillis))
expectMsg(CommandResponse(ResponseType.SUCCESS))
expectMsgPF() {
case event: TaskWarning ⇒
event.ttw must be(1.second.toMillis)
event.task.key must be("test")
event.task.aggregate must be(aggregate)
event.task.entity must be("0")
}
}
}
}
|
SocialFinance/deadman-switch
|
core/src/test/scala/org/sofi/deadman/test/event/TaskWarningTest.scala
|
Scala
|
bsd-3-clause
| 1,197 |
package scala.collection.hmap
package test
import org.scalacheck._
import org.scalacheck.Prop._
import org.scalacheck.Gen._
import org.scalacheck.Arbitrary._
object WithPhantomProperties extends Properties("WithPhantom") {
/** Value1 and Value2 must be different types */
def prop_typeMiss[T, Value1: Manifest, Value2: Manifest](x: T) : Prop =
WithPhantom[T, Value1](x) != WithPhantom[T, Value2](x)
property("typeMiss") = forAll { x:Int => prop_typeMiss[Int, Boolean, String](x) }
}
|
kennknowles/scala-heterogeneous-map
|
src/test/scala/WithPhantomProperties.scala
|
Scala
|
apache-2.0
| 498 |
package a05
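// Type-level integer arithmetic (identifiers are in Chinese): 自然数 = natural number,
// 零 = zero, 正数 = non-zero natural built from successors (后继 = successor, Plus = addition),
// 整数 = integer encoded as a pair of naturals (负数部分 = negative part, 正数部分 = positive part),
// 加 = addition, 相反数 = negation, 消融1/消融2 = cancellation of the shared part of the two naturals.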
trait 自然数 {
type 后继[T] <: 自然数
def 后继[T](item: T): 后继[T]
type 消融1[T <: 自然数] <: 整数定义
def 消融1[T <: 自然数](item: T): 消融1[T]
type 消融2[T <: 自然数, I] <: 整数定义
def 消融2[T <: 自然数, I](item: T, i: I): 消融2[T, I]
type Plus[T <: 自然数] <: 自然数
def plus[T <: 自然数](h: T): Plus[T]
}
trait 整数定义 {
type 正数部分 <: 自然数
type 负数部分 <: 自然数
def 正数部分: 正数部分
def 负数部分: 负数部分
type 加[T <: 整数定义] <: 整数定义
def 加[T <: 整数定义](item: T): 加[T]
type 自己 <: 整数定义
def 自己: 自己
type 相反数 <: 整数定义
def 相反数: 相反数
}
class 整数[T1 <: 自然数, T2 <: 自然数](override val 负数部分: T1, override val 正数部分: T2) extends 整数定义 {
self =>
override type 自己 = 整数[T1, T2]
override def 自己: 整数[T1, T2] = self
override type 加[T <: 整数定义] = T#负数部分#Plus[负数部分]#消融1[T#正数部分#Plus[正数部分]]
override def 加[T <: 整数定义](item: T): T#负数部分#Plus[负数部分]#消融1[T#正数部分#Plus[正数部分]] = item.负数部分.plus(负数部分).消融1(item.正数部分.plus(正数部分))
override type 负数部分 = T1
override type 正数部分 = T2
override type 相反数 = 整数[T2, T1]
override def 相反数: 整数[T2, T1] = new 整数(负数部分 = 正数部分, 正数部分 = 负数部分)
override def toString: String = s"(负数部分: ${负数部分}, 正数部分: ${正数部分})"
}
class 零 extends 自然数 {
self =>
override type 后继[T] = 正数[零, T]
override def 后继[T](item: T): 正数[零, T] = new 正数(self, item)
override type 消融1[T <: 自然数] = 别名.自然数整数版[T]
override def 消融1[T <: 自然数](item: T): 别名.自然数整数版[T] = 别名.自然数整数版(item)
override type 消融2[T <: 自然数, I] = 整数[T#后继[I], 零]
override def 消融2[T <: 自然数, I](item: T, i: I): 整数[T#后继[I], 零] = new 整数(负数部分 = item.后继(i), 正数部分 = 别名.自然数零)
override type Plus[T <: 自然数] = T
override def plus[T <: 自然数](h: T): T = h
override def toString: String = "零"
}
object 别名 {
val 自然数零: 零 = new 零
type 整数零 = 整数[零, 零]
val 整数零: 整数零 = new 整数(负数部分 = 自然数零, 正数部分 = 自然数零)
type 自然数整数版[T <: 自然数] = 整数[零, T]
def 自然数整数版[T <: 自然数](item: T): 自然数整数版[T] = new 整数(负数部分 = 自然数零, 正数部分 = item)
}
class 正数[Tail <: 自然数, H](val tail: Tail, val head: H) extends 自然数 {
self =>
override type 后继[I] = 正数[正数[Tail, H], I]
override def 后继[I](item: I): 正数[正数[Tail, H], I] = new 正数(self, item)
override type 消融1[T <: 自然数] = T#消融2[Tail, H]#自己
override def 消融1[T <: 自然数](item: T): T#消融2[Tail, H]#自己 = item.消融2(tail, head).自己
override type 消融2[T <: 自然数, I] = T#消融1[Tail]
override def 消融2[T <: 自然数, I](item: T, i: I): T#消融1[Tail] = item.消融1(tail)
override type Plus[T <: 自然数] = Tail#Plus[T]#后继[H]
override def plus[T <: 自然数](h: T): Tail#Plus[T]#后继[H] = tail.plus(h).后继(head)
override def toString: String = s"$tail :: $head"
}
|
djx314/ubw
|
a08-整数加法/src/main/scala/HList.scala
|
Scala
|
bsd-3-clause
| 3,527 |
package org.jetbrains.plugins.scala
package editor.smartEnter.fixers
import com.intellij.openapi.editor.Editor
import com.intellij.psi._
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.editor.smartEnter.ScalaSmartEnterProcessor
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlockExpr, ScWhileStmt}
/**
* @author Dmitry.Naydanov
* @author Ksenia.Sautina
* @since 1/30/13
*/
@SuppressWarnings(Array("HardCodedStringLiteral"))
class ScalaWhileConditionFixer extends ScalaFixer {
def apply(editor: Editor, processor: ScalaSmartEnterProcessor, psiElement: PsiElement): OperationPerformed = {
val whileStatement = PsiTreeUtil.getParentOfType(psiElement, classOf[ScWhileStmt], false)
if (whileStatement == null) return NoOperation
val doc = editor.getDocument
val leftParenthesis = whileStatement.getLeftParenthesis.orNull
val rightParenthesis = whileStatement.getRightParenthesis.orNull
whileStatement.condition match {
case None if leftParenthesis != null && !leftParenthesis.getNextSibling.isInstanceOf[PsiErrorElement] &&
whileStatement.lastChild.exists(_.isInstanceOf[PsiErrorElement]) =>
doc.insertString(whileStatement.lastChild.get.getTextRange.getEndOffset, ") {}")
WithEnter(3)
case None if leftParenthesis == null || rightParenthesis == null =>
val whileStartOffset = whileStatement.getTextRange.getStartOffset
var stopOffset = doc.getLineEndOffset(doc getLineNumber whileStartOffset)
val whLength = "while (".length
whileStatement.body.foreach(bl => stopOffset = Math.min(stopOffset, bl.getTextRange.getStartOffset))
        doc.replaceString(whileStartOffset, stopOffset, "while () {\n\n}")
moveToStart(editor, whileStatement)
WithReformat(whLength)
case None =>
moveToStart(editor, leftParenthesis)
        doc.insertString(rightParenthesis.getTextRange.getEndOffset, " {\n\n}")
WithReformat(1)
case Some(_) if rightParenthesis != null && whileStatement.body.isDefined =>
whileStatement.body match {
case Some(block: ScBlockExpr) =>
return placeInWholeBlock(block, editor)
case Some(expr) => moveToEnd(editor, expr)
case _ =>
}
WithReformat(0)
case Some(cond) if rightParenthesis == null =>
doc.insertString(cond.getTextRange.getEndOffset, ")")
WithReformat(0)
case _ => NoOperation
}
}
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/editor/smartEnter/fixers/ScalaWhileConditionFixer.scala
|
Scala
|
apache-2.0
| 2,544 |
/**
* Copyright (C) 2013 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.persistence.ddl
import org.junit.Test
import org.orbeon.oxf.fr.persistence.Persistence
import org.orbeon.oxf.fr.persistence.db._
import org.orbeon.oxf.fr.persistence.relational.Provider
import org.orbeon.oxf.fr.persistence.relational.Provider._
import org.orbeon.oxf.test.ResourceManagerTestBase
import org.orbeon.oxf.util.CollectionUtils._
import org.orbeon.oxf.util.IOUtils._
import org.orbeon.oxf.util.{IndentedLogger, LoggerFactory, Logging}
import org.scalatest.junit.AssertionsForJUnit
/**
* Test the DDL we provide to create and update databases.
*/
class DDLTest extends ResourceManagerTestBase with AssertionsForJUnit with Logging {
private implicit val Logger = new IndentedLogger(LoggerFactory.createLogger(classOf[DDLTest]), true)
case class TableMeta(tableName: String, colsMeta: Seq[ColMeta])
case class ColMeta(colName: String, meta: Set[ColKeyVal])
case class ColKeyVal(key: String, value: AnyRef)
  /**
   * Runs the SQL and returns information about the tables as defined in the database, normalized into
   * provider-independent `TableMeta`/`ColMeta` values so that schemas produced by different scripts can be compared.
   */
private def sqlToTableInfo(provider: Provider, sql: Seq[String]): List[TableMeta] = {
Connect.withNewDatabase(provider) { connection ⇒
val statement = connection.createStatement
SQL.executeStatements(provider, statement, sql)
val query = provider match {
      // Column order can differ between providers (it is "non-relevant" on Oracle, for instance),
      // so the columns are sorted by name further below before comparison.
case MySQL ⇒
""" SELECT *
| FROM information_schema.columns
| WHERE table_name = ?
| AND table_schema = DATABASE()
| ORDER BY ordinal_position"""
case PostgreSQL ⇒
""" SELECT *
| FROM information_schema.columns
| WHERE table_name = ?
| ORDER BY ordinal_position"""
case provider ⇒
throw new IllegalArgumentException(s"unsupported provider `${provider.pathToken}`")
}
Connect.getTableNames(provider, connection).map { tableName ⇒
useAndClose(connection.prepareStatement(query.stripMargin)) { ps ⇒
ps.setString(1, tableName)
useAndClose(ps.executeQuery()) { tableInfoResultSet ⇒
def tableInfo(): ColMeta = {
val colName = tableInfoResultSet.getString("column_name")
val interestingKeys = Set("is_nullable", "data_type")
val colKeyVals = for (metaKey ← interestingKeys) yield
ColKeyVal(metaKey, tableInfoResultSet.getObject(metaKey))
ColMeta(colName, colKeyVals)
}
val colsMeta = Iterator.iterateWhile(tableInfoResultSet.next(), tableInfo()).toList.sortBy(_.colName)
assert(colsMeta.nonEmpty)
TableMeta(tableName, colsMeta)
}
}
}
}
}
private def assertSameTable(provider: Provider, from: String, to: String): Unit = {
val name = provider.pathToken
withDebug("comparing upgrade to straight", List("provider" → name, "from" → from, "to" → to)) {
val upgrade = sqlToTableInfo(provider, SQL.read(s"$name-$from.sql") ++ SQL.read(s"$name-$from-to-$to.sql"))
val straight = sqlToTableInfo(provider, SQL.read(s"$name-$to.sql"))
assert(upgrade === straight, s"$name from $from to $to")
}
}
@Test def createAndUpgradeTest(): Unit = {
Persistence.ProvidersTestedAutomatically.foreach {
case provider @ MySQL ⇒
assertSameTable(provider, "4_3" , "4_4")
assertSameTable(provider, "4_4" , "4_5")
assertSameTable(provider, "4_5" , "4_6")
assertSameTable(provider, "4_5" , "4_6")
assertSameTable(provider, "4_6" , "2016_2")
assertSameTable(provider, "2016_2", "2016_3")
case provider @ PostgreSQL ⇒
assertSameTable(provider, "4_8" , "2016_2")
assertSameTable(provider, "2016_2" , "2016_3")
}
}
}
|
brunobuzzi/orbeon-forms
|
form-runner/jvm/src/db/scala/org/orbeon/oxf/fr/persistence/ddl/DDLTest.scala
|
Scala
|
lgpl-2.1
| 4,826 |
package metal
/** Non-null pointer on a container element.
*
* This class is a value class only in Scala 2.11, due to bugs in the generation
* of bridge methods when overloading methods that take/return a value class.
*/
final class VPtr[C <: Pointable with Singleton](val raw: Long) extends PtrVersions.Base { lhs =>
override def toString = s"VPtr($raw)"
@inline final def isNull: Boolean = false
@inline final def nonNull: Boolean = true
/** Comparison method. */
def compare(rhs: VPtr[C]): Int = spire.std.long.LongAlgebra.compare(lhs.raw, rhs.raw)
def min(rhs: VPtr[C]): VPtr[C] = if (lhs.compare(rhs) < 0) lhs else rhs
def max(rhs: VPtr[C]): VPtr[C] = if (lhs.compare(rhs) > 0) lhs else rhs
}
object VPtr {
@inline final def apply(c: Pointable, raw: Long): VPtr[c.type] = new VPtr[c.type](raw)
@inline final def apply[C <: Pointable with Singleton](raw: Long): VPtr[C] = new VPtr[C](raw)
// implicit conversions for valid pointers to containers with differing capabilities
implicit def elements1[C <: Elements1[E1] with Singleton, E1](vPtr: VPtr[C with Elements1[E1]]): Elements1VPtr[C, E1] = new Elements1VPtr[C, E1](vPtr.raw)
implicit def elements2[C <: Elements2[E2] with Singleton, E2](vPtr: VPtr[C with Elements2[E2]]): Elements2VPtr[C, E2] = new Elements2VPtr[C, E2](vPtr.raw)
implicit def elements3[C <: Elements3[E3] with Singleton, E3](vPtr: VPtr[C with Elements3[E3]]): Elements3VPtr[C, E3] = new Elements3VPtr[C, E3](vPtr.raw)
implicit def keys[C <: Keys[K] with Singleton, K](vPtr: VPtr[C with Keys[K]]): KeysVPtr[C, K] = new KeysVPtr[C, K](vPtr.raw)
implicit def nextable[C <: Nextable with Singleton](vPtr: VPtr[C]): NextableVPtr[C] = new NextableVPtr[C](vPtr.raw)
implicit def removable[C <: Removable with Singleton](vPtr: VPtr[C]): RemovableVPtr[C] = new RemovableVPtr[C](vPtr.raw)
implicit def updatable[C <: Updatable[V] with Singleton, V](vPtr: VPtr[C with Updatable[V]]): UpdatableVPtr[C, V] = new UpdatableVPtr[C, V](vPtr.raw)
implicit def updatable1[C <: Updatable1[V1] with Singleton, V1](vPtr: VPtr[C with Updatable1[V1]]): Updatable1VPtr[C, V1] = new Updatable1VPtr[C, V1](vPtr.raw)
implicit def updatable2[C <: Updatable2[V2] with Singleton, V2](vPtr: VPtr[C with Updatable2[V2]]): Updatable2VPtr[C, V2] = new Updatable2VPtr[C, V2](vPtr.raw)
implicit def values[C <: Values[V] with Singleton, V](vPtr: VPtr[C with Values[V]]): ValuesVPtr[C, V] = new ValuesVPtr[C, V](vPtr.raw)
implicit def values1[C <: Values1[V1] with Singleton, V1](vPtr: VPtr[C with Values1[V1]]): Values1VPtr[C, V1] = new Values1VPtr[C, V1](vPtr.raw)
implicit def values2[C <: Values2[V2] with Singleton, V2](vPtr: VPtr[C with Values2[V2]]): Values2VPtr[C, V2] = new Values2VPtr[C, V2](vPtr.raw)
}
// capability-based valid pointer types
final class Elements1VPtr[C <: Elements1[E1] with Singleton, E1](val raw: Long) extends AnyVal {
final def element: E1 = macro macros.VPtrOps.element1[C, E1]
final def element1: E1 = macro macros.VPtrOps.element1[C, E1]
}
final class Elements2VPtr[C <: Elements2[E2] with Singleton, E2](val raw: Long) extends AnyVal {
final def element2: E2 = macro macros.VPtrOps.element2[C, E2]
}
final class Elements3VPtr[C <: Elements3[E3] with Singleton, E3](val raw: Long) extends AnyVal {
final def element3: E3 = macro macros.VPtrOps.element3[C, E3]
}
final class KeysVPtr[C <: Keys[K] with Singleton, K](val raw: Long) extends AnyVal {
final def key: K = macro macros.VPtrOps.key[C, K]
}
final class NextableVPtr[C <: Nextable with Singleton](val raw: Long) extends AnyVal {
final def next: Ptr[C] = macro macros.VPtrOps.next[C]
}
final class RemovableVPtr[C <: Removable with Singleton](val raw: Long) extends AnyVal {
final def remove(): Unit = macro macros.VPtrOps.remove[C]
final def removeAndAdvance(): Ptr[C] = macro macros.VPtrOps.removeAndAdvance[C]
}
final class UpdatableVPtr[C <: Updatable[V] with Singleton, V](val raw: Long) extends AnyVal {
final def value_=(newValue: V): Unit = macro macros.VPtrOps.update[C, V]
}
final class Updatable1VPtr[C <: Updatable1[V1] with Singleton, V1](val raw: Long) extends AnyVal {
final def value1_=(newValue1: V1): Unit = macro macros.VPtrOps.update1[C, V1]
}
final class Updatable2VPtr[C <: Updatable2[V2] with Singleton, V2](val raw: Long) extends AnyVal {
final def value2_=(newValue2: V2): Unit = macro macros.VPtrOps.update2[C, V2]
}
final class ValuesVPtr[C <: Values[V] with Singleton, V](val raw: Long) extends AnyVal {
final def value: V = macro macros.VPtrOps.value[C, V]
}
final class Values1VPtr[C <: Values1[V1] with Singleton, V1](val raw: Long) extends AnyVal {
final def value1: V1 = macro macros.VPtrOps.value1[C, V1]
}
final class Values2VPtr[C <: Values2[V2] with Singleton, V2](val raw: Long) extends AnyVal {
final def value2: V2 = macro macros.VPtrOps.value2[C, V2]
}
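// Hedged usage sketch (not part of the original file): comparing two valid pointers into the
// same container; `container` is an illustrative placeholder for any metal `Pointable` instance.
object VPtrUsageSketch {
  def smallerOf(container: Pointable)(a: VPtr[container.type], b: VPtr[container.type]): VPtr[container.type] =
    a.min(b) // delegates to VPtr.compare, which orders by the raw Long value
}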
|
denisrosset/ptrcoll
|
core/src/main/scala/metal/VPtr.scala
|
Scala
|
mit
| 4,918 |
package core.db.users
import java.util.UUID
import core.db.PostgreSQLExtensions
import core.entities.{CredentialSet, UserID, UserSecret}
import scalikejdbc._
case class CredentialsDaoEntity(userId: String, password: String, salt: String, algorithm: String)
object CredentialsDaoEntity extends SQLSyntaxSupport[CredentialsDaoEntity] {
override def tableName: String = "user_credentials"
def apply(uc: SyntaxProvider[CredentialsDaoEntity])(rs: WrappedResultSet): CredentialsDaoEntity =
apply(uc.resultName)(rs)
def apply(uc: ResultName[CredentialsDaoEntity])(rs: WrappedResultSet): CredentialsDaoEntity =
CredentialsDaoEntity(rs.string(uc.userId), rs.string(uc.password), rs.string(uc.salt), rs.string(uc.algorithm))
}
class SqlUserCredentialsDao(credentialsGenerator: UserSecret => Option[CredentialSet])
extends UserCredentialsDao with PostgreSQLExtensions {
val c = CredentialsDaoEntity.syntax("uc")
override def findUserCredentials(userId: UserID)(implicit session: DBSession): Option[CredentialSet] = {
val id = UUID.fromString(userId.id)
withSQL {
select(c.result.*).from(CredentialsDaoEntity as c).where.eq(c.userId, id)
}.map(CredentialsDaoEntity(c)).single().apply().map(toCredentialSet)
}
override def createUserCredentials(userId: UserID, secret: UserSecret)(implicit session: DBSession): Unit = {
val credentials = credentialsGenerator(secret)
.getOrElse(throw new RuntimeException("Failed to generate credential."))
val id = UUID.fromString(userId.id)
val ucc = CredentialsDaoEntity.column
withSQL {
insert
.into(CredentialsDaoEntity)
.namedValues(ucc.userId -> id,
ucc.password -> credentials.password,
ucc.salt -> credentials.salt,
ucc.algorithm -> credentials.algorithm)
}.update().apply()
}
override def updateUserCredentials(userId: UserID, secret: UserSecret)(implicit session: DBSession): Unit = {
val credentials = credentialsGenerator(secret)
.getOrElse(throw new RuntimeException("Failed to generate credential."))
val id = UUID.fromString(userId.id)
val ucc = CredentialsDaoEntity.column
withSQL {
update(CredentialsDaoEntity)
.set(ucc.password -> credentials.password,
ucc.salt -> credentials.salt,
ucc.algorithm -> credentials.algorithm)
.where
.eq(ucc.userId, id)
}.update().apply()
}
private def toCredentialSet(entity: CredentialsDaoEntity): CredentialSet = {
CredentialSet(entity.password, entity.salt, entity.algorithm)
}
}
|
lymr/fun-chat
|
fun-chat-server/src/main/scala/core/db/users/SqlUserCredentialsDao.scala
|
Scala
|
mit
| 2,635 |
/*
Copyright (c) 2010, Tobias Knerr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.openstreetmap.wiki.tttbot.config
import scala.xml
import java.io.File
import org.openstreetmap.wiki.tttbot.output.Writer
/** configuration of output targets */
class OutputConfig (
val targets : Seq[Writer[_]]
) { }
object OutputConfig {
def fromXMLDocument (root : xml.Node) : OutputConfig = {
val targetNodes = root \\ "output" \\ "target"
val targets = for (targetNode <- targetNodes) yield
Writer.fromXMLNode(targetNode, root)
    new OutputConfig(targets)
}
}
|
tordanik/TemplatesToTables
|
src/org/openstreetmap/wiki/tttbot/config/OutputConfig.scala
|
Scala
|
bsd-2-clause
| 1,818 |
package client.appstate.groups
import autowire._
import diode._
import diode.data._
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import boopickle.Default._
import client.MessageFeedback
import client.appstate.{GroupFeedbackReporting, Groups}
import client.services.AjaxClient
import shared._
import shared.entities.GroupDetail
import shared.requests.groups._
import shared.responses.groups.GroupDeleteResponse
import shared.utils.constants._
import scala.concurrent.Future
// Group actions
case object FetchGroupsToReset extends Action
case class FetchGroups(request: ReadGroupsRequest) extends Action
case class SetGroups(groups: Either[FoulkonError, (TotalGroups, List[GroupDetail])]) extends Action
case class UpdateGroup(organizationId: String, originalName: String, updatedName: String, updatedPath: String) extends Action
case class CreateGroup(organizationId: String, name: String, path: String) extends Action
case class DeleteGroup(organizationId: String, name: String) extends Action
case class UpdateGroupFeedbackReporting(feedback: Either[FoulkonError, MessageFeedback]) extends Action
case object RemoveGroupFeedbackReporting extends Action
case class UpdateTotalGroupsAndPages(totalGroups: TotalGroups) extends Action
case class UpdateSelectedPage(selectedPage: SelectedPage) extends Action
// Group handlers
class GroupHandler[M](modelRW: ModelRW[M, Pot[Groups]]) extends ActionHandler(modelRW) {
override protected def handle: PartialFunction[Any, ActionResult[M]] = {
case FetchGroupsToReset =>
effectOnly(
Effect(
AjaxClient[Api]
.readGroups(ReadGroupsRequest(offset = 0))
.call
.map(SetGroups)
) >> Effect(
Future(UpdateSelectedPage(0))
)
)
case FetchGroups(request) =>
effectOnly(
Effect(
AjaxClient[Api]
.readGroups(request)
.call
.map(SetGroups)
)
)
case SetGroups(groups) =>
groups match {
case rightResult @ Right((total, _)) =>
updated(
Ready(Groups(rightResult.map(_._2))),
Effect(Future(UpdateTotalGroupsAndPages(total)))
)
case leftResult @ Left(_) =>
updated(
Ready(Groups(leftResult.map(_._2))),
Effect(Future(UpdateTotalGroupsAndPages(0)))
>> Effect(Future(UpdateSelectedPage(0)))
)
}
case UpdateGroup(organizationId, originalName, updatedName, updatedPath) =>
val updateRequest = UpdateGroupRequest(
UpdateGroupRequestPathParams(organizationId, originalName),
UpdateGroupRequestBody(updatedName, updatedPath)
)
effectOnly(
Effect(
AjaxClient[Api]
.updateGroup(updateRequest)
.call
.map {
case Left(foulkonError) => UpdateGroupFeedbackReporting(Left(foulkonError))
case Right(GroupDetail(_, nameg, _, _, _, _, _)) => UpdateGroupFeedbackReporting(Right(s"group $nameg updated successfully!"))
}
)
)
case CreateGroup(organizationId, name, path) =>
val createRequest = CreateGroupRequest(
CreateGroupRequestPathParams(organizationId),
CreateGroupRequestBody(name, path)
)
effectOnly(
Effect(
AjaxClient[Api]
.createGroup(createRequest)
.call
.map {
case Left(foulkonError) => UpdateGroupFeedbackReporting(Left(foulkonError))
case Right(GroupDetail(_, nameg, _, _, _, _, _)) => UpdateGroupFeedbackReporting(Right(s"group $nameg created successfully!"))
}
)
)
case DeleteGroup(organizationId, name) =>
effectOnly(
Effect(
AjaxClient[Api]
.deleteGroup(organizationId, name)
.call
.map {
case Left(foulkonError) => UpdateGroupFeedbackReporting(Left(foulkonError))
case Right(GroupDeleteResponse(org, nam)) => UpdateGroupFeedbackReporting(Right(s"Group $nam with org $org deleted successfully!"))
}
)
)
}
}
class GroupFeedbackHandler[M](modelRW: ModelRW[M, Option[GroupFeedbackReporting]]) extends ActionHandler(modelRW) {
override protected def handle: PartialFunction[Any, ActionResult[M]] = {
case UpdateGroupFeedbackReporting(feedback) =>
updated(Some(GroupFeedbackReporting(feedback)), Effect(Future(FetchGroupsToReset)))
case RemoveGroupFeedbackReporting =>
updated(
None
)
}
}
class GroupPagesAndTotalHandler[M](modelRW: ModelRW[M, (TotalGroups, TotalPages, SelectedPage)]) extends ActionHandler(modelRW) {
override protected def handle: PartialFunction[Any, ActionResult[M]] = {
case UpdateTotalGroupsAndPages(totalGroups) =>
val totalPages = (totalGroups.toFloat / PageSize.toFloat).ceil.toInt
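      // e.g. with PageSize = 10 (illustrative value; the real constant lives in shared.utils.constants),
      // 23 groups -> ceil(23 / 10.0) = 3 pages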
val stateSelectedPage = modelRW()._3
updated((totalGroups, totalPages, stateSelectedPage))
case UpdateSelectedPage(selectedPage) =>
updated(modelRW().copy(_3 = selectedPage))
}
}
|
beikern/foulkon-ui
|
client/src/main/scala/client/appstate/groups/DiodeGroup.scala
|
Scala
|
apache-2.0
| 5,643 |
package lore.compiler.core
import lore.compiler.syntax.Node.Index
/**
* A position identifies a code location across a whole Lore project.
*/
case class Position(fragment: Fragment, startIndex: Index, endIndex: Index) {
def <(other: Position): Boolean = {
this.fragment.name < other.fragment.name || (this.fragment.name == other.fragment.name && this.startIndex < other.startIndex)
}
  /**
   * The pretty index used when printing the position.
   *
   * This is a lazy value because computing it eagerly would have a heavy impact on parsing performance.
   */
lazy val prettyIndex: String = fragment.input.prettyIndex(startIndex)
/**
* The line and column numbers of the position as 1-based indices.
*/
lazy val (startLine, startColumn, endLine, endColumn): (Int, Int, Int, Int) = {
// Not the prettiest way to implement this, but fastparse doesn't seem to expose a line/column interface. This is
// the most convenient way to access line/column numbers, as far as I can see.
val Array(startLine, startColumn) = prettyIndex.split(":").map(Integer.parseInt)
val Array(endLine, endColumn) = fragment.input.prettyIndex(endIndex).split(":").map(Integer.parseInt)
(startLine, startColumn, endLine, endColumn)
}
/**
* The length of the position, possibly spanning multiple lines.
*/
def length: Int = endIndex - startIndex
/**
* Creates a new position that spans from the start index of this position to the end index of `end`.
*/
def to(end: Position): Position = {
if (this.fragment != end.fragment) {
throw CompilationException(s"Cannot create a spanning position from $this to $end. The positions must be located" +
s" in the same fragment!")
}
Position(this.fragment, this.startIndex, end.endIndex)
}
/**
* The actual code from the start to the end of the position.
*/
lazy val code: String = fragment.input.slice(startIndex, endIndex).strip()
/**
* The first line of `code`.
*/
  lazy val truncatedCode: String = code.takeWhile(_ != '\n')
/**
* A complete string representation of this position.
*/
override def toString: String = s"${fragment.name} ($prettyIndex)"
/**
* We have to override the equals method to incorporate the notion of a wildcard position, which is used in
* tests. A wildcard position is always equal to any other position, including itself.
*/
override def equals(obj: Any): Boolean = obj match {
case other: Position => this.eq(Position.wildcard) || other.eq(Position.wildcard) || super.equals(other)
case _ => false
}
}
object Position {
/**
* A Position that is equal to any other position. This is used in tests to make nodes equal regardless of their
* position. It should never be used by the compiler!
*/
val wildcard: Position = Position(Fragment("wildcard", ""), 0, 0)
/**
* An "internal" position that signals compiler-generated code without any sensible anchor.
*/
val internal: Position = Position(Fragment("internal (compiler-generated)", ""), 0, 0)
/**
* A position referring to an unknown fragment, for example a file that wasn't found.
*/
val unknown: Position = Position(Fragment("unknown", ""), 0, 0)
}
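// Hedged usage sketch (not part of the original file): building a position that spans two
// positions of the same fragment. The fragment name and source text are illustrative assumptions;
// the Fragment(name, input) construction mirrors Position.wildcard above.
object PositionUsageSketch {
  private val fragment = Fragment("example.lore", "let answer = 42")
  private val start = Position(fragment, 4, 10) // covers "answer"
  private val end = Position(fragment, 13, 15)  // covers "42"
  val span: Position = start.to(end)            // Position(fragment, 4, 15)
  val spanLength: Int = span.length             // 15 - 4 = 11
}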
|
marcopennekamp/lore
|
compiler/src/lore/compiler/core/Position.scala
|
Scala
|
mit
| 3,248 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import java.util.concurrent.TimeUnit
import org.apache.spark.internal.config.ConfigBuilder
import org.apache.spark.streaming.util.OpenHashMapBasedStateMap.DELTA_CHAIN_LENGTH_THRESHOLD
object StreamingConf {
private[streaming] val BACKPRESSURE_ENABLED =
ConfigBuilder("spark.streaming.backpressure.enabled")
.version("1.5.0")
.booleanConf
.createWithDefault(false)
private[streaming] val RECEIVER_MAX_RATE =
ConfigBuilder("spark.streaming.receiver.maxRate")
.version("1.0.2")
.longConf
.createWithDefault(Long.MaxValue)
private[streaming] val BACKPRESSURE_INITIAL_RATE =
ConfigBuilder("spark.streaming.backpressure.initialRate")
.version("2.0.0")
.fallbackConf(RECEIVER_MAX_RATE)
private[streaming] val BLOCK_INTERVAL =
ConfigBuilder("spark.streaming.blockInterval")
.version("0.8.0")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("200ms")
private[streaming] val RECEIVER_WAL_ENABLE_CONF_KEY =
ConfigBuilder("spark.streaming.receiver.writeAheadLog.enable")
.version("1.2.1")
.booleanConf
.createWithDefault(false)
private[streaming] val RECEIVER_WAL_CLASS_CONF_KEY =
ConfigBuilder("spark.streaming.receiver.writeAheadLog.class")
.version("1.4.0")
.stringConf
.createOptional
private[streaming] val RECEIVER_WAL_ROLLING_INTERVAL_CONF_KEY =
ConfigBuilder("spark.streaming.receiver.writeAheadLog.rollingIntervalSecs")
.version("1.4.0")
.intConf
.createWithDefault(60)
private[streaming] val RECEIVER_WAL_MAX_FAILURES_CONF_KEY =
ConfigBuilder("spark.streaming.receiver.writeAheadLog.maxFailures")
.version("1.2.0")
.intConf
.createWithDefault(3)
private[streaming] val RECEIVER_WAL_CLOSE_AFTER_WRITE_CONF_KEY =
ConfigBuilder("spark.streaming.receiver.writeAheadLog.closeFileAfterWrite")
.version("1.6.0")
.booleanConf
.createWithDefault(false)
private[streaming] val DRIVER_WAL_CLASS_CONF_KEY =
ConfigBuilder("spark.streaming.driver.writeAheadLog.class")
.version("1.4.0")
.stringConf
.createOptional
private[streaming] val DRIVER_WAL_ROLLING_INTERVAL_CONF_KEY =
ConfigBuilder("spark.streaming.driver.writeAheadLog.rollingIntervalSecs")
.version("1.4.0")
.intConf
.createWithDefault(60)
private[streaming] val DRIVER_WAL_MAX_FAILURES_CONF_KEY =
ConfigBuilder("spark.streaming.driver.writeAheadLog.maxFailures")
.version("1.4.0")
.intConf
.createWithDefault(3)
private[streaming] val DRIVER_WAL_CLOSE_AFTER_WRITE_CONF_KEY =
ConfigBuilder("spark.streaming.driver.writeAheadLog.closeFileAfterWrite")
.version("1.6.0")
.booleanConf
.createWithDefault(false)
private[streaming] val DRIVER_WAL_BATCHING_CONF_KEY =
ConfigBuilder("spark.streaming.driver.writeAheadLog.allowBatching")
.version("1.6.0")
.booleanConf
.createWithDefault(true)
private[streaming] val DRIVER_WAL_BATCHING_TIMEOUT_CONF_KEY =
ConfigBuilder("spark.streaming.driver.writeAheadLog.batchingTimeout")
.version("1.6.0")
.longConf
.createWithDefault(5000)
private[streaming] val STREAMING_UNPERSIST =
ConfigBuilder("spark.streaming.unpersist")
.version("0.9.0")
.booleanConf
.createWithDefault(true)
private[streaming] val STOP_GRACEFULLY_ON_SHUTDOWN =
ConfigBuilder("spark.streaming.stopGracefullyOnShutdown")
.version("1.4.0")
.booleanConf
.createWithDefault(false)
private[streaming] val UI_RETAINED_BATCHES =
ConfigBuilder("spark.streaming.ui.retainedBatches")
.version("1.0.0")
.intConf
.createWithDefault(1000)
private[streaming] val SESSION_BY_KEY_DELTA_CHAIN_THRESHOLD =
ConfigBuilder("spark.streaming.sessionByKey.deltaChainThreshold")
.version("1.6.0")
.intConf
.createWithDefault(DELTA_CHAIN_LENGTH_THRESHOLD)
private[streaming] val BACKPRESSURE_RATE_ESTIMATOR =
ConfigBuilder("spark.streaming.backpressure.rateEstimator")
.version("1.5.0")
.stringConf
.createWithDefault("pid")
private[streaming] val BACKPRESSURE_PID_PROPORTIONAL =
ConfigBuilder("spark.streaming.backpressure.pid.proportional")
.version("1.5.0")
.doubleConf
.createWithDefault(1.0)
private[streaming] val BACKPRESSURE_PID_INTEGRAL =
ConfigBuilder("spark.streaming.backpressure.pid.integral")
.version("1.5.0")
.doubleConf
.createWithDefault(0.2)
private[streaming] val BACKPRESSURE_PID_DERIVED =
ConfigBuilder("spark.streaming.backpressure.pid.derived")
.version("1.5.0")
.doubleConf
.createWithDefault(0.0)
private[streaming] val BACKPRESSURE_PID_MIN_RATE =
ConfigBuilder("spark.streaming.backpressure.pid.minRate")
.version("1.5.0")
.doubleConf
.createWithDefault(100)
private[streaming] val CONCURRENT_JOBS =
ConfigBuilder("spark.streaming.concurrentJobs")
.version("0.7.0")
.intConf
.createWithDefault(1)
private[streaming] val GRACEFUL_STOP_TIMEOUT =
ConfigBuilder("spark.streaming.gracefulStopTimeout")
.version("1.0.0")
.timeConf(TimeUnit.MILLISECONDS)
.createOptional
private[streaming] val MANUAL_CLOCK_JUMP =
ConfigBuilder("spark.streaming.manualClock.jump")
.version("0.7.0")
.longConf
.createWithDefault(0)
}
|
shaneknapp/spark
|
streaming/src/main/scala/org/apache/spark/streaming/StreamingConf.scala
|
Scala
|
apache-2.0
| 6,279 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.{MockFrs102AccountsRetriever, AccountsMoneyValidationFixture}
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
class AC161Spec extends AccountsMoneyValidationFixture[Frs102AccountsBoxRetriever] with MockFrs102AccountsRetriever {
testAccountsMoneyValidationWithMin("AC161",0, AC161.apply)
}
|
liquidarmour/ct-calculations
|
src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC161Spec.scala
|
Scala
|
apache-2.0
| 1,003 |