code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5-1M) |
---|---|---|---|---|---|
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.nn.{Sequential => TSequential}
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, DataFormat, Activity}
import com.intel.analytics.bigdl.nn.keras.Pooling1D
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.Net
import com.intel.analytics.zoo.pipeline.api.keras.layers.utils.KerasUtils
import scala.reflect.ClassTag
/**
* Applies max pooling operation for temporal data.
* The input of this layer should be 3D.
*
* When you use this layer as the first layer of a model, you need to provide the argument
* inputShape (a Single Shape, does not include the batch dimension).
*
* @param poolLength Size of the region to which max pooling is applied. Integer. Default is 2.
* @param stride Factor by which to downscale. Integer, or -1. 2 will halve the input.
* If -1, it will default to poolLength. Default is -1.
* @param borderMode Either 'valid' or 'same'. Default is 'valid'.
* @param inputShape A Single Shape, does not include the batch dimension.
* @tparam T Numeric type of parameter(e.g. weight, bias). Only support float/double now
*/
class MaxPooling1D[T: ClassTag](
override val poolLength: Int = 2,
override val stride: Int = -1,
override val borderMode: String = "valid",
override val inputShape: Shape = null,
val pad: Int = 0)(implicit ev: TensorNumeric[T])
extends Pooling1D[T](
poolLength, stride, borderMode, inputShape) with Net {
override def doBuild(inputShape: Shape): AbstractModule[Activity, Activity, T] = {
val input = inputShape.toSingle().toArray
val pads = KerasUtils.getPadsFromBorderMode(borderMode, if (pad == 0) {
null
} else {
Array(pad, 0)
})
val model = TSequential[T]()
model.add(com.intel.analytics.bigdl.nn.Reshape(Array(input(1), 1, input(2)), Some(true)))
val layer = SpatialMaxPooling(
kW = 1,
kH = poolLength,
dW = 1,
dH = strideValue,
padW = pads._2,
padH = pads._1,
format = DataFormat.NHWC)
model.add(layer)
model.add(com.intel.analytics.bigdl.nn.Squeeze(3))
model.asInstanceOf[AbstractModule[Activity, Activity, T]]
}
}
object MaxPooling1D {
def apply[@specialized(Float, Double) T: ClassTag](
poolLength: Int = 2,
stride: Int = -1,
borderMode: String = "valid",
inputShape: Shape = null,
pads: Int = 0)(implicit ev: TensorNumeric[T]): MaxPooling1D[T] = {
new MaxPooling1D[T](poolLength, stride, borderMode, inputShape, pads)
}
}
| intel-analytics/analytics-zoo | zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/MaxPooling1D.scala | Scala | apache-2.0 | 3,406 |
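For readers unfamiliar with the Keras-style API, the sketch below shows how the layer documented above might be used. It assumes the Analytics Zoo `Sequential` model (`com.intel.analytics.zoo.pipeline.api.keras.models.Sequential`), which is not part of this file, and an invented input shape; treat it as a hedged illustration rather than the project's own example.

```scala
// Minimal usage sketch for MaxPooling1D (assumed Sequential API, invented shape).
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.layers.MaxPooling1D
import com.intel.analytics.zoo.pipeline.api.keras.models.Sequential

object MaxPooling1DSketch {
  def main(args: Array[String]): Unit = {
    val model = Sequential[Float]()
    // Input is 3D: (batch, steps, features); inputShape omits the batch dimension.
    model.add(MaxPooling1D[Float](poolLength = 2, inputShape = Shape(8, 16)))
    // With stride = -1 the stride defaults to poolLength, so the 8 steps pool down to 4.
  }
}
```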
package cpup.mc.oldenMagic.api.oldenLanguage.textParsing
class InvalidTransformException(msg: String) extends Exception(msg)
| CoderPuppy/oldenmagic-mc | src/main/scala/cpup/mc/oldenMagic/api/oldenLanguage/textParsing/InvalidTransformException.scala | Scala | mit | 125 |
package chapter13
/**
 * Chapter 13: Packages and Imports
 *
 * Especially when writing large programs, it is important to minimize coupling.
 * We do this by modularizing: every module has an inside (its implementation) and an
 * outside (its interface). When the impact of a change is confined to the outside
 * (the interface), the work stays clean.
 *
 * Chapter 13 describes several constructs that help you write programs in a modular style:
 * - putting code into packages
 * - bringing outside names in with imports
 * - controlling access with access modifiers
 *
 * It looks similar to Java, but the Scala side is more consistent.
 *
 * 13.1 Putting code into packages
 *
 * Scala code lives inside the Java platform's global package hierarchy. Since Scala code
 * is part of the Java ecosystem, when you pick package names you will publish, it is best
 * to follow the Java convention of using your domain name in reverse order.
 *
 * Several packages can be placed in one file as shown below, and the test code can be
 * kept in a separate package.
 *
 */
/*package bobsrockets {
package navigation {
class Navigator
package tests {
class NavigatorSuite
}
}
}*/
| seraekim/srkim-lang-scala | src/main/java/chapter13/c13_i01.scala | Scala | bsd-3-clause | 1,352 |
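The commented-out snippet above only shows nesting, so here is a slightly fuller, hypothetical example touching all three topics the chapter lists (nested packages, an import, an access modifier). All names are invented.

```scala
// Hypothetical illustration: nested packages, an import, and an access modifier.
package bobsrockets {
  package navigation {
    // Visible anywhere inside bobsrockets, hidden from the outside.
    private[bobsrockets] class Navigator {
      def currentCourse: Int = 42
    }
  }
  package launch {
    import bobsrockets.navigation.Navigator

    object Launcher {
      val nav = new Navigator // allowed because of private[bobsrockets]
    }
  }
}
```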
package cwe.scala.library.reasoning
/**
* Represents a choice on options of type T
*/
trait Choice[T] {
/**
* Adopted options of type T
*/
def adopted: List[T]
/**
* Rejected options of type T
*/
def rejected: List[T]
}
| wwwigii-system/research | cwe-scala-library/src/cwe/scala/library/reasoning/Choice.scala | Scala | gpl-3.0 | 234 |
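For context, a minimal concrete implementation of the `Choice` trait above could look like the following; the case class, helper, and example values are invented and not part of the library.

```scala
// Hypothetical concrete Choice: partition candidates into adopted and rejected.
import cwe.scala.library.reasoning.Choice

case class SimpleChoice[T](adopted: List[T], rejected: List[T]) extends Choice[T]

object SimpleChoice {
  def partition[T](options: List[T])(keep: T => Boolean): SimpleChoice[T] = {
    val (in, out) = options.partition(keep)
    SimpleChoice(in, out)
  }
}

// SimpleChoice.partition(List(1, 2, 3, 4, 5))(_ % 2 == 0)
//   == SimpleChoice(List(2, 4), List(1, 3, 5))
```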
/*
* Copyright 2007-2010 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb {
package builtin {
package snippet {
import _root_.net.liftweb.http._
import _root_.scala.xml._
import _root_.net.liftweb.util.Helpers._
import _root_.net.liftweb.util.Log
import S._
import _root_.net.liftweb.common.{Box, Full, Empty}
/**
* This built in snippet renders messages (Errors, Warnings, Notices) in a <i>div</i>.
* Typically it is used in templates as a place holder for any messages that are <b>not</b> associated with an ID.
* Setting the attribute <i>showAll</i> to <i>true</i> will render all messages, with and without an ID.
* This will lead to duplicate messages if additionally the <i>Msg</i> built in snippet is used to show
* messages associated with an ID.
*
* E.g. (child nodes are optional)
* <pre>
* <lift:Msgs showAll="false">
* <lift:error_msg>Error! The details are:</lift:error_msg>
* <lift:error_class>errorBox</lift:error_class>
* <lift:warning_msg>Whoops, I had a problem:</lift:warning_msg>
* <lift:warning_class>warningBox</lift:warning_class>
* <lift:notice_msg>Note:</lift:notice_msg>
* <lift:notice_class>noticeBox</lift:notice_class>
* </lift:snippet>
* </pre>
*
*/
object Msgs extends DispatchSnippet {
def dispatch: DispatchIt = {
case _ => render
}
def render(styles: NodeSeq): NodeSeq = {
val f = if (toBoolean(attr("showAll"))) messages _
else noIdMessages _
val makeTitle: (String) => String = {text =>
Log.debug("Msgs: Default " + text + " is not rendered as the default title is now empty string")
""
}
val msgs = List((f(S.errors),
(styles \\ "error_msg"), S.??("msg.error"),
((styles \\ "error_class") ++
(styles \\ "error_msg" \\ "@class")), "error"),
(f(S.warnings),
(styles \\ "warning_msg"), S.??("msg.warning"),
((styles \\ "warning_class")++
(styles \\ "warning_msg" \\ "@class")), "warn"),
(f(S.notices),
(styles \\ "notice_msg"), S.??("msg.notice"),
((styles \\ "notice_class")) ++
(styles \\ "notice_msg" \\ "@class"), "notice")).flatMap
{
case (msg, titleList, defaultTitle, styleList, ord) =>
val title: String = titleList.toList.filter(_.prefix == "lift").
map(_.text.trim).filter(_.length > 0) headOr makeTitle(defaultTitle)
val styles = styleList.toList.map(_.text.trim)
if (!styles.isEmpty) {
ord match {
case "error" => MsgsErrorMeta(Full(AjaxMessageMeta(Full(title),
Full(styles.mkString(" ")))))
case "warn" => MsgsWarningMeta(Full(AjaxMessageMeta(Full(title),
Full(styles.mkString(" ")))))
case "notice" => MsgsNoticeMeta(Full(AjaxMessageMeta(Full(title),
Full(styles.mkString(" ")))))
}
}
msg.toList.map(e => (<li>{e}</li>) ) match {
case Nil => Nil
case msgList => val ret = (<div id={LiftRules.noticesContainerId + "_" + ord}>{title}<ul>{msgList}</ul></div>)
styles.foldLeft(ret)((xml, style) => xml % new UnprefixedAttribute("class", Text(style), Null))
}
}
<div>{msgs}</div> % ("id" -> LiftRules.noticesContainerId)
}
}
object MsgsNoticeMeta extends SessionVar[Box[AjaxMessageMeta]](Empty)
object MsgsWarningMeta extends SessionVar[Box[AjaxMessageMeta]](Empty)
object MsgsErrorMeta extends SessionVar[Box[AjaxMessageMeta]](Empty)
case class AjaxMessageMeta(title: Box[String], cssClass: Box[String])
}
}
}
| jeppenejsum/liftweb | framework/lift-base/lift-webkit/src/main/scala/net/liftweb/builtin/snippet/Msgs.scala | Scala | apache-2.0 | 4,461 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.batch.state.storehaus
import com.twitter.summingbird.batch.state.Versioning
import com.twitter.summingbird.batch.state.VersioningCheckpointStore
import com.twitter.summingbird.batch.state.VersioningCheckpointState
import com.twitter.summingbird.batch.Batcher
import com.twitter.summingbird.batch.Timestamp
object StorehausVersioningCheckpointState {
case class Config(
init: CassandraVersionStoreFactory,
startTime: Option[Timestamp],
numBatches: Long)
def apply(
init: CassandraVersionStoreFactory,
startTime: Option[Timestamp] = None,
numBatches: Long = 1)(implicit b: Batcher): VersioningCheckpointState =
StorehausVersioningCheckpointState(Config(init, startTime, numBatches))
def apply(config: Config)(implicit batcher: Batcher): VersioningCheckpointState =
new VersioningCheckpointState(new StorehausVersioningCheckpointStore(config))
}
class StorehausVersioningCheckpointStore(val config: StorehausVersioningCheckpointState.Config)(implicit batcher: Batcher)
extends VersioningCheckpointStore(config.startTime, config.numBatches) {
protected lazy val versionedStore =
new StorehausVersionTracking(config.init)
def getVersioning(): Versioning = versionedStore
}
| zirpins/summingbird | summingbird-scalding/src/main/scala/com/twitter/summingbird/batch/state/storehaus/StorehausVersioningCheckpointState.scala | Scala | apache-2.0 | 2,018 |
package pl.suder.scala.auctionHouse
import akka.actor._
import akka.event.LoggingReceive
import pl.suder.scala.auctionHouse.Message._
import scala.concurrent.duration.`package`.DurationInt
import akka.actor.SupervisorStrategy.{ Restart, Stop }
import java.util.concurrent.TimeoutException
class Notifier extends Actor {
override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 1 minute) {
case _: ActorNotFound => Restart
case _: TimeoutException => Restart
case e: Exception => Stop
}
override def receive = LoggingReceive {
case notify: Notify => context.actorOf(Props[NotifierRequest]) ! notify
}
}
| Materix/Sem7-Scala | src/main/scala/pl/suder/scala/auctionHouse/Notifier.scala | Scala | mit | 672 |
package com.sksamuel.elastic4s.searches.queries
import com.sksamuel.elastic4s.searches.queries.term.TermsLookupQueryDefinition
import org.elasticsearch.index.query.{QueryBuilders, TermsQueryBuilder}
object TermsLookupQueryBuilderFn {
def apply(q: TermsLookupQueryDefinition): TermsQueryBuilder = {
val builder = QueryBuilders.termsLookupQuery(q.field, q.termsLookup)
q.queryName.foreach(builder.queryName)
builder
}
}
| aroundus-inc/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/queries/TermsLookupQueryBuilderFn.scala | Scala | apache-2.0 | 436 |
package definiti.core.end2end.controls
import definiti.common.ast.Root
import definiti.common.program.Ko
import definiti.common.tests.{ConfigurationMock, LocationPath}
import definiti.core.ProgramResultMatchers._
import definiti.core._
import definiti.core.end2end.EndToEndSpec
import definiti.core.validation.controls.VerificationIsBooleanControl
class VerificationIsBooleanControlSpec extends EndToEndSpec {
import VerificationIsBooleanControlSpec._
"Project.generatePublicAST" should "validate a verification returning boolean" in {
val output = processFile("controls.verificationIsBoolean.nominal", configuration)
output shouldBe ok[Root]
}
it should "invalidate a verification returning number" in {
val expected = Ko[Root](
VerificationIsBooleanControl.errorNotBoolean("InvalidNumber", Constants.integer, numberLocation(3, 3, 5, 4))
)
val output = processFile("controls.verificationIsBoolean.number", configuration)
output should beResult(expected)
}
it should "invalidate a verification when condition does not return a boolean on each branch" in {
val expected = Ko[Root](
VerificationIsBooleanControl.errorNotBoolean("InvalidCondition", Constants.unit, conditionLocation(3, 3, 9, 4))
)
val output = processFile("controls.verificationIsBoolean.condition", configuration)
output should beResult(expected)
}
}
object VerificationIsBooleanControlSpec {
val configuration = ConfigurationMock().withOnlyControls(VerificationIsBooleanControl)
val numberLocation = LocationPath.control(VerificationIsBooleanControl, "number")
val conditionLocation = LocationPath.control(VerificationIsBooleanControl, "condition")
}
| definiti/definiti-core | src/test/scala/definiti/core/end2end/controls/VerificationIsBooleanControlSpec.scala | Scala | mit | 1,695 |
package models
import play.api.libs.json.Json
case class Genre(
name: String,
description: Option[String]
)
object Genre {
implicit val jsonFormat = Json.format[Genre]
}
| leanovate/book-db-sample | backends/play-scala-slick/app/models/Genre.scala | Scala | mit | 226 |
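A small, hypothetical round-trip through the implicit `jsonFormat` above (play-json assumed on the classpath; the genre values are invented):

```scala
// Hypothetical JSON round-trip for Genre using the implicit Json.format above.
import play.api.libs.json.Json
import models.Genre

object GenreJsonSketch extends App {
  val g = Genre("Science Fiction", Some("Futuristic, speculative settings"))
  val js = Json.toJson(g)
  println(js) // {"name":"Science Fiction","description":"Futuristic, speculative settings"}
  val back = js.as[Genre] // back == g; a None description is simply omitted from the JSON
}
```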
package io.datalayer.randomforest
object Main extends App {
/* ----------------------------------------
* Using the old data representation
* ---------------------------------------- */
// Generating some data
val train = dataGenerator.genLabeled(numInstances=200, numFeatures=10)
val test = dataGenerator.genLabeled(numInstances=200, numFeatures=10)
// Preparing the Extra-Trees forest
val forest = new Forest(min_samples_split=10,n_estimators=100, max_features=5)
println(forest)
// Training the model
forest.fit(train)
println("Accuracy = " + forest.predictEval(test)._2)
/* ----------------------------------------
* Using the Datalayer DataDNA
* ---------------------------------------- */
// Generating some data
val labeled = dataGenerator.genData(50,10,true)
// Preparing the Extra-Trees forest
val trees = new Forest(min_samples_split=10,n_estimators=10,max_features=5)
// Training the model
trees.fit(labeled)
// Print the (re-substitution) accuracy
val preds = trees.predict(labeled)
println("Score: " + trees.score(preds, labeled.getLabels()))
}
| 0asa/algorithm | src/main/scala/io/datalayer/randomforest/Main.scala | Scala | apache-2.0 | 1,168 |
/**
* Copyright 2015 Yahoo Inc. Licensed under the Apache License, Version 2.0
* See accompanying LICENSE file.
*/
package kafka.manager.utils
import kafka.manager.ClusterConfig
import org.scalatest.{Matchers, FunSuite}
/**
* @author hiral
*/
class TestClusterConfig extends FunSuite with Matchers {
test("invalid zk hosts") {
intercept[IllegalArgumentException] {
ClusterConfig("qa","0.8.1.1","localhost")
}
}
test("invalid name") {
intercept[IllegalArgumentException] {
ClusterConfig("qa!","0.8.1.1","localhost")
}
}
test("invalid kafka version") {
intercept[IllegalArgumentException] {
ClusterConfig("qa","0.8.1","localhost:2181")
}
}
test("case insensitive name") {
assert(ClusterConfig("QA","0.8.1.1","localhost:2181").name === "qa")
}
test("case insensitive zk hosts") {
assert(ClusterConfig("QA","0.8.1.1","LOCALHOST:2181").curatorConfig.zkConnect === "localhost:2181")
}
test("serialize and deserialize") {
val cc = ClusterConfig("qa","0.8.2-beta","localhost:2181")
val serialize: String = ClusterConfig.serialize(cc)
val deserialize = ClusterConfig.deserialize(serialize)
assert(deserialize.isSuccess === true)
cc == deserialize.get
}
test("deserialize without version") {
val cc = ClusterConfig("qa","0.8.2-beta","localhost:2181")
val serialize: String = ClusterConfig.serialize(cc)
val noverison = serialize.replace(""","kafkaVersion":"0.8.2-beta"""","")
assert(!noverison.contains("kafkaVersion"))
val deserialize = ClusterConfig.deserialize(noverison)
assert(deserialize.isSuccess === true)
cc == deserialize.get
}
}
| patricklucas/kafka-manager | test/kafka/manager/utils/TestClusterConfig.scala | Scala | apache-2.0 | 1,670 |
package org.raisercostin.own
import org.scalatest._
import org.junit.runner.RunWith
import org.junit.Assert._
import org.scalatest.junit.JUnitRunner
import org.raisercostin.tags.raw
import org.raisercostin.jedi._
import org.raisercostin.jedi.Locations
import org.joda.time.DateTime
import org.joda.time.DateTimeZone
import org.joda.time.tz.DateTimeZoneBuilder
import org.raisercostin.tags.Item
import org.raisercostin.tags.Tags
import org.raisercostin.tags.FormatAnalyser
@RunWith(classOf[JUnitRunner])
class RawTest extends FunSuite with BeforeAndAfterAll with TryValues {
test("extract exif from one file") {
val tags = raw.all(false)(Locations.classpath("MVI_2366.MOV"))
assertEquals(67, tags.size)
}
test("extract exif from one pair too") {
val tags = raw.all(true)(Locations.classpath("MVI_2366.MOV"))
assertEquals(234, tags.size)
}
test("extractor that combines MOV and THM", Tag("failed"), Tag("feature")) {
val file = Locations.classpath("MVI_2366.MOV")
val tags = raw.loadExifTags(file)
val all = tags.tags.tags.toSeq.sortBy(_._1)
println(all.mkString("\n"))
assertEquals("MOV", tags.fileExtension.get)
assertEquals("${const:MVI}_${exifFileNumberMinor}.${fileExtension}", tags.analyse(file.name).get)
assertEquals(240, all.size)
assertEquals("""compDetectedFormat
compDetectedPathFormat
dateTime
dateTimeZone
exifAEBBracketValue
exifAESetting
exifAFAreaHeights
exifAFAreaMode
exifAFAreaWidths
exifAFAreaXPositions
exifAFAreaYPositions
exifAFImageHeight
exifAFImageWidth
exifAFPoint
exifAFPointsInFocus
exifAperture
exifApertureValue
exifAudioBitrate
exifAudioBitsPerSample
exifAudioChannels
exifAudioChannels#THM
exifAudioFormat
exifAudioSampleRate
exifAudioSampleRate#THM
exifAutoExposureBracketing
exifAutoISO
exifAutoRotate
exifAvgBitrate
exifBalance
exifBitDepth
exifBitsPerSample
exifBulbDuration
exifCameraISO
exifCameraTemperature
exifCameraType
exifCanonExposureMode
exifCanonFirmwareVersion
exifCanonFlashMode
exifCanonImageHeight
exifCanonImageSize
exifCanonImageType
exifCanonImageWidth
exifCanonModelID
exifCategories
exifCircleOfConfusion
exifColorComponents
exifColorSpace
exifCompatibleBrands
exifComponentsConfiguration
exifCompressedBitsPerPixel
exifCompressorID
exifCompressorVersion
exifContinuousDrive
exifContrast
exifControlMode
exifCreateDate
exifCreateDate#THM
exifCurrentTime
exifCustomRendered
exifDateStampMode
exifDateTimeOriginal
exifDigitalZoom
exifDigitalZoomRatio
exifDirectory
exifDirectory#THM
exifDriveMode
exifDuration
exifDuration#THM
exifEasyMode
exifEncodingProcess
exifExifByteOrder
exifExifImageHeight
exifExifImageWidth
exifExifToolVersion
exifExifToolVersion#THM
exifExifVersion
exifExposureCompensation
exifExposureMode
exifExposureTime
exifFNumber
exifFOV
exifFileAccessDate
exifFileAccessDate#THM
exifFileCreateDate
exifFileCreateDate#THM
exifFileModifyDate
exifFileModifyDate#THM
exifFileName
exifFileName#THM
exifFileNumber
exifFileNumberMajor
exifFileNumberMinor
exifFilePermissions
exifFilePermissions#THM
exifFileSize
exifFileSize#THM
exifFileSource
exifFileType
exifFileType#THM
exifFirmwareRevision
exifFlash
exifFlashActivity
exifFlashBits
exifFlashExposureComp
exifFlashGuideNumber
exifFlashOutput
exifFlashpixVersion
exifFocalLength
exifFocalLength35efl
exifFocalPlaneResolutionUnit
exifFocalPlaneXResolution
exifFocalPlaneXSize
exifFocalPlaneYResolution
exifFocalPlaneYSize
exifFocalType
exifFocalUnits
exifFocusContinuous
exifFocusDistanceLower
exifFocusDistanceUpper
exifFocusMode
exifFocusRange
exifFrameCount
exifFrameRate
exifGraphicsMode
exifHandlerClass
exifHandlerType
exifHyperfocalDistance
exifISO
exifImageDescription
exifImageHeight
exifImageHeight#THM
exifImageSize
exifImageSize#THM
exifImageStabilization
exifImageUniqueID
exifImageWidth
exifImageWidth#THM
exifIntelligentContrast
exifInteropIndex
exifInteropVersion
exifLens
exifLens35efl
exifLensID
exifLensType
exifLightValue
exifMIMEType
exifMIMEType#THM
exifMacroMode
exifMajorBrand
exifMake
exifManualFlashOutput
exifMatrixStructure
exifMaxAperture
exifMaxApertureValue
exifMaxFocalLength
exifMeasuredEV
exifMediaCreateDate
exifMediaDuration
exifMediaHeaderVersion
exifMediaModifyDate
exifMediaTimeScale
exifMeteringMode
exifMinAperture
exifMinFocalLength
exifMinorVersion
exifModel
exifModifyDate
exifModifyDate#THM
exifMovieDataOffset
exifMovieDataSize
exifMovieHeaderVersion
exifMyColorMode
exifNDFilter
exifNextTrackID
exifNumAFPoints
exifOpColor
exifOpticalZoomCode
exifOrientation
exifOwnerName
exifPosterTime
exifPreferredRate
exifPreferredVolume
exifPreviewDuration
exifPreviewTime
exifPrimaryAFPoint
exifQuality
exifRecordMode
exifRelatedImageHeight
exifRelatedImageWidth
exifResolutionUnit
exifRotation
exifRotation#THM
exifSaturation
exifScaleFactor35efl
exifSceneCaptureType
exifSelectionDuration
exifSelectionTime
exifSelfTimer
exifSelfTimer2
exifSensingMethod
exifSequenceNumber
exifSharpness
exifShootingMode
exifShutterSpeed
exifShutterSpeedValue
exifSlowShutter
exifSourceImageHeight
exifSourceImageWidth
exifSpotMeteringMode
exifTargetAperture
exifTargetExposureTime
exifThumbnailImageValidArea
exifTimeScale
exifTrackCreateDate
exifTrackDuration
exifTrackHeaderVersion
exifTrackID
exifTrackLayer
exifTrackModifyDate
exifTrackVolume
exifUserComment
exifVRDOffset
exifValidAFPoints
exifVideoCodec
exifVideoFrameRate
exifWhiteBalance
exifXResolution
exifXResolution#THM
exifYCbCrPositioning
exifYCbCrSubSampling
exifYResolution
exifYResolution#THM
exifZoomSourceWidth
exifZoomTargetWidth
fileCreated
fileCreated#THM
fileExtension
fileExtension#THM
fileModification
fileModification#THM""".replaceAll("\\s+", "\n"), all.map(_._1).mkString("\n"))
//val tags2 = raw2.BestExifExtractor.extract(Locations.classpath("MVI_2366.MOV")).get.tags.toSeq.sortBy(_._1)
//println(tags.mkString("\n"))
// assertEquals(214, tags2.size)
// assertEquals(214, tags.size)
//assertEquals(tags2.mkString("\n"), tags.mkString("\n"))
}
def checkTime(expectedDateTime: DateTime, exifValue: String) =
assertEquals("Picture taken at [" + expectedDateTime + "] with timezone " + expectedDateTime.getZone() + " at " + expectedDateTime.toDateTimeISO(),
expectedDateTime.toString("yyyy:MM:dd HH:mm:ss"), exifValue)
val local = DateTimeZone.forOffsetHours(2)
def extract(file: String, discoverPairs: Boolean = true): String = Tags(raw.all(discoverPairs)(Locations.classpath(file))).interpolate("$exifCreateDate#THM|$exifCreateDate").get
//on G11
//the dailight saving time modifies the creation date (you can detect this only by comparison with other files
//mov use the utc time
//jpg and thm (associated with mov) use the local time
test("times1") {
checkTime(new DateTime(2015, 1, 9, 0, 0, 36, local), extract("time1-IMG_2384.JPG"))
}
ignore("times1-sanselan") {
checkTime(new DateTime(2015, 1, 9, 0, 0, 36, local), raw.extractor.sanselanExifExtractor(Locations.classpath("time1-IMG_2384.JPG"))("time1-IMG_2384.JPG"))
}
ignore("times1-fileAttribute") {
checkTime(new DateTime(2015, 1, 9, 0, 0, 36, local), raw.extractor.fileAttributesExtractor(Locations.classpath("time1-IMG_2384.JPG"))("time1-IMG_2384.JPG"))
}
test("times2") {
checkTime(new DateTime(2015, 1, 9, 0, 0, 42, local), extract("time2-MVI_2385.THM", false))
checkTime(new DateTime(2015, 1, 9, 0, 0, 42, local).withZone(DateTimeZone.UTC), extract("time2-MVI_2385.MOV", false))
checkTime(new DateTime(2015, 1, 9, 0, 0, 42, local), extract("time2-MVI_2385.MOV"))
}
test("times3 in utc+2 and dailight saving time") {
//println(SanselanExifExtractor.extract(Locations.classpath("time3-IMG_2386.JPG"))map(_.tags.mkString("\n")))
//checkTime(new DateTime(2015, 1, 9, 0, 1, 31, local), extract("time3-IMG_2386.JPG",SanselanExifExtractor))
//the time is affected by daylight saving
val hourWithoutDailightSavingCorrection = 1
val hourWithDailightSavingCorrection = 0
val expectedHour = hourWithoutDailightSavingCorrection //should be hourWithDailightSavingCorrection
checkTime(new DateTime(2015, 1, 9, 1, hourWithoutDailightSavingCorrection, 31, local), extract("time3-IMG_2386.JPG"))
}
test("times4 in utc+2 and dailight saving time") {
val hourWithoutDailightSavingCorrection = 1
val hourWithDailightSavingCorrection = 0
val expectedHour = hourWithoutDailightSavingCorrection //should be hourWithDailightSavingCorrection
checkTime(new DateTime(2015, 1, 9, hourWithoutDailightSavingCorrection, 1, 39, local), extract("time4-MVI_2387.THM", false))
checkTime(new DateTime(2015, 1, 9, hourWithDailightSavingCorrection, 1, 39, local).withZone(DateTimeZone.UTC), extract("time4-MVI_2387.MOV", false))
checkTime(new DateTime(2015, 1, 9, hourWithoutDailightSavingCorrection, 1, 39, local), extract("time4-MVI_2387.MOV"))
}
test("times5") {
checkTime(new DateTime(2015, 1, 9, 0, 2, 36, local), extract("time5-IMG_2388.JPG"))
}
test("times6") {
checkTime(new DateTime(2015, 1, 9, 0, 2, 46, local), extract("time6-MVI_2389.THM", false))
checkTime(new DateTime(2015, 1, 9, 0, 2, 46, local).withZone(DateTimeZone.UTC), extract("time6-MVI_2389.MOV", false))
checkTime(new DateTime(2015, 1, 9, 0, 2, 46, local), extract("time6-MVI_2389.MOV"))
}
ignore("find similar to movie image file") {
val src = Locations.classpath("time6-MVI_2389.MOV")
val tags = raw.loadExifTags(src)
val fileNameFormat = tags.analyse(src.name).get
assertEquals("time6-${const:MVI}_${exifFileNumberMinor}.${fileExtension}", fileNameFormat)
assertEquals("time6", FormatAnalyser.cleanFormat(fileNameFormat))
val variable = FormatAnalyser.cleanFormat(fileNameFormat)
val fileNamePattern = fileNameFormat.replaceAllLiterally(variable, "")
assertEquals("-${const:MVI}_${exifFileNumberMinor}.${fileExtension}", fileNamePattern)
val numberMinor = tags.fileNumberMinor.get
val fileNumber = tags.fileNumber.get
val delta = 10
val range = numberMinor - delta to numberMinor + delta
//search for a jpg that has a counter slightly before the current exifFileNumber
def nameMightContainNumberInRange(range: Range)(name: String) = range.find(x => name contains x.toString).isDefined
val files = src.asFile.parent.list.filter(_.extension.toLowerCase == "jpg").toList
//(1 to delta).find(x )
val pairs = files. //filter { file => nameMightContainNumberInRange(range)(file.name) }.
map { x => println(s"fileNumber in $x"); (x, raw.loadExifTags(x).fileNumber.map { _ - fileNumber }) }.
filter(x => x._2.getOrElse(0) != 0).
map(x => (x._1, x._2.get.abs)).
toList.sortBy(_._2)
assertEquals("", pairs.mkString("\n"))
}
}
| raisercostin/ownit | src/test/scala/org/raisercostin/own/RawTest.scala | Scala | apache-2.0 | 10,676 |
package domain.time
import java.time.temporal.ChronoUnit
import java.time.ZonedDateTime
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._
object ScheduleHelper {
def initialDelay(interval: FiniteDuration): FiniteDuration = {
initialDelay(interval, DateUtil.now())
}
def initialDelay(interval: FiniteDuration, dateTime: ZonedDateTime): FiniteDuration = {
var temp = dateTime.withHour(0).withMinute(0).withSecond(0).withNano(0)
while (temp.isBefore(dateTime)) {
temp = temp.plusSeconds(interval.toSeconds)
}
ChronoUnit.MILLIS.between(dateTime, temp).milliseconds
}
}
| rysh/scalatrader | scalatrader/app/domain/time/ScheduleHelper.scala | Scala | mit | 638 |
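To make the boundary arithmetic of `initialDelay` concrete, here is a hedged sketch; the timestamp and interval are invented, and only the overload that takes an explicit `ZonedDateTime` is exercised so no `DateUtil` clock is needed.

```scala
// Sketch: delay until the next interval boundary, counted from midnight.
import java.time.{ZoneId, ZonedDateTime}
import scala.concurrent.duration._
import domain.time.ScheduleHelper

object ScheduleHelperSketch extends App {
  // 10 seconds past midnight; with a 15-second interval the next boundary is 00:00:15.
  val now = ZonedDateTime.of(2020, 1, 1, 0, 0, 10, 0, ZoneId.of("UTC"))
  println(ScheduleHelper.initialDelay(15.seconds, now)) // 5000 milliseconds
}
```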
package tscfg.exceptions
/** Exception to indicate, that there is a problem in the definition of an
* object
*
* @param msg
* Error message
* @param cause
* What caused the error
*/
final case class ObjectDefinitionException(
private val msg: String = "",
private val cause: Throwable = None.orNull
) extends Exception(msg, cause)
| carueda/tscfg | src/main/scala/tscfg/exceptions/ObjectDefinitionException.scala | Scala | apache-2.0 | 358 |
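A hypothetical caller might raise the exception like this; the validation helper and message are invented for illustration.

```scala
// Hypothetical use of ObjectDefinitionException during validation.
import tscfg.exceptions.ObjectDefinitionException

object DefinitionChecks {
  def requireObject(isObject: Boolean, where: String): Unit =
    if (!isObject)
      throw ObjectDefinitionException(s"expected an object definition at $where")
}
```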
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.graph.scala.utils
import org.apache.flink.api.common.functions.MapFunction
import org.apache.flink.graph.Edge
@SerialVersionUID(1L)
class EdgeToTuple3Map[K, EV] extends MapFunction[Edge[K, EV], (K, K, EV)] {
override def map(value: Edge[K, EV]): (K, K, EV) = {
(value.getSource, value.getTarget, value.getValue)
}
}
| hequn8128/flink | flink-libraries/flink-gelly-scala/src/main/scala/org/apache/flink/graph/scala/utils/EdgeToTuple3Map.scala | Scala | apache-2.0 | 1,159 |
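A quick, hypothetical check of the mapper above, constructing a Gelly `Edge` directly so no Flink job is needed; the edge values are invented.

```scala
// Hypothetical check: map a single Gelly Edge to a Scala tuple.
import org.apache.flink.graph.Edge
import org.apache.flink.graph.scala.utils.EdgeToTuple3Map

object EdgeToTuple3MapSketch extends App {
  val edge = new Edge[Long, Double](1L, 2L, 0.5)
  val tuple = new EdgeToTuple3Map[Long, Double]().map(edge)
  println(tuple) // (1,2,0.5)
}
```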
package scala.tools.nsc
package backend.jvm
package opt
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import scala.tools.asm.Opcodes._
import scala.tools.partest.ASMConverters._
import scala.tools.testing.AssertUtil._
import scala.tools.testing.BytecodeTesting._
import scala.tools.testing.ClearAfterClass
@RunWith(classOf[JUnit4])
class UnreachableCodeTest extends ClearAfterClass {
// jvm-1.6 enables emitting stack map frames, which impacts the code generation wrt dead basic blocks,
// see comment in BCodeBodyBuilder
val methodOptCompiler = cached("methodOptCompiler", () => newCompiler(extraArgs = "-opt:l:method"))
val dceCompiler = cached("dceCompiler", () => newCompiler(extraArgs = "-opt:unreachable-code"))
val noOptCompiler = cached("noOptCompiler", () => newCompiler(extraArgs = "-opt:l:none"))
def assertEliminateDead(code: (Instruction, Boolean)*): Unit = {
val method = genMethod()(code.map(_._1): _*)
dceCompiler.global.genBCode.bTypes.localOpt.removeUnreachableCodeImpl(method, "C")
val nonEliminated = instructionsFromMethod(method)
val expectedLive = code.filter(_._2).map(_._1).toList
assertSameCode(nonEliminated, expectedLive)
}
@Test
def basicElimination(): Unit = {
assertEliminateDead(
Op(ACONST_NULL),
Op(ATHROW),
Op(RETURN).dead
)
assertEliminateDead(
Op(RETURN)
)
assertEliminateDead(
Op(RETURN),
Op(ACONST_NULL).dead,
Op(ATHROW).dead
)
}
@Test
def eliminateNop(): Unit = {
assertEliminateDead(
// reachable, but removed anyway.
Op(NOP).dead,
Op(RETURN),
Op(NOP).dead
)
}
@Test
def eliminateBranchOver(): Unit = {
assertEliminateDead(
Jump(GOTO, Label(1)),
Op(ACONST_NULL).dead,
Op(ATHROW).dead,
Label(1),
Op(RETURN)
)
assertEliminateDead(
Jump(GOTO, Label(1)),
Label(1),
Op(RETURN)
)
}
@Test
def deadLabelsRemain(): Unit = {
assertEliminateDead(
Op(RETURN),
Jump(GOTO, Label(1)).dead,
// not dead - labels may be referenced from other places in a classfile (eg exceptions table).
// will need a different opt to get rid of them
Label(1)
)
}
@Test
def pushPopNotEliminated(): Unit = {
assertEliminateDead(
// not dead, visited by data flow analysis.
Op(ACONST_NULL),
Op(POP),
Op(RETURN)
)
}
@Test
def nullnessNotConsidered(): Unit = {
assertEliminateDead(
Op(ACONST_NULL),
Jump(IFNULL, Label(1)),
Op(RETURN), // not dead
Label(1),
Op(RETURN)
)
}
@Test
def basicEliminationCompiler(): Unit = {
val code = "def f: Int = { return 1; 2 }"
val withDce = dceCompiler.compileInstructions(code)
assertSameCode(withDce.dropNonOp, List(Op(ICONST_1), Op(IRETURN)))
val noDce = noOptCompiler.compileInstructions(code)
// The emitted code is ICONST_1, IRETURN, ICONST_2, IRETURN. The latter two are dead.
//
// GenBCode puts the last IRETURN into a new basic block: it emits a label before the second
// IRETURN. This is an implementation detail, it may change; it affects the outcome of this test.
//
// During classfile writing with COMPUTE_FAMES (-target:jvm-1.6 or larger), the ClassfileWriter
// puts the ICONST_2 into a new basic block, because the preceding operation (IRETURN) ends
// the current block. We get something like
//
// L1: ICONST_1; IRETURN
// L2: ICONST_2 << dead
// L3: IRETURN << dead
//
// Finally, instructions in the dead basic blocks are replaced by ATHROW, as explained in
// a comment in BCodeBodyBuilder.
assertSameCode(noDce.dropNonOp, List(Op(ICONST_1), Op(IRETURN), Op(ATHROW), Op(ATHROW)))
}
@Test
def eliminateDeadCatchBlocks(): Unit = {
// the Label(1) is live: it's used in the local variable descriptor table (local variable "this" has a range from 0 to 1).
def wrapInDefault(code: Instruction*) = List(Label(0), LineNumber(1, Label(0))) ::: code.toList ::: List(Label(1))
val code = "def f: Int = { return 0; try { 1 } catch { case _: Exception => 2 } }"
val m = dceCompiler.compileMethod(code)
assertTrue(m.handlers.isEmpty) // redundant (if code is gone, handler is gone), but done once here for extra safety
assertSameCode(m.instructions,
wrapInDefault(Op(ICONST_0), Op(IRETURN)))
val code2 = "def f: Unit = { try { } catch { case _: Exception => () }; () }"
// requires fixpoint optimization of methodOptCompiler (dce alone is not enough): first the handler is eliminated, then its dead catch block.
assertSameCode(methodOptCompiler.compileInstructions(code2), wrapInDefault(Op(RETURN)))
val code3 = "def f: Unit = { try { } catch { case _: Exception => try { } catch { case _: Exception => () } }; () }"
assertSameCode(methodOptCompiler.compileInstructions(code3), wrapInDefault(Op(RETURN)))
// this example requires two iterations to get rid of the outer handler.
// the first iteration of DCE cannot remove the inner handler. then the inner (empty) handler is removed.
// then the second iteration of DCE removes the inner catch block, and then the outer handler is removed.
val code4 = "def f: Unit = { try { try { } catch { case _: Exception => () } } catch { case _: Exception => () }; () }"
assertSameCode(methodOptCompiler.compileInstructions(code4), wrapInDefault(Op(RETURN)))
}
@Test // test the dce-testing tools
def metaTest(): Unit = {
assertThrows[AssertionError](
assertEliminateDead(Op(RETURN).dead),
_.contains("Expected: List()\nActual : List(Op(RETURN))")
)
assertThrows[AssertionError](
assertEliminateDead(Op(RETURN), Op(RETURN)),
_.contains("Expected: List(Op(RETURN), Op(RETURN))\nActual : List(Op(RETURN))")
)
}
@Test
def bytecodeEquivalence(): Unit = {
assertTrue(List(VarOp(ILOAD, 1)) ===
List(VarOp(ILOAD, 2)))
assertTrue(List(VarOp(ILOAD, 1), VarOp(ISTORE, 1)) ===
List(VarOp(ILOAD, 2), VarOp(ISTORE, 2)))
// the first Op will associate 1->2, then the 2->2 will fail
assertFalse(List(VarOp(ILOAD, 1), VarOp(ISTORE, 2)) ===
List(VarOp(ILOAD, 2), VarOp(ISTORE, 2)))
// will associate 1->2 and 2->1, which is OK
assertTrue(List(VarOp(ILOAD, 1), VarOp(ISTORE, 2)) ===
List(VarOp(ILOAD, 2), VarOp(ISTORE, 1)))
assertTrue(List(Label(1), Label(2), Label(1)) ===
List(Label(2), Label(4), Label(2)))
assertTrue(List(LineNumber(1, Label(1)), Label(1)) ===
List(LineNumber(1, Label(3)), Label(3)))
assertFalse(List(LineNumber(1, Label(1)), Label(1)) ===
List(LineNumber(1, Label(3)), Label(1)))
assertTrue(List(TableSwitch(TABLESWITCH, 1, 3, Label(4), List(Label(5), Label(6))), Label(4), Label(5), Label(6)) ===
List(TableSwitch(TABLESWITCH, 1, 3, Label(9), List(Label(3), Label(4))), Label(9), Label(3), Label(4)))
assertTrue(List(FrameEntry(F_FULL, List(INTEGER, DOUBLE, Label(3)), List("java/lang/Object", Label(4))), Label(3), Label(4)) ===
List(FrameEntry(F_FULL, List(INTEGER, DOUBLE, Label(1)), List("java/lang/Object", Label(3))), Label(1), Label(3)))
}
@Test
def loadNullNothingBytecode(): Unit = {
val code =
"""class C {
| def nl: Null = null
| def nt: Nothing = throw new Error("")
| def cons(a: Any) = ()
|
| def t1 = cons(null)
| def t2 = cons(nl)
| def t3 = cons(throw new Error(""))
| def t4 = cons(nt)
|}
""".stripMargin
val c = noOptCompiler.compileClass(code)
assertSameSummary(getMethod(c, "nl"), List(ACONST_NULL, ARETURN))
assertSameSummary(getMethod(c, "nt"), List(
NEW, DUP, LDC, "<init>", ATHROW))
assertSameSummary(getMethod(c, "t1"), List(
ALOAD, ACONST_NULL, "cons", RETURN))
// GenBCode introduces POP; ACONST_NULL after loading an expression of type scala.runtime.Null$,
// see comment in BCodeBodyBuilder.adapt
assertSameSummary(getMethod(c, "t2"), List(
ALOAD, ALOAD, "nl", POP, ACONST_NULL, "cons", RETURN))
// the bytecode generated by GenBCode is ... ATHROW; INVOKEVIRTUAL C.cons; RETURN
// the ASM classfile writer creates a new basic block (creates a label) right after the ATHROW
// and replaces all instructions by NOP*; ATHROW, see comment in BCodeBodyBuilder.adapt
// NOTE: DCE is enabled by default and gets rid of the redundant code (tested below)
assertSameSummary(getMethod(c, "t3"), List(
ALOAD, NEW, DUP, LDC, "<init>", ATHROW, NOP, NOP, NOP, ATHROW))
// GenBCode introduces an ATHROW after the invocation of C.nt, see BCodeBodyBuilder.adapt
// NOTE: DCE is enabled by default and gets rid of the redundant code (tested below)
assertSameSummary(getMethod(c, "t4"), List(
ALOAD, ALOAD, "nt", ATHROW, NOP, NOP, NOP, ATHROW))
val cDCE = dceCompiler.compileClass(code)
assertSameSummary(getMethod(cDCE, "t3"), List(ALOAD, NEW, DUP, LDC, "<init>", ATHROW))
assertSameSummary(getMethod(cDCE, "t4"), List(ALOAD, ALOAD, "nt", ATHROW))
}
}
| felixmulder/scala | test/junit/scala/tools/nsc/backend/jvm/opt/UnreachableCodeTest.scala | Scala | bsd-3-clause | 9,361 |
package colang.ast.parsed.routines
import colang.ast.parsed.statement.Statement
import colang.ast.parsed.{LocalContext, Type, Variable}
import colang.issues.{Issue, Terms}
import colang.ast.raw
import colang.tokens.StaticKeyword
private[routines] object RegisterStaticVariables {
/**
* "Registers" static variables in their types and generates necessary initialization statements.
* @param types types to analyze
* @return (static variables, initialization statements, encountered issues)
*/
def registerStaticVariables(types: Seq[Type]): (Seq[Variable], Seq[Statement], Seq[Issue]) = {
val result = types flatMap { type_ =>
val initializerLocalContext = LocalContext(
Terms.Function,
expectedReturnType = Some(type_.scope.get.root.voidType))
type_.definition.toSeq flatMap { typeDef =>
typeDef.body.members flatMap {
case varsDef: raw.statement.VariablesDefinition if varsDef.specifiers.has(classOf[StaticKeyword]) =>
Seq(RegisterVariables.registerVariables(type_, initializerLocalContext, varsDef))
case _ => Seq.empty
}
}
}
val variables = result flatMap { _._1 }
val initializationStatements = result flatMap { _._2 }
val issues = result flatMap { _._3 }
(variables, initializationStatements, issues)
}
}
| psenchanka/colang | src/main/scala/colang/ast/parsed/routines/RegisterStaticVariables.scala | Scala | mit | 1,343 |
package $organization$
import akka.actor.{Props, ActorRef, ActorSystem}
import akka.io.IO
import spray.can.Http
object Boot extends App {
implicit val actorSystem = ActorSystem()
val router: ActorRef = actorSystem.actorOf(Props[RestRouter])
val port = sys.env.get("PORT").map(_.toInt).getOrElse(8080)
IO(Http) ! Http.Bind(router, interface = "0.0.0.0", port = port)
}
| polymorphic/spray-svc.g8 | src/main/g8/src/main/scala/$organization__packaged$/Boot.scala | Scala | apache-2.0 | 383 |
package dx.api
import spray.json._
case class DxWorkflowStageDesc(id: String, executable: String, name: String, input: JsValue)
// A stand in for the DxWorkflow.Stage inner class (we don't have a constructor for it)
case class DxWorkflowStage(id: String) {
def getId: String = id
def getInputReference(inputName: String): JsValue = {
JsObject(
"$dnanexus_link" -> JsObject("stage" -> JsString(id), "inputField" -> JsString(inputName))
)
}
def getOutputReference(outputName: String): JsValue = {
JsObject(
"$dnanexus_link" -> JsObject("stage" -> JsString(id), "outputField" -> JsString(outputName))
)
}
}
case class DxWorkflowDescribe(project: String,
id: String,
name: String,
folder: String,
created: Long,
modified: Long,
properties: Option[Map[String, String]],
details: Option[JsValue],
inputSpec: Option[Vector[IOParameter]],
outputSpec: Option[Vector[IOParameter]],
stages: Option[Vector[DxWorkflowStageDesc]],
title: Option[String] = None,
summary: Option[String] = None,
description: Option[String] = None,
tags: Option[Vector[String]] = None,
types: Option[Vector[String]] = None,
inputs: Option[Vector[IOParameter]] = None,
outputs: Option[Vector[IOParameter]] = None)
extends DxObjectDescribe
case class DxWorkflow(dxApi: DxApi, id: String, project: Option[DxProject]) extends DxExecutable {
private def parseStages(jsv: JsValue): Vector[DxWorkflowStageDesc] = {
val jsVec = jsv match {
case JsArray(a) => a
case other => throw new Exception(s"Malfored JSON ${other}")
}
jsVec.map { jsv2 =>
val stage = jsv2.asJsObject.getFields("id", "executable", "name", "input") match {
case Seq(JsString(id), JsString(exec), JsString(name), input) =>
DxWorkflowStageDesc(id, exec, name, input)
case other =>
throw new Exception(s"Malfored JSON ${other}")
}
stage
}
}
def describe(fields: Set[Field.Value] = Set.empty): DxWorkflowDescribe = {
val projSpec = DxObject.maybeSpecifyProject(project)
// TODO: working around an API bug where describing a workflow and requesting inputSpec
// and outputSpec as part of fields results in a 500 error. Instead, request default fields,
// which includes inputSpec and outputSpec.
val defaultFields = Set(Field.Project,
Field.Id,
Field.Name,
Field.Folder,
Field.Created,
Field.Modified
//Field.InputSpec,
//Field.OutputSpec
)
val allFields = fields ++ defaultFields
val descJs = dxApi.workflowDescribe(
id,
projSpec
+ ("fields" -> DxObject.requestFields(allFields))
+ ("defaultFields" -> JsBoolean(true))
)
val desc = descJs.getFields("project",
"id",
"name",
"folder",
"created",
"modified",
"inputSpec",
"outputSpec") match {
case Seq(JsString(projectId),
JsString(id),
JsString(name),
JsString(folder),
JsNumber(created),
JsNumber(modified),
JsArray(inputSpec),
JsArray(outputSpec)) =>
DxWorkflowDescribe(
projectId,
id,
name,
folder,
created.toLong,
modified.toLong,
None,
None,
Some(IOParameter.parseIOSpec(dxApi, inputSpec)),
Some(IOParameter.parseIOSpec(dxApi, outputSpec)),
None
)
case _ =>
throw new Exception(s"Malformed JSON ${descJs}")
}
val descFields: Map[String, JsValue] = descJs.fields
val details = descFields.get("details")
val props = descFields.get("properties").map(DxObject.parseJsonProperties)
val stages = descFields.get("stages").map(parseStages)
val description = descFields.get("description").flatMap(unwrapString)
val summary = descFields.get("summary").flatMap(unwrapString)
val title = descFields.get("title").flatMap(unwrapString)
val types = descFields.get("types").flatMap(unwrapStringArray)
val tags = descFields.get("tags").flatMap(unwrapStringArray)
val inputs = descFields.get("inputs") match {
case Some(JsArray(inps)) => Some(IOParameter.parseIOSpec(dxApi, inps))
case _ => None
}
val outputs = descFields.get("outputs") match {
case Some(JsArray(outs)) => Some(IOParameter.parseIOSpec(dxApi, outs))
case _ => None
}
desc.copy(
details = details,
properties = props,
stages = stages,
description = description,
summary = summary,
title = title,
types = types,
tags = tags,
inputs = inputs,
outputs = outputs
)
}
def unwrapString(jsValue: JsValue): Option[String] = {
jsValue match {
case JsString(value) => Some(value)
case _ => None
}
}
def unwrapStringArray(jsValue: JsValue): Option[Vector[String]] = {
jsValue match {
case JsArray(array) => Some(array.flatMap(unwrapString))
case _ => None
}
}
def close(): Unit = {
dxApi.workflowClose(id)
}
def newRun(name: String,
input: JsValue,
delayWorkspaceDestruction: Option[Boolean] = None): DxAnalysis = {
val req = Map("name" -> JsString(name), "input" -> input.asJsObject)
val dwd = delayWorkspaceDestruction match {
case Some(true) => Map("delayWorkspaceDestruction" -> JsTrue)
case _ => Map.empty
}
val repJs = dxApi.workflowRun(id, req ++ dwd)
repJs.fields.get("id") match {
case None =>
throw new Exception("id not returned in response")
case Some(JsString(x)) =>
dxApi.analysis(x)
case Some(other) =>
throw new Exception(s"malformed json response ${other}")
}
}
}
| dnanexus-rnd/dxWDL | src/main/scala/dx/api/DxWorkflow.scala | Scala | apache-2.0 | 6,734 |
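To show the shape of the `$dnanexus_link` objects built by `DxWorkflowStage` above, a small hedged sketch; the stage id and field name are invented, and the pretty-printed output in the comment is approximate.

```scala
// Hypothetical illustration of the stage references produced by DxWorkflowStage.
import dx.api.DxWorkflowStage

object DxWorkflowStageSketch extends App {
  val stage = DxWorkflowStage("stage-0")
  println(stage.getInputReference("reads").prettyPrint)
  // {
  //   "$dnanexus_link": {
  //     "stage": "stage-0",
  //     "inputField": "reads"
  //   }
  // }
}
```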
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.parser
import java.sql.{Date, Timestamp}
import java.time.{Duration, LocalDateTime, Period}
import java.util.concurrent.TimeUnit
import scala.language.implicitConversions
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAttribute, _}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{First, Last}
import org.apache.spark.sql.catalyst.util.{DateTimeTestUtils, IntervalUtils}
import org.apache.spark.sql.catalyst.util.DateTimeConstants._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.TimestampTypes
import org.apache.spark.sql.types._
import org.apache.spark.sql.types.{DayTimeIntervalType => DT, YearMonthIntervalType => YM}
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
/**
* Test basic expression parsing.
* If the type of an expression is supported it should be tested here.
*
* Please note that some of the expressions test don't have to be sound expressions, only their
* structure needs to be valid. Unsound expressions should be caught by the Analyzer or
* CheckAnalysis classes.
*/
class ExpressionParserSuite extends AnalysisTest {
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
implicit def stringToUTF8Str(str: String): UTF8String = UTF8String.fromString(str)
val defaultParser = CatalystSqlParser
def assertEqual(
sqlCommand: String,
e: Expression,
parser: ParserInterface = defaultParser): Unit = {
compareExpressions(parser.parseExpression(sqlCommand), e)
}
private def intercept(sqlCommand: String, messages: String*): Unit =
interceptParseException(defaultParser.parseExpression)(sqlCommand, messages: _*)()
private def intercept(sqlCommand: String, errorClass: Option[String], messages: String*): Unit =
interceptParseException(defaultParser.parseExpression)(sqlCommand, messages: _*)(errorClass)
def assertEval(
sqlCommand: String,
expect: Any,
parser: ParserInterface = defaultParser): Unit = {
assert(parser.parseExpression(sqlCommand).eval() === expect)
}
test("star expressions") {
// Global Star
assertEqual("*", UnresolvedStar(None))
// Targeted Star
assertEqual("a.b.*", UnresolvedStar(Option(Seq("a", "b"))))
}
// NamedExpression (Alias/Multialias)
test("named expressions") {
// No Alias
val r0 = 'a
assertEqual("a", r0)
// Single Alias.
val r1 = 'a as "b"
assertEqual("a as b", r1)
assertEqual("a b", r1)
// Multi-Alias
assertEqual("a as (b, c)", MultiAlias('a, Seq("b", "c")))
assertEqual("a() (b, c)", MultiAlias('a.function(), Seq("b", "c")))
// Numeric literals without a space between the literal qualifier and the alias, should not be
// interpreted as such. An unresolved reference should be returned instead.
// TODO add the JIRA-ticket number.
assertEqual("1SL", Symbol("1SL"))
// Aliased star is allowed.
assertEqual("a.* b", UnresolvedStar(Option(Seq("a"))) as 'b)
}
test("binary logical expressions") {
// And
assertEqual("a and b", 'a && 'b)
// Or
assertEqual("a or b", 'a || 'b)
// Combination And/Or check precedence
assertEqual("a and b or c and d", ('a && 'b) || ('c && 'd))
assertEqual("a or b or c and d", 'a || 'b || ('c && 'd))
// Multiple AND/OR get converted into a balanced tree
assertEqual("a or b or c or d or e or f", (('a || 'b) || 'c) || (('d || 'e) || 'f))
assertEqual("a and b and c and d and e and f", (('a && 'b) && 'c) && (('d && 'e) && 'f))
}
test("long binary logical expressions") {
def testVeryBinaryExpression(op: String, clazz: Class[_]): Unit = {
val sql = (1 to 1000).map(x => s"$x == $x").mkString(op)
val e = defaultParser.parseExpression(sql)
assert(e.collect { case _: EqualTo => true }.size === 1000)
assert(e.collect { case x if clazz.isInstance(x) => true }.size === 999)
}
testVeryBinaryExpression(" AND ", classOf[And])
testVeryBinaryExpression(" OR ", classOf[Or])
}
test("not expressions") {
assertEqual("not a", !'a)
assertEqual("!a", !'a)
assertEqual("not true > true", Not(GreaterThan(true, true)))
}
test("exists expression") {
assertEqual(
"exists (select 1 from b where b.x = a.x)",
Exists(table("b").where(Symbol("b.x") === Symbol("a.x")).select(1)))
}
test("comparison expressions") {
assertEqual("a = b", 'a === 'b)
assertEqual("a == b", 'a === 'b)
assertEqual("a <=> b", 'a <=> 'b)
assertEqual("a <> b", 'a =!= 'b)
assertEqual("a != b", 'a =!= 'b)
assertEqual("a < b", 'a < 'b)
assertEqual("a <= b", 'a <= 'b)
assertEqual("a !> b", 'a <= 'b)
assertEqual("a > b", 'a > 'b)
assertEqual("a >= b", 'a >= 'b)
assertEqual("a !< b", 'a >= 'b)
}
test("between expressions") {
assertEqual("a between b and c", 'a >= 'b && 'a <= 'c)
assertEqual("a not between b and c", !('a >= 'b && 'a <= 'c))
}
test("in expressions") {
assertEqual("a in (b, c, d)", 'a in ('b, 'c, 'd))
assertEqual("a not in (b, c, d)", !('a in ('b, 'c, 'd)))
}
test("in sub-query") {
assertEqual(
"a in (select b from c)",
InSubquery(Seq('a), ListQuery(table("c").select('b))))
assertEqual(
"(a, b, c) in (select d, e, f from g)",
InSubquery(Seq('a, 'b, 'c), ListQuery(table("g").select('d, 'e, 'f))))
assertEqual(
"(a, b) in (select c from d)",
InSubquery(Seq('a, 'b), ListQuery(table("d").select('c))))
assertEqual(
"(a) in (select b from c)",
InSubquery(Seq('a), ListQuery(table("c").select('b))))
}
test("like expressions") {
assertEqual("a like 'pattern%'", 'a like "pattern%")
assertEqual("a not like 'pattern%'", !('a like "pattern%"))
assertEqual("a rlike 'pattern%'", 'a rlike "pattern%")
assertEqual("a not rlike 'pattern%'", !('a rlike "pattern%"))
assertEqual("a regexp 'pattern%'", 'a rlike "pattern%")
assertEqual("a not regexp 'pattern%'", !('a rlike "pattern%"))
}
test("like escape expressions") {
val message = "Escape string must contain only one character."
assertEqual("a like 'pattern%' escape '#'", 'a.like("pattern%", '#'))
assertEqual("a like 'pattern%' escape '\\"'", 'a.like("pattern%", '\\"'))
intercept("a like 'pattern%' escape '##'", message)
intercept("a like 'pattern%' escape ''", message)
assertEqual("a not like 'pattern%' escape '#'", !('a.like("pattern%", '#')))
assertEqual("a not like 'pattern%' escape '\\"'", !('a.like("pattern%", '\\"')))
intercept("a not like 'pattern%' escape '\\"/'", message)
intercept("a not like 'pattern%' escape ''", message)
}
test("like expressions with ESCAPED_STRING_LITERALS = true") {
withSQLConf(SQLConf.ESCAPED_STRING_LITERALS.key -> "true") {
val parser = new CatalystSqlParser()
assertEqual("a rlike '^\\\\x20[\\\\x20-\\\\x23]+$'", 'a rlike "^\\\\x20[\\\\x20-\\\\x23]+$", parser)
assertEqual("a rlike 'pattern\\\\\\\\'", 'a rlike "pattern\\\\\\\\", parser)
assertEqual("a rlike 'pattern\\\\t\\\\n'", 'a rlike "pattern\\\\t\\\\n", parser)
}
}
test("(NOT) LIKE (ANY | SOME | ALL) expressions") {
Seq("any", "some").foreach { quantifier =>
assertEqual(s"a like $quantifier ('foo%', 'b%')", 'a likeAny("foo%", "b%"))
assertEqual(s"a not like $quantifier ('foo%', 'b%')", 'a notLikeAny("foo%", "b%"))
assertEqual(s"not (a like $quantifier ('foo%', 'b%'))", !('a likeAny("foo%", "b%")))
}
assertEqual("a like all ('foo%', 'b%')", 'a likeAll("foo%", "b%"))
assertEqual("a not like all ('foo%', 'b%')", 'a notLikeAll("foo%", "b%"))
assertEqual("not (a like all ('foo%', 'b%'))", !('a likeAll("foo%", "b%")))
Seq("any", "some", "all").foreach { quantifier =>
intercept(s"a like $quantifier()", "Expected something between '(' and ')'")
}
}
test("is null expressions") {
assertEqual("a is null", 'a.isNull)
assertEqual("a is not null", 'a.isNotNull)
assertEqual("a = b is null", ('a === 'b).isNull)
assertEqual("a = b is not null", ('a === 'b).isNotNull)
}
test("is distinct expressions") {
assertEqual("a is distinct from b", !('a <=> 'b))
assertEqual("a is not distinct from b", 'a <=> 'b)
}
test("binary arithmetic expressions") {
// Simple operations
assertEqual("a * b", 'a * 'b)
assertEqual("a / b", 'a / 'b)
assertEqual("a DIV b", 'a div 'b)
assertEqual("a % b", 'a % 'b)
assertEqual("a + b", 'a + 'b)
assertEqual("a - b", 'a - 'b)
assertEqual("a & b", 'a & 'b)
assertEqual("a ^ b", 'a ^ 'b)
assertEqual("a | b", 'a | 'b)
// Check precedences
assertEqual(
"a * t | b ^ c & d - e + f % g DIV h / i * k",
'a * 't | ('b ^ ('c & ('d - 'e + (('f % 'g div 'h) / 'i * 'k)))))
}
test("unary arithmetic expressions") {
assertEqual("+a", +'a)
assertEqual("-a", -'a)
assertEqual("~a", ~'a)
assertEqual("-+~~a", -( +(~(~'a))))
}
test("cast expressions") {
// Note that DataType parsing is tested elsewhere.
assertEqual("cast(a as int)", 'a.cast(IntegerType))
assertEqual("cast(a as timestamp)", 'a.cast(TimestampType))
assertEqual("cast(a as array<int>)", 'a.cast(ArrayType(IntegerType)))
assertEqual("cast(cast(a as int) as long)", 'a.cast(IntegerType).cast(LongType))
}
test("function expressions") {
assertEqual("foo()", 'foo.function())
assertEqual("foo.bar()",
UnresolvedFunction(FunctionIdentifier("bar", Some("foo")), Seq.empty, isDistinct = false))
assertEqual("foo(*)", 'foo.function(star()))
assertEqual("count(*)", 'count.function(1))
assertEqual("foo(a, b)", 'foo.function('a, 'b))
assertEqual("foo(all a, b)", 'foo.function('a, 'b))
assertEqual("foo(distinct a, b)", 'foo.distinctFunction('a, 'b))
assertEqual("grouping(distinct a, b)", 'grouping.distinctFunction('a, 'b))
assertEqual("`select`(all a, b)", 'select.function('a, 'b))
intercept("foo(a x)", "extraneous input 'x'")
}
private def lv(s: Symbol) = UnresolvedNamedLambdaVariable(Seq(s.name))
test("lambda functions") {
assertEqual("x -> x + 1", LambdaFunction(lv('x) + 1, Seq(lv('x))))
assertEqual("(x, y) -> x + y", LambdaFunction(lv('x) + lv('y), Seq(lv('x), lv('y))))
}
test("window function expressions") {
val func = 'foo.function(star())
def windowed(
partitioning: Seq[Expression] = Seq.empty,
ordering: Seq[SortOrder] = Seq.empty,
frame: WindowFrame = UnspecifiedFrame): Expression = {
WindowExpression(func, WindowSpecDefinition(partitioning, ordering, frame))
}
// Basic window testing.
assertEqual("foo(*) over w1", UnresolvedWindowExpression(func, WindowSpecReference("w1")))
assertEqual("foo(*) over ()", windowed())
assertEqual("foo(*) over (partition by a, b)", windowed(Seq('a, 'b)))
assertEqual("foo(*) over (distribute by a, b)", windowed(Seq('a, 'b)))
assertEqual("foo(*) over (cluster by a, b)", windowed(Seq('a, 'b)))
assertEqual("foo(*) over (order by a desc, b asc)", windowed(Seq.empty, Seq('a.desc, 'b.asc)))
assertEqual("foo(*) over (sort by a desc, b asc)", windowed(Seq.empty, Seq('a.desc, 'b.asc)))
assertEqual("foo(*) over (partition by a, b order by c)", windowed(Seq('a, 'b), Seq('c.asc)))
assertEqual("foo(*) over (distribute by a, b sort by c)", windowed(Seq('a, 'b), Seq('c.asc)))
// Test use of expressions in window functions.
assertEqual(
"sum(product + 1) over (partition by ((product) + (1)) order by 2)",
WindowExpression('sum.function('product + 1),
WindowSpecDefinition(Seq('product + 1), Seq(Literal(2).asc), UnspecifiedFrame)))
assertEqual(
"sum(product + 1) over (partition by ((product / 2) + 1) order by 2)",
WindowExpression('sum.function('product + 1),
WindowSpecDefinition(Seq('product / 2 + 1), Seq(Literal(2).asc), UnspecifiedFrame)))
}
test("range/rows window function expressions") {
val func = 'foo.function(star())
def windowed(
partitioning: Seq[Expression] = Seq.empty,
ordering: Seq[SortOrder] = Seq.empty,
frame: WindowFrame = UnspecifiedFrame): Expression = {
WindowExpression(func, WindowSpecDefinition(partitioning, ordering, frame))
}
val frameTypes = Seq(("rows", RowFrame), ("range", RangeFrame))
val boundaries = Seq(
// No between combinations
("unbounded preceding", UnboundedPreceding, CurrentRow),
("2147483648 preceding", -Literal(2147483648L), CurrentRow),
("10 preceding", -Literal(10), CurrentRow),
("3 + 1 preceding", -Add(Literal(3), Literal(1)), CurrentRow),
("0 preceding", -Literal(0), CurrentRow),
("current row", CurrentRow, CurrentRow),
("0 following", Literal(0), CurrentRow),
("3 + 1 following", Add(Literal(3), Literal(1)), CurrentRow),
("10 following", Literal(10), CurrentRow),
("2147483649 following", Literal(2147483649L), CurrentRow),
("unbounded following", UnboundedFollowing, CurrentRow), // Will fail during analysis
// Between combinations
("between unbounded preceding and 5 following",
UnboundedPreceding, Literal(5)),
("between unbounded preceding and 3 + 1 following",
UnboundedPreceding, Add(Literal(3), Literal(1))),
("between unbounded preceding and 2147483649 following",
UnboundedPreceding, Literal(2147483649L)),
("between unbounded preceding and current row", UnboundedPreceding, CurrentRow),
("between 2147483648 preceding and current row", -Literal(2147483648L), CurrentRow),
("between 10 preceding and current row", -Literal(10), CurrentRow),
("between 3 + 1 preceding and current row", -Add(Literal(3), Literal(1)), CurrentRow),
("between 0 preceding and current row", -Literal(0), CurrentRow),
("between current row and current row", CurrentRow, CurrentRow),
("between current row and 0 following", CurrentRow, Literal(0)),
("between current row and 5 following", CurrentRow, Literal(5)),
("between current row and 3 + 1 following", CurrentRow, Add(Literal(3), Literal(1))),
("between current row and 2147483649 following", CurrentRow, Literal(2147483649L)),
("between current row and unbounded following", CurrentRow, UnboundedFollowing),
("between 2147483648 preceding and unbounded following",
-Literal(2147483648L), UnboundedFollowing),
("between 10 preceding and unbounded following",
-Literal(10), UnboundedFollowing),
("between 3 + 1 preceding and unbounded following",
-Add(Literal(3), Literal(1)), UnboundedFollowing),
("between 0 preceding and unbounded following", -Literal(0), UnboundedFollowing),
// Between partial and full range
("between 10 preceding and 5 following", -Literal(10), Literal(5)),
("between unbounded preceding and unbounded following",
UnboundedPreceding, UnboundedFollowing)
)
frameTypes.foreach {
case (frameTypeSql, frameType) =>
boundaries.foreach {
case (boundarySql, begin, end) =>
val query = s"foo(*) over (partition by a order by b $frameTypeSql $boundarySql)"
val expr = windowed(Seq('a), Seq('b.asc), SpecifiedWindowFrame(frameType, begin, end))
assertEqual(query, expr)
}
}
// We cannot use an arbitrary expression.
intercept("foo(*) over (partition by a order by b rows exp(b) preceding)",
"Frame bound value must be a literal.")
}
test("row constructor") {
// Note that '(a)' will be interpreted as a nested expression.
assertEqual("(a, b)", CreateStruct(Seq('a, 'b)))
assertEqual("(a, b, c)", CreateStruct(Seq('a, 'b, 'c)))
assertEqual("(a as b, b as c)", CreateStruct(Seq('a as 'b, 'b as 'c)))
}
test("scalar sub-query") {
assertEqual(
"(select max(val) from tbl) > current",
ScalarSubquery(table("tbl").select('max.function('val))) > 'current)
assertEqual(
"a = (select b from s)",
'a === ScalarSubquery(table("s").select('b)))
}
test("case when") {
assertEqual("case a when 1 then b when 2 then c else d end",
CaseKeyWhen('a, Seq(1, 'b, 2, 'c, 'd)))
assertEqual("case (a or b) when true then c when false then d else e end",
CaseKeyWhen('a || 'b, Seq(true, 'c, false, 'd, 'e)))
assertEqual("case 'a'='a' when true then 1 end",
CaseKeyWhen("a" === "a", Seq(true, 1)))
assertEqual("case when a = 1 then b when a = 2 then c else d end",
CaseWhen(Seq(('a === 1, 'b.expr), ('a === 2, 'c.expr)), 'd))
assertEqual("case when (1) + case when a > b then c else d end then f else g end",
CaseWhen(Seq((Literal(1) + CaseWhen(Seq(('a > 'b, 'c.expr)), 'd.expr), 'f.expr)), 'g))
}
test("dereference") {
assertEqual("a.b", UnresolvedAttribute("a.b"))
assertEqual("`select`.b", UnresolvedAttribute("select.b"))
assertEqual("(a + b).b", ('a + 'b).getField("b")) // This will fail analysis.
assertEqual(
"struct(a, b).b",
namedStruct(Literal("a"), 'a, Literal("b"), 'b).getField("b"))
}
test("reference") {
// Regular
assertEqual("a", 'a)
// Starting with a digit.
assertEqual("1a", Symbol("1a"))
// Quoted using a keyword.
assertEqual("`select`", 'select)
// Unquoted using an unreserved keyword.
assertEqual("columns", 'columns)
}
test("subscript") {
assertEqual("a[b]", 'a.getItem('b))
assertEqual("a[1 + 1]", 'a.getItem(Literal(1) + 1))
assertEqual("`c`.a[b]", UnresolvedAttribute("c.a").getItem('b))
}
test("parenthesis") {
assertEqual("(a)", 'a)
assertEqual("r * (a + b)", 'r * ('a + 'b))
}
test("type constructors") {
def checkTimestampNTZAndLTZ(): Unit = {
// Timestamp with local time zone
assertEqual("tImEstAmp_LTZ '2016-03-11 20:54:00.000'",
Literal(Timestamp.valueOf("2016-03-11 20:54:00.000")))
intercept("timestamP_LTZ '2016-33-11 20:54:00.000'", "Cannot parse the TIMESTAMP_LTZ value")
// Timestamp without time zone
assertEqual("tImEstAmp_Ntz '2016-03-11 20:54:00.000'",
Literal(LocalDateTime.parse("2016-03-11T20:54:00.000")))
intercept("tImEstAmp_Ntz '2016-33-11 20:54:00.000'", "Cannot parse the TIMESTAMP_NTZ value")
}
// Dates.
assertEqual("dAte '2016-03-11'", Literal(Date.valueOf("2016-03-11")))
intercept("DAtE 'mar 11 2016'", "Cannot parse the DATE value")
// Timestamps.
assertEqual("tImEstAmp '2016-03-11 20:54:00.000'",
Literal(Timestamp.valueOf("2016-03-11 20:54:00.000")))
intercept("timestamP '2016-33-11 20:54:00.000'", "Cannot parse the TIMESTAMP value")
checkTimestampNTZAndLTZ()
withSQLConf(SQLConf.TIMESTAMP_TYPE.key -> TimestampTypes.TIMESTAMP_NTZ.toString) {
assertEqual("tImEstAmp '2016-03-11 20:54:00.000'",
Literal(LocalDateTime.parse("2016-03-11T20:54:00.000")))
intercept("timestamP '2016-33-11 20:54:00.000'", "Cannot parse the TIMESTAMP value")
// If the timestamp string contains a time zone, return a timestamp with local time zone literal.
assertEqual("tImEstAmp '1970-01-01 00:00:00.000 +01:00'",
Literal(-3600000000L, TimestampType))
// The behavior of TIMESTAMP_NTZ and TIMESTAMP_LTZ is independent of SQLConf.TIMESTAMP_TYPE
checkTimestampNTZAndLTZ()
}
// Interval.
val ymIntervalLiteral = Literal.create(Period.of(1, 2, 0), YearMonthIntervalType())
assertEqual("InterVal 'interval 1 year 2 month'", ymIntervalLiteral)
assertEqual("INTERVAL '1 year 2 month'", ymIntervalLiteral)
intercept("Interval 'interval 1 yearsss 2 monthsss'",
"Cannot parse the INTERVAL value: interval 1 yearsss 2 monthsss")
assertEqual("-interval '1 year 2 month'", UnaryMinus(ymIntervalLiteral))
val dtIntervalLiteral = Literal.create(
Duration.ofDays(1).plusHours(2).plusMinutes(3).plusSeconds(4).plusMillis(5).plusNanos(6000))
assertEqual("InterVal 'interval 1 day 2 hour 3 minute 4.005006 second'", dtIntervalLiteral)
assertEqual("INTERVAL '1 day 2 hour 3 minute 4.005006 second'", dtIntervalLiteral)
intercept("Interval 'interval 1 daysss 2 hoursss'",
"Cannot parse the INTERVAL value: interval 1 daysss 2 hoursss")
assertEqual("-interval '1 day 2 hour 3 minute 4.005006 second'", UnaryMinus(dtIntervalLiteral))
intercept("INTERVAL '1 year 2 second'",
"Cannot mix year-month and day-time fields: INTERVAL '1 year 2 second'")
withSQLConf(SQLConf.LEGACY_INTERVAL_ENABLED.key -> "true") {
val intervalLiteral = Literal(IntervalUtils.stringToInterval("interval 3 month 1 hour"))
assertEqual("InterVal 'interval 3 month 1 hour'", intervalLiteral)
assertEqual("INTERVAL '3 month 1 hour'", intervalLiteral)
intercept("Interval 'interval 3 monthsss 1 hoursss'", "Cannot parse the INTERVAL value")
assertEqual(
"-interval '3 month 1 hour'",
UnaryMinus(Literal(IntervalUtils.stringToInterval("interval 3 month 1 hour"))))
val intervalStrWithAllUnits = "1 year 3 months 2 weeks 2 days 1 hour 3 minutes 2 seconds " +
"100 millisecond 200 microseconds"
assertEqual(
s"interval '$intervalStrWithAllUnits'",
Literal(IntervalUtils.stringToInterval(intervalStrWithAllUnits)))
}
// Binary.
assertEqual("X'A'", Literal(Array(0x0a).map(_.toByte)))
assertEqual("x'A10C'", Literal(Array(0xa1, 0x0c).map(_.toByte)))
intercept("x'A1OC'")
// Unsupported datatype.
intercept("GEO '(10,-6)'", "Literals of type 'GEO' are currently not supported.")
}
test("literals") {
def testDecimal(value: String): Unit = {
assertEqual(value, Literal(BigDecimal(value).underlying))
}
// NULL
assertEqual("null", Literal(null))
// Boolean
assertEqual("trUe", Literal(true))
assertEqual("False", Literal(false))
// Integral should have the narrowest possible type
assertEqual("787324", Literal(787324))
assertEqual("7873247234798249234", Literal(7873247234798249234L))
testDecimal("78732472347982492793712334")
// Decimal
testDecimal("7873247234798249279371.2334")
// SPARK-29956: Scientific Decimal is parsed as Double by default.
assertEqual("9.0e1", Literal(90.toDouble))
assertEqual(".9e+2", Literal(90.toDouble))
assertEqual("0.9e+2", Literal(90.toDouble))
// Scientific Decimal with suffix BD should still be parsed as Decimal
assertEqual("900e-1BD", Literal(BigDecimal("900e-1").underlying()))
assertEqual("900.0E-1BD", Literal(BigDecimal("900.0E-1").underlying()))
assertEqual("9.e+1BD", Literal(BigDecimal("9.e+1").underlying()))
intercept(".e3")
// Tiny Int Literal
assertEqual("10Y", Literal(10.toByte))
intercept("-1000Y", s"does not fit in range [${Byte.MinValue}, ${Byte.MaxValue}]")
// Small Int Literal
assertEqual("10S", Literal(10.toShort))
intercept("40000S", s"does not fit in range [${Short.MinValue}, ${Short.MaxValue}]")
// Long Int Literal
assertEqual("10L", Literal(10L))
intercept("78732472347982492793712334L",
s"does not fit in range [${Long.MinValue}, ${Long.MaxValue}]")
// Double Literal
assertEqual("10.0D", Literal(10.0D))
intercept("-1.8E308D", s"does not fit in range")
intercept("1.8E308D", s"does not fit in range")
// BigDecimal Literal
assertEqual("90912830918230182310293801923652346786BD",
Literal(BigDecimal("90912830918230182310293801923652346786").underlying()))
assertEqual("123.0E-28BD", Literal(BigDecimal("123.0E-28").underlying()))
assertEqual("123.08BD", Literal(BigDecimal("123.08").underlying()))
intercept("1.20E-38BD", "decimal can only support precision up to 38")
}
test("SPARK-30252: Decimal should set zero scale rather than negative scale by default") {
assertEqual("123.0BD", Literal(Decimal(BigDecimal("123.0")), DecimalType(4, 1)))
assertEqual("123BD", Literal(Decimal(BigDecimal("123")), DecimalType(3, 0)))
assertEqual("123E10BD", Literal(Decimal(BigDecimal("123E10")), DecimalType(13, 0)))
assertEqual("123E+10BD", Literal(Decimal(BigDecimal("123E+10")), DecimalType(13, 0)))
assertEqual("123E-10BD", Literal(Decimal(BigDecimal("123E-10")), DecimalType(10, 10)))
assertEqual("1.23E10BD", Literal(Decimal(BigDecimal("1.23E10")), DecimalType(11, 0)))
assertEqual("-1.23E10BD", Literal(Decimal(BigDecimal("-1.23E10")), DecimalType(11, 0)))
}
test("SPARK-29956: scientific decimal should be parsed as Decimal in legacy mode") {
def testDecimal(value: String, parser: ParserInterface): Unit = {
assertEqual(value, Literal(BigDecimal(value).underlying), parser)
}
withSQLConf(SQLConf.LEGACY_EXPONENT_LITERAL_AS_DECIMAL_ENABLED.key -> "true") {
val parser = new CatalystSqlParser()
testDecimal("9e1", parser)
testDecimal("9e-1", parser)
testDecimal("-9e1", parser)
testDecimal("9.0e1", parser)
testDecimal(".9e+2", parser)
testDecimal("0.9e+2", parser)
}
}
test("strings") {
Seq(true, false).foreach { escape =>
withSQLConf(SQLConf.ESCAPED_STRING_LITERALS.key -> escape.toString) {
val parser = new CatalystSqlParser()
// tests that have same result whatever the conf is
// Single Strings.
assertEqual("\\"hello\\"", "hello", parser)
assertEqual("'hello'", "hello", parser)
// Multi-Strings.
assertEqual("\\"hello\\" 'world'", "helloworld", parser)
assertEqual("'hello' \\" \\" 'world'", "hello world", parser)
// 'LIKE' string literals. Notice that an escaped '%' is the same as an escaped '\\' and a
// regular '%'; to get the correct result you need to add another escaped '\\'.
// TODO: figure out whether the ParseUtils.unescapeSQLString method should be changed.
assertEqual("'pattern%'", "pattern%", parser)
assertEqual("'no-pattern\\\\%'", "no-pattern\\\\%", parser)
// tests that have different result regarding the conf
if (escape) {
// When SQLConf.ESCAPED_STRING_LITERALS is enabled, string literal parsing falls back to
// Spark 1.6 behavior.
// 'LIKE' string literals.
assertEqual("'pattern\\\\\\\\%'", "pattern\\\\\\\\%", parser)
assertEqual("'pattern\\\\\\\\\\\\%'", "pattern\\\\\\\\\\\\%", parser)
// Escaped characters.
// Unescape string literal "'\\0'" for ASCII NUL (X'00') doesn't work
// when ESCAPED_STRING_LITERALS is enabled.
// It is parsed literally.
assertEqual("'\\\\0'", "\\\\0", parser)
// Note: Single quote follows 1.6 parsing behavior
// when ESCAPED_STRING_LITERALS is enabled.
val e = intercept[ParseException](parser.parseExpression("'\''"))
assert(e.message.contains("extraneous input '''"))
// The unescape special characters (e.g., "\\t") for 2.0+ don't work
// when ESCAPED_STRING_LITERALS is enabled. They are parsed literally.
assertEqual("'\\\\\\"'", "\\\\\\"", parser) // Double quote
assertEqual("'\\\\b'", "\\\\b", parser) // Backspace
assertEqual("'\\\\n'", "\\\\n", parser) // Newline
assertEqual("'\\\\r'", "\\\\r", parser) // Carriage return
assertEqual("'\\\\t'", "\\\\t", parser) // Tab character
// The unescape Octals for 2.0+ don't work when ESCAPED_STRING_LITERALS is enabled.
// They are parsed literally.
assertEqual("'\\\\110\\\\145\\\\154\\\\154\\\\157\\\\041'", "\\\\110\\\\145\\\\154\\\\154\\\\157\\\\041", parser)
// The unescape Unicode for 2.0+ doesn't work when ESCAPED_STRING_LITERALS is enabled.
// They are parsed literally.
assertEqual("'\\\\u0057\\\\u006F\\\\u0072\\\\u006C\\\\u0064\\\\u0020\\\\u003A\\\\u0029'",
"\\\\u0057\\\\u006F\\\\u0072\\\\u006C\\\\u0064\\\\u0020\\\\u003A\\\\u0029", parser)
} else {
// Default behavior
// 'LIKE' string literals.
assertEqual("'pattern\\\\\\\\%'", "pattern\\\\%", parser)
assertEqual("'pattern\\\\\\\\\\\\%'", "pattern\\\\\\\\%", parser)
// Escaped characters.
// See: http://dev.mysql.com/doc/refman/5.7/en/string-literals.html
assertEqual("'\\\\0'", "\\u0000", parser) // ASCII NUL (X'00')
assertEqual("'\\\\''", "\\'", parser) // Single quote
assertEqual("'\\\\\\"'", "\\"", parser) // Double quote
assertEqual("'\\\\b'", "\\b", parser) // Backspace
assertEqual("'\\\\n'", "\\n", parser) // Newline
assertEqual("'\\\\r'", "\\r", parser) // Carriage return
assertEqual("'\\\\t'", "\\t", parser) // Tab character
assertEqual("'\\\\Z'", "\\u001A", parser) // ASCII 26 - CTRL + Z (EOF on windows)
// Octals
assertEqual("'\\\\110\\\\145\\\\154\\\\154\\\\157\\\\041'", "Hello!", parser)
// Unicode
assertEqual("'\\\\u0057\\\\u006F\\\\u0072\\\\u006C\\\\u0064\\\\u0020\\\\u003A\\\\u0029'", "World :)",
parser)
}
}
}
}
val ymIntervalUnits = Seq("year", "month")
val dtIntervalUnits = Seq("week", "day", "hour", "minute", "second", "millisecond", "microsecond")
def ymIntervalLiteral(u: String, s: String): Literal = {
val period = u match {
case "year" => Period.ofYears(Integer.parseInt(s))
case "month" => Period.ofMonths(Integer.parseInt(s))
}
Literal.create(period, YearMonthIntervalType(YM.stringToField(u)))
}
def dtIntervalLiteral(u: String, s: String): Literal = {
val value = if (u == "second") {
(BigDecimal(s) * NANOS_PER_SECOND).toLong
} else {
java.lang.Long.parseLong(s)
}
val (duration, field) = u match {
case "week" => (Duration.ofDays(value * 7), DT.DAY)
case "day" => (Duration.ofDays(value), DT.DAY)
case "hour" => (Duration.ofHours(value), DT.HOUR)
case "minute" => (Duration.ofMinutes(value), DT.MINUTE)
case "second" => (Duration.ofNanos(value), DT.SECOND)
case "millisecond" => (Duration.ofMillis(value), DT.SECOND)
case "microsecond" => (Duration.ofNanos(value * NANOS_PER_MICROS), DT.SECOND)
}
Literal.create(duration, DayTimeIntervalType(field))
}
def legacyIntervalLiteral(u: String, s: String): Literal = {
Literal(IntervalUtils.stringToInterval(s + " " + u.toString))
}
test("intervals") {
def checkIntervals(intervalValue: String, expected: Literal): Unit = {
Seq(
"" -> expected,
"-" -> UnaryMinus(expected)
).foreach { case (sign, expectedLiteral) =>
assertEqual(s"${sign}interval $intervalValue", expectedLiteral)
}
}
// Empty interval statement
intercept("interval", "at least one time unit should be given for interval literal")
// Single Intervals.
val forms = Seq("", "s")
val values = Seq("0", "10", "-7", "21")
ymIntervalUnits.foreach { unit =>
forms.foreach { form =>
values.foreach { value =>
val expected = ymIntervalLiteral(unit, value)
checkIntervals(s"$value $unit$form", expected)
checkIntervals(s"'$value' $unit$form", expected)
}
}
}
dtIntervalUnits.foreach { unit =>
forms.foreach { form =>
values.foreach { value =>
val expected = dtIntervalLiteral(unit, value)
checkIntervals(s"$value $unit$form", expected)
checkIntervals(s"'$value' $unit$form", expected)
}
}
}
// Hive nanosecond notation.
checkIntervals("13.123456789 seconds", dtIntervalLiteral("second", "13.123456789"))
withSQLConf(SQLConf.LEGACY_INTERVAL_ENABLED.key -> "true") {
(ymIntervalUnits ++ dtIntervalUnits).foreach { unit =>
forms.foreach { form =>
values.foreach { value =>
val expected = legacyIntervalLiteral(unit, value)
checkIntervals(s"$value $unit$form", expected)
checkIntervals(s"'$value' $unit$form", expected)
}
}
}
checkIntervals(
"-13.123456789 second",
Literal(new CalendarInterval(
0,
0,
DateTimeTestUtils.secFrac(-13, -123, -456))))
checkIntervals(
"13.123456 second",
Literal(new CalendarInterval(
0,
0,
DateTimeTestUtils.secFrac(13, 123, 456))))
checkIntervals("1.001 second",
Literal(IntervalUtils.stringToInterval("1 second 1 millisecond")))
}
// Non Existing unit
intercept("interval 10 nanoseconds", "invalid unit 'nanoseconds'")
withSQLConf(SQLConf.LEGACY_INTERVAL_ENABLED.key -> "true") {
// Year-Month intervals.
val yearMonthValues = Seq("123-10", "496-0", "-2-3", "-123-0", "\t -1-2\t")
yearMonthValues.foreach { value =>
val result = Literal(IntervalUtils.fromYearMonthString(value))
checkIntervals(s"'$value' year to month", result)
}
// Day-Time intervals.
val datTimeValues = Seq(
"99 11:22:33.123456789",
"-99 11:22:33.123456789",
"10 9:8:7.123456789",
"1 0:0:0",
"-1 0:0:0",
"1 0:0:1",
"\\t 1 0:0:1 ")
datTimeValues.foreach { value =>
val result = Literal(IntervalUtils.fromDayTimeString(value))
checkIntervals(s"'$value' day to second", result)
}
// Hour-Time intervals.
val hourTimeValues = Seq(
"11:22:33.123456789",
"9:8:7.123456789",
"-19:18:17.123456789",
"0:0:0",
"0:0:1")
hourTimeValues.foreach { value =>
val result = Literal(IntervalUtils.fromDayTimeString(
value, DayTimeIntervalType.HOUR, DayTimeIntervalType.SECOND))
checkIntervals(s"'$value' hour to second", result)
}
}
// Unknown FROM TO intervals
intercept("interval '10' month to second",
"Intervals FROM month TO second are not supported.")
// Composed intervals.
checkIntervals(
"10 years 3 months", Literal.create(Period.of(10, 3, 0), YearMonthIntervalType()))
checkIntervals(
"8 days 2 hours 3 minutes 21 seconds",
Literal.create(Duration.ofDays(8).plusHours(2).plusMinutes(3).plusSeconds(21)))
Seq(true, false).foreach { legacyEnabled =>
withSQLConf(SQLConf.LEGACY_INTERVAL_ENABLED.key -> legacyEnabled.toString) {
val intervalStr = "3 monThs 4 dayS 22 sEcond 1 millisecond"
if (legacyEnabled) {
checkIntervals(intervalStr, Literal(new CalendarInterval(3, 4, 22001000L)))
} else {
intercept(s"interval $intervalStr",
s"Cannot mix year-month and day-time fields: interval $intervalStr")
}
}
}
}
test("composed expressions") {
assertEqual("1 + r.r As q", (Literal(1) + UnresolvedAttribute("r.r")).as("q"))
assertEqual("1 - f('o', o(bar))", Literal(1) - 'f.function("o", 'o.function('bar)))
intercept("1 - f('o', o(bar)) hello * world", Some("PARSE_INPUT_MISMATCHED"),
"Syntax error at or near '*'")
}
test("SPARK-17364, fully qualified column name which starts with number") {
assertEqual("123_", UnresolvedAttribute("123_"))
assertEqual("1a.123_", UnresolvedAttribute("1a.123_"))
// ".123" should not be treated as token of type DECIMAL_VALUE
assertEqual("a.123A", UnresolvedAttribute("a.123A"))
// ".123E3" should not be treated as token of type SCIENTIFIC_DECIMAL_VALUE
assertEqual("a.123E3_column", UnresolvedAttribute("a.123E3_column"))
// ".123D" should not be treated as token of type DOUBLE_LITERAL
assertEqual("a.123D_column", UnresolvedAttribute("a.123D_column"))
// ".123BD" should not be treated as token of type BIGDECIMAL_LITERAL
assertEqual("a.123BD_column", UnresolvedAttribute("a.123BD_column"))
}
test("SPARK-17832 function identifier contains backtick") {
val complexName = FunctionIdentifier("`ba`r", Some("`fo`o"))
assertEqual(complexName.quotedString, UnresolvedAttribute(Seq("`fo`o", "`ba`r")))
intercept(complexName.unquotedString, Some("PARSE_INPUT_MISMATCHED"),
"Syntax error at or near")
// A function identifier containing consecutive backticks should be handled correctly.
val complexName2 = FunctionIdentifier("ba``r", Some("fo``o"))
assertEqual(complexName2.quotedString, UnresolvedAttribute(Seq("fo``o", "ba``r")))
}
test("SPARK-19526 Support ignore nulls keywords for first and last") {
assertEqual("first(a ignore nulls)", First('a, true).toAggregateExpression())
assertEqual("first(a)", First('a, false).toAggregateExpression())
assertEqual("last(a ignore nulls)", Last('a, true).toAggregateExpression())
assertEqual("last(a)", Last('a, false).toAggregateExpression())
}
test("timestamp literals") {
DateTimeTestUtils.outstandingZoneIds.foreach { zid =>
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> zid.getId) {
def toMicros(time: LocalDateTime): Long = {
val seconds = time.atZone(zid).toInstant.getEpochSecond
TimeUnit.SECONDS.toMicros(seconds)
}
assertEval(
sqlCommand = "TIMESTAMP '2019-01-14 20:54:00.000'",
expect = toMicros(LocalDateTime.of(2019, 1, 14, 20, 54)))
assertEval(
sqlCommand = "Timestamp '2000-01-01T00:55:00'",
expect = toMicros(LocalDateTime.of(2000, 1, 1, 0, 55)))
// Parsing of the string does not depend on the SQL config because the string contains
// time zone offset already.
assertEval(
sqlCommand = "TIMESTAMP '2019-01-16 20:50:00.567000+01:00'",
expect = 1547668200567000L)
}
}
}
test("date literals") {
DateTimeTestUtils.outstandingTimezonesIds.foreach { timeZone =>
withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> timeZone) {
assertEval("DATE '2019-01-14'", 17910)
assertEval("DATE '2019-01'", 17897)
assertEval("DATE '2019'", 17897)
}
}
}
test("current date/timestamp braceless expressions") {
withSQLConf(SQLConf.ANSI_ENABLED.key -> "true",
SQLConf.ENFORCE_RESERVED_KEYWORDS.key -> "true") {
assertEqual("current_date", CurrentDate())
assertEqual("current_timestamp", CurrentTimestamp())
}
def testNonAnsiBehavior(): Unit = {
assertEqual("current_date", UnresolvedAttribute.quoted("current_date"))
assertEqual("current_timestamp", UnresolvedAttribute.quoted("current_timestamp"))
}
withSQLConf(
SQLConf.ANSI_ENABLED.key -> "false",
SQLConf.ENFORCE_RESERVED_KEYWORDS.key -> "true") {
testNonAnsiBehavior()
}
withSQLConf(
SQLConf.ANSI_ENABLED.key -> "true",
SQLConf.ENFORCE_RESERVED_KEYWORDS.key -> "false") {
testNonAnsiBehavior()
}
}
test("SPARK-36736: (NOT) ILIKE (ANY | SOME | ALL) expressions") {
Seq("any", "some").foreach { quantifier =>
assertEqual(s"a ilike $quantifier ('FOO%', 'b%')", lower($"a") likeAny("foo%", "b%"))
assertEqual(s"a not ilike $quantifier ('foo%', 'B%')", lower($"a") notLikeAny("foo%", "b%"))
assertEqual(s"not (a ilike $quantifier ('FOO%', 'B%'))", !(lower($"a") likeAny("foo%", "b%")))
}
assertEqual("a ilike all ('Foo%', 'b%')", lower($"a") likeAll("foo%", "b%"))
assertEqual("a not ilike all ('foo%', 'B%')", lower($"a") notLikeAll("foo%", "b%"))
assertEqual("not (a ilike all ('foO%', 'b%'))", !(lower($"a") likeAll("foo%", "b%")))
Seq("any", "some", "all").foreach { quantifier =>
intercept(s"a ilike $quantifier()", "Expected something between '(' and ')'")
}
}
}
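// Illustrative sketch, not part of the original suite: assertEqual above boils down
// to parsing an expression string and comparing it with a manually built expression
// tree. Assuming the Catalyst classes referenced by this file are on the classpath,
// a minimal standalone check looks like this.
object ExpressionParserSketch {
  import org.apache.spark.sql.catalyst.expressions.{Add, Literal}
  import org.apache.spark.sql.catalyst.parser.CatalystSqlParser

  def main(args: Array[String]): Unit = {
    val parser = new CatalystSqlParser()
    val parsed = parser.parseExpression("1 + 2")
    val expected = Add(Literal(1), Literal(2))
    // semanticEquals ignores cosmetic differences such as expression ids.
    assert(parsed.semanticEquals(expected), s"unexpected parse result: $parsed")
  }
}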
|
mahak/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala
|
Scala
|
apache-2.0
| 40,485 |
package microtools.models
import java.time.format.DateTimeFormatter
import java.time.{DateTimeException, Instant}
import play.api.libs.json._
trait Protocol {
def enumReads[E <: Enumeration](enum: E): Reads[E#Value] =
implicitly[Reads[String]].flatMap { s =>
Reads[E#Value] { _ =>
try {
JsSuccess(enum.withName(s))
} catch {
case _: NoSuchElementException =>
JsError(
s"Enumeration expected of type: '${enum.getClass}', but it does not appear to contain the value: '$s'"
)
}
}
}
def enumWrites[E <: Enumeration]: Writes[E#Value] =
Writes[E#Value] { v: E#Value =>
JsString(v.toString)
}
def enumFormat[E <: Enumeration](enum: E): Format[E#Value] = {
Format(enumReads(enum), enumWrites)
}
implicit def instantWrites: Writes[Instant] = Writes[Instant] { o =>
JsNumber(o.getEpochSecond)
}
implicit def instantReads: Reads[Instant] = Reads[Instant] {
case JsNumber(time) => JsSuccess(Instant.ofEpochSecond(time.toLong))
case JsString(str) =>
try {
JsSuccess(Instant.from(DateTimeFormatter.ISO_DATE_TIME.parse(str)))
} catch {
case _: DateTimeException =>
JsError(
Seq(
JsPath() ->
Seq(JsonValidationError("error.expected.date.isoformat"))
)
)
}
case _ =>
JsError(
Seq(
JsPath() ->
Seq(JsonValidationError("error.expected.date"))
)
)
}
}
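// Illustrative usage sketch, not part of the original file: the enum and Instant
// (de)serializers above are typically obtained by mixing the trait into a protocol
// object. `Color` and `DemoProtocol` are invented names for this sketch.
object ProtocolUsageSketch {
  object Color extends Enumeration { val Red, Green = Value }

  object DemoProtocol extends Protocol {
    implicit val colorFormat: Format[Color.Value] = enumFormat(Color)
  }

  def demo(): Unit = {
    import DemoProtocol._
    println(Json.toJson(Color.Red))                                   // "Red"
    println(Json.parse("\"Blue\"").validate[Color.Value])             // JsError: unknown value
    println(Json.parse("1700000000").validate[Instant])               // epoch-seconds branch
    println(Json.parse("\"2023-11-14T22:13:20Z\"").validate[Instant]) // ISO-8601 branch
  }
}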
|
21re/play-micro-tools
|
src/main/scala/microtools/models/Protocol.scala
|
Scala
|
mit
| 1,549 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.pipes
import org.neo4j.cypher.internal.compiler.v2_3.ExecutionContext
import org.neo4j.cypher.internal.compiler.v2_3.executionplan.{ReadsAllNodes, Effects}
import org.neo4j.cypher.internal.compiler.v2_3.planDescription.{NoChildren, PlanDescriptionImpl}
import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
case class AllNodesScanPipe(ident: String)(val estimatedCardinality: Option[Double] = None)
(implicit pipeMonitor: PipeMonitor) extends Pipe with RonjaPipe {
protected def internalCreateResults(state: QueryState): Iterator[ExecutionContext] = {
val baseContext = state.initialContext.getOrElse(ExecutionContext.empty)
state.query.nodeOps.all.map(n => baseContext.newWith1(ident, n))
}
def exists(predicate: Pipe => Boolean): Boolean = predicate(this)
def planDescriptionWithoutCardinality = PlanDescriptionImpl(this.id, "AllNodesScan", NoChildren, Seq(), identifiers)
def symbols = new SymbolTable(Map(ident -> CTNode))
override def monitor = pipeMonitor
override def localEffects: Effects = Effects(ReadsAllNodes)
def dup(sources: List[Pipe]): Pipe = {
require(sources.isEmpty)
this
}
def sources: Seq[Pipe] = Seq.empty
def withEstimatedCardinality(estimated: Double) = copy()(Some(estimated))
}
|
HuangLS/neo4j
|
community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/pipes/AllNodesScanPipe.scala
|
Scala
|
apache-2.0
| 2,194 |
package mesosphere.marathon
package core.launcher
import org.rogach.scallop.ScallopConf
trait OfferProcessorConfig extends ScallopConf {
lazy val declineOfferDuration = opt[Long](
"decline_offer_duration",
descr = "(Default: 120 seconds) " +
"The duration (milliseconds) for which to decline offers by default",
default = Some(120000))
}
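// Illustrative sketch, not part of the original file: a Scallop config trait like the
// one above is mixed into a concrete ScallopConf and verified before options are read.
// `SketchConf` is an invented name.
class SketchConf(args: Seq[String]) extends ScallopConf(args) with OfferProcessorConfig {
  verify()
}

object OfferProcessorConfigSketch {
  def main(args: Array[String]): Unit = {
    val conf = new SketchConf(Seq("--decline_offer_duration", "60000"))
    println(conf.declineOfferDuration()) // 60000 (defaults to 120000 when the flag is omitted)
  }
}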
|
gsantovena/marathon
|
src/main/scala/mesosphere/marathon/core/launcher/OfferProcessorConfig.scala
|
Scala
|
apache-2.0
| 361 |
package scalax.collection
/**
* This package contains type constructors facilitating input (loading) from
* and output (unloading) to streams and specific external data stores.
*
* @author Peter Empen
*/
package object io {
}
|
opyate/scala-graph
|
core/src/main/scala/scalax/collection/io/package.scala
|
Scala
|
bsd-3-clause
| 232 |
package spire.algebra
import spire.math._
import org.scalatest.FunSuite
import scala.reflect.ClassTag
class NRootTest extends FunSuite {
def testIntegralNRoot[A: Numeric: ClassTag]: Unit = {
val cls = implicitly[ClassTag[A]].runtimeClass.getSimpleName
test("Integral NRoot (%s)" format cls) {
val one = Rig[A].one
assert(NRoot[A].nroot(Rig[A].one, 2) === Rig[A].one)
assert(NRoot[A].nroot(Numeric[A].fromInt(1234), 2) === Numeric[A].fromInt(35))
assert(NRoot[A].nroot(Numeric[A].fromInt(912384), 3) === Numeric[A].fromInt(96))
}
}
testIntegralNRoot[Int]
testIntegralNRoot[Long]
testIntegralNRoot[BigInt]
val DECIMAL1 = new java.math.MathContext(1)
// Returns one unit in the last place of x: the smallest amount that can be added to or subtracted from x at its precision.
def eps(x: BigDecimal): BigDecimal =
x.round(DECIMAL1) * BigDecimal(1, x.mc.getPrecision - 1)
def checkNRoot(x: BigDecimal, n: Int): Unit = {
import spire.implicits._
val y = x nroot n
val e = eps(y)
if (x > 0) {
assert(((y - e) ** n) < x, "expected %s ** %d < %s" format (y - e, n, x))
assert(((y + e) ** n) > x, "expected %s ** %d > %s" format (y + e, n, x))
} else {
assert(((y + e) ** n) < x, "expected %s ** %d < %s" format (y + e, n, x))
assert(((y - e) ** n) > x, "expected %s ** %d > %s" format (y - e, n, x))
}
}
val HighPrecision = new java.math.MathContext(250)
val bases = Seq(
BigDecimal(2),
BigDecimal(3),
BigDecimal("3492919288716623419872.99818234", HighPrecision),
BigDecimal("0.00000000000000000000000000000012345")
)
val roots = Seq(2, 3, 6, 9, 23, 53)
test("BigDecimal NRoot") {
bases foreach { x =>
roots foreach (checkNRoot(x, _))
}
bases map (-_) foreach { x =>
roots filter (_ % 2 == 1) foreach (checkNRoot(x, _))
}
}
}
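// Illustrative sketch, not part of the original suite: the property exercised by
// checkNRoot is that y = x nroot n is the best n-th root representable at x's
// precision, so nudging y by one unit in the last place brackets x.
object NRootSketch {
  import spire.implicits._

  def main(args: Array[String]): Unit = {
    val x = BigDecimal("2.000000000000")
    val y = x nroot 2
    println(y)               // ~1.4142135623..., computed at x's MathContext precision
    println((y * y - x).abs) // small residual; exact equality is not expected
  }
}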
|
woparry/spire
|
tests/src/test/scala/spire/algebra/NRootTest.scala
|
Scala
|
mit
| 1,836 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.sql.agg
import org.apache.flink.api.scala._
import org.apache.flink.table.api.config.OptimizerConfigOptions
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{TableException, ValidationException}
import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedAggFunctions.WeightedAvgWithMerge
import org.apache.flink.table.planner.utils.{AggregatePhaseStrategy, CountAggFunction, TableTestBase}
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Test}
import java.sql.Timestamp
import java.util
import scala.collection.JavaConversions._
@RunWith(classOf[Parameterized])
class WindowAggregateTest(aggStrategy: AggregatePhaseStrategy) extends TableTestBase {
private val util = batchTestUtil()
@Before
def before(): Unit = {
util.tableEnv.getConfig.getConfiguration.setString(
OptimizerConfigOptions.TABLE_OPTIMIZER_AGG_PHASE_STRATEGY, aggStrategy.toString)
util.addFunction("countFun", new CountAggFunction)
util.addTableSource[(Int, Timestamp, Int, Long)]("MyTable", 'a, 'b, 'c, 'd)
util.addTableSource[(Timestamp, Long, Int, String)]("MyTable1", 'ts, 'a, 'b, 'c)
util.addTableSource[(Int, Long, String, Int, Timestamp)]("MyTable2", 'a, 'b, 'c, 'd, 'ts)
}
@Test(expected = classOf[TableException])
def testHopWindowNoOffset(): Unit = {
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB FROM MyTable2 " +
"GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '2' HOUR, TIME '10:00:00')"
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[TableException])
def testSessionWindowNoOffset(): Unit = {
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB FROM MyTable2 " +
"GROUP BY SESSION(ts, INTERVAL '2' HOUR, TIME '10:00:00')"
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[TableException])
def testVariableWindowSize(): Unit = {
util.verifyPlan("SELECT COUNT(*) FROM MyTable2 GROUP BY TUMBLE(ts, b * INTERVAL '1' MINUTE)")
}
@Test(expected = classOf[ValidationException])
def testTumbleWindowWithInvalidUdAggArgs(): Unit = {
val weightedAvg = new WeightedAvgWithMerge
util.addFunction("weightedAvg", weightedAvg)
val sql = "SELECT weightedAvg(c, a) AS wAvg FROM MyTable2 " +
"GROUP BY TUMBLE(ts, INTERVAL '4' MINUTE)"
util.verifyPlan(sql)
}
@Test(expected = classOf[ValidationException])
def testWindowProctime(): Unit = {
val sqlQuery =
"SELECT TUMBLE_PROCTIME(ts, INTERVAL '4' MINUTE) FROM MyTable2 " +
"GROUP BY TUMBLE(ts, INTERVAL '4' MINUTE), c"
// should fail because PROCTIME properties are not yet supported in batch
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[AssertionError])
def testWindowAggWithGroupSets(): Unit = {
// TODO: support grouping sets
// Currently the optimized plan is not correct, and an exception will be thrown in code-gen.
val sql =
"""
|SELECT COUNT(*),
| TUMBLE_END(ts, INTERVAL '15' MINUTE) + INTERVAL '1' MINUTE
|FROM MyTable1
| GROUP BY rollup(TUMBLE(ts, INTERVAL '15' MINUTE), b)
""".stripMargin
util.verifyPlanNotExpected(sql, "TUMBLE(ts")
}
@Test
def testNoGroupingTumblingWindow(): Unit = {
val sqlQuery = "SELECT AVG(c), SUM(a) FROM MyTable GROUP BY TUMBLE(b, INTERVAL '3' SECOND)"
util.verifyPlan(sqlQuery)
}
@Test
def testTumblingWindowSortAgg1(): Unit = {
val sqlQuery = "SELECT MAX(c) FROM MyTable1 GROUP BY a, TUMBLE(ts, INTERVAL '3' SECOND)"
util.verifyPlan(sqlQuery)
}
@Test
def testTumblingWindowSortAgg2(): Unit = {
val sqlQuery = "SELECT AVG(c), countFun(a) FROM MyTable " +
"GROUP BY a, d, TUMBLE(b, INTERVAL '3' SECOND)"
util.verifyPlan(sqlQuery)
}
@Test
def testTumblingWindowHashAgg1(): Unit = {
val sqlQuery = "SELECT COUNT(c) FROM MyTable1 GROUP BY a, TUMBLE(ts, INTERVAL '3' SECOND)"
util.verifyPlan(sqlQuery)
}
@Test
def testTumblingWindowHashAgg2(): Unit = {
val sql = "SELECT AVG(c), COUNT(a) FROM MyTable GROUP BY a, d, TUMBLE(b, INTERVAL '3' SECOND)"
util.verifyPlan(sql)
}
@Test
def testNonPartitionedTumblingWindow(): Unit = {
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB FROM MyTable2 GROUP BY TUMBLE(ts, INTERVAL '2' HOUR)"
util.verifyPlan(sqlQuery)
}
@Test
def testPartitionedTumblingWindow(): Unit = {
val sqlQuery =
"""
|SELECT TUMBLE_START(ts, INTERVAL '4' MINUTE),
| TUMBLE_END(ts, INTERVAL '4' MINUTE),
| TUMBLE_ROWTIME(ts, INTERVAL '4' MINUTE),
| c,
| SUM(a) AS sumA,
| MIN(b) AS minB
|FROM MyTable2
| GROUP BY TUMBLE(ts, INTERVAL '4' MINUTE), c
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testTumblingWindowWithUdAgg(): Unit = {
util.addFunction("weightedAvg", new WeightedAvgWithMerge)
val sql = "SELECT weightedAvg(b, a) AS wAvg FROM MyTable2 " +
"GROUP BY TUMBLE(ts, INTERVAL '4' MINUTE)"
util.verifyPlan(sql)
}
@Test
def testNoGroupingSlidingWindow(): Unit = {
val sqlQuery =
"""
|SELECT SUM(a),
| HOP_START(b, INTERVAL '3' SECOND, INTERVAL '3' SECOND),
| HOP_END(b, INTERVAL '3' SECOND, INTERVAL '3' SECOND)
|FROM MyTable
| GROUP BY HOP(b, INTERVAL '3' SECOND, INTERVAL '3' SECOND)
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testSlidingWindowSortAgg1(): Unit = {
val sqlQuery = "SELECT MAX(c) FROM MyTable1 " +
"GROUP BY a, HOP(ts, INTERVAL '3' SECOND, INTERVAL '1' HOUR)"
util.verifyPlan(sqlQuery)
}
@Test
def testSlidingWindowSortAgg2(): Unit = {
val sqlQuery = "SELECT MAX(c) FROM MyTable1 " +
"GROUP BY b, HOP(ts, INTERVAL '0.111' SECOND(1,3), INTERVAL '1' SECOND)"
util.verifyPlan(sqlQuery)
}
@Test
def testSlidingWindowSortAgg3(): Unit = {
val sqlQuery = "SELECT countFun(c) FROM MyTable " +
" GROUP BY a, d, HOP(b, INTERVAL '3' SECOND, INTERVAL '1' HOUR)"
util.verifyPlan(sqlQuery)
}
@Test
def testSlidingWindowSortAggWithPaneOptimization(): Unit = {
val sqlQuery = "SELECT COUNT(c) FROM MyTable1 " +
"GROUP BY a, HOP(ts, INTERVAL '3' SECOND, INTERVAL '1' HOUR)"
util.verifyPlan(sqlQuery)
}
@Test
def testSlidingWindowHashAgg(): Unit = {
val sqlQuery = "SELECT count(c) FROM MyTable1 " +
"GROUP BY b, HOP(ts, INTERVAL '3' SECOND, INTERVAL '1' HOUR)"
util.verifyPlan(sqlQuery)
}
@Test
def testNonPartitionedSlidingWindow(): Unit = {
val sqlQuery =
"SELECT SUM(a) AS sumA, COUNT(b) AS cntB " +
"FROM MyTable2 " +
"GROUP BY HOP(ts, INTERVAL '15' MINUTE, INTERVAL '90' MINUTE)"
util.verifyPlan(sqlQuery)
}
@Test
def testPartitionedSlidingWindow(): Unit = {
val sqlQuery =
"SELECT " +
" c, " +
" HOP_END(ts, INTERVAL '1' HOUR, INTERVAL '3' HOUR), " +
" HOP_START(ts, INTERVAL '1' HOUR, INTERVAL '3' HOUR), " +
" HOP_ROWTIME(ts, INTERVAL '1' HOUR, INTERVAL '3' HOUR), " +
" SUM(a) AS sumA, " +
" AVG(b) AS avgB " +
"FROM MyTable2 " +
"GROUP BY HOP(ts, INTERVAL '1' HOUR, INTERVAL '3' HOUR), d, c"
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[TableException])
// TODO session window is not supported now
def testNonPartitionedSessionWindow(): Unit = {
val sqlQuery = "SELECT COUNT(*) AS cnt FROM MyTable2 GROUP BY SESSION(ts, INTERVAL '30' MINUTE)"
util.verifyPlan(sqlQuery)
}
@Test(expected = classOf[TableException])
// TODO session window is not supported now
def testPartitionedSessionWindow(): Unit = {
val sqlQuery =
"""
|SELECT c, d,
| SESSION_START(ts, INTERVAL '12' HOUR),
| SESSION_END(ts, INTERVAL '12' HOUR),
| SESSION_ROWTIME(ts, INTERVAL '12' HOUR),
| SUM(a) AS sumA,
| MIN(b) AS minB
|FROM MyTable2
| GROUP BY SESSION(ts, INTERVAL '12' HOUR), c, d
""".stripMargin
util.verifyPlan(sqlQuery)
}
@Test
def testWindowEndOnly(): Unit = {
val sqlQuery =
"SELECT TUMBLE_END(ts, INTERVAL '4' MINUTE) FROM MyTable2 " +
"GROUP BY TUMBLE(ts, INTERVAL '4' MINUTE), c"
util.verifyPlan(sqlQuery)
}
@Test
def testExpressionOnWindowHavingFunction(): Unit = {
val sql =
"""
|SELECT COUNT(*),
| HOP_START(ts, INTERVAL '15' MINUTE, INTERVAL '1' MINUTE)
|FROM MyTable2
| GROUP BY HOP(ts, INTERVAL '15' MINUTE, INTERVAL '1' MINUTE)
| HAVING
| SUM(a) > 0 AND
| QUARTER(HOP_START(ts, INTERVAL '15' MINUTE, INTERVAL '1' MINUTE)) = 1
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testDecomposableAggFunctions(): Unit = {
val sql =
"""
|SELECT VAR_POP(b),
| VAR_SAMP(b),
| STDDEV_POP(b),
| STDDEV_SAMP(b),
| TUMBLE_START(ts, INTERVAL '15' MINUTE),
| TUMBLE_END(ts, INTERVAL '15' MINUTE)
|FROM MyTable1
| GROUP BY TUMBLE(ts, INTERVAL '15' MINUTE)
""".stripMargin
util.verifyPlan(sql)
}
@Test
def testReturnTypeInferenceForWindowAgg(): Unit = {
val sql =
"""
|SELECT
| SUM(correct) AS s,
| AVG(correct) AS a,
| TUMBLE_START(b, INTERVAL '15' MINUTE) AS wStart
|FROM (
| SELECT CASE a
| WHEN 1 THEN 1
| ELSE 99
| END AS correct, b
| FROM MyTable
|)
|GROUP BY TUMBLE(b, INTERVAL '15' MINUTE)
""".stripMargin
util.verifyPlan(sql)
}
}
object WindowAggregateTest {
@Parameterized.Parameters(name = "aggStrategy={0}")
def parameters(): util.Collection[AggregatePhaseStrategy] = {
Seq[AggregatePhaseStrategy](
AggregatePhaseStrategy.AUTO,
AggregatePhaseStrategy.ONE_PHASE,
AggregatePhaseStrategy.TWO_PHASE
)
}
}
|
fhueske/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/sql/agg/WindowAggregateTest.scala
|
Scala
|
apache-2.0
| 10,877 |
package com.twitter.io
import java.nio.CharBuffer
import java.nio.charset.Charset
/**
* Buf represents a fixed, immutable byte buffer. Buffers may be
* sliced and concatenated, and thus be used to implement
* bytestreams.
*
* Note: There is a Java-friendly API for this trait: [[com.twitter.io.AbstractBuf]].
*/
trait Buf { outer =>
/**
* Write the entire contents of the buffer into the given array at
* the given offset. Partial writes aren't supported directly
* through this API; they are easily performed by first slicing the
* buffer.
* @throws IllegalArgumentException when `output` is too small to
* contain all the data.
*/
@throws(classOf[IllegalArgumentException])
def write(output: Array[Byte], off: Int): Unit
/**
* The number of bytes in the buffer
*/
def length: Int
/**
* Returns a new buffer representing a slice of this buffer, delimited
* by the indices `from` inclusive and `until` exclusive: `[from, until)`.
* Out of bounds indices are truncated. Negative indices are not accepted.
*/
def slice(from: Int, until: Int): Buf
/**
* Concatenate this buffer with the given buffer.
*/
def concat(right: Buf): Buf =
if (right.isEmpty) outer else ConcatBuf(Vector(outer)).concat(right)
override def hashCode = Buf.hash(this)
override def equals(other: Any): Boolean = other match {
case other: Buf => Buf.equals(this, other)
case _ => false
}
def isEmpty = length == 0
/** Helper to support 0-copy coercion to Buf.ByteArray. */
protected def unsafeByteArrayBuf: Option[Buf.ByteArray]
/** May require copying. */
protected def unsafeByteArray: Array[Byte] = unsafeByteArrayBuf match {
case Some(Buf.ByteArray.Owned(bytes, 0, end)) if end == bytes.length =>
bytes
case _ =>
copiedByteArray
}
/** Definitely requires copying. */
protected def copiedByteArray: Array[Byte] = {
val bytes = new Array[Byte](length)
write(bytes, 0)
bytes
}
}
private[io] case class ConcatBuf(chain: Vector[Buf]) extends Buf {
require(chain.length > 0)
override def concat(right: Buf): Buf = right match {
case buf if buf.isEmpty => this
case ConcatBuf(rightChain) => ConcatBuf(chain ++ rightChain)
case buf => ConcatBuf(chain :+ right)
}
// Incrementally determine equality over each segment of the ConcatBuf.
// TODO detect if the other Buf is a ConcatBuf and special-case.
override def equals(other: Any): Boolean = other match {
case other: Buf if isEmpty && other.isEmpty => true
case other: Buf if other.length == length =>
var i = 0
var offset = 0
while (i < chain.length) {
val buf = chain(i)
val sz = buf.length
if (!buf.equals(other.slice(offset, offset + sz))) return false
offset += sz
i += 1
}
true
case _ => false
}
def length: Int = {
var i = 0
var sum = 0
while (i < chain.length) {
sum += chain(i).length
i += 1
}
sum
}
def write(output: Array[Byte], off: Int) = {
require(length <= output.length - off)
var offset = off
chain foreach { buf =>
buf.write(output, offset)
offset += buf.length
}
}
/**
* @note We are forgoing clarity for performance here:
* slice entails only 3 necessary allocations.
*/
def slice(from: Int, until: Int): Buf = {
if (from == until) return Buf.Empty
require(0 <= from && from < until)
var begin = from
var end = until
var start, startBegin, startEnd, finish, finishBegin, finishEnd = -1
var cur = 0
while (cur < chain.length && finish == -1) {
val buf = chain(cur)
val len = buf.length
if (begin >= 0 && begin < len) {
start = cur
startBegin = begin
startEnd = end
}
if (end <= len) {
finish = cur
finishBegin = math.max(0, begin)
finishEnd = end
}
begin -= len
end -= len
cur += 1
}
if (start == -1) Buf.Empty
else if (start == finish || (start == (cur - 1) && finish == -1)) {
chain(start).slice(startBegin, startEnd)
} else if (finish == -1) {
val untrimmedFirst = chain(start)
val first: Buf =
if (startBegin == 0 && startEnd >= untrimmedFirst.length) null
else untrimmedFirst.slice(startBegin, startEnd)
ConcatBuf(
if (first == null) chain.slice(start, length)
else first +: chain.slice(start + 1, length))
} else {
val untrimmedFirst = chain(start)
val first: Buf =
if (startBegin == 0 && startEnd >= untrimmedFirst.length) null
else untrimmedFirst.slice(startBegin, startEnd)
val untrimmedLast = chain(finish)
val last: Buf =
if (finishBegin == 0 && finishEnd >= untrimmedLast.length) null
else untrimmedLast.slice(finishBegin, finishEnd)
ConcatBuf(
if (first == null && last == null) chain.slice(start, finish + 1)
else if (first == null) chain.slice(start, finish) :+ last
else if (last == null) first +: chain.slice(start + 1, finish + 1)
else first +: chain.slice(start + 1, finish) :+ last)
}
}
protected def unsafeByteArrayBuf: Option[Buf.ByteArray] = None
}
/**
* Abstract `Buf` class for Java compatibility.
*/
abstract class AbstractBuf extends Buf
/**
* Buf wrapper-types (like Buf.ByteArray and Buf.ByteBuffer) provide Shared and
* Owned APIs, each of which with construction & extraction utilities.
*
* The Owned APIs may provide direct access to a Buf's underlying
* implementation; and so mutating the data structure invalidates a Buf's
* immutability constraint. Users must take care to handle this data
* immutably.
*
* The Shared variants, on the other hand, ensure that the Buf shares no state
* with the caller (at the cost of additional allocation).
*
* Note: There is a Java-friendly API for this object: [[com.twitter.io.Bufs]].
*/
object Buf {
private class NoopBuf extends Buf {
def write(buf: Array[Byte], off: Int) = ()
override val isEmpty = true
def length = 0
def slice(from: Int, until: Int): Buf = {
require(from >= 0 && until >= 0, "Index out of bounds")
this
}
override def concat(right: Buf) = right
protected def unsafeByteArrayBuf: Option[Buf.ByteArray] = None
}
/**
* An empty buffer.
*/
val Empty: Buf = new NoopBuf
/**
* A buffer representing an array of bytes.
*/
class ByteArray(
private[Buf] val bytes: Array[Byte],
private[Buf] val begin: Int,
private[Buf] val end: Int
) extends Buf {
def write(buf: Array[Byte], off: Int): Unit =
System.arraycopy(bytes, begin, buf, off, length)
def slice(from: Int, until: Int): Buf = {
require(from >=0 && until >= 0, "Index out of bounds")
if (until <= from || from >= length) Buf.Empty
else if (from == 0 && until >= length) this
else {
val cap = math.min(until, length)
ByteArray.Owned(bytes, begin+from, math.min(begin+cap, end))
}
}
def length = end-begin
override def toString = s"ByteArray($length)"
private[this] def equalsBytes(other: Array[Byte], offset: Int): Boolean = {
var i = 0
while (i < length) {
if (bytes(begin+i) != other(offset+i)) {
return false
}
i += 1
}
true
}
override def equals(other: Any): Boolean = other match {
case other: Buf.ByteArray if other.length == length =>
equalsBytes(other.bytes, other.begin)
case other: Buf if other.length == length =>
other.unsafeByteArrayBuf match {
case Some(other) =>
equalsBytes(other.bytes, other.begin)
case None =>
equalsBytes(other.copiedByteArray, 0)
}
case _ => false
}
protected def unsafeByteArrayBuf: Option[Buf.ByteArray] = Some(this)
}
object ByteArray {
/**
* Construct a buffer representing the given bytes.
*/
def apply(bytes: Byte*): Buf = Owned(bytes.toArray)
/**
* Construct a buffer representing the provided array of bytes without copying.
*/
@deprecated("Use Buf.ByteArray.Shared or Buf.ByteArray.Owned.", "6.23.0")
def apply(bytes: Array[Byte]): Buf = Owned(bytes)
/**
* Construct a buffer representing the provided array of bytes
* at the given offsets without copying.
*/
@deprecated("Use Buf.ByteArray.Shared or Buf.ByteArray.Owned.", "6.23.0")
def apply(bytes: Array[Byte], begin: Int, end: Int): Buf = Owned(bytes, begin, end)
/** Extract a ByteArray's underlying data and offsets. */
@deprecated("Use Buf.ByteArray.Shared or Buf.ByteArray.Owned.", "6.23.0")
def unapply(ba: ByteArray): Option[(Array[Byte], Int, Int)] = ByteArray.Owned.unapply(ba)
/**
* Safely coerce a buffer to a Buf.ByteArray, potentially without copying its underlying
* data.
*/
def coerce(buf: Buf): Buf.ByteArray = buf match {
case buf: Buf.ByteArray => buf
case buf => buf.unsafeByteArrayBuf match {
case Some(buf) => buf
case None =>
val bytes = buf.copiedByteArray
new ByteArray(bytes, 0, bytes.length)
}
}
/** Owned non-copying constructors/extractors for Buf.ByteArray. */
object Owned {
/**
* Construct a buffer representing the provided array of bytes
* at the given offsets.
*/
def apply(bytes: Array[Byte], begin: Int, end: Int): Buf =
if (begin == end) Buf.Empty
else new ByteArray(bytes, begin, end)
/** Construct a buffer representing the provided array of bytes. */
def apply(bytes: Array[Byte]): Buf = apply(bytes, 0, bytes.length)
/** Extract the buffer's underlying offsets and array of bytes. */
def unapply(buf: ByteArray): Option[(Array[Byte], Int, Int)] =
Some(buf.bytes, buf.begin, buf.end)
/**
* Get a reference to a Buf's data as an array of bytes.
*
* A copy may be performed if necessary.
*/
def extract(buf: Buf): Array[Byte] = Buf.ByteArray.coerce(buf) match {
case Buf.ByteArray.Owned(bytes, 0, end) if end == bytes.length =>
bytes
case Buf.ByteArray.Shared(bytes) =>
// If the unsafe version included offsets, we need to create a new array
// containing only the relevant bytes.
bytes
}
}
/** Safe copying constructors / extractors for Buf.ByteArray. */
object Shared {
/** Construct a buffer representing a copy of an array of bytes at the given offsets. */
def apply(bytes: Array[Byte], begin: Int, end: Int): Buf =
if (begin == end) Buf.Empty
else {
val copy = java.util.Arrays.copyOfRange(bytes, begin, end)
new ByteArray(copy, 0, end-begin)
}
/** Construct a buffer representing a copy of the entire byte array. */
def apply(bytes: Array[Byte]): Buf = apply(bytes, 0, bytes.length)
/** Extract a copy of the buffer's underlying array of bytes. */
def unapply(ba: ByteArray): Option[Array[Byte]] = Some(ba.copiedByteArray)
/** Get a copy of a Buf's data as an array of bytes. */
def extract(buf: Buf): Array[Byte] = Buf.ByteArray.coerce(buf).copiedByteArray
}
}
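// Usage illustration (added comment, not part of the original source): Owned wraps the
// caller's array without copying, so the array must not be mutated afterwards, while
// Shared copies on both construction and extraction, e.g.
//   val owned  = Buf.ByteArray.Owned(bytes)  // zero-copy view of `bytes`
//   val shared = Buf.ByteArray.Shared(bytes) // defensive copy; `bytes` may be reused freely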
/**
* A buffer representing the remaining bytes in the
* given ByteBuffer. The given buffer will not be
* affected.
*
* Modifications to the ByteBuffer's content will be
* visible to the resulting Buf. The ByteBuffer should
* be immutable in practice.
*/
class ByteBuffer(private[Buf] val underlying: java.nio.ByteBuffer) extends Buf {
def length = underlying.remaining
override def toString = s"ByteBuffer($length)"
def write(output: Array[Byte], off: Int): Unit = {
require(length <= output.length - off)
underlying.duplicate.get(output, off, length)
}
def slice(from: Int, until: Int): Buf = {
require(from >= 0 && until >= 0, "Index out of bounds")
if (until <= from || from >= length) Buf.Empty
else if (from == 0 && until >= length) this
else {
val dup = underlying.duplicate()
val limit = dup.position + math.min(until, length)
if (dup.limit > limit) dup.limit(limit)
dup.position(dup.position + from)
new ByteBuffer(dup)
}
}
override def equals(other: Any): Boolean = other match {
case ByteBuffer(otherBB) =>
underlying.equals(otherBB)
case buf: Buf => Buf.equals(this, buf)
case _ => false
}
protected def unsafeByteArrayBuf: Option[Buf.ByteArray] =
if (underlying.hasArray) {
val array = underlying.array
val begin = underlying.arrayOffset + underlying.position
val end = begin + underlying.remaining
Some(new ByteArray(array, begin, end))
} else None
}
object ByteBuffer {
/**
* Construct a buffer representing the provided [[java.nio.ByteBuffer]].
*
* The ByteBuffer is duplicated but the underlying data is not copied.
*/
@deprecated("Use Buf.ByteBuffer.Shared or Buf.ByteBuffer.Owned.", "6.23.0")
def apply(bb: java.nio.ByteBuffer): Buf = Owned(bb.duplicate)
/** Extract a read-only view of the underlying [[java.nio.ByteBuffer]]. */
def unapply(buf: ByteBuffer): Option[java.nio.ByteBuffer] =
Some(buf.underlying.asReadOnlyBuffer)
/** Coerce a generic buffer to a Buf.ByteBuffer, potentially without copying data. */
def coerce(buf: Buf): ByteBuffer = buf match {
case buf: ByteBuffer => buf
case _ =>
val bb = buf.unsafeByteArrayBuf match {
case Some(ByteArray.Owned(bytes, begin, end)) =>
java.nio.ByteBuffer.wrap(bytes, begin, end-begin)
case None =>
java.nio.ByteBuffer.wrap(buf.copiedByteArray)
}
new ByteBuffer(bb)
}
/** Owned non-copying constructors/extractors for Buf.ByteBuffer. */
object Owned {
// N.B. We cannot use ByteBuffer.asReadOnly to ensure correctness because
// it prevents direct access to its underlying byte array.
/**
* Create a Buf.ByteBuffer by directly wrapping the provided [[java.nio.ByteBuffer]].
*/
def apply(bb: java.nio.ByteBuffer): Buf =
if (bb.remaining == 0) Buf.Empty
else new ByteBuffer(bb)
/** Extract the buffer's underlying [[java.nio.ByteBuffer]]. */
def unapply(buf: ByteBuffer): Option[java.nio.ByteBuffer] = Some(buf.underlying)
/**
* Get a reference to a Buf's data as a ByteBuffer.
*
* A copy may be performed if necessary.
*/
def extract(buf: Buf): java.nio.ByteBuffer = Buf.ByteBuffer.coerce(buf).underlying
}
/** Safe copying constructors/extractors for Buf.ByteBuffer. */
object Shared {
private[this] def copy(orig: java.nio.ByteBuffer): java.nio.ByteBuffer = {
val copy = java.nio.ByteBuffer.allocate(orig.remaining)
copy.put(orig.duplicate)
copy.flip()
copy
}
def apply(bb: java.nio.ByteBuffer): Buf = Owned(copy(bb))
def unapply(buf: ByteBuffer): Option[java.nio.ByteBuffer] = Owned.unapply(buf).map(copy)
def extract(buf: Buf): java.nio.ByteBuffer = copy(Owned.extract(buf))
}
}
/** Convert the Buf to a [[java.nio.ByteBuffer]]. */
@deprecated("Use Buf.ByteBuffer.Owned.extract.", "6.23.0")
def toByteBuffer(buf: Buf): java.nio.ByteBuffer = Buf.ByteBuffer.Owned.extract(buf)
/**
* Byte equality between two buffers. May copy.
*
* Relies on Buf.ByteArray.equals.
*/
def equals(x: Buf, y: Buf): Boolean = {
if (x.length != y.length) return false
Buf.ByteArray.coerce(x).equals(Buf.ByteArray.coerce(y))
}
/** The 32-bit FNV-1a hash of the Buf */
def hash(buf: Buf): Int = finishHash(hashBuf(buf))
// Adapted from util-hashing.
private[this] val UintMax: Long = 0xFFFFFFFFL
private[this] val Fnv1a32Prime: Int = 16777619
private[this] val Fnv1a32Init: Long = 0x811c9dc5L
private[this] def finishHash(hash: Long): Int = (hash & UintMax).toInt
private[this] def hashBuf(buf: Buf, init: Long = Fnv1a32Init): Long = buf match {
case buf if buf.isEmpty => init
case buf: ConcatBuf =>
var i = 0
var h = init
while (i < buf.chain.length) {
h = hashBuf(buf.chain(i), h)
i += 1
}
h
case buf =>
val ba = Buf.ByteArray.coerce(buf)
var i = ba.begin
var h = init
while (i < ba.end) {
h = (h ^ (ba.bytes(i) & 0xff)) * Fnv1a32Prime
i += 1
}
h
}
/**
* Return a string representing the buffer
* contents in hexadecimal.
*/
def slowHexString(buf: Buf): String = {
val ba = Buf.ByteArray.coerce(buf)
val digits = new StringBuilder(2 * ba.length)
var i = ba.begin
while (i < ba.end) {
digits ++= f"${ba.bytes(i)}%02x"
i += 1
}
digits.toString
}
/**
* Create and deconstruct Utf-8 encoded buffers.
* @note Malformed and unmappable input is silently replaced
* see [[java.nio.charset.CodingErrorAction.REPLACE]]
*/
object Utf8 extends StringCoder(Charsets.Utf8)
/**
* Create and deconstruct 16-bit UTF buffers.
* @note Malformed and unmappable input is silently replaced
* see [[java.nio.charset.CodingErrorAction.REPLACE]]
*/
object Utf16 extends StringCoder(Charsets.Utf16)
/**
* Create and deconstruct buffers encoded by the 16-bit UTF charset
* with big-endian byte order.
* @note Malformed and unmappable input is silently replaced
* see [[java.nio.charset.CodingErrorAction.REPLACE]]
*/
object Utf16BE extends StringCoder(Charsets.Utf16BE)
/**
* Create and deconstruct buffers encoded by the 16-bit UTF charset
* with little-endian byte order.
* @note Malformed and unmappable input is silently replaced
* see [[java.nio.charset.CodingErrorAction.REPLACE]]
*/
object Utf16LE extends StringCoder(Charsets.Utf16LE)
/**
* Create and deconstruct buffers encoded by the
* ISO Latin Alphabet No. 1 charset.
* @note Malformed and unmappable input is silently replaced
* see [[java.nio.charset.CodingErrorAction.REPLACE]]
*/
object Iso8859_1 extends StringCoder(Charsets.Iso8859_1)
/**
* Create and deconstruct buffers encoded by the 7-bit ASCII,
* also known as ISO646-US or the Basic Latin block of the
* Unicode character set.
* @note Malformed and unmappable input is silently replaced
* see [[java.nio.charset.CodingErrorAction.REPLACE]]
*/
object UsAscii extends StringCoder(Charsets.UsAscii)
/**
* a StringCoder for a given [[java.nio.charset.Charset]] provides an
* encoder: String -> Buf and an extractor: Buf -> Option[String].
*
* @note Malformed and unmappable input is silently replaced
* see [[java.nio.charset.CodingErrorAction.REPLACE]]
*/
private[io] abstract class StringCoder(charset: Charset) {
/**
* Encode the String to its Buf representation per the charset
*/
def apply(s: String): Buf = {
val enc = Charsets.encoder(charset)
val cb = CharBuffer.wrap(s.toCharArray)
Buf.ByteBuffer.Owned(enc.encode(cb))
}
/**
* @return Some(String representation of the Buf)
* @note This extractor does *not* return None to indicate a failed
* or impossible decoding. Malformed or unmappable bytes will
* instead be silently replaced by the replacement character
* ("\\uFFFD") in the returned String. This behavior may change
* in the future.
*/
def unapply(buf: Buf): Option[String] = {
val dec = Charsets.decoder(charset)
val bb = Buf.ByteBuffer.Owned.extract(buf).asReadOnlyBuffer
Some(dec.decode(bb).toString)
}
}
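  // A hedged usage sketch of the coders above (illustrative only):
  //   val buf = Buf.Utf8("hello")   // String => Buf via apply
  //   val Buf.Utf8(str) = buf       // Buf => String via unapply; str == "hello"
  // The same apply/unapply pattern holds for Utf16, Utf16BE, Utf16LE, Iso8859_1 and UsAscii.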
/**
* Create and deconstruct unsigned 32-bit
* big endian encoded buffers.
*
* Deconstructing will return the value
* as well as the remaining buffer.
*/
object U32BE {
def apply(i: Int): Buf = {
val arr = new Array[Byte](4)
arr(0) = ((i >> 24) & 0xff).toByte
arr(1) = ((i >> 16) & 0xff).toByte
arr(2) = ((i >> 8) & 0xff).toByte
arr(3) = ((i ) & 0xff).toByte
ByteArray.Owned(arr)
}
def unapply(buf: Buf): Option[(Int, Buf)] =
if (buf.length < 4) None else {
val arr = new Array[Byte](4)
buf.slice(0, 4).write(arr, 0)
val rem = buf.slice(4, buf.length)
val value =
((arr(0) & 0xff) << 24) |
((arr(1) & 0xff) << 16) |
((arr(2) & 0xff) << 8) |
((arr(3) & 0xff) )
Some(value, rem)
}
}
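  // A hedged usage sketch (illustrative only): length-prefix framing with U32BE.
  //   val framed = Buf.U32BE(5).concat(Buf.Utf8("hello"))
  //   framed match {
  //     case Buf.U32BE(len, rest) => // len == 5, rest holds the "hello" bytes
  //     case _                    => // fewer than 4 bytes available
  //   }
  // U64BE, U32LE and U64LE below follow the same apply/unapply shape.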
/**
* Create and deconstruct unsigned 64-bit
* big endian encoded buffers.
*
* Deconstructing will return the value
* as well as the remaining buffer.
*/
object U64BE {
def apply(l: Long): Buf = {
val arr = new Array[Byte](8)
arr(0) = ((l >> 56) & 0xff).toByte
arr(1) = ((l >> 48) & 0xff).toByte
arr(2) = ((l >> 40) & 0xff).toByte
arr(3) = ((l >> 32) & 0xff).toByte
arr(4) = ((l >> 24) & 0xff).toByte
arr(5) = ((l >> 16) & 0xff).toByte
arr(6) = ((l >> 8) & 0xff).toByte
arr(7) = ((l ) & 0xff).toByte
ByteArray.Owned(arr)
}
def unapply(buf: Buf): Option[(Long, Buf)] =
if (buf.length < 8) None else {
val arr = new Array[Byte](8)
buf.slice(0, 8).write(arr, 0)
val rem = buf.slice(8, buf.length)
val value =
((arr(0) & 0xff).toLong << 56) |
((arr(1) & 0xff).toLong << 48) |
((arr(2) & 0xff).toLong << 40) |
((arr(3) & 0xff).toLong << 32) |
((arr(4) & 0xff).toLong << 24) |
((arr(5) & 0xff).toLong << 16) |
((arr(6) & 0xff).toLong << 8) |
((arr(7) & 0xff).toLong )
Some(value, rem)
}
}
/**
* Create and deconstruct unsigned 32-bit
* little endian encoded buffers.
*
* Deconstructing will return the value
* as well as the remaining buffer.
*/
object U32LE {
def apply(i: Int): Buf = {
val arr = new Array[Byte](4)
arr(0) = ((i ) & 0xff).toByte
arr(1) = ((i >> 8) & 0xff).toByte
arr(2) = ((i >> 16) & 0xff).toByte
arr(3) = ((i >> 24) & 0xff).toByte
ByteArray.Owned(arr)
}
def unapply(buf: Buf): Option[(Int, Buf)] =
if (buf.length < 4) None else {
val arr = new Array[Byte](4)
buf.slice(0, 4).write(arr, 0)
val rem = buf.slice(4, buf.length)
val value =
((arr(0) & 0xff) ) |
((arr(1) & 0xff) << 8) |
((arr(2) & 0xff) << 16) |
((arr(3) & 0xff) << 24)
Some(value, rem)
}
}
/**
* Create and deconstruct unsigned 64-bit
* little endian encoded buffers.
*
* Deconstructing will return the value
* as well as the remaining buffer.
*/
object U64LE {
def apply(l: Long): Buf = {
val arr = new Array[Byte](8)
arr(0) = ((l ) & 0xff).toByte
arr(1) = ((l >> 8) & 0xff).toByte
arr(2) = ((l >> 16) & 0xff).toByte
arr(3) = ((l >> 24) & 0xff).toByte
arr(4) = ((l >> 32) & 0xff).toByte
arr(5) = ((l >> 40) & 0xff).toByte
arr(6) = ((l >> 48) & 0xff).toByte
arr(7) = ((l >> 56) & 0xff).toByte
ByteArray.Owned(arr)
}
def unapply(buf: Buf): Option[(Long, Buf)] =
if (buf.length < 8) None else {
val arr = new Array[Byte](8)
buf.slice(0, 8).write(arr, 0)
val rem = buf.slice(8, buf.length)
val value =
((arr(0) & 0xff).toLong ) |
((arr(1) & 0xff).toLong << 8) |
((arr(2) & 0xff).toLong << 16) |
((arr(3) & 0xff).toLong << 24) |
((arr(4) & 0xff).toLong << 32) |
((arr(5) & 0xff).toLong << 40) |
((arr(6) & 0xff).toLong << 48) |
((arr(7) & 0xff).toLong << 56)
Some(value, rem)
}
}
}
|
stremlenye/util
|
util-core/src/main/scala/com/twitter/io/Buf.scala
|
Scala
|
apache-2.0
| 24,117 |
package com.arcusys.valamis.web.servlet.scorm
import java.net.URLDecoder
import com.arcusys.valamis.lesson.model.LessonType
import com.arcusys.valamis.lesson.scorm.model.manifest.{LeafActivity, ResourceUrl}
import com.arcusys.valamis.lesson.scorm.model.sequencing.{ProcessorResponseDelivery, ProcessorResponseEndSession}
import com.arcusys.valamis.lesson.scorm.service.sequencing.SequencingProcessor
import com.arcusys.valamis.lesson.scorm.service.{ActivityServiceContract, ScormPackageService}
import com.arcusys.valamis.lesson.service.{LessonLimitService, LessonPlayerService, LessonService}
import com.arcusys.valamis.lesson.tincan.service.TincanPackageService
import com.arcusys.valamis.slide.model.SlideSetStatus
import com.arcusys.valamis.slide.storage.SlideSetRepository
import com.arcusys.valamis.util.serialization.JsonHelper
import com.arcusys.valamis.web.servlet.base.{BaseApiController, PermissionUtil}
import com.arcusys.valamis.web.servlet.request.Parameter
import org.apache.http.HttpStatus
import org.scalatra.{ScalatraBase, SinatraRouteMatcher}
class SequencingServlet extends BaseApiController {
lazy val activityManager = inject[ActivityServiceContract]
lazy val tincanLessonService = inject[TincanPackageService]
lazy val scormLessonService = inject[ScormPackageService]
lazy val lessonService = inject[LessonService]
lazy val lessonLimitService = inject[LessonLimitService]
lazy val slideSetRepository = inject[SlideSetRepository]
lazy val lessonPlayerService = inject[LessonPlayerService]
implicit val scalatra: ScalatraBase = this
implicit override def string2RouteMatcher(path: String) = new SinatraRouteMatcher(path)
// get possible navigation types, check which navigation controls should be hidden
get("/sequencing/NavigationRules/:packageID/:currentScormActivityID") {
val packageID = Parameter("packageID").intRequired
val activityID = Parameter("currentScormActivityID").required
val activity = activityManager.getActivity(packageID, activityID)
JsonHelper.toJson("hiddenUI" -> activity.hiddenNavigationControls.map(_.toString))
}
post("/sequencing/Tincan/:lessonId") {
val lessonId = Parameter("lessonId").intRequired
val lesson = lessonPlayerService.getLessonIfAvailable(lessonId, PermissionUtil.getLiferayUser)
.getOrElse {
halt(HttpStatus.SC_FORBIDDEN, reason = "Lesson is not available", body = "unavailablePackageException")
}
assert(lesson.lessonType == LessonType.Tincan)
val mainFileName = tincanLessonService.getTincanLaunch(lessonId)
val activityId = lessonService.getRootActivityId(lesson)
val slideSetList = slideSetRepository.getByActivityId(activityId)
.filter(_.status == SlideSetStatus.Published)
val versionNumber =
if (slideSetList.isEmpty) 0
else slideSetList.map(_.version).max
JsonHelper.toJson(Map("launchURL" -> mainFileName, "versionNumber" -> versionNumber, "activityId" -> activityId))
}
get("/sequencing/NavigationRequest/:currentScormPackageID/:currentOrganizationID/:sequencingRequest") {
val userID = getUserId.toInt
val packageID = Parameter("currentScormPackageID").intRequired
val organizationID = Parameter("currentOrganizationID").required
val lesson = lessonPlayerService.getLessonIfAvailable(packageID, PermissionUtil.getLiferayUser)
if (lesson.isEmpty) {
"The lesson you are trying to open seems to be unavailable."
} else {
assert(lesson.map(_.lessonType).contains(LessonType.Scorm))
val currentAttempt = activityManager.getActiveAttempt(userID, packageID, organizationID)
val tree = activityManager.getActivityStateTreeForAttemptOrCreate(currentAttempt)
val processor = new SequencingProcessor(currentAttempt, tree)
val sequencingRequest = URLDecoder.decode(Parameter("sequencingRequest").required, "UTF-8")
val jsonData = JsonHelper.toJson(processor.process(sequencingRequest) match {
case ProcessorResponseDelivery(tree) => {
activityManager.updateActivityStateTree(currentAttempt.id.toInt, tree)
val currentActivityID = tree.currentActivity.map(_.item.activity.id).getOrElse("")
Map("currentActivity" -> currentActivityID, "endSession" -> false) ++ getActivityData(packageID, currentActivityID)
}
case ProcessorResponseEndSession(tree) => {
activityManager.updateActivityStateTree(currentAttempt.id.toInt, tree)
activityManager.markAsComplete(currentAttempt.id.toInt)
val currentActivityID = tree.currentActivity.map(_.item.activity.id).getOrElse("")
Map("currentActivity" -> currentActivityID, "endSession" -> true) ++ getActivityData(packageID, currentActivityID)
}
})
contentType = "text/html"
val headScriptData = scala.xml.Unparsed(
"""
function findPlayerView(win) {
var findPlayerTries = 0;
while ( !win.lessonViewer && (win.parent != null) && (win.parent != win)) {
findPlayerTries++;
if (findPlayerTries > 20) return null;
win = win.parent;
}
return win.lessonViewer.playerLayoutView;
}
function getPlayerView() {
var thePlayer = findPlayerView(window);
if ((thePlayer == null)) {
if ((window.opener != null) && (typeof(window.opener) != "undefined"))
          thePlayer = findPlayerView(window.opener);
}
return thePlayer;
}
function init(){
getPlayerView().loadView(""" + jsonData + """);
}""")
<html>
<head>
<script language="javascript">
{ headScriptData }
</script>
</head>
<body onload="init()"></body>
</html>
}
}
// private methods
  private def getActivityData(packageID: Int, id: String): Map[String, Any] =
    activityManager.getActivityOption(packageID, id) match {
      case Some(leafActivity: LeafActivity) =>
        val resource = activityManager.getResource(packageID, leafActivity.resourceIdentifier)
        val manifest = scormLessonService.getManifest(packageID).get
        val href = resource.href.get
        val resultedURL =
          if (href.startsWith("http://") || href.startsWith("https://")) {
            href
          } else {
            val manifestRelativeResourceUrl =
              ResourceUrl(manifest.base, manifest.resourcesBase, resource.base, href, leafActivity.resourceParameters)
            servletContext.getContextPath + "/" + contextRelativeResourceURL(packageID, manifestRelativeResourceUrl)
          }
        Map(
          "activityURL" -> resultedURL,
          "activityTitle" -> leafActivity.title,
          "activityDesc" -> leafActivity.title,
          "hiddenUI" -> leafActivity.hiddenNavigationControls.map(_.toString))
      case _ => Map()
    }
}
  // TODO: is this deprecated?
private def contextRelativeResourceURL(packageID: Int, manifestRelativeResourceUrl: String): String =
"SCORMData/data/" + packageID.toString + "/" + manifestRelativeResourceUrl
}
|
arcusys/Valamis
|
valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/scorm/SequencingServlet.scala
|
Scala
|
gpl-3.0
| 7,249 |
import leon.lang._
object ObjectHierarchyMutation6 {
case class A(var x: Int, var y: Int)
case class B(a1: A, a2: A)
case class C(b1: B, b2: B)
def updateA(a: A): Unit = {
a.x = 43
}
def update(c: C): Int = {
updateA(c.b2.a1)
c.b2.a1.x
} ensuring(res => res == 43)
}
|
regb/leon
|
src/test/resources/regression/verification/xlang/valid/ObjectHierarchyMutation6.scala
|
Scala
|
gpl-3.0
| 298 |
package com.arcusys.valamis.settings.model
object SettingType {
val IssuerName = "IssuerName"
val IssuerURL = "IssuerURL"
val IssuerOrganization = "IssuerOrganization"
val IssuerEmail = "IssuerEmail"
val DBVersion = "DBVersion"
val GoogleClientId = "GoogleClientId"
val GoogleAppId = "GoogleAppId"
val GoogleApiKey = "GoogleApiKey"
var LtiVersion = "LtiVersion"
var LtiMessageType = "LtiMessageType"
var LtiLaunchPresentationReturnUrl = "LtiLaunchPresentationReturnUrl"
var LtiOauthVersion = "LtiOauthVersion"
var LtiOauthSignatureMethod = "LtiOauthSignatureMethod"
var BetaStudioUrl = "BetaStudioUrl"
}
|
arcusys/Valamis
|
valamis-core/src/main/scala/com/arcusys/valamis/settings/model/SettingType.scala
|
Scala
|
gpl-3.0
| 633 |
package com.twitter.finagle.memcached.integration
import _root_.java.io.ByteArrayOutputStream
import _root_.java.lang.{Boolean => JBoolean}
import com.twitter.common.application.ShutdownRegistry.ShutdownRegistryImpl
import com.twitter.common.zookeeper.ServerSet.EndpointStatus
import com.twitter.common.zookeeper.testing.ZooKeeperTestServer
import com.twitter.common.zookeeper.{ServerSets, ZooKeeperClient, ZooKeeperUtils}
import com.twitter.concurrent.Spool
import com.twitter.concurrent.Spool.*::
import com.twitter.conversions.time._
import com.twitter.finagle.builder.{ClientBuilder, Cluster}
import com.twitter.finagle.memcached.protocol.text.Memcached
import com.twitter.finagle.memcached.{CacheNode, CachePoolCluster, CachePoolConfig, Client, KetamaClientBuilder, PartitionedClient}
import com.twitter.finagle.memcached.util.ChannelBufferUtils._
import com.twitter.finagle.zookeeper.ZookeeperServerSetCluster
import com.twitter.finagle.{Name, Resolver}
import com.twitter.io.Charsets
import com.twitter.util.{Await, Duration, Future}
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfter, FunSuite, Outcome}
import scala.collection.mutable
@RunWith(classOf[JUnitRunner])
class ClusterClientTest extends FunSuite with BeforeAndAfter {
/**
* Note: This integration test requires a real Memcached server to run.
*/
var shutdownRegistry: ShutdownRegistryImpl = null
var testServers: List[TestMemcachedServer] = List()
var zkServerSetCluster: ZookeeperServerSetCluster = null
var zookeeperClient: ZooKeeperClient = null
val zkPath = "/cache/test/silly-cache"
var zookeeperServer: ZooKeeperTestServer = null
var dest: Name = null
before {
// start zookeeper server and create zookeeper client
shutdownRegistry = new ShutdownRegistryImpl
zookeeperServer = new ZooKeeperTestServer(0, shutdownRegistry)
zookeeperServer.startNetwork()
// connect to zookeeper server
zookeeperClient = zookeeperServer.createClient(ZooKeeperClient.digestCredentials("user","pass"))
// create serverset
val serverSet = ServerSets.create(zookeeperClient, ZooKeeperUtils.EVERYONE_READ_CREATOR_ALL, zkPath)
zkServerSetCluster = new ZookeeperServerSetCluster(serverSet)
    // start five memcached servers and join the cluster
(0 to 4) foreach { _ =>
TestMemcachedServer.start() match {
case Some(server) =>
testServers :+= server
zkServerSetCluster.join(server.address)
case None =>
throw new Exception("could not start TestMemcachedServer")
}
}
if (!testServers.isEmpty) {
// set cache pool config node data
val cachePoolConfig: CachePoolConfig = new CachePoolConfig(cachePoolSize = 5)
val output: ByteArrayOutputStream = new ByteArrayOutputStream
CachePoolConfig.jsonCodec.serialize(cachePoolConfig, output)
zookeeperClient.get().setData(zkPath, output.toByteArray, -1)
// a separate client which only does zk discovery for integration test
zookeeperClient = zookeeperServer.createClient(ZooKeeperClient.digestCredentials("user","pass"))
// destination of the test cache endpoints
dest = Resolver.eval("twcache!localhost:" + zookeeperServer.getPort.toString + "!" + zkPath)
}
}
after {
// shutdown zookeeper server and client
shutdownRegistry.execute()
if (!testServers.isEmpty) {
      // shut down the memcached servers
testServers foreach { _.stop() }
testServers = List()
}
}
override def withFixture(test: NoArgTest): Outcome = {
if (!testServers.isEmpty) test()
else {
info("Cannot start memcached. Skipping test...")
cancel()
}
}
test("Simple ClusterClient using finagle load balancing - many keys") {
// create simple cluster client
val mycluster =
new ZookeeperServerSetCluster(
ServerSets.create(zookeeperClient, ZooKeeperUtils.EVERYONE_READ_CREATOR_ALL, zkPath))
    Await.result(mycluster.ready) // give it some time for the cluster to get the initial set of memberships
val client = Client(mycluster)
val count = 100
(0 until count).foreach{
n => {
client.set("foo"+n, "bar"+n)
}
}
val tmpClients = testServers map {
case(server) =>
Client(
ClientBuilder()
.hosts(server.address)
.codec(new Memcached)
.hostConnectionLimit(1)
.daemon(true)
.build())
}
(0 until count).foreach {
n => {
var found = false
tmpClients foreach {
c =>
if (Await.result(c.get("foo"+n))!=None) {
assert(!found)
found = true
}
}
assert(found)
}
}
}
if (!sys.props.contains("SKIP_FLAKY"))
test("Cache specific cluster - add and remove") {
// the cluster initially must have 5 members
val myPool = initializePool(5)
var additionalServers = List[EndpointStatus]()
/***** start 5 more memcached servers and join the cluster ******/
// cache pool should remain the same size at this moment
intercept[com.twitter.util.TimeoutException] {
expectPoolStatus(myPool, currentSize = 5, expectedPoolSize = -1, expectedAdd = -1, expectedRem = -1) {
additionalServers = addMoreServers(5)
}.get(2.seconds)()
}
// update config data node, which triggers the pool update
// cache pool cluster should be updated
try {
expectPoolStatus(myPool, currentSize = 5, expectedPoolSize = 10, expectedAdd = 5, expectedRem = 0) {
updateCachePoolConfigData(10)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
/***** remove 2 servers from the zk serverset ******/
// cache pool should remain the same size at this moment
intercept[com.twitter.util.TimeoutException] {
expectPoolStatus(myPool, currentSize = 10, expectedPoolSize = -1, expectedAdd = -1, expectedRem = -1) {
additionalServers(0).leave()
additionalServers(1).leave()
}.get(2.seconds)()
}
// update config data node, which triggers the pool update
// cache pool should be updated
try {
expectPoolStatus(myPool, currentSize = 10, expectedPoolSize = 8, expectedAdd = 0, expectedRem = 2) {
updateCachePoolConfigData(8)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
/***** remove 2 more then add 3 ******/
// cache pool should remain the same size at this moment
intercept[com.twitter.util.TimeoutException] {
expectPoolStatus(myPool, currentSize = 8, expectedPoolSize = -1, expectedAdd = -1, expectedRem = -1) {
additionalServers(2).leave()
additionalServers(3).leave()
addMoreServers(3)
}.get(2.seconds)()
}
// update config data node, which triggers the pool update
// cache pool should be updated
try {
expectPoolStatus(myPool, currentSize = 8, expectedPoolSize = 9, expectedAdd = 3, expectedRem = 2) {
updateCachePoolConfigData(9)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
}
if (!Option(System.getProperty("SKIP_FLAKY")).isDefined)
test("zk failures test") {
// the cluster initially must have 5 members
val myPool = initializePool(5)
/***** fail the server here to verify the pool manager will re-establish ******/
// cache pool cluster should remain the same
intercept[com.twitter.util.TimeoutException] {
expectPoolStatus(myPool, currentSize = 5, expectedPoolSize = -1, expectedAdd = -1, expectedRem = -1) {
zookeeperServer.expireClientSession(zookeeperClient)
zookeeperServer.shutdownNetwork()
}.get(2.seconds)()
}
/***** start the server now ******/
// cache pool cluster should remain the same
intercept[com.twitter.util.TimeoutException] {
expectPoolStatus(myPool, currentSize = 5, expectedPoolSize = -1, expectedAdd = -1, expectedRem = -1) {
zookeeperServer.startNetwork
Thread.sleep(2000)
}.get(2.seconds)()
}
/***** start 5 more memcached servers and join the cluster ******/
// update config data node, which triggers the pool update
      // cache pool cluster should still be able to see underlying pool changes
try {
expectPoolStatus(myPool, currentSize = 5, expectedPoolSize = 10, expectedAdd = 5, expectedRem = 0) {
addMoreServers(5)
updateCachePoolConfigData(10)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
}
if (!Option(System.getProperty("SKIP_FLAKY")).isDefined)
test("using backup pools") {
// shutdown the server before initializing our cache pool cluster
zookeeperServer.shutdownNetwork()
// the cache pool cluster should pickup backup pools
// the underlying pool will continue trying to connect to zk
val myPool = initializePool(2, Some(scala.collection.immutable.Set(
new CacheNode("host1", 11211, 1),
new CacheNode("host2", 11212, 1))))
// bring the server back online
      // give it some time; we should see the cache pool cluster pick up the underlying pool
try {
expectPoolStatus(myPool, currentSize = 2, expectedPoolSize = 5, expectedAdd = 5, expectedRem = 2) {
zookeeperServer.startNetwork
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
/***** start 5 more memcached servers and join the cluster ******/
// update config data node, which triggers the pool update
      // cache pool cluster should still be able to see underlying pool changes
try {
expectPoolStatus(myPool, currentSize = 5, expectedPoolSize = 10, expectedAdd = 5, expectedRem = 0) {
addMoreServers(5)
updateCachePoolConfigData(10)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
}
test("Ketama ClusterClient using a distributor - set & get") {
val client = KetamaClientBuilder()
.clientBuilder(ClientBuilder().hostConnectionLimit(1).codec(Memcached()).failFast(false))
.failureAccrualParams(Int.MaxValue, Duration.Top)
.dest(dest)
.build()
client.delete("foo")()
assert(client.get("foo")() === None)
client.set("foo", "bar")()
assert(client.get("foo")().get.toString(Charsets.Utf8) === "bar")
}
test("Ketama ClusterClient using a distributor - many keys") {
val client = KetamaClientBuilder()
.clientBuilder(ClientBuilder().hostConnectionLimit(1).codec(Memcached()).failFast(false))
.failureAccrualParams(Int.MaxValue, Duration.Top)
.dest(dest)
.build()
.asInstanceOf[PartitionedClient]
val count = 100
(0 until count).foreach{
n => {
client.set("foo"+n, "bar"+n)()
}
}
(0 until count).foreach {
n => {
val c = client.clientOf("foo"+n)
assert(c.get("foo"+n)().get.toString(Charsets.Utf8) === "bar"+n)
}
}
}
test("Ketama ClusterClient using a distributor - use custom keys") {
// create my cluster client solely based on a zk client and a path
val mycluster = CachePoolCluster.newZkCluster(zkPath, zookeeperClient)
    mycluster.ready() // give it some time for the cluster to get the initial set of memberships
val customKey = "key-"
var shardId = -1
val myClusterWithCustomKey = mycluster map {
case node: CacheNode => {
shardId += 1
CacheNode(node.host, node.port, node.weight, Some(customKey + shardId.toString))
}
}
val client = KetamaClientBuilder()
.clientBuilder(ClientBuilder().hostConnectionLimit(1).codec(Memcached()).failFast(false))
.failureAccrualParams(Int.MaxValue, Duration.Top)
.cachePoolCluster(myClusterWithCustomKey)
.build()
assert(trackCacheShards(client.asInstanceOf[PartitionedClient]).size === 5)
}
if (!Option(System.getProperty("SKIP_FLAKY")).isDefined)
test("Ketama ClusterClient using a distributor - cache pool is changing") {
// create my cluster client solely based on a zk client and a path
val mycluster = initializePool(5)
val client = KetamaClientBuilder()
.clientBuilder(ClientBuilder().hostConnectionLimit(1).codec(Memcached()).failFast(false))
.failureAccrualParams(Int.MaxValue, Duration.Top)
.cachePoolCluster(mycluster)
.build()
.asInstanceOf[PartitionedClient]
// initially there should be 5 cache shards being used
assert(trackCacheShards(client).size === 5)
      // add 4 more cache servers and update cache pool config data, now there should be 9 shards
var additionalServers = List[EndpointStatus]()
try {
expectPoolStatus(mycluster, currentSize = 5, expectedPoolSize = 9, expectedAdd = 4, expectedRem = 0) {
additionalServers = addMoreServers(4)
updateCachePoolConfigData(9)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
// Unlike CachePoolCluster, our KetamaClient doesn't have api to expose its internal state and
// it shouldn't, hence here I don't really have a better way to wait for the client's key ring
// redistribution to finish other than sleep for a while.
Thread.sleep(1000)
assert(trackCacheShards(client).size === 9)
// remove 2 cache servers and update cache pool config data, now there should be 7 shards
try {
expectPoolStatus(mycluster, currentSize = 9, expectedPoolSize = 7, expectedAdd = 0, expectedRem = 2) {
additionalServers(0).leave()
additionalServers(1).leave()
updateCachePoolConfigData(7)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
Thread.sleep(1000)
assert(trackCacheShards(client).size === 7)
// remove another 2 cache servers and update cache pool config data, now there should be 5 shards
try {
expectPoolStatus(mycluster, currentSize = 7, expectedPoolSize = 5, expectedAdd = 0, expectedRem = 2) {
additionalServers(2).leave()
additionalServers(3).leave()
updateCachePoolConfigData(5)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
Thread.sleep(1000)
assert(trackCacheShards(client).size === 5)
// add 2 more cache servers and update cache pool config data, now there should be 7 shards
try {
expectPoolStatus(mycluster, currentSize = 5, expectedPoolSize = 7, expectedAdd = 2, expectedRem = 0) {
additionalServers = addMoreServers(2)
updateCachePoolConfigData(7)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
Thread.sleep(1000)
assert(trackCacheShards(client).size === 7)
// add another 2 more cache servers and update cache pool config data, now there should be 9 shards
try {
expectPoolStatus(mycluster, currentSize = 7, expectedPoolSize = 9, expectedAdd = 2, expectedRem = 0) {
additionalServers = addMoreServers(2)
updateCachePoolConfigData(9)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
Thread.sleep(1000)
assert(trackCacheShards(client).size === 9)
      // remove 2 and add 2; there should still be 9 shards
try {
expectPoolStatus(mycluster, currentSize = 9, expectedPoolSize = 9, expectedAdd = 2, expectedRem = 2) {
additionalServers(0).leave()
additionalServers(1).leave()
addMoreServers(2)
updateCachePoolConfigData(9)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
Thread.sleep(1000)
assert(trackCacheShards(client).size === 9)
}
if (!Option(System.getProperty("SKIP_FLAKY")).isDefined)
test("Ketama ClusterClient using a distributor - unmanaged cache pool is changing") {
// create my cluster client solely based on a zk client and a path
val mycluster = initializePool(5, ignoreConfigData = true)
val client = KetamaClientBuilder()
.clientBuilder(ClientBuilder().hostConnectionLimit(1).codec(Memcached()).failFast(false))
.failureAccrualParams(Int.MaxValue, Duration.Top)
.cachePoolCluster(mycluster)
.build()
.asInstanceOf[PartitionedClient]
// initially there should be 5 cache shards being used
assert(trackCacheShards(client).size === 5)
      // add 4 more cache servers; the unmanaged cluster should now see 9 shards
var additionalServers = List[EndpointStatus]()
try {
expectPoolStatus(mycluster, currentSize = 5, expectedPoolSize = 9, expectedAdd = 4, expectedRem = 0) {
additionalServers = addMoreServers(4)
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
// Unlike CachePoolCluster, our KetamaClient doesn't have api to expose its internal state and
// it shouldn't, hence here I don't really have a better way to wait for the client's key ring
// redistribution to finish other than sleep for a while.
Thread.sleep(1000)
assert(trackCacheShards(client).size === 9)
      // remove 2 cache servers; the unmanaged cluster should now see 7 shards
try {
expectPoolStatus(mycluster, currentSize = 9, expectedPoolSize = 7, expectedAdd = 0, expectedRem = 2) {
additionalServers(0).leave()
additionalServers(1).leave()
}.get(10.seconds)()
}
catch { case _: Exception => fail("it shouldn't trown an exception") }
Thread.sleep(1000)
assert(trackCacheShards(client).size === 7)
}
def updateCachePoolConfigData(size: Int) {
val cachePoolConfig: CachePoolConfig = new CachePoolConfig(cachePoolSize = size)
var output: ByteArrayOutputStream = new ByteArrayOutputStream
CachePoolConfig.jsonCodec.serialize(cachePoolConfig, output)
zookeeperClient.get().setData(zkPath, output.toByteArray, -1)
}
// create temporary zk clients for additional cache servers since we will need to
// de-register these services by expiring corresponding zk client session
def addMoreServers(size: Int): List[EndpointStatus] = {
(1 to size) map { _ =>
val server = TestMemcachedServer.start()
testServers :+= server.get
zkServerSetCluster.joinServerSet(server.get.address)
} toList
}
def initializePool(
expectedSize: Int,
backupPool: Option[scala.collection.immutable.Set[CacheNode]]=None,
ignoreConfigData: Boolean = false
): Cluster[CacheNode] = {
val myCachePool =
if (! ignoreConfigData) CachePoolCluster.newZkCluster(zkPath, zookeeperClient, backupPool = backupPool)
else CachePoolCluster.newUnmanagedZkCluster(zkPath, zookeeperClient)
Await.result(myCachePool.ready) // wait until the pool is ready
myCachePool.snap match {
case (cachePool, changes) =>
assert(cachePool.size === expectedSize)
}
myCachePool
}
/**
   * return a future which completes only when the cache pool has changed AND the changes
   * meet all the expected conditions after executing the operation passed in
* @param currentSize expected current pool size
* @param expectedPoolSize expected pool size after changes, use -1 to expect any size
* @param expectedAdd expected number of add event happened, use -1 to expect any number
* @param expectedRem expected number of rem event happened, use -1 to expect any number
* @param ops operation to execute
*/
def expectPoolStatus(
myCachePool: Cluster[CacheNode],
currentSize: Int,
expectedPoolSize: Int,
expectedAdd: Int,
expectedRem: Int
)(ops: => Unit): Future[Unit] = {
var addSeen = 0
var remSeen = 0
var poolSeen = mutable.HashSet[CacheNode]()
def expectMore(spoolChanges: Spool[Cluster.Change[CacheNode]]): Future[Unit] = {
spoolChanges match {
case change *:: tail =>
change match {
case Cluster.Add(node) =>
addSeen += 1
poolSeen.add(node)
case Cluster.Rem(node) =>
remSeen += 1
poolSeen.remove(node)
}
if ((expectedAdd == -1 || addSeen == expectedAdd) &&
(expectedRem == -1 || remSeen == expectedRem) &&
(expectedPoolSize == -1 || poolSeen.size == expectedPoolSize)) Future.Done
else tail flatMap expectMore
}
}
myCachePool.snap match {
case (cachePool, changes) =>
assert(cachePool.size === currentSize)
poolSeen ++= cachePool
val retFuture = changes flatMap expectMore
ops // invoke the function now
retFuture
}
}
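  // A hedged usage sketch of expectPoolStatus (mirrors the calls in the tests above; illustrative only):
  //   expectPoolStatus(myPool, currentSize = 5, expectedPoolSize = 10, expectedAdd = 5, expectedRem = 0) {
  //     updateCachePoolConfigData(10)
  //   }.get(10.seconds)()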
def trackCacheShards(client: PartitionedClient) = mutable.Set.empty[Client] ++
((0 until 100).map { n => client.clientOf("foo"+n) })
}
|
kristofa/finagle
|
finagle-memcached/src/test/scala/com/twitter/finagle/memcached/integration/ClusterClientTest.scala
|
Scala
|
apache-2.0
| 21,541 |
package views.html.tv
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import controllers.routes
object side {
def channels(
channel: lila.tv.Tv.Channel,
champions: lila.tv.Tv.Champions,
baseUrl: String
): Frag =
div(cls := "tv-channels subnav")(
lila.tv.Tv.Channel.all.map { c =>
a(
href := s"$baseUrl/${c.key}",
cls := List(
"tv-channel" -> true,
c.key -> true,
"active" -> (c == channel)
)
)(
span(dataIcon := c.icon)(
span(
strong(c.name),
span(cls := "champion")(
champions.get(c).fold[Frag](raw(" - ")) { p =>
frag(
p.user.title.fold[Frag](p.user.name)(t => frag(t, nbsp, p.user.name)),
ratingTag(
" ",
p.rating
)
)
}
)
)
)
)
}
)
private val separator = " β’ "
def meta(pov: lila.game.Pov)(implicit ctx: Context): Frag = {
import pov._
div(cls := "game__meta")(
st.section(
div(cls := "game__meta__infos", dataIcon := views.html.game.bits.gameIcon(game))(
div(cls := "header")(
div(cls := "setup")(
views.html.game.widgets showClock game,
separator,
(if (game.rated) trans.rated else trans.casual).txt(),
separator,
views.html.game.bits.variantLink(game.variant, game.perfType, shortName = true)
)
)
),
div(cls := "game__meta__players")(
game.players.map { p =>
div(cls := s"player color-icon is ${p.color.name} text")(
playerLink(p, withOnline = false, withDiff = true, withBerserk = true)
)
}
)
),
game.tournamentId map { tourId =>
st.section(cls := "game__tournament-link")(
a(href := routes.Tournament.show(tourId), dataIcon := "ξ", cls := "text")(
tournamentIdToName(tourId)
)
)
}
)
}
def sides(
pov: lila.game.Pov,
cross: Option[lila.game.Crosstable.WithMatchup]
)(implicit ctx: Context) =
div(cls := "sides")(
cross.map {
views.html.game.crosstable(_, pov.gameId.some)
}
)
}
|
luanlv/lila
|
app/views/tv/side.scala
|
Scala
|
mit
| 2,492 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.tensor
import breeze.linalg.{DenseMatrix, DenseVector}
import com.intel.analytics.bigdl.mkl.Memory
import com.intel.analytics.bigdl.dllib.nn.mkldnn.{MemoryOwner, Releasable}
import com.intel.analytics.bigdl.dllib.tensor.DnnTensor.DnnTensorUnsupportOperations
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.Table
import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.linalg.Matrix
import scala.reflect.ClassTag
class DnnTensor[T: ClassTag](
private var _storage: DnnStorage[T],
private var sizes: Array[Int]
) (implicit ev: TensorNumeric[T], owner: MemoryOwner)
extends DnnTensorUnsupportOperations[T] with Releasable {
owner.registerResource(this)
  // Cache the element count here: recomputing sizes.product on each call caused a performance regression.
private val _nElement: Int = sizes.product
override def nElement(): Int = _nElement
override def copy(other: Tensor[T]): Tensor[T] = {
other match {
case t: DenseTensor[_] =>
require(DnnTensor.noTransposed(t), "dense tensor should not be transposed")
require(this.nElement() == other.nElement(), "tensor elements number must be same")
this._storage.copy(other.storage(), 0, other.storageOffset() - 1, this.nElement())
case t: DnnTensor[_] =>
require(this.nElement() == other.nElement(), "tensor elements number must be same")
this._storage.copy(other.storage(), 0, 0, this.nElement())
case _ => throw new UnsupportedOperationException(
"Only support copy from dense tensor and dnn tensor")
}
this
}
def release(): Unit = {
_storage.release()
}
def storageAddress(): Long = _storage.ptr.address
def isReleased(): Boolean = _storage.isReleased()
override def storage(): Storage[T] = _storage
override def resize(s: Array[Int], stride: Array[Int] = null): this.type = {
require(stride == null, "dnn tensor doesn't have stride")
if (s.product > nElement()) {
_storage.release()
_storage = new DnnStorage[T](s.product)
}
this.sizes = s.clone()
this
}
override def resize(s: Int): this.type = {
if (s > nElement()) {
_storage.release()
_storage = new DnnStorage[T](s)
}
this.sizes = Array(s)
this
}
override def add(x: Tensor[T]): Tensor[T] = {
require(x.isInstanceOf[DnnTensor[_]], "Just support two dnn tensor add")
Memory.SAdd(this.nElement(), this._storage.ptr.address, 0,
x.asInstanceOf[DnnTensor[T]]._storage.ptr.address, 0, this._storage.ptr.address, 0)
this
}
override def zero(): Tensor[T] = {
Memory.Zero(this._storage.ptr.address, this.nElement(), DnnStorage.FLOAT_BYTES)
this
}
def axpby(a: Float, b: Float, to: DnnTensor[T]): Unit = {
val x = this._storage.ptr.address
val y = to._storage.ptr.address
Memory.Axpby(this.nElement(), a, x, b, y)
}
def scale(from: DnnTensor[T], scal: Float): Unit = {
val length = this.nElement()
Memory.Scale(length, scal, from._storage.ptr.address, this._storage.ptr.address)
}
override def toTensor[D](implicit ev: TensorNumeric[D]): DnnTensor[D] = {
this.asInstanceOf[DnnTensor[D]]
}
override def size(): Array[Int] = sizes.clone()
override def size(d: Int): Int = sizes(d - 1)
override def dim(): Int = size().length
override def nDimension(): Int = size().length
override def getTensorType: TensorType = MklDnnType
override def equals(obj: Any): Boolean = {
if (obj == null) {
return false
}
if (!obj.isInstanceOf[DnnTensor[T]]) {
return false
}
val other = obj.asInstanceOf[DnnTensor[T]]
if (this.size().deep != other.size().deep) {
return false
}
if (this._storage.ptr != other._storage.ptr) {
return false
}
true
}
override def getType(): TensorDataType = {
ev.getType()
}
override def hashCode(): Int = {
val seed = 37
var hash = 1
hash = hash * seed + this.nDimension
var d = 1
while (d <= this.nDimension) {
hash = hash * seed + this.size(d)
d += 1
}
hash = hash * seed + this._storage.ptr.hashCode()
hash
}
override def set(): Tensor[T] = {
    // TODO: we do nothing here; the behavior is not the same as DenseTensor
this
}
override def set(other: Tensor[T]): Tensor[T] = {
require(other.isInstanceOf[DnnTensor[T]], s"only support to set DnnTensor")
this._storage.release()
this._storage = other.storage().asInstanceOf[DnnStorage[T]]
this
}
override def toString: String = {
ev.getType() match {
case FloatType =>
if (size().product != this.nElement()) {
val dense = Tensor[Float](Array(this.nElement()))
Memory.CopyPtr2Array(this.storageAddress(), 0, dense.storage().array(),
0, nElement(), 4)
dense.toString
} else {
val dense = Tensor[Float](size())
dense.copy(this.asInstanceOf[DnnTensor[Float]])
dense.toString
}
case ByteType =>
val array = new Array[Byte](nElement())
Memory.CopyPtr2ByteArray(this.asInstanceOf[DnnTensor[Byte]].storageAddress(), 0,
array, 0, nElement(), 1)
array.mkString("\\t")
case IntType =>
val array = new Array[Int](nElement())
Memory.CopyPtr2IntArray(this.storageAddress(), 0, array, 0, nElement(), 4)
array.mkString("\\t")
case _ => "unknown type"
}
}
}
object DnnTensor {
// scalastyle:off
private def ???(): Nothing = {
throw new UnsupportedOperationException("DnnTensor doesn't support this operation")
}
// scalastyle:on
private[tensor] def noTransposed(t: DenseTensor[_]): Boolean = {
var product = 1
var i = t.dim()
while(i > 0) {
if (product != t.stride(i)) return false
product *= t.size(i)
i -= 1
}
return true
}
def apply[T: ClassTag](sizes: Array[Int])(
implicit ev: TensorNumeric[T], owner: MemoryOwner): DnnTensor[T] = {
val storage = new DnnStorage[T](sizes.product)
new DnnTensor[T](storage, sizes)
}
def apply[T: ClassTag](sizes: Array[Int], realSize: Long)(
implicit ev: TensorNumeric[T], owner: MemoryOwner): DnnTensor[T] = {
    val storage = new DnnStorage[T](realSize.toInt) // FIXME: what if realSize exceeds Int.MaxValue?
new DnnTensor[T](storage, sizes)
}
def apply[T: ClassTag](d1: Int)(
implicit ev: TensorNumeric[T], owner: MemoryOwner): DnnTensor[T] = {
val storage = new DnnStorage[T](d1)
new DnnTensor[T](storage, Array(d1))
}
def apply[T: ClassTag](d1: Int, d2: Int)(
implicit ev: TensorNumeric[T], owner: MemoryOwner): DnnTensor[T] = {
val storage = new DnnStorage[T](d1 * d2)
new DnnTensor[T](storage, Array(d1, d2))
}
def apply[T: ClassTag](d1: Int, d2: Int, d3: Int)(
implicit ev: TensorNumeric[T], owner: MemoryOwner): DnnTensor[T] = {
val storage = new DnnStorage[T](d1 * d2 * d3)
new DnnTensor[T](storage, Array(d1, d2, d3))
}
def apply[T: ClassTag](d1: Int, d2: Int, d3: Int, d4: Int)(
implicit ev: TensorNumeric[T], owner: MemoryOwner): DnnTensor[T] = {
val storage = new DnnStorage[T](d1 * d2 * d3 * d4)
new DnnTensor[T](storage, Array(d1, d2, d3, d4))
}
def apply[T: ClassTag](d1: Int, d2: Int, d3: Int, d4: Int, d5: Int)(
implicit ev: TensorNumeric[T], owner: MemoryOwner): DnnTensor[T] = {
val storage = new DnnStorage[T](d1 * d2 * d3 * d4 * d5)
new DnnTensor[T](storage, Array(d1, d2, d3, d4, d5))
}
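  // A hedged usage sketch (assumes implicit TensorNumeric[Float] and MemoryOwner instances in scope,
  // as required by the factories above; illustrative only):
  //   val t = DnnTensor[Float](2, 3)   // 6 floats backed by native (MKL-DNN) storage
  //   t.zero()                         // clears the native buffer via Memory.Zero
  //   t.release()                      // native memory must be released explicitly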
class DnnTensorUnsupportOperations[T: ClassTag](implicit ev: TensorNumeric[T]) extends Tensor[T] {
// scalastyle:off
override def isEmpty: Boolean = ???
override def isScalar: Boolean = ???
override def nDimension(): Int = ???
override def dim(): Int = ???
override def size(): Array[Int] = ???
override def size(dim: Int): Int = ???
override def stride(): Array[Int] = ???
override def stride(dim: Int): Int = ???
override def fill(v: T): Tensor[T] = ???
override def forceFill(v: Any): Tensor[T] = ???
override def zero(): Tensor[T] = ???
override def randn(): Tensor[T] = ???
override def randn(mean: Double, stdv: Double): Tensor[T] = ???
override def rand(): Tensor[T] = ???
override def rand(lowerBound: Double, upperBound: Double): Tensor[T] = ???
override def bernoulli(p: Double): Tensor[T] = ???
override def transpose(dim1: Int, dim2: Int): Tensor[T] = ???
override def t(): Tensor[T] = ???
override def apply(index: Int): Tensor[T] = ???
override def apply(indexes: Array[Int]): T = ???
override def value(): T = ???
override def valueAt(d1: Int): T = ???
override def valueAt(d1: Int, d2: Int): T = ???
override def valueAt(d1: Int, d2: Int, d3: Int): T = ???
override def valueAt(d1: Int, d2: Int, d3: Int, d4: Int): T = ???
override def valueAt(d1: Int, d2: Int, d3: Int, d4: Int, d5: Int): T = ???
override def apply(t: Table): Tensor[T] = ???
override def update(index: Int, value: T): Unit = ???
override def update(index: Int, src: Tensor[T]): Unit = ???
override def update(indexes: Array[Int], value: T): Unit = ???
override def setValue(value: T): DnnTensorUnsupportOperations.this.type = ???
override def setValue(d1: Int, value: T): DnnTensorUnsupportOperations.this.type = ???
override def setValue(d1: Int, d2: Int, value: T): DnnTensorUnsupportOperations.this.type = ???
override def setValue(d1: Int, d2: Int, d3: Int, value: T): DnnTensorUnsupportOperations.this.type = ???
override def setValue(d1: Int, d2: Int, d3: Int, d4: Int, value: T): DnnTensorUnsupportOperations.this.type = ???
override def setValue(d1: Int, d2: Int, d3: Int, d4: Int, d5: Int, value: T): DnnTensorUnsupportOperations.this.type = ???
override def update(t: Table, value: T): Unit = ???
override def update(t: Table, src: Tensor[T]): Unit = ???
override def update(filter: (T) => Boolean, value: T): Unit = ???
override def isContiguous(): Boolean = ???
override def contiguous(): Tensor[T] = ???
override def isSameSizeAs(other: Tensor[_]): Boolean = ???
override def emptyInstance(): Tensor[T] = ???
override def resizeAs(src: Tensor[_]): Tensor[T] = ???
override def cast[D: ClassManifest](castTensor: Tensor[D])(implicit ev: TensorNumeric[D]): Tensor[D] = ???
override def resize(sizes: Array[Int], strides: Array[Int]): Tensor[T] = ???
override def resize(size1: Int): Tensor[T] = ???
override def resize(size1: Int, size2: Int): Tensor[T] = ???
override def resize(size1: Int, size2: Int, size3: Int): Tensor[T] = ???
override def resize(size1: Int, size2: Int, size3: Int, size4: Int): Tensor[T] = ???
override def resize(size1: Int, size2: Int, size3: Int, size4: Int, size5: Int): Tensor[T] = ???
override def nElement(): Int = ???
override def select(dim: Int, index: Int): Tensor[T] = ???
override def storage(): Storage[T] = ???
override def storageOffset(): Int = ???
override def set(other: Tensor[T]): Tensor[T] = ???
override def set(storage: Storage[T], storageOffset: Int, sizes: Array[Int], strides: Array[Int]): Tensor[T] = ???
override def set(): Tensor[T] = ???
override def narrow(dim: Int, index: Int, size: Int): Tensor[T] = ???
override def copy(other: Tensor[T]): Tensor[T] = ???
override def applyFun[A: ClassManifest](t: Tensor[A], func: (A) => T): Tensor[T] = ???
override def apply1(func: (T) => T): Tensor[T] = ???
override def zipWith[A: ClassManifest, B: ClassManifest](t1: Tensor[A], t2: Tensor[B], func: (A, B) => T): Tensor[T] = ???
override def map(other: Tensor[T], func: (T, T) => T): Tensor[T] = ???
override def squeeze(): Tensor[T] = ???
override def squeeze(dim: Int): Tensor[T] = ???
override def squeezeNewTensor(): Tensor[T] = ???
override def view(sizes: Array[Int]): Tensor[T] = ???
override def unfold(dim: Int, size: Int, step: Int): Tensor[T] = ???
override def repeatTensor(sizes: Array[Int]): Tensor[T] = ???
override def expandAs(template: Tensor[T]): Tensor[T] = ???
override def expand(sizes: Array[Int]): Tensor[T] = ???
override def split(size: Int, dim: Int): Array[Tensor[T]] = ???
override def split(dim: Int): Array[Tensor[T]] = ???
override def toBreezeVector(): DenseVector[T] = ???
override def toMLlibVector(): linalg.Vector = ???
override def toBreezeMatrix(): DenseMatrix[T] = ???
override def toMLlibMatrix(): Matrix = ???
override def getType(): TensorDataType = ???
override def diff(other: Tensor[T], count: Int, reverse: Boolean): Boolean = ???
override def addSingletonDimension(t: Tensor[T], dim: Int): Tensor[T] = ???
override def addMultiDimension(t: Tensor[T], dims: Array[Int]): Tensor[T] = ???
override def reshape(sizes: Array[Int]): Tensor[T] = ???
override def save(path: String, overWrite: Boolean): DnnTensorUnsupportOperations.this.type = ???
override def getTensorNumeric(): TensorNumeric[T] = ???
override def getTensorType: TensorType = ???
override def toArray(): Array[T] = ???
override def +(s: T): Tensor[T] = ???
override def +(t: Tensor[T]): Tensor[T] = ???
override def -(s: T): Tensor[T] = ???
override def -(t: Tensor[T]): Tensor[T] = ???
override def unary_-(): Tensor[T] = ???
override def /(s: T): Tensor[T] = ???
override def /(t: Tensor[T]): Tensor[T] = ???
override def *(s: T): Tensor[T] = ???
override def *(t: Tensor[T]): Tensor[T] = ???
override def sum(): T = ???
override def prod(): T = ???
override def prod(x: Tensor[T], dim: Int): Tensor[T] = ???
override def sum(dim: Int): Tensor[T] = ???
override def sum(x: Tensor[T], dim: Int): Tensor[T] = ???
override def mean(): T = ???
override def mean(dim: Int): Tensor[T] = ???
override def max(): T = ???
override def max(dim: Int): (Tensor[T], Tensor[T]) = ???
override def max(values: Tensor[T], indices: Tensor[T], dim: Int): (Tensor[T], Tensor[T]) = ???
override def min(): T = ???
override def min(dim: Int): (Tensor[T], Tensor[T]) = ???
override def min(values: Tensor[T], indices: Tensor[T], dim: Int): (Tensor[T], Tensor[T]) = ???
override def scatter(dim: Int, index: Tensor[T], src: Tensor[T]): Tensor[T] = ???
override def gather(dim: Int, index: Tensor[T], src: Tensor[T]): Tensor[T] = ???
override def conv2(kernel: Tensor[T], vf: Char): Tensor[T] = ???
override def xcorr2(kernel: Tensor[T], vf: Char): Tensor[T] = ???
override def sqrt(): Tensor[T] = ???
override def tanh(): Tensor[T] = ???
override def abs(): Tensor[T] = ???
override def add(value: T, y: Tensor[T]): Tensor[T] = ???
override def add(y: Tensor[T]): Tensor[T] = ???
override def add(x: Tensor[T], value: T, y: Tensor[T]): Tensor[T] = ???
override def add(value: T): Tensor[T] = ???
override def add(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def dot(y: Tensor[T]): T = ???
override def cmax(value: T): Tensor[T] = ???
override def dist(y: Tensor[T], norm: Int): T = ???
override def addcmul(value: T, tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] = ???
override def addcmul(tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] = ???
override def addcdiv(value: T, tensor1: Tensor[T], tensor2: Tensor[T]): Tensor[T] = ???
override def sub(value: T, y: Tensor[T]): Tensor[T] = ???
override def sub(x: Tensor[T], value: T, y: Tensor[T]): Tensor[T] = ???
override def sub(y: Tensor[T]): Tensor[T] = ???
override def sub(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def sub(value: T): Tensor[T] = ???
override def cmul(y: Tensor[T]): Tensor[T] = ???
override def cmul(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def cdiv(y: Tensor[T]): Tensor[T] = ???
override def cdiv(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def mul(value: T): Tensor[T] = ???
override def div(value: T): Tensor[T] = ???
override def div(y: Tensor[T]): Tensor[T] = ???
override def mul(x: Tensor[T], value: T): Tensor[T] = ???
override def addmm(v1: T, M: Tensor[T], v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] = ???
override def addmm(M: Tensor[T], mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] = ???
override def addmm(mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] = ???
override def addmm(v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] = ???
override def addmm(v1: T, v2: T, mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] = ???
override def mm(mat1: Tensor[T], mat2: Tensor[T]): Tensor[T] = ???
override def addr(t1: Tensor[T], t2: Tensor[T]): Tensor[T] = ???
override def addr(v1: T, t1: Tensor[T], t2: Tensor[T]): Tensor[T] = ???
override def addr(v1: T, t1: Tensor[T], v2: T, t2: Tensor[T]): Tensor[T] = ???
override def addr(v1: T, t1: Tensor[T], v2: T, t2: Tensor[T], t3: Tensor[T]): Tensor[T] = ???
override def uniform(args: T*): T = ???
override def addmv(beta: T, vec1: Tensor[T], alpha: T, mat: Tensor[T], vec2: Tensor[T]): Tensor[T] = ???
override def addmv(beta: T, alpha: T, mat: Tensor[T], vec2: Tensor[T]): Tensor[T] = ???
override def addmv(alpha: T, mat: Tensor[T], vec2: Tensor[T]): Tensor[T] = ???
override def mv(mat: Tensor[T], vec2: Tensor[T]): Tensor[T] = ???
override def baddbmm(beta: T, M: Tensor[T], alpha: T, batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] = ???
override def baddbmm(beta: T, alpha: T, batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] = ???
override def baddbmm(alpha: T, batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] = ???
override def bmm(batch1: Tensor[T], batch2: Tensor[T]): Tensor[T] = ???
override def pow(y: Tensor[T], n: T): Tensor[T] = ???
override def pow(n: T): Tensor[T] = ???
override def square(): Tensor[T] = ???
override def floor(y: Tensor[T]): Tensor[T] = ???
override def floor(): Tensor[T] = ???
override def ceil(): Tensor[T] = ???
override def inv(): Tensor[T] = ???
override def erf(): Tensor[T] = ???
override def erfc(): Tensor[T] = ???
override def logGamma(): Tensor[T] = ???
override def digamma(): Tensor[T] = ???
override def topk(k: Int, dim: Int, increase: Boolean, result: Tensor[T], indices: Tensor[T], sortedResult: Boolean): (Tensor[T], Tensor[T]) = ???
override def log(y: Tensor[T]): Tensor[T] = ???
override def exp(y: Tensor[T]): Tensor[T] = ???
override def sqrt(y: Tensor[T]): Tensor[T] = ???
override def tanh(y: Tensor[T]): Tensor[T] = ???
override def log1p(y: Tensor[T]): Tensor[T] = ???
override def log(): Tensor[T] = ???
override def exp(): Tensor[T] = ???
override def log1p(): Tensor[T] = ???
override def abs(x: Tensor[T]): Tensor[T] = ???
override def norm(y: Tensor[T], value: Int, dim: Int): Tensor[T] = ???
override def gt(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def lt(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def le(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def eq(x: Tensor[T], y: T): Tensor[T] = ???
override def maskedFill(mask: Tensor[T], e: T): Tensor[T] = ???
override def maskedCopy(mask: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def maskedSelect(mask: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def norm(value: Int): T = ???
override def sign(): Tensor[T] = ???
override def ge(x: Tensor[T], value: Double): Tensor[T] = ???
override def indexAdd(dim: Int, index: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def index(dim: Int, index: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def cmax(y: Tensor[T]): Tensor[T] = ???
override def cmin(y: Tensor[T]): Tensor[T] = ???
override def cmax(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def cmin(x: Tensor[T], y: Tensor[T]): Tensor[T] = ???
override def range(xmin: Double, xmax: Double, step: Int): Tensor[T] = ???
override def negative(x: Tensor[T]): Tensor[T] = ???
override def reduce(dim: Int, result: Tensor[T], reducer: (T, T) => T): Tensor[T] = ???
override def sumSquare(): T = ???
override def clamp(min: Double, max: Double): Tensor[T] = ???
override def toTensor[D](implicit ev: TensorNumeric[D]): Tensor[D] = ???
override private[bigdl] def toQuantizedTensor = ???
    // scalastyle:on
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/tensor/DnnTensor.scala
|
Scala
|
apache-2.0
| 21,114 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding {
import cascading.operation._
import cascading.tuple._
import cascading.flow._
import cascading.pipe.assembly.AggregateBy
import com.twitter.chill.MeatLocker
import scala.collection.JavaConverters._
import com.twitter.algebird.{AdaptiveCache, Semigroup, SummingWithHitsCache}
import com.twitter.scalding.mathematics.Poisson
import serialization.Externalizer
import scala.util.Try
trait ScaldingPrepare[C] extends Operation[C] {
abstract override def prepare(flowProcess: FlowProcess[_], operationCall: OperationCall[C]): Unit = {
RuntimeStats.addFlowProcess(flowProcess)
super.prepare(flowProcess, operationCall)
}
}
class FlatMapFunction[S, T](
@transient fn: S => TraversableOnce[T],
fields: Fields,
conv: TupleConverter[S],
set: TupleSetter[T]
) extends BaseOperation[Any](fields)
with Function[Any]
with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
/**
* Private helper to get at the function that this FlatMapFunction wraps
*/
private[scalding] def getFunction = fn
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]): Unit =
lockedFn.get(conv(functionCall.getArguments)).foreach { arg: T =>
val this_tup = set(arg)
functionCall.getOutputCollector.add(this_tup)
}
}
class MapFunction[S, T](@transient fn: S => T, fields: Fields, conv: TupleConverter[S], set: TupleSetter[T])
extends BaseOperation[Any](fields)
with Function[Any]
with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]): Unit = {
val res = lockedFn.get(conv(functionCall.getArguments))
functionCall.getOutputCollector.add(set(res))
}
}
/*
The IdentityFunction puts empty nodes in the cascading graph. We use these to nudge the cascading planner
in some edge cases.
*/
object IdentityFunction
extends BaseOperation[Any](Fields.ALL)
with Function[Any]
with ScaldingPrepare[Any] {
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]): Unit =
functionCall.getOutputCollector.add(functionCall.getArguments)
}
class CleanupIdentityFunction(@transient fn: () => Unit)
extends BaseOperation[Any](Fields.ALL)
with Filter[Any]
with ScaldingPrepare[Any] {
val lockedEf = Externalizer(fn)
def isRemove(flowProcess: FlowProcess[_], filterCall: FilterCall[Any]) = false
override def cleanup(flowProcess: FlowProcess[_], operationCall: OperationCall[Any]): Unit =
Try(lockedEf.get).foreach(_())
}
class CollectFunction[S, T](
@transient fn: PartialFunction[S, T],
fields: Fields,
conv: TupleConverter[S],
set: TupleSetter[T]
) extends BaseOperation[Any](fields)
with Function[Any]
with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]): Unit = {
val partialfn = lockedFn.get
val args = conv(functionCall.getArguments)
if (partialfn.isDefinedAt(args)) {
functionCall.getOutputCollector.add(set(partialfn(args)))
}
}
}
/**
 * An implementation of map-side combining which is appropriate for associative and commutative functions.
 * If a cacheSize is given, it is used; otherwise we query the config for cascading.aggregateby.threshold
 * (the standard cascading param for the equivalent case); failing that, we use a default value of 100,000.
*
 * This keeps a cache of keys up to the cache-size, summing values as keys collide. On eviction, or
 * completion of this Operation, the key-value pairs are put into the outputCollector.
*
 * This NEVER spills to disk and should generally never be a performance penalty. If you have poor
 * locality in the keys, you just don't get any benefit, but there is little added cost.
*
* Note this means that you may still have repeated keys in the output even on a single mapper since the key
* space may be so large that you can't fit all of them in the cache at the same time.
*
* You can use this with the Fields-API by doing:
* {{{
* val msr = new MapsideReduce(Semigroup.from(fn), 'key, 'value, None)
* // MUST map onto the same key,value space (may be multiple fields)
* val mapSideReduced = pipe.eachTo(('key, 'value) -> ('key, 'value)) { _ => msr }
* }}}
 * That said, this is equivalent to AggregateBy, and its only advantage is that it is much simpler than
 * AggregateBy. AggregateBy assumes several parallel reductions are happening, and thus has many loops and
 * array lookups to deal with that. Since this does many fewer allocations and has a smaller code path, it
 * may be faster for the typed API.
*/
object MapsideReduce {
val COUNTER_GROUP = "MapsideReduce"
}
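// Illustrative sketch (not part of the original file): the core idea behind MapsideReduce is a
// bounded map-side cache that sums values for colliding keys with a semigroup-like `plus` and
// emits entries on eviction or flush. `SketchSummingCache` and its eviction policy are
// hypothetical simplifications; the real implementation is Algebird's SummingWithHitsCache /
// AdaptiveCache driven by the Operation callbacks below.
object MapsideCombineSketch {
  final class SketchSummingCache[K, V](capacity: Int, plus: (V, V) => V) {
    private val cache = scala.collection.mutable.LinkedHashMap.empty[K, V]

    // Sum the value into the cache; return any entry evicted to stay within capacity.
    def put(key: K, value: V): Map[K, V] = {
      val summed = cache.get(key).fold(value)(plus(_, value))
      cache.update(key, summed)
      if (cache.size <= capacity) Map.empty
      else {
        val (k, v) = cache.head // evict the oldest entry
        cache.remove(k)
        Map(k -> v)
      }
    }

    // Drain whatever is left; callers forward these entries downstream.
    def flush(): Map[K, V] = {
      val all = cache.toMap
      cache.clear()
      all
    }
  }

  def main(args: Array[String]): Unit = {
    val cache = new SketchSummingCache[String, Int](capacity = 2, plus = _ + _)
    val evicted = Seq("a" -> 1, "b" -> 2, "a" -> 3, "c" -> 4).flatMap { case (k, v) => cache.put(k, v) }
    println(evicted)       // entries pushed out while staying within capacity
    println(cache.flush()) // remaining partially-summed entries
  }
}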
class MapsideReduce[V](
@transient commutativeSemigroup: Semigroup[V],
keyFields: Fields,
valueFields: Fields,
cacheSize: Option[Int]
)(implicit conv: TupleConverter[V], set: TupleSetter[V])
extends BaseOperation[MapsideCache[Tuple, V]](Fields.join(keyFields, valueFields))
with Function[MapsideCache[Tuple, V]]
with ScaldingPrepare[MapsideCache[Tuple, V]] {
val boxedSemigroup = Externalizer(commutativeSemigroup)
override def prepare(
flowProcess: FlowProcess[_],
operationCall: OperationCall[MapsideCache[Tuple, V]]
): Unit = {
//Set up the context:
implicit val sg: Semigroup[V] = boxedSemigroup.get
val cache = MapsideCache[Tuple, V](cacheSize, flowProcess)
operationCall.setContext(cache)
}
@inline
private def add(
evicted: Option[Map[Tuple, V]],
functionCall: FunctionCall[MapsideCache[Tuple, V]]
): Unit =
// Use iterator and while for optimal performance (avoid closures/fn calls)
if (evicted.isDefined) {
// Don't use pattern matching in performance-critical code
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
val it = evicted.get.iterator
val tecol = functionCall.getOutputCollector
while (it.hasNext) {
val (key, value) = it.next
// Safe to mutate this key as it is evicted from the map
key.addAll(set(value))
tecol.add(key)
}
}
override def operate(
flowProcess: FlowProcess[_],
functionCall: FunctionCall[MapsideCache[Tuple, V]]
): Unit = {
val cache = functionCall.getContext
val keyValueTE = functionCall.getArguments
// Have to keep a copy of the key tuple because cascading will modify it
val key = keyValueTE.selectEntry(keyFields).getTupleCopy
val value = conv(keyValueTE.selectEntry(valueFields))
val evicted = cache.put(key, value)
add(evicted, functionCall)
}
override def flush(
flowProcess: FlowProcess[_],
operationCall: OperationCall[MapsideCache[Tuple, V]]
): Unit = {
// Docs say it is safe to do this cast:
// http://docs.cascading.org/cascading/2.1/javadoc/cascading/operation/Operation.html#flush(cascading.flow.FlowProcess, cascading.operation.OperationCall)
val functionCall = operationCall.asInstanceOf[FunctionCall[MapsideCache[Tuple, V]]]
val cache = functionCall.getContext
add(cache.flush, functionCall)
}
override def cleanup(
flowProcess: FlowProcess[_],
operationCall: OperationCall[MapsideCache[Tuple, V]]
): Unit =
    // The cache may be large, so be super sure we drop any reference to it ASAP.
    // Probably overly defensive, but it's super cheap.
operationCall.setContext(null)
}
class TypedMapsideReduce[K, V](
@transient fn: TupleEntry => TraversableOnce[(K, V)],
@transient commutativeSemigroup: Semigroup[V],
sourceFields: Fields,
keyFields: Fields,
valueFields: Fields,
cacheSize: Option[Int]
)(implicit setKV: TupleSetter[(K, V)])
extends BaseOperation[MapsideCache[K, V]](Fields.join(keyFields, valueFields))
with Function[MapsideCache[K, V]]
with ScaldingPrepare[MapsideCache[K, V]] {
val boxedSemigroup = Externalizer(commutativeSemigroup)
val lockedFn = Externalizer(fn)
override def prepare(
flowProcess: FlowProcess[_],
operationCall: OperationCall[MapsideCache[K, V]]
): Unit = {
//Set up the context:
implicit val sg: Semigroup[V] = boxedSemigroup.get
val cache = MapsideCache[K, V](cacheSize, flowProcess)
operationCall.setContext(cache)
}
// Don't use pattern matching in a performance-critical section
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
@inline
private def add(evicted: Option[Map[K, V]], functionCall: FunctionCall[MapsideCache[K, V]]): Unit =
// Use iterator and while for optimal performance (avoid closures/fn calls)
if (evicted.isDefined) {
val it = evicted.get.iterator
val tecol = functionCall.getOutputCollector
while (it.hasNext) {
val (key, value) = it.next
// Safe to mutate this key as it is evicted from the map
tecol.add(setKV(key, value))
}
}
import scala.collection.mutable.{Map => MMap}
private[this] class CollectionBackedMap[K, V](val backingMap: MMap[K, V])
extends Map[K, V]
with java.io.Serializable {
def get(key: K) = backingMap.get(key)
def iterator = backingMap.iterator
def +[B1 >: V](kv: (K, B1)) = backingMap.toMap + kv
def -(key: K) = backingMap.toMap - key
}
// Don't use pattern matching in a performance-critical section
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
private[this] def mergeTraversableOnce[K, V: Semigroup](items: TraversableOnce[(K, V)]): Map[K, V] = {
val mutable =
scala.collection.mutable
.OpenHashMap[K, V]() // Scala's OpenHashMap seems faster than Java and Scala's HashMap Impl's
val innerIter = items.toIterator
while (innerIter.hasNext) {
val (k, v) = innerIter.next
val oldVOpt: Option[V] = mutable.get(k)
// sorry for the micro optimization here: avoiding a closure
val newV: V = if (oldVOpt.isEmpty) v else Semigroup.plus(oldVOpt.get, v)
mutable.update(k, newV)
}
new CollectionBackedMap(mutable)
}
override def operate(
flowProcess: FlowProcess[_],
functionCall: FunctionCall[MapsideCache[K, V]]
): Unit = {
val cache = functionCall.getContext
implicit val sg: Semigroup[V] = boxedSemigroup.get
val res: Map[K, V] = mergeTraversableOnce(lockedFn.get(functionCall.getArguments))
val evicted = cache.putAll(res)
add(evicted, functionCall)
}
override def flush(
flowProcess: FlowProcess[_],
operationCall: OperationCall[MapsideCache[K, V]]
): Unit = {
// Docs say it is safe to do this cast:
// http://docs.cascading.org/cascading/2.1/javadoc/cascading/operation/Operation.html#flush(cascading.flow.FlowProcess, cascading.operation.OperationCall)
val functionCall = operationCall.asInstanceOf[FunctionCall[MapsideCache[K, V]]]
val cache = functionCall.getContext
add(cache.flush, functionCall)
}
override def cleanup(
flowProcess: FlowProcess[_],
operationCall: OperationCall[MapsideCache[K, V]]
): Unit =
    // The cache may be large, so be super sure we drop any reference to it ASAP.
    // Probably overly defensive, but it's super cheap.
operationCall.setContext(null)
}
sealed trait MapsideCache[K, V] {
def flush: Option[Map[K, V]]
def put(key: K, value: V): Option[Map[K, V]]
def putAll(key: Map[K, V]): Option[Map[K, V]]
}
object MapsideCache {
val DEFAULT_CACHE_SIZE = 100000
val SIZE_CONFIG_KEY = AggregateBy.AGGREGATE_BY_THRESHOLD
val ADAPTIVE_CACHE_KEY = "scalding.mapsidecache.adaptive"
private def getCacheSize(fp: FlowProcess[_]): Int =
Option(fp.getStringProperty(SIZE_CONFIG_KEY))
.filterNot(_.isEmpty)
.map(_.toInt)
.getOrElse(DEFAULT_CACHE_SIZE)
def apply[K, V: Semigroup](cacheSize: Option[Int], flowProcess: FlowProcess[_]): MapsideCache[K, V] = {
val size = cacheSize.getOrElse(getCacheSize(flowProcess))
val adaptive = Option(flowProcess.getStringProperty(ADAPTIVE_CACHE_KEY)).isDefined
if (adaptive)
new AdaptiveMapsideCache(flowProcess, new AdaptiveCache(size))
else
new SummingMapsideCache(flowProcess, new SummingWithHitsCache(size))
}
}
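// Illustrative sketch (not part of the original file): how the two configuration keys above are
// typically consumed. The threshold key is assumed here to be the literal value of
// AggregateBy.AGGREGATE_BY_THRESHOLD; the plain Map-based "config" and `chooseCache` are
// hypothetical stand-ins for the FlowProcess property lookup performed by MapsideCache.apply.
object MapsideCacheConfigSketch {
  val SizeKey = "cascading.aggregateby.threshold" // assumed value of AggregateBy.AGGREGATE_BY_THRESHOLD
  val AdaptiveKey = "scalding.mapsidecache.adaptive"

  def chooseCache(props: Map[String, String]): String = {
    val size = props.get(SizeKey).filter(_.nonEmpty).map(_.toInt).getOrElse(100000)
    val adaptive = props.contains(AdaptiveKey)
    if (adaptive) s"AdaptiveCache(size = $size)" else s"SummingWithHitsCache(size = $size)"
  }

  def main(args: Array[String]): Unit = {
    println(chooseCache(Map.empty))                                      // default summing cache
    println(chooseCache(Map(SizeKey -> "50000", AdaptiveKey -> "true"))) // smaller, adaptive cache
  }
}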
final class SummingMapsideCache[K, V](flowProcess: FlowProcess[_], summingCache: SummingWithHitsCache[K, V])
extends MapsideCache[K, V] {
private[this] val misses = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "misses"))
private[this] val hits = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "hits"))
private[this] val evictions = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "evictions"))
def flush = summingCache.flush
// Don't use pattern matching in performance-critical code
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
def put(key: K, value: V): Option[Map[K, V]] = {
val (curHits, evicted) = summingCache.putWithHits(Map(key -> value))
misses.increment(1 - curHits)
hits.increment(curHits)
if (evicted.isDefined)
evictions.increment(evicted.get.size)
evicted
}
// Don't use pattern matching in a performance-critical section
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
def putAll(kvs: Map[K, V]): Option[Map[K, V]] = {
val (curHits, evicted) = summingCache.putWithHits(kvs)
misses.increment(kvs.size - curHits)
hits.increment(curHits)
if (evicted.isDefined)
evictions.increment(evicted.get.size)
evicted
}
}
final class AdaptiveMapsideCache[K, V](flowProcess: FlowProcess[_], adaptiveCache: AdaptiveCache[K, V])
extends MapsideCache[K, V] {
private[this] val misses = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "misses"))
private[this] val hits = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "hits"))
private[this] val capacity = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "capacity"))
private[this] val sentinel = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "sentinel"))
private[this] val evictions = CounterImpl(flowProcess, StatKey(MapsideReduce.COUNTER_GROUP, "evictions"))
def flush = adaptiveCache.flush
// Don't use pattern matching in performance-critical code
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
def put(key: K, value: V) = {
val (stats, evicted) = adaptiveCache.putWithStats(Map(key -> value))
misses.increment(1 - stats.hits)
hits.increment(stats.hits)
capacity.increment(stats.cacheGrowth)
sentinel.increment(stats.sentinelGrowth)
if (evicted.isDefined)
evictions.increment(evicted.get.size)
evicted
}
// Don't use pattern matching in a performance-critical section
@SuppressWarnings(Array("org.wartremover.warts.OptionPartial"))
def putAll(kvs: Map[K, V]): Option[Map[K, V]] = {
val (stats, evicted) = adaptiveCache.putWithStats(kvs)
misses.increment(kvs.size - stats.hits)
hits.increment(stats.hits)
capacity.increment(stats.cacheGrowth)
sentinel.increment(stats.sentinelGrowth)
if (evicted.isDefined)
evictions.increment(evicted.get.size)
evicted
}
}
/*
* BaseOperation with support for context
*/
abstract class SideEffectBaseOperation[C](
@transient bf: => C, // begin function returns a context
@transient ef: C => Unit, // end function to clean up context object
fields: Fields
) extends BaseOperation[C](fields)
with ScaldingPrepare[C] {
val lockedBf = Externalizer(() => bf)
val lockedEf = Externalizer(ef)
override def prepare(flowProcess: FlowProcess[_], operationCall: OperationCall[C]): Unit =
operationCall.setContext(lockedBf.get.apply)
override def cleanup(flowProcess: FlowProcess[_], operationCall: OperationCall[C]): Unit =
lockedEf.get(operationCall.getContext)
}
/*
 * A map function that allows a state object to be set up and torn down.
*/
class SideEffectMapFunction[S, C, T](
bf: => C, // begin function returns a context
@transient fn: (C, S) => T, // function that takes a context and a tuple and generate a new tuple
ef: C => Unit, // end function to clean up context object
fields: Fields,
conv: TupleConverter[S],
set: TupleSetter[T]
) extends SideEffectBaseOperation[C](bf, ef, fields)
with Function[C] {
val lockedFn = Externalizer(fn)
override def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[C]): Unit = {
val context = functionCall.getContext
val s = conv(functionCall.getArguments)
val res = lockedFn.get(context, s)
functionCall.getOutputCollector.add(set(res))
}
}
/*
 * A flatmap function that allows a state object to be set up and torn down.
*/
class SideEffectFlatMapFunction[S, C, T](
bf: => C, // begin function returns a context
@transient fn: (
C,
S
) => TraversableOnce[T], // function that takes a context and a tuple, returns TraversableOnce of T
ef: C => Unit, // end function to clean up context object
fields: Fields,
conv: TupleConverter[S],
set: TupleSetter[T]
) extends SideEffectBaseOperation[C](bf, ef, fields)
with Function[C] {
val lockedFn = Externalizer(fn)
override def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[C]): Unit = {
val context = functionCall.getContext
val s = conv(functionCall.getArguments)
lockedFn.get(context, s).foreach(t => functionCall.getOutputCollector.add(set(t)))
}
}
class FilterFunction[T](@transient fn: T => Boolean, conv: TupleConverter[T])
extends BaseOperation[Any]
with Filter[Any]
with ScaldingPrepare[Any] {
val lockedFn = Externalizer(fn)
def isRemove(flowProcess: FlowProcess[_], filterCall: FilterCall[Any]) =
!lockedFn.get(conv(filterCall.getArguments))
}
// All the following are operations for use in GroupBuilder
class FoldAggregator[T, X](
@transient fn: (X, T) => X,
@transient init: X,
fields: Fields,
conv: TupleConverter[T],
set: TupleSetter[X]
) extends BaseOperation[X](fields)
with Aggregator[X]
with ScaldingPrepare[X] {
val lockedFn = Externalizer(fn)
private val lockedInit = MeatLocker(init)
def initCopy = lockedInit.copy
def start(flowProcess: FlowProcess[_], call: AggregatorCall[X]): Unit =
call.setContext(initCopy)
def aggregate(flowProcess: FlowProcess[_], call: AggregatorCall[X]): Unit = {
val left = call.getContext
val right = conv(call.getArguments)
call.setContext(lockedFn.get(left, right))
}
def complete(flowProcess: FlowProcess[_], call: AggregatorCall[X]): Unit =
call.getOutputCollector.add(set(call.getContext))
}
/*
* fields are the declared fields of this aggregator
*/
class MRMAggregator[T, X, U](
@transient inputFsmf: T => X,
@transient inputRfn: (X, X) => X,
@transient inputMrfn: X => U,
fields: Fields,
conv: TupleConverter[T],
set: TupleSetter[U]
) extends BaseOperation[Tuple](fields)
with Aggregator[Tuple]
with ScaldingPrepare[Tuple] {
val fsmf = Externalizer(inputFsmf)
val rfn = Externalizer(inputRfn)
val mrfn = Externalizer(inputMrfn)
// The context is a singleton Tuple, which is mutable so
// we don't have to allocate at every step of the loop:
def start(flowProcess: FlowProcess[_], call: AggregatorCall[Tuple]): Unit =
call.setContext(null)
def extractArgument(call: AggregatorCall[Tuple]): X = fsmf.get(conv(call.getArguments))
def aggregate(flowProcess: FlowProcess[_], call: AggregatorCall[Tuple]): Unit = {
val arg = extractArgument(call)
val ctx = call.getContext
if (ctx == null) {
// Initialize the context, this is the only allocation done by this loop.
val newCtx = Tuple.size(1)
newCtx.set(0, arg.asInstanceOf[AnyRef])
call.setContext(newCtx)
} else {
// Mutate the context:
val oldValue = ctx.getObject(0).asInstanceOf[X]
val newValue = rfn.get(oldValue, arg)
ctx.set(0, newValue.asInstanceOf[AnyRef])
}
}
def complete(flowProcess: FlowProcess[_], call: AggregatorCall[Tuple]): Unit = {
val ctx = call.getContext
if (null != ctx) {
val lastValue = ctx.getObject(0).asInstanceOf[X]
// Make sure to drop the reference to the lastValue as soon as possible (it may be big)
call.setContext(null)
call.getOutputCollector.add(set(mrfn.get(lastValue)))
} else {
throw new Exception("MRMAggregator completed without any args")
}
}
}
/**
* This handles the mapReduceMap work on the map-side of the operation. The code below attempts to be
* optimal with respect to memory allocations and performance, not functional style purity.
*/
abstract class FoldFunctor[X](fields: Fields) extends AggregateBy.Functor {
// Extend these three methods:
def first(args: TupleEntry): X
def subsequent(oldValue: X, newArgs: TupleEntry): X
def finish(lastValue: X): Tuple
override final def getDeclaredFields = fields
/*
* It's important to keep all state in the context as Cascading seems to
   * reuse these objects, so any per-instance state might give unexpected
* results.
*/
override final def aggregate(flowProcess: FlowProcess[_], args: TupleEntry, context: Tuple) = {
var nextContext: Tuple = null
val newContextObj = if (context == null) {
// First call, make a new mutable tuple to reduce allocations:
nextContext = Tuple.size(1)
first(args)
} else {
//We are updating
val oldValue = context.getObject(0).asInstanceOf[X]
nextContext = context
subsequent(oldValue, args)
}
nextContext.set(0, newContextObj.asInstanceOf[AnyRef])
//Return context for reuse next time:
nextContext
}
override final def complete(flowProcess: FlowProcess[_], context: Tuple) =
if (context == null) {
throw new Exception("FoldFunctor completed with any aggregate calls")
} else {
val res = context.getObject(0).asInstanceOf[X]
// Make sure we remove the ref to the context ASAP:
context.set(0, null)
finish(res)
}
}
/**
* This handles the mapReduceMap work on the map-side of the operation. The code below attempts to be
* optimal with respect to memory allocations and performance, not functional style purity.
*/
class MRMFunctor[T, X](
@transient inputMrfn: T => X,
@transient inputRfn: (X, X) => X,
fields: Fields,
conv: TupleConverter[T],
set: TupleSetter[X]
) extends FoldFunctor[X](fields) {
val mrfn = Externalizer(inputMrfn)
val rfn = Externalizer(inputRfn)
override def first(args: TupleEntry): X = mrfn.get(conv(args))
override def subsequent(oldValue: X, newArgs: TupleEntry) = {
val right = mrfn.get(conv(newArgs))
rfn.get(oldValue, right)
}
override def finish(lastValue: X) = set(lastValue)
}
/**
* MapReduceMapBy Class
*/
class MRMBy[T, X, U](
arguments: Fields,
middleFields: Fields,
declaredFields: Fields,
mfn: T => X,
rfn: (X, X) => X,
mfn2: X => U,
startConv: TupleConverter[T],
midSet: TupleSetter[X],
midConv: TupleConverter[X],
endSet: TupleSetter[U]
) extends AggregateBy(
arguments,
new MRMFunctor[T, X](mfn, rfn, middleFields, startConv, midSet),
new MRMAggregator[X, X, U](args => args, rfn, mfn2, declaredFields, midConv, endSet)
)
class BufferOp[I, T, X](
@transient init: I,
@transient inputIterfn: (I, Iterator[T]) => TraversableOnce[X],
fields: Fields,
conv: TupleConverter[T],
set: TupleSetter[X]
) extends BaseOperation[Any](fields)
with Buffer[Any]
with ScaldingPrepare[Any] {
val iterfn = Externalizer(inputIterfn)
private val lockedInit = MeatLocker(init)
def initCopy = lockedInit.copy
def operate(flowProcess: FlowProcess[_], call: BufferCall[Any]): Unit = {
val oc = call.getOutputCollector
val in = call.getArgumentsIterator.asScala.map(entry => conv(entry))
iterfn.get(initCopy, in).foreach(x => oc.add(set(x)))
}
}
/*
 * A buffer that allows a state object to be set up and torn down.
*/
class SideEffectBufferOp[I, T, C, X](
@transient init: I,
bf: => C, // begin function returns a context
@transient inputIterfn: (I, C, Iterator[T]) => TraversableOnce[X],
ef: C => Unit, // end function to clean up context object
fields: Fields,
conv: TupleConverter[T],
set: TupleSetter[X]
) extends SideEffectBaseOperation[C](bf, ef, fields)
with Buffer[C] {
val iterfn = Externalizer(inputIterfn)
private val lockedInit = MeatLocker(init)
def initCopy = lockedInit.copy
def operate(flowProcess: FlowProcess[_], call: BufferCall[C]): Unit = {
val context = call.getContext
val oc = call.getOutputCollector
val in = call.getArgumentsIterator.asScala.map(entry => conv(entry))
iterfn.get(initCopy, context, in).foreach(x => oc.add(set(x)))
}
}
class SampleWithReplacement(frac: Double, val seed: Int = new java.util.Random().nextInt)
extends BaseOperation[Poisson]()
with Function[Poisson]
with ScaldingPrepare[Poisson] {
override def prepare(flowProcess: FlowProcess[_], operationCall: OperationCall[Poisson]): Unit = {
super.prepare(flowProcess, operationCall)
val p = new Poisson(frac, seed)
operationCall.setContext(p)
}
def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Poisson]): Unit = {
val r = functionCall.getContext.nextInt
for (i <- 0 until r)
functionCall.getOutputCollector().add(Tuple.NULL)
}
}
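// Illustrative sketch (not part of the original file): sampling with replacement at rate `frac`
// is implemented above by emitting each input record k times, with k drawn from Poisson(frac).
// `PoissonDraw` below is a hypothetical stand-in for scalding's mathematics.Poisson, using
// Knuth's multiplication-of-uniforms algorithm.
object PoissonSamplingSketch {
  final class PoissonDraw(mean: Double, seed: Int) {
    private val rng = new java.util.Random(seed)
    private val limit = math.exp(-mean)

    def nextInt(): Int = {
      var k = 0
      var p = 1.0
      do { k += 1; p *= rng.nextDouble() } while (p > limit)
      k - 1
    }
  }

  def main(args: Array[String]): Unit = {
    val draw = new PoissonDraw(mean = 0.5, seed = 42)
    val input = (1 to 10).toList
    // Each element is emitted 0..n times, approximating a 0.5x sample with replacement.
    val sampled = input.flatMap(x => List.fill(draw.nextInt())(x))
    println(sampled)
  }
}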
/** In the typed API every reduce operation is handled by this Buffer */
class TypedBufferOp[K, V, U](
conv: TupleConverter[K],
convV: TupleConverter[V],
@transient reduceFn: (K, Iterator[V]) => Iterator[U],
valueField: Fields
) extends BaseOperation[Any](valueField)
with Buffer[Any]
with ScaldingPrepare[Any] {
val reduceFnSer = Externalizer(reduceFn)
def operate(flowProcess: FlowProcess[_], call: BufferCall[Any]): Unit = {
val oc = call.getOutputCollector
val key = conv(call.getGroup)
val values = call.getArgumentsIterator.asScala
.map(convV(_))
// Avoiding a lambda here
val resIter = reduceFnSer.get(key, values)
while (resIter.hasNext) {
val tup = Tuple.size(1)
tup.set(0, resIter.next)
oc.add(tup)
}
}
}
/**
 * This gets a pair out of a tuple, increments the counters from the right element, and passes the left element on
*/
class IncrementCounters[A](pass: Fields, conv: TupleConverter[(A, Iterable[((String, String), Long)])])
extends BaseOperation[Any](pass)
with Function[Any] {
override def operate(flowProcess: FlowProcess[_], functionCall: FunctionCall[Any]): Unit = {
val (a, inc) = conv(functionCall.getArguments)
val iter = inc.iterator
while (iter.hasNext) {
val ((k1, k2), amt) = iter.next
flowProcess.increment(k1, k2, amt)
}
val tup = Tuple.size(1)
tup.set(0, a)
functionCall.getOutputCollector.add(tup)
}
}
}
|
twitter/scalding
|
scalding-core/src/main/scala/com/twitter/scalding/Operations.scala
|
Scala
|
apache-2.0
| 28,769 |
package edu.nus.systemtesting
import java.nio.file.Files
import java.nio.file.Path
import java.nio.file.Paths
import scala.sys.process.Process
import org.scalatest.BeforeAndAfter
import org.scalatest.FlatSpec
import com.typesafe.config.ConfigException
import com.typesafe.config.ConfigFactory
import edu.nus.systemtesting.hg.Repository
import edu.nus.systemtesting.hipsleek.SleekTestCase
import edu.nus.systemtesting.hipsleek.app.AppConfig
import edu.nus.systemtesting.hipsleek.app.RunHipSleek
/**
* @author richardg
*/
class DirtyRepoSpec extends FlatSpec with BeforeAndAfter {
/**
* A revision known to not have compiler errors
*/
val KnownGoodRevision = "7ac44bdb0dfd"
// Assumes presence of a config
val configuration = ConfigFactory.load()
// assume(configuration., clue)
val REPO_DIR = try {
Paths.get(configuration.getString("REPO_DIR"))
} catch {
case e: ConfigException.Missing => {
cancel("`REPO_DIR` key not in config, cannot test system.", e)
Paths.get("/path/to/repo")
}
}
val repo = new Repository(REPO_DIR)
val KnownGoodCommit = repo.identify(Some(KnownGoodRevision))
var tmpRepoDir: Path = _
var tmpResultsDir: Path = _
var tmpBinCacheDir: Path = _
before {
// Clone the repo
tmpRepoDir = Files.createTempDirectory("edunussystestrepo")
tmpResultsDir = Files.createTempDirectory("edunussystestresults")
tmpBinCacheDir = Files.createTempDirectory("edunussystestbincache")
assume(repo.clone(tmpRepoDir, Some(KnownGoodCommit)))
// Replace the expected output of "Valid." with anything else.
// This works for this file, for this revision.
val ReplaceCmd = Seq("sed", "-i", "s/ Valid\\\\. / Gibberish\\\\. /", "sleekengine.ml")
val sedProc = Process(ReplaceCmd, tmpRepoDir.toFile)
val sedExecOutp = Runnable.executeProc(sedProc)
println("Running sed. Okay? " + (sedExecOutp.exitValue == 0))
assume(sedExecOutp.exitValue == 0)
}
after {
    // delete the temporary directories
FileSystemUtilities rmdir tmpRepoDir
FileSystemUtilities rmdir tmpResultsDir
FileSystemUtilities rmdir tmpBinCacheDir
}
// We know the system works for 'dirty' repos if it gets different results
// for a test after a commit is made.
val KnownGoodTestCase = new TestCaseBuilder(Paths.get("sleek"), Paths.get("examples/working/sleek/sleek.slk"), "", "Valid, Valid, Valid, Fail")
"Dirty Repositories" should "be build in place; not " taggedAs(SlowTest) in {
// Load the config, using `tmpRepoDir` instead of config's `REPO_DIR`.
val tmpConfig = AppConfig.load(configuration, Some(tmpRepoDir))
// Replace the bin cache, results dir in the config.
println("DirtyRepoTest: binCacheDir is " + tmpBinCacheDir.toString)
println("DirtyRepoTest: resultsDir is " + tmpResultsDir.toString)
val TestConfig = tmpConfig.copy(resultsDir = tmpResultsDir.toString,
binCacheDir = tmpBinCacheDir.toString)
// Run some test on the repo;
val repo = new Repository(tmpRepoDir)
val runHipSleek = new RunHipSleek(TestConfig)
val repoCommit = repo.identify()
assert(repoCommit.isDirty)
// Run the test
val tsr = runHipSleek.runTests(SleekTestCase.constructTestCase, List(KnownGoodTestCase))(repoCommit)
val results = tsr.results
assert(!results.isEmpty)
val tcr = results.head
// Since the "Valid." output was replaced,
// only "Fail." will be caught from the actual `sleek` binary.
// This yields an "Invalid" test.
assert(!tcr.executionSucceeded)
assert(!tcr.passed)
// check that nothing was added to binCache;
// (breaks abstraction to check dir, but oh well)
assert(FileSystemUtilities isEmptyDirectory tmpBinCacheDir)
// check that nothing was added to the resArch
assert(FileSystemUtilities isEmptyDirectory tmpResultsDir)
}
}
|
rgoulter/system-testing
|
src/test/scala/edu/nus/systemtesting/DirtyRepoSpec.scala
|
Scala
|
mit
| 3,880 |
package org.bfc.streaming.twitter
trait Credentials { _: org.bfc.streaming.core.util.Config =>
import Credentials._
System.setProperty(ConsumerKey, config.getString(ConsumerKey))
System.setProperty(ConsumerSecret, config.getString(ConsumerSecret))
System.setProperty(AccessToken, config.getString(AccessToken))
System.setProperty(AccessTokenSecret, config.getString(AccessTokenSecret))
}
object Credentials {
val ConsumerKey = "twitter4j.oauth.consumerKey"
val ConsumerSecret = "twitter4j.oauth.consumerSecret"
val AccessToken = "twitter4j.oauth.accessToken"
val AccessTokenSecret = "twitter4j.oauth.accessTokenSecret"
}
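// Illustrative sketch (not part of the original file): the self-type above expects a `config`
// member backed by Typesafe Config (assumed available, since the trait calls config.getString).
// A configuration carrying the four twitter4j OAuth keys used by this trait might look like the
// hypothetical snippet below; mixing in Credentials then copies them into system properties for
// twitter4j to pick up.
object CredentialsConfigSketch {
  import com.typesafe.config.ConfigFactory

  val sample = ConfigFactory.parseString(
    """
      |twitter4j.oauth.consumerKey = "xxx"
      |twitter4j.oauth.consumerSecret = "xxx"
      |twitter4j.oauth.accessToken = "xxx"
      |twitter4j.oauth.accessTokenSecret = "xxx"
    """.stripMargin)

  def main(args: Array[String]): Unit =
    println(sample.getString("twitter4j.oauth.consumerKey"))
}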
|
JSantosP/twitter-stream
|
src/main/scala/org/bfc/streaming/twitter/Credentials.scala
|
Scala
|
apache-2.0
| 652 |
package pages.disposal_of_vehicle
import org.openqa.selenium.WebDriver
import org.scalatest.selenium.WebBrowser
import WebBrowser.find
import WebBrowser.id
import WebBrowser.Element
import uk.gov.dvla.vehicles.presentation.common.helpers
import helpers.webbrowser.{Page, WebDriverFactory}
import views.disposal_of_vehicle.DisposeFailure
import DisposeFailure.{SetupTradeDetailsId, VehicleLookupId}
object DisposeFailurePage extends Page {
final val address = buildAppUrl("sell-to-the-trade-failure")
final override val title: String = "Buying a vehicle into trade: failure"
override lazy val url: String = WebDriverFactory.testUrl + address.substring(1)
def setuptradedetails(implicit driver: WebDriver): Element = find(id(SetupTradeDetailsId)).get
def vehiclelookup(implicit driver: WebDriver): Element = find(id(VehicleLookupId)).get
}
|
dvla/vehicles-online
|
test/pages/disposal_of_vehicle/DisposeFailurePage.scala
|
Scala
|
mit
| 852 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.elastic
import java.util
import com.datamountaineer.streamreactor.connect.elastic.config.{ElasticConfig, ElasticConfigConstants}
import com.datamountaineer.streamreactor.connect.utils.{ProgressCounter, JarManifest}
import com.typesafe.scalalogging.slf4j.StrictLogging
import org.apache.kafka.clients.consumer.OffsetAndMetadata
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.connect.sink.{SinkRecord, SinkTask}
import scala.collection.JavaConversions._
class ElasticSinkTask extends SinkTask with StrictLogging {
private var writer: Option[ElasticJsonWriter] = None
private val progressCounter = new ProgressCounter
private var enableProgress: Boolean = false
private val manifest = JarManifest(getClass.getProtectionDomain.getCodeSource.getLocation)
/**
   * Parse the configurations and set up the writer
**/
override def start(props: util.Map[String, String]): Unit = {
logger.info(scala.io.Source.fromInputStream(getClass.getResourceAsStream("/elastic-ascii.txt")).mkString + s" v $version")
logger.info(manifest.printManifest())
ElasticConfig.config.parse(props)
val sinkConfig = ElasticConfig(props)
enableProgress = sinkConfig.getBoolean(ElasticConfigConstants.PROGRESS_COUNTER_ENABLED)
writer = Some(ElasticWriter(config = sinkConfig, context = context))
}
/**
   * Pass the SinkRecords to the writer for writing
**/
override def put(records: util.Collection[SinkRecord]): Unit = {
require(writer.nonEmpty, "Writer is not set!")
writer.foreach(w => w.write(records.toSet))
val seq = records.toVector
if (enableProgress) {
progressCounter.update(seq)
}
}
/**
* Clean up writer
**/
override def stop(): Unit = {
logger.info("Stopping Elastic sink.")
writer.foreach(w => w.close())
progressCounter.empty
}
override def flush(map: util.Map[TopicPartition, OffsetAndMetadata]): Unit = {
logger.info("Flushing Elastic Sink")
}
override def version: String = manifest.version()
}
|
CodeSmell/stream-reactor
|
kafka-connect-elastic/src/main/scala/com/datamountaineer/streamreactor/connect/elastic/ElasticSinkTask.scala
|
Scala
|
apache-2.0
| 2,675 |
package com.bicou.play.json
import play.api.libs.json._
package object optionoption {
implicit class PlayJsonOptionOption(path: JsPath) {
def readOptionOption[A](implicit r: Reads[A]): Reads[Option[Option[A]]] = OptionOption.reads[A](path)(r)
def writeOptionOption[A](implicit w: Writes[A]): OWrites[Option[Option[A]]] = OptionOption.writes[A](path)(w)
def formatOptionOption[A](implicit f: Format[A]): OFormat[Option[Option[A]]] = OptionOption.format[A](path)(f)
}
}
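// Illustrative sketch (not part of the original library): one way the extension methods above can
// be used with play-json's functional builder syntax (assuming play-json 2.x). `Profile` and its
// fields are hypothetical; the point of Option[Option[A]] here is presumably to distinguish a
// missing field (None) from an explicit null (Some(None)).
import play.api.libs.functional.syntax._
import optionoption._

case class Profile(name: String, nickname: Option[Option[String]])

object Profile {
  implicit val format: OFormat[Profile] = (
    (__ \ "name").format[String] and
      (__ \ "nickname").formatOptionOption[String]
  )(Profile.apply _, unlift(Profile.unapply))
}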
|
bicouy0/play-json-option-option
|
src/main/scala/com/bicou/play/json/package.scala
|
Scala
|
mit
| 491 |
package component
import core._
import akka.actor.{Actor, ActorRefFactory, Props}
import java.util.UUID
import org.joda.time.DateTime
import scala.collection.immutable.Map
class ModelBlog(val mode: Option[String]) extends Actor {
// Dummy data for illustration purposes, in ascending order by date
val tableBlog = (for {
x <- 1 to 100
} yield Blog(x.toString, "jim", new DateTime().minusDays(101-x),
s"Title ${x}", s"#Title ${x}.\\n* Mode: ${mode}\\n* new item")).reverse
def receive: Receive = process(tableBlog)
def process(tableBlog: IndexedSeq[Blog]): Receive = {
case GetEntity(uuid) =>
sender ! tableBlog.find(_.id == uuid.toString)
case ListWithOffset(Blog, _, offset, limit) =>
sender ! EntityList(tableBlog.drop(offset).take(limit))
case AddEntity(blog: Blog, _*) =>
val newTableBlog = tableBlog.filterNot(_.id == blog.id)
context.become(process(blog +: newTableBlog))
sender ! blog
case DeleteEntity(id) =>
val entity = tableBlog.find(_.id == id)
context.become(process(tableBlog.filterNot(_.id == id)))
sender ! entity
}
}
object ModelBlog {
def name = "modelBlog"
def apply(mode: Option[String])(implicit factory: ActorRefFactory) =
factory.actorOf(Props(new ModelBlog(mode)), name)
}
|
enpassant/jeeves
|
src/main/scala/component/ModelBlog.scala
|
Scala
|
apache-2.0
| 1,301 |
package com.github.mpetruska.ukmodulo.table
import java.io.Reader
import com.github.mpetruska.ukmodulo.Error
import com.github.mpetruska.ukmodulo.digits.AccountDigits
import com.github.mpetruska.ukmodulo.table.ModulusWeightRowParser._
object ModulusWeightTable extends ResourceTable {
type Row = ModulusWeightRow
val resourcePath = "/valacdos-v680.txt"
def parseAllRows(in: Reader): Either[Error, List[Row]] = {
ModulusWeightRowParser.parseAllRows(in) match {
      case Success(value, _) => Right(value)
case Failure(error, _) => Left(error)
case Error(error, _) => Left(error)
}
}
def getWeightRowsFor(accountDigits: AccountDigits): Either[Error, List[ModulusWeightRow]] = {
val accountNumber = AccountDigits.getSortCode(accountDigits)
table.map { weightTable =>
weightTable.filter(row => row.rangeStart <= accountNumber && row.rangeEnd >= accountNumber)
}
}
}
|
mpetruska/uk-modulo-scala
|
src/main/scala/com/github/mpetruska/ukmodulo/table/ModulusWeightTable.scala
|
Scala
|
mit
| 923 |
package io.pathfinder.data
import com.avaje.ebean.Model
import com.avaje.ebean.Model.Find
/**
* subclasses can use callbacks to listen to changes to models in the specified dao
*/
abstract class ObserverDao[V <: Model](dao: CrudDao[Long,V]) extends CrudDao[Long,V] {
def this(find: Find[Long,V]) = this(new EbeanCrudDao[Long,V](find))
protected def onCreated(model: V): Unit
protected def onDeleted(model: V): Unit
protected def onUpdated(model: V): Unit
final override def update(id: Long, update: Resource[V]): Option[V] = for{
mod <- dao.update(id, update)
_ = onUpdated(mod)
} yield mod
final override def update(model: V): Option[V] = for{
mod <- dao.update(model)
_ = onUpdated(mod)
} yield mod
final override def delete(id: Long): Option[V] = for{
mod <- dao.delete(id)
_ = onDeleted(mod)
} yield mod
final override def readAll: Seq[V] = dao.readAll
final override def read(id: Long): Option[V] = dao.read(id)
final override def create(model: V): V = {
val mod: V = dao.create(model)
onCreated(mod)
mod
}
final override def create(create: Resource[V]): Option[V] = for{
mod <- dao.create(create)
_ = onCreated(mod)
} yield mod
}
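// Illustrative sketch (not part of the original file): a minimal ObserverDao subclass that simply
// logs each callback. `AuditedDao` and the println bodies are hypothetical; real observers might
// publish events or invalidate caches instead of logging.
class AuditedDao[V <: Model](dao: CrudDao[Long, V]) extends ObserverDao[V](dao) {
  protected def onCreated(model: V): Unit = println(s"created: $model")
  protected def onDeleted(model: V): Unit = println(s"deleted: $model")
  protected def onUpdated(model: V): Unit = println(s"updated: $model")
}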
|
CSSE497/pathfinder-server
|
app/io/pathfinder/data/ObserverDao.scala
|
Scala
|
mit
| 1,237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.streaming.api.scala
import org.apache.flink.runtime.minicluster.LocalFlinkMiniCluster
import org.apache.flink.streaming.util.TestStreamEnvironment
import org.apache.flink.test.util.TestBaseUtils
import org.junit.{After, Before}
import org.scalatest.junit.JUnitSuiteLike
trait ScalaStreamingMultipleProgramsTestBase
extends TestBaseUtils
with JUnitSuiteLike {
val parallelism = 4
var cluster: Option[LocalFlinkMiniCluster] = None
@Before
def beforeAll(): Unit = {
val cluster = Some(
TestBaseUtils.startCluster(
1,
parallelism,
false,
false,
true
)
)
TestStreamEnvironment.setAsContext(cluster.get, parallelism)
}
@After
def afterAll(): Unit = {
TestStreamEnvironment.unsetAsContext()
cluster.foreach {
TestBaseUtils.stopCluster(_, TestBaseUtils.DEFAULT_TIMEOUT)
}
}
}
|
WangTaoTheTonic/flink
|
flink-streaming-scala/src/test/scala/org/apache/flink/streaming/api/scala/ScalaStreamingMultipleProgramsTestBase.scala
|
Scala
|
apache-2.0
| 1,711 |
package dotty.tools
package repl
import scala.language.unsafeNulls
import java.io.File
import java.nio.file.{Path, Files}
import java.util.Comparator
import org.junit.{Test, Ignore, BeforeClass, AfterClass}
import dotc.Driver
import dotc.reporting.TestReporter
import dotc.interfaces.Diagnostic.ERROR
import vulpix.{TestConfiguration, TestFlags}
/** Test that the REPL can shadow artifacts in the local filesystem on the classpath.
* Since the REPL launches with the current directory on the classpath, stray .class
* files containing definitions in the empty package will be in scope in the REPL.
* Additionally, any subdirectories will be treated as package names in scope.
* As this may come as a surprise to an unsuspecting user, we would like definitions
* from the REPL session to shadow these names.
*
* Provided here is a framework for creating the filesystem artifacts to be shadowed
 * and running scripted REPL tests with them on the classpath.
*/
object ShadowingTests:
def classpath = TestConfiguration.basicClasspath + File.pathSeparator + shadowDir
def options = ReplTest.commonOptions ++ Array("-classpath", classpath)
def shadowDir = dir.toAbsolutePath.toString
def createSubDir(name: String): Path =
val subdir = dir.resolve(name)
try Files.createDirectory(subdir)
catch case _: java.nio.file.FileAlreadyExistsException =>
assert(Files.isDirectory(subdir), s"failed to create shadowed subdirectory $subdir")
subdir
// The directory on the classpath containing artifacts to be shadowed
private var dir: Path = null
@BeforeClass def setupDir: Unit =
dir = Files.createTempDirectory("repl-shadow")
@AfterClass def tearDownDir: Unit =
Files.walk(dir).sorted(Comparator.reverseOrder).forEach(Files.delete)
dir = null
class ShadowingTests extends ReplTest(options = ShadowingTests.options):
// delete contents of shadowDir after each test
override def cleanup: Unit =
super.cleanup
val dir = ShadowingTests.dir
Files.walk(dir)
.filter(_ != dir)
.sorted(Comparator.reverseOrder)
.forEach(Files.delete)
/** Run a scripted REPL test with the compilation artifacts of `shadowed` on the classpath */
def shadowedScriptedTest(name: String, shadowed: String, script: String): Unit =
compileShadowed(shadowed)
testScript(name, script.linesIterator.toList)
/** Compile the given source text and output to the shadow dir on the classpath */
private def compileShadowed(src: String): Unit =
val file: Path = Files.createTempFile("repl-shadow-test", ".scala")
Files.write(file, src.getBytes)
val flags =
TestFlags(TestConfiguration.basicClasspath, TestConfiguration.noCheckOptions)
.and("-d", ShadowingTests.shadowDir)
val driver = new Driver
val reporter = TestReporter.reporter(System.out, logLevel = ERROR)
driver.process(flags.all :+ file.toString, reporter)
assert(!reporter.hasErrors, s"compilation of $file failed")
Files.delete(file)
end compileShadowed
@Test def i7635 = shadowedScriptedTest(name = "<i7635>",
shadowed = "class C(val c: Int)",
script =
"""|scala> new C().c
|-- Error: ----------------------------------------------------------------------
|1 | new C().c
| | ^^^^^^^
| | missing argument for parameter c of constructor C in class C: (c: Int): C
|1 error found
|
|scala> new C(13).c
|val res0: Int = 13
|
|scala> class C { val c = 42 }
|// defined class C
|
|scala> new C().c
|val res1: Int = 42
|""".stripMargin
)
@Test def `shadow subdirectories on classpath` =
// NB: Tests of shadowing of subdirectories on the classpath are only valid
// when the subdirectories exist prior to initialization of the REPL driver.
// In the tests below this is enforced by the call to `testScript` which
// in turn invokes `ReplDriver#resetToInitial`. When testing interactively,
// the subdirectories may be created before launching the REPL, or during
// an existing session followed by the `:reset` command.
ShadowingTests.createSubDir("foo")
testScript(name = "<shadow-subdir-foo>",
"""|scala> val foo = 3
|val foo: Int = 3
|
|scala> foo
|val res0: Int = 3
|""".stripMargin.linesIterator.toList
)
ShadowingTests.createSubDir("x")
testScript(name = "<shadow-subdir-x>",
"""|scala> val (x, y) = (42, "foo")
|val x: Int = 42
|val y: String = foo
|
|scala> if (true) x else y
|val res0: Matchable = 42
|""".stripMargin.linesIterator.toList
)
ShadowingTests.createSubDir("util")
testScript(name = "<shadow-subdir-util>",
"""|scala> import util.Try
|
|scala> object util { class Try { override def toString = "you've gotta try!" } }
|// defined object util
|
|scala> import util.Try
|scala> new Try
|val res0: util.Try = you've gotta try!
|""".stripMargin.linesIterator.toList
)
end ShadowingTests
|
dotty-staging/dotty
|
compiler/test/dotty/tools/repl/ShadowingTests.scala
|
Scala
|
apache-2.0
| 5,191 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cs.ucla.edu.bwaspark
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.broadcast.Broadcast
import cs.ucla.edu.bwaspark.datatype._
import cs.ucla.edu.bwaspark.worker1.BWAMemWorker1._
import cs.ucla.edu.bwaspark.worker1.BWAMemWorker1Batched._
import cs.ucla.edu.bwaspark.worker2.BWAMemWorker2._
import cs.ucla.edu.bwaspark.worker2.MemSamPe._
import cs.ucla.edu.bwaspark.sam.SAMHeader
import cs.ucla.edu.bwaspark.sam.SAMWriter
import cs.ucla.edu.bwaspark.sam.SAMHDFSWriter
import cs.ucla.edu.bwaspark.debug.DebugFlag._
import cs.ucla.edu.bwaspark.fastq._
import cs.ucla.edu.bwaspark.util.SWUtil._
import cs.ucla.edu.avro.fastq._
import cs.ucla.edu.bwaspark.commandline._
import cs.ucla.edu.bwaspark.broadcast.ReferenceBroadcast
import org.bdgenomics.formats.avro.AlignmentRecord
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.models.{SequenceDictionary, RecordGroup, RecordGroupDictionary}
import htsjdk.samtools.SAMFileHeader
import java.io.FileReader
import java.io.BufferedReader
import java.text.SimpleDateFormat
import java.util.Calendar
import scala.concurrent._
import ExecutionContext.Implicits.global
import scala.util.{Success, Failure}
import scala.concurrent.duration._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import java.net.URI
object FastMap {
private val MEM_F_PE: Int = 0x2
private val MEM_F_ALL = 0x8
private val MEM_F_NO_MULTI = 0x10
private val packageVersion = "cloud-scale-bwamem-0.2.2"
private val NO_OUT_FILE = 0
private val SAM_OUT_LOCAL = 1
private val ADAM_OUT = 2
private val SAM_OUT_DFS = 3
/**
* memMain: the main function to perform read mapping
*
* @param sc the spark context object
* @param bwamemArgs the arguments of CS-BWAMEM
*/
def memMain(sc: SparkContext, bwamemArgs: BWAMEMCommand)
{
val fastaLocalInputPath = bwamemArgs.fastaInputPath // the local BWA index files (bns, pac, and so on)
val fastqHDFSInputPath = bwamemArgs.fastqHDFSInputPath // the raw read file stored in HDFS
val isPairEnd = bwamemArgs.isPairEnd // perform pair-end or single-end mapping
val batchFolderNum = bwamemArgs.batchedFolderNum // the number of raw read folders in a batch to be processed
val isPSWBatched = bwamemArgs.isPSWBatched // whether the pair-end Smith Waterman is performed in a batched way
val subBatchSize = bwamemArgs.subBatchSize // the number of reads to be processed in a subbatch
val isPSWJNI = bwamemArgs.isPSWJNI // whether the native JNI library is called for better performance
val jniLibPath = bwamemArgs.jniLibPath // the JNI library path in the local machine
val outputChoice = bwamemArgs.outputChoice // the output format choice
val outputPath = bwamemArgs.outputPath // the output path in the local or distributed file system
val readGroupString = bwamemArgs.headerLine // complete read group header line: Example: @RG\\tID:foo\\tSM:bar
val samHeader = new SAMHeader
var adamHeader = new SequenceDictionary
val samFileHeader = new SAMFileHeader
var seqDict: SequenceDictionary = null
var readGroupDict: RecordGroupDictionary = null
var readGroup: RecordGroup = null
// get HDFS information
val conf: Configuration = new Configuration
val hdfs: FileSystem = FileSystem.get(new URI(fastqHDFSInputPath), conf)
val status = hdfs.listStatus(new Path(fastqHDFSInputPath))
val fastqInputFolderNum = status.size // the number of folders generated in the HDFS for the raw reads
bwamemArgs.fastqInputFolderNum = fastqInputFolderNum // the number of folders generated in the HDFS for the raw reads
println("HDFS master: " + hdfs.getUri.toString)
println("Input HDFS folder number: " + bwamemArgs.fastqInputFolderNum)
if(samHeader.bwaSetReadGroup(readGroupString)) {
println("Head line: " + samHeader.readGroupLine)
println("Read Group ID: " + samHeader.bwaReadGroupID)
}
else println("Error on reading header")
val readGroupName = samHeader.bwaReadGroupID
// loading index files
println("Load Index Files")
val bwaIdx = new BWAIdxType
bwaIdx.load(fastaLocalInputPath, 0)
// loading BWA MEM options
println("Load BWA-MEM options")
val bwaMemOpt = new MemOptType
bwaMemOpt.load
bwaMemOpt.flag |= MEM_F_ALL
bwaMemOpt.flag |= MEM_F_NO_MULTI
// write SAM header
println("Output choice: " + outputChoice)
if(outputChoice == ADAM_OUT) {
samHeader.bwaGenSAMHeader(bwaIdx.bns, packageVersion, readGroupString, samFileHeader)
seqDict = SequenceDictionary(samFileHeader)
readGroupDict = RecordGroupDictionary.fromSAMHeader(samFileHeader)
readGroup = readGroupDict(readGroupName)
}
// pair-end read mapping
if(isPairEnd) {
bwaMemOpt.flag |= MEM_F_PE
if(outputChoice == SAM_OUT_LOCAL || outputChoice == SAM_OUT_DFS)
memPairEndMapping(sc, bwamemArgs, bwaMemOpt, bwaIdx, samHeader)
else if(outputChoice == ADAM_OUT)
memPairEndMapping(sc, bwamemArgs, bwaMemOpt, bwaIdx, samHeader, seqDict, readGroup)
}
// single-end read mapping
else {
if(outputChoice == SAM_OUT_LOCAL || outputChoice == SAM_OUT_DFS)
memSingleEndMapping(sc, fastaLocalInputPath, fastqHDFSInputPath, fastqInputFolderNum, batchFolderNum, bwaMemOpt, bwaIdx, outputChoice, outputPath, samHeader)
else if(outputChoice == ADAM_OUT)
memSingleEndMapping(sc, fastaLocalInputPath, fastqHDFSInputPath, fastqInputFolderNum, batchFolderNum, bwaMemOpt, bwaIdx, outputChoice, outputPath, samHeader, seqDict, readGroup)
}
}
/**
* memPairEndMapping: the main function to perform pair-end read mapping
*
* @param sc the spark context object
* @param bwamemArgs the arguments of CS-BWAMEM
* @param bwaMemOpt the MemOptType object
* @param bwaIdx the BWAIdxType object
* @param samHeader the SAM header file used for writing SAM output file
* @param seqDict (optional) the sequences (chromosome) dictionary: used for ADAM format output
* @param readGroup (optional) the read group: used for ADAM format output
*/
private def memPairEndMapping(sc: SparkContext, bwamemArgs: BWAMEMCommand, bwaMemOpt: MemOptType, bwaIdx: BWAIdxType,
samHeader: SAMHeader, seqDict: SequenceDictionary = null, readGroup: RecordGroup = null)
{
// Get the input arguments
val fastaLocalInputPath = bwamemArgs.fastaInputPath // the local BWA index files (bns, pac, and so on)
val fastqHDFSInputPath = bwamemArgs.fastqHDFSInputPath // the raw read file stored in HDFS
val fastqInputFolderNum = bwamemArgs.fastqInputFolderNum // the number of folders generated in the HDFS for the raw reads
val batchFolderNum = bwamemArgs.batchedFolderNum // the number of raw read folders in a batch to be processed
val isPSWBatched = bwamemArgs.isPSWBatched // whether the pair-end Smith Waterman is performed in a batched way
val subBatchSize = bwamemArgs.subBatchSize // the number of reads to be processed in a subbatch
val isPSWJNI = bwamemArgs.isPSWJNI // whether the native JNI library is called for better performance
val jniLibPath = bwamemArgs.jniLibPath // the JNI library path in the local machine
val outputChoice = bwamemArgs.outputChoice // the output format choice
val outputPath = bwamemArgs.outputPath // the output path in the local or distributed file system
val isSWExtBatched = bwamemArgs.isSWExtBatched // whether the SWExtend is executed in a batched way
val swExtBatchSize = bwamemArgs.swExtBatchSize // the batch size used for used for SWExtend
val isFPGAAccSWExtend = bwamemArgs.isFPGAAccSWExtend // whether the FPGA accelerator is used for accelerating SWExtend
val fpgaSWExtThreshold = bwamemArgs.fpgaSWExtThreshold // the threshold of using FPGA accelerator for SWExtend
val jniSWExtendLibPath = bwamemArgs.jniSWExtendLibPath // (optional) the JNI library path used for SWExtend FPGA acceleration
// Initialize output writer
val samWriter = new SAMWriter
val samHDFSWriter = new SAMHDFSWriter(outputPath)
if(outputChoice == SAM_OUT_LOCAL) {
samWriter.init(outputPath)
samWriter.writeString(samHeader.bwaGenSAMHeader(bwaIdx.bns, packageVersion))
}
else if(outputChoice == SAM_OUT_DFS) {
samHDFSWriter.init
samHDFSWriter.writeString(samHeader.bwaGenSAMHeader(bwaIdx.bns, packageVersion))
}
// broadcast shared variables
    // If each node has its own copy of the human reference genome, we can bypass the broadcast from the driver node.
// Otherwise, we need to use Spark broadcast
var isLocalRef = false
if(bwamemArgs.localRef == 1)
isLocalRef = true
val bwaIdxGlobal = sc.broadcast(new ReferenceBroadcast(sc.broadcast(bwaIdx), isLocalRef, fastaLocalInputPath))
val bwaMemOptGlobal = sc.broadcast(bwaMemOpt)
    // Used to avoid the time-consuming adamRDD.count (numProcessed += adamRDD.count)
    // Assume the number of reads in one batch is the same (this is determined when uploading FASTQ to HDFS)
val fastqRDDLoaderTmp = new FASTQRDDLoader(sc, fastqHDFSInputPath, fastqInputFolderNum)
val rddTmp = fastqRDDLoaderTmp.PairEndRDDLoadOneBatch(0, batchFolderNum)
val batchedReadNum = rddTmp.count
rddTmp.unpersist(true)
// ***** PROFILING *******
var worker1Time: Long = 0
var calMetricsTime: Long = 0
var worker2Time: Long = 0
var ioWaitingTime: Long = 0
var numProcessed: Long = 0
// Process the reads in a batched fashion
var i: Int = 0
var folderID: Int = 0
var isSAMWriteDone: Boolean = true // a done signal for writing SAM file
//var isFinalIteration: Boolean = false
while(i < fastqInputFolderNum) {
var pes: Array[MemPeStat] = new Array[MemPeStat](4)
var j = 0
while(j < 4) {
pes(j) = new MemPeStat
j += 1
}
// loading reads
println("Load FASTQ files")
val pairEndFASTQRDDLoader = new FASTQRDDLoader(sc, fastqHDFSInputPath, fastqInputFolderNum)
val restFolderNum = fastqInputFolderNum - i
var pairEndFASTQRDD: RDD[PairEndFASTQRecord] = null
if(restFolderNum >= batchFolderNum) {
pairEndFASTQRDD = pairEndFASTQRDDLoader.PairEndRDDLoadOneBatch(i, batchFolderNum)
i += batchFolderNum
}
else {
pairEndFASTQRDD = pairEndFASTQRDDLoader.PairEndRDDLoadOneBatch(i, restFolderNum)
i += restFolderNum
//isFinalIteration = true
}
// Worker1 (Map step)
// ***** PROFILING *******
val startTime = System.currentTimeMillis
println("@Worker1")
var reads: RDD[PairEndReadType] = null
// SWExtend() is not processed in a batched way (by default)
if(!isSWExtBatched) {
reads = pairEndFASTQRDD.map( pairSeq => pairEndBwaMemWorker1(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bwt, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, null, pairSeq) )
}
// SWExtend() is processed in a batched way. FPGA accelerating may be applied
else {
def it2ArrayIt_W1(iter: Iterator[PairEndFASTQRecord]): Iterator[Array[PairEndReadType]] = {
val batchedDegree = swExtBatchSize
var counter = 0
var ret: Vector[Array[PairEndReadType]] = scala.collection.immutable.Vector.empty
var end1 = new Array[FASTQRecord](batchedDegree)
var end2 = new Array[FASTQRecord](batchedDegree)
while(iter.hasNext) {
val pairEnd = iter.next
end1(counter) = pairEnd.seq0
end2(counter) = pairEnd.seq1
counter += 1
if(counter == batchedDegree) {
ret = ret :+ pairEndBwaMemWorker1Batched(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bwt, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac,
null, end1, end2, batchedDegree, isFPGAAccSWExtend, fpgaSWExtThreshold, jniSWExtendLibPath)
counter = 0
}
}
if(counter != 0) {
ret = ret :+ pairEndBwaMemWorker1Batched(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bwt, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac,
null, end1, end2, counter, isFPGAAccSWExtend, fpgaSWExtThreshold, jniSWExtendLibPath)
}
ret.toArray.iterator
}
reads = pairEndFASTQRDD.mapPartitions(it2ArrayIt_W1).flatMap(s => s)
}
pairEndFASTQRDD.unpersist(true)
reads.cache
// MemPeStat (Reduce step)
val peStatPrepRDD = reads.map( pairSeq => memPeStatPrep(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns.l_pac, pairSeq) )
val peStatPrepArray = peStatPrepRDD.collect
// ***** PROFILING *******
val worker1EndTime = System.currentTimeMillis
worker1Time += (worker1EndTime - startTime)
memPeStatCompute(bwaMemOptGlobal.value, peStatPrepArray, pes)
// ***** PROFILING *******
val calMetricsEndTime = System.currentTimeMillis
calMetricsTime += (calMetricsEndTime - worker1EndTime)
println("@MemPeStat")
j = 0
while(j < 4) {
println("pes(" + j + "): " + pes(j).low + " " + pes(j).high + " " + pes(j).failed + " " + pes(j).avg + " " + pes(j).std)
j += 1
}
// Check if the I/O thread has completed writing the output SAM file
// If not, wait here!!!
      // This implementation uses only the worker1 stage to hide the I/O latency
      // It is slower but consumes a smaller memory footprint
/*if(outputChoice == SAM_OUT_LOCAL) {
println("[DEBUG] Main thread, Before while loop: isSAMWriteDone = " + isSAMWriteDone)
while(!isSAMWriteDone) {
try {
println("Waiting for I/O")
ioWaitingTime += 1
Thread.sleep(1000) //1000 milliseconds is one second.
} catch {
case e: InterruptedException => Thread.currentThread().interrupt()
}
}
println("[DEBUG] Main thread, After while loop: isSAMWriteDone = " + isSAMWriteDone)
this.synchronized {
isSAMWriteDone = false
}
println("[DEBUG] Main thread, Final value: isSAMWriteDone = " + isSAMWriteDone)
}*/
// Worker2 (Map step)
println("@Worker2: Started")
// NOTE: we may need to find how to utilize the numProcessed variable!!!
// Batched Processing for P-SW kernel
if(isPSWBatched) {
// Not output SAM format file
if(outputChoice == NO_OUT_FILE) {
def it2ArrayIt(iter: Iterator[PairEndReadType]): Iterator[Unit] = {
var counter = 0
var ret: Vector[Unit] = scala.collection.immutable.Vector.empty
var subBatch = new Array[PairEndReadType](subBatchSize)
while (iter.hasNext) {
subBatch(counter) = iter.next
counter = counter + 1
if (counter == subBatchSize) {
ret = ret :+ pairEndBwaMemWorker2PSWBatched(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, 0, pes, subBatch, subBatchSize, isPSWJNI, jniLibPath, samHeader)
counter = 0
}
}
if (counter != 0)
ret = ret :+ pairEndBwaMemWorker2PSWBatched(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, 0, pes, subBatch, counter, isPSWJNI, jniLibPath, samHeader)
ret.toArray.iterator
}
val count = reads.mapPartitions(it2ArrayIt).count
println("Count: " + count)
reads.unpersist(true) // free RDD; seems to be needed (free storage information is wrong)
}
// Output SAM format file
else if(outputChoice == SAM_OUT_LOCAL || outputChoice == SAM_OUT_DFS) {
def it2ArrayIt(iter: Iterator[PairEndReadType]): Iterator[Array[Array[String]]] = {
var counter = 0
var ret: Vector[Array[Array[String]]] = scala.collection.immutable.Vector.empty
var subBatch = new Array[PairEndReadType](subBatchSize)
while (iter.hasNext) {
subBatch(counter) = iter.next
counter = counter + 1
if (counter == subBatchSize) {
ret = ret :+ pairEndBwaMemWorker2PSWBatchedSAMRet(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, 0, pes, subBatch, subBatchSize, isPSWJNI, jniLibPath, samHeader)
counter = 0
}
}
if (counter != 0)
ret = ret :+ pairEndBwaMemWorker2PSWBatchedSAMRet(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, 0, pes, subBatch, counter, isPSWJNI, jniLibPath, samHeader)
ret.toArray.iterator
}
if(outputChoice == SAM_OUT_LOCAL) {
println("@worker2")
val samStrings = reads.mapPartitions(it2ArrayIt).collect
println("worker2 done!")
// This implementation can hide the I/O latency with both worker1 and worker2 stages
// However, it requires 2X memory footprint on the driver node
// The program will crash if the memory (Java heap) on the driver node is not large enough
// test
println("[DEBUG] Main thread, Before while loop: isSAMWriteDone = " + isSAMWriteDone)
while(!isSAMWriteDone) {
try {
println("Waiting for I/O")
ioWaitingTime += 1
Thread.sleep(1000) //1000 milliseconds is one second.
} catch {
case e: InterruptedException => Thread.currentThread().interrupt()
}
}
println("[DEBUG] Main thread, After while loop: isSAMWriteDone = " + isSAMWriteDone)
this.synchronized {
isSAMWriteDone = false
}
println("[DEBUG] Main thread, Final value: isSAMWriteDone = " + isSAMWriteDone)
// end of test
println("Count: " + samStrings.size)
reads.unpersist(true) // free RDD; seems to be needed (free storage information is wrong)
val f: Future[Int] = Future {
samStrings.foreach(s => {
s.foreach(pairSeq => {
samWriter.writeString(pairSeq(0))
samWriter.writeString(pairSeq(1))
} )
} )
//samWriter.flush
1
}
f onComplete {
case Success(s) => {
println("[DEBUG] Forked thread, Before: isSAMWriteDone = " + isSAMWriteDone)
println("Successfully write the SAM strings to a local file: " + s)
this.synchronized {
isSAMWriteDone = true
}
println("[DEBUG] Forked thread, After: isSAMWriteDone = " + isSAMWriteDone)
/*if(isFinalIteration) {
println("[DEBUG] Final iteration, Close samWriter")
samWriter.close
val today = Calendar.getInstance().getTime()
// create the date/time formatters
val minuteFormat = new SimpleDateFormat("mm")
val hourFormat = new SimpleDateFormat("hh")
val secondFormat = new SimpleDateFormat("ss")
val currentHour = hourFormat.format(today) // 12
val currentMinute = minuteFormat.format(today) // 29
val currentSecond = secondFormat.format(today) // 50
println("samWriter is closed: " + currentHour + ":" + currentMinute + ":" + currentSecond)
}*/
}
case Failure(f) => println("An error has occurred: " + f.getMessage)
}
/*if(isFinalIteration) {
println("Main thread: waiting for closing samWriter (wait for at most 1000 seconds)")
Await.result(f, 1000.second)
println("Main thread: samWriter closed!")
}*/
}
else if(outputChoice == SAM_OUT_DFS) {
val samStrings = reads.mapPartitions(it2ArrayIt).flatMap(s => s).map(pairSeq => pairSeq(0) + pairSeq(1))
reads.unpersist(true)
samStrings.saveAsTextFile(outputPath + "/body")
}
}
// Output ADAM format file
else if(outputChoice == ADAM_OUT) {
def it2ArrayIt(iter: Iterator[PairEndReadType]): Iterator[Array[AlignmentRecord]] = {
var counter = 0
var ret: Vector[Array[AlignmentRecord]] = scala.collection.immutable.Vector.empty
var subBatch = new Array[PairEndReadType](subBatchSize)
while (iter.hasNext) {
subBatch(counter) = iter.next
counter = counter + 1
if (counter == subBatchSize) {
ret = ret :+ pairEndBwaMemWorker2PSWBatchedADAMRet(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, 0, pes, subBatch, subBatchSize, isPSWJNI, jniLibPath, samHeader, seqDict, readGroup)
counter = 0
}
}
if (counter != 0)
ret = ret :+ pairEndBwaMemWorker2PSWBatchedADAMRet(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, 0, pes, subBatch, counter, isPSWJNI, jniLibPath, samHeader, seqDict, readGroup)
ret.toArray.iterator
}
//val adamObjRDD = sc.union(reads.mapPartitions(it2ArrayIt))
val adamObjRDD = reads.mapPartitions(it2ArrayIt).flatMap(r => r)
adamObjRDD.adamParquetSave(outputPath + "/" + folderID.toString())
println("@Worker2: Completed")
numProcessed += batchedReadNum
folderID += 1
reads.unpersist(true)
adamObjRDD.unpersist(true) // free RDD; seems to be needed (free storage information is wrong)
}
}
// NOTE: needs to be modified!!!
// Normal read-based processing
else {
val count = reads.map(pairSeq => pairEndBwaMemWorker2(bwaMemOptGlobal.value, bwaIdxGlobal.value.value.bns, bwaIdxGlobal.value.value.pac, 0, pes, pairSeq, samHeader) ).count
numProcessed += count.toLong
}
// ***** PROFILING *******
val worker2EndTime = System.currentTimeMillis
worker2Time += (worker2EndTime - calMetricsEndTime)
}
if(outputChoice == SAM_OUT_LOCAL) {
println("[DEBUG] Main thread, Final iteration, Before: isSAMWriteDone = " + isSAMWriteDone)
while(!isSAMWriteDone) {
try {
println("Waiting for I/O, at final iteration")
ioWaitingTime += 1
Thread.sleep(1000) //1000 milliseconds is one second.
} catch {
case e: InterruptedException => Thread.currentThread().interrupt()
}
}
println("[DEBUG] Main thread, Final iteration, After: isSAMWriteDone = " + isSAMWriteDone)
//samWriter.flush
samWriter.close
val today = Calendar.getInstance().getTime()
// create the date/time formatters
val minuteFormat = new SimpleDateFormat("mm")
val hourFormat = new SimpleDateFormat("hh")
val secondFormat = new SimpleDateFormat("ss")
val currentHour = hourFormat.format(today) // 12
val currentMinute = minuteFormat.format(today) // 29
val currentSecond = secondFormat.format(today) // 50
println("SAMWriter close: " + currentHour + ":" + currentMinute + ":" + currentSecond)
}
else if(outputChoice == SAM_OUT_DFS)
samHDFSWriter.close
println("Summary:")
println("Worker1 Time: " + worker1Time)
println("Calculate Metrics Time: " + calMetricsTime)
println("Worker2 Time: " + worker2Time)
println("I/O waiting time for writing data to the disk (for local SAM format only): " + ioWaitingTime)
sc.stop
}
/**
* memSingleEndMapping: the main function to perform single-end read mapping
*
* @param sc the spark context object
* @param fastaLocalInputPath the local BWA index files (bns, pac, and so on)
* @param fastqHDFSInputPath the raw read file stored in HDFS
* @param fastqInputFolderNum the number of folders generated in the HDFS for the raw reads
* @param batchFolderNum the number of raw read folders in a batch to be processed
* @param bwaMemOpt the MemOptType object
* @param bwaIdx the BWAIdxType object
* @param outputChoice the output format choice
* @param outputPath the output path in the local or distributed file system
* @param samHeader the SAM header file used for writing SAM output file
* @param seqDict (optional) the sequences (chromosome) dictionary: used for ADAM format output
* @param readGroup (optional) the read group: used for ADAM format output
*/
private def memSingleEndMapping(sc: SparkContext, fastaLocalInputPath: String, fastqHDFSInputPath: String, fastqInputFolderNum: Int, batchFolderNum: Int,
bwaMemOpt: MemOptType, bwaIdx: BWAIdxType, outputChoice: Int, outputPath: String, samHeader: SAMHeader,
seqDict: SequenceDictionary = null, readGroup: RecordGroup = null)
{
// Initialize output writer
val samWriter = new SAMWriter
val samHDFSWriter = new SAMHDFSWriter(outputPath)
if(outputChoice == SAM_OUT_LOCAL) {
samWriter.init(outputPath)
samWriter.writeString(samHeader.bwaGenSAMHeader(bwaIdx.bns, packageVersion))
}
else if(outputChoice == SAM_OUT_DFS) {
samHDFSWriter.init
samHDFSWriter.writeString(samHeader.bwaGenSAMHeader(bwaIdx.bns, packageVersion))
}
// broadcast shared variables
//val bwaIdxGlobal = sc.broadcast(bwaIdx, fastaLocalInputPath) // read from local disks!!!
val bwaIdxGlobal = sc.broadcast(bwaIdx) // broadcast
val bwaMemOptGlobal = sc.broadcast(bwaMemOpt)
val fastqRDDLoader = new FASTQRDDLoader(sc, fastqHDFSInputPath, fastqInputFolderNum)
// Not output SAM file
// For runtime estimation
if(outputChoice == NO_OUT_FILE) {
// loading reads
println("Load FASTQ files")
val fastqRDD = fastqRDDLoader.RDDLoadAll
println("@Worker1")
val reads = fastqRDD.map( seq => bwaMemWorker1(bwaMemOptGlobal.value, bwaIdxGlobal.value.bwt, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac, null, seq) )
println("@Worker2")
val c = reads.map( r => singleEndBwaMemWorker2(bwaMemOptGlobal.value, r.regs, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac, r.seq, 0, samHeader) ).count
println("Count: " + c)
}
// output SAM file
else if(outputChoice == SAM_OUT_LOCAL || outputChoice == SAM_OUT_DFS) {
var numProcessed: Long = 0
// Process the reads in a batched fashion
var i: Int = 0
while(i < fastqInputFolderNum) {
val restFolderNum = fastqInputFolderNum - i
var singleEndFASTQRDD: RDD[FASTQRecord] = null
if(restFolderNum >= batchFolderNum) {
singleEndFASTQRDD = fastqRDDLoader.SingleEndRDDLoadOneBatch(i, batchFolderNum)
i += batchFolderNum
}
else {
singleEndFASTQRDD = fastqRDDLoader.SingleEndRDDLoadOneBatch(i, restFolderNum)
i += restFolderNum
}
// Write to an output file in the local file system in a sequential way
if(outputChoice == SAM_OUT_LOCAL) {
// worker1, worker2, and return SAM format strings
val samStrings = singleEndFASTQRDD.map(seq => bwaMemWorker1(bwaMemOptGlobal.value, bwaIdxGlobal.value.bwt, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac, null, seq) )
.map(r => singleEndBwaMemWorker2(bwaMemOptGlobal.value, r.regs, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac, r.seq, numProcessed, samHeader) )
.collect
numProcessed += samStrings.size
samWriter.writeStringArray(samStrings)
//samWriter.flush
}
// Write to HDFS
else if(outputChoice == SAM_OUT_DFS) {
// worker1, worker2, and return SAM format strings
val samStrings = singleEndFASTQRDD.map(seq => bwaMemWorker1(bwaMemOptGlobal.value, bwaIdxGlobal.value.bwt, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac, null, seq) )
.map(r => singleEndBwaMemWorker2(bwaMemOptGlobal.value, r.regs, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac, r.seq, numProcessed, samHeader) )
samStrings.saveAsTextFile(outputPath + "/body")
}
singleEndFASTQRDD.unpersist(true)
}
if(outputChoice == SAM_OUT_LOCAL)
samWriter.close
else if(outputChoice == SAM_OUT_DFS)
samHDFSWriter.close
}
// output ADAM format to the distributed file system
else if(outputChoice == ADAM_OUT) {
var numProcessed: Long = 0
// Used to avoid the time-consuming adamRDD.count (numProcessed += adamRDD.count)
// Assume the number of reads in one batch is the same (this is determined when uploading the FASTQ files to HDFS)
val fastqRDDLoaderTmp = new FASTQRDDLoader(sc, fastqHDFSInputPath, fastqInputFolderNum)
val rddTmp = fastqRDDLoaderTmp.SingleEndRDDLoadOneBatch(0, batchFolderNum)
val batchedReadNum = rddTmp.count
rddTmp.unpersist(true)
// Process the reads in a batched fashion
var i: Int = 0
var folderID: Int = 0
while(i < fastqInputFolderNum) {
val restFolderNum = fastqInputFolderNum - i
var singleEndFASTQRDD: RDD[FASTQRecord] = null
if(restFolderNum >= batchFolderNum) {
singleEndFASTQRDD = fastqRDDLoader.SingleEndRDDLoadOneBatch(i, batchFolderNum)
i += batchFolderNum
}
else {
singleEndFASTQRDD = fastqRDDLoader.SingleEndRDDLoadOneBatch(i, restFolderNum)
i += restFolderNum
}
// worker1, worker2, and return SAM format strings
val adamRDD = singleEndFASTQRDD.map(seq => bwaMemWorker1(bwaMemOptGlobal.value, bwaIdxGlobal.value.bwt, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac, null, seq) )
.flatMap(r => singleEndBwaMemWorker2ADAMOut(bwaMemOptGlobal.value, r.regs, bwaIdxGlobal.value.bns, bwaIdxGlobal.value.pac,
r.seq, numProcessed, samHeader, seqDict, readGroup) )
adamRDD.adamParquetSave(outputPath + "/" + folderID.toString())
numProcessed += batchedReadNum
folderID += 1
singleEndFASTQRDD.unpersist(true)
adamRDD.unpersist(true)
}
}
else {
println("[Error] Undefined output choice" + outputChoice)
exit(1)
}
}
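// Hedged usage sketch (added for illustration; the driver-side values below are assumptions,
// not taken from this file). Given an initialized SparkContext `sc`, loaded BWA options/index
// `bwaMemOpt`/`bwaIdx`, and a prepared `samHeader`, a single-end run writing a local SAM file
// could be invoked roughly as (8 = fastqInputFolderNum, 4 = batchFolderNum):
//
//   memSingleEndMapping(sc, "/idx/hg19", "hdfs:///reads", 8, 4,
//     bwaMemOpt, bwaIdx, SAM_OUT_LOCAL, "/out/result.sam", samHeader)
//
// The paths and folder counts above are placeholders only.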
}
|
ytchen0323/cloud-scale-bwamem
|
src/main/scala/cs/ucla/edu/bwaspark/FastMap.scala
|
Scala
|
apache-2.0
| 32,470 |
package se.stagehand.swing.lib
import java.awt.Point
import java.awt.Dimension
/**
* Class for standard vector operations with Integer coordinates.
*/
case class Vector2(x:Int,y:Int) {
def this(i:Int) = this(i,i)
def +(v: Vector2): Vector2 = Vector2(x + v.x, y + v.y)
def -(v: Vector2): Vector2 = Vector2(x - v.x, y - v.y)
def length = Math.sqrt(x * x + y * y) // note: ^ is bitwise XOR in Scala, so x ^ 2 would not square x
def neg = Vector2(-x,-y)
}
/**
* Implicit conversions for Vector2, to handle Swingy stuff like it's nuttin, brah!
*/
object Vector2 {
implicit def fromPoint(p:Point) = Vector2(p.x,p.y)
implicit def toPoint(v:Vector2) = new Point(v.x,v.y)
implicit def fromDim(d:Dimension) = Vector2(d.width,d.height)
implicit def toDim(v:Vector2) = new Dimension(v.x,v.y)
implicit def fromInt(i:Int) = new Vector2(i)
def max(v1: Vector2, v2: Vector2) = if (v1.length >= v2.length) v1 else v2
}
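/**
 * Hedged usage sketch (added for illustration, not part of the original file): shows how the
 * implicit conversions above let Swing's Point/Dimension mix with Vector2 arithmetic.
 * The object name Vector2Demo is an assumption chosen here.
 */
object Vector2Demo extends App {
  val p = new Point(3, 4)
  // `p` is implicitly converted to Vector2 for the addition, and the Vector2 result is
  // implicitly converted back to a Point for the assignment.
  val moved: Point = Vector2(1, 2) + p
  println(moved) // java.awt.Point[x=4,y=6]
  val d = new Dimension(100, 50)
  val grown: Dimension = Vector2(10, 10) + d
  println(grown) // java.awt.Dimension[width=110,height=60]
}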
|
evilcandybag/Stagehand-core
|
src/main/scala/se/stagehand/swing/lib/Vector2.scala
|
Scala
|
gpl-2.0
| 875 |
package io.iohk.ethereum.db.storage
import akka.util.ByteString
import boopickle.Default._
import io.iohk.ethereum.db.dataSource.DataSource
import io.iohk.ethereum.db.storage.ChainWeightStorage._
import io.iohk.ethereum.domain.ChainWeight
import io.iohk.ethereum.utils.ByteUtils.{byteSequenceToBuffer, compactPickledBytes}
/**
* This class is used to store the ChainWeight of blocks, by using:
* Key: hash of the block
* Value: ChainWeight
*/
class ChainWeightStorage(val dataSource: DataSource) extends TransactionalKeyValueStorage[BlockHash, ChainWeight] {
val namespace: IndexedSeq[Byte] = Namespaces.ChainWeightNamespace
val keySerializer: BlockHash => ByteString = identity
val keyDeserializer: IndexedSeq[Byte] => BlockHash = bytes => ByteString(bytes: _*)
val valueSerializer: ChainWeight => IndexedSeq[Byte] = Pickle.intoBytes[ChainWeight] _ andThen compactPickledBytes
val valueDeserializer: IndexedSeq[Byte] => ChainWeight =
byteSequenceToBuffer _ andThen Unpickle[ChainWeight].fromBytes
}
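// Hedged note (added for illustration, inferred from the definitions above rather than stated
// in the original codebase): the value codec is a boopickle round trip, so for any
// `w: ChainWeight` one would expect `valueDeserializer(valueSerializer(w)) == w`, while keys
// are stored verbatim as the block-hash ByteString.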
object ChainWeightStorage {
type BlockHash = ByteString
}
|
input-output-hk/etc-client
|
src/main/scala/io/iohk/ethereum/db/storage/ChainWeightStorage.scala
|
Scala
|
mit
| 1,089 |
import scala.compiletime.{constValueTuple, constValue}
object Sums extends App:
println(constValueTuple[Plus[(true, true, true), (true, true)]]) // works
println(constValueTuple[Plus1[(true, true, true), (true, true)]]) // fails
println(constValueTuple[
Reverse[PlusLoop[Reverse[(true, true, true)], Reverse[(true, true)], false]]]
) // also works, even though it is just an unfolding of the `Plus1` application
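// Note (added for clarity): each tuple encodes a non-negative integer in binary with the most
// significant bit first, e.g. (true, true, true) is 7 and (true, true) is 3, so the working
// printlns above are expected to yield (true, false, true, false), i.e. 10.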
type Plus[A <: Tuple, B <: Tuple] <: Tuple = (A, B) match
case (EmptyTuple, EmptyTuple) => EmptyTuple
case (a, b) => Reverse[PlusLoop[Reverse[A], Reverse[B], false]]
type Plus1[A <: Tuple, B <: Tuple] = Reverse[PlusLoop[Reverse[A], Reverse[B], false]]
type ReverseLoop[A, XS <: Tuple] <: Tuple = A match {
case EmptyTuple => XS
case x *: xs => ReverseLoop[xs, x *: XS]
}
type Reverse[A] = ReverseLoop[A, EmptyTuple]
type PlusTri[A, B, C] = (A, B, C) match
case (false, false, false) => (false, false)
case (true, false, false) | (false, true, false) | (false, false, true) => (false, true)
case (true, true, false) | (true, false, true) | (false, true, true) => (true, false)
case (true, true, true) => (true, true)
type Inc[A <: Tuple] <: Tuple = A match
case EmptyTuple => true *: EmptyTuple
case t *: as =>
t match
case false => true *: as
case true => false *: Inc[as]
type IncT[A <: Tuple, O <: Boolean] <: Tuple = O match
case false => A
case true => Inc[A]
type PlusLoop[A <: Tuple, B <: Tuple, O] <: Tuple = (A, B) match
case (EmptyTuple, EmptyTuple) =>
O match
case true => (true *: EmptyTuple)
case false => EmptyTuple
case (EmptyTuple, B) => IncT[B, O]
case (A, EmptyTuple) => IncT[A, O]
case (a *: as, b *: bs) =>
PlusTri[a, b, O] match
case (x, y) => y *: PlusLoop[as, bs, x]
|
lampepfl/dotty
|
tests/pos/13633.scala
|
Scala
|
apache-2.0
| 2,060 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.io._
import java.nio.charset.StandardCharsets.UTF_8
import java.nio.file.{Files, Paths}
import java.util.Properties
import java.util.concurrent.ConcurrentLinkedQueue
import java.util.concurrent.atomic.AtomicInteger
import scala.collection.mutable
import scala.util.Random
import org.apache.kafka.clients.producer.RecordMetadata
import org.apache.kafka.common.TopicPartition
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.time.SpanSugar._
import org.apache.spark.SparkContext
import org.apache.spark.sql.ForeachWriter
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions.{count, window}
import org.apache.spark.sql.kafka010.KafkaSourceProvider._
import org.apache.spark.sql.streaming.{ProcessingTime, StreamTest}
import org.apache.spark.sql.test.{SharedSQLContext, TestSparkSession}
abstract class KafkaSourceTest extends StreamTest with SharedSQLContext {
protected var testUtils: KafkaTestUtils = _
override val streamingTimeout = 30.seconds
override def beforeAll(): Unit = {
super.beforeAll()
testUtils = new KafkaTestUtils
testUtils.setup()
}
override def afterAll(): Unit = {
if (testUtils != null) {
testUtils.teardown()
testUtils = null
super.afterAll()
}
}
protected def makeSureGetOffsetCalled = AssertOnQuery { q =>
// Because KafkaSource's initialPartitionOffsets is set lazily, we need to make sure
// its "getOffset" is called before pushing any data. Otherwise, because of the race contion,
// we don't know which data should be fetched when `startingOffsets` is latest.
q.processAllAvailable()
true
}
/**
* Add data to Kafka.
*
* `topicAction` can be used to run actions for each topic before inserting data.
*/
case class AddKafkaData(topics: Set[String], data: Int*)
(implicit ensureDataInMultiplePartition: Boolean = false,
concurrent: Boolean = false,
message: String = "",
topicAction: (String, Option[Int]) => Unit = (_, _) => {}) extends AddData {
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
if (query.get.isActive) {
// Make sure no Spark job is running when deleting a topic
query.get.processAllAvailable()
}
val existingTopics = testUtils.getAllTopicsAndPartitionSize().toMap
val newTopics = topics.diff(existingTopics.keySet)
for (newTopic <- newTopics) {
topicAction(newTopic, None)
}
for (existingTopicPartitions <- existingTopics) {
topicAction(existingTopicPartitions._1, Some(existingTopicPartitions._2))
}
// Read all topics again in case some topics were deleted.
val allTopics = testUtils.getAllTopicsAndPartitionSize().toMap.keys
require(
query.nonEmpty,
"Cannot add data when there is no query for finding the active kafka source")
val sources = query.get.logicalPlan.collect {
case StreamingExecutionRelation(source, _) if source.isInstanceOf[KafkaSource] =>
source.asInstanceOf[KafkaSource]
}
if (sources.isEmpty) {
throw new Exception(
"Could not find Kafka source in the StreamExecution logical plan to add data to")
} else if (sources.size > 1) {
throw new Exception(
"Could not select the Kafka source in the StreamExecution logical plan as there" +
"are multiple Kafka sources:\\n\\t" + sources.mkString("\\n\\t"))
}
val kafkaSource = sources.head
val topic = topics.toSeq(Random.nextInt(topics.size))
val sentMetadata = testUtils.sendMessages(topic, data.map { _.toString }.toArray)
def metadataToStr(m: (String, RecordMetadata)): String = {
s"Sent ${m._1} to partition ${m._2.partition()}, offset ${m._2.offset()}"
}
// Verify that the test data gets inserted into multiple partitions
if (ensureDataInMultiplePartition) {
require(
sentMetadata.groupBy(_._2.partition).size > 1,
s"Added data does not test multiple partitions: ${sentMetadata.map(metadataToStr)}")
}
val offset = KafkaSourceOffset(testUtils.getLatestOffsets(topics))
logInfo(s"Added data, expected offset $offset")
(kafkaSource, offset)
}
override def toString: String =
s"AddKafkaData(topics = $topics, data = $data, message = $message)"
}
}
class KafkaSourceSuite extends KafkaSourceTest {
import testImplicits._
private val topicId = new AtomicInteger(0)
testWithUninterruptibleThread(
"deserialization of initial offset with Spark 2.1.0") {
withTempDir { metadataPath =>
val topic = newTopic
testUtils.createTopic(topic, partitions = 3)
val provider = new KafkaSourceProvider
val parameters = Map(
"kafka.bootstrap.servers" -> testUtils.brokerAddress,
"subscribe" -> topic
)
val source = provider.createSource(spark.sqlContext, metadataPath.getAbsolutePath, None,
"", parameters)
source.getOffset.get // Write initial offset
// Make sure Spark 2.1.0 will throw an exception when reading the new log
intercept[java.lang.IllegalArgumentException] {
// Simulate how Spark 2.1.0 reads the log
val in = new FileInputStream(metadataPath.getAbsolutePath + "/0")
val length = in.read()
val bytes = new Array[Byte](length)
in.read(bytes)
KafkaSourceOffset(SerializedOffset(new String(bytes, UTF_8)))
}
}
}
testWithUninterruptibleThread("deserialization of initial offset written by Spark 2.1.0") {
withTempDir { metadataPath =>
val topic = "kafka-initial-offset-2-1-0"
testUtils.createTopic(topic, partitions = 3)
val provider = new KafkaSourceProvider
val parameters = Map(
"kafka.bootstrap.servers" -> testUtils.brokerAddress,
"subscribe" -> topic
)
val from = Paths.get(
getClass.getResource("/kafka-source-initial-offset-version-2.1.0.bin").getPath)
val to = Paths.get(s"${metadataPath.getAbsolutePath}/0")
Files.copy(from, to)
val source = provider.createSource(spark.sqlContext, metadataPath.getAbsolutePath, None,
"", parameters)
val deserializedOffset = source.getOffset.get
val referenceOffset = KafkaSourceOffset((topic, 0, 0L), (topic, 1, 0L), (topic, 2, 0L))
assert(referenceOffset == deserializedOffset)
}
}
testWithUninterruptibleThread("deserialization of initial offset written by future version") {
withTempDir { metadataPath =>
val futureMetadataLog =
new HDFSMetadataLog[KafkaSourceOffset](sqlContext.sparkSession,
metadataPath.getAbsolutePath) {
override def serialize(metadata: KafkaSourceOffset, out: OutputStream): Unit = {
out.write(0)
val writer = new BufferedWriter(new OutputStreamWriter(out, UTF_8))
writer.write(s"v99999\\n${metadata.json}")
writer.flush
}
}
val topic = newTopic
testUtils.createTopic(topic, partitions = 3)
val offset = KafkaSourceOffset((topic, 0, 0L), (topic, 1, 0L), (topic, 2, 0L))
futureMetadataLog.add(0, offset)
val provider = new KafkaSourceProvider
val parameters = Map(
"kafka.bootstrap.servers" -> testUtils.brokerAddress,
"subscribe" -> topic
)
val source = provider.createSource(spark.sqlContext, metadataPath.getAbsolutePath, None,
"", parameters)
val e = intercept[java.lang.IllegalStateException] {
source.getOffset.get // Read initial offset
}
Seq(
s"maximum supported log version is v${KafkaSource.VERSION}, but encountered v99999",
"produced by a newer version of Spark and cannot be read by this version"
).foreach { message =>
assert(e.getMessage.contains(message))
}
}
}
test("(de)serialization of initial offsets") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 64)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
testStream(reader.load)(
makeSureGetOffsetCalled,
StopStream,
StartStream(),
StopStream)
}
test("maxOffsetsPerTrigger") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 3)
testUtils.sendMessages(topic, (100 to 200).map(_.toString).toArray, Some(0))
testUtils.sendMessages(topic, (10 to 20).map(_.toString).toArray, Some(1))
testUtils.sendMessages(topic, Array("1"), Some(2))
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("maxOffsetsPerTrigger", 10)
.option("subscribe", topic)
.option("startingOffsets", "earliest")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
val clock = new StreamManualClock
val waitUntilBatchProcessed = AssertOnQuery { q =>
eventually(Timeout(streamingTimeout)) {
if (!q.exception.isDefined) {
assert(clock.isStreamWaitingAt(clock.getTimeMillis()))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}
testStream(mapped)(
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
// 1 from smallest, 1 from middle, 8 from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116
),
StopStream,
StartStream(ProcessingTime(100), clock),
waitUntilBatchProcessed,
AdvanceManualClock(100),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121, 122, 123, 124, 125
),
AdvanceManualClock(100),
waitUntilBatchProcessed,
// smallest now empty, 1 more from middle, 9 more from biggest
CheckAnswer(1, 10, 100, 101, 102, 103, 104, 105, 106, 107,
11, 108, 109, 110, 111, 112, 113, 114, 115, 116,
12, 117, 118, 119, 120, 121, 122, 123, 124, 125,
13, 126, 127, 128, 129, 130, 131, 132, 133, 134
)
)
}
test("cannot stop Kafka stream") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, (101 to 105).map { _.toString }.toArray)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", s"topic-.*")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
StopStream
)
}
for (failOnDataLoss <- Seq(true, false)) {
test(s"assign from latest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromLatestOffsets(
topic,
addPartitions = false,
failOnDataLoss = failOnDataLoss,
"assign" -> assignString(topic, 0 to 4))
}
test(s"assign from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromEarliestOffsets(
topic,
addPartitions = false,
failOnDataLoss = failOnDataLoss,
"assign" -> assignString(topic, 0 to 4))
}
test(s"assign from specific offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromSpecificOffsets(
topic,
failOnDataLoss = failOnDataLoss,
"assign" -> assignString(topic, 0 to 4),
"failOnDataLoss" -> failOnDataLoss.toString)
}
test(s"subscribing topic by name from latest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromLatestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribe" -> topic)
}
test(s"subscribing topic by name from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromEarliestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribe" -> topic)
}
test(s"subscribing topic by name from specific offsets (failOnDataLoss: $failOnDataLoss)") {
val topic = newTopic()
testFromSpecificOffsets(topic, failOnDataLoss = failOnDataLoss, "subscribe" -> topic)
}
test(s"subscribing topic by pattern from latest offsets (failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromLatestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribePattern" -> s"$topicPrefix-.*")
}
test(s"subscribing topic by pattern from earliest offsets (failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromEarliestOffsets(
topic,
addPartitions = true,
failOnDataLoss = failOnDataLoss,
"subscribePattern" -> s"$topicPrefix-.*")
}
test(s"subscribing topic by pattern from specific offsets (failOnDataLoss: $failOnDataLoss)") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-suffix"
testFromSpecificOffsets(
topic,
failOnDataLoss = failOnDataLoss,
"subscribePattern" -> s"$topicPrefix-.*")
}
}
test("subscribing topic by pattern with topic deletions") {
val topicPrefix = newTopic()
val topic = topicPrefix + "-seems"
val topic2 = topicPrefix + "-bad"
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", s"$topicPrefix-.*")
.option("failOnDataLoss", "false")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
Assert {
testUtils.deleteTopic(topic)
testUtils.createTopic(topic2, partitions = 5)
true
},
AddKafkaData(Set(topic2), 4, 5, 6),
CheckAnswer(2, 3, 4, 5, 6, 7)
)
}
test("starting offset is latest by default") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("0"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("subscribe", topic)
val kafka = reader.load()
.selectExpr("CAST(value AS STRING)")
.as[String]
val mapped = kafka.map(_.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(1, 2, 3) // should not have 0
)
}
test("bad source options") {
def testBadOptions(options: (String, String)*)(expectedMsgs: String*): Unit = {
val ex = intercept[IllegalArgumentException] {
val reader = spark
.readStream
.format("kafka")
options.foreach { case (k, v) => reader.option(k, v) }
reader.load()
}
expectedMsgs.foreach { m =>
assert(ex.getMessage.toLowerCase.contains(m.toLowerCase))
}
}
// Specifying an ending offset
testBadOptions("endingOffsets" -> "latest")("Ending offset not valid in streaming queries")
// No strategy specified
testBadOptions()("options must be specified", "subscribe", "subscribePattern")
// Multiple strategies specified
testBadOptions("subscribe" -> "t", "subscribePattern" -> "t.*")(
"only one", "options can be specified")
testBadOptions("subscribe" -> "t", "assign" -> """{"a":[0]}""")(
"only one", "options can be specified")
testBadOptions("assign" -> "")("no topicpartitions to assign")
testBadOptions("subscribe" -> "")("no topics to subscribe")
testBadOptions("subscribePattern" -> "")("pattern to subscribe is empty")
}
test("unsupported kafka configs") {
def testUnsupportedConfig(key: String, value: String = "someValue"): Unit = {
val ex = intercept[IllegalArgumentException] {
val reader = spark
.readStream
.format("kafka")
.option("subscribe", "topic")
.option("kafka.bootstrap.servers", "somehost")
.option(s"$key", value)
reader.load()
}
assert(ex.getMessage.toLowerCase.contains("not supported"))
}
testUnsupportedConfig("kafka.group.id")
testUnsupportedConfig("kafka.auto.offset.reset")
testUnsupportedConfig("kafka.enable.auto.commit")
testUnsupportedConfig("kafka.interceptor.classes")
testUnsupportedConfig("kafka.key.deserializer")
testUnsupportedConfig("kafka.value.deserializer")
testUnsupportedConfig("kafka.auto.offset.reset", "none")
testUnsupportedConfig("kafka.auto.offset.reset", "someValue")
testUnsupportedConfig("kafka.auto.offset.reset", "earliest")
testUnsupportedConfig("kafka.auto.offset.reset", "latest")
}
test("input row metrics") {
val topic = newTopic()
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val kafka = spark
.readStream
.format("kafka")
.option("subscribe", topic)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
StartStream(trigger = ProcessingTime(1)),
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
AssertOnQuery { query =>
val recordsRead = query.recentProgress.map(_.numInputRows).sum
recordsRead == 3
}
)
}
test("delete a topic when a Spark job is running") {
KafkaSourceSuite.collectedData.clear()
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessages(topic, (1 to 10).map(_.toString).toArray)
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribe", topic)
// If a topic is deleted and we try to poll data starting from offset 0,
// the Kafka consumer will just block until timeout and return an empty result.
// So set the timeout to 1 second to make this test fast.
.option("kafkaConsumer.pollTimeoutMs", "1000")
.option("startingOffsets", "earliest")
.option("failOnDataLoss", "false")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
KafkaSourceSuite.globalTestUtils = testUtils
// The following ForeachWriter will delete the topic before fetching data from Kafka
// in executors.
val query = kafka.map(kv => kv._2.toInt).writeStream.foreach(new ForeachWriter[Int] {
override def open(partitionId: Long, version: Long): Boolean = {
KafkaSourceSuite.globalTestUtils.deleteTopic(topic)
true
}
override def process(value: Int): Unit = {
KafkaSourceSuite.collectedData.add(value)
}
override def close(errorOrNull: Throwable): Unit = {}
}).start()
query.processAllAvailable()
query.stop()
// `failOnDataLoss` is `false`, we should not fail the query
assert(query.exception.isEmpty)
}
test("get offsets from case insensitive parameters") {
for ((optionKey, optionValue, answer) <- Seq(
(STARTING_OFFSETS_OPTION_KEY, "earLiEst", EarliestOffsetRangeLimit),
(ENDING_OFFSETS_OPTION_KEY, "laTest", LatestOffsetRangeLimit),
(STARTING_OFFSETS_OPTION_KEY, """{"topic-A":{"0":23}}""",
SpecificOffsetRangeLimit(Map(new TopicPartition("topic-A", 0) -> 23))))) {
val offset = getKafkaOffsetRangeLimit(Map(optionKey -> optionValue), optionKey, answer)
assert(offset === answer)
}
for ((optionKey, answer) <- Seq(
(STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit),
(ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit))) {
val offset = getKafkaOffsetRangeLimit(Map.empty, optionKey, answer)
assert(offset === answer)
}
}
private def newTopic(): String = s"topic-${topicId.getAndIncrement()}"
private def assignString(topic: String, partitions: Iterable[Int]): String = {
JsonUtils.partitions(partitions.map(p => new TopicPartition(topic, p)))
}
private def testFromSpecificOffsets(
topic: String,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
val partitionOffsets = Map(
new TopicPartition(topic, 0) -> -2L,
new TopicPartition(topic, 1) -> -1L,
new TopicPartition(topic, 2) -> 0L,
new TopicPartition(topic, 3) -> 1L,
new TopicPartition(topic, 4) -> 2L
)
val startingOffsets = JsonUtils.partitionOffsets(partitionOffsets)
testUtils.createTopic(topic, partitions = 5)
// part 0 starts at earliest, these should all be seen
testUtils.sendMessages(topic, Array(-20, -21, -22).map(_.toString), Some(0))
// part 1 starts at latest, these should all be skipped
testUtils.sendMessages(topic, Array(-10, -11, -12).map(_.toString), Some(1))
// part 2 starts at 0, these should all be seen
testUtils.sendMessages(topic, Array(0, 1, 2).map(_.toString), Some(2))
// part 3 starts at 1, first should be skipped
testUtils.sendMessages(topic, Array(10, 11, 12).map(_.toString), Some(3))
// part 4 starts at 2, first and second should be skipped
testUtils.sendMessages(topic, Array(20, 21, 22).map(_.toString), Some(4))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("startingOffsets", startingOffsets)
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped: org.apache.spark.sql.Dataset[_] = kafka.map(kv => kv._2.toInt)
testStream(mapped)(
makeSureGetOffsetCalled,
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22),
StopStream,
StartStream(),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22), // Should get the data back on recovery
AddKafkaData(Set(topic), 30, 31, 32, 33, 34)(ensureDataInMultiplePartition = true),
CheckAnswer(-20, -21, -22, 0, 1, 2, 11, 12, 22, 30, 31, 32, 33, 34),
StopStream
)
}
test("Kafka column types") {
val now = System.currentTimeMillis()
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessages(topic, Array(1).map(_.toString))
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("startingOffsets", s"earliest")
.option("subscribe", topic)
.load()
val query = kafka
.writeStream
.format("memory")
.outputMode("append")
.queryName("kafkaColumnTypes")
.start()
query.processAllAvailable()
val rows = spark.table("kafkaColumnTypes").collect()
assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
val row = rows(0)
assert(row.getAs[Array[Byte]]("key") === null, s"Unexpected results: $row")
assert(row.getAs[Array[Byte]]("value") === "1".getBytes(UTF_8), s"Unexpected results: $row")
assert(row.getAs[String]("topic") === topic, s"Unexpected results: $row")
assert(row.getAs[Int]("partition") === 0, s"Unexpected results: $row")
assert(row.getAs[Long]("offset") === 0L, s"Unexpected results: $row")
// We cannot check the exact timestamp as it's the time that messages were inserted by the
// producer. So here we just use a low bound to make sure the internal conversion works.
assert(row.getAs[java.sql.Timestamp]("timestamp").getTime >= now, s"Unexpected results: $row")
assert(row.getAs[Int]("timestampType") === 0, s"Unexpected results: $row")
query.stop()
}
test("KafkaSource with watermark") {
val now = System.currentTimeMillis()
val topic = newTopic()
testUtils.createTopic(topic, partitions = 1)
testUtils.sendMessages(topic, Array(1).map(_.toString))
val kafka = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("startingOffsets", s"earliest")
.option("subscribe", topic)
.load()
val windowedAggregation = kafka
.withWatermark("timestamp", "10 seconds")
.groupBy(window($"timestamp", "5 seconds") as 'window)
.agg(count("*") as 'count)
.select($"window".getField("start") as 'window, $"count")
val query = windowedAggregation
.writeStream
.format("memory")
.outputMode("complete")
.queryName("kafkaWatermark")
.start()
query.processAllAvailable()
val rows = spark.table("kafkaWatermark").collect()
assert(rows.length === 1, s"Unexpected results: ${rows.toList}")
val row = rows(0)
// We cannot check the exact window start time as it depends on the time that messages were
// inserted by the producer. So here we just use a low bound to make sure the internal
// conversion works.
assert(
row.getAs[java.sql.Timestamp]("window").getTime >= now - 5 * 1000,
s"Unexpected results: $row")
assert(row.getAs[Int]("count") === 1, s"Unexpected results: $row")
query.stop()
}
private def testFromLatestOffsets(
topic: String,
addPartitions: Boolean,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, Array("-1"))
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark
.readStream
.format("kafka")
.option("startingOffsets", s"latest")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
makeSureGetOffsetCalled,
AddKafkaData(Set(topic), 1, 2, 3),
CheckAnswer(2, 3, 4),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4), // Should get the data back on recovery
StopStream,
AddKafkaData(Set(topic), 4, 5, 6), // Add data when stream is stopped
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7), // Should get the added data
AddKafkaData(Set(topic), 7, 8),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) {
testUtils.addPartitions(topic, 10)
}
true
},
AddKafkaData(Set(topic), 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
private def testFromEarliestOffsets(
topic: String,
addPartitions: Boolean,
failOnDataLoss: Boolean,
options: (String, String)*): Unit = {
testUtils.createTopic(topic, partitions = 5)
testUtils.sendMessages(topic, (1 to 3).map { _.toString }.toArray)
require(testUtils.getLatestOffsets(Set(topic)).size === 5)
val reader = spark.readStream
reader
.format(classOf[KafkaSourceProvider].getCanonicalName.stripSuffix("$"))
.option("startingOffsets", s"earliest")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("failOnDataLoss", failOnDataLoss.toString)
options.foreach { case (k, v) => reader.option(k, v) }
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
testStream(mapped)(
AddKafkaData(Set(topic), 4, 5, 6), // Add data when stream is stopped
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7),
StopStream,
AddKafkaData(Set(topic), 7, 8),
StartStream(),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9),
AssertOnQuery("Add partitions") { query: StreamExecution =>
if (addPartitions) {
testUtils.addPartitions(topic, 10)
}
true
},
AddKafkaData(Set(topic), 9, 10, 11, 12, 13, 14, 15, 16),
CheckAnswer(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17)
)
}
}
object KafkaSourceSuite {
@volatile var globalTestUtils: KafkaTestUtils = _
val collectedData = new ConcurrentLinkedQueue[Any]()
}
class KafkaSourceStressSuite extends KafkaSourceTest {
import testImplicits._
val topicId = new AtomicInteger(1)
@volatile var topics: Seq[String] = (1 to 5).map(_ => newStressTopic)
def newStressTopic: String = s"stress${topicId.getAndIncrement()}"
private def nextInt(start: Int, end: Int): Int = {
start + Random.nextInt(start + end - 1)
}
test("stress test with multiple topics and partitions") {
topics.foreach { topic =>
testUtils.createTopic(topic, partitions = nextInt(1, 6))
testUtils.sendMessages(topic, (101 to 105).map { _.toString }.toArray)
}
// Create Kafka source that reads from latest offset
val kafka =
spark.readStream
.format(classOf[KafkaSourceProvider].getCanonicalName.stripSuffix("$"))
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", "stress.*")
.option("failOnDataLoss", "false")
.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val mapped = kafka.map(kv => kv._2.toInt + 1)
runStressTest(
mapped,
Seq(makeSureGetOffsetCalled),
(d, running) => {
Random.nextInt(5) match {
case 0 => // Add a new topic
topics = topics ++ Seq(newStressTopic)
AddKafkaData(topics.toSet, d: _*)(message = s"Add topic $newStressTopic",
topicAction = (topic, partition) => {
if (partition.isEmpty) {
testUtils.createTopic(topic, partitions = nextInt(1, 6))
}
})
case 1 if running =>
// Only delete a topic when the query is running. Otherwise, we may lose data and
// cannot check the correctness.
val deletedTopic = topics(Random.nextInt(topics.size))
if (deletedTopic != topics.head) {
topics = topics.filterNot(_ == deletedTopic)
}
AddKafkaData(topics.toSet, d: _*)(message = s"Delete topic $deletedTopic",
topicAction = (topic, partition) => {
// Never remove the first topic to make sure we have at least one topic
if (topic == deletedTopic && deletedTopic != topics.head) {
testUtils.deleteTopic(deletedTopic)
}
})
case 2 => // Add new partitions
AddKafkaData(topics.toSet, d: _*)(message = "Add partition",
topicAction = (topic, partition) => {
testUtils.addPartitions(topic, partition.get + nextInt(1, 6))
})
case _ => // Just add new data
AddKafkaData(topics.toSet, d: _*)
}
},
iterations = 50)
}
}
class KafkaSourceStressForDontFailOnDataLossSuite extends StreamTest with SharedSQLContext {
import testImplicits._
private var testUtils: KafkaTestUtils = _
private val topicId = new AtomicInteger(0)
private def newTopic(): String = s"failOnDataLoss-${topicId.getAndIncrement()}"
override def createSparkSession(): TestSparkSession = {
// Set maxRetries to 3 to handle NPE from `poll` when deleting a topic
new TestSparkSession(new SparkContext("local[2,3]", "test-sql-context", sparkConf))
}
override def beforeAll(): Unit = {
super.beforeAll()
testUtils = new KafkaTestUtils {
override def brokerConfiguration: Properties = {
val props = super.brokerConfiguration
// Try to make Kafka clean up messages as fast as possible. However, there is a hard-coded
// 30-second delay (kafka.log.LogManager.InitialTaskDelayMs), so this test should run for at
// least 30 seconds.
props.put("log.cleaner.backoff.ms", "100")
props.put("log.segment.bytes", "40")
props.put("log.retention.bytes", "40")
props.put("log.retention.check.interval.ms", "100")
props.put("delete.retention.ms", "10")
props.put("log.flush.scheduler.interval.ms", "10")
props
}
}
testUtils.setup()
}
override def afterAll(): Unit = {
if (testUtils != null) {
testUtils.teardown()
testUtils = null
super.afterAll()
}
}
test("stress test for failOnDataLoss=false") {
val reader = spark
.readStream
.format("kafka")
.option("kafka.bootstrap.servers", testUtils.brokerAddress)
.option("kafka.metadata.max.age.ms", "1")
.option("subscribePattern", "failOnDataLoss.*")
.option("startingOffsets", "earliest")
.option("failOnDataLoss", "false")
.option("fetchOffset.retryIntervalMs", "3000")
val kafka = reader.load()
.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")
.as[(String, String)]
val query = kafka.map(kv => kv._2.toInt).writeStream.foreach(new ForeachWriter[Int] {
override def open(partitionId: Long, version: Long): Boolean = {
true
}
override def process(value: Int): Unit = {
// Slow down the processing speed so that messages may be aged out.
Thread.sleep(Random.nextInt(500))
}
override def close(errorOrNull: Throwable): Unit = {
}
}).start()
val testTime = 1.minutes
val startTime = System.currentTimeMillis()
// Track the current existing topics
val topics = mutable.ArrayBuffer[String]()
// Track topics that have been deleted
val deletedTopics = mutable.Set[String]()
while (System.currentTimeMillis() - testTime.toMillis < startTime) {
Random.nextInt(10) match {
case 0 => // Create a new topic
val topic = newTopic()
topics += topic
// As pushing messages into Kafka updates Zookeeper asynchronously, there is a small
// chance that a topic will be recreated after deletion due to the asynchronous update.
// Hence, always overwrite to handle this race condition.
testUtils.createTopic(topic, partitions = 1, overwrite = true)
logInfo(s"Create topic $topic")
case 1 if topics.nonEmpty => // Delete an existing topic
val topic = topics.remove(Random.nextInt(topics.size))
testUtils.deleteTopic(topic)
logInfo(s"Delete topic $topic")
deletedTopics += topic
case 2 if deletedTopics.nonEmpty => // Recreate a topic that was deleted.
val topic = deletedTopics.toSeq(Random.nextInt(deletedTopics.size))
deletedTopics -= topic
topics += topic
// As pushing messages into Kafka updates Zookeeper asynchronously, there is a small
// chance that a topic will be recreated after deletion due to the asynchronous update.
// Hence, always overwrite to handle this race condition.
testUtils.createTopic(topic, partitions = 1, overwrite = true)
logInfo(s"Create topic $topic")
case 3 =>
Thread.sleep(1000)
case _ => // Push random messages
for (topic <- topics) {
val size = Random.nextInt(10)
for (_ <- 0 until size) {
testUtils.sendMessages(topic, Array(Random.nextInt(10).toString))
}
}
}
// `failOnDataLoss` is `false`, we should not fail the query
if (query.exception.nonEmpty) {
throw query.exception.get
}
}
query.stop()
// `failOnDataLoss` is `false`, we should not fail the query
if (query.exception.nonEmpty) {
throw query.exception.get
}
}
}
|
spark0001/spark2.1.1
|
external/kafka-0-10-sql/src/test/scala/org/apache/spark/sql/kafka010/KafkaSourceSuite.scala
|
Scala
|
apache-2.0
| 38,976 |
package com.lynbrookrobotics.potassium
import com.lynbrookrobotics.potassium.tasks.{ContinuousTask, FiniteTask, Task}
import org.scalatest.{BeforeAndAfter, FunSuite}
class ParallelContinuousTaskTest extends FunSuite with BeforeAndAfter {
after {
Task.abortCurrentTask()
}
test("Parallel continuous task goes through correct flow") {
var task1Started = false
var task1Ended = false
var task2Started = false
var task2Ended = false
val task1 = new ContinuousTask {
override def onStart(): Unit = {
task1Started = true
}
override def onEnd(): Unit = {
task1Ended = true
}
}
val task2 = new ContinuousTask {
override def onStart(): Unit = {
task2Started = true
}
override def onEnd(): Unit = {
task2Ended = true
}
}
val parallel = task1 and task2
assert(!task1Started && !task1Ended && !task2Started && !task2Ended)
Task.executeTask(parallel)
parallel.abort()
assert(task1Started && task1Ended && task2Started && task2Ended)
}
}
|
Team846/potassium
|
core/shared/src/test/scala/com/lynbrookrobotics/potassium/ParallelContinuousTaskTest.scala
|
Scala
|
mit
| 1,081 |
package support
import play.api.i18n._
import play.api.mvc._
trait LangLookupSupport {
implicit def lang(implicit request: RequestHeader): Lang = request.acceptLanguages.head
}
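// Hedged usage sketch (added for illustration; controller and action names are assumptions):
// mixing the trait into a controller makes the request's preferred language available wherever
// an implicit Lang is required.
//
//   class ApplicationController extends Controller with LangLookupSupport {
//     def index = Action { implicit request =>
//       // `lang` resolves to request.acceptLanguages.head
//       Ok(s"Preferred language: ${lang.code}")
//     }
//   }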
|
bravegag/play-authenticate-usage-scala
|
app/support/LangLookupSupport.scala
|
Scala
|
apache-2.0
| 181 |
/*
* @author Daniel Strebel
* @author Philip Stutz
*
* Copyright 2012 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.signalcollect
import org.scalatest.Matchers
import org.scalatest.FlatSpec
import com.signalcollect.util.TestAnnouncements
class GraphModificationSpec extends FlatSpec with Matchers with TestAnnouncements {
"GraphEditor" should "support modification functions" in {
val graph = TestConfig.graphProvider().build
try {
graph.modifyGraph({ _.addVertex(new GraphModificationVertex(0, 1)) }, Some(0))
graph.modifyGraph({ _.addVertex(new GraphModificationVertex(1, 1)) }, Some(1))
graph.modifyGraph({ _.addVertex(new GraphModificationVertex(2, 1)) }, Some(2))
graph.modifyGraph({ _.addVertex(new GraphModificationVertex(3, 1)) }, Some(3))
graph.modifyGraph({ _.addEdge(0, new StateForwarderEdge(1)) }, Some(0))
graph.modifyGraph({ _.addEdge(1, new StateForwarderEdge(3)) }, Some(1))
var statistics = graph.execute
graph.aggregate(new CountVertices[GraphModificationVertex]) === 4
statistics.aggregatedWorkerStatistics.numberOfVertices === 4
statistics.aggregatedWorkerStatistics.verticesAdded === 4
statistics.aggregatedWorkerStatistics.verticesRemoved === 0
statistics.aggregatedWorkerStatistics.numberOfOutgoingEdges === 2
statistics.aggregatedWorkerStatistics.outgoingEdgesAdded === 2
statistics.aggregatedWorkerStatistics.outgoingEdgesRemoved === 0
graph.modifyGraph({ _.removeVertex(0, true) }, Some(0))
graph.modifyGraph({ _.removeVertex(2, true) }, Some(2))
statistics = graph.execute
graph.aggregate(new CountVertices[GraphModificationVertex]) === 2
statistics.aggregatedWorkerStatistics.numberOfVertices === 2
statistics.aggregatedWorkerStatistics.verticesAdded === 4
statistics.aggregatedWorkerStatistics.verticesRemoved === 2
statistics.aggregatedWorkerStatistics.numberOfOutgoingEdges === 1
statistics.aggregatedWorkerStatistics.outgoingEdgesAdded === 2
statistics.aggregatedWorkerStatistics.outgoingEdgesRemoved === 1
} finally {
graph.shutdown
}
}
it should "keep accurate statistics when using individual vertex removals" in {
val system = TestConfig.actorSystem()
val graph = TestConfig.graphProvider().build
try {
graph.addVertex(new GraphModificationVertex(0, 1))
graph.addVertex(new GraphModificationVertex(1, 1))
graph.addVertex(new GraphModificationVertex(2, 1))
graph.addVertex(new GraphModificationVertex(3, 1))
graph.addEdge(0, new StateForwarderEdge(1))
graph.addEdge(1, new StateForwarderEdge(3))
var statistics = graph.execute
graph.aggregate(new CountVertices[GraphModificationVertex]) === 4
statistics.aggregatedWorkerStatistics.numberOfVertices === 4
statistics.aggregatedWorkerStatistics.verticesAdded === 4
statistics.aggregatedWorkerStatistics.verticesRemoved === 0
statistics.aggregatedWorkerStatistics.numberOfOutgoingEdges === 2
statistics.aggregatedWorkerStatistics.outgoingEdgesAdded === 2
statistics.aggregatedWorkerStatistics.outgoingEdgesRemoved === 0
graph.removeVertex(0, true)
graph.removeVertex(2, true)
statistics = graph.execute
graph.aggregate(new CountVertices[GraphModificationVertex]) === 2
statistics.aggregatedWorkerStatistics.numberOfVertices === 2
statistics.aggregatedWorkerStatistics.verticesAdded === 4
statistics.aggregatedWorkerStatistics.verticesRemoved === 2
statistics.aggregatedWorkerStatistics.numberOfOutgoingEdges === 1
statistics.aggregatedWorkerStatistics.outgoingEdgesAdded === 2
statistics.aggregatedWorkerStatistics.outgoingEdgesRemoved === 1
} finally {
graph.shutdown
}
}
}
class GraphModificationVertex(id: Int, state: Int) extends DataGraphVertex(id, state) {
def collect = 1
}
|
hicolour/signal-collect
|
src/test/scala/com/signalcollect/GraphModificationSpec.scala
|
Scala
|
apache-2.0
| 4,484 |
package org.raml.metamodel;
object Protocol extends EnumType(Str) {
values("HTTP", "HTTPS")
}
object ParameterType extends EnumType(Str) {
values("date", "string", "number", "integer", "file", "boolean");
}
object ActionType extends EnumType(Str) {
values("GET", "POST", "PUT", "DELETE", "PATCH", "HEAD","OPTIONS");
}
object ExampleOrDefaultValType extends Child("Represents example or default", Parameter, Str) {
override def validation(): Condition = parent.ConstraintDetails.selectActual.tValidator();
}
object Parameter extends MetaType("Abstract parameter in RAML model") {
val paramerterName = req(key(str));
val parameterType = prop(ParameterType);
val repeat = bool();
val required = bool();
val example = prop(ExampleOrDefaultValType);
val default = prop(ExampleOrDefaultValType);
object ConstraintDetails extends OneOfTypes[TypeAttrs](StringAttrs, NumberAttrs, OtherAttrs) with Embedded {
descriminator(
(parameterType.$ === "string") -> StringAttrs,
(parameterType.$ === "number") -> NumberAttrs,
(parameterType.$ === "integer") -> NumberAttrs,
(parameterType.$ === "file") -> FileAttrs,
otherwise -> OtherAttrs);
}
}
object FormParameter extends MetaType("form parameters", Parameter) {
}
object QueryParameter extends MetaType("query parameters", Parameter) {
}
object UriParameter extends MetaType("uri parameters", Parameter) {
}
object HeaderParameter extends MetaType("query parameters", Parameter) {
}
object RAMLAction extends MetaType("Raml action model", Parameter){
val queryParameters=multivalue(prop(QueryParameter));
val actionType=prop(ActionType);
}
trait TypeAttrs extends Type {
def tValidator(): Condition;
}
object OtherAttrs extends Child("date, or boolean", Parameter) with TypeAttrs {
def tValidator(): Condition = null;
}
object FileAttrs extends Child("file", Parameter) with TypeAttrs {
require(parent.actualType() instanceOf FormParameter).description("file type is allowed only in forms");
def tValidator(): Condition = null;
}
object StringAttrs extends Child("String typed parameter", Parameter) with TypeAttrs {
val pattern = prop(RegExp);
val minLength = prop(PositiveInt);
val maxLength = prop(PositiveInt);
val enum = prop(Enum);
mutuallyExclusive(pattern, enum, unionOf(minLength, maxLength));
def tValidator(): Condition = null;
}
object NumberAttrs extends Child("Number typed parameter", Parameter) with TypeAttrs {
val min = prop(NumberType).description("This is minimum value");
val max = prop(NumberType).description("This is maximum value");
require(min.$ < max.$).description("min should be less then max");
def tValidator() = ($ < max.$ && $ > min.$).description("value should be less then max and more then min")
}
|
petrochenko-pavel-a/raml-experimentsscala
|
org.raml.model.diet/src/org/raml/metamodel/modelDefinitionExample.scala
|
Scala
|
epl-1.0
| 2,892 |
package com.twitter.util
import org.specs.SpecificationWithJUnit
import com.twitter.conversions.storage._
class StorageUnitSpec extends SpecificationWithJUnit {
"StorageUnit" should {
"convert whole numbers into storage units (back and forth)" in {
1.byte.inBytes mustEqual(1)
1.kilobyte.inBytes mustEqual(1024)
1.megabyte.inMegabytes mustEqual(1.0)
1.gigabyte.inMegabytes mustEqual(1024.0)
1.gigabyte.inKilobytes mustEqual(1024.0 * 1024.0)
}
"confer an essential humanity" in {
900.bytes.toHuman mustEqual "900 B"
1.kilobyte.toHuman mustEqual "1024 B"
2.kilobytes.toHuman mustEqual "2.0 KiB"
Int.MaxValue.bytes.toHuman mustEqual "2.0 GiB"
Long.MaxValue.bytes.toHuman mustEqual "8.0 EiB"
}
"accept humanity" in {
StorageUnit.parse("142.bytes") must be_==(142.bytes)
StorageUnit.parse("78.kilobytes") must be_==(78.kilobytes)
StorageUnit.parse("1.megabyte") must be_==(1.megabyte)
StorageUnit.parse("873.gigabytes") must be_==(873.gigabytes)
StorageUnit.parse("3.terabytes") must be_==(3.terabytes)
StorageUnit.parse("9.petabytes") must be_==(9.petabytes)
StorageUnit.parse("-3.megabytes") must be_==(-3.megabytes)
}
"reject soulless robots" in {
StorageUnit.parse("100.bottles") must throwA[NumberFormatException]
StorageUnit.parse("100 bytes") must throwA[NumberFormatException]
}
"deal with negative values" in {
-123.bytes.inBytes mustEqual(-123)
-2.kilobytes.toHuman mustEqual("-2.0 KiB")
}
}
}
|
mosesn/util
|
util-core/src/test/scala/com/twitter/util/StorageUnitSpec.scala
|
Scala
|
apache-2.0
| 1,579 |
package controllers.conservation
import models.conservation.events._
import no.uio.musit.formatters.DateTimeFormatters.dateTimeFormatter
import no.uio.musit.models._
import no.uio.musit.security.BearerToken
import no.uio.musit.test.matchers.DateTimeMatchers
import no.uio.musit.test.{FakeUsers, MusitSpecWithServerPerSuite}
import no.uio.musit.time
import org.joda.time.DateTime
import play.api.http.Status
import play.api.libs.json._
import play.api.test.Helpers._
//Hint, to run only this test, type:
//test-only controllers.conservation.ConservationReportControllerSpec
class ConservationReportControllerSpec
extends MusitSpecWithServerPerSuite
with DateTimeMatchers
with ConservationJsonGenerators
with ConservationJsonValidators {
val mid = MuseumId(99)
var cid = "2e4f2455-1b3b-4a04-80a1-ba92715ff613"
val token = BearerToken(FakeUsers.testAdminToken)
val tokenGodRole = BearerToken(FakeUsers.superUserToken)
val tokenRead = BearerToken(FakeUsers.testReadToken)
val tokenTest = BearerToken(FakeUsers.testUserToken)
val baseUrl = (mid: Int) => s"/$mid/conservation"
val baseEventUrl = (mid: Int) => s"/$mid/conservation/events"
val typesUrl = (mid: Int) => s"${baseUrl(mid)}/types"
val eventByIdUrl = (mid: Int) => (id: Long) => s"${baseEventUrl(mid)}/$id"
val eventsByObjectUuid = (mid: Int) =>
(id: String) => s"/$mid/conservation/events/object/$id"
val cpsKeyDataByObjectUuid = (mid: Int) =>
(id: String) => s"/$mid/conservation/conservations/object/$id"
val currentMaterialdataForObjectUuid = (mid: Int) =>
(id: String) => s"/$mid/conservation/object/$id/materials"
val currentMeasurementdataForObjectUuid = (mid: Int) =>
(id: String) => s"/$mid/conservation/object/$id/measurements"
val deleteSubEventsUrl = (mid: Int, eventIds: String) =>
baseEventUrl(mid) + s"?eventIds=$eventIds"
val conservationReportUrl = (mid: Int, collectionId: String, EventId: Long) =>
s"${baseUrl(mid)}/conservationReport/$EventId?collectionId=$collectionId"
val conservationReportHTMLUrl = (mid: Int, collectionId: String, EventId: Long) =>
s"${baseUrl(mid)}/conservationReportHTML/$EventId?collectionId=$collectionId"
/* val materialListUrl = (mid: Int, collectionId: String) =>
s"/$mid/conservation/materials?collectionId=$collectionId"*/
def postEvent(json: JsObject, t: BearerToken = token) = {
wsUrl(baseEventUrl(mid)).withHttpHeaders(t.asHeader).post(json).futureValue
}
def getEvent(eventId: Long, t: BearerToken = token) = {
wsUrl(eventByIdUrl(mid)(eventId)).withHttpHeaders(t.asHeader).get().futureValue
}
def putEvent(eventId: Long, json: JsObject, t: BearerToken = token) = {
wsUrl(eventByIdUrl(mid)(eventId)).withHttpHeaders(t.asHeader).put(json).futureValue
}
def getEventForObject(oid: String, t: BearerToken = token) = {
wsUrl(eventsByObjectUuid(mid)(oid)).withHttpHeaders(t.asHeader).get().futureValue
}
def getCurrentMaterialDataForObject(oid: String, t: BearerToken = token) = {
wsUrl(currentMaterialdataForObjectUuid(mid)(oid))
.withHttpHeaders(t.asHeader)
.get()
.futureValue
}
def getCurrentMeasurementDataForObject(oid: String, t: BearerToken = token) = {
wsUrl(currentMeasurementdataForObjectUuid(mid)(oid))
.withHttpHeaders(t.asHeader)
.get()
.futureValue
}
def deleteEvents(eventIds: String, t: BearerToken = token) = {
wsUrl(deleteSubEventsUrl(mid, eventIds))
.withHttpHeaders(t.asHeader)
.delete()
.futureValue
}
def getConservationReport(
eventId: Long,
mid: Int,
collectionId: String,
t: BearerToken = token
) = {
wsUrl(conservationReportUrl(mid, collectionId, eventId))
.withHttpHeaders(t.asHeader)
.get()
.futureValue
}
def getConservationReportHTML(
eventId: Long,
mid: Int,
collectionId: String,
t: BearerToken = token
) = {
wsUrl(conservationReportHTMLUrl(mid, collectionId, eventId))
.withHttpHeaders(t.asHeader)
.get()
.futureValue
}
implicit val minReads = ConservationModuleEvent.reads
implicit val cpReads = ConservationProcess.reads
def getEventObject(
eventId: Long,
t: BearerToken = token
) = {
val res = getEvent(eventId, t)
res.json.validate[ConservationModuleEvent].get
}
def MaybeGetEventObject(
eventId: Long,
t: BearerToken = token
) = {
val res = getEvent(eventId, t)
if (res.status == OK) Some(res.json.validate[ConservationModuleEvent].get)
else None
}
def addDummyConservationProcess(t: BearerToken = token) = {
val js =
dummyEventJSON(
conservationProcessEventTypeId,
Some("testKommentar"),
Some("777"),
Some(testAffectedThings),
true
)
postEvent(js)
}
def getConservationProcess(
mid: MuseumId,
eventId: EventId,
t: BearerToken = token
): ConservationProcess = {
val cp = getEvent(eventId, t)
cp.json.validate[ConservationProcess].get
}
def putConservationProcess(
eventId: EventId,
json: JsObject,
t: BearerToken = token
): ConservationProcess = {
val cp = putEvent(eventId, json, t)
cp.json.validate[ConservationProcess].get
}
def getCpsKeyDataForObject(oid: String, t: BearerToken = token) = {
wsUrl(cpsKeyDataByObjectUuid(mid)(oid)).withHttpHeaders(t.asHeader).get().futureValue
}
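// The ids below are the event ids we expect the backend to assign, given that ids are
// generated sequentially and the tests in this spec insert events in a fixed order.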
val standaloneTreatmentId = 4L
val compositeConservationProcessEventId = standaloneTreatmentId + 1
val treatmentId = compositeConservationProcessEventId + 2 //The second child
val treatmentIdWithActors = treatmentId + 2 // one specific treatment to check for later
val compositeConservationProcessSingleObjectEventId = 8L
val edate = DateTime.now
"Using the conservationProcess controller" when {
"fetching conservationProcess types" should {
"return all event types" in {
val res =
wsUrl(typesUrl(mid)).withHttpHeaders(tokenRead.asHeader).get().futureValue
res.status mustBe OK
res.json.as[JsArray].value.size mustBe 10
}
}
/*
"should return NOT_FOUND on an not-existing id" in {
val res = getEvent(999, token)
println(s"res: $res")
res.status mustBe NOT_FOUND
}
*/
"should be able to get existing event without isUpdated in json" in {
//First we tried some special eventId like 666, but then the next eventId became 667, which ruined most of our tests,
//so then we instead used -1 as the test-event inserted in the database
//val cp = getEventObject(-1).asInstanceOf[ConservationProcess]
val res = getEvent(-1, token)
res.status mustBe OK
}
"working with conservationProcess" should {
"add a new conservationProcess" in {
val res = addDummyConservationProcess()
res.status mustBe CREATED // creates ids 1 to 2
(res.json \\ "id").as[Int] mustBe 1
(res.json \\ "registeredBy").asOpt[String] mustBe Some(
"d63ab290-2fab-42d2-9b57-2475dfbd0b3c"
)
}
"get a conservationProcess by it's ID" in {
val eventId = 1L
val res1 = getConservationProcess(mid, eventId)
res1.eventTypeId.underlying mustBe 1
res1.id.get.underlying mustBe 1
}
"successfully update a conservation process" in {
val jso = addDummyConservationProcess()
jso.status mustBe CREATED
val eventId = (jso.json \ "id").as[EventId]
eventId.underlying mustBe 2
val oids = Seq(
"2350578d-0bb0-4601-92d4-817478ad0952",
"c182206b-530c-4a40-b9aa-fba044ecb953",
"376d41e7-c463-45e8-9bde-7a2c9844637e"
)
val updJson = Json.obj(
"id" -> eventId,
"note" -> "Updated note",
"eventTypeId" -> conservationProcessEventTypeId, // Should not be modified by the server.
"caseNumber" -> "666",
"isUpdated" -> true,
"affectedThings" -> oids
)
val updRes = putEvent(eventId, updJson)
updRes.status mustBe OK
val mdatetime = time.dateTimeNow.plusDays(20)
(updRes.json \\ "id").as[Int] mustBe 2
(updRes.json \\ "eventTypeId").as[Int] mustBe 1
(updRes.json \\ "note").as[String] must include("Updated")
(updRes.json \\ "caseNumber").asOpt[String] mustBe Some("666")
(updRes.json \\ "affectedThings").asOpt[Seq[String]].get.length mustBe 3
}
"return FORBIDDEN when trying to update a conservation process without permissions" in {
val updJson = Json.obj(
"note" -> "Updated2 note"
)
val updRes = putEvent(2L, updJson, tokenRead)
updRes.status mustBe FORBIDDEN
}
"return not OK when update a conservation process with another eventId than" +
"the Id in JSon " in {
val jso = addDummyConservationProcess()
jso.status mustBe CREATED
val updJson = jso.json.as[JsObject] ++ Json.obj(
"id" -> 200,
"note" -> "Updated note",
"eventTypeId" -> conservationProcessEventTypeId, // Should not be modified by the server.
"isUpdated" -> true
)
val updRes = putEvent(3L, updJson)
assert(updRes.status == BAD_REQUEST)
(updRes.json \\ "message").as[String] must include("Inconsistent")
}
"add standalone treatment having data in one of the 'extra' attributes" in {
val treatmentJson = Json.obj(
"eventTypeId" -> treatmentEventTypeId,
"doneBy" -> adminId,
"completedBy" -> adminId,
"note" -> "en annen fin treatment",
"materials" -> Seq(1, 2, 3),
"affectedThings" -> Seq("2350578d-0bb0-4601-92d4-817478ad0952"),
"actorsAndRoles" -> Seq(
Json.obj(
"roleId" -> 1,
"actorId" -> adminId,
"date" -> time.dateTimeNow.plusDays(20)
),
Json.obj(
"roleId" -> 2,
"actorId" -> testUserId,
"date" -> time.dateTimeNow.plusDays(10)
)
),
"isUpdated" -> true
)
val res = postEvent(treatmentJson)
res.status mustBe CREATED
val eventId = (res.json \ "id").as[EventId]
eventId.underlying mustBe standaloneTreatmentId
val treatment = getEventObject(eventId).asInstanceOf[Treatment]
treatment.actorsAndRoles.get.length mustBe 2
val myActors = treatment.actorsAndRoles.get.sortBy(_.roleId)
myActors.head.roleId mustBe 1
}
val oids = Seq(
ObjectUUID.unsafeFromString("2350578d-0bb0-4601-92d4-817478ad0952"),
ObjectUUID.unsafeFromString("c182206b-530c-4a40-b9aa-fba044ecb953"),
ObjectUUID.unsafeFromString("376d41e7-c463-45e8-9bde-7a2c9844637e")
)
"add composite ConservationProcess (ie with children)" in {
val treatment1 = Json.obj(
"eventTypeId" -> treatmentEventTypeId,
"note" -> "en fin treatment",
"affectedThings" -> Seq("c182206b-530c-4a40-b9aa-fba044ecb953"),
"actorsAndRoles" -> Seq(
Json.obj(
"roleId" -> 1,
"actorId" -> adminId,
"date" -> time.dateTimeNow.plusDays(1)
),
Json.obj(
"roleId" -> 1,
"actorId" -> testUserId,
"date" -> time.dateTimeNow.plusDays(2)
)
),
"isUpdated" -> true
)
val treatment2 = Json.obj(
"eventTypeId" -> treatmentEventTypeId,
"note" -> "en annen fin treatment",
"materials" -> Seq(1, 2, 3),
"affectedThings" -> Seq("376d41e7-c463-45e8-9bde-7a2c9844637e"),
"actorsAndRoles" -> Seq(
Json.obj(
"roleId" -> 2,
"actorId" -> adminId,
"date" -> time.dateTimeNow.plusDays(3)
),
Json.obj(
"roleId" -> 2,
"actorId" -> testUserId,
"date" -> time.dateTimeNow.plusDays(4)
)
),
"isUpdated" -> true
)
val json = Json.obj(
"eventTypeId" -> conservationProcessEventTypeId,
"events" -> Json.arr(treatment1, treatment2),
"affectedThings" -> oids,
"actorsAndRoles" -> Seq(
Json.obj(
"roleId" -> 2,
"actorId" -> adminId,
"date" -> time.dateTimeNow.plusDays(5)
),
Json.obj(
"roleId" -> 1,
"actorId" -> testUserId,
"date" -> time.dateTimeNow.plusDays(6)
)
),
"isUpdated" -> true
)
val res = postEvent(json)
res.status mustBe CREATED
val eventId = (res.json \ "id").as[EventId]
eventId.underlying mustBe compositeConservationProcessEventId
val cpr = getEventObject(eventId).asInstanceOf[ConservationProcess]
cpr.actorsAndRoles.get.length mustBe 2
val cprActors = cpr.actorsAndRoles.get.sortBy(_.roleId)
cprActors.head.roleId mustBe 1
val trm1 = cpr.events.map { m =>
val first = m.head.id
first.map(eventId => {
val trm = getEventObject(eventId)
trm.actorsAndRoles.get.length mustBe 2
val trmActors = trm.actorsAndRoles.get.sortBy(_.roleId)
trmActors.head.roleId mustBe 1
})
}
val trm2 = cpr.events.map { m =>
val second = m.tail.head.id
second.map(eventId => {
val trm = getEventObject(eventId)
trm.actorsAndRoles.get.length mustBe 2
val trmActors = trm.actorsAndRoles.get.sortBy(_.roleId)
trmActors.head.roleId mustBe 2
})
}
}
"get composite ConservationProcess (ie with children)" in {
val res = getEvent(compositeConservationProcessEventId)
res.status mustBe OK
val consProcess = res.json.validate[ConservationProcess].get
consProcess.events.get.length must be >= 2
consProcess.registeredBy must not be None
//consProcess.affectedThings mustBe Some(oids)
consProcess.affectedThings.get.length mustBe 3
val firstEvent = consProcess.events.get.head
firstEvent.affectedThings mustBe Some(
Seq(ObjectUUID.unsafeFromString("c182206b-530c-4a40-b9aa-fba044ecb953"))
)
}
"add composite ConservationProcess with single object (ie with children)" in {
val treatment1 = Json.obj(
"eventTypeId" -> treatmentEventTypeId,
"note" -> "en fin treatment",
"affectedThings" -> Seq("c182206b-530c-4a40-b9aa-fba044ecb953"),
"actorsAndRoles" -> Seq(
Json.obj(
"roleId" -> 1,
"actorId" -> adminId,
"date" -> time.dateTimeNow.plusDays(1)
),
Json.obj(
"roleId" -> 1,
"actorId" -> testUserId,
"date" -> time.dateTimeNow.plusDays(2)
)
),
"isUpdated" -> true
)
val treatment2 = Json.obj(
"eventTypeId" -> treatmentEventTypeId,
"note" -> "en annen fin treatment",
"materials" -> Seq(1, 2, 3),
"affectedThings" -> Seq("c182206b-530c-4a40-b9aa-fba044ecb953"),
"actorsAndRoles" -> Seq(
Json.obj(
"roleId" -> 2,
"actorId" -> adminId,
"date" -> time.dateTimeNow.plusDays(3)
),
Json.obj(
"roleId" -> 2,
"actorId" -> testUserId,
"date" -> time.dateTimeNow.plusDays(4)
)
),
"isUpdated" -> true
)
val json = Json.obj(
"eventTypeId" -> conservationProcessEventTypeId,
"events" -> Json.arr(treatment1, treatment2),
"affectedThings" -> Seq("c182206b-530c-4a40-b9aa-fba044ecb953"),
"actorsAndRoles" -> Seq(
Json.obj(
"roleId" -> 2,
"actorId" -> adminId,
"date" -> time.dateTimeNow.plusDays(5)
),
Json.obj(
"roleId" -> 1,
"actorId" -> testUserId,
"date" -> time.dateTimeNow.plusDays(6)
)
),
"isUpdated" -> true
)
val res = postEvent(json)
res.status mustBe CREATED
val eventId = (res.json \ "id").as[EventId]
eventId.underlying mustBe compositeConservationProcessSingleObjectEventId
val cpr = getEventObject(compositeConservationProcessSingleObjectEventId)
.asInstanceOf[ConservationProcess]
cpr.actorsAndRoles.get.length mustBe 2
val cprActors = cpr.actorsAndRoles.get.sortBy(_.roleId)
cprActors.head.roleId mustBe 1
val trm1 = cpr.events.map { m =>
val first = m.head.id
first.map(eventId => {
val trm = getEventObject(eventId)
trm.actorsAndRoles.get.length mustBe 2
val trmActors = trm.actorsAndRoles.get.sortBy(_.roleId)
trmActors.head.roleId mustBe 1
})
}
val trm2 = cpr.events.map { m =>
val second = m.tail.head.id
second.map(eventId => {
val trm = getEventObject(eventId)
trm.actorsAndRoles.get.length mustBe 2
val trmActors = trm.actorsAndRoles.get.sortBy(_.roleId)
trmActors.head.roleId mustBe 2
})
}
}
"get composite ConservationProcess with single object (ie with children)" in {
val res = getEvent(compositeConservationProcessEventId)
res.status mustBe OK
val consProcess = res.json.validate[ConservationProcess].get
consProcess.events.get.length must be >= 2
consProcess.registeredBy must not be None
//consProcess.affectedThings mustBe Some(oids)
consProcess.affectedThings.get.length mustBe 3
val firstEvent = consProcess.events.get.head
firstEvent.affectedThings mustBe Some(
Seq(ObjectUUID.unsafeFromString("c182206b-530c-4a40-b9aa-fba044ecb953"))
)
}
"get Conservation Report" in {
val res = getConservationReport(compositeConservationProcessEventId, 99, cid)
res.status mustBe OK
/* val consProcess = res.json.validate[ConservationProcessForReport].get
consProcess.events.get.length must be >= 2
consProcess.registeredBy must not be None
//consProcess.affectedThings mustBe Some(oids)
consProcess.affectedThings.get.length mustBe 3
val firstEvent = consProcess.events.get.head
firstEvent.affectedThings mustBe Some(
Seq(ObjectUUID.unsafeFromString("c182206b-530c-4a40-b9aa-fba044ecb953"))
)*/
}
"get Conservation Report HTML" in {
val res = getConservationReportHTML(compositeConservationProcessEventId, 99, cid)
res.status mustBe OK
// println(res.body)
/* val consProcess = res.json.validate[ConservationProcessForReport].get
consProcess.events.get.length must be >= 2
consProcess.registeredBy must not be None
//consProcess.affectedThings mustBe Some(oids)
consProcess.affectedThings.get.length mustBe 3
val firstEvent = consProcess.events.get.head
firstEvent.affectedThings mustBe Some(
Seq(ObjectUUID.unsafeFromString("c182206b-530c-4a40-b9aa-fba044ecb953"))
)*/
}
"get Conservation Report HTML with single object" in {
val res = getConservationReportHTML(
compositeConservationProcessSingleObjectEventId,
99,
cid
)
res.status mustBe OK
/* val consProcess = res.json.validate[ConservationProcessForReport].get
consProcess.events.get.length must be >= 2
consProcess.registeredBy must not be None
//consProcess.affectedThings mustBe Some(oids)
consProcess.affectedThings.get.length mustBe 3
val firstEvent = consProcess.events.get.head
firstEvent.affectedThings mustBe Some(
Seq(ObjectUUID.unsafeFromString("c182206b-530c-4a40-b9aa-fba044ecb953"))
)*/
}
}
// "searching for filenames" should {
// "return a list of results matching the query paramter" in {
// val queryParam =
// (fileIds: String) => s"/99/conservation/conservationReport/attachments/$fileIds"
//
// val fakeToken = BearerToken(FakeUsers.testReadToken)
// val myurl = queryParam("096b554a-a3e6-439c-b46d-638021cb9aee")
// println("myurl: " + myurl)
//
// val res = wsUrl(myurl).withHttpHeaders(fakeToken.asHeader).get().futureValue
// res.status mustBe Status.OK
// }
// }
}
}
|
MUSIT-Norway/musit
|
service_backend/test/controllers/conservation/ConservationReportControllerSpec.scala
|
Scala
|
gpl-2.0
| 21,336 |
class MainClass {
def main(args: Array[String]) {
println("Hello, World!")
}
}
|
teodorlu/ballmer
|
WebKomScalaTesting/src/MainClass.scala
|
Scala
|
gpl-2.0
| 87 |
/*
* =========================================================================================
* Copyright © 2015 the khronus project <https://github.com/hotels-tech/khronus>
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the
* License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
* =========================================================================================
*/
package com.searchlight.khronus.service
import akka.io.IO
import com.searchlight.khronus.service.HandShakeProtocol.{ KhronusStarted, Register }
import com.searchlight.khronus.util.Settings
import spray.can.Http
trait KhronusService {
this: ActorSystemSupport ⇒
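// Wire up the HTTP front end: bind the top-level handler actor to the configured
// interface and port, register the path-specific actors with it, and announce startup
// on the actor system's event stream.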
val handlerActor = system.actorOf(KhronusHandler.props, KhronusHandler.Name)
IO(Http) ! Http.Bind(handlerActor, Settings.Http.Interface, Settings.Http.Port)
val khronusActor = system.actorOf(KhronusActor.props, KhronusActor.Name)
val versionActor = system.actorOf(VersionActor.props, VersionActor.Name)
handlerActor ! Register(KhronusActor.Path, khronusActor)
handlerActor ! Register(VersionActor.Path, versionActor)
system.eventStream.publish(KhronusStarted(handlerActor))
}
|
despegar/khronus
|
khronus/src/main/scala/com/searchlight/khronus/service/KhronusService.scala
|
Scala
|
apache-2.0
| 1,587 |
package org.http4s
import java.util.Locale
import scalaz.scalacheck.ScalazProperties
import org.http4s.parser.Rfc2616BasicRules
import org.scalacheck.Prop.forAll
import Http4s._
class MethodSpec extends Http4sSpec {
import Method._
"parses own string rendering to equal value" in {
forAll(tokens) { token => fromString(token).map(_.renderString) must be_\/-(token) }
}
"only tokens are valid methods" in {
prop { s: String => fromString(s).isRight must_== (Rfc2616BasicRules.isToken(s)) }
}
"name is case sensitive" in {
prop { m: Method => {
val upper = m.name.toUpperCase(Locale.ROOT)
val lower = m.name.toLowerCase(Locale.ROOT)
(upper != lower) ==> { fromString(upper) must_!= fromString(lower) }
}}
}
checkAll(ScalazProperties.equal.laws[Method])
"methods are equal by name" in {
prop { m: Method => Method.fromString(m.name) must be_\/-(m) }
}
"safety implies idempotence" in {
foreach(Method.registered.filter(_.isSafe)) { _.isIdempotent }
}
}
|
hvesalai/http4s
|
core/src/test/scala/org/http4s/MethodSpec.scala
|
Scala
|
apache-2.0
| 1,027 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalactic.anyvals
import org.scalactic.Resources
import scala.quoted._
/**
* Trait providing assertion methods that can be called at compile time from macros
* to validate literals in source code.
*
* <p>
* The intent of <code>CompileTimeAssertions</code> is to make it easier to create
* <code>AnyVal</code>s that restrict the values of types for which Scala supports
* literals: <code>Int</code>, <code>Long</code>, <code>Float</code>, <code>Double</code>, <code>Char</code>,
* and <code>String</code>. For example, if you are using odd integers in many places
* in your code, you might have validity checks scattered throughout your code. Here's
* an example of a method that both requires an odd <code>Int</code> is passed (as a
* <em>precondition</em>), and ensures an odd <code>Int</code> is returned (as
* a <em>postcondition</em>):
* </p>
*
* <pre class="stHighlight">
* def nextOdd(i: Int): Int = {
* def isOdd(x: Int): Boolean = x.abs % 2 == 1
* require(isOdd(i))
* (i + 2) ensuring (isOdd(_))
* }
* </pre>
*
* <p>
* If either the precondition or postcondition check fails, an exception will
* be thrown at runtime. If you have many methods like this you may want to
* create a type to represent an odd <code>Int</code>, so that the checking
* for validity errors is isolated in just one place. By using an <code>AnyVal</code>
* you can avoid boxing the <code>Int</code>, which may be more efficient.
* This might look like:
* </p>
*
* <pre class="stHighlight">
* final class OddInt private (val value: Int) extends AnyVal {
* override def toString: String = s"OddInt($value)"
* }
*
* object OddInt {
* def apply(value: Int): OddInt = {
* require(value.abs % 2 == 1)
* new OddInt(value)
* }
* }
* </pre>
*
* <p>
* An <code>AnyVal</code> cannot have any constructor code, so to ensure that
* any <code>Int</code> passed to the <code>OddInt</code> constructor is actually
* odd, the constructor must be private. That way the only way to construct a
* new <code>OddInt</code> is via the <code>apply</code> factory method in the
* <code>OddInt</code> companion object, which can require that the value be
* odd. This design eliminates the need for placing <code>require</code> and
* <code>ensuring</code> clauses anywhere else that odd <code>Int</code>s are
* needed, because the type promises the constraint. The <code>nextOdd</code>
* method could, therefore, be rewritten as:
* </p>
*
* <pre class="stHighlight">
* def nextOdd(oi: OddInt): OddInt = OddInt(oi.value + 2)
* </pre>
*
* <p>
* Using the compile-time assertions provided by this trait, you can construct
* a factory method implemented via a macro that causes a compile failure
* if <code>OddInt.apply</code> is passed anything besides an odd
* <code>Int</code> literal. Class <code>OddInt</code> would look exactly the
* same as before:
* </p>
*
* <pre class="stHighlight">
* final class OddInt private (val value: Int) extends AnyVal {
* override def toString: String = s"OddInt($value)"
* }
* </pre>
*
* <p>
* In the companion object, however, the <code>apply</code> method would
* be implemented in terms of a macro. Because the <code>apply</code> method
* will only work with literals, you'll need a second method that can work
* on any expression of type <code>Int</code>. We recommend a <code>from</code> method
* that returns an <code>Option[OddInt]</code> that returns <code>Some[OddInt]</code> if the passed <code>Int</code> is odd,
* else returns <code>None</code>, and an <code>ensuringValid</code> method that returns an <code>OddInt</code>
* if the passed <code>Int</code> is valid, else throws <code>AssertionError</code>.
* </p>
*
* <pre class="stHighlight">
* object OddInt {
*
* // The from factory method validates at run time
* def from(value: Int): Option[OddInt] =
* if (OddIntMacro.isValid(value)) Some(new OddInt(value)) else None
*
* // The ensuringValid factory method validates at run time, but throws
* // an AssertionError if invalid
* def ensuringValid(value: Int): OddInt =
* if (OddIntMacro.isValid(value)) new OddInt(value) else {
* throw new AssertionError(s"$value was not a valid OddInt")
* }
*
* // The apply factory method validates at compile time
* import scala.language.experimental.macros
* def apply(value: Int): OddInt = macro OddIntMacro.apply
* }
* </pre>
*
* <p>
* The <code>apply</code> method refers to a macro implementation method in class
* <code>OddIntMacro</code>. The macro implementation of any such method can look
* very similar to this one. The only changes you'd need to make is the
* <code>isValid</code> method implementation and the text of the error messages.
* </p>
*
* <pre class="stHighlight">
* import org.scalactic.anyvals.CompileTimeAssertions
* import reflect.macros.Context
*
* object OddIntMacro extends CompileTimeAssertions {
*
* // Validation method used at both compile- and run-time
* def isValid(i: Int): Boolean = i.abs % 2 == 1
*
* // Apply macro that performs a compile-time assertion
* def apply(c: Context)(value: c.Expr[Int]): c.Expr[OddInt] = {
*
* // Prepare potential compiler error messages
* val notValidMsg = "OddInt.apply can only be invoked on odd Int literals, like OddInt(3)."
* val notLiteralMsg = "OddInt.apply can only be invoked on Int literals, like " +
* "OddInt(3). Please use OddInt.from instead."
*
* // Validate via a compile-time assertion
* ensureValidIntLiteral(c)(value, notValidMsg, notLiteralMsg)(isValid)
*
* // Validated, so rewrite the apply call to a from call
* c.universe.reify { OddInt.ensuringValid(value.splice) }
* }
* }
* </pre>
*
* <p>
* The <code>isValid</code> method just takes the underlying type and returns <code>true</code> if it is valid,
* else <code>false</code>. This method is placed here so the same validation code can be used both in
* the <code>from</code> method at runtime and the <code>apply</code> macro at compile time. The <code>apply</code>
* actually does just two things. It calls a <code>ensureValidIntLiteral</code>, performing a compile-time assertion
* that value passed to <code>apply</code> is an <code>Int</code> literal that is valid (in this case, odd).
* If the assertion fails, <code>ensureValidIntLiteral</code> will complete abruptly with an exception that will
* contain an appropriate error message (one of the two you passed in) and cause a compiler error with that message.
* If the assertion succeeds, <code>ensureValidIntLiteral</code> will just return normally. The next line of code
* will then execute. This line of code must construct an AST (abstract syntax tree) of code that will replace
* the <code>OddInt.apply</code> invocation. We invoke the other factory method that either returns an <code>OddInt</code>
* or throws an <code>AssertionError</code>, since we've proven at compile time that the call will succeed.
* </p>
*
* <p>
* You may wish to use quasi-quotes instead of reify. The reason we use reify is that this also works on 2.10 without
* any additional plugin (i.e., you don't need macro paradise), and Scalactic supports 2.10.
* </p>
*/
trait CompileTimeAssertions {
/**
* Ensures a given expression of type <code>Int</code> is a literal with a valid value according to a given validation function.
*
* <p>
* If the given <code>Int</code> expression is a literal whose value satisfies the given validation function, this method will
* return normally. Otherwise, if the given <code>Int</code> expression is not a literal, this method will complete abruptly with
* an exception whose detail message includes the <code>String</code> passed as <code>notLiteralMsg</code>. Otherwise, the
* given <code>Int</code> expression is a literal that does <em>not</em> satisfy the given validation function, so this method will
* complete abruptly with an exception whose detail message includes the <code>String</code> passed as <code>notValidMsg</code>.
* </p>
*
* <p>
* This method is intended to be invoked at compile time from macros. When called from a macro, exceptions thrown by this method
* will result in compiler errors. The detail message of the thrown exception will appear as the compiler error message.
* </p>
*
* @param qctx the quote context in which this compile-time assertion is evaluated
* @param value the <code>Int</code> expression to validate
* @param notValidMsg a <code>String</code> message to include in the exception thrown if the expression is a literal, but not valid
* @param notLiteralMsg a <code>String</code> message to include in the exception thrown if the expression is not a literal
* @param isValid a function used to validate a literal value parsed from the given expression
*/
def ensureValidIntLiteral(value: Expr[Int], notValidMsg: String, notLiteralMsg: String)(isValid: Int => Boolean)(implicit qctx: QuoteContext): Unit = {
import qctx.tasty._
value.unseal.underlyingArgument match {
case Literal(intConst) =>
val literalValue = intConst.value.toString.toInt
if (!isValid(literalValue))
error(notValidMsg, value.unseal.pos)
case _ =>
error(notLiteralMsg, value.unseal.pos)
}
}
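// A minimal sketch (not part of this trait) of how the OddInt example from the
// documentation above could drive ensureValidIntLiteral through this Scala 3 style
// API. OddInt, OddIntMacro and the exact inline/splicing syntax are illustrative
// assumptions; the details vary between Dotty releases.
//
// object OddIntMacro extends CompileTimeAssertions {
//   def isValid(i: Int): Boolean = i.abs % 2 == 1
//
//   def applyImpl(value: Expr[Int])(implicit qctx: QuoteContext): Expr[OddInt] = {
//     ensureValidIntLiteral(
//       value,
//       "OddInt.apply can only be invoked on odd Int literals, like OddInt(3).",
//       "OddInt.apply can only be invoked on Int literals, like OddInt(3). " +
//         "Please use OddInt.from instead.")(isValid)
//     // Validated at compile time, so rewrite the call to the runtime-checked factory.
//     '{ OddInt.ensuringValid($value) }
//   }
// }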
/**
* Ensures a given expression of type <code>Long</code> is a literal with a valid value according to a given validation function.
*
* <p>
* If the given <code>Long</code> expression is a literal whose value satisfies the given validation function, this method will
* return normally. Otherwise, if the given <code>Long</code> expression is not a literal, this method will complete abruptly with
* an exception whose detail message includes the <code>String</code> passed as <code>notLiteralMsg</code>. Otherwise, the
* given <code>Long</code> expression is a literal that does <em>not</em> satisfy the given validation function, so this method will
* complete abruptly with an exception whose detail message includes the <code>String</code> passed as <code>notValidMsg</code>.
* </p>
*
* <p>
* This method is intended to be invoked at compile time from macros. When called from a macro, exceptions thrown by this method
* will result in compiler errors. The detail message of the thrown exception will appear as the compiler error message.
* </p>
*
* @param qctx the quote context in which this compile-time assertion is evaluated
* @param value the <code>Long</code> expression to validate
* @param notValidMsg a <code>String</code> message to include in the exception thrown if the expression is a literal, but not valid
* @param notLiteralMsg a <code>String</code> message to include in the exception thrown if the expression is not a literal
* @param isValid a function used to validate a literal value parsed from the given expression
*/
def ensureValidLongLiteral(value: Expr[Long], notValidMsg: String, notLiteralMsg: String)(isValid: Long => Boolean)(implicit qctx: QuoteContext): Unit = {
import qctx.tasty._
value.unseal.underlyingArgument match {
case Literal(longConst) =>
val literalValue = longConst.value.toString.toLong
if (!isValid(literalValue))
error(notValidMsg, value.unseal.pos)
case _ =>
error(notLiteralMsg, value.unseal.pos)
}
}
/**
* Ensures a given expression of type <code>Float</code> is a literal with a valid value according to a given validation function.
*
* <p>
* If the given <code>Float</code> expression is a literal whose value satisfies the given validation function, this method will
* return normally. Otherwise, if the given <code>Float</code> expression is not a literal, this method will complete abruptly with
* an exception whose detail message includes the <code>String</code> passed as <code>notLiteralMsg</code>. Otherwise, the
* given <code>Float</code> expression is a literal that does <em>not</em> satisfy the given validation function, so this method will
* complete abruptly with an exception whose detail message includes the <code>String</code> passed as <code>notValidMsg</code>.
* </p>
*
* <p>
* This method is intended to be invoked at compile time from macros. When called from a macro, exceptions thrown by this method
* will result in compiler errors. The detail message of the thrown exception will appear as the compiler error message.
* </p>
*
* @param qctx the quote context in which this compile-time assertion is evaluated
* @param value the <code>Float</code> expression to validate
* @param notValidMsg a <code>String</code> message to include in the exception thrown if the expression is a literal, but not valid
* @param notLiteralMsg a <code>String</code> message to include in the exception thrown if the expression is not a literal
* @param isValid a function used to validate a literal value parsed from the given expression
*/
def ensureValidFloatLiteral(value: Expr[Float], notValidMsg: String, notLiteralMsg: String)(isValid: Float => Boolean)(implicit qctx: QuoteContext): Unit = {
import qctx.tasty._
value.unseal.underlyingArgument match {
case Literal(floatConst) =>
val literalValue = floatConst.value.toString.toFloat
if (!isValid(literalValue))
error(notValidMsg, value.unseal.pos)
case _ =>
error(notLiteralMsg, value.unseal.pos)
}
}
/**
* Ensures a given expression of type <code>Double</code> is a literal with a valid value according to a given validation function.
*
* <p>
* If the given <code>Double</code> expression is a literal whose value satisfies the given validation function, this method will
* return normally. Otherwise, if the given <code>Double</code> expression is not a literal, this method will complete abruptly with
* an exception whose detail message includes the <code>String</code> passed as <code>notLiteralMsg</code>. Otherwise, the
* given <code>Double</code> expression is a literal that does <em>not</em> satisfy the given validation function, so this method will
* complete abruptly with an exception whose detail message includes the <code>String</code> passed as <code>notValidMsg</code>.
* </p>
*
* <p>
* This method is intended to be invoked at compile time from macros. When called from a macro, exceptions thrown by this method
* will result in compiler errors. The detail message of the thrown exception will appear as the compiler error message.
* </p>
*
* @param qctx the quote context in which this compile-time assertion is evaluated
* @param value the <code>Double</code> expression to validate
* @param notValidMsg a <code>String</code> message to include in the exception thrown if the expression is a literal, but not valid
* @param notLiteralMsg a <code>String</code> message to include in the exception thrown if the expression is not a literal
* @param isValid a function used to validate a literal value parsed from the given expression
*/
def ensureValidDoubleLiteral(value: Expr[Double], notValidMsg: String, notLiteralMsg: String)(isValid: Double => Boolean)(implicit qctx: QuoteContext): Unit = {
import qctx.tasty._
value.unseal.underlyingArgument match {
case Literal(doubleConst) =>
val literalValue = doubleConst.value.toString.toDouble
if (!isValid(literalValue))
error(notValidMsg, value.unseal.pos)
case _ =>
error(notLiteralMsg, value.unseal.pos)
}
}
/**
* Ensures a given expression of type <code>String</code> is a literal with a valid value according to a given validation function.
*
* <p>
* If the given <code>String</code> expression is a literal whose value satisfies the given validation function, this method will
* return normally. Otherwise, if the given <code>String</code> expression is not a literal, this method will complete abruptly with
* an exception whose detail message includes the <code>String</code> passed as <code>notLiteralMsg</code>. Otherwise, the
* given <code>String</code> expression is a literal that does <em>not</em> satisfy the given validation function, so this method will
* complete abruptly with an exception whose detail message includes the <code>String</code> passed as <code>notValidMsg</code>.
* </p>
*
* <p>
* This method is intended to be invoked at compile time from macros. When called from a macro, exceptions thrown by this method
* will result in compiler errors. The detail message of the thrown exception will appear as the compiler error message.
* </p>
*
* @param qctx the quote context in which this compile-time assertion is evaluated
* @param value the <code>String</code> expression to validate
* @param notValidMsg a <code>String</code> message to include in the exception thrown if the expression is a literal, but not valid
* @param notLiteralMsg a <code>String</code> message to include in the exception thrown if the expression is not a literal
* @param isValid a function used to validate a literal value parsed from the given expression
*/
def ensureValidStringLiteral(value: Expr[String], notValidMsg: String, notLiteralMsg: String)(isValid: String => Boolean)(implicit qctx: QuoteContext): Unit = {
import qctx.tasty._
value.unseal.underlyingArgument match {
case Literal(stringConst) =>
val literalValue = stringConst.value.toString
if (!isValid(literalValue))
error(notValidMsg, value.unseal.pos)
case _ =>
error(notLiteralMsg, value.unseal.pos)
}
}
/**
* Ensures a given expression of type <code>Char</code> is a literal with a valid value according to a given validation function.
*
* <p>
* If the given <code>Char</code> expression is a literal whose value satisfies the given validation function, this method will
* return normally. Otherwise, if the given <code>Char</code> expression is not a literal, this method will complete abruptly with
* an exception whose detail message includes the <code>String</code> passed as <code>notLiteralMsg</code>. Otherwise, the
* given <code>Char</code> expression is a literal that does <em>not</em> satisfy the given validation function, so this method will
* complete abruptly with an exception whose detail message includes the <code>String</code> passed as <code>notValidMsg</code>.
* </p>
*
* <p>
* This method is intended to be invoked at compile time from macros. When called from a macro, exceptions thrown by this method
* will result in compiler errors. The detail message of the thrown exception will appear as the compiler error message.
* </p>
*
* @param qctx the quote context in which this compile-time assertion is evaluated
* @param value the <code>Char</code> expression to validate
* @param notValidMsg a <code>String</code> message to include in the exception thrown if the expression is a literal, but not valid
* @param notLiteralMsg a <code>String</code> message to include in the exception thrown if the expression is not a literal
* @param isValid a function used to validate a literal value parsed from the given expression
*/
def ensureValidCharLiteral(value: Expr[Char], notValidMsg: String, notLiteralMsg: String)(isValid: Char => Boolean)(implicit qctx: QuoteContext): Unit = {
import qctx.tasty._
value.unseal.underlyingArgument match {
case Literal(charConst) =>
val literalValue = charConst.value.toString.head
if (!isValid(literalValue))
error(notValidMsg, value.unseal.pos)
case _ =>
error(notLiteralMsg, value.unseal.pos)
}
}
}
/**
* Companion object that facilitates the importing of <code>CompileTimeAssertions</code> members as
* an alternative to mixing in the trait.
*/
object CompileTimeAssertions extends CompileTimeAssertions
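// Illustrative usage (assumed, not part of this file): a macro object can either mix in
// the trait, as OddIntMacro does in the documentation above, or simply
// `import CompileTimeAssertions._` and call ensureValidIntLiteral and friends directly.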
|
dotty-staging/scalatest
|
scalactic.dotty/src/main/scala/org/scalactic/anyvals/CompileTimeAssertions.scala
|
Scala
|
apache-2.0
| 20,457 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.bg.test
import java.util.concurrent.TimeoutException
import cmwell.fts._
import cmwell.util.concurrent.SimpleScheduler
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.Logger
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
/**
* Created by israel on 06/03/2017.
*/
object FailingFTSServiceMockup {
def apply(esClasspathYml:String, errorModuloDivisor:Int) = new FailingFTSServiceMockup(ConfigFactory.load(), esClasspathYml, errorModuloDivisor)
}
class FailingFTSServiceMockup(config: Config, esClasspathYaml: String, errorModuloDivisor:Int) extends FTSServiceNew(config, esClasspathYaml) {
var errorModuloDividend = 0
var errorCount = 0
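// Failure injection strategy: every errorModuloDivisor-th call is rejected outright,
// while calls whose counter lands on remainder 2 are delayed or answered with a
// TimeoutException for as long as errorCount stays at or below 2, simulating a slow
// or flaky Elasticsearch.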
override def executeIndexRequests(indexRequests: Iterable[ESIndexRequest])
(implicit executionContext: ExecutionContext, logger:Logger = loger): Future[BulkIndexResult] = {
errorModuloDividend += 1
if(errorModuloDividend % errorModuloDivisor == 0)
Future.successful(new RejectedBulkIndexResult("fake"))
else if(errorModuloDividend % errorModuloDivisor == 2 && errorCount <=2)
SimpleScheduler.scheduleFuture(15.seconds)(super.executeIndexRequests(indexRequests))
else
super.executeIndexRequests(indexRequests)
}
/**
* execute bulk index requests
*
* @param indexRequests the index requests to execute as a single bulk operation
* @param numOfRetries maximum number of times to retry a failed bulk request
* @param waitBetweenRetries how long to wait between consecutive retries
* @param executionContext the execution context used for the asynchronous calls
* @return a future completed with the result of the bulk index operation
*/
override def executeBulkIndexRequests(indexRequests: Iterable[ESIndexRequest], numOfRetries: Int,
waitBetweenRetries: Long)
(implicit executionContext: ExecutionContext, logger:Logger = loger) = {
errorModuloDividend += 1
logger info s"executeBulkIndexRequests: errorModuloDividend=$errorModuloDividend"
if(errorModuloDividend % errorModuloDivisor == 2 && errorCount <=2 ) {
errorCount += 1
logger info s"delaying response"
throw new TimeoutException("fake")
}
else {
logger info "forwarding to real ftsservice"
super.executeBulkIndexRequests(indexRequests, numOfRetries, waitBetweenRetries)
}
}
}
|
bryaakov/CM-Well
|
server/cmwell-bg/src/test/scala/cmwell/bg/test/FailingFTSServiceMockup.scala
|
Scala
|
apache-2.0
| 2,865 |
/**
* Generated by Scrooge
* version: ?
* rev: ?
* built at: ?
*/
package com.twitter.scrooge.test.gold.thriftscala
import com.twitter.io.Buf
import com.twitter.scrooge.{
InvalidFieldsException,
LazyTProtocol,
StructBuilder,
StructBuilderFactory,
TFieldBlob,
ThriftStruct,
ThriftStructCodec3,
ThriftStructField,
ThriftStructFieldInfo,
ThriftStructMetaData,
ValidatingThriftStruct,
ValidatingThriftStructCodec3
}
import com.twitter.scrooge.adapt.{AccessRecorder, AdaptTProtocol, Decoder}
import org.apache.thrift.protocol._
import org.apache.thrift.transport.TMemoryBuffer
import scala.collection.immutable.{Map => immutable$Map}
import scala.collection.mutable.Builder
import scala.reflect.{ClassTag, classTag}
object Response extends ValidatingThriftStructCodec3[Response] with StructBuilderFactory[Response] {
val Struct: TStruct = new TStruct("Response")
val StatusCodeField: TField = new TField("statusCode", TType.I32, 1)
val StatusCodeFieldManifest: Manifest[Int] = manifest[Int]
val ResponseUnionField: TField = new TField("responseUnion", TType.STRUCT, 2)
val ResponseUnionFieldManifest: Manifest[com.twitter.scrooge.test.gold.thriftscala.ResponseUnion] = manifest[com.twitter.scrooge.test.gold.thriftscala.ResponseUnion]
/**
* Field information in declaration order.
*/
lazy val fieldInfos: scala.List[ThriftStructFieldInfo] = scala.List[ThriftStructFieldInfo](
new ThriftStructFieldInfo(
StatusCodeField,
false,
false,
StatusCodeFieldManifest,
_root_.scala.None,
_root_.scala.None,
immutable$Map.empty[String, String],
immutable$Map.empty[String, String],
None,
_root_.scala.Option(0)
),
new ThriftStructFieldInfo(
ResponseUnionField,
false,
false,
ResponseUnionFieldManifest,
_root_.scala.None,
_root_.scala.None,
immutable$Map.empty[String, String],
immutable$Map.empty[String, String],
None,
_root_.scala.Option(com.twitter.scrooge.test.gold.thriftscala.ResponseUnion.unsafeEmpty)
)
)
lazy val structAnnotations: immutable$Map[String, String] =
immutable$Map[String, String](
("com.twitter.scrooge.scala.generateStructProxy", "true")
)
private val fieldTypes: IndexedSeq[ClassTag[_]] = IndexedSeq[ClassTag[_]](
classTag[Int].asInstanceOf[ClassTag[_]],
classTag[com.twitter.scrooge.test.gold.thriftscala.ResponseUnion].asInstanceOf[ClassTag[_]]
)
private[this] val structFields: Seq[ThriftStructField[Response]] = Seq[ThriftStructField[Response]](
new ThriftStructField[Response](
StatusCodeField,
_root_.scala.Some(StatusCodeFieldManifest),
classOf[Response]) {
def getValue[R](struct: Response): R = struct.statusCode.asInstanceOf[R]
},
new ThriftStructField[Response](
ResponseUnionField,
_root_.scala.Some(ResponseUnionFieldManifest),
classOf[Response]) {
def getValue[R](struct: Response): R = struct.responseUnion.asInstanceOf[R]
}
)
override lazy val metaData: ThriftStructMetaData[Response] =
ThriftStructMetaData(this, structFields, fieldInfos, Nil, structAnnotations)
/**
* Checks that all required fields are non-null.
*/
def validate(_item: Response): Unit = {
}
/**
* Checks that the struct is a valid as a new instance. If there are any missing required or
* construction required fields, return a non-empty list.
*/
def validateNewInstance(item: Response): scala.Seq[com.twitter.scrooge.validation.Issue] = {
val buf = scala.collection.mutable.ListBuffer.empty[com.twitter.scrooge.validation.Issue]
buf ++= validateField(item.statusCode)
buf ++= validateField(item.responseUnion)
buf.toList
}
/**
* Validate that all validation annotations on the struct meet the criteria defined in the
* corresponding [[com.twitter.scrooge.validation.ThriftConstraintValidator]].
*/
def validateInstanceValue(item: Response): Set[com.twitter.scrooge.thrift_validation.ThriftValidationViolation] = {
val violations = scala.collection.mutable.Set.empty[com.twitter.scrooge.thrift_validation.ThriftValidationViolation]
violations ++= validateFieldValue("statusCode", item.statusCode, fieldInfos.apply(0).fieldAnnotations, scala.None)
violations ++= validateFieldValue("responseUnion", item.responseUnion, fieldInfos.apply(1).fieldAnnotations, scala.None)
violations.toSet
}
def withoutPassthroughFields(original: Response): Response =
new Immutable(
statusCode = original.statusCode,
responseUnion =
{
val field = original.responseUnion
com.twitter.scrooge.test.gold.thriftscala.ResponseUnion.withoutPassthroughFields(field)
}
)
lazy val unsafeEmpty: Response = {
val statusCode: Int = 0
val responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = com.twitter.scrooge.test.gold.thriftscala.ResponseUnion.unsafeEmpty
new Immutable(
statusCode,
responseUnion,
_root_.com.twitter.scrooge.internal.TProtocols.NoPassthroughFields
)
}
def newBuilder(): StructBuilder[Response] = new ResponseStructBuilder(_root_.scala.None, fieldTypes)
override def encode(_item: Response, _oproto: TProtocol): Unit = {
_item.write(_oproto)
}
@volatile private[this] var adaptiveDecoder: Decoder[Response] = _
private[this] val accessRecordingDecoderBuilder: AccessRecorder => Decoder[Response] = { accessRecorder =>
new Decoder[Response] {
def apply(prot: AdaptTProtocol): Response = new AccessRecordingWrapper(decodeInternal(prot, true), accessRecorder)
}
}
private[this] val fallbackDecoder = new Decoder[Response] {
def apply(prot: AdaptTProtocol): Response = decodeInternal(prot, true)
}
private[this] def adaptiveDecode(_iprot: AdaptTProtocol): Response = {
val adaptContext = _iprot.adaptContext
val reloadRequired = adaptContext.shouldReloadDecoder
synchronized {
if ((adaptiveDecoder eq null) || reloadRequired) {
adaptiveDecoder = adaptContext.buildDecoder(this, fallbackDecoder, accessRecordingDecoderBuilder)
}
}
adaptiveDecoder(_iprot)
}
/**
* AccessRecordingWrapper keeps track of fields that are accessed while
* delegating to underlying struct.
*/
private[this] class AccessRecordingWrapper(underlying: Response, accessRecorder: AccessRecorder) extends Response {
override def statusCode: Int = {
accessRecorder.fieldAccessed(1)
underlying.statusCode
}
override def responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = {
accessRecorder.fieldAccessed(2)
underlying.responseUnion
}
override def write(_oprot: TProtocol): Unit = underlying.write(_oprot)
override def _passthroughFields: immutable$Map[Short, TFieldBlob] = underlying._passthroughFields
}
override def decode(_iprot: TProtocol): Response = {
if (_iprot.isInstanceOf[LazyTProtocol]) {
decodeInternal(_iprot, true)
} else if (_iprot.isInstanceOf[AdaptTProtocol]) {
adaptiveDecode(_iprot.asInstanceOf[AdaptTProtocol])
} else {
decodeInternal(_iprot, false)
}
}
private[thriftscala] def eagerDecode(_iprot: TProtocol): Response = {
decodeInternal(_iprot, false)
}
private[this] def decodeInternal(_iprot: TProtocol, lazily: Boolean): Response = {
var statusCode: Int = 0
var responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = null
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
val _start_offset = if (lazily) _iprot.asInstanceOf[LazyTProtocol].offset else -1
_iprot.readStructBegin()
do {
val _field = _iprot.readFieldBegin()
val _fieldType = _field.`type`
if (_fieldType == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 =>
_root_.com.twitter.scrooge.internal.TProtocols.validateFieldType(TType.I32, _fieldType, "statusCode")
statusCode = _iprot.readI32()
case 2 =>
_root_.com.twitter.scrooge.internal.TProtocols.validateFieldType(TType.STRUCT, _fieldType, "responseUnion")
responseUnion = com.twitter.scrooge.test.gold.thriftscala.ResponseUnion.decode(_iprot)
case _ =>
_passthroughFields = _root_.com.twitter.scrooge.internal.TProtocols.readPassthroughField(_iprot, _field, _passthroughFields)
}
_iprot.readFieldEnd()
}
} while (!_done)
_iprot.readStructEnd()
val _passthroughFieldsResult =
if (_passthroughFields eq null) _root_.com.twitter.scrooge.internal.TProtocols.NoPassthroughFields
else _passthroughFields.result()
if (lazily) {
val _lazyProt = _iprot.asInstanceOf[LazyTProtocol]
new LazyImmutable(
_lazyProt,
_lazyProt.buffer,
_start_offset,
_lazyProt.offset,
statusCode,
responseUnion,
_passthroughFieldsResult
)
} else {
new Immutable(
statusCode,
responseUnion,
_passthroughFieldsResult
)
}
}
def apply(
statusCode: Int,
responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion
): Response =
new Immutable(
statusCode,
responseUnion
)
def unapply(_item: Response): _root_.scala.Option[_root_.scala.Tuple2[Int, com.twitter.scrooge.test.gold.thriftscala.ResponseUnion]] = _root_.scala.Some(_item.toTuple)
object Immutable extends ThriftStructCodec3[Response] {
override def encode(_item: Response, _oproto: TProtocol): Unit = { _item.write(_oproto) }
override def decode(_iprot: TProtocol): Response = Response.decode(_iprot)
override lazy val metaData: ThriftStructMetaData[Response] = Response.metaData
}
/**
* The default read-only implementation of Response. You typically should not need to
* directly reference this class; instead, use the Response.apply method to construct
* new instances.
*/
class Immutable(
val statusCode: Int,
val responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion,
override val _passthroughFields: immutable$Map[Short, TFieldBlob])
extends Response {
def this(
statusCode: Int,
responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion
) = this(
statusCode,
responseUnion,
immutable$Map.empty[Short, TFieldBlob]
)
}
/**
* This is another Immutable, this however keeps strings as lazy values that are lazily decoded from the backing
* array byte on read.
*/
private[this] class LazyImmutable(
_proto: LazyTProtocol,
_buf: Array[Byte],
_start_offset: Int,
_end_offset: Int,
val statusCode: Int,
val responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion,
override val _passthroughFields: immutable$Map[Short, TFieldBlob])
extends Response {
override def write(_oprot: TProtocol): Unit = {
if (_oprot.isInstanceOf[LazyTProtocol]) {
_oprot.asInstanceOf[LazyTProtocol].writeRaw(_buf, _start_offset, _end_offset - _start_offset)
} else {
super.write(_oprot)
}
}
/**
* Override the super hash code to make it a lazy val rather than def.
*
* Calculating the hash code can be expensive, caching it where possible
* can provide significant performance wins. (Key in a hash map for instance)
* Usually not safe since the normal constructor will accept a mutable map or
* set as an arg
* Here however we control how the class is generated from serialized data.
* With the class private and the contract that we throw away our mutable references
* having the hash code lazy here is safe.
*/
override lazy val hashCode: Int = super.hashCode
}
/**
* This Proxy trait allows you to extend the Response trait with additional state or
* behavior and implement the read-only methods from Response using an underlying
* instance.
*/
trait Proxy extends Response {
protected def _underlying_Response: Response
override def statusCode: Int = _underlying_Response.statusCode
override def responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = _underlying_Response.responseUnion
override def _passthroughFields: immutable$Map[Short, TFieldBlob] = _underlying_Response._passthroughFields
}
}
/**
* Prefer the companion object's [[com.twitter.scrooge.test.gold.thriftscala.Response.apply]]
* for construction if you don't need to specify passthrough fields.
*/
trait Response
extends ThriftStruct
with _root_.scala.Product2[Int, com.twitter.scrooge.test.gold.thriftscala.ResponseUnion]
with ValidatingThriftStruct[Response]
with java.io.Serializable
{
import Response._
def statusCode: Int
def responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion
def _passthroughFields: immutable$Map[Short, TFieldBlob] = immutable$Map.empty
def _1: Int = statusCode
def _2: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = responseUnion
def toTuple: _root_.scala.Tuple2[Int, com.twitter.scrooge.test.gold.thriftscala.ResponseUnion] =
_root_.scala.Tuple2[Int, com.twitter.scrooge.test.gold.thriftscala.ResponseUnion](
statusCode,
responseUnion
)
/**
* Gets a field value encoded as a binary blob using TCompactProtocol. If the specified field
* is present in the passthrough map, that value is returned. Otherwise, if the specified field
* is known and not optional and set to None, then the field is serialized and returned.
*/
def getFieldBlob(_fieldId: Short): _root_.scala.Option[TFieldBlob] = {
val passedthroughValue = _passthroughFields.get(_fieldId)
if (passedthroughValue.isDefined) {
passedthroughValue
} else {
val _buff = new TMemoryBuffer(32)
val _oprot = new TCompactProtocol(_buff)
val _fieldOpt: _root_.scala.Option[TField] = _fieldId match {
case 1 =>
_oprot.writeI32(statusCode)
_root_.scala.Some(Response.StatusCodeField)
case 2 =>
if (responseUnion ne null) {
responseUnion.write(_oprot)
_root_.scala.Some(Response.ResponseUnionField)
} else {
_root_.scala.None
}
case _ => _root_.scala.None
}
if (_fieldOpt.isDefined) {
_root_.scala.Some(TFieldBlob(_fieldOpt.get, Buf.ByteArray.Owned(_buff.getArray)))
} else {
_root_.scala.None
}
}
}
/**
* Collects TCompactProtocol-encoded field values according to `getFieldBlob` into a map.
*/
def getFieldBlobs(ids: TraversableOnce[Short]): immutable$Map[Short, TFieldBlob] =
(ids.flatMap { id => getFieldBlob(id).map { fieldBlob => (id, fieldBlob) } }).toMap
/**
* Sets a field using a TCompactProtocol-encoded binary blob. If the field is a known
* field, the blob is decoded and the field is set to the decoded value. If the field
* is unknown and passthrough fields are enabled, then the blob will be stored in
* _passthroughFields.
*/
def setField(_blob: TFieldBlob): Response = {
var statusCode: Int = this.statusCode
var responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = this.responseUnion
var _passthroughFields = this._passthroughFields
val _iprot = _blob.read
_blob.id match {
case 1 =>
statusCode = _iprot.readI32()
case 2 =>
responseUnion = com.twitter.scrooge.test.gold.thriftscala.ResponseUnion.decode(_iprot)
case _ => _passthroughFields += _root_.scala.Tuple2(_blob.id, _blob)
}
new Immutable(
statusCode,
responseUnion,
_passthroughFields
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetField(_fieldId: Short): Response = {
var statusCode: Int = this.statusCode
var responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = this.responseUnion
_fieldId match {
case 1 =>
statusCode = 0
case 2 =>
responseUnion = null
case _ =>
}
new Immutable(
statusCode,
responseUnion,
_passthroughFields - _fieldId
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetStatusCode: Response = unsetField(1)
def unsetResponseUnion: Response = unsetField(2)
override def write(_oprot: TProtocol): Unit = {
Response.validate(this)
_oprot.writeStructBegin(Struct)
_oprot.writeFieldBegin(StatusCodeField)
_oprot.writeI32(statusCode)
_oprot.writeFieldEnd()
if (responseUnion ne null) {
_oprot.writeFieldBegin(ResponseUnionField)
responseUnion.write(_oprot)
_oprot.writeFieldEnd()
}
_root_.com.twitter.scrooge.internal.TProtocols.finishWritingStruct(_oprot, _passthroughFields)
}
def copy(
statusCode: Int = this.statusCode,
responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = this.responseUnion,
_passthroughFields: immutable$Map[Short, TFieldBlob] = this._passthroughFields
): Response =
new Immutable(
statusCode,
responseUnion,
_passthroughFields
)
override def canEqual(other: Any): Boolean = other.isInstanceOf[Response]
private[this] def _equals(other: Response): Boolean =
this.productArity == other.productArity &&
this.productIterator.sameElements(other.productIterator) &&
this._passthroughFields == other._passthroughFields
override def equals(other: Any): Boolean =
canEqual(other) && _equals(other.asInstanceOf[Response])
override def hashCode: Int = {
_root_.scala.runtime.ScalaRunTime._hashCode(this)
}
override def toString: String = _root_.scala.runtime.ScalaRunTime._toString(this)
override def productPrefix: String = "Response"
def _codec: ValidatingThriftStructCodec3[Response] = Response
def newBuilder(): StructBuilder[Response] = new ResponseStructBuilder(_root_.scala.Some(this), fieldTypes)
}
private[thriftscala] class ResponseStructBuilder(instance: _root_.scala.Option[Response], fieldTypes: IndexedSeq[ClassTag[_]])
extends StructBuilder[Response](fieldTypes) {
def build(): Response = {
val _fieldArray = fieldArray // shadow variable
if (instance.isDefined) {
val instanceValue = instance.get
Response(
if (_fieldArray(0) == null) instanceValue.statusCode else _fieldArray(0).asInstanceOf[Int],
if (_fieldArray(1) == null) instanceValue.responseUnion else _fieldArray(1).asInstanceOf[com.twitter.scrooge.test.gold.thriftscala.ResponseUnion]
)
} else {
if (genericArrayOps(_fieldArray).contains(null)) throw new InvalidFieldsException(structBuildError("Response"))
Response(
_fieldArray(0).asInstanceOf[Int],
_fieldArray(1).asInstanceOf[com.twitter.scrooge.test.gold.thriftscala.ResponseUnion]
)
}
}
}
private class Response__AdaptDecoder {
def decode(_iprot: AdaptTProtocol): Response = {
import Response._
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
val _start_offset = _iprot.offset
val adapt = new Response__Adapt(
_iprot,
_iprot.buffer,
_start_offset)
AdaptTProtocol.usedStartMarker(1)
var statusCode: Int = 0
AdaptTProtocol.usedEndMarker(1)
AdaptTProtocol.usedStartMarker(2)
var responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = null
AdaptTProtocol.usedEndMarker(2)
_iprot.readStructBegin()
do {
val _field = _iprot.readFieldBegin()
val _fieldType = _field.`type`
if (_field.`type` == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 => {
if (_fieldType == TType.I32) {
AdaptTProtocol.usedStartMarker(1)
statusCode = _iprot.readI32()
AdaptTProtocol.usedEndMarker(1)
AdaptTProtocol.unusedStartMarker(1)
_iprot.offsetSkipI32()
AdaptTProtocol.unusedEndMarker(1)
} else {
throw AdaptTProtocol.unexpectedTypeException(
TType.I32,
_fieldType,
"statusCode"
)
}
AdaptTProtocol.usedStartMarker(1)
adapt.set_statusCode(statusCode)
AdaptTProtocol.usedEndMarker(1)
}
case 2 => {
if (_fieldType == TType.STRUCT) {
AdaptTProtocol.usedStartMarker(2)
responseUnion = com.twitter.scrooge.test.gold.thriftscala.ResponseUnion.decode(_iprot)
AdaptTProtocol.usedEndMarker(2)
AdaptTProtocol.unusedStartMarker(2)
_iprot.offsetSkipStruct()
AdaptTProtocol.unusedEndMarker(2)
} else {
throw AdaptTProtocol.unexpectedTypeException(
TType.STRUCT,
_fieldType,
"responseUnion"
)
}
AdaptTProtocol.usedStartMarker(2)
adapt.set_responseUnion(responseUnion)
AdaptTProtocol.usedEndMarker(2)
}
case _ =>
if (_passthroughFields eq null)
_passthroughFields = immutable$Map.newBuilder[Short, TFieldBlob]
_passthroughFields += _root_.scala.Tuple2(_field.id, TFieldBlob.read(_field, _iprot))
}
_iprot.readFieldEnd()
}
} while (!_done)
_iprot.readStructEnd()
adapt.set__endOffset(_iprot.offset)
if (_passthroughFields ne null) {
adapt.set__passthroughFields(_passthroughFields.result())
}
adapt
}
}
/**
* This is the base template for Adaptive decoding. This class gets pruned and
* reloaded at runtime.
*/
private class Response__Adapt(
_proto: AdaptTProtocol,
_buf: Array[Byte],
_start_offset: Int) extends Response {
/**
   * If any unexpected field is accessed, fall back to eager decoding.
*/
private[this] lazy val delegate: Response = {
val bytes = _root_.java.util.Arrays.copyOfRange(_buf, _start_offset, _end_offset)
val proto = _proto.withBytes(bytes)
Response.eagerDecode(proto)
}
private[this] var m_statusCode: Int = _
def set_statusCode(statusCode: Int): Unit = m_statusCode = statusCode
    // This will be removed by ASM if the field is unused.
    def statusCode: Int = m_statusCode
    // This will be removed by ASM if the field is used; otherwise it is renamed to statusCode.
    def delegated_statusCode: Int = delegate.statusCode
private[this] var m_responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = _
def set_responseUnion(responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion): Unit = m_responseUnion = responseUnion
    // This will be removed by ASM if the field is unused.
    def responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = m_responseUnion
    // This will be removed by ASM if the field is used; otherwise it is renamed to responseUnion.
    def delegated_responseUnion: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion = delegate.responseUnion
private[this] var _end_offset: Int = _
def set__endOffset(offset: Int): Unit = _end_offset = offset
private[this] var __passthroughFields: immutable$Map[Short, TFieldBlob] = _root_.com.twitter.scrooge.internal.TProtocols.NoPassthroughFields
def set__passthroughFields(passthroughFields: immutable$Map[Short, TFieldBlob]): Unit =
__passthroughFields = passthroughFields
override def _passthroughFields: immutable$Map[Short, TFieldBlob] = __passthroughFields
/*
Override the super hash code to make it a lazy val rather than def.
Calculating the hash code can be expensive, caching it where possible
can provide significant performance wins. (Key in a hash map for instance)
    Usually not safe since the normal constructor will accept a mutable map or
    set as an arg.
    Here, however, we control how the class is generated from serialized data.
    With the class private and the contract that we throw away our mutable references,
    having the hash code lazy here is safe.
*/
override lazy val hashCode: Int = super.hashCode
override def write(_oprot: TProtocol): Unit = {
if (_oprot.isInstanceOf[AdaptTProtocol]) {
_oprot.asInstanceOf[AdaptTProtocol].writeRaw(_buf, _start_offset, _end_offset - _start_offset)
} else {
super.write(_oprot)
}
}
}
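// Minimal usage sketch for the generated struct above; the union value is taken as a
// parameter because ResponseUnion's members are defined elsewhere in this package.
object ResponseUsageSketch {
  def example(union: com.twitter.scrooge.test.gold.thriftscala.ResponseUnion): Response = {
    val resp = Response(200, union)              // companion apply, no passthrough fields
    val notFound = resp.copy(statusCode = 404)   // functional update
    notFound.unsetStatusCode                     // statusCode reverts to its default (0)
  }
}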
|
twitter/scrooge
|
scrooge-generator-tests/src/test/resources/gold_file_output_scala/com/twitter/scrooge/test/gold/thriftscala/Response.scala
|
Scala
|
apache-2.0
| 24,984 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package play.modules.reactivemongo.json
import play.api.libs.json._
import play.modules.reactivemongo.ReactiveMongoPlayJsonException
import reactivemongo.bson._
import reactivemongo.bson.utils.Converters
import scala.math.BigDecimal.{
double2bigDecimal,
int2bigDecimal,
long2bigDecimal
}
object `package` extends ImplicitBSONHandlers {
object readOpt {
implicit def optionReads[T](implicit r: Reads[T]): Reads[Option[T]] = Reads.optionWithNull[T]
def apply[T](lookup: JsLookupResult)(implicit r: Reads[T]): JsResult[Option[T]] = lookup.toOption.fold[JsResult[Option[T]]](JsSuccess(None))(_.validate[Option[T]])
}
}
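// Usage sketch for readOpt above: reading a field that may be absent; the document shape
// is illustrative only.
object ReadOptSketch {
  val doc: JsValue = Json.obj("name" -> "Ada")
  val age: JsResult[Option[Int]] = readOpt[Int](doc \ "age")   // JsSuccess(None) when the path is missing
}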
object BSONFormats extends BSONFormats
/**
* JSON Formats for BSONValues.
*/
sealed trait BSONFormats extends LowerImplicitBSONHandlers {
trait PartialFormat[T <: BSONValue] extends Format[T] {
def partialReads: PartialFunction[JsValue, JsResult[T]]
def partialWrites: PartialFunction[BSONValue, JsValue]
def writes(t: T): JsValue = partialWrites(t)
def reads(json: JsValue) = partialReads.lift(json).getOrElse(JsError(s"unhandled json value: $json"))
}
implicit object BSONDoubleFormat extends PartialFormat[BSONDouble] {
val partialReads: PartialFunction[JsValue, JsResult[BSONDouble]] = {
case JsNumber(f) => JsSuccess(BSONDouble(f.toDouble))
case DoubleValue(value) => JsSuccess(BSONDouble(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case double: BSONDouble => JsNumber(double.value)
}
private object DoubleValue {
def unapply(obj: JsObject): Option[Double] =
(obj \\ "$double").asOpt[JsNumber].map(_.value.toDouble)
}
}
implicit object BSONStringFormat extends PartialFormat[BSONString] {
val partialReads: PartialFunction[JsValue, JsResult[BSONString]] = {
case JsString(str) => JsSuccess(BSONString(str))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case str: BSONString => JsString(str.value)
}
}
class BSONDocumentFormat(toBSON: JsValue => JsResult[BSONValue], toJSON: BSONValue => JsValue) extends PartialFormat[BSONDocument] {
val partialReads: PartialFunction[JsValue, JsResult[BSONDocument]] = {
case obj: JsObject =>
try {
JsSuccess(bson(obj))
} catch {
case e: Throwable => JsError(e.getMessage)
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case doc: BSONDocument => json(doc)
}
// UNSAFE - FOR INTERNAL USE
private[json] def bson(obj: JsObject): BSONDocument = BSONDocument(
obj.fields.map { tuple =>
tuple._1 -> (toBSON(tuple._2) match {
case JsSuccess(bson, _) => bson
case JsError(err) => throw new ReactiveMongoPlayJsonException(err.toString)
})
})
// UNSAFE - FOR INTERNAL USE
private[json] def json(bson: BSONDocument): JsObject =
JsObject(bson.elements.map(elem => elem._1 -> toJSON(elem._2)))
}
implicit object BSONDocumentFormat extends BSONDocumentFormat(toBSON, toJSON)
class BSONArrayFormat(toBSON: JsValue => JsResult[BSONValue], toJSON: BSONValue => JsValue) extends PartialFormat[BSONArray] {
val partialReads: PartialFunction[JsValue, JsResult[BSONArray]] = {
case arr: JsArray =>
try {
JsSuccess(BSONArray(arr.value.map { value =>
toBSON(value) match {
case JsSuccess(bson, _) => bson
case JsError(err) => throw new ReactiveMongoPlayJsonException(err.toString)
}
}))
} catch {
case e: Throwable => JsError(e.getMessage)
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case array: BSONArray => JsArray(array.values.map(toJSON))
}
}
implicit object BSONArrayFormat extends BSONArrayFormat(toBSON, toJSON)
implicit object BSONObjectIDFormat extends PartialFormat[BSONObjectID] {
val partialReads: PartialFunction[JsValue, JsResult[BSONObjectID]] = {
case OidValue(oid) => JsSuccess(BSONObjectID(oid))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case oid: BSONObjectID => Json.obj("$oid" -> oid.stringify)
}
private object OidValue {
def unapply(obj: JsObject): Option[String] =
if (obj.fields.size != 1) None else (obj \\ "$oid").asOpt[String]
}
}
implicit object BSONBooleanFormat extends PartialFormat[BSONBoolean] {
val partialReads: PartialFunction[JsValue, JsResult[BSONBoolean]] = {
case JsBoolean(v) => JsSuccess(BSONBoolean(v))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case boolean: BSONBoolean => JsBoolean(boolean.value)
}
}
implicit object BSONDateTimeFormat extends PartialFormat[BSONDateTime] {
val partialReads: PartialFunction[JsValue, JsResult[BSONDateTime]] = {
case DateValue(value) => JsSuccess(BSONDateTime(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case dt: BSONDateTime => Json.obj("$date" -> dt.value)
}
private object DateValue {
def unapply(obj: JsObject): Option[Long] = (obj \\ "$date").asOpt[Long]
}
}
implicit object BSONTimestampFormat extends PartialFormat[BSONTimestamp] {
val partialReads: PartialFunction[JsValue, JsResult[BSONTimestamp]] = {
case TimeValue((time, i)) => JsSuccess(BSONTimestamp((time << 32) ^ i))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case ts: BSONTimestamp => Json.obj(
"$time" -> (ts.value >>> 32), "$i" -> ts.value.toInt)
}
private object TimeValue {
def unapply(obj: JsObject): Option[(Long, Int)] = for {
time <- (obj \\ "$time").asOpt[Long]
i <- (obj \\ "$i").asOpt[Int]
} yield (time, i)
}
}
implicit object BSONRegexFormat extends PartialFormat[BSONRegex] {
val partialReads: PartialFunction[JsValue, JsResult[BSONRegex]] = {
case js: JsObject if js.values.size == 1 && js.fields.head._1 == "$regex" =>
js.fields.head._2.asOpt[String].
map(rx => JsSuccess(BSONRegex(rx, ""))).
getOrElse(JsError(__ \\ "$regex", "string expected"))
case js: JsObject if js.value.size == 2 && js.value.exists(_._1 == "$regex") && js.value.exists(_._1 == "$options") =>
val rx = (js \\ "$regex").asOpt[String]
val opts = (js \\ "$options").asOpt[String]
(rx, opts) match {
case (Some(rx), Some(opts)) => JsSuccess(BSONRegex(rx, opts))
case (None, Some(_)) => JsError(__ \\ "$regex", "string expected")
case (Some(_), None) => JsError(__ \\ "$options", "string expected")
case _ => JsError(__ \\ "$regex", "string expected") ++ JsError(__ \\ "$options", "string expected")
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case rx: BSONRegex =>
if (rx.flags.isEmpty)
Json.obj("$regex" -> rx.value)
else Json.obj("$regex" -> rx.value, "$options" -> rx.flags)
}
}
implicit object BSONNullFormat extends PartialFormat[BSONNull.type] {
val partialReads: PartialFunction[JsValue, JsResult[BSONNull.type]] = {
case JsNull => JsSuccess(BSONNull)
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case BSONNull => JsNull
}
}
implicit object BSONIntegerFormat extends PartialFormat[BSONInteger] {
val partialReads: PartialFunction[JsValue, JsResult[BSONInteger]] = {
case JsNumber(i) => JsSuccess(BSONInteger(i.toInt))
case IntValue(value) => JsSuccess(BSONInteger(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case int: BSONInteger => JsNumber(int.value)
}
private object IntValue {
def unapply(obj: JsObject): Option[Int] =
(obj \\ "$int").asOpt[JsNumber].map(_.value.toInt)
}
}
implicit object BSONLongFormat extends PartialFormat[BSONLong] {
val partialReads: PartialFunction[JsValue, JsResult[BSONLong]] = {
case JsNumber(long) => JsSuccess(BSONLong(long.toLong))
case LongValue(value) => JsSuccess(BSONLong(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case long: BSONLong => JsNumber(long.value)
}
private object LongValue {
def unapply(obj: JsObject): Option[Long] =
(obj \\ "$long").asOpt[JsNumber].map(_.value.toLong)
}
}
implicit object BSONBinaryFormat extends PartialFormat[BSONBinary] {
val partialReads: PartialFunction[JsValue, JsResult[BSONBinary]] = {
case JsString(str) => try {
JsSuccess(BSONBinary(Converters.str2Hex(str), Subtype.UserDefinedSubtype))
} catch {
case e: Throwable => JsError(s"error deserializing hex ${e.getMessage}")
}
case obj: JsObject if obj.fields.exists {
case (str, _: JsString) if str == "$binary" => true
case _ => false
} => try {
JsSuccess(BSONBinary(Converters.str2Hex((obj \\ "$binary").as[String]), Subtype.UserDefinedSubtype))
} catch {
case e: Throwable => JsError(s"error deserializing hex ${e.getMessage}")
}
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case binary: BSONBinary =>
val remaining = binary.value.readable()
Json.obj(
"$binary" -> Converters.hex2Str(binary.value.slice(remaining).readArray(remaining)),
"$type" -> Converters.hex2Str(Array(binary.subtype.value.toByte)))
}
}
implicit object BSONSymbolFormat extends PartialFormat[BSONSymbol] {
val partialReads: PartialFunction[JsValue, JsResult[BSONSymbol]] = {
case SymbolValue(value) => JsSuccess(BSONSymbol(value))
}
val partialWrites: PartialFunction[BSONValue, JsValue] = {
case BSONSymbol(s) => Json.obj("$symbol" -> s)
}
private object SymbolValue {
def unapply(obj: JsObject): Option[String] =
if (obj.fields.size != 1) None else (obj \\ "$symbol").asOpt[String]
}
}
val numberReads: PartialFunction[JsValue, JsResult[BSONValue]] = {
case JsNumber(n) if !n.ulp.isWhole => JsSuccess(BSONDouble(n.toDouble))
case JsNumber(n) if n.isValidInt => JsSuccess(BSONInteger(n.toInt))
case JsNumber(n) if n.isValidLong => JsSuccess(BSONLong(n.toLong))
}
def toBSON(json: JsValue): JsResult[BSONValue] =
BSONStringFormat.partialReads.
orElse(BSONObjectIDFormat.partialReads).
orElse(BSONDateTimeFormat.partialReads).
orElse(BSONTimestampFormat.partialReads).
orElse(BSONBinaryFormat.partialReads).
orElse(BSONRegexFormat.partialReads).
orElse(numberReads).
orElse(BSONBooleanFormat.partialReads).
orElse(BSONNullFormat.partialReads).
orElse(BSONSymbolFormat.partialReads).
orElse(BSONArrayFormat.partialReads).
orElse(BSONDocumentFormat.partialReads).
lift(json).getOrElse(JsError(s"unhandled json value: $json"))
def toJSON(bson: BSONValue): JsValue = BSONObjectIDFormat.partialWrites.
orElse(BSONDateTimeFormat.partialWrites).
orElse(BSONTimestampFormat.partialWrites).
orElse(BSONBinaryFormat.partialWrites).
orElse(BSONRegexFormat.partialWrites).
orElse(BSONDoubleFormat.partialWrites).
orElse(BSONIntegerFormat.partialWrites).
orElse(BSONLongFormat.partialWrites).
orElse(BSONBooleanFormat.partialWrites).
orElse(BSONNullFormat.partialWrites).
orElse(BSONStringFormat.partialWrites).
orElse(BSONSymbolFormat.partialWrites).
orElse(BSONArrayFormat.partialWrites).
orElse(BSONDocumentFormat.partialWrites).
lift(bson).getOrElse(throw new ReactiveMongoPlayJsonException(s"Unhandled json value: $bson"))
}
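// Round-trip sketch using the formats above; the JSON value is illustrative.
object BSONFormatsSketch {
  val js: JsObject = Json.obj("name" -> "Ada", "age" -> 36)
  val bson: JsResult[BSONValue] = BSONFormats.toBSON(js)
  val back: JsValue = bson.map(BSONFormats.toJSON).getOrElse(JsNull)
}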
object Writers {
implicit class JsPathMongo(val jp: JsPath) extends AnyVal {
def writemongo[A](implicit writer: Writes[A]): OWrites[A] = {
OWrites[A] { (o: A) =>
val newPath = jp.path.flatMap {
case e: KeyPathNode => Some(e.key)
case e: RecursiveSearch => Some(s"$$.${e.key}")
case e: IdxPathNode => Some(s"${e.idx}")
}.mkString(".")
val orig = writer.writes(o)
orig match {
case JsObject(e) =>
JsObject(e.flatMap {
case (k, v) => Seq(s"${newPath}.$k" -> v)
})
case e: JsValue => JsObject(Seq(newPath -> e))
}
}
}
}
}
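// Usage sketch for Writers.JsPathMongo above: serialize a value under a dotted MongoDB path.
// The Person case class and its writer are assumptions for illustration.
object WritersSketch {
  import Writers._
  case class Person(name: String)
  implicit val personWrites: OWrites[Person] = OWrites(p => Json.obj("name" -> p.name))
  // (__ \ "owner").writemongo writes Person("Ada") as {"owner.name": "Ada"}
  val ownerWrites: OWrites[Person] = (__ \ "owner").writemongo[Person]
}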
object JSONSerializationPack extends reactivemongo.api.SerializationPack {
import reactivemongo.bson.buffer.{
DefaultBufferHandler,
ReadableBuffer,
WritableBuffer
}
type Value = JsValue
type Document = JsObject
type Writer[A] = OWrites[A]
type Reader[A] = Reads[A]
object IdentityReader extends Reader[Document] {
def reads(js: JsValue): JsResult[Document] = js match {
case o: JsObject => JsSuccess(o)
case v => JsError(s"object is expected: $v")
}
}
object IdentityWriter extends Writer[Document] {
def writes(document: Document): Document = document
}
def serialize[A](a: A, writer: Writer[A]): Document = writer.writes(a)
def deserialize[A](document: Document, reader: Reader[A]): A =
reader.reads(document) match {
case JsError(msg) => sys.error(msg mkString ", ")
case JsSuccess(v, _) => v
}
def writeToBuffer(buffer: WritableBuffer, document: Document): WritableBuffer = {
BSONDocument.write(BSONFormats.toBSON(document).flatMap[BSONDocument] {
case d: BSONDocument => JsSuccess(d)
case v => JsError(s"document is expected: $v")
}.get, buffer)
buffer
}
def readFromBuffer(buffer: ReadableBuffer): Document =
BSONFormats.toJSON(BSONDocument.read(buffer)).as[Document]
def writer[A](f: A => Document): Writer[A] = new OWrites[A] {
def writes(input: A): Document = f(input)
}
def isEmpty(document: Document): Boolean = document.values.isEmpty
}
import play.api.libs.json.{ JsObject, JsValue }
import reactivemongo.bson.{
BSONDocument,
BSONDocumentReader,
BSONDocumentWriter
}
object ImplicitBSONHandlers extends ImplicitBSONHandlers
/**
* Implicit BSON Handlers (BSONDocumentReader/BSONDocumentWriter for JsObject)
*/
sealed trait ImplicitBSONHandlers extends BSONFormats {
implicit object JsObjectWriter extends BSONDocumentWriter[JsObject] {
def write(obj: JsObject): BSONDocument =
BSONFormats.BSONDocumentFormat.bson(obj)
}
implicit object JsObjectReader extends BSONDocumentReader[JsObject] {
def read(document: BSONDocument) =
BSONFormats.BSONDocumentFormat.writes(document).as[JsObject]
}
implicit object BSONDocumentWrites
extends JSONSerializationPack.Writer[BSONDocument] {
def writes(bson: BSONDocument): JsObject =
BSONFormats.BSONDocumentFormat.json(bson)
}
implicit object JsObjectDocumentWriter // Identity writer
extends JSONSerializationPack.Writer[JsObject] {
def writes(obj: JsObject): JSONSerializationPack.Document = obj
}
}
sealed trait LowerImplicitBSONHandlers {
import reactivemongo.bson.{ BSONElement, Producer }
implicit def jsWriter[A <: JsValue, B <: BSONValue] = new BSONWriter[A, B] {
def write(js: A): B = BSONFormats.toBSON(js).get.asInstanceOf[B]
}
implicit def JsFieldBSONElementProducer[T <: JsValue](jsField: (String, T)): Producer[BSONElement] = Producer.nameValue2Producer(jsField)
implicit object BSONValueReads extends Reads[BSONValue] {
def reads(js: JsValue) = BSONFormats.toBSON(js)
}
implicit object BSONValueWrites extends Writes[BSONValue] {
def writes(bson: BSONValue) = BSONFormats.toJSON(bson)
}
}
|
duncancrawford/Play-Json-ReactiveMongo
|
src/main/scala/play/modules/reactivemongo/json.scala
|
Scala
|
apache-2.0
| 16,295 |
package nexus.typelevel
import nexus._
import shapeless._
/**
* Typelevel function that computes the only different element between two lists.
* If the number of differences is not 1, no such evidence instance would be derived.
*/
trait Diff1[U, V] {
type Left
type Right
def index: Int
def left(u: U): Left
def right(v: V): Right
}
object Diff1 {
def apply[U, V, I, J](implicit d: Diff1.Aux[U, V, I, J]) = d
/** Proves that between U & V, there is only one difference -- in U it is I, whereas in V, it is J. */
type Aux[U, V, I, J] = Diff1[U, V] { type Left = I; type Right = J }
implicit def case0[T <: HList, L, R](implicit neq: L =:!= R): Aux[L :: T, R :: T, L, R] =
new Diff1[L :: T, R :: T] {
type Left = L
type Right = R
def index = 0
def left(u: L :: T) = u.head
def right(v: R :: T) = v.head
}
implicit def caseN[TL <: HList, TR <: HList, H, L, R](implicit d: Diff1.Aux[TL, TR, L, R]): Aux[H :: TL, H :: TR, L, R] =
new Diff1[H :: TL, H :: TR] {
type Left = L
type Right = R
def index = d.index + 1
def left(u: H :: TL) = d.left(u.tail)
def right(v: H :: TR) = d.right(v.tail)
}
implicit def tuple[U, Uh <: HList, V, Vh <: HList, L, R]
(implicit uh: ToHList.Aux[U, Uh], vh: ToHList.Aux[V, Vh], d: Diff1.Aux[Uh, Vh, L, R]): Aux[U, V, L, R] =
new Diff1[U, V] {
type Left = L
type Right = R
def index = d.index
def left(u: U) = d.left(uh(u))
def right(v: V) = d.right(vh(v))
}
}
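// Minimal sketch of how the evidence above is consumed; for two HLists (or tuples) that
// differ only in their head element, index is expected to be 0 and the extractors are
// expected to return the two differing values.
object Diff1Sketch {
  def describe[U, V, L, R](u: U, v: V)(implicit d: Diff1.Aux[U, V, L, R]): (Int, L, R) =
    (d.index, d.left(u), d.right(v))
  // e.g. describe("a" :: true :: HNil, 1 :: true :: HNil) should yield (0, "a", 1)
}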
|
ctongfei/nexus
|
tensor/src/main/scala/nexus/typelevel/Diff1.scala
|
Scala
|
mit
| 1,534 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.scala
import org.apache.flink.api.scala._
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.table.api.{StreamQueryConfig, Table, TableConfig, TableEnvironment}
import org.apache.flink.table.expressions.Expression
import org.apache.flink.table.functions.{AggregateFunction, TableFunction}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.api.scala.asScalaStream
/**
* The [[TableEnvironment]] for a Scala [[StreamExecutionEnvironment]].
*
* A TableEnvironment can be used to:
* - convert a [[DataStream]] to a [[Table]]
* - register a [[DataStream]] in the [[TableEnvironment]]'s catalog
* - register a [[Table]] in the [[TableEnvironment]]'s catalog
* - scan a registered table to obtain a [[Table]]
* - specify a SQL query on registered tables to obtain a [[Table]]
* - convert a [[Table]] into a [[DataStream]]
* - explain the AST and execution plan of a [[Table]]
*
* @param execEnv The Scala [[StreamExecutionEnvironment]] of the TableEnvironment.
* @param config The configuration of the TableEnvironment.
*/
class StreamTableEnvironment(
execEnv: StreamExecutionEnvironment,
config: TableConfig)
extends org.apache.flink.table.api.StreamTableEnvironment(
execEnv.getWrappedStreamExecutionEnvironment,
config) {
/**
* Converts the given [[DataStream]] into a [[Table]].
*
* The field names of the [[Table]] are automatically derived from the type of the
* [[DataStream]].
*
* @param dataStream The [[DataStream]] to be converted.
* @tparam T The type of the [[DataStream]].
* @return The converted [[Table]].
*/
def fromDataStream[T](dataStream: DataStream[T]): Table = {
val name = createUniqueTableName()
registerDataStreamInternal(name, dataStream.javaStream)
scan(name)
}
/**
* Converts the given [[DataStream]] into a [[Table]] with specified field names.
*
* Example:
*
* {{{
* val stream: DataStream[(String, Long)] = ...
* val tab: Table = tableEnv.fromDataStream(stream, 'a, 'b)
* }}}
*
* @param dataStream The [[DataStream]] to be converted.
* @param fields The field names of the resulting [[Table]].
* @tparam T The type of the [[DataStream]].
* @return The converted [[Table]].
*/
def fromDataStream[T](dataStream: DataStream[T], fields: Expression*): Table = {
val name = createUniqueTableName()
registerDataStreamInternal(name, dataStream.javaStream, fields.toArray)
scan(name)
}
/**
* Registers the given [[DataStream]] as table in the
* [[TableEnvironment]]'s catalog.
* Registered tables can be referenced in SQL queries.
*
* The field names of the [[Table]] are automatically derived
* from the type of the [[DataStream]].
*
* @param name The name under which the [[DataStream]] is registered in the catalog.
* @param dataStream The [[DataStream]] to register.
* @tparam T The type of the [[DataStream]] to register.
*/
def registerDataStream[T](name: String, dataStream: DataStream[T]): Unit = {
checkValidTableName(name)
registerDataStreamInternal(name, dataStream.javaStream)
}
/**
* Registers the given [[DataStream]] as table with specified field names in the
* [[TableEnvironment]]'s catalog.
* Registered tables can be referenced in SQL queries.
*
* Example:
*
* {{{
 *   val stream: DataStream[(String, Long)] = ...
 *   tableEnv.registerDataStream("myTable", stream, 'a, 'b)
* }}}
*
* @param name The name under which the [[DataStream]] is registered in the catalog.
* @param dataStream The [[DataStream]] to register.
* @param fields The field names of the registered table.
* @tparam T The type of the [[DataStream]] to register.
*/
def registerDataStream[T](name: String, dataStream: DataStream[T], fields: Expression*): Unit = {
checkValidTableName(name)
registerDataStreamInternal(name, dataStream.javaStream, fields.toArray)
}
/**
* Converts the given [[Table]] into an append [[DataStream]] of a specified type.
*
* The [[Table]] must only have insert (append) changes. If the [[Table]] is also modified
* by update or delete changes, the conversion will fail.
*
* The fields of the [[Table]] are mapped to [[DataStream]] fields as follows:
* - [[org.apache.flink.types.Row]] and Scala Tuple types: Fields are mapped by position, field
* types must match.
* - POJO [[DataStream]] types: Fields are mapped by field name, field types must match.
*
* NOTE: This method only supports conversion of append-only tables. In order to make this
* more explicit in the future, please use [[toAppendStream()]] instead.
* If add and retract messages are required, use [[toRetractStream()]].
*
* @param table The [[Table]] to convert.
* @tparam T The type of the resulting [[DataStream]].
* @return The converted [[DataStream]].
*/
@deprecated("This method only supports conversion of append-only tables. In order to make this" +
" more explicit in the future, please use toAppendStream() instead.")
def toDataStream[T: TypeInformation](table: Table): DataStream[T] = toAppendStream(table)
/**
* Converts the given [[Table]] into an append [[DataStream]] of a specified type.
*
* The [[Table]] must only have insert (append) changes. If the [[Table]] is also modified
* by update or delete changes, the conversion will fail.
*
* The fields of the [[Table]] are mapped to [[DataStream]] fields as follows:
* - [[org.apache.flink.types.Row]] and Scala Tuple types: Fields are mapped by position, field
* types must match.
* - POJO [[DataStream]] types: Fields are mapped by field name, field types must match.
*
* NOTE: This method only supports conversion of append-only tables. In order to make this
* more explicit in the future, please use [[toAppendStream()]] instead.
* If add and retract messages are required, use [[toRetractStream()]].
*
* @param table The [[Table]] to convert.
* @param queryConfig The configuration of the query to generate.
* @tparam T The type of the resulting [[DataStream]].
* @return The converted [[DataStream]].
*/
@deprecated("This method only supports conversion of append-only tables. In order to make this" +
" more explicit in the future, please use toAppendStream() instead.")
def toDataStream[T: TypeInformation](
table: Table,
queryConfig: StreamQueryConfig): DataStream[T] = toAppendStream(table, queryConfig)
/**
* Converts the given [[Table]] into an append [[DataStream]] of a specified type.
*
* The [[Table]] must only have insert (append) changes. If the [[Table]] is also modified
* by update or delete changes, the conversion will fail.
*
* The fields of the [[Table]] are mapped to [[DataStream]] fields as follows:
* - [[org.apache.flink.types.Row]] and Scala Tuple types: Fields are mapped by position, field
* types must match.
* - POJO [[DataStream]] types: Fields are mapped by field name, field types must match.
*
* @param table The [[Table]] to convert.
* @tparam T The type of the resulting [[DataStream]].
* @return The converted [[DataStream]].
*/
def toAppendStream[T: TypeInformation](table: Table): DataStream[T] = {
toAppendStream(table, queryConfig)
}
/**
* Converts the given [[Table]] into an append [[DataStream]] of a specified type.
*
* The [[Table]] must only have insert (append) changes. If the [[Table]] is also modified
* by update or delete changes, the conversion will fail.
*
* The fields of the [[Table]] are mapped to [[DataStream]] fields as follows:
* - [[org.apache.flink.types.Row]] and Scala Tuple types: Fields are mapped by position, field
* types must match.
* - POJO [[DataStream]] types: Fields are mapped by field name, field types must match.
*
* @param table The [[Table]] to convert.
* @param queryConfig The configuration of the query to generate.
* @tparam T The type of the resulting [[DataStream]].
* @return The converted [[DataStream]].
*/
def toAppendStream[T: TypeInformation](
table: Table,
queryConfig: StreamQueryConfig): DataStream[T] = {
val returnType = createTypeInformation[T]
asScalaStream(translate(
table, queryConfig, updatesAsRetraction = false, withChangeFlag = false)(returnType))
}
/**
* Converts the given [[Table]] into a [[DataStream]] of add and retract messages.
* The message will be encoded as [[Tuple2]]. The first field is a [[Boolean]] flag,
* the second field holds the record of the specified type [[T]].
*
* A true [[Boolean]] flag indicates an add message, a false flag indicates a retract message.
*
* @param table The [[Table]] to convert.
* @tparam T The type of the requested data type.
* @return The converted [[DataStream]].
*/
def toRetractStream[T: TypeInformation](table: Table): DataStream[(Boolean, T)] = {
toRetractStream(table, queryConfig)
}
/**
* Converts the given [[Table]] into a [[DataStream]] of add and retract messages.
* The message will be encoded as [[Tuple2]]. The first field is a [[Boolean]] flag,
* the second field holds the record of the specified type [[T]].
*
* A true [[Boolean]] flag indicates an add message, a false flag indicates a retract message.
*
* @param table The [[Table]] to convert.
* @param queryConfig The configuration of the query to generate.
* @tparam T The type of the requested data type.
* @return The converted [[DataStream]].
*/
def toRetractStream[T: TypeInformation](
table: Table,
queryConfig: StreamQueryConfig): DataStream[(Boolean, T)] = {
val returnType = createTypeInformation[(Boolean, T)]
asScalaStream(
translate(table, queryConfig, updatesAsRetraction = true, withChangeFlag = true)(returnType))
}
/**
* Registers a [[TableFunction]] under a unique name in the TableEnvironment's catalog.
* Registered functions can be referenced in SQL queries.
*
* @param name The name under which the function is registered.
* @param tf The TableFunction to register
*/
def registerFunction[T: TypeInformation](name: String, tf: TableFunction[T]): Unit = {
registerTableFunctionInternal(name, tf)
}
/**
* Registers an [[AggregateFunction]] under a unique name in the TableEnvironment's catalog.
* Registered functions can be referenced in Table API and SQL queries.
*
* @param name The name under which the function is registered.
* @param f The AggregateFunction to register.
* @tparam T The type of the output value.
* @tparam ACC The type of aggregate accumulator.
*/
def registerFunction[T: TypeInformation, ACC](
name: String,
f: AggregateFunction[T, ACC])
: Unit = {
registerAggregateFunctionInternal[T, ACC](name, f)
}
}
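// Usage sketch for the table environment above; the input elements are illustrative.
object StreamTableEnvironmentSketch {
  def example(env: StreamExecutionEnvironment, tEnv: StreamTableEnvironment): Unit = {
    val stream: DataStream[(String, Long)] = env.fromElements(("a", 1L), ("b", 2L))
    val table: Table = tEnv.fromDataStream(stream, 'name, 'amount)
    val appendStream: DataStream[(String, Long)] = tEnv.toAppendStream[(String, Long)](table)
    appendStream.print()
  }
}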
|
hongyuhong/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/api/scala/StreamTableEnvironment.scala
|
Scala
|
apache-2.0
| 11,938 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.webapp
import org.orbeon.oxf.common.{OXFException, Version}
import org.orbeon.oxf.pipeline.InitUtils
import org.orbeon.oxf.properties.Properties
import org.orbeon.oxf.resources.{ResourceManagerWrapper, WebAppResourceManagerImpl}
import org.orbeon.oxf.util.LoggerFactory
import org.orbeon.oxf.util.ScalaUtils.CodePointsOps
import scala.collection.JavaConverters._
// Orbeon web app initialization
object Orbeon {
private val PropertiesProperty = "oxf.properties"
private val LoggingProperty = "oxf.initialize-logging"
private val logger = LoggerFactory.createLogger(Orbeon.getClass)
// Initialize Orbeon
//
// - resource manager (based on init parameters)
// - properties subsystem (based on run mode)
// - version check
// - logger (based on properties)
// - processor registry
def initialize(context: WebAppContext) = {
// Check whether logging initialization is disabled
val initializeLogging = ! context.initParameters.get(LoggingProperty).contains("false")
if (initializeLogging)
LoggerFactory.initBasicLogger()
// 0. Say hello
logger.info("Starting " + Version.VersionString)
// 1. Initialize the Resource Manager
val properties = context.initParameters filter
      { case (name, value) ⇒ name.startsWith("oxf.resources.") } updated
(WebAppResourceManagerImpl.WEB_APP_CONTEXT_KEY, context) asJava
logger.info("Initializing Resource Manager with: " + properties)
ResourceManagerWrapper.init(properties)
// 2. Initialize properties
val propertiesURL = {
// Try to replace the run mode variable so we can write "oxf:/config/properties-${oxf.run-mode}.xml"
val rawPropertiesURL =
context.initParameters.getOrElse(
PropertiesProperty,
throw new OXFException("Properties file URL must be specified via oxf.properties in web.xml.")
).trimAllToNull
val runMode = RunMode.getRunMode(context.initParameters)
logger.info("Using run mode: " + runMode)
rawPropertiesURL.replaceAllLiterally("${" + RunMode.RunModeProperty + "}", runMode)
}
logger.info("Using properties file: " + propertiesURL)
Properties.init(propertiesURL)
// 3. Initialize Version object (depends on resource manager)
// Better to do it here so that log messages will go to the same place as the above logs
Version.instance
// 4. Initialize log4j with a DOMConfiguration
if (initializeLogging)
LoggerFactory.initLogger()
// 5. Register processor definitions with the default XML Processor Registry
InitUtils.processorDefinitions
}
}
|
joansmith/orbeon-forms
|
src/main/scala/org/orbeon/oxf/webapp/Orbeon.scala
|
Scala
|
lgpl-2.1
| 3,278 |
package at.forsyte.apalache.tla.bmcmt.rules.aux
import at.forsyte.apalache.tla.bmcmt.caches.StrValueCache
import at.forsyte.apalache.tla.bmcmt.types.ConstT
import at.forsyte.apalache.tla.bmcmt._
import at.forsyte.apalache.tla.bmcmt.smt.SolverContext
import at.forsyte.apalache.tla.lir.{TlaEx, ValEx}
import at.forsyte.apalache.tla.lir.convenience.tla
import at.forsyte.apalache.tla.lir.values.TlaInt
class UninterpretedConstOracle(valueCells: Seq[ArenaCell], oracleCell: ArenaCell, nvalues: Int) extends Oracle {
/**
* Produce an expression that states that the oracle values equals to the given integer position.
* The actual implementation may be different from an integer comparison.
*
* @param state a symbolic state
* @param position a position the oracle should be equal to
*/
override def whenEqualTo(state: SymbState, position: Int): TlaEx = {
tla.eql(oracleCell.toNameEx, valueCells(position).toNameEx)
}
/**
* Produce a ground expression that contains assertions for the possible oracle values.
*
* @param state a symbolic state
   * @param assertions a sequence of assertions, one per oracle value; the sequence is truncated to nvalues
* @return an expression ite(oracle = 0, ite(oracle = 1, ...))
*/
override def caseAssertions(state: SymbState, assertions: Seq[TlaEx]): TlaEx = {
nvalues match {
case 0 => state.arena.cellTrue().toNameEx
case 1 => assertions.head
case _ =>
val es = assertions.slice(0, nvalues).zipWithIndex.map
{ case (e, i) => tla.or(tla.not(whenEqualTo(state, i)), e) }
tla.and(es :_*)
}
}
/**
* Get a symbolic state and decode the value of the oracle variable into an integer.
* This method assumes that the solver context has produced an SMT model.
*
* @param solverContext a solver context
* @param state a symbolic state
* @return an integer value of the oracle, or -1, when the SMT encoding is broken
*/
override def evalPosition(solverContext: SolverContext, state: SymbState): Int = {
def isEqual(valueCell: ArenaCell): Boolean = {
solverContext.evalGroundExpr(tla.eql(valueCell.toNameEx, oracleCell.toNameEx)) == tla.bool(true)
}
valueCells indexWhere isEqual // the oracle must be equal to one of the cached values
}
}
object UninterpretedConstOracle {
def create(rewriter: SymbStateRewriter, state: SymbState, nvalues: Int): (SymbState, UninterpretedConstOracle) = {
val solverAssert = rewriter.solverContext.assertGroundExpr _
var nextState = state
def introConst(i: Int): ArenaCell = {
val (newArena, valueCell) = rewriter.strValueCache.getOrCreate(nextState.arena, i.toString)
nextState = nextState.setArena(newArena)
valueCell
}
val nums = 0 until nvalues
val valueCells = nums map introConst // introduce a constant for every integer
nextState = state.setArena(nextState.arena.appendCell(ConstT()))
val oracleCell = nextState.arena.topCell
val oracle = new UninterpretedConstOracle(valueCells, oracleCell, nvalues)
// the oracle value must be equal to one of the value cells
solverAssert(tla.or(nums.map(i => oracle.whenEqualTo(nextState, i)) :_*))
(nextState, oracle)
}
}
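// Sketch of how the oracle above is typically consumed: create it for N alternatives and
// assert that whichever position the oracle takes, the corresponding assertion holds.
object UninterpretedConstOracleSketch {
  def choose(rewriter: SymbStateRewriter, state: SymbState, assertions: Seq[TlaEx]): SymbState = {
    val (nextState, oracle) = UninterpretedConstOracle.create(rewriter, state, assertions.size)
    rewriter.solverContext.assertGroundExpr(oracle.caseAssertions(nextState, assertions))
    nextState
  }
}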
|
konnov/apalache
|
tla-bmcmt/src/main/scala/at/forsyte/apalache/tla/bmcmt/rules/aux/UninterpretedConstOracle.scala
|
Scala
|
apache-2.0
| 3,280 |
package scala.meta.eden
package quasiquote
import dotty.tools.dotc._
import core._
import ast._
import Contexts._
import Names._
import Decorators._
import Constants._
import Types._
import Symbols._
import Trees.Typed
import scala.{meta => m}
import scala.compat.Platform.EOL
object Quote {
type Quasi = m.internal.ast.Quasi
implicit class TreeOps(val tree: untpd.Tree) extends AnyVal {
def select(name: Name): untpd.Select = untpd.Select(tree, name)
def appliedTo(args: untpd.Tree*): untpd.Apply = untpd.Apply(tree, args.toList)
def appliedToType(args: untpd.Tree*): untpd.TypeApply = untpd.TypeApply(tree, args.toList)
}
private def select(path: String, isTerm: Boolean = true): untpd.Tree = {
val parts = path.split('.')
val name = if (isTerm) parts.last.toTermName else parts.last.toTypeName
parts.init.foldLeft[untpd.Tree](untpd.Ident("_root_".toTermName)) { (prefix, name) =>
prefix.select(name.toTermName)
}.select(name)
}
private def literal(value: Any): untpd.Tree = untpd.Literal(Constant(value))
}
/** Lift scala.meta trees as Dotty trees */
class Quote(tree: untpd.Tree, args: List[untpd.Tree], isTerm: Boolean = true)(implicit ctx: Context) {
import Quote._
val seqType = ctx.requiredClassRef("scala.collection.immutable.Seq")
val metaLiftType = ctx.requiredClassRef("scala.meta.quasiquotes.Lift")
val metaUnliftType = ctx.requiredClassRef("scala.meta.quasiquotes.Unlift")
def seqTypeOf(T: Type) = seqType.appliedTo(T)
def liftSeq(trees: Seq[m.Tree]): untpd.Tree = {
def loop(trees: List[m.Tree], acc: untpd.Tree, prefix: List[m.Tree]): untpd.Tree = trees match {
case (quasi: Quasi) +: rest if quasi.rank == 1 =>
if (acc.isEmpty) {
if (prefix.isEmpty) loop(rest, liftQuasi(quasi), Nil)
else loop(rest, prefix.foldRight(liftQuasi(quasi))((curr, acc) => {
val currElement = lift(curr)
untpd.InfixOp(currElement, untpd.Ident("+:".toTermName), acc)
}), Nil)
} else {
require(prefix.isEmpty)
if (isTerm) loop(rest, untpd.InfixOp(acc, untpd.Ident("++".toTermName), liftQuasi(quasi)), Nil)
else {
ctx.error(m.internal.parsers.Messages.QuasiquoteAdjacentEllipsesInPattern(quasi.rank), tree.pos)
untpd.EmptyTree
}
}
case other +: rest =>
if (acc.isEmpty) loop(rest, acc, prefix :+ other)
else {
require(prefix.isEmpty)
loop(rest, untpd.InfixOp(acc, untpd.Ident(":+".toTermName), lift(other)), Nil)
}
case Nil =>
if (acc.isEmpty)
select("scala.collection.immutable.List").appliedTo(prefix.map(lift): _*)
else acc
}
loop(trees.toList, untpd.EmptyTree, Nil)
}
def liftSeqSeq(treess: Seq[Seq[m.Tree]]): untpd.Tree = {
val tripleDotQuasis = treess.flatten.collect { case quasi: Quasi if quasi.rank == 2 => quasi }
if (tripleDotQuasis.length == 0) {
val list = select("scala.collection.immutable.List")
val args = treess.map(liftSeq)
list.appliedTo(args: _*)
} else if (tripleDotQuasis.length == 1) {
if (treess.flatten.length == 1) liftQuasi(tripleDotQuasis(0))
else {
ctx.error("implementation restriction: can't mix ...$ with anything else in parameter lists." +
EOL + "See https://github.com/scalameta/scalameta/issues/406 for details.", tree.pos)
untpd.EmptyTree
}
} else {
ctx.error(m.internal.parsers.Messages.QuasiquoteAdjacentEllipsesInPattern(2), tree.pos)
untpd.EmptyTree
}
}
def liftOpt(treeOpt: Option[m.Tree]): untpd.Tree = treeOpt match {
case Some(quasi: Quasi) =>
liftQuasi(quasi, optional = true)
case Some(tree) =>
select("scala.Some").appliedTo(lift(tree))
case None =>
select("scala.None")
}
def liftOptSeq(treesOpt: Option[Seq[m.Tree]]): untpd.Tree = treesOpt match {
case Some(Seq(quasi: Quasi)) if quasi.rank > 0 && !isTerm =>
select("scala.meta.internal.quasiquotes.Flatten").appliedTo(liftQuasi(quasi))
case Some(trees) =>
select("scala.Some").appliedTo(liftSeq(trees))
case None =>
select("scala.None")
}
def liftQuasi(quasi: Quasi, expectedRank: Int = 0, optional: Boolean = false): untpd.Tree = {
// credit: https://github.com/scalameta/scalameta/blob/master/scalameta/quasiquotes/src/main/scala/scala/meta/internal/quasiquotes/ReificationMacros.scala#L179
implicit class XtensionClazz(clazz: Class[_]) {
def toTpe: Type = {
def loop(owner: Symbol, parts: List[String]): Symbol = parts match {
case part :: Nil =>
if (clazz.getName.endsWith("$")) owner.info.decl(part.toTermName).symbol
else owner.info.decl(part.toTypeName).symbol
case part :: rest =>
loop(owner.info.decl(part.toTermName).symbol, rest)
case Nil => ??? // unlikely
}
val name = dotty.tools.dotc.util.NameTransformer.decode(clazz.getName)
val result = loop(ctx.definitions.RootClass, name.stripSuffix("$").split(Array('.', '$')).toList)
if (result.is(Flags.ModuleVal)) result.termRef else result.typeRef
}
}
implicit class XtensionType(tpe: Type) {
def wrap(rank: Int): Type = {
if (rank == 0) tpe
else seqTypeOf(tpe.wrap(rank - 1))
}
}
def quasiType: Type = {
var inferred = quasi.pt.toTpe.wrap(expectedRank)
if (optional) inferred = defn.OptionType.appliedTo(inferred)
inferred
}
// type of the pattern
def patternType(arg: untpd.Tree): Type = {
arg match {
case Typed(_, tp) => ctx.typer.typedType(tp).tpe
case _ =>
var inferred = defn.MetaTreeType.wrap(expectedRank)
if (optional) inferred = defn.OptionType.appliedTo(inferred)
inferred
}
}
def convert(arg: untpd.Tree, from: Type, to: Type, base: TypeRef): untpd.Tree = {
val conv = ctx.typer.inferImplicitArg(
base.appliedTo(from, to), msgFun => ctx.error(msgFun(""), arg.pos), arg.pos
)
if (conv.isEmpty) arg
else untpd.TypedSplice(conv).appliedTo(arg)
}
def unliftImplicitly(arg: untpd.Tree): untpd.Tree = {
// shortcut
val fromType = quasiType
val toType = patternType(arg)
if (fromType <:< toType || toType <:< fromType) return arg
convert(arg, fromType, toType, metaUnliftType)
}
def liftImplicitly(arg: untpd.Tree): untpd.Tree = {
// shortcut
val toType = quasiType
val fromType = ctx.typer.typedExpr(arg).tpe
if (fromType <:< toType) return arg
convert(arg, fromType.widen, toType, metaLiftType)
}
if (quasi.rank > 0) return liftQuasi(quasi.tree.asInstanceOf[Quasi], quasi.rank, optional)
quasi.tree match {
case m.Term.Name(Hole(i)) =>
if (isTerm) liftImplicitly(args(i))
else unliftImplicitly(args(i))
case m.Type.Name(Hole(i)) =>
if (isTerm) liftImplicitly(args(i))
else unliftImplicitly(args(i))
}
}
def liftCommon(obj: Any): untpd.Tree = obj match {
case seq: Seq[m.Tree] => liftSeq(seq)
case opt: Option[m.Tree] => liftOpt(opt)
case tree: m.Tree => lift(tree)
case _: String => literal(obj)
}
def lift(tree: m.Tree): untpd.Tree = (tree match {
case quasi: Quasi =>
liftQuasi(quasi)
case m.Lit(v) =>
select("scala.meta.Lit").appliedTo(literal(v))
case m.Term.Apply(fun, args) =>
// magic happens here with ...$args
args match {
case Seq(quasi: Quasi) if quasi.rank == 2 =>
select("scala.meta.internal.ast.Helpers.TermApply").appliedTo(lift(fun), liftQuasi(quasi))
case _ =>
select("scala.meta.Term.Apply").appliedTo(lift(fun), liftSeq(args))
}
case m.Term.Update(fun, argss, rhs) =>
select("scala.meta.Term.Update").appliedTo(lift(fun), liftSeqSeq(argss), lift(rhs))
case m.Decl.Def(mods, name, tparams, paramss, tpe) =>
select("scala.meta.Decl.Def").appliedTo(liftSeq(mods), lift(name), liftSeq(tparams), liftSeqSeq(paramss), lift(tpe))
case m.Defn.Def(mods, name, tparams, paramss, tpe, body) =>
select("scala.meta.Defn.Def").appliedTo(liftSeq(mods), lift(name), liftSeq(tparams), liftSeqSeq(paramss), liftOpt(tpe), lift(body))
case m.Defn.Macro(mods, name, tparams, paramss, tpe, body) =>
select("scala.meta.Defn.Macro").appliedTo(liftSeq(mods), lift(name), liftSeq(tparams), liftSeqSeq(paramss), liftOpt(tpe), lift(body))
case m.Ctor.Primary(mods, name, paramss) =>
select("scala.meta.Ctor.Primary").appliedTo(liftSeq(mods), lift(name), liftSeqSeq(paramss))
case m.Ctor.Secondary(mods, name, paramss, body) =>
select("scala.meta.Ctor.Secondary").appliedTo(liftSeq(mods), lift(name), liftSeqSeq(paramss), lift(body))
case m.Template(early, parents, self, stats) =>
select("scala.meta.Template").appliedTo(liftSeq(early), liftSeq(parents), lift(self), liftOptSeq(stats))
// We cannot handle Seq[Seq[_]] or Opt[Seq[_]] because of erasure
case tree: m.Tree =>
val name = "scala.meta." + tree.productPrefix
val args = tree.productIterator.toList.map(liftCommon)
select(name).appliedTo(args: _*)
}).withPos(this.tree.pos)
}
|
liufengyun/eden
|
src/main/scala/meta/eden/quasiquote/Quote.scala
|
Scala
|
bsd-3-clause
| 9,335 |
package net.scalytica.symbiotic.core.facades
import org.scalajs.jquery.JQuery
import scala.scalajs.js
object Bootstrap {
implicit def jquery2Bootstrap(jquery: JQuery): Bootstrap =
jquery.asInstanceOf[Bootstrap]
}
@js.native
trait Bootstrap extends JQuery {
def affix(options: Option[AffixOptions] = None): this.type = js.native
def modal(action: String): this.type = js.native
}
@js.native
trait ModalOptions extends js.Object {
var backdrop: Boolean = js.native
var keyboard: Boolean = js.native
var show: Boolean = js.native
var remote: String = js.native
}
@js.native
trait ModalOptionsBackdropString extends js.Object {
var backdrop: String = js.native
var keyboard: Boolean = js.native
var show: Boolean = js.native
var remote: String = js.native
}
@js.native
trait TooltipOptions extends js.Object {
var animation: Boolean = js.native
var html: Boolean = js.native
var placement: js.Any = js.native
var selector: String = js.native
var title: js.Any = js.native
var trigger: String = js.native
var delay: js.Any = js.native
var container: js.Any = js.native
}
@js.native
trait PopoverOptions extends js.Object {
var animation: Boolean = js.native
var html: Boolean = js.native
var placement: js.Any = js.native
var selector: String = js.native
var trigger: String = js.native
var title: js.Any = js.native
var content: js.Any = js.native
var delay: js.Any = js.native
var container: js.Any = js.native
}
@js.native
trait ScrollSpyOptions extends js.Object {
var offset: Double = js.native
}
@js.native
trait CollapseOptions extends js.Object {
var parent: js.Any = js.native
var toggle: Boolean = js.native
}
@js.native
trait CarouselOptions extends js.Object {
var interval: Double = js.native
var pause: String = js.native
}
@js.native
trait TypeaheadOptions extends js.Object {
var source: js.Any = js.native
var items: Double = js.native
var minLength: Double = js.native
var matcher: js.Function1[js.Any, Boolean] = js.native
var sorter: js.Function1[js.Array[js.Any], js.Array[js.Any]] = js.native
var updater: js.Function1[js.Any, Any] = js.native
var highlighter: js.Function1[js.Any, String] = js.native
}
@js.native
trait AffixOptions extends js.Object {
var offset: Int = js.native
var target: js.Any = js.native
}
@js.native
trait BootstrapAffix extends AffixOptions {
var affix: JQuery = js.native
}
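// Usage sketch for the facade above; "#uploadModal" is an assumed element id.
object BootstrapFacadeSketch {
  import org.scalajs.jquery.jQuery
  import Bootstrap._
  def showModal(): Unit = jQuery("#uploadModal").modal("show")
}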
|
kpmeen/symbiotic
|
examples/symbiotic-client/src/main/scala/net/scalytica/symbiotic/core/facades/Bootstrap.scala
|
Scala
|
apache-2.0
| 2,674 |
package org.positronicnet.sample.contacts
import org.positronicnet.ui._
import org.positronicnet.notifications.Actions._
import org.positronicnet.notifications.Future
import org.positronicnet.content.PositronicContentResolver
import android.util.Log
import android.os.Bundle
import android.content.Context
import android.view.{View, LayoutInflater}
class EditContactActivity
extends AggregatedContactActivity( layoutResourceId = R.layout.edit_contact )
with ViewUtils
{
onCreate {
findView( TR.save_button ).onClick {
doSave // will finish on data saved
}
findView( TR.revert_button ).onClick {
finish
}
}
// Special treatment for the "Back" button
override def onBackPressed = {
dialogResultMatchFromContext( this, R.string.do_what_on_back ) (
dialogCase( R.string.save_contact ){ doSave },
dialogCase( R.string.revert_contact ){ finish },
dialogCase( R.string.cancel_back ){ /* nothing */ }
)
}
// Loading a state into our editor widgets
// (invoked by AggregatedContactActivity base code, on start or restart)
def bindContactState = {
val editorContainer = findView( TR.raw_contact_editors )
editorContainer.removeAllViews
val inflater = getSystemService( Context.LAYOUT_INFLATER_SERVICE )
.asInstanceOf[ LayoutInflater ]
for (rawState <- contactState.rawContactEditStates) {
val rawEditor = inflater.inflate( R.layout.edit_raw_contact,
editorContainer, false )
rawEditor.asInstanceOf[ RawContactEditor ].bindState( rawState )
editorContainer.addView( rawEditor )
}
}
// Updating the state from what's displayed in the editor widgets.
// (Invoked by AggregatedContactActivity code on save, and by doSave below.)
def syncContactState = {
val editorContainer = findView( TR.raw_contact_editors )
for (ed <- editorContainer.childrenOfType[ RawContactEditor ])
ed.updateState
}
// Doing a save
def doSave = {
syncContactState
val batch = contactState.saveBatch
PositronicContentResolver ! batch.onSuccess{ finish }.onFailure{
toastShort("Error saving; see log") }
}
}
|
rst/positronic_net
|
sample/contacts_app/src/main/scala/EditContactActivity.scala
|
Scala
|
bsd-3-clause
| 2,220 |
package simple
import akka.actor.ActorSystem
import akka.testkit.{TestActorRef, TestKit}
import org.scalatest.{Matchers, WordSpecLike}
import support.TerminateAfterAll
class StatefulActorTest extends TestKit(ActorSystem("testsys")) with WordSpecLike with Matchers with TerminateAfterAll {
"The Stateful Actor" must {
"increase the counter after each received message" in {
val actorRef = TestActorRef[StatefulActor]
val actor = actorRef.underlyingActor
actor.count should be (0)
actorRef ! "Hello"
actor.count should be (1)
actorRef ! "Is it me you are looking for?"
actor.count should be (2)
}
"store the received message content in its state" in {
val actorRef = TestActorRef[StatefulActor]
val actor = actorRef.underlyingActor
actor.state should be ("Nog Niks")
actorRef ! "Hallo"
actor.state should be ("Hallo")
actorRef ! "Quit"
actor.state should be ("Done")
}
}
}
|
jvorhauer/akka-workshop
|
exercises/speedcam/src/test/scala/simple/StatefulActorTest.scala
|
Scala
|
apache-2.0
| 987 |
package org.sofi.deadman.component.writer
import akka.actor.ActorLogging
import com.rbmhtechnology.eventuate.EventsourcedWriter
import org.sofi.deadman.model._
import scala.concurrent.Future
trait TaskWriter[T] extends EventsourcedWriter[Long, Unit] with ActorLogging with NoTasks {
// Implicit execution context
protected implicit val executionContext = context.dispatcher
// Batch models during event processing.
private var cache: Vector[T] = Vector.empty
// Add a model to the cache collection
def batch(t: T): Unit = cache = cache :+ t
// Asynchronously writes the cache and sequence number of the last processed event to the database.
def write(): Future[Unit] = {
val nr = lastSequenceNr
val res = for {
      _ ← write(cache)
      _ ← WriteProgress.write(writerId, nr)
} yield ()
cache = Vector.empty // clear so that events can be processed while the write is in progress.
res
}
// Reads the sequence number of the last update; called only once after writer start or restart.
def read(): Future[Long] = WriteProgress.read(writerId)
// Indicates the start position for further reads from the event log.
override def readSuccess(result: Long): Option[Long] = Some(result + 1L)
// The ID of this writer
def writerId: String
// Save a series of models to a DB
def write(t: Vector[T]): Future[Unit]
}
|
SocialFinance/deadman-switch
|
core/src/main/scala/org/sofi/deadman/component/writer/TaskWriter.scala
|
Scala
|
bsd-3-clause
| 1,374 |
/*
 * Copyright 2014 – 2018 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalax.transducers
import scalax.transducers.internal.Reduced
trait Reducer[@specialized(Int, Long, Double, Char, Boolean) A, R] extends ((R, A, Reduced) ⇒ R) with (R ⇒ R) {
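  // apply(r, a, s) is the step function that folds the element a into the result r,
  // apply(r) is the completion step, and prepare is invoked once before the reduction starts.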
def apply(r: R, a: A, s: Reduced): R
def apply(r: R): R
def prepare(r: R, s: Reduced): R
}
|
knutwalker/transducers-scala
|
api/src/main/scala/scalax/transducers/Reducer.scala
|
Scala
|
apache-2.0
| 890 |
trait Analyzer extends Typers with Infer {
val global: Global
}
trait Definitions { self: SymbolTable =>
object definitions extends DefinitionsClass
def memb: TermSymbol = null
class DefinitionsClass {
def Predef_??? = memb
class RunDefinitions {
lazy val Predef_??? = DefinitionsClass.this.Predef_???
val x = 123
}
}
}
abstract class SymbolTable extends Symbols with Definitions {
}
trait Symbols { self: SymbolTable =>
class TermSymbol
class Symbol
}
trait Typers {
self: Analyzer =>
sealed class B[+T]
case class A[+T](value: T) extends B[T]
}
class Global extends SymbolTable {
lazy val analyzer = new {val global: Global.this.type = Global.this} with Analyzer
def currentRun: Run = null
class Run {
val runDefinitions: definitions.RunDefinitions = null
}
}
trait Infer {
self: Analyzer =>
import global._
def freshVar(s: Symbol): Int = 123
}
trait Validators {
self: DefaultMacroCompiler =>
import global._
import analyzer._
trait Validator {
self: MacroImplRefCompiler =>
lazy val atparams : List[global.Symbol] = null
atparams.map(tparam => freshVar(/*start*/tparam/*end*/))
}
}
abstract class DefaultMacroCompiler extends Validators{
val global: Global
import global._
val runDefinitions = currentRun.runDefinitions
import runDefinitions.{Predef_???, x}
class MacroImplRefCompiler extends Validator
Predef_???
x
}
//Validators.this.global.Symbol
|
triggerNZ/intellij-scala
|
testdata/typeInference/dependent/DeeperSubstitution.scala
|
Scala
|
apache-2.0
| 1,476 |
package models
case class AthleteDef(ageGroup: String, name: String, ussaNumber: String) extends Ordered[AthleteDef] {
override def toString = name
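  // Athletes sort by age group first, then by name within the same age group.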
def compare ( that: AthleteDef ) = {
if ( this.ageGroup == that.ageGroup ) {
if ( this.name < that.name )
-1
else if ( this.name > that.name )
1
else
0
} else {
if ( this.ageGroup < that.ageGroup)
-1
else
1
}
}
}
|
saine1a/IMDRaces
|
app/models/AthleteDef.scala
|
Scala
|
mit
| 456 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.workflow
import java.net.URI
import java.util.ServiceLoader
import akka.event.LoggingAdapter
import com.google.common.io.ByteStreams
import grizzled.slf4j.Logging
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.json4s.Formats
import org.json4s.JObject
import org.json4s.JValue
import org.json4s.native.JsonMethods._
import scala.collection.JavaConversions._
import scala.collection.mutable
class EngineServerPluginContext(
val plugins: mutable.Map[String, mutable.Map[String, EngineServerPlugin]],
val pluginParams: mutable.Map[String, JValue],
val log: LoggingAdapter) {
def outputBlockers: Map[String, EngineServerPlugin] =
plugins.getOrElse(EngineServerPlugin.outputBlocker, Map.empty).toMap
def outputSniffers: Map[String, EngineServerPlugin] =
plugins.getOrElse(EngineServerPlugin.outputSniffer, Map.empty).toMap
}
object EngineServerPluginContext extends Logging {
implicit val formats: Formats = DefaultFormats
def apply(log: LoggingAdapter, engineVariant: String): EngineServerPluginContext = {
val plugins = mutable.Map[String, mutable.Map[String, EngineServerPlugin]](
EngineServerPlugin.outputBlocker -> mutable.Map(),
EngineServerPlugin.outputSniffer -> mutable.Map())
val pluginParams = mutable.Map[String, JValue]()
val serviceLoader = ServiceLoader.load(classOf[EngineServerPlugin])
val variantJson = parse(stringFromFile(engineVariant))
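    // The engine variant JSON is expected to contain (sketch):
    //   { ..., "plugins": { "<pluginName>": { "enabled": true, ... }, ... } }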
    (variantJson \ "plugins").extractOpt[JObject].foreach { pluginDefs =>
pluginDefs.obj.foreach { pluginParams += _ }
}
serviceLoader foreach { service =>
pluginParams.get(service.pluginName) map { params =>
          if ((params \ "enabled").extractOrElse(false)) {
info(s"Plugin ${service.pluginName} is enabled.")
plugins(service.pluginType) += service.pluginName -> service
} else {
info(s"Plugin ${service.pluginName} is disabled.")
}
} getOrElse {
info(s"Plugin ${service.pluginName} is disabled.")
}
}
new EngineServerPluginContext(
plugins,
pluginParams,
log)
}
private def stringFromFile(filePath: String): String = {
try {
val uri = new URI(filePath)
val fs = FileSystem.get(uri, new Configuration())
new String(ByteStreams.toByteArray(fs.open(new Path(uri))).map(_.toChar))
} catch {
case e: java.io.IOException =>
error(s"Error reading from file: ${e.getMessage}. Aborting.")
sys.exit(1)
}
}
}
|
dszeto/incubator-predictionio
|
core/src/main/scala/org/apache/predictionio/workflow/EngineServerPluginContext.scala
|
Scala
|
apache-2.0
| 3,438 |
package org.fathens.colorworks.iccprofile.tag
import java.io._
import _root_.org.fathens.colorworks.binarychain._
import _root_.org.fathens.colorworks.iccprofile._
object TypeViewingConditions extends ElementBuilder[TypeViewingConditions] {
val typeSignature = "view"
def build(commons: TagElement.CommonHeads, ins: InputStream, length: Long) = {
new TypeViewingConditions(
commons,
XYZNumber(ins),
XYZNumber(ins),
NumberU32(ins)
)
}
}
class TypeViewingConditions(commons: TagElement.CommonHeads,
val illuminant: XYZNumber,
val surround: XYZNumber,
val illuminantType: NumberU32) extends TagElement(commons, illuminant, surround, illuminantType)
|
sawatani/ColorWorks
|
src/main/scala/org/fathens/colorworks/iccprofile/tag/TypeViewingConditions.scala
|
Scala
|
mit
| 770 |
package coursier.cli.util
import argonaut.Parse
import utest._
object JsonReportTests extends TestSuite {
val tests = Tests {
test("empty JsonReport should be empty") {
val report: String = JsonReport[String](Vector.empty, Map())(
children = _ => Vector.empty,
reconciledVersionStr = _ => "",
requestedVersionStr = _ => "",
getFile = _ => Option(""),
exclusions = _ => Set.empty
)
assert(
        report == "{\"conflict_resolution\":{},\"dependencies\":[],\"version\":\"0.1.0\"}"
)
}
test("JsonReport containing two deps should not be empty") {
val children = Map("a" -> Seq("b"), "b" -> Seq())
val report: String = JsonReport[String](
roots = Vector("a", "b"),
conflictResolutionForRoots = Map()
)(
children = children(_).toVector,
reconciledVersionStr = s => s"$s:reconciled",
requestedVersionStr = s => s"$s:requested",
getFile = _ => Option(""),
exclusions = _ => Set.empty
)
val reportJson = Parse.parse(report)
val expectedReportJson = Parse.parse(
"""{
| "conflict_resolution": {},
| "dependencies": [
| {
| "coord": "a:reconciled",
| "file": "",
| "directDependencies": [ "b:reconciled" ],
| "dependencies": [ "b:reconciled" ]
| },
| {
| "coord": "b:reconciled",
| "file": "",
| "directDependencies": [],
| "dependencies": []
| }
| ],
| "version": "0.1.0"
|}""".stripMargin
)
assert(reportJson == expectedReportJson)
}
test(
"JsonReport containing two deps should be sorted alphabetically regardless of input order"
) {
val children = Map("a" -> Seq("b"), "b" -> Seq())
val report: String = JsonReport[String](
roots = Vector("b", "a"),
conflictResolutionForRoots = Map()
)(
children = children(_).toVector,
reconciledVersionStr = s => s"$s:reconciled",
requestedVersionStr = s => s"$s:requested",
getFile = _ => Option(""),
exclusions = _ => Set.empty
)
val reportJson = Parse.parse(report)
val expectedReportJson = Parse.parse(
"""{
| "conflict_resolution": {},
| "dependencies": [
| { "coord": "a:reconciled", "file": "", "directDependencies": [ "b:reconciled" ], "dependencies": [ "b:reconciled" ] },
| { "coord": "b:reconciled", "file": "", "directDependencies": [], "dependencies": [] }
| ],
| "version": "0.1.0"
|}""".stripMargin
)
assert(reportJson == expectedReportJson)
}
test("JsonReport should prevent walking a tree in which a dependency depends on itself") {
val children = Map("a" -> Vector("a", "b"), "b" -> Vector.empty)
val report = JsonReport[String](
roots = Vector("a", "b"),
conflictResolutionForRoots = Map.empty
)(
children = children(_),
reconciledVersionStr = s => s"$s:reconciled",
requestedVersionStr = s => s"$s:requested",
getFile = _ => Option(""),
exclusions = _ => Set.empty
)
val reportJson = Parse.parse(report)
val expectedReportJson = Parse.parse(
"""{
| "conflict_resolution": {},
| "dependencies": [
| { "coord": "a:reconciled", "file": "", "directDependencies": [ "b:reconciled" ], "dependencies": [ "b:reconciled" ] },
| { "coord": "b:reconciled", "file": "", "directDependencies": [], "dependencies": [] }
| ],
| "version": "0.1.0"
|}""".stripMargin
)
assert(reportJson == expectedReportJson)
}
test("JsonReport should prevent walking a tree with cycles") {
val children =
Map("a" -> Vector("b"), "b" -> Vector("c"), "c" -> Vector("a", "d"), "d" -> Vector.empty)
val report = JsonReport[String](
roots = Vector("a", "b", "c"),
conflictResolutionForRoots = Map.empty
)(
children = children(_),
reconciledVersionStr = s => s"$s:reconciled",
requestedVersionStr = s => s"$s:requested",
getFile = _ => Option(""),
exclusions = _ => Set.empty
)
val reportJson = Parse.parse(report)
val expectedReportJson = Parse.parse(
"""{
| "conflict_resolution": {},
| "dependencies": [
| { "coord": "a:reconciled", "file": "", "directDependencies": [ "b:reconciled" ], "dependencies": [ "b:reconciled", "c:reconciled", "d:reconciled" ] },
| { "coord": "b:reconciled", "file": "", "directDependencies": [ "c:reconciled" ], "dependencies": [ "a:reconciled", "c:reconciled", "d:reconciled" ] },
| { "coord": "c:reconciled", "file": "", "directDependencies": [ "a:reconciled", "d:reconciled" ], "dependencies": [ "a:reconciled", "b:reconciled", "d:reconciled" ] }
| ],
| "version": "0.1.0"
|}""".stripMargin
)
assert(reportJson == expectedReportJson)
}
}
}
|
coursier/coursier
|
modules/cli/src/test/scala/coursier/cli/util/JsonReportTests.scala
|
Scala
|
apache-2.0
| 5,256 |
package com.github.aselab.activerecord
private[activerecord] object MethodMacros extends Deprecations with TypeSafeAssignable with TypeSafeFinder
|
aselab/scala-activerecord
|
macro/src/main/scala-2.13/MethodMacros.scala
|
Scala
|
mit
| 147 |
package com.github.diegopacheco.sandbox.scala.akka.lifecycle
import akka.actor.Actor
import akka.event.Logging
class LifeCycleActor extends Actor {
val log = Logging(context.system, this)
def receive = {
    case m: Any => log.info("received unknown message {" + m + "}")
}
override def preStart(): Unit = {
log.info("Pre Start")
}
override def postStop(): Unit = {
log.info("Post Stop")
}
override def preRestart(reason: Throwable, message: Option[Any]): Unit = {
log.info("Pre Restart")
}
override def postRestart(reason: Throwable): Unit = {
log.info("Post Restart")
}
}
object MainLoggingApp extends App {
import akka.actor.ActorSystem
import akka.actor.Props
import akka.actor._
val system = ActorSystem("LifeActorSystem")
val myActor = system.actorOf(Props[LifeCycleActor], "myactor2")
myActor ! "hi"
Thread.sleep(3000L)
system.stop(myActor)
Thread.sleep(3000L)
myActor ! "hi"
myActor ! PoisonPill
system.shutdown()
}
|
diegopacheco/scala-playground
|
scala_11_akka_23_full_playground/src/main/scala/com/github/diegopacheco/sandbox/scala/akka/lifecycle/LifeCycleActor.scala
|
Scala
|
unlicense
| 1,090 |
package com.softwaremill
package object macwire extends Tagging with Macwire {
private[macwire] type InstanceFactoryMap = Map[Class[_], () => AnyRef]
}
|
rcirka/macwire
|
macros/src/main/scala/com/softwaremill/macwire/package.scala
|
Scala
|
apache-2.0
| 155 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600e.v2.retriever.CT600EBoxRetriever
import uk.gov.hmrc.ct.ct600e.validations.ValidateDeclarationNameOrStatus
case class E1030(value: Option[String]) extends CtBoxIdentifier("Claimer's name") with CtOptionalString with Input
with ValidatableBox[CT600EBoxRetriever] with ValidateDeclarationNameOrStatus[CT600EBoxRetriever] {
override def validate(boxRetriever: CT600EBoxRetriever): Set[CtValidation] = validateDeclarationNameOrStatus("E1030", this)
}
|
pncampbell/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E1030.scala
|
Scala
|
apache-2.0
| 1,146 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CP28(value: Option[Int]) extends CtBoxIdentifier(name = "Depreciation") with CtOptionalInteger with Input
with ValidatableBox[ComputationsBoxRetriever] {
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = {
validateZeroOrPositiveInteger(this)
}
}
object CP28 {
def apply(int: Int): CP28 = CP28(Some(int))
}
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/computations/CP28.scala
|
Scala
|
apache-2.0
| 1,106 |
package com.delprks.productservicesprototype.api.directives
import akka.http.scaladsl.model.StatusCode
import akka.http.scaladsl.server.Directives.complete
import akka.http.scaladsl.server.StandardRoute
import com.delprks.productservicesprototype.api.directives.ErrorResponseDirectives.ErrorResponseData
import com.delprks.productservicesprototype.domain.marshalling.JsonSerializers
import com.delprks.productservicesprototype.domain.response.ErrorResponse
trait ErrorResponseDirectives extends JsonSerializers {
def completeWithError(schemaUrl: String, documentationUrl: String)
(errorResponseData: ErrorResponseData): StandardRoute = {
val errorResponse = ErrorResponse(
jsonSchemaUrl = schemaUrl,
documentationUrl = documentationUrl,
httpStatus = errorResponseData.statusCode.intValue,
message = errorResponseData.message)
complete(errorResponseData.statusCode, errorResponse)
}
}
object ErrorResponseDirectives {
case class ErrorResponseData(statusCode: StatusCode, message: String)
}
|
delprks/product-services-prototype
|
src/main/scala/com/delprks/productservicesprototype/api/directives/ErrorResponseDirectives.scala
|
Scala
|
mit
| 1,040 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.impl.aaa.ldap
import kumoi.shell.event._
import kumoi.shell.aaa._
import kumoi.core.or._
import kumoi.shell.or._
/**
*
* @author Akiyoshi SUGIKI
*/
class LdapColdRole extends ORObject[ColdRole] with ColdRole {
private var nm = "unnamed"
private var descr = ""
private var gidNum = 1000
//def name_=(n: String) { nm = n }
def name_=(na: (String, AAA)) { nm = na._1 }
override def name(implicit auth: AAA) = { nm }
//def id_=(r: String) { nm = r }
def id_=(r: (String, AAA)) { nm = r._1 }
def id(implicit auth: AAA) = { nm }
def add(elem: UInfo)(implicit auth: AAA) {
elem match {
case CanonicalName(n) => nm = n
case Description(d) => descr = d
case GidNumber(g) => gidNum = g
}
}
def remove(elem: UInfo)(implicit auth: AAA) {
elem match {
case CanonicalName(_) => nm = "unnamed"
case Description(_) => descr = ""
case GidNumber(_) => gidNum = 1000
}
}
//def description(implicit auth: AAA) = readop(this, auth) { descr }
//def gidNumber(implicit auth: AAA) = readop(this, auth) { gidNum }
override def info(implicit auth: AAA) = List(CanonicalName(nm), Description(descr), GidNumber(gidNum),
ObjectClass("top"), ObjectClass("posixGroup"))
override def genEvent(e: Exception) = RoleError(nm, nm, e)
override def toString = nm
}
|
axi-sugiki/kumoi
|
src/kumoi/impl/aaa/ldap/LdapColdRole.scala
|
Scala
|
apache-2.0
| 1,941 |
package com.datastax.spark.connector.writer
import java.io.IOException
import com.datastax.driver.core.BatchStatement.Type
import com.datastax.driver.core._
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.metrics.OutputMetricsUpdater
import com.datastax.spark.connector.util.CountingIterator
import org.apache.spark.{Logging, TaskContext}
import scala.collection._
/** Writes RDD data into given Cassandra table.
  * Individual column values are extracted from RDD objects using the given [[RowWriter]].
* Then, data are inserted into Cassandra with batches of CQL INSERT statements.
* Each RDD partition is processed by a single thread. */
class TableWriter[T] private (
connector: CassandraConnector,
tableDef: TableDef,
rowWriter: RowWriter[T],
writeConf: WriteConf) extends Serializable with Logging {
val keyspaceName = tableDef.keyspaceName
val tableName = tableDef.tableName
val columnNames = rowWriter.columnNames diff writeConf.optionPlaceholders
val columns = columnNames.map(tableDef.columnByName)
implicit val protocolVersion = connector.withClusterDo { _.getConfiguration.getProtocolOptions.getProtocolVersionEnum }
val defaultTTL = writeConf.ttl match {
case TTLOption(StaticWriteOptionValue(value)) => Some(value)
case _ => None
}
val defaultTimestamp = writeConf.timestamp match {
case TimestampOption(StaticWriteOptionValue(value)) => Some(value)
case _ => None
}
  private def quote(name: String): String =
    "\"" + name + "\""
private[connector] lazy val queryTemplateUsingInsert: String = {
val quotedColumnNames: Seq[String] = columnNames.map(quote)
val columnSpec = quotedColumnNames.mkString(", ")
val valueSpec = quotedColumnNames.map(":" + _).mkString(", ")
val ttlSpec = writeConf.ttl match {
case TTLOption(PerRowWriteOptionValue(placeholder)) => Some(s"TTL :$placeholder")
case TTLOption(StaticWriteOptionValue(value)) => Some(s"TTL $value")
case _ => None
}
val timestampSpec = writeConf.timestamp match {
case TimestampOption(PerRowWriteOptionValue(placeholder)) => Some(s"TIMESTAMP :$placeholder")
case TimestampOption(StaticWriteOptionValue(value)) => Some(s"TIMESTAMP $value")
case _ => None
}
val options = List(ttlSpec, timestampSpec).flatten
val optionsSpec = if (options.nonEmpty) s"USING ${options.mkString(" AND ")}" else ""
s"INSERT INTO ${quote(keyspaceName)}.${quote(tableName)} ($columnSpec) VALUES ($valueSpec) $optionsSpec".trim
}
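  // For example, with two columns and a static TTL of 3600 the template above becomes:
  //   INSERT INTO "ks"."tab" ("a", "b") VALUES (:a, :b) USING TTL 3600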
private lazy val queryTemplateUsingUpdate: String = {
val (primaryKey, regularColumns) = columns.partition(_.isPrimaryKeyColumn)
val (counterColumns, nonCounterColumns) = regularColumns.partition(_.isCounterColumn)
def quotedColumnNames(columns: Seq[ColumnDef]) = columns.map(_.columnName).map(quote)
val setNonCounterColumnsClause = quotedColumnNames(nonCounterColumns).map(c => s"$c = :$c")
val setCounterColumnsClause = quotedColumnNames(counterColumns).map(c => s"$c = $c + :$c")
val setClause = (setNonCounterColumnsClause ++ setCounterColumnsClause).mkString(", ")
val whereClause = quotedColumnNames(primaryKey).map(c => s"$c = :$c").mkString(" AND ")
s"UPDATE ${quote(keyspaceName)}.${quote(tableName)} SET $setClause WHERE $whereClause"
}
private val isCounterUpdate =
tableDef.allColumns.exists(_.isCounterColumn)
private val queryTemplate: String = {
if (isCounterUpdate)
queryTemplateUsingUpdate
else
queryTemplateUsingInsert
}
private def prepareStatement(session: Session): PreparedStatement = {
try {
session.prepare(queryTemplate)
}
catch {
case t: Throwable =>
throw new IOException(s"Failed to prepare statement $queryTemplate: " + t.getMessage, t)
}
}
def batchRoutingKey(session: Session, routingKeyGenerator: RoutingKeyGenerator)(bs: BoundStatement): Any = {
writeConf.batchLevel match {
case BatchLevel.All => 0
case BatchLevel.ReplicaSet =>
if (bs.getRoutingKey == null)
bs.setRoutingKey(routingKeyGenerator(bs))
session.getCluster.getMetadata.getReplicas(keyspaceName, bs.getRoutingKey).hashCode() // hash code is enough
case BatchLevel.Partition =>
if (bs.getRoutingKey == null) {
bs.setRoutingKey(routingKeyGenerator(bs))
}
bs.getRoutingKey.duplicate()
}
}
/** Main entry point */
def write(taskContext: TaskContext, data: Iterator[T]) {
val updater = OutputMetricsUpdater(taskContext, writeConf)
connector.withSessionDo { session =>
val rowIterator = new CountingIterator(data)
val stmt = prepareStatement(session).setConsistencyLevel(writeConf.consistencyLevel)
val queryExecutor = new QueryExecutor(session, writeConf.parallelismLevel,
Some(updater.batchSucceeded), Some(updater.batchFailed))
val routingKeyGenerator = new RoutingKeyGenerator(tableDef, columnNames)
val batchType = if (isCounterUpdate) Type.COUNTER else Type.UNLOGGED
val boundStmtBuilder = new BoundStatementBuilder(rowWriter, stmt, protocolVersion)
val batchStmtBuilder = new BatchStatementBuilder(batchType, routingKeyGenerator, writeConf.consistencyLevel)
val batchKeyGenerator = batchRoutingKey(session, routingKeyGenerator) _
val batchBuilder = new GroupingBatchBuilder(boundStmtBuilder, batchStmtBuilder, batchKeyGenerator,
writeConf.batchSize, writeConf.batchBufferSize, rowIterator)
val rateLimiter = new RateLimiter(writeConf.throughputMiBPS * 1024 * 1024, 1024 * 1024)
logDebug(s"Writing data partition to $keyspaceName.$tableName in batches of ${writeConf.batchSize}.")
for (stmtToWrite <- batchBuilder) {
queryExecutor.executeAsync(stmtToWrite)
rateLimiter.maybeSleep(stmtToWrite.bytesCount)
}
queryExecutor.waitForCurrentlyExecutingTasks()
if (!queryExecutor.successful)
throw new IOException(s"Failed to write statements to $keyspaceName.$tableName.")
val duration = updater.finish() / 1000000000d
logInfo(f"Wrote ${rowIterator.count} rows to $keyspaceName.$tableName in $duration%.3f s.")
}
}
}
object TableWriter {
private def checkColumns(table: TableDef, columnNames: Seq[String]) = {
checkMissingColumns(table, columnNames)
checkMissingPrimaryKeyColumns(table, columnNames)
}
private def checkMissingColumns(table: TableDef, columnNames: Seq[String]) {
val allColumnNames = table.allColumns.map(_.columnName)
val missingColumns = columnNames.toSet -- allColumnNames
if (missingColumns.nonEmpty)
throw new IllegalArgumentException(
s"Column(s) not found: ${missingColumns.mkString(", ")}")
}
private def checkMissingPrimaryKeyColumns(table: TableDef, columnNames: Seq[String]) {
val primaryKeyColumnNames = table.primaryKey.map(_.columnName)
val missingPrimaryKeyColumns = primaryKeyColumnNames.toSet -- columnNames
if (missingPrimaryKeyColumns.nonEmpty)
throw new IllegalArgumentException(
s"Some primary key columns are missing in RDD or have not been selected: ${missingPrimaryKeyColumns.mkString(", ")}")
}
def apply[T : RowWriterFactory](
connector: CassandraConnector,
keyspaceName: String,
tableName: String,
columnNames: ColumnSelector,
writeConf: WriteConf): TableWriter[T] = {
val schema = Schema.fromCassandra(connector, Some(keyspaceName), Some(tableName))
val tableDef = schema.tables.headOption
.getOrElse(throw new IOException(s"Table not found: $keyspaceName.$tableName"))
val selectedColumns = columnNames match {
case SomeColumns(names @ _*) => names.map {
case ColumnName(columnName, _) => columnName
case TTL(_, _) | WriteTime(_, _) =>
throw new IllegalArgumentException(
s"Neither TTL nor WriteTime fields are supported for writing. " +
s"Use appropriate write configuration settings to specify TTL or WriteTime.")
}
case AllColumns => tableDef.allColumns.map(_.columnName).toSeq
case PartitionKeyColumns => tableDef.partitionKey.map(_.columnName)
}
val rowWriter = implicitly[RowWriterFactory[T]].rowWriter(
tableDef.copy(regularColumns = tableDef.regularColumns ++ writeConf.optionsAsColumns(keyspaceName, tableName)),
selectedColumns ++ writeConf.optionPlaceholders, columnNames.aliases)
checkColumns(tableDef, selectedColumns)
new TableWriter[T](connector, tableDef, rowWriter, writeConf)
}
}
|
brkyvz/spark-cassandra-connector
|
spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/writer/TableWriter.scala
|
Scala
|
apache-2.0
| 8,630 |
@deprecated("Suppress warnings", since="2.11")
object Test extends dotty.runtime.LegacyApp {
println(classManifest[Int])
println(classManifest[Int] eq Manifest.Int)
}
|
yusuke2255/dotty
|
tests/run/classmanifests_new_core.scala
|
Scala
|
bsd-3-clause
| 171 |
package monocle.std
import monocle.MonocleSuite
import monocle.law.discipline.PrismTests
import monocle.law.discipline.function.{EachTests, PossibleTests}
import scala.annotation.nowarn
class EitherSpec extends MonocleSuite {
checkAll("either left", PrismTests(stdLeft[String, Int]))
checkAll("either right", PrismTests(stdRight[String, String]))
checkAll("each Either", EachTests[Either[Unit, Int], Int])
checkAll("possible Either", PossibleTests[Either[Unit, Int], Int]): @nowarn
}
|
julien-truffaut/Monocle
|
test/shared/src/test/scala/monocle/std/EitherSpec.scala
|
Scala
|
mit
| 495 |
// a.scala
// Fri Jan 13 11:31:47 PST 2012
package foo {
package object bar {
def duh(n: Long) = println("long")
def duh(n: Double) = println("double")
def duh2(n: Double) = println("double")
def duh2(n: Long) = println("long")
}
package bar {
object Main {
def main(args:Array[String]) {
duh(33L)
bip.bar.duh(33L)
duh(33d)
bip.bar.duh(33d)
duh2(33L)
bip.bar.duh2(33L)
duh2(33d)
bip.bar.duh2(33d)
}
}
}
}
package bip {
trait Duh {
def duh(n: Long) = println("long")
def duh(n: Double) = println("double")
}
trait Duh2 {
def duh2(n: Double) = println("double")
def duh2(n: Long) = println("long")
}
package object bar extends Duh with Duh2 { }
package bar {
object Main {
def main(args:Array[String]) {
duh(33L)
bip.bar.duh(33L)
duh(33d)
bip.bar.duh(33d)
duh2(33L)
bip.bar.duh2(33L)
duh2(33d)
bip.bar.duh2(33d)
}
}
}
}
object Test {
def main(args: Array[String]): Unit = {
foo.bar.Main.main(null)
bip.bar.Main.main(null)
}
}
|
felixmulder/scala
|
test/files/run/t1987.scala
|
Scala
|
bsd-3-clause
| 1,173 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package types
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.result.{TypeResult, TypingContext}
/**
* @author Alexander Podkhalyuzin
* Date: 07.03.2008
*/
trait ScAnnotTypeElement extends ScTypeElement {
override protected val typeName = "TypeWithAnnotation"
def typeElement: ScTypeElement = findChildByClassScala(classOf[ScTypeElement])
protected def innerType: TypeResult[ScType] = typeElement.getType()
}
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/api/base/types/ScAnnotTypeElement.scala
|
Scala
|
apache-2.0
| 565 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.batch
import com.twitter.algebird.{ Monoid, Semigroup, Predecessible, Successible }
import com.twitter.bijection.Bijection
import java.util.Date
import com.twitter.scalding.RichDate
case class Timestamp(milliSinceEpoch: Long) extends AnyVal {
def compare(that: Timestamp) = milliSinceEpoch.compare(that.milliSinceEpoch)
def prev = copy(milliSinceEpoch = milliSinceEpoch - 1)
def next = copy(milliSinceEpoch = milliSinceEpoch + 1)
def toDate = new Date(milliSinceEpoch)
def toRichDate = new RichDate(milliSinceEpoch)
def -(other: Milliseconds) = Timestamp(milliSinceEpoch - other.toLong)
def +(other: Milliseconds) = Timestamp(milliSinceEpoch + other.toLong)
// Delta between two timestamps
def -(other: Timestamp): Milliseconds = Milliseconds(milliSinceEpoch - other.milliSinceEpoch)
def incrementMillis(millis: Long) = Timestamp(milliSinceEpoch + millis)
def incrementSeconds(seconds: Long) = Timestamp(milliSinceEpoch + (seconds * 1000L))
def incrementMinutes(minutes: Long) = Timestamp(milliSinceEpoch + (minutes * 1000 * 60))
def incrementHours(hours: Long) = Timestamp(milliSinceEpoch + (hours * 1000 * 60 * 60))
def incrementDays(days: Long) = Timestamp(milliSinceEpoch + (days * 1000 * 60 * 60 * 24))
}
object Timestamp {
val Max = Timestamp(Long.MaxValue)
val Min = Timestamp(Long.MinValue)
def now: Timestamp = Timestamp(System.currentTimeMillis)
implicit def fromDate(d: Date) = Timestamp(d.getTime)
implicit val orderingOnTimestamp: Ordering[Timestamp] = Ordering.by(_.milliSinceEpoch)
implicit val maxTSMonoid: Monoid[Timestamp] = Monoid.from(Timestamp.Min)(orderingOnTimestamp.max(_, _))
implicit val timestamp2Date: Bijection[Timestamp, Date] =
Bijection.build[Timestamp, Date] { ts => new Date(ts.milliSinceEpoch) } { fromDate(_) }
implicit val timestamp2Long: Bijection[Timestamp, Long] =
Bijection.build[Timestamp, Long] { _.milliSinceEpoch } { Timestamp(_) }
implicit val timestampSuccessible: Successible[Timestamp] = new Successible[Timestamp] {
def next(old: Timestamp) = if (old.milliSinceEpoch != Long.MaxValue) Some(old.next) else None
def ordering: Ordering[Timestamp] = Timestamp.orderingOnTimestamp
def partialOrdering = Timestamp.orderingOnTimestamp
}
implicit val timestampPredecessible: Predecessible[Timestamp] = new Predecessible[Timestamp] {
def prev(old: Timestamp) = if (old.milliSinceEpoch != Long.MinValue) Some(old.prev) else None
def ordering: Ordering[Timestamp] = Timestamp.orderingOnTimestamp
def partialOrdering = Timestamp.orderingOnTimestamp
}
  // This is a right semigroup: given any two Timestamps it just takes the one on the right.
  // The reason we did this is that we don't want to give a stronger contract to the semigroup
  // than the store actually respects.
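  // e.g. plus(Timestamp(1L), Timestamp(2L)) == Timestamp(2L),
  // and sumOption(Seq(a, b, c)) == Some(c).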
val rightSemigroup = new Semigroup[Timestamp] {
def plus(a: Timestamp, b: Timestamp) = b
override def sumOption(ti: TraversableOnce[Timestamp]) =
if (ti.isEmpty) None
else {
val iter = ti.toIterator
var last: Timestamp = iter.next
while (iter.hasNext) {
last = iter.next
}
Some(last)
}
}
}
|
nabarunnag/Summingbird_dev
|
summingbird-batch/src/main/scala/com/twitter/summingbird/batch/TimeStamp.scala
|
Scala
|
apache-2.0
| 3,772 |
package ca.innovativemedicine.vcf
sealed trait JoinType
object JoinType {
case object JoinAfter extends JoinType
case object JoinBefore extends JoinType
case object JoinReverseAfter extends JoinType
case object JoinReverseBefore extends JoinType
}
case class Breakend(alt: String, chromosome: Either[VcfId, String], position: Int, joinType: JoinType) {
import JoinType._
private def location = chromosome.fold(_.toString, identity) + ":" + position
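  // e.g. Breakend("G", Right("2"), 321682, JoinType.JoinAfter).toBreakendString == "G[2:321682["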
def toBreakendString: String = joinType match {
case JoinAfter => alt + "[" + location + "["
case JoinBefore => "]" + location + "]" + alt
case JoinReverseAfter => alt + "]" + location + "]"
case JoinReverseBefore => "[" + location + "[" + alt
}
}
|
innovativemedicine/vcfimp
|
vcfimp/src/main/scala/ca/innovativemedicine/vcf/Breakend.scala
|
Scala
|
bsd-2-clause
| 738 |
package sample.blog.stages
import akka.actor.{ Actor, ActorRef, ActorSystem, Props, Stash }
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.stage.GraphStageLogic.StageActor
import akka.stream.stage._
import akka.stream._
import org.slf4j.LoggerFactory
import sample.blog.stages.ActorSource.InstallSource
import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration
object ActorSource {
case class InstallSource(actorRef: ActorRef)
def run = {
val log = LoggerFactory.getLogger(getClass)
implicit val system = ActorSystem("actor-stage")
implicit val materializer = ActorMaterializer()
val publisher: ActorRef = system.actorOf(Props(new Actor with Stash {
def receive: Receive = {
        case _: String ⇒ stash()
        case s: InstallSource ⇒
unstashAll()
context become active(s.actorRef)
}
def active(actor: ActorRef): Receive = {
        case msg: String ⇒
log.info("Actor received message, forwarding to stream: {} ", msg)
actor ! msg
}
}))
val source: Source[String, akka.NotUsed] =
Source.fromGraph(new ActorSource(publisher))
source.to(Sink.foreach(log.info("Stream received message: {} ", _)))
publisher ! "One"
publisher ! "Two"
publisher ! "Three"
}
}
/*
 A custom graph stage that creates a Source driven by a stage actor (getStageActor).
 The end result is being able to send messages to an actor and have them emitted by the Source.
*/
class ActorSource(actor: ActorRef) extends GraphStage[SourceShape[String]] {
val out: Outlet[String] = Outlet("out")
override val shape: SourceShape[String] = SourceShape(out)
override def createLogic(attributes: Attributes): GraphStageLogic =
new GraphStageLogic(shape) with StageLogging {
println(attributes.attributeList.mkString(","))
lazy val actorStage: StageActor = getStageActor(onReceive)
val buffer = mutable.Queue[String]()
override def preStart(): Unit = {
log.info("pre-starting stage, assigning StageActor to source-feeder")
actor ! InstallSource(actorStage.ref)
}
setHandler(
out,
new OutHandler {
override def onDownstreamFinish(): Unit = {
val result = buffer
if (result.nonEmpty) {
            log.debug(
              "In order to avoid message loss we need to notify the upstream that " +
                "consumed elements cannot be handled")
//1. actor ! result - resend maybe
//2. store to internal DB
completeStage()
}
completeStage()
}
override def onPull(): Unit = {
log.info("downstream: pull")
tryToPush()
}
}
)
def tryToPush(): Unit = {
if (isAvailable(out) && buffer.nonEmpty) {
val element = buffer.dequeue
log.info(s"${buffer.size} push $element")
push(out, element)
}
}
def onReceive(x: (ActorRef, Any)): Unit = {
x._2 match {
          case msg: String ⇒
log.info("published: {} ", msg)
buffer enqueue msg
//tryToPush()
          case other ⇒
failStage(
throw new Exception(
s"Unexpected message type ${other.getClass.getSimpleName}"))
}
}
}
}
// Emits at most one element per silencePeriod; elements arriving during the silence period are dropped.
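// Usage sketch (assumes an implicit materializer and scala.concurrent.duration._ in scope):
//   Source(1 to 100).via(new TimedGate[Int](1.second)).runWith(Sink.foreach(println))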
class TimedGate[A](silencePeriod: FiniteDuration) extends GraphStage[FlowShape[A, A]] {
val in = Inlet[A]("in")
val out = Outlet[A]("out")
val shape = FlowShape.of(in, out)
override def createLogic(attributes: Attributes) =
new TimerGraphStageLogic(shape) {
var open = false
setHandler(in, new InHandler {
override def onPush(): Unit = {
val elem = grab(in)
if (open) {
//drop elem
pull(in)
} else {
push(out, elem)
open = true
scheduleOnce(None, silencePeriod)
}
}
})
setHandler(out, new OutHandler {
override def onPull(): Unit = pull(in)
})
override protected def onTimer(timerKey: Any): Unit = {
open = false
}
}
}
|
haghard/akka-pq
|
src/main/scala/sample/blog/stages/ActorSource.scala
|
Scala
|
apache-2.0
| 4,224 |
/*
* Copyright 2015 - 2016 Red Bull Media House GmbH <http://www.redbullmediahouse.com> - all rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rbmhtechnology.example
import io.vertx.core.{ AsyncResult, DeploymentOptions, Handler, Vertx }
import scala.concurrent.{ ExecutionContext, Future, Promise }
import scala.reflect.ClassTag
package object vertx {
object ExampleVertxExtensions {
implicit class PromiseHandler[A](promise: Promise[A]) {
def asVertxHandler: Handler[AsyncResult[A]] = new Handler[AsyncResult[A]] {
override def handle(res: AsyncResult[A]): Unit = {
if (res.succeeded()) {
promise.success(res.result())
} else {
promise.failure(res.cause())
}
}
}
}
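    // RichVertxDeployment below uses this handler to expose verticle deployment
    // as a Future[String] that completes with the deployment ID.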
implicit class RichVertxDeployment(vertx: Vertx) {
def deploy[T](options: DeploymentOptions = new DeploymentOptions())(implicit t: ClassTag[T], ec: ExecutionContext): Future[String] = {
val promise = Promise[String]
vertx.deployVerticle(t.runtimeClass.getName, options, promise.asVertxHandler)
promise.future
}
}
}
}
|
RBMHTechnology/eventuate
|
eventuate-example-vertx/src/main/scala/com/rbmhtechnology/example/vertx/package.scala
|
Scala
|
apache-2.0
| 1,672 |
package com.campudus.test.postgresql
import org.junit.Test
import org.vertx.scala.core.json._
import com.campudus.test.{ BaseSqlTests, SqlTestVerticle }
import org.vertx.testtools.VertxAssert
class MySqlTest extends SqlTestVerticle with BaseSqlTests {
val address = "campudus.asyncdb"
val config = Json.obj("address" -> address, "connection" -> "MySQL")
override def getConfig = config
// FIXME test stuff
@Test
def something(): Unit = VertxAssert.testComplete()
// @Test
// override def selectFiltered(): Unit = super.selectFiltered()
// @Test
// override def selectEverything(): Unit = super.selectEverything()
// @Test
// override def insertUniqueProblem(): Unit = super.insertUniqueProblem()
// @Test
// override def insertMaliciousDataTest(): Unit = super.insertMaliciousDataTest()
// @Test
// override def insertTypeTest(): Unit = super.insertTypeTest()
// @Test
// override def insertCorrect(): Unit = super.insertCorrect()
// @Test
// override def createAndDropTable(): Unit = super.createAndDropTable()
// @Test
// override def multipleFields(): Unit = super.multipleFields()
// @Test
// override def simpleConnection(): Unit = super.simpleConnection()
}
|
campudus/vertx-mysql-postgresql
|
src/test/scala/com/campudus/test/mysql/MySqlTest.scala
|
Scala
|
apache-2.0
| 1,236 |
/*******************************************************************************
* Copyright (c) 2019. Carl Minden
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package com.anathema_roguelike
package stats.effects
object MultiplicativeCalculation {
def build(mc: MultiplicativeCalculation): MultiplicativeCalculation = mc
def fixed(value: Double): MultiplicativeCalculation = () => value
}
trait MultiplicativeCalculation extends Calculation {}
|
carlminden/anathema-roguelike
|
src/com/anathema_roguelike/stats/effects/MultiplicativeCalculation.scala
|
Scala
|
gpl-3.0
| 1,137 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
import org.apache.spark.sql.catalyst.plans.physical.{Partitioning, UnknownPartitioning}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.{SparkPlan, UnaryExecNode}
import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReusedExchangeExec, ShuffleExchangeExec}
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, BuildLeft, BuildRight, BuildSide}
import org.apache.spark.sql.internal.SQLConf
/**
* A rule to optimize the shuffle reader to local reader iff no additional shuffles
* will be introduced:
* 1. if the input plan is a shuffle, add local reader directly as we can never introduce
* extra shuffles in this case.
 * 2. otherwise, add a local reader to the probe side of the broadcast hash join and
 * then run `EnsureRequirements` to check whether an additional shuffle is introduced.
* If introduced, we will revert all the local readers.
*/
case class OptimizeLocalShuffleReader(conf: SQLConf) extends Rule[SparkPlan] {
import OptimizeLocalShuffleReader._
private val ensureRequirements = EnsureRequirements(conf)
// The build side is a broadcast query stage which should have been optimized using local reader
// already. So we only need to deal with probe side here.
private def createProbeSideLocalReader(plan: SparkPlan): SparkPlan = {
val optimizedPlan = plan.transformDown {
case join @ BroadcastJoinWithShuffleLeft(shuffleStage, BuildRight) =>
val localReader = createLocalReader(shuffleStage)
join.asInstanceOf[BroadcastHashJoinExec].copy(left = localReader)
case join @ BroadcastJoinWithShuffleRight(shuffleStage, BuildLeft) =>
val localReader = createLocalReader(shuffleStage)
join.asInstanceOf[BroadcastHashJoinExec].copy(right = localReader)
}
val numShuffles = ensureRequirements.apply(optimizedPlan).collect {
case e: ShuffleExchangeExec => e
}.length
// Check whether additional shuffle introduced. If introduced, revert the local reader.
if (numShuffles > 0) {
      logDebug("OptimizeLocalShuffleReader rule is not applied because" +
        " additional shuffles would be introduced.")
plan
} else {
optimizedPlan
}
}
private def createLocalReader(plan: SparkPlan): LocalShuffleReaderExec = {
plan match {
case c @ CoalescedShuffleReaderExec(s: ShuffleQueryStageExec, _) =>
LocalShuffleReaderExec(
s, getPartitionStartIndices(s, Some(c.partitionStartIndices.length)))
case s: ShuffleQueryStageExec =>
LocalShuffleReaderExec(s, getPartitionStartIndices(s, None))
}
}
// TODO: this method assumes all shuffle blocks are the same data size. We should calculate the
// partition start indices based on block size to avoid data skew.
private def getPartitionStartIndices(
shuffleStage: ShuffleQueryStageExec,
advisoryParallelism: Option[Int]): Array[Array[Int]] = {
val shuffleDep = shuffleStage.shuffle.shuffleDependency
val numReducers = shuffleDep.partitioner.numPartitions
val expectedParallelism = advisoryParallelism.getOrElse(numReducers)
val numMappers = shuffleDep.rdd.getNumPartitions
Array.fill(numMappers) {
equallyDivide(numReducers, math.max(1, expectedParallelism / numMappers)).toArray
}
}
/**
* To equally divide n elements into m buckets, basically each bucket should have n/m elements,
* for the remaining n%m elements, add one more element to the first n%m buckets each. Returns
* a sequence with length numBuckets and each value represents the start index of each bucket.
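   * For example, equallyDivide(10, 3) returns Seq(0, 4, 7), i.e. buckets of sizes 4, 3 and 3.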
*/
private def equallyDivide(numElements: Int, numBuckets: Int): Seq[Int] = {
val elementsPerBucket = numElements / numBuckets
val remaining = numElements % numBuckets
val splitPoint = (elementsPerBucket + 1) * remaining
(0 until remaining).map(_ * (elementsPerBucket + 1)) ++
(remaining until numBuckets).map(i => splitPoint + (i - remaining) * elementsPerBucket)
}
override def apply(plan: SparkPlan): SparkPlan = {
if (!conf.getConf(SQLConf.LOCAL_SHUFFLE_READER_ENABLED)) {
return plan
}
plan match {
case s: SparkPlan if canUseLocalShuffleReader(s) =>
createLocalReader(s)
case s: SparkPlan =>
createProbeSideLocalReader(s)
}
}
}
object OptimizeLocalShuffleReader {
object BroadcastJoinWithShuffleLeft {
def unapply(plan: SparkPlan): Option[(SparkPlan, BuildSide)] = plan match {
case join: BroadcastHashJoinExec if canUseLocalShuffleReader(join.left) =>
Some((join.left, join.buildSide))
case _ => None
}
}
object BroadcastJoinWithShuffleRight {
def unapply(plan: SparkPlan): Option[(SparkPlan, BuildSide)] = plan match {
case join: BroadcastHashJoinExec if canUseLocalShuffleReader(join.right) =>
Some((join.right, join.buildSide))
case _ => None
}
}
def canUseLocalShuffleReader(plan: SparkPlan): Boolean = plan match {
case s: ShuffleQueryStageExec => s.shuffle.canChangeNumPartitions
case CoalescedShuffleReaderExec(s: ShuffleQueryStageExec, _) => s.shuffle.canChangeNumPartitions
case _ => false
}
}
/**
* A wrapper of shuffle query stage, which submits one or more reduce tasks per mapper to read the
* shuffle files written by one mapper. By doing this, it's very likely to read the shuffle files
* locally, as the shuffle files that a reduce task needs to read are in one node.
*
* @param child It's usually `ShuffleQueryStageExec`, but can be the shuffle exchange node during
* canonicalization.
* @param partitionStartIndicesPerMapper A mapper usually writes many shuffle blocks, and it's
* better to launch multiple tasks to read shuffle blocks of
* one mapper. This array contains the partition start
* indices for each mapper.
*/
case class LocalShuffleReaderExec(
child: SparkPlan,
partitionStartIndicesPerMapper: Array[Array[Int]]) extends UnaryExecNode {
override def output: Seq[Attribute] = child.output
override lazy val outputPartitioning: Partitioning = {
// when we read one mapper per task, then the output partitioning is the same as the plan
// before shuffle.
if (partitionStartIndicesPerMapper.forall(_.length == 1)) {
child match {
case ShuffleQueryStageExec(_, s: ShuffleExchangeExec) =>
s.child.outputPartitioning
case ShuffleQueryStageExec(_, r @ ReusedExchangeExec(_, s: ShuffleExchangeExec)) =>
s.child.outputPartitioning match {
case e: Expression => r.updateAttr(e).asInstanceOf[Partitioning]
case other => other
}
case _ =>
throw new IllegalStateException("operating on canonicalization plan")
}
} else {
UnknownPartitioning(partitionStartIndicesPerMapper.map(_.length).sum)
}
}
private var cachedShuffleRDD: RDD[InternalRow] = null
override protected def doExecute(): RDD[InternalRow] = {
if (cachedShuffleRDD == null) {
cachedShuffleRDD = child match {
case stage: ShuffleQueryStageExec =>
stage.shuffle.createLocalShuffleRDD(partitionStartIndicesPerMapper)
case _ =>
throw new IllegalStateException("operating on canonicalization plan")
}
}
cachedShuffleRDD
}
}
|
darionyaphet/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/OptimizeLocalShuffleReader.scala
|
Scala
|
apache-2.0
| 8,433 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.tradingpremises
import jto.validation._
import jto.validation.forms.UrlFormEncoded
import play.api.libs.json._
import utils.AmlsSpec
class TradingPremisesMsbServicesSpec extends AmlsSpec {
"MsbServices" must {
"round trip through Json correctly" in {
val data = TradingPremisesMsbServices(Set(TransmittingMoney, ChequeCashingNotScrapMetal, ChequeCashingScrapMetal, CurrencyExchange, ForeignExchange))
val js = Json.toJson(data)
js.as[TradingPremisesMsbServices] mustEqual data
}
"round trip through Forms correctly" in {
val model = TradingPremisesMsbServices(Set(TransmittingMoney, ChequeCashingNotScrapMetal, ChequeCashingScrapMetal, CurrencyExchange, ForeignExchange))
val data = implicitly[Write[TradingPremisesMsbServices, UrlFormEncoded]].writes(model)
implicitly[Rule[UrlFormEncoded, TradingPremisesMsbServices]].validate(data) mustEqual Valid(model)
}
"fail to validate when the set is empty" in {
val data: UrlFormEncoded = Map(
"msbServices" -> Seq.empty[String]
)
implicitly[Rule[UrlFormEncoded, TradingPremisesMsbServices]].validate(data)
        .mustEqual(Invalid(Seq((Path \ "msbServices") -> Seq(ValidationError("error.required.tp.services")))))
}
"fail to validate when there is an invalid entry in the set" in {
val data: UrlFormEncoded = Map(
"msbServices" -> Seq("invalid")
)
implicitly[Rule[UrlFormEncoded, TradingPremisesMsbServices]].validate(data)
        .mustEqual(Invalid(Seq((Path \ "msbServices" \ 0) -> Seq(ValidationError("error.invalid")))))
}
"serialize with the expected structure" in {
val model = TradingPremisesMsbServices(Set(TransmittingMoney, ChequeCashingNotScrapMetal, ChequeCashingScrapMetal, CurrencyExchange, ForeignExchange))
val serializedModel = TradingPremisesMsbServices.formW.writes(model)
serializedModel.getOrElse("msbServices[]", Seq()).toSet mustEqual Set("01", "02", "03", "04", "05")
}
}
}
|
hmrc/amls-frontend
|
test/models/tradingpremises/TradingPremisesMsbServicesSpec.scala
|
Scala
|
apache-2.0
| 2,639 |
package works.weave.socks.aws.orders.main
import org.slf4j.Logger
import org.slf4j.LoggerFactory
import org.springframework.context.annotation.AnnotationConfigApplicationContext
import org.springframework.context.annotation.ComponentScan
import scala.reflect.ClassTag
import works.weave.socks.spring.aws.DynamoConfiguration
import works.weave.socks.spring.aws.DynamoSchema
/**
* Entrypoint for the orders web service
*/
object ServiceMain {
def main(args : Array[String]) : Unit = {
System.setProperty("org.jboss.logging.provider", "slf4j")
val appContext = new AnnotationConfigApplicationContext(classOf[Config])
def bean[T : ClassTag] : T = appContext.getBean(implicitly[ClassTag[T]].runtimeClass).asInstanceOf[T]
def initSchema() : Unit = {
val dynamo = bean[DynamoConfiguration]
bean[DynamoSchema].createMissing(dynamo.client)
}
def resetSchema() : Unit = {
val dynamo = bean[DynamoConfiguration]
bean[DynamoSchema].resetDestructively(dynamo.client)
}
// FIXME: do neither of initSchema, resetSchema
//initSchema()
//resetSchema()
try {
bean[Server].run()
} catch {
case e : Throwable if { Log.error("Service quitting due to throwable", e); false } =>
} finally {
Log.warn("Force-flushing log... " + (0 to 4096).map(_ => " ").mkString)
System.err.flush()
System.out.flush()
}
}
@ComponentScan(basePackages = Array("works.weave.socks.aws.orders", "works.weave.socks.spring"))
class Config {
}
val Log : Logger = LoggerFactory.getLogger(getClass)
}
|
Compositional/orders-aws
|
src/main/scala/works.weave.socks.aws.orders/main/ServiceMain.scala
|
Scala
|
apache-2.0
| 1,585 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.data.storage.elasticsearch
import java.io.IOException
import scala.collection.JavaConverters.mapAsJavaMapConverter
import org.apache.http.entity.ContentType
import org.apache.http.nio.entity.NStringEntity
import org.apache.http.util.EntityUtils
import org.apache.predictionio.data.storage.AccessKey
import org.apache.predictionio.data.storage.AccessKeys
import org.apache.predictionio.data.storage.StorageClientConfig
import org.elasticsearch.client.{ResponseException, RestClient}
import org.json4s._
import org.json4s.JsonDSL._
import org.json4s.native.JsonMethods._
import org.json4s.native.Serialization.write
import grizzled.slf4j.Logging
/** Elasticsearch implementation of AccessKeys. */
class ESAccessKeys(client: RestClient, config: StorageClientConfig, index: String)
extends AccessKeys with Logging {
implicit val formats = DefaultFormats.lossless
private val estype = "accesskeys"
ESUtils.createIndex(client, index,
ESUtils.getNumberOfShards(config, index.toUpperCase),
ESUtils.getNumberOfReplicas(config, index.toUpperCase))
val mappingJson =
(estype ->
("_all" -> ("enabled" -> false)) ~
("properties" ->
("key" -> ("type" -> "keyword")) ~
("events" -> ("type" -> "keyword"))))
ESUtils.createMapping(client, index, estype, compact(render(mappingJson)))
def insert(accessKey: AccessKey): Option[String] = {
val key = if (accessKey.key.isEmpty) generateKey else accessKey.key
update(accessKey.copy(key = key))
Some(key)
}
def get(id: String): Option[AccessKey] = {
if (id.isEmpty) {
return None
}
try {
val response = client.performRequest(
"GET",
s"/$index/$estype/$id",
Map.empty[String, String].asJava)
val jsonResponse = parse(EntityUtils.toString(response.getEntity))
      (jsonResponse \ "found").extract[Boolean] match {
case true =>
          Some((jsonResponse \ "_source").extract[AccessKey])
case _ =>
None
}
} catch {
case e: ResponseException =>
e.getResponse.getStatusLine.getStatusCode match {
case 404 => None
case _ =>
error(s"Failed to access to /$index/$estype/$id", e)
None
}
case e: IOException =>
error(s"Failed to access to /$index/$estype/$id", e)
None
}
}
def getAll(): Seq[AccessKey] = {
try {
val json =
("query" ->
("match_all" -> List.empty))
ESUtils.getAll[AccessKey](client, index, estype, compact(render(json)))
} catch {
case e: IOException =>
        error(s"Failed to access to /$index/$estype/_search", e)
Nil
}
}
def getByAppid(appid: Int): Seq[AccessKey] = {
try {
val json =
("query" ->
("term" ->
("appid" -> appid)))
ESUtils.getAll[AccessKey](client, index, estype, compact(render(json)))
} catch {
case e: IOException =>
        error(s"Failed to access to /$index/$estype/_search", e)
Nil
}
}
def update(accessKey: AccessKey): Unit = {
val id = accessKey.key
try {
val entity = new NStringEntity(write(accessKey), ContentType.APPLICATION_JSON)
val response = client.performRequest(
"POST",
s"/$index/$estype/$id",
Map("refresh" -> "true").asJava,
entity)
val jsonResponse = parse(EntityUtils.toString(response.getEntity))
      val result = (jsonResponse \ "result").extract[String]
result match {
case "created" =>
case "updated" =>
case _ =>
error(s"[$result] Failed to update $index/$estype/$id")
}
} catch {
case e: IOException =>
error(s"Failed to update $index/$estype/$id", e)
}
}
def delete(id: String): Unit = {
try {
val response = client.performRequest(
"DELETE",
s"/$index/$estype/$id",
Map("refresh" -> "true").asJava)
val json = parse(EntityUtils.toString(response.getEntity))
      val result = (json \ "result").extract[String]
result match {
case "deleted" =>
case _ =>
          error(s"[$result] Failed to delete $index/$estype/$id")
}
} catch {
case e: IOException =>
        error(s"Failed to delete $index/$estype/$id", e)
}
}
}
|
dszeto/incubator-predictionio
|
storage/elasticsearch/src/main/scala/org/apache/predictionio/data/storage/elasticsearch/ESAccessKeys.scala
|
Scala
|
apache-2.0
| 5,153 |