code stringlengths 5..1M | repo_name stringlengths 5..109 | path stringlengths 6..208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5..1M |
---|---|---|---|---|---|
enum Foo[A] {
case Bar extends Foo[Int]
}
}
| som-snytt/dotty | tests/run/i6677/Enum_1.scala | Scala | apache-2.0 | 44 |
package com.codexica.s3crate.filetree.history.snapshotstore.s3
import java.io.ByteArrayInputStream
import org.scalamock.specs2.MockFactory
import org.jets3t.service.utils.ServiceUtils
import org.apache.commons.io.FileUtils
import java.util.Random
import com.codexica.common.SafeLogSpecification
/**
* @author Josh Albrecht ([email protected])
*/
class S3BlobWriterSpec extends SafeLogSpecification {
trait Context extends BaseContext {
val dataLength = 34985
val bytes = new Array[Byte](dataLength)
new Random(1337).nextBytes(bytes)
val fullDataHash = ServiceUtils.computeMD5Hash(bytes)
val inputStream = new ByteArrayInputStream(bytes)
}
"saving a regular file" should {
"create a list with one file that matches" in new Context {
val (fileHashes, completeHash) = S3BlobWriter.write(inputStream, FileUtils.getTempDirectory, dataLength * 2)
completeHash.toList must be equalTo fullDataHash.toList
fileHashes.size must be equalTo 1
val (file, hash) = fileHashes.head
FileUtils.readFileToByteArray(file).toList must be equalTo bytes.toList
hash.toList must be equalTo fullDataHash.toList
}
}
"saving a large file" should {
"create a list of multiple files with matching hashes" in new Context {
val secondFileSize = 2111
val firstFileSize = dataLength - secondFileSize
val (fileHashes, completeHash) = S3BlobWriter.write(inputStream, FileUtils.getTempDirectory, firstFileSize)
completeHash.toList must be equalTo fullDataHash.toList
fileHashes.size must be equalTo 2
val (firstFile, firstHash) = fileHashes.head
FileUtils.readFileToByteArray(firstFile).toList must be equalTo bytes.toList.take(firstFileSize)
firstHash.toList must be equalTo ServiceUtils.computeMD5Hash(bytes.toList.take(firstFileSize).toArray).toList
val (secondFile, secondHash) = fileHashes.drop(1).head
FileUtils.readFileToByteArray(secondFile).toList must be equalTo bytes.toList.drop(firstFileSize)
secondHash.toList must be equalTo ServiceUtils.computeMD5Hash(bytes.toList.drop(firstFileSize).toArray).toList
}
}
}
| joshalbrecht/s3crate | src/test/scala/com/codexica/s3crate/filetree/history/snapshotstore/s3/S3BlobWriterSpec.scala | Scala | mit | 2,152 |
package snare
import logger.LoggerFactory
import scala.concurrent.ops.spawn
import com.mongodb.{BasicDBObject, Mongo}
import snare.tools.UUIDIdentitity._
import java.util.UUID
import snare.tools.Implicits._
import snare.storage._
/**
* The Snare main class
*
* @author Xavier Llora
* @date Jan 25, 2010 at 7:30:48 PM
*
*/
class Snare(val name: String, val pool: String, val metadata:BasicDBObject,
val host: String, val port: Int,
notify: (BasicDBObject) => Boolean) {
protected val createdAt = System.currentTimeMillis
protected val log = LoggerFactory.log
val uuid = uniqueUUID(name)
//
// Main store interface object
//
val storage = UpdateTools(uuid,name,pool,createdAt,host,port,log)
//
  // Basic activity control of the heartbeat
//
private var activityFlag = false
def activity = activityFlag
def activity_=(stat: Boolean) = {
stat match {
// Need to get started
case true if !this.activityFlag => {
this.activityFlag = true
spawnActivity
}
// Already running
case true if this.activityFlag =>
// Already not running
case false if !this.activityFlag =>
// Need to update activity to stop
case false if this.activityFlag =>
this.activityFlag = false
}
this.activityFlag
}
//
// Authentication
//
def authenticate(user:String,pswd:String) = storage.authenticate(user,pswd)
protected val ID = new BasicDBObject
ID.put("_id", uuid.toString)
// The shutdown hook for this instance
private val sdh = new SnareShutdownHook(this)
//
// The thread that runs the heart beat
//
protected def spawnActivity = {
//
// The heart beat
//
spawn {
try {
storage.registerToPool(ID, metadata)
Runtime.getRuntime.addShutdownHook(sdh)
log info "[HRTB] Heartbeat engaged for " + uuid
while (activityFlag) {
try {
Thread.sleep(Snare.HEARTBEAT_INTERVAL)
storage.updateHeartbeat
//log finest "[HRTB] Heartbeat for " + uuid + " " + update
}
catch {
case e => log warning "[FAIL] Heartbeat on " + uuid + " " + e.getCause
}
}
}
catch {
case e => {
log warning "[FAIL] Heartbeat failed to register " + uuid + " " + e.getCause
activityFlag = false
}
}
try {
storage unregisterFromPool ID
Runtime.getRuntime.removeShutdownHook(sdh)
log info "[HRTB] Heartbeat disengaged for " + uuid
}
catch {
case e => {
log warning "[FAIL] Heartbeat failed to unregister " + uuid + " " + e.getCause
activityFlag = false
}
}
this
}
//
  // The notification thread
//
spawn {
log info "[EVTL] Notification event loop engaged for " + uuid
while (activityFlag) {
try {
Thread.sleep(Snare.EVENT_LOOP_INTERVAL)
val cur = storage.instance.find
// log finest "[EVTL] Notifications available " + cur.hasNext
while (cur.hasNext) {
try {
val msg = cur.next.asInstanceOf[BasicDBObject]
try {
if (notify(msg))
log info "[EVTL] Notification processed by " + uuid + " " + msg
else
log info "[EVTL] Notification ignored by " + uuid + " " + msg
}
catch {
                case e => log warning "[EVTL] Exception while processing notification on " + uuid + " " + e.toString
}
storage.instance.remove(msg)
}
catch {
              case e => log warning "[EVTL] Exception while removing processed notification on " + uuid + " " + e.toString
}
}
}
catch {
case e => log warning "[EVTL] Exception while pulling notifications in event loop on " + uuid + " " + e.toString
}
}
log info "[EVTL] Notification event loop disengaged for " + uuid
}
}
//
// Returns the peers in the pool
//
def heartbeats = storage.queryHeartbeats
def peers = storage.queryPeers
def broadcast(message: BasicDBObject) = storage.queryBroadcast(message)
def notifyPeer(uuid: String, message: BasicDBObject) = storage.queryNotifyPeer(uuid, message)
def fetchPeerInformation (uuid:String) = storage.queryFetchPeerInformation(uuid)
def fetchRegisteredPeersInformation = storage.queryFetchRegisteredPeersInformation
override def toString = "<Snare: "+name+", "+pool+", "+host+", "+port+", activity="+activityFlag+">"
private class SnareShutdownHook(snare: Snare) extends Thread {
val log = LoggerFactory.log
override def run() {
log severe "Abnormal finalization. Cleaning after " + snare.uuid + ":" + snare.name
storage unregisterFromPool ID
log severe "Broadcasting abnormal termination of " + snare.uuid + ":" + snare.name
snare.broadcast("""{"msg":"killed","type":"fatal","uuid":""" +
'"' + snare.uuid + '"' + ""","ts":""" + System.currentTimeMillis + "}")
}
}
}
/**
* Snare companion object
*
* @author Xavier Llora
* @date Jan 25, 2010 at 7:30:48 PM
*
*/
object Snare {
val version = "0.4.1vcli"
var HEARTBEAT_INTERVAL = 3000
var EVENT_LOOP_INTERVAL = 6000
def apply(name: String, pool: String, notify: (BasicDBObject) => Boolean) = new Snare(name, pool, new BasicDBObject, "localhost", 27017, notify)
def apply(name: String, pool: String, metadata: BasicDBObject, notify: (BasicDBObject) => Boolean) = new Snare(name, pool, metadata, "localhost", 27017, notify)
def apply(name: String, pool: String, metadata: BasicDBObject, port: Int, notify: (BasicDBObject) => Boolean) = new Snare(name, pool, metadata, "localhost", port, notify)
def apply(name: String, pool: String, metadata: BasicDBObject, host: String, notify: (BasicDBObject) => Boolean) = new Snare(name, pool, metadata, host, 27017, notify)
def apply(name: String, pool: String, metadata: BasicDBObject, host: String, port: Int, notify: (BasicDBObject) => Boolean) = new Snare(name, pool, metadata, host, port, notify)
def unapply(s: Snare): Option[(UUID, String, String, BasicDBObject, String, Int, Boolean)] = Some((s.uuid, s.name, s.pool, s.metadata, s.host, s.port, s.activityFlag))
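  //
  // Usage sketch (hypothetical values; assumes a MongoDB instance reachable on localhost:27017):
  //
  //   val snare = Snare("node-1", "workers", { msg => println("notified: " + msg); true })
  //   snare.activity = true     // registers to the pool and engages the heartbeat/notification loops
  //   snare.broadcast(new BasicDBObject("msg", "hello"))
  //   snare.activity = false    // disengages the loops and unregisters from the pool
  //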
}
| xllora/Snare | src/main/scala/Snare.scala | Scala | bsd-3-clause | 6,395 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.scala2_12.scalatest3_0_4
import org.jetbrains.plugins.scala.SlowTests
import org.jetbrains.plugins.scala.testingSupport.scalatest.SpecialCharactersTest
import org.junit.experimental.categories.Category
/**
* @author Roman.Shein
* @since 10.03.2017
*/
@Category(Array(classOf[SlowTests]))
class Scalatest2_12_3_0_4_SpecialCharactersTest extends Scalatest2_12_3_0_4_Base with SpecialCharactersTest
| triplequote/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/scalatest/scala2_12/scalatest3_0_4/Scalatest2_12_3_0_4_SpecialCharactersTest.scala | Scala | apache-2.0 | 462 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2002-2013, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://www.scala-lang.org/           **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala
package xml
package dtd
/** Parser for regexps (content models in DTD element declarations) */
object ContentModelParser extends Scanner { // a bit too permissive concerning #PCDATA
import ContentModel._
/** parses the argument to a regexp */
def parse(s: String): ContentModel = { initScanner(s); contentspec }
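  // Example sketch (hypothetical inputs): parse("(#PCDATA|em|strong)*") yields a MIXED content model,
  // parse("(title,author+)") yields an ELEMENTS content model, and parse("EMPTY") yields EMPTY.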
def accept(tok: Int) = {
if (token != tok) {
if ((tok == STAR) && (token == END)) // common mistake
        scala.sys.error("in DTDs, \n" +
"mixed content models must be like (#PCDATA|Name|Name|...)*")
else
scala.sys.error("expected " + token2string(tok) +
", got unexpected token:" + token2string(token))
}
nextToken()
}
// s [ '+' | '*' | '?' ]
def maybeSuffix(s: RegExp) = token match {
case STAR =>
nextToken(); Star(s)
case PLUS =>
nextToken(); Sequ(s, Star(s))
case OPT =>
nextToken(); Alt(Eps, s)
case _ => s
}
// contentspec ::= EMPTY | ANY | (#PCDATA) | "(#PCDATA|"regexp)
def contentspec: ContentModel = token match {
case NAME => value match {
case "ANY" => ANY
case "EMPTY" => EMPTY
case _ => scala.sys.error("expected ANY, EMPTY or '(' instead of " + value)
}
case LPAREN =>
nextToken()
sOpt()
if (token != TOKEN_PCDATA)
ELEMENTS(regexp)
else {
nextToken()
token match {
case RPAREN =>
PCDATA
case CHOICE =>
val res = MIXED(choiceRest(Eps))
sOpt()
accept(RPAREN)
accept(STAR)
res
case _ =>
scala.sys.error("unexpected token:" + token2string(token))
}
}
case _ =>
scala.sys.error("unexpected token:" + token2string(token))
}
// sopt ::= S?
def sOpt() = if (token == S) nextToken()
// (' S? mixed ::= '#PCDATA' S? ')'
// | '#PCDATA' (S? '|' S? atom)* S? ')*'
// '(' S? regexp ::= cp S? [seqRest|choiceRest] ')' [ '+' | '*' | '?' ]
def regexp: RegExp = {
val p = particle
sOpt()
maybeSuffix(token match {
case RPAREN =>
nextToken(); p
case CHOICE =>
val q = choiceRest(p); accept(RPAREN); q
case COMMA => val q = seqRest(p); accept(RPAREN); q
})
}
// seqRest ::= (',' S? cp S?)+
def seqRest(p: RegExp) = {
var k = List(p)
while (token == COMMA) {
nextToken()
sOpt()
k = particle :: k
sOpt()
}
Sequ(k.reverse: _*)
}
// choiceRest ::= ('|' S? cp S?)+
def choiceRest(p: RegExp) = {
var k = List(p)
while (token == CHOICE) {
nextToken()
sOpt()
k = particle :: k
sOpt()
}
Alt(k.reverse: _*)
}
// particle ::= '(' S? regexp
// | name [ '+' | '*' | '?' ]
def particle = token match {
case LPAREN =>
nextToken(); sOpt(); regexp
case NAME =>
val a = Letter(ElemName(value)); nextToken(); maybeSuffix(a)
case _ => scala.sys.error("expected '(' or Name, got:" + token2string(token))
}
// atom ::= name
def atom = token match {
case NAME =>
val a = Letter(ElemName(value)); nextToken(); a
case _ => scala.sys.error("expected Name, got:" + token2string(token))
}
}
| som-snytt/scala-xml | src/main/scala/scala/xml/dtd/ContentModelParser.scala | Scala | bsd-3-clause | 4,080 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.examples.di
import wvlet.log.LogSupport
/**
* An example of using constructor injection.
*
 * This pattern is useful if you do not want to add a dependency on Airframe to some of your classes.
*/
object DI_02_ConstructorInjection extends App {
import wvlet.airframe._
case class MyAppConfig(port: Int = 8080)
// Use Constructor Injection
class MyApp(config: MyAppConfig) extends LogSupport {
def run: Unit = {
info(s"MyApp with ${config}")
}
}
val d = newDesign.noLifeCycleLogging // Disable lifecycle logging
// Build MyApp with Default Config
d.build[MyApp] { app =>
app.run // port = 8080
}
// Bind a custom config
val d2 = d
.bind[MyAppConfig].toInstance(MyAppConfig(port = 10010))
// Build MyApp with the custom config
d2.build[MyApp] { app =>
app.run // Shows MyApp with MyAppConfig(10010)
}
}
| wvlet/airframe | examples/src/main/scala/wvlet/airframe/examples/di/DI_02_ConstructorInjection.scala | Scala | apache-2.0 | 1,449 |
package extruder.cats
import cats.data.EitherT
import cats.effect.ExitCase.{Completed, Error}
import cats.effect._
import cats.syntax.either._
import cats.syntax.flatMap._
import cats.syntax.functor._
import cats.{Eval, Monad, MonadError}
import extruder.core.{ExtruderErrors, ValidationErrorsToThrowable}
import extruder.data._
import io.estatico.newtype.macros.newsubtype
import scala.util.control.NonFatal
package object effect {
@newsubtype case class EvalValidation[A](a: EitherT[Eval, ValidationErrors, A])
object EvalValidation extends EvalValidationLowPriorityInstances {
implicit def extruderErrorsEvalValidation: ExtruderErrors[EvalValidation] = new ExtruderErrors[EvalValidation] {
override def missing[A](message: String): EvalValidation[A] =
EvalValidation(EitherT.leftT[Eval, A](ValidationErrors.missing(message)))
override def validationFailure[A](message: String): EvalValidation[A] =
EvalValidation(EitherT.leftT[Eval, A](ValidationErrors.failure(message)))
override def validationException[A](message: String, ex: Throwable): EvalValidation[A] =
EvalValidation(EitherT.leftT[Eval, A](ValidationErrors.exception(message, ex)))
override def fallback[A](fa: EvalValidation[A])(thunk: => EvalValidation[A]): EvalValidation[A] =
EvalValidation(
EitherT(
fa.a.value
.flatMap(
_.fold(errs => Eval.later(thunk.a.value.value.leftMap(_ ++ errs.toList)), a => Eval.later(Right(a)))
)
)
)
}
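    // Behaviour sketch (hypothetical values): with ev = extruderErrorsEvalValidation,
    //   ev.fallback(ev.missing[Int]("port"))(EvalValidation(EitherT.rightT[Eval, ValidationErrors](8080)))
    // evaluates lazily to Right(8080); when both branches fail, their error lists are accumulated instead.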
}
trait EvalValidationLowPriorityInstances {
implicit def extruderStdInstancesForEvalValidation(
implicit toThrowable: ValidationErrorsToThrowable
): Sync[EvalValidation] = new Sync[EvalValidation] {
def F: MonadError[ValidationT[Eval, ?], Throwable] = MonadError[ValidationT[Eval, ?], Throwable]
def FF: Monad[EitherT[Eval, ValidationErrors, ?]] =
Monad[EitherT[Eval, ValidationErrors, ?]]
override def suspend[A](thunk: => EvalValidation[A]): EvalValidation[A] =
EvalValidation(EitherT(Eval.later(try {
thunk.a.value.value
} catch {
case NonFatal(th) => Left(ValidationErrors.exception(th))
})))
override def delay[A](thunk: => A): EvalValidation[A] =
EvalValidation(EitherT[Eval, ValidationErrors, A](Eval.always(try {
Right(thunk)
} catch {
case NonFatal(th) => Left(ValidationErrors.exception(th))
})))
override def bracketCase[A, B](
acquire: EvalValidation[A]
)(use: A => EvalValidation[B])(release: (A, ExitCase[Throwable]) => EvalValidation[Unit]): EvalValidation[B] =
acquire.a.value.value match {
case Right(a) =>
val res = use(a)
res.a.value.value match {
case Right(b) => EvalValidation(release(a, Completed).a.map(_ => b))
case Left(errs) =>
release(a, Error(toThrowable.convertErrors(errs))).a.value.value match {
case Right(_) => res
case Left(_) => res
}
}
case e @ Left(_) => EvalValidation(EitherT(Eval.now(e.rightCast[B])))
}
override def raiseError[A](e: Throwable): EvalValidation[A] =
EvalValidation(F.raiseError(e).a)
override def handleErrorWith[A](fa: EvalValidation[A])(f: Throwable => EvalValidation[A]): EvalValidation[A] =
EvalValidation(F.handleErrorWith(ValidationT(fa.a))(f.andThen(a => ValidationT(a.a))).a)
override def pure[A](x: A): EvalValidation[A] = EvalValidation(FF.pure(x))
override def flatMap[A, B](fa: EvalValidation[A])(f: A => EvalValidation[B]): EvalValidation[B] =
EvalValidation(FF.flatMap(fa.a)(f.andThen(_.a)))
override def tailRecM[A, B](a: A)(f: A => EvalValidation[Either[A, B]]): EvalValidation[B] =
EvalValidation(FF.tailRecM(a)(f.andThen(_.a)))
override def ap[A, B](ff: EvalValidation[A => B])(fa: EvalValidation[A]): EvalValidation[B] =
EvalValidation(F.ap(ValidationT(ff.a))(ValidationT(fa.a)).a)
override def ap2[A, B, Z](
ff: EvalValidation[(A, B) => Z]
)(fa: EvalValidation[A], fb: EvalValidation[B]): EvalValidation[Z] =
EvalValidation(F.ap2(ValidationT(ff.a))(ValidationT(fa.a), ValidationT(fb.a)).a)
}
}
@newsubtype case class EffectValidation[F[_], A](a: EitherT[F, ValidationErrors, A])
object EffectValidation extends TimerInstances with ContextShiftInstances with ConcurrentEffectInstances {
implicit def extruderErrorsEffectValidation[F[_]](
implicit F: MonadError[F, Throwable]
): ExtruderErrors[EffectValidation[F, ?]] =
new ExtruderErrors[EffectValidation[F, ?]] {
override def missing[A](message: String): EffectValidation[F, A] =
EffectValidation(EitherT.leftT[F, A](ValidationErrors.missing(message)))
override def validationFailure[A](message: String): EffectValidation[F, A] =
EffectValidation(EitherT.leftT[F, A](ValidationErrors.failure(message)))
override def validationException[A](message: String, ex: Throwable): EffectValidation[F, A] =
EffectValidation(EitherT.leftT[F, A](ValidationErrors.exception(message, ex)))
override def fallback[A](fa: EffectValidation[F, A])(thunk: => EffectValidation[F, A]): EffectValidation[F, A] =
EffectValidation(
EitherT(
fa.a.value.flatMap(_.fold(errs => thunk.a.value.map(_.leftMap(_ ++ errs.toList)), a => F.pure(Right(a))))
)
)
}
}
}
| janstenpickle/extruder | cats-effect/src/main/scala/extruder/cats/effect/package.scala | Scala | mit | 5,612 |
package com.cds.learnscala.extractors
object PatternMatch {
case class User(firstName: String, lastName: String, score: Int)
def advance(xs: List[User]) = xs match {
case User(_, _, score1) :: User(_, _, score2) :: _ => score1 - score2
case _ => 0
}
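  // e.g. advance(List(User("Alice", "A", 10), User("Bob", "B", 3), User("Eve", "E", 1))) == 7,
  // while advance(Nil) == 0, since the first case only matches lists with at least two users.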
def main(args: Array[String]) {
}
}
| anancds/scala-project | learn-scala/src/main/scala/com/cds/learnscala/extractors/PatternMatch.scala | Scala | mit | 310 |
package uk.gov.gds.ier.transaction.shared
/**
 * Variant of the Scala Either class tailored to Mustache limitations, specifically the confirmation page.
 *
 * Mustache conditions work well only with Options or booleans, not Either; Mustache cannot
 * properly interpret the LeftProjection/RightProjection used internally by Either.
*/
case class EitherErrorOrContent(blockContent: Option[List[String]], blockError: Option[String])
/** instantiate positive variant of EitherErrorOrContent */
object BlockContent {
def apply(values: List[String]) = EitherErrorOrContent(blockContent = Some(values), blockError = None)
def apply(value: String) = EitherErrorOrContent(blockContent = Some(List(value)), blockError = None)
}
/** instantiate negative variant of EitherErrorOrContent */
object BlockError {
def apply(value: String) = EitherErrorOrContent(blockContent = None, blockError = Some(value))
}
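// Usage sketch (hypothetical data): `if (answers.nonEmpty) BlockContent(answers) else BlockError("Please complete this step")`
// yields a value whose blockContent/blockError Options let a Mustache template use plain
// {{#blockContent}}...{{/blockContent}} and {{#blockError}}...{{/blockError}} sections instead of Either projections.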
| michaeldfallen/ier-frontend | app/uk/gov/gds/ier/transaction/shared/ConfirmationMustacheCommon.scala | Scala | mit | 896 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.tf.{DecodeRaw => DecodeRawOp}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import org.tensorflow.framework.NodeDef
import scala.reflect.ClassTag
class DecodeRaw extends TensorflowOpsLoader {
import Utils._
override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
, context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
val attrs = nodeDef.getAttrMap
val endian = getBoolean(attrs, "little_endian")
val outType = attrs.get("out_type").getType
new DecodeRawOp[T](outType, endian)
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/DecodeRaw.scala | Scala | apache-2.0 | 1,399 |
package org.eigengo.sbtmdrw
import java.io.File
import org.pegdown.{LinkRenderer, ToHtmlSerializer, Extensions, PegDownProcessor}
import scala.io.Source
import sbt.{Logger, State}
import scala.util.Try
class MarkdownRewriter(source: File, renderer: MarkdownRenderer) {
val sourceCharacters = Source.fromFile(source).iter.toArray
def run[A](log: Logger)(onComplete: Try[String] => A): A = {
val processor = new PegDownProcessor(Extensions.FENCED_CODE_BLOCKS | Extensions.HARDWRAPS | Extensions.TABLES | Extensions.DEFINITIONS)
val root = processor.parseMarkdown(sourceCharacters)
renderer.render(root, log)(onComplete)
}
}
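// Usage sketch (hypothetical paths; assumes a MarkdownRenderer instance `renderer` and an sbt Logger `log`):
//   new MarkdownRewriter(new File("src/markdown/index.md"), renderer).run(log) {
//     case scala.util.Success(html)  => println(html)
//     case scala.util.Failure(error) => log.error(error.getMessage)
//   }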
| eigengo/sbt-mdrw | src/main/scala/org/eigengo/sbtmdrw/MarkdownRewriter.scala | Scala | apache-2.0 | 645 |
package org.nexbook.performance.app
import com.typesafe.config.Config
import org.nexbook.app.{AppConfig, OrderBookApp}
import org.nexbook.fix.FixMessageHandler
import org.nexbook.performance.PerformanceTest
import org.nexbook.performance.result.ResultLogger
import org.nexbook.testutils.FixMessageProvider
import org.slf4j.Logger
import quickfix.{Message, SessionID}
/**
* Created by milczu on 08.01.16.
*/
trait OrderBookAppPerformanceTest extends PerformanceTest {
val appRoot = ScriptRunner.executeScript("app-root.sh")
val resultLogger = new ResultLogger
val dbCollections = List("orders", "executions")
def cleanBeforeTest(): Unit = dbCollections.foreach(MongodbTestUtils.dropCollection)
def logger: Logger
def benchmarkConfig: Config
def testDataPath: String
def resultLog: String
def expectedTotalOrdersCount: Int
val fixMessageApplierThreadPool = 64
def executeTest() = {
logger.info("Test run!")
logger.debug("Load all FIX messages for test")
val messages: List[(Message, SessionID)] = FixMessageProvider.get(testDataPath)
logger.debug("FIX messages for test loaded")
cleanBeforeTest()
asyncExecute("OrderBookApp") {
OrderBookApp.main(Array())
}
applyFixMessages(messages, OrderBookApp.fixMessageHandler)
new AppProgressChecker().execute()
resultLogger.logResultToFile(benchmarkConfig, resultLog, writeHeader = true)
OrderBookApp.stop()
}
def applyFixMessages(messages: List[(Message, SessionID)], fixMessageHandler: FixMessageHandler) = {
if (fixMessageApplierThreadPool == 1) {
asyncExecute("Async FIX message applier") {
logger.info("Apply FIX messages")
messages.foreach(m => fixMessageHandler.fromApp(m._1, m._2))
logger.info("Applied FIX messages")
}
} else {
val partitionSize = math.ceil(messages.size / fixMessageApplierThreadPool.toDouble).toInt
val messagesPartitions: List[List[(Message, SessionID)]] = messages.grouped(partitionSize).toList
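      // e.g. (hypothetical volume) 1,000 messages with the 64-thread pool give ceil(1000 / 64) = 16
      // messages per partition, i.e. 63 partitions (62 of 16 messages and one of 8), each applied asynchronously below.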
for (part <- messagesPartitions.indices) {
asyncExecute(s"Async FIX message applier: $part") {
logger.info("Apply FIX messages")
messagesPartitions(part).foreach { m => fixMessageHandler.fromApp(m._1, m._2)/*; if(Random.nextInt(10) % 3 == 0) Thread.sleep(0,5) else Thread.sleep(0)*/ }
logger.info(s"Applied FIX messages: $part")
}
}
}
}
class AppProgressChecker {
val scriptsPath = "src/test/resources/scripts"
val logFile = "nexbook.log"
import sys.process._
val phraseFMH = "FixMessageHandler - .* - onMessage:"
val phraseME = "MatchingEngine - .* - Order processed"
val phraseTDS = "TradeDatabaseSaver - .* - Saved order"
def execute() = {
Thread.sleep(20000)
while (!isAppFinished) {
val countTDS = MongodbTestUtils.count("orders")
val countME = countOccurrencesInLogFile(phraseME)
val countFMH = countOccurrencesInLogFile(phraseFMH)
logger.info(s"Current counts - FMH: $countFMH, ME: $countME, TDS: $countTDS")
Thread.sleep(10000)
}
logger.info("Progress checker finished")
val startLine = findFirstOccurrenceInLogFile(phraseFMH)
val endLine = findLastOccurrenceInLogFile(phraseME)
def extractNanoTimeFromLogFile(line: String): Long = line.split(" ")(1).toLong
val startTime = extractNanoTimeFromLogFile(startLine)
val endTime = extractNanoTimeFromLogFile(endLine)
import scala.concurrent.duration._
val execTime = Duration(endTime - startTime, NANOSECONDS)
val throughput = (expectedTotalOrdersCount / execTime.toMicros.toDouble * Duration(1, SECONDS).toMicros).toInt
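    // e.g. (hypothetical run) 100,000 orders processed in 20 s give 100000 orders / 20,000,000 us * 1,000,000 us/s = 5,000 orders/s.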
logger.info(s"Duration: ${execTime.toMillis}ms. Throughput: $throughput orders/s")
}
def isAppFinished: Boolean = {
logger.info("executing isAppFinished")
def allOrdersProcessedInFixMessageHandler: Boolean = countOccurrencesInLogFile(phraseFMH) == expectedTotalOrdersCount
def allOrdersHandlerInMatchingEngine: Boolean = countOccurrencesInLogFile(phraseME) == expectedTotalOrdersCount
def allOrdersSavedInDb: Boolean = !AppConfig.dbPersist || MongodbTestUtils.count("orders") == expectedTotalOrdersCount
def appFinishedConditions: List[Boolean] = List(allOrdersProcessedInFixMessageHandler, allOrdersHandlerInMatchingEngine, allOrdersSavedInDb) // List(allOrdersProcessedInFixMessageHandler, allOrdersHandlerInMatchingEngine, allOrdersSavedInDb)
appFinishedConditions.reduce(_ && _)
}
def countOccurrencesInLogFile(phrase: String): Int = {
val cmd = s"cp $appRoot/logs/$logFile $appRoot/logs/temp.log && less $appRoot/logs/temp.log | grep '$phrase' | wc -l"
val output = (stringSeqToProcess(Seq("bash", "-c", cmd)) !!).trim
(stringSeqToProcess(Seq("bash", "-c", s"rm -rf $appRoot/logs/temp.log")) !)
    if (!output.matches("\\d+")) {
logger.warn(s"returned output: $output for $phrase")
0
} else output.toInt
}
def findFirstOccurrenceInLogFile(phrase: String): String = {
val cmd = s"less $appRoot/logs/$logFile | grep '$phrase' | head -n 1"
(stringSeqToProcess(Seq("bash", "-c", cmd)) !!).trim
}
def findLastOccurrenceInLogFile(phrase: String): String = {
val cmd = s"less $appRoot/logs/$logFile | grep '$phrase' | tail -n 1"
(stringSeqToProcess(Seq("bash", "-c", cmd)) !!).trim
}
}
}
| milczarekIT/nexbook | src/test/scala/org/nexbook/performance/app/OrderBookAppPerformanceTest.scala | Scala | apache-2.0 | 5,158 |
package com.dongxiguo.zeroLog.context
import language.experimental.macros
final class CurrentLine (val get: Int) extends AnyVal
object CurrentLine {
final def currentLine_impl(c: scala.reflect.macros.Context) = {
import c.universe._
val lineExpr = c.Expr(Literal(Constant(c.enclosingPosition.line)))
reify(new _root_.com.dongxiguo.zeroLog.context.CurrentLine(lineExpr.splice))
}
implicit final def currentLine: CurrentLine = macro currentLine_impl
}
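// Usage sketch (hypothetical logging helper): a method taking an implicit CurrentLine receives the
// line number of its call site, resolved at compile time by the macro above, e.g.
//   def trace(msg: String)(implicit line: CurrentLine) = println(line.get + ": " + msg)
//   trace("here")  // prints the caller's line number followed by ": here"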
| jasoncao/zero-log | context/src/main/scala/com/dongxiguo/zeroLog/context/CurrentLine.scala | Scala | apache-2.0 | 473 |
package synthesis
// dummy
object APAInputSyntaxTree
/** Provides several methods to deal with input terms.
*
* @author Mikaël Mayer
*/
object APAInputTerm {
def partitionInteger(l: List[APAInputTerm]): (List[Int], List[APAInputTerm]) = l match {
case Nil => (Nil, Nil)
case (APAInputCombination(n, Nil)::q) =>
val (a, b) = partitionInteger(q)
(n::a, b)
case (p::q) =>
val (a, b) = partitionInteger(q)
(a, p::b)
}
}
/*****************
* Input terms *
*****************/
/** Trait expressing that an expression can be converted to an InputTerm
* It is useful to deal with Input variables, which are not directly InputTerms
* in order not to overload the pattern matching.
*
* @author Mikaël Mayer
*/
trait ConvertibleToInputTerm {
implicit def toInputTerm():APAInputTerm
}
/** A class defining a general input term, that is, containing only input variables and integers.
* A sign abstraction is provided for each term.
*
* @author Mikaël Mayer
*/
sealed abstract class APAInputTerm extends SignAbstraction {
/** Returns the same expression, but simplified. */
  def simplified:APAInputTerm // OptimizeMe: remember when it is already simplified so the same thing is not computed twice
/** @return The list of Input variables that this expression contains. */
def input_variables: List[InputVar]
//@{ Operators
  /** @param that A combination possibly containing output variables. */
/** @return The sum of this input term and the provided APACombination. */
def +(that : APACombination):APACombination = that + APACombination(this)
/** @return The difference of this input term and the provided APACombination. */
def -(that : APACombination):APACombination = -that + APACombination(this)
/** @return The product of this input term and the provided APACombination. */
def *(that : APACombination):APACombination = that * this
/** @return The sum of this input term and the provided input term. */
def +(that : APAInputTerm): APAInputTerm = APAInputAddition(this, that).simplified
/** @return The division of this input term by the provided input term. */
def /(that : APAInputTerm): APAInputTerm = APAInputDivision(this, that).simplified
/** @return The product of this input term by the provided input term. */
def *(that : APAInputTerm): APAInputTerm = APAInputMultiplication(this, that).simplified
/** @return The difference between this input term and the provided one. */
def -(that : APAInputTerm): APAInputTerm = (this, that) match {
case (t1: APAInputCombination, t2: APAInputCombination) => t1 - t2
case _ => this+(that*APAInputCombination(-1))
}
/** @return The opposite of this input term. */
def unary_-(): APAInputTerm = APAInputCombination(0, Nil) - this
//@}
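  // Example sketch (hypothetical variables): with b = InputVar("b"),
  //   APAInputCombination(InputVar("a")) + APAInputCombination(b) * 2    // the input term a+2*b
  //   (APAInputCombination(b) * 6) / APAInputCombination(2)              // simplifies to the input term 3*b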
  /** @return This input term where all occurrences of y have been replaced by t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm
  /** @return This input term where each variable appearing in lxt has been replaced by its associated term. */
def replaceList(lxt : List[(InputVar, APAInputTerm)]): APAInputTerm = {
lxt.foldLeft(this){ case (result, (x, t)) => result.replace(x, t) }
}
  /** @return This input term where the sign abstraction s is applied to all occurrences of t1. */
def assumeSignInputTerm(t1: APAInputTerm, s: SignAbstraction):APAInputTerm = {
(this, t1, -t1) match {
case (t0:APAInputCombination, t1:APAInputCombination, _) if t0 == t1 =>
val result = t1.assumeSign(s).propagateSign(this)
result
case (t0:APAInputCombination, _, mt1:APAInputCombination) if t0 == mt1 =>
val result = (-t1.assumeSign(s)).propagateSign(this)
result
case (t0:APAInputCombination, t1:APAInputCombination, mt1:APAInputCombination) =>
this
case _ =>
this
}
}
/** @return The integer that this input term represents if it exists, else throws an exception. */
def toInt: Int = this match {
case APAInputCombination(i, Nil) => i
case _ =>
throw new Exception(this + " cannot be converted to an integer")
}
/** Converts this input term to a string */
override def toString = toGeneralString
/** Converts this input term to a string in the current rendering mode */
/** See APASynthesis.rendering_mode. */
def toGeneralString: String = APASynthesis.rendering_mode match {
case rm@RenderingPython() => toPythonString(rm)
case rm@RenderingScala() => toScalaString(rm)
}
/** Converts this input term to a Python string. */
  /** @param rm Should be a RenderingPython() */
protected def toPythonString(rm: RenderingMode): String = this match {
case APAInputLCM(l) => rm.lcm_symbol+"(["+(l map (_.toCommonString(rm)) reduceLeft (_ + "," + _)) +"])"
case APAInputGCD(l) => rm.gcd_symbol+"(["+(l map (_.toCommonString(rm)) reduceLeft (_ + "," + _)) +"])"
case _ => toCommonString(rm)
}
/** Converts this input term to a Scala string. */
  /** @param rm Should be a RenderingScala() */
protected def toScalaString(rm: RenderingMode): String = this match {
case APAInputLCM(l) => rm.lcm_symbol+"(List("+(l map (_.toCommonString(rm)) reduceLeft (_ + "," + _)) +"))"
case APAInputGCD(l) => rm.gcd_symbol+"(List("+(l map (_.toCommonString(rm)) reduceLeft (_ + "," + _)) +"))"
case _ => toCommonString(rm)
}
/** Converts this input term to a common string in the provided rendering mode. */
/** rm should be equal to APASynthesis.rendering_mode */
def toCommonString(rm:RenderingMode):String = this match {
case APAInputMultiplication(Nil) => "1"
case APAInputMultiplication(a::Nil) => a.toCommonString(rm)
case APAInputMultiplication(l) => l map {
case el =>
val s = el.toCommonString(rm)
if(((s indexOf '-') >= 0) || ((s indexOf '+') >= 0) || ((s indexOf '/') >= 0)) "("+s+")" else s
} reduceLeft (_ + "*" + _)
case APAInputDivision(Nil, ld) => "1/("+APAInputMultiplication(ld).toCommonString(rm)+")"
case APAInputDivision(ln, Nil) => APAInputMultiplication(ln).toCommonString(rm)
case APAInputDivision(ln, ld) =>
val num = APAInputMultiplication(ln).toCommonString(rm)
val den = APAInputMultiplication(ld).toCommonString(rm)
val num_string = (if((num indexOf '+') >= 0 || (num indexOf '-') >= 0 || (num indexOf '+') >= 0) "("+num+")" else num )
val den_string = (if((den indexOf '+') >= 0 || (den indexOf '-') >= 0 || (den indexOf '+') >= 0) "("+den+")" else den )
num_string +"/"+den_string
case APAInputAddition(l) => l map {
case el =>
val s = el.toCommonString(rm)
if((s indexOf '-') >= 0) "("+s+")" else s
} reduceLeft (_ + " + " + _)
case APAInputAbs(e) => rm.abs_symbol + "("+e.toCommonString(rm)+")"
case APAInputLCM(Nil) =>
throw new Exception("What is this lcm that has not been simplified ??")
case APAInputLCM(l) => rm.lcm_symbol+"(List("+(l map (_.toCommonString(rm)) reduceLeft (_ + "," + _)) +"))"
case APAInputGCD(l) => rm.gcd_symbol+"(List("+(l map (_.toCommonString(rm)) reduceLeft (_ + "," + _)) +"))"
case t:APAInputCombination => t.toNiceString
/*case APAInputMod(operand, divisor) =>
val num = operand.toCommonString(rm)
val den = operand.toCommonString(rm)
val num_string = (if((num indexOf '+') >= 0 || (num indexOf '-') >= 0 || (num indexOf '+') >= 0) "("+num+")" else num )
val den_string = (if((den indexOf '+') >= 0 || (den indexOf '-') >= 0 || (den indexOf '+') >= 0) "("+den+")" else den )
val final_den_string = if(divisor.isPositive) den_string else (rm.abs_symbol+"("+den_string+")")
rm.mod_function(num_string, final_den_string)*/
//case _ => super.toString
}
}
/** Definition of an input variable. */
case class InputVar(name: String) extends SignAbstraction with ConvertibleToInputTerm with APAVariable {
/** Clones the variable without the sign abstraction */
def normalClone():this.type = InputVar(name).asInstanceOf[this.type]
/** Return an InputTerm containing the variable. */
def toInputTerm():APAInputCombination = {
if(isZero) return APAInputCombination(0)
APAInputCombination(this)
}
// Syntactic sugar
//def +(pac : APACombination) = pac+APACombination(this)
//def +(v : InputVar) = APACombination(v)+APACombination(this)
//def +(v : OutputVar) = APACombination(v)+APACombination(this)
//def *(i : Int) = APAInputCombination(0, (i, this)::Nil)
//def *(that: APAInputTerm) = APAInputMultiplication(APAInputCombination(this), that)
}
/** Object to provide more constructors for APAInputCombination.
*/
object APAInputCombination {
def apply(i: Int):APAInputCombination = APAInputCombination(i, Nil)
def apply(i: InputVar):APAInputCombination = APAInputCombination(0, (1, i)::Nil).propagateSign(i)
}
/** A linear combination of input variables, with a constant coefficient.
*/
case class APAInputCombination(coefficient: Int, input_linear: List[(Int, InputVar)]) extends APAInputTerm {
setSign(SignAbstraction.linearCombinationSign(coefficient, input_linear))
/** Clones the expression without the sign abstraction. */
def normalClone():this.type = APAInputCombination(coefficient, input_linear).asInstanceOf[this.type]
/** Returns the list of input variables that this expression contains. */
def input_variables: List[InputVar] = input_linear map (_._2)
  /** Returns true if the variable i1 has a name lexicographically less than the variable i2. */
def by_InputVar_name(i1:(Int, InputVar), i2:(Int, InputVar)) : Boolean = (i1, i2) match {
case ((_, InputVar(name1)), (_, InputVar(name2))) => name1 < name2
}
/** Adds the coefficiented variable i to the list of existing coefficiented regrouped_vars. */
  /** Assumes that if the variable already exists in the list, it appears first. */
def fold_Inputvar_name(i:(Int, InputVar), regrouped_vars:List[(Int, InputVar)]):List[(Int, InputVar)] = (i, regrouped_vars) match {
case (i, Nil) => i::Nil
case ((coef1, InputVar(name1)), (coef2, InputVar(name2))::q) if name1 == name2 => (coef1 + coef2, InputVar(name1))::q
case (i, q) => i::q
}
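  // e.g. fold_Inputvar_name((2, InputVar("a")), (3, InputVar("a"))::(1, InputVar("b"))::Nil)
  // merges the leading coefficients into (5, InputVar("a"))::(1, InputVar("b"))::Nil.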
/** Intercepts the sign propagation, and if there is a unique variable, propagates the sign abstraction to it. */
override def propagateSign(s: SignAbstraction):this.type = { //Intercepts the sign propagation
val result = (coefficient, input_linear) match {
case (0, (i, v)::Nil) =>
val new_v = v.assumeSign(SignAbstraction.multSign(SignAbstraction.mergeSign(this, s), SignAbstraction.number(i)))
APAInputCombination(0, (i, new_v)::Nil)
case _ => this
}
result.propagateSign_internal(s).asInstanceOf[this.type]
}
/** Returns a simplified version of this input combination.
* Guarantees that
* - The variables are alphabetically sorted,
* - There are no null coefficients
*/
def simplified: APAInputCombination = {
if(isZero) return APAInputCombination(0)
val input_linear2 = (input_linear sortWith by_InputVar_name ).foldRight[List[(Int, InputVar)]](Nil){ case (a, b) => fold_Inputvar_name(a, b)}
val input_linear3: List[(Int, InputVar)] = input_linear2 match {
case (i, v)::Nil => (i, v.assumeSign(SignAbstraction.multSign(this, SignAbstraction.number(i))))::Nil
case _ => input_linear2
}
APAInputCombination(coefficient, input_linear3 remove { case (i, v) => i == 0 || v.isZero}).propagateSign(this)
}
/** Returns the list of the coefficients in front of the input variables + the constant coefficient. */
def coefficient_list = (coefficient :: ((input_linear map (_._1)) ))
/** Returns true if there is a non-null coefficient in the combination. */
def has_gcd_coefs: Boolean = coefficient_list exists (_ != 0)
/** Returns the gcd of all coefficients. <code>has_gcd_coefs</code> is assumed. */
def gcd_coefs = Common.gcdlist(coefficient_list)
/** Returns the first sign present.
* If this sign is negative in equations like -a+b == 0,
* it is used to gain a character and produce a-b == 0
*/
def first_sign_present = coefficient_list find (_ != 0) match {
case Some(i) => if(i > 0) 1 else -1
case None => 1
}
/** Returns the division of this combination by an integer. */
/** Needs the coefficients to be divisible by i. */
def /(i : Int): APAInputCombination = {
APAInputCombination(coefficient / i, input_linear map {t => (t._1 / i, t._2)}).assumeSign(SignAbstraction.multSign(this, SignAbstraction.number(i)))
}
/** Returns true if this combination can be safely divisible by i. */
def safelyDivisibleBy(i : Int): Boolean = {
coefficient % i == 0 && (input_linear forall { case (k, v) => k % i == 0})
}
/** Returns the division of an input combination by another. */
  /** The result is not necessarily an input combination. */
def /(that : APAInputCombination): APAInputTerm = {
APAInputDivision(this, that).simplified
}
/** Returns the multiplication of this input combination by an integer. */
def *(i : Int): APAInputCombination = {
APAInputCombination(coefficient * i, input_linear map {t => (t._1 * i, t._2)}).assumeSign(SignAbstraction.multSign(this, SignAbstraction.number(i)))
}
/** Returns the multiplication of this input combination by another input combination. */
  /** The result is not necessarily an input combination. */
def *(that : APAInputCombination): APAInputTerm = {
APAInputMultiplication(this, that).simplified
}
/** Returns the sum of two input combinations. */
def +(pac : APAInputCombination): APAInputCombination = pac match {
case APAInputCombination(c, i) =>
APAInputCombination(coefficient + c, input_linear ++ i).simplified.assumeSign(SignAbstraction.addSign(this, pac))
}
/** Returns the difference between two input combinations. */
def -(that : APAInputCombination): APAInputCombination = this + (that * (-1))
/** Returns the sum of this input combination with a coefficiented variable. */
def +(kv : (Int, InputVar)): APAInputCombination = this + APAInputCombination(0, kv::Nil)
/** Returns the sum of this input combination with an integer. */
def +(k : Int): APAInputCombination = this + APAInputCombination(k, Nil)
/** Returns the difference of this input combination with a coefficiented variable. */
def -(kv : (Int, InputVar)): APAInputCombination = this - APAInputCombination(0, kv::Nil)
/** Returns the difference of this input combination with an integer. */
def -(k : Int): APAInputCombination = this + APAInputCombination(-k, Nil)
/** Returns the opposite of this input combination. */
override def unary_-(): APAInputCombination = (APAInputCombination(0, Nil) - this).propagateSign(SignAbstraction.oppSign(this))
/** Returns the linear expression where the input variable y has been replaced by the expression t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
val (input_linear_with_y, input_linear_without_y) = input_linear partition (_._2 == y)
val pac_without_y = APAInputCombination(coefficient, input_linear_without_y)
val total_y_coefficient = (input_linear_with_y map (_._1)).foldLeft(0)(_+_)
val result = t match {
case t:APAInputCombination =>
pac_without_y + (t*total_y_coefficient)
case _ =>
APAInputAddition(pac_without_y, APAInputMultiplication(APAInputCombination(total_y_coefficient), t))
}
result.propagateSign(this)
}
/** Returns this expression where the fact that t1 has sign s has been taken into account. */
override def assumeSignInputTerm(t1: APAInputTerm, s: SignAbstraction) = {
t1 match {
case t@APAInputCombination(coefficient2, Nil) => this // Would be strange to arrive there.
      case t@APAInputCombination(coefficient2, (i, v)::l) => // i is not zero,
input_linear find (_._2 == v) match {
case Some((i2, v2)) =>
val t_assumed = t.assumeSign(s)
val resultWithoutT = (this*i-t_assumed*i2)
val resultAddingT =(t_assumed*i2)
val resultMultipliedByI = resultWithoutT+resultAddingT
val result = resultMultipliedByI/i
result
case None => this // This variable is not there, so we cannot conclude anything.
}
case _ => this
}
}
/** Returns a string representing this linear combination. */
override def toString = toNiceString // Comment this to keep the abstract syntax tree representation
  /** Returns a nice string representing a usual linear combination, e.g. 5+3a. */
def toNiceString:String = {
def inputElementToString(kv : (Int, InputVar)) = kv._1 match {
case 1 => kv._2.name
case -1 => "-" + kv._2.name
case k => k + "*" + kv._2.name
}
def makePlus(l: List[String]):String = l match {
case Nil => ""
case a::p => val s = makePlus(p)
if(s=="") a
else if(a=="") s
else if(s.charAt(0) == '-')
a + s
else
a + "+" + s
}
var c_string = if(coefficient == 0) "" else coefficient.toString
var i_string = input_linear match {
case Nil => ""
case a::l => l.foldLeft(inputElementToString(a)) { (s, t2) =>
val t2s = inputElementToString(t2)
s + (if(t2s.charAt(0) =='-') t2s else "+" + t2s)}
}
val s = makePlus(c_string::i_string::Nil)
if(s == "") "0" else s
}
}
/** Object providing methods to create a division of input terms, with some optimizations.
*/
object APAInputDivision {
/** Returns a simplified division of input terms. */
def apply(a: APAInputTerm, b: APAInputTerm): APAInputTerm = APAInputDivision(a::Nil, b::Nil).simplified
  /** Returns a division where common occurrences between n and d have been removed (simplification). */
def simplifyNumDenom(n: List[APAInputTerm], d:List[APAInputTerm]):APAInputTerm = {
val (to_delete, n_ok) = n partition (d contains _)
if(to_delete != Nil) {
      val (to_be_nil, d_updated) = d.foldLeft((to_delete, Nil:List[APAInputTerm])){ // d_updated keeps the denominator factors that were not cancelled
case ((to_delete, collecter), el) =>
if(to_delete contains el)
(to_delete - el, collecter)
else
(to_delete, collecter ++ (el ::Nil))
}
APAInputDivision(n_ok, d_updated)
} else {
APAInputDivision(n, d)
}
}
}
/** Class representing an integer division between input terms.
* It should be guaranteed that the denominator divides the numerator.
*/
case class APAInputDivision(numerator: List[APAInputTerm], denominator : List[APAInputTerm]) extends APAInputTerm {
setSign(SignAbstraction.divSign(SignAbstraction.multSign(numerator), SignAbstraction.multSign(denominator)))
/** Returns a clone of this expression without the sign abstraction. */
def normalClone():this.type = APAInputDivision(numerator, denominator).asInstanceOf[this.type]
/** Returns a simplified version of this division. */
def simplified:APAInputTerm = {
if(isZero) return APAInputCombination(0)
val result = ((APAInputMultiplication(numerator).simplified, APAInputMultiplication(denominator).simplified) match {
case (n, APAInputCombination(1, Nil)) => n
case (n, d) if n == d => APAInputCombination(1, Nil)
case (nm@APAInputMultiplication(n), dm@APAInputMultiplication(d)) => APAInputDivision.simplifyNumDenom(n, d)
case (nm, dm@APAInputMultiplication(d)) => APAInputDivision.simplifyNumDenom(nm::Nil, d)
case (nm@APAInputMultiplication(n), dm) => APAInputDivision.simplifyNumDenom(n, dm::Nil)
case (nc@APAInputCombination(c, Nil), dc@APAInputCombination(i, Nil)) => APAInputCombination(c/i)
case (nc@APAInputCombination(c, l), dc@APAInputCombination(i, Nil)) if nc.safelyDivisibleBy(i) => nc/i
case (nc@APAInputCombination(c, l), dc@APAInputCombination(i, Nil)) => APAInputDivision(nc::Nil, dc::Nil)
case (n, d) => APAInputDivision(n::Nil, d::Nil)
})
result.propagateSign(this)
}
/** Returns the division where the variable y has been replaced by the input term t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
APAInputDivision(numerator map (_.replace(y, t)), denominator map (_.replace(y, t))).simplified.propagateSign(this)
}
/** Returns the list of input variables that this division contains. */
def input_variables: List[InputVar] = {
((numerator flatMap (_.input_variables)) ++ (denominator flatMap (_.input_variables))).distinct
}
}
/** Object providing a method to create multiplications of input terms.
*/
object APAInputMultiplication {
def apply(a: APAInputTerm*):APAInputTerm = APAInputMultiplication(a.toList).simplified
}
/** Class representing a multiplication between input terms. */
case class APAInputMultiplication(operands: List[APAInputTerm]) extends APAInputTerm {
//assert(operands.length > 1) // Else it does not make sense, it should have been simplified.
setSign(SignAbstraction.multSign(operands))
/** Returns a clone of this multiplication without the sign abstraction. */
def normalClone():this.type = APAInputMultiplication(operands).asInstanceOf[this.type]
/** Returns a simplified equal version of this multiplication. */
def simplified:APAInputTerm = {
if(isZero) return APAInputCombination(0)
val result = operands flatMap (_.simplified match {
case APAInputMultiplication(l) => l map (_.assumeNotzerobility(this))
case APAInputCombination(1, Nil) => Nil
case t => List(t.assumeNotzerobility(this))
}) match {
case Nil => APAInputCombination(1, Nil)
case a::Nil => a
case l =>
APAInputTerm.partitionInteger(l) match {
case (Nil, l) =>
APAInputMultiplication(l)
case (integers, not_input_combinations) =>
((integers reduceLeft (_ * _)), not_input_combinations) match {
case (0, _) => APAInputCombination(0)
case (a, Nil) => APAInputCombination(a)
case (a, (t:APAInputCombination)::q) => APAInputMultiplication((t*a)::q)
case (a, _) => val s = APAInputCombination(a)::not_input_combinations
APAInputMultiplication(s)
}
}
}
result.propagateSign(this)
}
  /** Returns the same multiplication where the non-zerobility of the applied sign abstraction is propagated to all sub-children. */
override def propagateSign(s: SignAbstraction):this.type = { //Intercepts the sign propagation
if(s.isNotZero) {
val new_operands = operands map (_.assumeNotzerobility(s))
APAInputMultiplication(new_operands).propagateSign_internal(s).asInstanceOf[this.type]
} else {
APAInputMultiplication(operands).propagateSign_internal(s).asInstanceOf[this.type]
}
}
  /** Returns an expression where all occurrences of the variable y have been replaced by t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
APAInputMultiplication(operands map (_.replace(y, t))).propagateSign(this).simplified
}
/** Returns the list of input variables contained in this multiplication. */
def input_variables: List[InputVar] = {
(operands flatMap (_.input_variables)).distinct
}
}
/** Object providing a method to create additions of input terms and others.
*/
object APAInputAddition {
/** Returns an addition of the given input terms. */
def apply(a: APAInputTerm*):APAInputTerm = APAInputAddition(a.toList).simplified
/** Separate the input terms in l between input combinations and general input terms. */
/** Used to group input combinations together */
def partitionInputCombination(l: List[APAInputTerm]): (List[APAInputCombination], List[APAInputTerm]) = l match {
case Nil => (Nil, Nil)
case ((t@APAInputCombination(_, _))::q) =>
val (a, b) = partitionInputCombination(q)
(t::a, b)
case (p::q) =>
val (a, b) = partitionInputCombination(q)
(a, p::b)
}
}
/** Class representing an addition of given input terms.
 * Additions differ from linear combinations, because they can store general additions
* like a*b+c+b^2+1
*/
case class APAInputAddition(l: List[APAInputTerm]) extends APAInputTerm {
setSign(SignAbstraction.addSign(l))
/** Returns a clone of this addition without the top-level abstraction (strange ?). */
def normalClone():this.type = APAInputAddition(l).asInstanceOf[this.type]
/** Returns a simplified version of this addition. */
def simplified:APAInputTerm = {
if(isZero) return APAInputCombination(0)
val result = l flatMap (_.simplified match {
case APAInputAddition(l) => l
case APAInputCombination(0, Nil) => Nil
case t => List(t)
}) match {
case Nil => APAInputCombination(0, Nil)
case a::Nil => a
case l =>
APAInputAddition.partitionInputCombination(l) match {
case (Nil, l) =>
APAInputAddition(l)
case (input_combinations, not_input_combinations) =>
((input_combinations reduceLeft (_ + _)), not_input_combinations) match {
case (a, Nil) => a
case (a, _) => val s = a::not_input_combinations
APAInputAddition(s)
}
}
}
result.propagateSign(this)
}
  /** Returns an expression where all occurrences of the variable y have been replaced by t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
APAInputAddition(l map (_.replace(y, t))).propagateSign(this).simplified
}
/** Returns the list of input variables contained in this addition. */
def input_variables: List[InputVar] = {
(l flatMap (_.input_variables)).distinct
}
}
/** Class representing an absolute value of an input term.
*/
case class APAInputAbs(arg: APAInputTerm) extends APAInputTerm {
setSign(SignAbstraction.absSign(arg))
/** Returns a clone of this absolute value without the abstraction. */
def normalClone():this.type = APAInputAbs(arg).asInstanceOf[this.type]
/** Returns a simplified version of this absolute value. */
def simplified:APAInputTerm = {
if(isZero) return APAInputCombination(0)
val result = arg.simplified match {
case t if t.isPositiveZero => t
case APAInputCombination(i, Nil) => APAInputCombination(Math.abs(i), Nil)
case t =>
APAInputAbs(t)
}
result.propagateSign(this)
}
  /** Returns an expression where all occurrences of the variable y have been replaced by t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
val result = APAInputAbs(arg.replace(y, t)).simplified
result.propagateSign(this)
}
/** Returns the list of input variables contained in this absolute value. */
def input_variables: List[InputVar] = {
arg.input_variables
}
}
/** Class representing the gcd of a list of input terms.
* The list of input terms should be guaranteed not to be all zero at the same time.
*/
case class APAInputGCD(l: List[APAInputTerm]) extends APAInputTerm {
setSign(1)
/** Returns a clone of this gcd without the abstraction. */
def normalClone():this.type = APAInputGCD(l).asInstanceOf[this.type]
/** Returns a simplified version of this gcd. */
def simplified:APAInputTerm = {
if(isZero) return APAInputCombination(0)
val (integers, non_integers) = APAInputTerm.partitionInteger(l map (_.simplified))
val result = (Common.gcdlistComplete(integers), non_integers) match {
case (Some(1), _) => APAInputCombination(1, Nil)
case (None, k::Nil) => APAInputAbs(k).simplified
case (None, Nil) =>
throw new Error("GCD is not defined on an empty set")
case (None, l) => APAInputGCD(l)
case (Some(n), Nil) => APAInputAbs(APAInputCombination(n, Nil)).simplified
case (Some(n), l) => APAInputGCD(APAInputCombination(n, Nil)::l)
}
result.propagateSign(this)
}
  /** Returns an expression where all occurrences of the variable y have been replaced by t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
APAInputGCD(l map (_.replace(y, t))).simplified.propagateSign(this)
}
/** Returns the list of input variables contained in this gcd. */
def input_variables: List[InputVar] = {
(l flatMap (_.input_variables)).distinct
}
}
/** Class representing the lcm of a list of input terms.
*/
case class APAInputLCM(l: List[APAInputTerm]) extends APAInputTerm {
setSign(1)
/** Returns a clone of this lcm without the abstraction. */
def normalClone():this.type = APAInputLCM(l).asInstanceOf[this.type]
/** Returns a simplified version of this lcm. */
def simplified:APAInputTerm = {
if(isZero) return APAInputCombination(0)
val (integers, non_integers) = APAInputTerm.partitionInteger(l map (_.simplified))
val result = (Common.lcmlist(integers), non_integers) match {
case (1, Nil) => APAInputCombination(1)
case (1, k::Nil) => APAInputAbs(k).simplified
case (1, k1::k2::l) if k1 == k2 => APAInputLCM(k2::l).simplified
case (1, l) => APAInputLCM(l)
case (n, Nil) => APAInputAbs(APAInputCombination(n, Nil)).simplified
case (n, l) => APAInputLCM(APAInputCombination(n, Nil)::l)
}
result.propagateSign(this)
}
  /** Returns an expression where all occurrences of the variable y have been replaced by t. */
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
APAInputLCM(l map (_.replace(y, t))).simplified.propagateSign(this)
}
/** Returns the list of input variables contained in this lcm. */
def input_variables: List[InputVar] = {
(l flatMap (_.input_variables)).distinct
}
}
/*
case class APAInputMod(operand: APAInputTerm, divisor: APAInputTerm) extends APAInputTerm {
setSign(true, true, false) // >= 0
if(divisor.can_be_zero) throw new Exception("Error : "+divisor+" can be zero in expression "+this)
def normalClone():this.type = APAInputMod(operand, divisor).asInstanceOf[this.type]
def simplified:APAInputTerm = {
if(isZero) return APAInputCombination(0)
val result = (operand.simplified, divisor.simplified) match {
case (APAInputCombination(0, Nil), _) => APAInputCombination(0, Nil)
case (_, APAInputCombination(1, Nil)) => APAInputCombination(0, Nil)
case (APAInputCombination(i, Nil), APAInputCombination(j, Nil)) if j != 0 => APAInputCombination(Common.smod(i, j), Nil)
case (o, d) => APAInputMod(o, d)
}
result.propagateSign(this)
}
def replace(y: InputVar, t: APAInputTerm):APAInputTerm = {
APAInputMod(operand.replace(y, t), divisor.replace(y, t)).simplified.propagateSign(this)
}
def input_variables: List[InputVar] = {
(operand.input_variables ++ divisor.input_variables).distinct
}
}*/
| epfl-lara/comfusy | src/main/scala/APAInputSyntaxTree.scala | Scala | bsd-2-clause | 30,642 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.it.http.parsing
import play.api.libs.iteratee.Enumerator
import play.api.libs.json.{ Json, JsError }
import play.api.mvc.Results.BadRequest
import play.api.mvc.{ BodyParser, BodyParsers }
import play.api.test._
object JsonBodyParserSpec extends PlaySpecification {
private case class Foo(a: Int, b: String)
private implicit val fooFormat = Json.format[Foo]
"The JSON body parser" should {
def parse[A](json: String, contentType: Option[String], encoding: String, bodyParser: BodyParser[A] = BodyParsers.parse.tolerantJson) = {
await(Enumerator(json.getBytes(encoding)) |>>>
bodyParser(FakeRequest().withHeaders(contentType.map(CONTENT_TYPE -> _).toSeq: _*)))
}
"parse JSON bodies" in new WithApplication() {
parse("""{"foo":"bar"}""", Some("application/json"), "utf-8") must beRight.like {
case json => (json \ "foo").as[String] must_== "bar"
}
}
"automatically detect the charset" in new WithApplication() {
parse("""{"foo":"bär"}""", Some("application/json"), "utf-8") must beRight.like {
case json => (json \ "foo").as[String] must_== "bär"
}
parse("""{"foo":"bär"}""", Some("application/json"), "utf-16") must beRight.like {
case json => (json \ "foo").as[String] must_== "bär"
}
parse("""{"foo":"bär"}""", Some("application/json"), "utf-32") must beRight.like {
case json => (json \ "foo").as[String] must_== "bär"
}
}
"ignore the supplied charset" in new WithApplication() {
parse("""{"foo":"bär"}""", Some("application/json; charset=iso-8859-1"), "utf-16") must beRight.like {
case json => (json \ "foo").as[String] must_== "bär"
}
}
"accept all common json content types" in new WithApplication() {
parse("""{"foo":"bar"}""", Some("application/json"), "utf-8", BodyParsers.parse.json) must beRight.like {
case json => (json \ "foo").as[String] must_== "bar"
}
parse("""{"foo":"bar"}""", Some("text/json"), "utf-8", BodyParsers.parse.json) must beRight.like {
case json => (json \ "foo").as[String] must_== "bar"
}
}
"reject non json content types" in new WithApplication() {
parse("""{"foo":"bar"}""", Some("application/xml"), "utf-8", BodyParsers.parse.json) must beLeft
parse("""{"foo":"bar"}""", None, "utf-8", BodyParsers.parse.json) must beLeft
}
"gracefully handle invalid json" in new WithApplication() {
parse("""{"foo:}""", Some("application/json"), "utf-8", BodyParsers.parse.json) must beLeft
}
"validate json content using .validate" in new WithApplication() {
import scala.concurrent.ExecutionContext.Implicits.global
val fooParser = BodyParsers.parse.json.validate {
_.validate[Foo].asEither.left.map(e => BadRequest(JsError.toFlatJson(e)))
}
parse("""{"a":1,"b":"bar"}""", Some("application/json"), "utf-8", fooParser) must beRight
parse("""{"foo":"bar"}""", Some("application/json"), "utf-8", fooParser) must beLeft
parse("""{"a":1}""", Some("application/json"), "utf-8", fooParser) must beLeft
}
"validate json content using implicit reads" in new WithApplication() {
parse("""{"a":1,"b":"bar"}""", Some("application/json"), "utf-8", BodyParsers.parse.json[Foo]) must beRight.like {
case foo => foo must_== Foo(1, "bar")
}
parse("""{"foo":"bar"}""", Some("application/json"), "utf-8", BodyParsers.parse.json[Foo]) must beLeft
parse("""{"a":1}""", Some("application/json"), "utf-8", BodyParsers.parse.json[Foo]) must beLeft
parse("""{"foo:}""", Some("application/json"), "utf-8", BodyParsers.parse.json[Foo]) must beLeft
}
}
}
| jyotikamboj/container | pf-framework/src/play-integration-test/src/test/scala/play/it/http/parsing/JsonBodyParserSpec.scala | Scala | mit | 3,796 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.ops
import org.platanios.tensorflow.api.core.exception.InvalidDataTypeException
import org.platanios.tensorflow.api.core.types._
import org.platanios.tensorflow.api.implicits.Implicits._
import org.platanios.tensorflow.api.ops
import org.platanios.tensorflow.api.ops.basic.Basic
import org.platanios.tensorflow.api.ops.control_flow.{Context, ControlFlow, GradientState}
import org.platanios.tensorflow.api.ops.math.Math
import org.platanios.tensorflow.api.utilities.DefaultsTo.AnyDefault
import org.platanios.tensorflow.jni.{Graph => NativeGraph, Output => NativeOutput}
import com.typesafe.scalalogging.Logger
import org.slf4j.LoggerFactory
import scala.collection.compat._
import scala.collection.mutable
/**
* @author Emmanouil Antonios Platanios
*/
object Gradients {
val logger = Logger(LoggerFactory.getLogger("Gradients"))
private[ops] trait API {
val gradients: ops.Gradients.type = ops.Gradients
}
// TODO: [GRADIENTS] !!! Figure out what the right signature for the gradient functions should be.
type GradientFn[I, O, GI >: I, GO >: O] = ( /* Op */ Op[I, O], /* Output Gradients */ GO) => GI
type UntypedGradientFn = GradientFn[Seq[OutputLike[Any]], Seq[OutputLike[Any]], Seq[OutputLike[Any]], Seq[OutputLike[Any]]]
private[ops] def convertGradientFn[I, O, GI >: I, GO >: O](
gradientFn: Gradients.GradientFn[I, O, GI, GO]
)(implicit
evGI: Op.OpInput[GI],
evGO: Op.OpOutput[GO]
): Gradients.UntypedGradientFn = {
def gradient(
op: Op[Seq[OutputLike[Any]], Seq[OutputLike[Any]]],
outputGradients: Seq[OutputLike[Any]]
): Seq[OutputLike[Any]] = {
val oGradients = implicitly[Op.OpOutput[GO]].fromOutputLikes(outputGradients)
val iGradient = gradientFn(op.asInstanceOf[Op[I, O]], oGradients)
implicitly[Op.OpInput[GI]].toOutputLikes(iGradient)
}
gradient
}
def unaryHelper[T: TF, OL[A] <: OutputLike[A], GO[A] >: OL[A]](
output: Output[T],
outputGradient: OL[T],
opType: String,
name: String,
gradientFn: Option[GradientFn[(Output[T], OL[T]), Output[T], (Output[T], GO[T]), Output[T]]] = None
): OL[T] = {
type GF[O[A]] = Option[GradientFn[(Output[T], O[T]), Output[T], (Output[T], O[T]), Output[T]]]
val gradient = outputGradient match {
case g: Output[T] =>
Op.Builder[(Output[T], Output[T]), Output[T]](
opType = opType,
name = name,
input = (output, g)
).setGradientFnHelper(gradientFn.asInstanceOf[GF[Output]])
.build().output
case g: OutputIndexedSlices[T] =>
val values = Op.Builder[(Output[T], OutputIndexedSlices[T]), Output[T]](
opType = opType,
name = name,
input = (output, g)
).setGradientFnHelper(gradientFn.asInstanceOf[GF[OutputIndexedSlices]])
.build().output
OutputIndexedSlices(indices = g.indices, values = values, denseShape = g.denseShape)
case g: SparseOutput[T] =>
val values = Op.Builder[(Output[T], SparseOutput[T]), Output[T]](
opType = opType,
name = name,
input = (output, g)
).setGradientFnHelper(gradientFn.asInstanceOf[GF[SparseOutput]])
.build().output
SparseOutput(indices = g.indices, values = values, denseShape = g.denseShape)
}
gradient.asInstanceOf[OL[T]]
}
// TODO: [DOC] Document the "gradients" function.
  /** Constructs symbolic partial derivatives of the sum of the `ys` with respect to each of the `xs`.
    *
    * Note that ops/graphs created outside TensorFlow Scala are not differentiable.
    *
    * @param ys                       Outputs whose partial derivatives are computed.
    * @param xs                       Tensors with respect to which the gradients are computed.
    * @param dataType                 Data type of the computed gradients.
    * @param dys                      Initial gradients for the `ys`. If `null`, ones are used.
    * @param gateGradients            If `true`, the gradients of each op are gated so that none of them is used
    *                                 before all of them have been computed.
    * @param aggregationMethod        Method used to aggregate the gradient contributions collected for each op output.
    * @param colocateGradientsWithOps If `true`, the created gradient ops are colocated with the corresponding
    *                                 forward ops.
    * @param name                     Name scope to use for the created gradient ops.
    * @return Partial derivatives of the sum of the `ys` with respect to each of the `xs`. Entries may be `null` for
    *         `xs` on which the `ys` do not depend.
    */
def gradients[T: TF, I: AnyDefault](
ys: Seq[Output[Any]],
xs: Seq[Output[I]],
dataType: DataType[T],
dys: Seq[OutputLike[T]] = null,
gateGradients: Boolean = false,
aggregationMethod: AggregationMethod = AddAggregationMethod,
colocateGradientsWithOps: Boolean = false,
name: String = "Gradients"
): Seq[OutputLike[T]] = Op.currentGraph.synchronized {
// The `accumulatedGradients` variable collects the gradients received on each output endpoint of the op. The
// gradients for each endpoint are initially collected as a sequence. When it is time to call the op's gradient
    // function, for each endpoint we aggregate the list of received gradients into an "add" operation, if there is more
    // than one.
val accumulatedGradients = mutable.Map.empty[UntypedOp, mutable.Seq[Seq[OutputLike[Any]]]]
Op.nameScope(name) {
// Get a UID for this call to gradients that can be used to help cluster ops for compilation.
val gradientUID = Op.currentGraph.uniqueName("GradientUID")
// The approach we take here is as follows: Create a list of all ops in the sub-graph between the ys and xs. Visit
// these ops in reverse order of ids to ensure that when we visit an op the gradients with respect to its outputs
// have been collected. Then, aggregate these gradients if needed, call the op's gradient function, and add the
// generated gradients to the gradients for its input.
// Initialize the pending counts for ops in the connected sub-graph between the ys and xs.
val sourceOps = xs.map(_.op).toSet
val destinationOps = ys.map(_.op).toSet
// `pendingCounts(op)` is a count-down counter for the expected gradients to accumulate for `op`. When
// `pendingCounts(op)` becomes zero, we have collected all the backpropagation gradients for all outputs of `op`.
val (pendingCounts, controlFlowGradientState) = initialPendingCounts(
sourceOps, destinationOps, colocateGradientsWithOps)
// `readyOps` keeps track of ops that have been completely processed. We initialize it with the destination ops.
// We filter the destination ops based on whether one output gradient relies on another output's gradient.
val readyOps = mutable.Queue(
destinationOps.filter(pendingCounts.getOrElse(_, 0) == 0).toSeq: _*)
// Add the initial gradients for the ys.
val dyInitial = initialGradients(dataType, ys, dys, colocateGradientsWithOps, gradientUID)
for ((y, dy) <- ys.zip(dyInitial))
setGradient(accumulatedGradients, y, dy)
controlFlowGradientState.foreach(state => {
state.processUnusedLoopExits(pendingCounts, destinationOps)
.filter(isTrainable)
.foreach(loopExit => {
val zeros = state.zerosLikeForExit(loopExit)(TF.fromDataType(loopExit.dataType))
val castedZeros = zeros.castTo[T]
setGradient(accumulatedGradients, loopExit, castedZeros)
readyOps.enqueue(loopExit.op)
})
})
// Stop ops form the frontier of the forward graph before which back-propagation should stop. Ops in this set will
// not be differentiated. This set is defined as the subset of `sourceOps` containing ops that have no predecessor
// in `sourceOps`. An op has predecessors in `sourceOps` if and only if `pendingCounts(op) > 0`.
val stopOps = sourceOps.filter(_.inputsSeq.forall(i => pendingCounts.getOrElse(i.op, 0) <= 0))
while (readyOps.nonEmpty) {
val op = readyOps.dequeue()
maybeColocateWith(op, colocateGradientsWithOps, gradientUID) {
controlFlowGradientState.foreach(_.enterGradientWhileLoopContext(op, before = true))
val opGradients = aggregationMethod.aggregateGradients(accumulatedGradients, op, gradientUID)
controlFlowGradientState.foreach(_.exitGradientWhileLoopContext(op, before = true))
val hasOutputGradients = opGradients.nonEmpty
val hasGradientFn = hasOutputGradients && !stopOps.contains(op) && op.hasGradient
controlFlowGradientState.foreach(_.enterGradientWhileLoopContext(op, before = false))
if (hasOutputGradients && hasGradientFn) {
            // Note that if the gradient aggregation did not compute a value for the i'th output, then the cost does
            // not depend on output i, and therefore the gradient with respect to that output is 0.
for ((gradient, outputIndex) <- opGradients.zipWithIndex) {
// Only floating-point outputs get a zero gradient. Gradient functions should ignore the gradient for
// other outputs.
val output = op.outputsSeq(outputIndex)
if (gradient.isEmpty && isTrainable(output))
// TODO: !!! [GRADIENTS] Gradients of resource handles might be an issue here because of the zeros.
opGradients(outputIndex) = Seq(
controlFlowGradientState
.map(_.zerosLike(op, outputIndex))
.getOrElse(Some(Context.zerosLikeOutsideLoop(op, outputIndex)))
.orNull)
}
// Compute the actual op gradients.
Op.createWith(nameScope = s"${op.name}Gradient") {
// TODO: [CONTEXT] Add support for original op context.
val outputGradients = opGradients.map(_.headOption.orNull)
var inputGradients = maybeCompile(name, op, () => op.gradientFn.get(op, outputGradients.toSeq))
if (gateGradients && inputGradients.count(_ != null) > 1) {
Op.createWith(device = null) {
Op.colocateWithForGradient(
Set.empty,
Some(gradientUID),
ignoreExisting = true
) {
val dataType = inputGradients.find(_ != null).get.dataType
inputGradients = ControlFlow.tuple(inputGradients)(TF.fromDataType(dataType))
}
}
}
val nInp = op.inputsSeq.length
val nGrd = inputGradients.length
assert(nInp == nGrd, s"Gradients size ($nGrd) for op '$op' does not match inputs size ($nInp).")
logGradients(op, outputGradients.toSeq, inputGradients)
// TODO: [GRADIENTS] !!! Report somehow the non-differentiable ops in the graph. This is currently hard to debug.
op.inputsSeq.zip(inputGradients).filter(_._2 != null).foreach(i => {
i._2 match {
case gradient: Output[_] if i._1.dataType != RESOURCE =>
gradient.setShape(i._1.shape)
case _ =>
}
setGradient(accumulatedGradients, i._1, i._2)
})
}
}
controlFlowGradientState.foreach(_.exitGradientWhileLoopContext(op, before = false))
}
// Update the pending counts for the inputs of `op` and enqueue ready ops.
op.inputsSeq.foreach(input => {
pendingCounts.update(input.op, pendingCounts.getOrElse(input.op, 0) - 1)
var ready = pendingCounts(input.op) == 0
if (!ready)
controlFlowGradientState.foreach(_ => {
ready = pendingCounts(input.op) > 0 && ControlFlow.isLoopSwitch(input.op)
})
if (ready) {
if (ControlFlow.isLoopExit(input.op)) {
// If `input` is an exit without real gradient, defer processing them.
controlFlowGradientState.flatMap(_.getGradientLoopState(input.op, before = false)).foreach(state => {
state.deferredExits += input
state.pendingExitsCount -= 1
if (state.pendingExitsCount == 0) {
// We now have all the exits and so we process them.
var hasRealGradient = false
state.deferredExits.foreach(exit => {
if (accumulatedGradients.get(exit.op).exists(_.exists(_.exists(_ != null)))) {
hasRealGradient = true
readyOps.enqueue(exit.op)
} else {
state.unusedExits += exit
}
})
if (hasRealGradient) {
// For an unused exit, if it has floating-point outputs, we back-propagate a zero gradient.
// Otherwise, we just ignore it.
state.unusedExits.foreach(exit => {
if (isTrainable(exit)) {
val zeros = controlFlowGradientState.get
.zerosLikeForExit(exit)(TF.fromDataType(exit.dataType))
val castedZeros = zeros.castTo[T]
setGradient(accumulatedGradients, exit, castedZeros)
}
readyOps.enqueue(exit.op)
})
} else {
// All exits are "unused" and so we use `null` as the gradient.
state.unusedExits.foreach(exit => readyOps.enqueue(exit.op))
}
}
})
} else {
readyOps.enqueue(input.op)
}
}
})
}
controlFlowGradientState.foreach(_.postProcess())
}
// Collect the aggregated gradients for the requested tensors and return them.
xs.map(x => {
val gradients = accumulatedGradients.get(x.op).map(_.apply(x.index))
if (gradients.isDefined && gradients.get.lengthCompare(1) > 0)
throw new IllegalArgumentException("The gradients should have been aggregated by now.")
gradients.map(_.head.asInstanceOf[OutputLike[T]]).orNull
})
}
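  // A hedged usage sketch (hypothetical tensors, not taken from the original docs): computing
  // d(sum of ys)/dx for a scalar loss and a set of parameters might look like
  //
  //   val loss: Output[Any] = ???
  //   val params: Seq[Output[Float]] = ???
  //   val grads = Gradients.gradients(Seq(loss), params, FLOAT32)
  //
  // where `grads(i)` is the gradient with respect to `params(i)`, or `null` when the ys do not
  // depend on that parameter (see the final mapping over `xs` above).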
/** If `colocateGradientsWithOps` is `true`, then all ops created within `block` will be colocated with `op`.
*
* @param op Op to maybe colocate with.
* @param colocateGradientsWithOps Boolean value indicating whether to colocate the gradient ops with the original
* ops.
* @param gradientUID Unique identifier within the graph indicating which invocation of gradients is
* being executed. Used to cluster ops for compilation.
* @param block Block of code to execute using the specified colocation ops.
* @return Return value of `block`.
*/
private def maybeColocateWith[R](
op: UntypedOp,
colocateGradientsWithOps: Boolean,
gradientUID: String
)(block: => R): R = {
if (colocateGradientsWithOps)
Op.colocateWithForGradient(Set(op), Some(gradientUID))(block)
else
block
}
// TODO: [FUNCTIONAL] Symbolic gradient ('_SymGrad').
/** If the op was marked as compiled, this function compiles the calculation in `gradientFunction` (using XLA) and
* returns the result of `gradientFunction`. Otherwise, it simply returns the result of `gradientFunction`.
*
* @param nameScope Name scope to use for the gradient ops.
* @param op Op whose gradients are being computed.
* @param gradientFunction Function that computes the gradients for `op`.
* @return Created gradients op.
*/
private def maybeCompile(
nameScope: String,
op: UntypedOp,
gradientFunction: () => Seq[OutputLike[Any]]
): Seq[OutputLike[Any]] = {
// TODO: [FUNCTIONAL] Add extra 'func' argument.
val cleanNameScope = nameScope.stripSuffix("/").replace('/', '_')
try {
val xlaCompile = op.booleanAttribute("_XlaCompile")
if (!xlaCompile) {
gradientFunction() // Exit early
} else {
val xlaSeparateCompileGradient = op.booleanAttribute("_XlaSeparateCompiledGradients")
val xlaScope = op.stringAttribute("_XlaScope")
// If the gradients are supposed to be compiled separately, we give them an '_XlaScope' name that is based on
// the name_scope of the gradients. Otherwise, they just inherit the existing '_XlaScope' name, which lets them
// be merged together with the non-gradient computation.
val xlaGradientsScope = if (xlaSeparateCompileGradient) s"${xlaScope}_grad_$cleanNameScope" else xlaScope
Op.createWith(attributes = Map(
"_XlaCompile" -> xlaCompile,
"_XlaScope" -> xlaGradientsScope)
) {
gradientFunction()
}
}
} catch {
case _: IllegalArgumentException => gradientFunction() // Something went wrong and so we exit
}
}
  /** Returns a boolean value indicating whether the data type of `tensor` is trainable, i.e., whether gradients can
    * be computed with respect to it. */
private def isTrainable(tensor: OutputLike[Any]): Boolean = {
Set(FLOAT16, FLOAT32, FLOAT64, COMPLEX64, COMPLEX128).contains(tensor.dataType)
}
/** Computes initial values for the provided gradients, and checks whether their data types are correct.
*
* @param dataType Data type of the gradients.
* @param ys Sequence containing the variables corresponding to `dys`.
* @param dys Sequence containing tensor gradients.
* @param colocateGradientsWithOps Boolean value indicating whether to colocate the gradient ops with the original
* ops.
* @param gradientUID Unique identifier within the graph indicating which invocation of gradients is
* being executed. Used to cluster ops for compilation.
* @return Sequence containing the default gradient values.
* @throws InvalidDataTypeException If the gradient tensor data types are not compatible with the input data types.
*/
@throws[InvalidDataTypeException]
private def initialGradients[T: TF](
dataType: DataType[T],
ys: Seq[OutputLike[Any]],
dys: Seq[OutputLike[T]],
colocateGradientsWithOps: Boolean,
gradientUID: String = "__unsupported__"
): Seq[OutputLike[T]] = {
ys.zip(if (dys != null) dys else Seq.fill[OutputLike[T]](ys.length)(null))
.zipWithIndex.map {
case ((y, dy), index) =>
if (dy == null) {
if (y.dataType.isComplex) {
throw InvalidDataTypeException(
s"Gradients of complex tensors must " +
s"set 'gradients' (variable.dataType = '${y.dataType}').")
}
maybeColocateWith(y.op, colocateGradientsWithOps, gradientUID) {
y match {
case o: Output[_] =>
Op.nameScope(s"Gradients_$index") {
if (o.shape.isFullyDefined) {
Basic.ones[T](o.shape)
} else {
Basic.ones[T](Basic.shape(o)(TF.fromDataType(o.dataType)))
}
}
case o: OutputIndexedSlices[_] =>
if (o.denseShape == null) {
throw new IllegalArgumentException(
"The dense shape of output indexed slices must " +
"be known in order to obtain their gradients.")
}
Op.nameScope(s"Gradients_$index") {
Basic.ones[T](o.denseShape)
}
case o: SparseOutput[_] =>
Op.nameScope(s"Gradients_$index") {
Basic.ones[T, Long](o.denseShape)
}
}
}
} else {
if (y.dataType.isFloatingPoint || y.dataType.isInteger) {
if (!dy.dataType.isFloatingPoint && !dy.dataType.isInteger) {
throw InvalidDataTypeException(
s"Gradient data type '${dy.dataType}' generated for " +
s"real or integer-valued tensor '$y' with data type " +
s"'${y.dataType}' must be real or integer.")
}
} else if (y.dataType.isComplex) {
if (!dy.dataType.isComplex) {
throw InvalidDataTypeException(
s"Gradient data type '${dy.dataType}' generated for " +
s"complex-valued tensor '$y' with data type " +
s"'${y.dataType}' must be complex.")
}
} else {
throw InvalidDataTypeException(
s"Tensor '$y' with data type '${y.dataType}' must " +
s"be numeric in order to obtain a default gradient.")
}
// Create a gradients tensor in the name scope of the gradients. This is required in order for tensor arrays
// to identify which gradient call a gradient value is coming from.
dy match {
case o: Output[T] =>
Basic.identity(o, name = s"Gradients_$index")
case o: OutputIndexedSlices[T] =>
OutputIndexedSlices(
Basic.identity(o.indices, name = s"Gradients_${index}_Indices"),
Basic.identity(o.values, name = s"Gradients_${index}_Values"),
if (o.denseShape == null)
o.denseShape
else
Basic.identity(o.denseShape, name = s"Gradients_${index}_DenseShape"))
case o: SparseOutput[T] =>
SparseOutput(
Basic.identity(o.indices, name = s"Gradients_${index}_Indices"),
Basic.identity(o.values, name = s"Gradients_${index}_Values"),
if (o.denseShape == null)
o.denseShape
else
Basic.identity(o.denseShape, name = s"Gradients_${index}_DenseShape"))
}
}
}
}
/** Initializes the back-propagation input counts for ops between two sets of ops.
*
    * 'pendingCounts(op)' indicates the number of back-propagation inputs to this op.
*
* @param sourceOps Set of source ops.
* @param destinationOps Set of destination ops.
* @param colocateGradientsWithOps Boolean value indicating whether to colocate the gradient ops with the original
* ops.
* @return Tuple containing: (1) Map from op to the number of back-propagation inputs to this op, and (2) a control
* flow gradient state object which is not `None` if the ops between `sources` and `destinations` contain
* control flow loops.
*/
private def initialPendingCounts(
sourceOps: Set[UntypedOp],
destinationOps: Set[UntypedOp],
colocateGradientsWithOps: Boolean
): (mutable.Map[UntypedOp, Int], Option[GradientState]) = {
// Mark ops reached when going from 'sources' to 'destinations'
val reached = mutable.Set(destinationOps.toSeq: _*)
val reachedQueue = mutable.Queue(sourceOps.toSeq: _*)
while (reachedQueue.nonEmpty) {
val op = reachedQueue.dequeue()
if (!reached.contains(op)) {
reached += op
op.outputsSeq.foreach(_.consumers.foreach(c => reachedQueue.enqueue(c.op)))
}
}
// Mark ops between 'sources' and 'destinations'
val between = mutable.Set.empty[UntypedOp]
// TODO: [CONTROL_FLOW] Do we need the list aside from the set?
val betweenList = mutable.ListBuffer.empty[UntypedOp]
val betweenQueue = mutable.Queue(destinationOps.toSeq: _*)
while (betweenQueue.nonEmpty) {
val op = betweenQueue.dequeue()
if (reached.contains(op)) {
between += op
betweenList += op
reached -= op // Done so we don't go through the same ops twice
op.inputsSeq.foreach(i => betweenQueue.enqueue(i.op))
}
}
// `controlFlowGradientState` is `None` if there are no while loops.
val controlFlowGradientState = GradientState.maybeCreate(
between, betweenList, colocateGradientsWithOps)
// Initialize the pending counts for the between ops
val pendingCounts = mutable.Map.empty[UntypedOp, Int]
betweenList
.flatMap(_.inputsSeq)
.map(_.op)
.filter(between.contains)
.foreach(input => {
pendingCounts.update(input, pendingCounts.getOrElse(input, 0) + 1)
})
(pendingCounts, controlFlowGradientState)
}
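  // For intuition (illustrative sketch): for a simple chain  x --> a --> y  with
  // `sourceOps = Set(x.op)` and `destinationOps = Set(y.op)`, all three ops are "between" and
  // the resulting map is roughly Map(x.op -> 1, a.op -> 1). Each op must thus receive exactly
  // one gradient contribution from downstream before it becomes "ready" in the main loop of
  // `gradients` above, while `y.op` is absent from the map and therefore counts as zero.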
/** Adds the provided `gradient` to the sequence of `output`'s gradients that have been collected so far.
*
* @param gradients Map where the collected gradients are stored.
* @param output Op output whose gradient is provided.
* @param gradient Gradient of `output` to add to the collected gradients.
*/
private def setGradient(
gradients: mutable.Map[UntypedOp, mutable.Seq[Seq[OutputLike[Any]]]],
output: Output[Any],
gradient: OutputLike[Any]
): Unit = {
val opGradients = gradients.getOrElseUpdate(
output.op, mutable.Seq(output.op.outputsSeq.map(_ => Seq.empty): _*))
if (ControlFlow.isLoopSwitch(output.op))
opGradients(output.index) = Seq(gradient)
else
opGradients(output.index) :+= gradient
}
/** Logs the input and output gradients of the provided op.
*
* @param op Op.
* @param outputGradients Output gradients of op.
* @param inputGradients Input gradients of op.
*/
private def logGradients(
op: UntypedOp,
outputGradients: Seq[OutputLike[Any]],
inputGradients: Seq[OutputLike[Any]]
): Unit = {
logger.debug(s"Gradients for op '${op.name}':")
logger.debug(s" in --> ${outputGradients.filter(_ != null).map(_.name).mkString(", ")}")
logger.debug(s" out --> ${inputGradients.filter(_ != null).map(_.name).mkString(", ")}")
}
sealed trait GatingMethod
object NoGating extends GatingMethod
object OpGating extends GatingMethod
object GraphGating extends GatingMethod
/** Aggregation method used to combine gradients.
*
* Computing partial derivatives can require aggregating gradient contributions. All such aggregation methods are
* represented as objects extending this trait.
*/
sealed trait AggregationMethod {
/** Aggregate the gradients for op `op`.
*
* @param gradients Map where the collected gradients are stored. The gradient sequences corresponding to `op`
* will be replaced with sequences containing a single element corresponding to the aggregated
* gradient.
* @param op Op whose gradients to aggregate.
* @param gradientUID Unique identifier within the graph indicating which invocation of gradients is being
* executed. Used to cluster ops for compilation.
*/
private[Gradients] def aggregateGradients(
gradients: mutable.Map[UntypedOp, mutable.Seq[Seq[OutputLike[Any]]]],
op: UntypedOp,
gradientUID: String
): mutable.Seq[Seq[OutputLike[Any]]] = {
val opGradients = gradients.getOrElse(op, mutable.Seq.empty[Seq[OutputLike[Any]]])
if (ControlFlow.isLoopSwitch(op)) {
opGradients
} else {
opGradients.zipWithIndex.foreach {
case (grads, index) =>
if (grads.length < 2) {
grads
} else {
val gs = grads.filter(_ != null)
opGradients(index) = Seq(
aggregate(gs, Some(gradientUID))(TF.fromDataType(gs.head.dataType)))
}
}
opGradients
}
}
/** Aggregates `values` into a single tensor.
*
* @param values Sequence of values to aggregate.
* @param gradientUID Unique identifier within the graph indicating which invocation of gradients is being
* executed (if any). Used to cluster ops for compilation.
* @return Aggregated tensor.
*/
private[ops] def aggregate[T: TF](
values: Seq[OutputLike[T]],
gradientUID: Option[String] = None
): OutputLike[T]
}
/** Gradient aggregation method that simply adds up the collected gradients. */
object AddAggregationMethod extends AggregationMethod {
override private[ops] def aggregate[T: TF](
gradients: Seq[OutputLike[T]],
gradientUID: Option[String] = None
): OutputLike[T] = {
// TODO: [TYPES] !!! Super hacky. Remove in the future.
implicit val ev: IsNumeric[T] = null
if (gradients.forall(_.isInstanceOf[OutputIndexedSlices[T]])) {
def addNOutputIndexedSlices(
gradients: Seq[OutputIndexedSlices[T]]
): OutputIndexedSlices[T] = {
if (gradients.isEmpty) {
throw new IllegalArgumentException(
"Can not aggregate empty gradients list.")
} else if (gradients.length == 1) {
gradients.head
} else {
OutputIndexedSlices(
Basic.concatenate(gradients.map(_.indices)),
Basic.concatenate(gradients.map(_.values)),
gradients.head.denseShape)
}
}
val deviceContributions = gradients.groupBy(_.device).toSeq.sortBy(_._1).map {
case (_, outputs) => addNOutputIndexedSlices(
outputs.map(_.asInstanceOf[OutputIndexedSlices[T]]))
}
addNOutputIndexedSlices(deviceContributions)
} else {
// This function adds op outputs from potentially different devices.
// We add the tensors of each device separately first, and we then add up the partial results.
val deviceContributions = gradients.groupBy(_.device).toSeq.sortBy(_._1).map {
case (_, outputs) =>
Op.colocateWithForGradient(
Set(gradients.head.op),
gradientUID,
ignoreExisting = true
) {
Math.addN(outputs.map(_.toOutput))
}
}
Math.addN(deviceContributions)
}
}
}
/** Gradient aggregation method that simply adds up the collected gradients, without first waiting for all of them to
* become available at once.
*
* The benefit of using this method is that its inputs can be combined in any order and this can allow the expression
* to be evaluated with a smaller memory footprint. With this method, it is possible to compute a sum of terms which
* are much larger than total GPU memory.
*/
object AccumulateAggregationMethod extends AggregationMethod {
override private[ops] def aggregate[T: TF](
gradients: Seq[OutputLike[T]],
gradientUID: Option[String] = None
): OutputLike[T] = {
// TODO: [TYPES] !!! Super hacky. Remove in the future.
implicit val ev: IsNumeric[T] = null
if (gradients.forall(_.isInstanceOf[Output[T]])) {
Math.accumulateN(gradients.map(_.asInstanceOf[Output[T]]))
} else if (gradients.forall(_.isInstanceOf[OutputIndexedSlices[T]])) {
def addNOutputIndexedSlices(
gradients: Seq[OutputIndexedSlices[T]]
): OutputIndexedSlices[T] = {
if (gradients.isEmpty) {
throw new IllegalArgumentException(
"Can not aggregate empty gradients list.")
} else if (gradients.length == 1) {
gradients.head
} else {
OutputIndexedSlices(
Basic.concatenate(gradients.map(_.indices)),
Basic.concatenate(gradients.map(_.values)),
gradients.head.denseShape)
}
}
val deviceContributions = gradients.groupBy(_.device).toSeq.sortBy(_._1).map {
case (_, outputs) => addNOutputIndexedSlices(
outputs.map(_.asInstanceOf[OutputIndexedSlices[T]]))
}
addNOutputIndexedSlices(deviceContributions)
} else {
throw new IllegalArgumentException(
"The gradients being aggregated need to be all " +
"of type 'Output' or 'OutputIndexedSlices'.")
}
}
}
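  // A hedged usage note: the aggregation method is chosen per call to `gradients`, e.g.
  //
  //   Gradients.gradients(ys, xs, FLOAT32, aggregationMethod = AccumulateAggregationMethod)
  //
  // trades the single `Math.addN` over all contributions (as in `AddAggregationMethod`) for
  // `Math.accumulateN`, which does not require all inputs to be live at once and can therefore
  // lower the peak memory footprint, as described above.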
/** Adds ops to the graph to compute the partial derivatives of the sum of `y`s with respect to the `x`s, using the
* C++ gradients support of the TensorFlow native library.
*
* Note that the C++ gradients support of the TensorFlow native library is incomplete and will not be sufficient for
* many use cases. It is mainly exposed as means of comparison to the Scala API functionality.
*
* The result of this function is an array containing: `d(y_1 + y_2 + ...)/dx_1`, `d(y_1 + y_2 + ...)/dx_2`, `...`.
*
* @param y Tensors whose partial derivatives are computed.
* @param x Tensors with respect to which the gradients are computed.
* @param dy Tensors to use as the initial gradients. They represent the symbolic partial derivatives of some loss
    *            function `L` with respect to `y`. If `null`, then ones are used. The number of tensors in `dy` must
* match the number of tensors in `y`.
* @return Partial derivatives of the `y`s given each one of the `x`s.
    * @throws IllegalArgumentException If the length of `y` does not match the length of `dy`.
*/
@throws[IllegalArgumentException]
def ccGradients[T: TF, O](
y: Array[Output[O]],
x: Array[Output[T]],
dy: Array[Output[T]] = null
): Array[Output[T]] = {
// TODO: Overload this method with all possible uses for it.
if (dy != null && dy.length != y.length) {
throw new IllegalArgumentException(
s"The number of ys (${y.length}) must match the number of dxs (${dy.length}).")
}
// Obtain the graph and verify that all provided op outputs are defined over the same graph
val graph = y.head.graph
y.foreach(o => Op.assertSameGraph(o.op, y.head.op))
x.foreach(o => Op.assertSameGraph(o.op, y.head.op))
if (dy != null)
dy.foreach(o => Op.assertSameGraph(o.op, y.head.op))
// Map all arrays to the corresponding data structures used by the JNI layer
val yJNI = y.map(o => NativeOutput(o.op.nativeHandle, o.index))
val xJNI = x.map(o => NativeOutput(o.op.nativeHandle, o.index))
val dxJNI = if (dy == null) null else dy.map(o => NativeOutput(o.op.nativeHandle, o.index))
// Add the gradients to the graph and collect them to the array that is returned
val jniGradients = NativeGraph.addGradients(graph.nativeHandle, yJNI, xJNI, dxJNI)
jniGradients.map(o => {
val op = graph.opsCache.getOrElseUpdate(o.opHandle, {
new Op[Seq[Output[Any]], Seq[Output[Any]]](graph, None, o.opHandle)
})
Output[T](op, o.outputIndex)
})
}
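  // Illustrative sketch (hypothetical tensors): the native path mirrors `gradients` but works on
  // arrays and always differentiates the sum of the ys, e.g.
  //
  //   val dxs = Gradients.ccGradients(Array(y1, y2), Array(x))
  //
  // where `dxs(0)` corresponds to d(y1 + y2)/dx. As noted above, the C++ gradient support is
  // incomplete, so this is mainly useful for comparison against the Scala implementation.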
}
| eaplatanios/tensorflow_scala | modules/api/src/main/scala/org/platanios/tensorflow/api/ops/Gradients.scala | Scala | apache-2.0 | 34,913 |
package drt.shared.dates
import upickle.default.{ReadWriter, macroRW}
case class LocalDate(year: Int, month: Int, day: Int) extends DateLike {
override val timeZone: String = "Europe/London"
}
case object LocalDate {
implicit val rw: ReadWriter[LocalDate] = macroRW
def parse: String => Option[LocalDate] = DateLike.parse((y: Int, m: Int, d: Int) => LocalDate(y, m, d))
}
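// Usage sketch (the accepted string format is assumed here, delegated to DateLike.parse):
//
//   LocalDate.parse("2020-06-01") // expected to yield Some(LocalDate(2020, 6, 1))
//   LocalDate.parse("not a date") // expected to yield None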
| UKHomeOffice/drt-scalajs-spa-exploration | shared/src/main/scala/drt/shared/dates/LocalDate.scala | Scala | apache-2.0 | 383 |
class Test {
def test(b: Base): Unit = b match {
case Base_1(Some(_)) =>
case Base_2(Nested_1(_)) =>
case Base_2(Nested_2(_)) =>
}
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t9630/t9630b.scala | Scala | apache-2.0 | 150 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.calcite
import org.apache.flink.sql.parser.SqlProperty
import org.apache.flink.sql.parser.dml.RichSqlInsert
import org.apache.flink.table.calcite.PreValidateReWriter.appendPartitionProjects
import org.apache.flink.table.catalog.CatalogReader
import org.apache.calcite.plan.RelOptTable
import org.apache.calcite.prepare.CalciteCatalogReader
import org.apache.calcite.rel.`type`.{RelDataType, RelDataTypeFactory, RelDataTypeField}
import org.apache.calcite.runtime.{CalciteContextException, Resources}
import org.apache.calcite.sql.`type`.SqlTypeUtil
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.sql.parser.SqlParserPos
import org.apache.calcite.sql.util.SqlBasicVisitor
import org.apache.calcite.sql.validate.{SqlValidatorException, SqlValidatorTable, SqlValidatorUtil}
import org.apache.calcite.sql.{SqlCall, SqlIdentifier, SqlLiteral, SqlNode, SqlNodeList, SqlSelect, SqlUtil}
import org.apache.calcite.util.Static.RESOURCE
import java.util
import scala.collection.JavaConversions._
/** Implements [[org.apache.calcite.sql.util.SqlVisitor]]
* interface to do some rewrite work before sql node validation. */
class PreValidateReWriter(
val catalogReader: CatalogReader,
val typeFactory: RelDataTypeFactory) extends SqlBasicVisitor[Unit] {
override def visit(call: SqlCall): Unit = {
call match {
case r: RichSqlInsert if r.getStaticPartitions.nonEmpty
&& r.getSource.isInstanceOf[SqlSelect] =>
appendPartitionProjects(r, catalogReader, typeFactory,
r.getSource.asInstanceOf[SqlSelect], r.getStaticPartitions)
case _ =>
}
}
}
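// A hedged sketch of how this visitor is typically driven (surrounding names are assumed, not
// taken from this file): the parsed statement is offered to the rewriter before validation, and
// only INSERTs with static partitions and a SELECT source are rewritten in place.
//
//   val rewriter = new PreValidateReWriter(catalogReader, typeFactory)
//   sqlNode.accept(rewriter) // no-op for anything but a matching RichSqlInsert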
object PreValidateReWriter {
//~ Tools ------------------------------------------------------------------
/**
   * Append the static partitions to the data source projection list. The partition columns are
   * inserted at their corresponding positions in the target row type.
*
* <p>If we have a table A with schema (<a>, <b>, <c>) whose
   * partition columns are (<a>, <c>), and we get a query
* <blockquote><pre>
* insert into A partition(a='11', c='22')
* select b from B
* </pre></blockquote>
* The query would be rewritten to:
* <blockquote><pre>
* insert into A partition(a='11', c='22')
* select cast('11' as tpe1), b, cast('22' as tpe2) from B
* </pre></blockquote>
* Where the "tpe1" and "tpe2" are data types of column a and c of target table A.
*
* @param sqlInsert RichSqlInsert instance
* @param calciteCatalogReader catalog reader
* @param typeFactory type factory
* @param select Source sql select
* @param partitions Static partition statements
*/
def appendPartitionProjects(sqlInsert: RichSqlInsert,
calciteCatalogReader: CalciteCatalogReader,
typeFactory: RelDataTypeFactory,
select: SqlSelect,
partitions: SqlNodeList): Unit = {
val names = sqlInsert.getTargetTable.asInstanceOf[SqlIdentifier].names
val table = calciteCatalogReader.getTable(names)
if (table == null) {
      // The table does not exist in the current catalog;
      // just skip and let the subsequent validation report the error.
return
}
val targetRowType = createTargetRowType(typeFactory,
calciteCatalogReader, table, sqlInsert.getTargetColumnList)
// validate partition fields first.
val assignedFields = new util.LinkedHashMap[Integer, SqlNode]
val relOptTable = table match {
case t: RelOptTable => t
case _ => null
}
for (node <- partitions.getList) {
val sqlProperty = node.asInstanceOf[SqlProperty]
val id = sqlProperty.getKey
val targetField = SqlValidatorUtil.getTargetField(targetRowType,
typeFactory, id, calciteCatalogReader, relOptTable)
validateField(idx => !assignedFields.contains(idx), id, targetField)
val value = sqlProperty.getValue.asInstanceOf[SqlLiteral]
assignedFields.put(targetField.getIndex,
maybeCast(value, value.createSqlType(typeFactory), targetField.getType, typeFactory))
}
val currentNodes = new util.ArrayList[SqlNode](select.getSelectList.getList)
val fixedNodes = new util.ArrayList[SqlNode]
0 until targetRowType.getFieldList.length foreach {
idx =>
if (assignedFields.containsKey(idx)) {
fixedNodes.add(assignedFields.get(idx))
} else if (currentNodes.size() > 0) {
fixedNodes.add(currentNodes.remove(0))
}
}
    // Although this is an error case, we still append the remaining old
    // projection nodes to the new projection.
if (currentNodes.size > 0) {
fixedNodes.addAll(currentNodes)
}
select.setSelectList(new SqlNodeList(fixedNodes, select.getSelectList.getParserPosition))
}
/**
* Derives a row-type for INSERT and UPDATE operations.
*
   * <p>This code snippet is largely adapted from
   * [[org.apache.calcite.sql.validate.SqlValidatorImpl#createTargetRowType]].
   * Ideally the logic would be merged into Apache Calcite,
   * but that needs time.
*
* @param typeFactory TypeFactory
* @param catalogReader CalciteCatalogReader
* @param table Target table for INSERT/UPDATE
* @param targetColumnList List of target columns, or null if not specified
* @return Rowtype
*/
private def createTargetRowType(
typeFactory: RelDataTypeFactory,
catalogReader: CalciteCatalogReader,
table: SqlValidatorTable,
targetColumnList: SqlNodeList): RelDataType = {
val baseRowType = table.getRowType
if (targetColumnList == null) return baseRowType
val fields = new util.ArrayList[util.Map.Entry[String, RelDataType]]
val assignedFields = new util.HashSet[Integer]
val relOptTable = table match {
case t: RelOptTable => t
case _ => null
}
for (node <- targetColumnList) {
val id = node.asInstanceOf[SqlIdentifier]
val targetField = SqlValidatorUtil.getTargetField(baseRowType,
typeFactory, id, catalogReader, relOptTable)
validateField(assignedFields.add, id, targetField)
fields.add(targetField)
}
typeFactory.createStructType(fields)
}
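  // For example (sketch): for a target table A(a INT, b STRING, c BIGINT) and a statement such as
  // `INSERT INTO A (c, a) SELECT ...`, the derived row type is (c BIGINT, a INT), i.e. only the
  // explicitly listed columns in statement order, while a statement without a column list keeps
  // the full base row type.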
/** Check whether the field is valid. **/
private def validateField(tester: Function[Integer, Boolean],
id: SqlIdentifier,
targetField: RelDataTypeField): Unit = {
if (targetField == null) {
throw newValidationError(id, RESOURCE.unknownTargetColumn(id.toString))
}
if (!tester.apply(targetField.getIndex)) {
throw newValidationError(id, RESOURCE.duplicateTargetColumn(targetField.getName))
}
}
private def newValidationError(node: SqlNode,
e: Resources.ExInst[SqlValidatorException]): CalciteContextException = {
assert(node != null)
val pos = node.getParserPosition
SqlUtil.newContextException(pos, e)
}
// This code snippet is copied from the SqlValidatorImpl.
private def maybeCast(node: SqlNode,
currentType: RelDataType,
desiredType: RelDataType,
typeFactory: RelDataTypeFactory): SqlNode = {
if (currentType == desiredType
|| (currentType.isNullable != desiredType.isNullable
&& typeFactory.createTypeWithNullability(currentType, desiredType.isNullable)
== desiredType)) {
node
} else {
SqlStdOperatorTable.CAST.createCall(SqlParserPos.ZERO,
node, SqlTypeUtil.convertTypeToSpec(desiredType))
}
}
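  // For intuition (hedged, simplified): when a static partition value such as the character
  // literal '11' is assigned to an INT column, the call above is expected to wrap the node as
  // CAST('11' AS INTEGER); when the literal's type already matches the target column type
  // (modulo nullability), the node is returned unchanged.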
}
| bowenli86/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/calcite/PreValidateReWriter.scala | Scala | apache-2.0 | 8,294 |
/*
* Copyright 2018 Vladimir Konstantinov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.illfaku.korro.dto.ws
import akka.actor.ActorRef
/**
* Response for WebSocket handshake request.
*
* @param actor Actor that will process WebSocket frames.
*/
case class WsHandshakeResponse(actor: ActorRef)
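// A hedged usage sketch (the surrounding korro protocol is inferred from the scaladoc above, not
// from this file): the actor receiving a WebSocket handshake request presumably replies with this
// message to designate the frame-handling actor, e.g.
//
//   sender() ! WsHandshakeResponse(frameHandlerRef)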
| oxy-development/korro | src/main/scala/com/github/illfaku/korro/dto/ws/WsHandshakeResponse.scala | Scala | apache-2.0 | 836 |
package scala
package reflect
package internal
package tpe
import scala.collection.{ mutable }
import util.{ Statistics, TriState }
import scala.annotation.tailrec
trait TypeComparers {
self: SymbolTable =>
import definitions._
import TypesStats._
private final val LogPendingSubTypesThreshold = TypeConstants.DefaultLogThreshhold
private val _pendingSubTypes = new mutable.HashSet[SubTypePair]
def pendingSubTypes = _pendingSubTypes
final case class SubTypePair(tp1: Type, tp2: Type) {
// SI-8146 we used to implement equality here in terms of pairwise =:=.
// But, this was inconsistent with hashCode, which was based on the
// Type#hashCode, based on the structure of types, not the meaning.
// Now, we use `Type#{equals,hashCode}` as the (consistent) basis for
// detecting cycles (aka keeping subtyping decidable.)
//
    // I added tests to show that we detect the cycle: neg/t8146-no-finitary*
override def toString = tp1+" <:<? "+tp2
}
private var _subsametypeRecursions: Int = 0
def subsametypeRecursions = _subsametypeRecursions
def subsametypeRecursions_=(value: Int) = _subsametypeRecursions = value
private def isUnifiable(pre1: Type, pre2: Type) = (
(isEligibleForPrefixUnification(pre1) || isEligibleForPrefixUnification(pre2))
&& (pre1 =:= pre2)
)
/** Returns true iff we are past phase specialize,
* sym1 and sym2 are two existential skolems with equal names and bounds,
* and pre1 and pre2 are equal prefixes
*/
private def isSameSpecializedSkolem(sym1: Symbol, sym2: Symbol, pre1: Type, pre2: Type) = {
sym1.isExistentialSkolem && sym2.isExistentialSkolem &&
sym1.name == sym2.name &&
phase.specialized &&
sym1.info =:= sym2.info &&
pre1 =:= pre2
}
private def isSubPre(pre1: Type, pre2: Type, sym: Symbol) =
if ((pre1 ne pre2) && (pre1 ne NoPrefix) && (pre2 ne NoPrefix) && pre1 <:< pre2) {
if (settings.debug) println(s"new isSubPre $sym: $pre1 <:< $pre2")
true
} else
false
private def equalSymsAndPrefixes(sym1: Symbol, pre1: Type, sym2: Symbol, pre2: Type): Boolean = (
if (sym1 eq sym2)
sym1.hasPackageFlag || sym1.owner.hasPackageFlag || phase.erasedTypes || pre1 =:= pre2
else
(sym1.name == sym2.name) && isUnifiable(pre1, pre2)
)
def isDifferentType(tp1: Type, tp2: Type): Boolean = try {
subsametypeRecursions += 1
undoLog undo { // undo type constraints that arise from operations in this block
!isSameType1(tp1, tp2)
}
} finally {
subsametypeRecursions -= 1
// XXX AM TODO: figure out when it is safe and needed to clear the log -- the commented approach below is too eager (it breaks #3281, #3866)
// it doesn't help to keep separate recursion counts for the three methods that now share it
// if (subsametypeRecursions == 0) undoLog.clear()
}
def isDifferentTypeConstructor(tp1: Type, tp2: Type) = !isSameTypeConstructor(tp1, tp2)
private def isSameTypeConstructor(tr1: TypeRef, tr2: TypeRef): Boolean = (
(tr1.sym eq tr2.sym)
&& !isDifferentType(tr1.pre, tr2.pre)
)
private def isSameTypeConstructor(tp1: Type, tp2: Type): Boolean = (
tp1.isInstanceOf[TypeRef]
&& tp2.isInstanceOf[TypeRef]
&& isSameTypeConstructor(tp1.asInstanceOf[TypeRef], tp2.asInstanceOf[TypeRef])
)
/** Do `tp1` and `tp2` denote equivalent types? */
def isSameType(tp1: Type, tp2: Type): Boolean = try {
if (Statistics.canEnable) Statistics.incCounter(sametypeCount)
subsametypeRecursions += 1
//OPT cutdown on Function0 allocation
//was:
// undoLog undoUnless {
// isSameType1(tp1, tp2)
// }
val before = undoLog.log
var result = false
try {
result = isSameType1(tp1, tp2)
}
finally if (!result) undoLog.undoTo(before)
result
}
finally {
subsametypeRecursions -= 1
// XXX AM TODO: figure out when it is safe and needed to clear the log -- the commented approach below is too eager (it breaks #3281, #3866)
// it doesn't help to keep separate recursion counts for the three methods that now share it
// if (subsametypeRecursions == 0) undoLog.clear()
}
// @pre: at least one argument has annotations
private def sameAnnotatedTypes(tp1: Type, tp2: Type) = (
annotationsConform(tp1, tp2)
&& annotationsConform(tp2, tp1)
&& (tp1.withoutAnnotations =:= tp2.withoutAnnotations)
)
// We flush out any AnnotatedTypes before calling isSameType2 because
// unlike most other subclasses of Type, we have to allow for equivalence of any
// combination of { tp1, tp2 } { is, is not } an AnnotatedType - this because the
// logic of "annotationsConform" is arbitrary and unknown.
private def isSameType1(tp1: Type, tp2: Type): Boolean = typeRelationPreCheck(tp1, tp2) match {
case state if state.isKnown => state.booleanValue
case _ if typeHasAnnotations(tp1) || typeHasAnnotations(tp2) => sameAnnotatedTypes(tp1, tp2)
case _ => isSameType2(tp1, tp2)
}
private def isSameHKTypes(tp1: Type, tp2: Type) = (
tp1.isHigherKinded
&& tp2.isHigherKinded
&& (tp1.normalize =:= tp2.normalize)
)
private def isSameTypeRef(tr1: TypeRef, tr2: TypeRef) = (
equalSymsAndPrefixes(tr1.sym, tr1.pre, tr2.sym, tr2.pre)
&& (isSameHKTypes(tr1, tr2) || isSameTypes(tr1.args, tr2.args))
)
private def isSameSingletonType(tp1: SingletonType, tp2: SingletonType): Boolean = {
// We don't use dealiasWiden here because we are looking for the SAME type,
// and widening leads to a less specific type. The logic is along the lines of
// dealiasAndFollowUnderlyingAsLongAsTheTypeIsEquivalent. This method is only
// called after a surface comparison has failed, so if chaseDealiasedUnderlying
// does not produce a type other than tp1 and tp2, return false.
@tailrec def chaseDealiasedUnderlying(tp: Type): Type = tp.underlying.dealias match {
case next: SingletonType if tp ne next => chaseDealiasedUnderlying(next)
case _ => tp
}
val origin1 = chaseDealiasedUnderlying(tp1)
val origin2 = chaseDealiasedUnderlying(tp2)
((origin1 ne tp1) || (origin2 ne tp2)) && (origin1 =:= origin2)
}
private def isSameMethodType(mt1: MethodType, mt2: MethodType) = (
isSameTypes(mt1.paramTypes, mt2.paramTypes)
&& (mt1.resultType =:= mt2.resultType.substSym(mt2.params, mt1.params))
&& (mt1.isImplicit == mt2.isImplicit)
)
private def equalTypeParamsAndResult(tparams1: List[Symbol], res1: Type, tparams2: List[Symbol], res2: Type) = {
def subst(info: Type) = info.substSym(tparams2, tparams1)
// corresponds does not check length of two sequences before checking the predicate,
// but SubstMap assumes it has been checked (SI-2956)
( sameLength(tparams1, tparams2)
&& (tparams1 corresponds tparams2)((p1, p2) => methodHigherOrderTypeParamsSameVariance(p1, p2) && p1.info =:= subst(p2.info))
&& (res1 =:= subst(res2))
)
}
// SI-2066 This prevents overrides with incompatible variance in higher order type parameters.
private def methodHigherOrderTypeParamsSameVariance(sym1: Symbol, sym2: Symbol) = {
def ignoreVariance(sym: Symbol) = !(sym.isHigherOrderTypeParameter && sym.logicallyEnclosingMember.isMethod)
!settings.isScala211 || ignoreVariance(sym1) || ignoreVariance(sym2) || sym1.variance == sym2.variance
}
private def methodHigherOrderTypeParamsSubVariance(low: Symbol, high: Symbol) =
!settings.isScala211 || methodHigherOrderTypeParamsSameVariance(low, high) || low.variance.isInvariant
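  // SI-2066 illustration (hedged sketch): with the 2.11 source level, an override that flips the
  // variance of a higher-order type parameter, e.g.
  //
  //   class A { def foo[F[+_]]: F[Int] = ??? }
  //   class B extends A { override def foo[F[-_]]: F[Int] = ??? }
  //
  // no longer conforms, whereas it was (unsoundly) accepted before; an invariant parameter is
  // still allowed to conform to either variance, per `methodHigherOrderTypeParamsSubVariance`.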
def isSameType2(tp1: Type, tp2: Type): Boolean = {
def retry(lhs: Type, rhs: Type) = ((lhs ne tp1) || (rhs ne tp2)) && isSameType(lhs, rhs)
/* Here we highlight those unfortunate type-like constructs which
* are hidden bundles of mutable state, cruising the type system picking
* up any type constraints naive enough to get into their hot rods.
*/
def mutateNonTypeConstructs(lhs: Type, rhs: Type) = lhs match {
case BoundedWildcardType(bounds) => bounds containsType rhs
case tv @ TypeVar(_, _) => tv.registerTypeEquality(rhs, typeVarLHS = lhs eq tp1)
case TypeRef(tv @ TypeVar(_, _), sym, _) => tv.registerTypeSelection(sym, rhs)
case _ => false
}
/* SingletonType receives this additional scrutiny because there are
* a variety of Types which must be treated as equivalent even if they
* arrive in different guises. For instance, object Foo in the following
     * might appear in (at least) the four forms given below.
*
* package pkg { object Foo ; type Bar = Foo.type }
*
* ModuleClassTypeRef(pkg.type, Foo: ModuleClassSymbol, Nil)
* ThisType(Foo: ModuleClassSymbol)
* SingleType(pkg.type, Foo: ModuleSymbol)
* AliasTypeRef(NoPrefix, sym: AliasSymbol, Nil) where sym.info is one of the above
*/
def sameSingletonType = tp1 match {
case tp1: SingletonType => tp2 match {
case tp2: SingletonType => isSameSingletonType(tp1, tp2)
case _ => false
}
case _ => false
}
/* Those false cases certainly are ugly. There's a proposed SIP to deuglify it.
* https://docs.google.com/a/improving.org/document/d/1onPrzSqyDpHScc9PS_hpxJwa3FlPtthxw-bAuuEe8uA
*/
def sameTypeAndSameCaseClass = tp1 match {
case tp1: TypeRef => tp2 match { case tp2: TypeRef => isSameTypeRef(tp1, tp2) ; case _ => false }
case tp1: MethodType => tp2 match { case tp2: MethodType => isSameMethodType(tp1, tp2) ; case _ => false }
case RefinedType(ps1, decls1) => tp2 match { case RefinedType(ps2, decls2) => isSameTypes(ps1, ps2) && (decls1 isSameScope decls2) ; case _ => false }
case SingleType(pre1, sym1) => tp2 match { case SingleType(pre2, sym2) => equalSymsAndPrefixes(sym1, pre1, sym2, pre2) ; case _ => false }
case PolyType(ps1, res1) => tp2 match { case PolyType(ps2, res2) => equalTypeParamsAndResult(ps1, res1, ps2, res2) ; case _ => false }
case ExistentialType(qs1, res1) => tp2 match { case ExistentialType(qs2, res2) => equalTypeParamsAndResult(qs1, res1, qs2, res2) ; case _ => false }
case ThisType(sym1) => tp2 match { case ThisType(sym2) => sym1 eq sym2 ; case _ => false }
case ConstantType(c1) => tp2 match { case ConstantType(c2) => c1 == c2 ; case _ => false }
case NullaryMethodType(res1) => tp2 match { case NullaryMethodType(res2) => res1 =:= res2 ; case _ => false }
case TypeBounds(lo1, hi1) => tp2 match { case TypeBounds(lo2, hi2) => lo1 =:= lo2 && hi1 =:= hi2 ; case _ => false }
case _ => false
}
( sameTypeAndSameCaseClass
|| sameSingletonType
|| mutateNonTypeConstructs(tp1, tp2)
|| mutateNonTypeConstructs(tp2, tp1)
|| retry(normalizePlus(tp1), normalizePlus(tp2))
)
}
def isSubType(tp1: Type, tp2: Type, depth: Depth = Depth.AnyDepth): Boolean = try {
subsametypeRecursions += 1
//OPT cutdown on Function0 allocation
//was:
// undoLog undoUnless { // if subtype test fails, it should not affect constraints on typevars
// if (subsametypeRecursions >= LogPendingSubTypesThreshold) {
// val p = new SubTypePair(tp1, tp2)
// if (pendingSubTypes(p))
// false
// else
// try {
// pendingSubTypes += p
// isSubType2(tp1, tp2, depth)
// } finally {
// pendingSubTypes -= p
// }
// } else {
// isSubType2(tp1, tp2, depth)
// }
// }
val before = undoLog.log
var result = false
try result = { // if subtype test fails, it should not affect constraints on typevars
if (subsametypeRecursions >= LogPendingSubTypesThreshold) {
val p = new SubTypePair(tp1, tp2)
if (pendingSubTypes(p))
false // see neg/t8146-no-finitary*
else
try {
pendingSubTypes += p
isSubType1(tp1, tp2, depth)
} finally {
pendingSubTypes -= p
}
} else {
isSubType1(tp1, tp2, depth)
}
} finally if (!result) undoLog.undoTo(before)
result
} finally {
subsametypeRecursions -= 1
// XXX AM TODO: figure out when it is safe and needed to clear the log -- the commented approach below is too eager (it breaks #3281, #3866)
// it doesn't help to keep separate recursion counts for the three methods that now share it
// if (subsametypeRecursions == 0) undoLog.clear()
}
/** Check whether the subtype or type equivalence relationship
* between the argument is predetermined. Returns a tri-state
* value: True means the arguments are always sub/same types,
* False means they never are, and Unknown means the caller
* will have to figure things out.
*/
private def typeRelationPreCheck(tp1: Type, tp2: Type): TriState = {
def isTrue = (
(tp1 eq tp2)
|| isErrorOrWildcard(tp1)
|| isErrorOrWildcard(tp2)
|| (tp1 eq NoPrefix) && tp2.typeSymbol.isPackageClass // !! I do not see how this would be warranted by the spec
|| (tp2 eq NoPrefix) && tp1.typeSymbol.isPackageClass // !! I do not see how this would be warranted by the spec
)
// isFalse, assuming !isTrue
def isFalse = (
(tp1 eq NoType)
|| (tp2 eq NoType)
|| (tp1 eq NoPrefix)
|| (tp2 eq NoPrefix)
)
if (isTrue) TriState.True
else if (isFalse) TriState.False
else TriState.Unknown
}
private def isSubType1(tp1: Type, tp2: Type, depth: Depth): Boolean = typeRelationPreCheck(tp1, tp2) match {
case state if state.isKnown => state.booleanValue
case _ if typeHasAnnotations(tp1) || typeHasAnnotations(tp2) => annotationsConform(tp1, tp2) && (tp1.withoutAnnotations <:< tp2.withoutAnnotations)
case _ => isSubType2(tp1, tp2, depth)
}
private def isPolySubType(tp1: PolyType, tp2: PolyType): Boolean = {
val PolyType(tparams1, res1) = tp1
val PolyType(tparams2, res2) = tp2
sameLength(tparams1, tparams2) && {
// fast-path: polymorphic method type -- type params cannot be captured
val isMethod = tparams1.head.owner.isMethod
//@M for an example of why we need to generate fresh symbols otherwise, see neg/tcpoly_ticket2101.scala
val substitutes = if (isMethod) tparams1 else cloneSymbols(tparams1)
def sub1(tp: Type) = if (isMethod) tp else tp.substSym(tparams1, substitutes)
def sub2(tp: Type) = tp.substSym(tparams2, substitutes)
def cmp(p1: Symbol, p2: Symbol) = (
methodHigherOrderTypeParamsSubVariance(p2, p1)
&& sub2(p2.info) <:< sub1(p1.info)
)
(tparams1 corresponds tparams2)(cmp) && (sub1(res1) <:< sub2(res2))
}
}
// This is looking for situations such as B.this.x.type <:< B.super.x.type.
// If it's a ThisType on the lhs and a SuperType on the right, and they originate
// in the same class, and the 'x' in the ThisType has in its override chain
// the 'x' in the SuperType, then the types conform.
private def isThisAndSuperSubtype(tp1: Type, tp2: Type): Boolean = (tp1, tp2) match {
case (SingleType(ThisType(lpre), v1), SingleType(SuperType(ThisType(rpre), _), v2)) => (lpre eq rpre) && (v1.overrideChain contains v2)
case _ => false
}
// @assume tp1.isHigherKinded || tp2.isHigherKinded
def isHKSubType(tp1: Type, tp2: Type, depth: Depth): Boolean = {
def isSub(ntp1: Type, ntp2: Type) = (ntp1.withoutAnnotations, ntp2.withoutAnnotations) match {
case (TypeRef(_, AnyClass, _), _) => false // avoid some warnings when Nothing/Any are on the other side
case (_, TypeRef(_, NothingClass, _)) => false
case (pt1: PolyType, pt2: PolyType) => isPolySubType(pt1, pt2) // @assume both .isHigherKinded (both normalized to PolyType)
case (_: PolyType, MethodType(ps, _)) if ps exists (_.tpe.isWildcard) => false // don't warn on HasMethodMatching on right hand side
case _ => // @assume !(both .isHigherKinded) thus cannot be subtypes
def tp_s(tp: Type): String = f"$tp%-20s ${util.shortClassOfInstance(tp)}%s"
devWarning(s"HK subtype check on $tp1 and $tp2, but both don't normalize to polytypes:\\n tp1=${tp_s(ntp1)}\\n tp2=${tp_s(ntp2)}")
false
}
( (tp1.typeSymbol eq NothingClass) // @M Nothing is subtype of every well-kinded type
|| (tp2.typeSymbol eq AnyClass) // @M Any is supertype of every well-kinded type (@PP: is it? What about continuations plugin?)
|| isSub(tp1.normalize, tp2.normalize) && annotationsConform(tp1, tp2) // @M! normalize reduces higher-kinded case to PolyType's
)
}
/** Does type `tp1` conform to `tp2`? */
private def isSubType2(tp1: Type, tp2: Type, depth: Depth): Boolean = {
def retry(lhs: Type, rhs: Type) = ((lhs ne tp1) || (rhs ne tp2)) && isSubType(lhs, rhs, depth)
if (isSingleType(tp1) && isSingleType(tp2) || isConstantType(tp1) && isConstantType(tp2))
return (tp1 =:= tp2) || isThisAndSuperSubtype(tp1, tp2) || retry(tp1.underlying, tp2)
if (tp1.isHigherKinded || tp2.isHigherKinded)
return isHKSubType(tp1, tp2, depth)
/* First try, on the right:
* - unwrap Annotated types, BoundedWildcardTypes,
* - bind TypeVars on the right, if lhs is not Annotated nor BoundedWildcard
* - handle common cases for first-kind TypeRefs on both sides as a fast path.
*/
def firstTry = tp2 match {
// fast path: two typerefs, none of them HK
case tr2: TypeRef =>
tp1 match {
case tr1: TypeRef =>
// TODO - dedicate a method to TypeRef/TypeRef subtyping.
// These typerefs are pattern matched up and down far more
// than is necessary.
val sym1 = tr1.sym
val sym2 = tr2.sym
val pre1 = tr1.pre
val pre2 = tr2.pre
(((if (sym1 eq sym2) phase.erasedTypes || sym1.owner.hasPackageFlag || isSubType(pre1, pre2, depth)
else (sym1.name == sym2.name && !sym1.isModuleClass && !sym2.isModuleClass &&
(isUnifiable(pre1, pre2) ||
isSameSpecializedSkolem(sym1, sym2, pre1, pre2) ||
sym2.isAbstractType && isSubPre(pre1, pre2, sym2)))) &&
isSubArgs(tr1.args, tr2.args, sym1.typeParams, depth))
||
sym2.isClass && {
val base = tr1 baseType sym2
// During bootstrap, `base eq NoType` occurs about 2.5 times as often as `base ne NoType`.
// The extra check seems like a worthwhile optimization (about 2.5M useless calls to isSubtype saved during that run).
(base ne tr1) && (base ne NoType) && isSubType(base, tr2, depth)
}
||
thirdTryRef(tr1, tr2))
case _ =>
secondTry
}
case AnnotatedType(_, _) =>
isSubType(tp1.withoutAnnotations, tp2.withoutAnnotations, depth) &&
annotationsConform(tp1, tp2)
case BoundedWildcardType(bounds) =>
isSubType(tp1, bounds.hi, depth)
case tv2 @ TypeVar(_, constr2) =>
tp1 match {
case AnnotatedType(_, _) | BoundedWildcardType(_) =>
secondTry
case _ =>
tv2.registerBound(tp1, isLowerBound = true)
}
case _ =>
secondTry
}
/* Second try, on the left:
* - unwrap AnnotatedTypes, BoundedWildcardTypes,
* - bind typevars,
* - handle existential types by skolemization.
*/
def secondTry = tp1 match {
case AnnotatedType(_, _) =>
isSubType(tp1.withoutAnnotations, tp2.withoutAnnotations, depth) &&
annotationsConform(tp1, tp2)
case BoundedWildcardType(bounds) =>
isSubType(tp1.bounds.lo, tp2, depth)
case tv @ TypeVar(_,_) =>
tv.registerBound(tp2, isLowerBound = false)
case ExistentialType(_, _) =>
try {
skolemizationLevel += 1
isSubType(tp1.skolemizeExistential, tp2, depth)
} finally {
skolemizationLevel -= 1
}
case _ =>
thirdTry
}
def thirdTryRef(tp1: Type, tp2: TypeRef): Boolean = {
val sym2 = tp2.sym
def retry(lhs: Type, rhs: Type) = isSubType(lhs, rhs, depth)
def abstractTypeOnRight(lo: Type) = isDifferentTypeConstructor(tp2, lo) && retry(tp1, lo)
def classOnRight = (
if (isRawType(tp2)) retry(tp1, rawToExistential(tp2))
else if (sym2.isRefinementClass) retry(tp1, sym2.info)
else fourthTry
)
sym2 match {
case SingletonClass => tp1.isStable || fourthTry
case _: ClassSymbol => classOnRight
case _: TypeSymbol if sym2.isDeferred => abstractTypeOnRight(tp2.bounds.lo) || fourthTry
case _: TypeSymbol => retry(normalizePlus(tp1), normalizePlus(tp2))
case _ => fourthTry
}
}
/* Third try, on the right:
* - decompose refined types.
* - handle typerefs and existentials.
* - handle left+right method types, polytypes, typebounds
*/
def thirdTry = tp2 match {
case tr2: TypeRef =>
thirdTryRef(tp1, tr2)
case rt2: RefinedType =>
(rt2.parents forall (isSubType(tp1, _, depth))) &&
(rt2.decls forall (specializesSym(tp1, _, depth)))
case et2: ExistentialType =>
et2.withTypeVars(isSubType(tp1, _, depth), depth) || fourthTry
case mt2: MethodType =>
tp1 match {
case mt1 @ MethodType(params1, res1) =>
val params2 = mt2.params
val res2 = mt2.resultType
(sameLength(params1, params2) &&
mt1.isImplicit == mt2.isImplicit &&
matchingParams(params1, params2, mt1.isJava, mt2.isJava) &&
isSubType(res1.substSym(params1, params2), res2, depth))
// TODO: if mt1.params.isEmpty, consider NullaryMethodType?
case _ =>
false
}
case pt2 @ NullaryMethodType(_) =>
tp1 match {
// TODO: consider MethodType mt for which mt.params.isEmpty??
case pt1 @ NullaryMethodType(_) =>
isSubType(pt1.resultType, pt2.resultType, depth)
case _ =>
false
}
case TypeBounds(lo2, hi2) =>
tp1 match {
case TypeBounds(lo1, hi1) =>
isSubType(lo2, lo1, depth) && isSubType(hi1, hi2, depth)
case _ =>
false
}
case _ =>
fourthTry
}
/* Fourth try, on the left:
* - handle typerefs, refined types, and singleton types.
*/
def fourthTry = {
def retry(lhs: Type, rhs: Type) = ((tp1 ne lhs) || (tp2 ne rhs)) && isSubType(lhs, rhs, depth)
def abstractTypeOnLeft(hi: Type) = isDifferentTypeConstructor(tp1, hi) && retry(hi, tp2)
tp1 match {
case tr1 @ TypeRef(pre1, sym1, _) =>
def nullOnLeft = tp2 match {
case TypeRef(_, sym2, _) => sym1 isBottomSubClass sym2
case _ => isSingleType(tp2) && retry(tp1, tp2.widen)
}
sym1 match {
case NothingClass => true
case NullClass => nullOnLeft
case _: ClassSymbol if isRawType(tp1) => retry(normalizePlus(tp1), normalizePlus(tp2))
case _: ClassSymbol if sym1.isModuleClass => retry(normalizePlus(tp1), normalizePlus(tp2))
case _: ClassSymbol if sym1.isRefinementClass => retry(sym1.info, tp2)
case _: TypeSymbol if sym1.isDeferred => abstractTypeOnLeft(tp1.bounds.hi)
case _: TypeSymbol => retry(normalizePlus(tp1), normalizePlus(tp2))
case _ => false
}
case RefinedType(parents, _) => parents exists (retry(_, tp2))
case _: SingletonType => retry(tp1.underlying, tp2)
case _ => false
}
}
firstTry
}
def isWeakSubType(tp1: Type, tp2: Type) =
tp1.dealiasWiden match {
case TypeRef(_, sym1, _) if isNumericValueClass(sym1) =>
tp2.deconst.dealias match {
case TypeRef(_, sym2, _) if isNumericValueClass(sym2) =>
isNumericSubClass(sym1, sym2)
case tv2 @ TypeVar(_, _) =>
tv2.registerBound(tp1, isLowerBound = true, isNumericBound = true)
case _ =>
isSubType(tp1, tp2)
}
case tv1 @ TypeVar(_, _) =>
tp2.deconst.dealias match {
case TypeRef(_, sym2, _) if isNumericValueClass(sym2) =>
tv1.registerBound(tp2, isLowerBound = false, isNumericBound = true)
case _ =>
isSubType(tp1, tp2)
}
case _ =>
isSubType(tp1, tp2)
}
def isNumericSubType(tp1: Type, tp2: Type) = (
isNumericSubClass(primitiveBaseClass(tp1.dealiasWiden), primitiveBaseClass(tp2.dealias))
)
/** If the given type has a primitive class among its base classes,
* the symbol of that class. Otherwise, NoSymbol.
*/
private def primitiveBaseClass(tp: Type): Symbol = {
@tailrec def loop(bases: List[Symbol]): Symbol = bases match {
case Nil => NoSymbol
case x :: xs => if (isPrimitiveValueClass(x)) x else loop(xs)
}
loop(tp.baseClasses)
}
}
| felixmulder/scala | src/reflect/scala/reflect/internal/tpe/TypeComparers.scala | Scala | bsd-3-clause | 26,607 |
package almhirt.herder.herdingdogs
import scalaz.Validation.FlatMap._
import akka.actor._
import almhirt.common._
import almhirt.context._
import almhirt.akkax.ComponentId
import almhirt.herder._
import almhirt.problem.{ Severity }
import akka.actor.ActorRef
object RejectedCommandsHerdingDog {
import com.typesafe.config.Config
def props(implicit ctx: AlmhirtContext): AlmValidation[Props] = {
import almhirt.configuration._
val configPath = "almhirt.herder.herding-dogs.rejected-commands"
for {
section ← ctx.config.v[Config](configPath)
historySize ← section.v[Int]("history-size")
unwrapFailures ← section.v[Boolean]("unwrap-failures")
downgradeCommandRepresentation ← section.v[Boolean]("downgrade-command-representations")
} yield Props(new RejectedCommandsHerdingDog(historySize, unwrapFailures, downgradeCommandRepresentation))
}
val actorname = "rejected-commands-herdingdog"
}
private[almhirt] class RejectedCommandsHerdingDog(historySize: Int, unwrapFailures: Boolean, downgradeCommandRepresentation: Boolean)(implicit override val almhirtContext: AlmhirtContext) extends Actor with HasAlmhirtContext with ActorLogging {
import HerderMessages.CommandMessages._
implicit val executor = almhirtContext.futuresContext
implicit object GetSev extends GetsSeverity[RejectedCommandsEntry] {
def get(from: RejectedCommandsEntry): Severity = from._3
}
val history = new MutableBadThingsHistories[ComponentId, RejectedCommandsEntry](historySize)
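  // Records rejected commands per component and answers the herder's report requests.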
def receiveRunning: Receive = {
case RejectedCommand(componentId, commandRepr, severity, cause, timestamp) ⇒
      val repr = if (downgradeCommandRepresentation) commandRepr.downgradeToIdAndType else commandRepr
      val failure = if (unwrapFailures) cause.unwrap() else cause
      history.add(componentId, (repr, failure, severity, timestamp))
case ReportRejectedCommands ⇒
val missed = history.allReversed.sorted
sender() ! RejectedCommands(missed)
case ReportRejectedCommandsFor(componentId) ⇒
sender() ! ReportedRejectedCommandsFor(componentId, history getImmutableReversed componentId)
}
override def receive: Receive = receiveRunning
} | chridou/almhirt | almhirt-core/src/main/scala/almhirt/herder/herdingdogs/RejectedCommandsHerdingDog.scala | Scala | apache-2.0 | 2,177 |
package io.getquill
import io.getquill.context.Context
// Testing we are passing type params explicitly into AsyncContext, otherwise
// this file will fail to compile
trait BaseExtensions {
val context: Context[PostgresDialect, _]
}
trait AsyncExtensions extends BaseExtensions {
override val context: PostgresAsyncContext[_]
}
| getquill/quill | quill-async-postgres/src/test/scala/io/getquill/TypeParamExtensionTest.scala | Scala | apache-2.0 | 336 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
import javax.swing.Icon
import com.intellij.extapi.psi.StubBasedPsiElementBase
import com.intellij.navigation.ItemPresentation
import com.intellij.openapi.editor.colors.TextAttributesKey
import com.intellij.psi._
import com.intellij.psi.search.{LocalSearchScope, SearchScope}
import com.intellij.psi.stubs.NamedStub
import com.intellij.psi.util.PsiTreeUtil
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.icons.Icons
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates.ScTemplateBody
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createIdentifier
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.JavaIdentifier
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount}
trait ScNamedElement extends ScalaPsiElement with PsiNameIdentifierOwner with NavigatablePsiElement {
@Cached(ModCount.anyScalaPsiModificationCount, this)
def name: String = {
this match {
case st: StubBasedPsiElementBase[_] => st.getGreenStub match {
case namedStub: NamedStub[_] => namedStub.getName
case _ => nameInner
}
case _ => nameInner
}
}
def name_=(it: String) {
setName(it)
}
def nameInner: String = nameId.getText
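  /** The closest ancestor in this file that is a name context according to `ScalaPsiUtil.isNameContext`, cached per PSI modification. */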
@Cached(ModCount.anyScalaPsiModificationCount, this)
def nameContext: PsiElement =
this.withParentsInFile
.find(ScalaPsiUtil.isNameContext)
.orNull
override def getTextOffset: Int = nameId.getTextRange.getStartOffset
override def getName: String = ScalaNamesUtil.toJavaName(name)
def nameId: PsiElement
override def getNameIdentifier: PsiIdentifier = if (nameId != null) new JavaIdentifier(nameId) else null
override def setName(name: String): PsiElement = {
val id = nameId.getNode
val parent = id.getTreeParent
val newId = createIdentifier(name)
parent.replaceChild(id, newId)
this
}
override def getPresentation: ItemPresentation = {
val clazz: ScTemplateDefinition =
nameContext.getParent match {
case _: ScTemplateBody | _: ScEarlyDefinitions =>
PsiTreeUtil.getParentOfType(this, classOf[ScTemplateDefinition], true)
case _ if this.isInstanceOf[ScClassParameter] =>
PsiTreeUtil.getParentOfType(this, classOf[ScTemplateDefinition], true)
case _ => null
}
val parentMember: ScMember = PsiTreeUtil.getParentOfType(this, classOf[ScMember], false)
new ItemPresentation {
def getPresentableText: String = name
def getTextAttributesKey: TextAttributesKey = null
def getLocationString: String = clazz match {
case _: ScTypeDefinition => "(" + clazz.qualifiedName + ")"
case _: ScNewTemplateDefinition => "(<anonymous>)"
case _ => ""
}
      override def getIcon(open: Boolean): Icon = parentMember match {
        case mem: ScMember => mem.getIcon(0)
        case _ => null
      }
}
}
override def getIcon(flags: Int): Icon =
nameContext match {
case null => null
case _: ScCaseClause => Icons.PATTERN_VAL
case x => x.getIcon(flags)
}
abstract override def getUseScope: SearchScope = {
ScalaPsiUtil.intersectScopes(super.getUseScope, nameContext match {
case member: ScMember if member != this => Some(member.getUseScope)
case caseClause: ScCaseClause => Some(new LocalSearchScope(caseClause))
case elem @ (_: ScEnumerator | _: ScGenerator) =>
Option(PsiTreeUtil.getContextOfType(elem, true, classOf[ScForStatement]))
.orElse(Option(PsiTreeUtil.getContextOfType(elem, true, classOf[ScBlock], classOf[ScMember])))
.map(new LocalSearchScope(_))
case _ => None
})
}
} | loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/ScNamedElement.scala | Scala | apache-2.0 | 4,151 |
package eventstore
package cluster
import org.joda.time.{ DateTimeZone, DateTime }
import org.joda.time.format.{ DateTimeFormat => JodaFormat, DateTimeFormatter }
import spray.json._
import scala.util.{ Failure, Try, Success }
object ClusterProtocol extends DefaultJsonProtocol {
implicit object UuidFormat extends JsonFormat[Uuid] {
def write(x: Uuid) = JsString(x.toString)
def read(json: JsValue) = json match {
case JsString(x) => x.uuid
case _ => deserializationError("Uuid expected")
}
}
implicit object DateTimeFormat extends JsonFormat[DateTime] {
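    // Timestamps arrive with a varying number of fractional-second digits, so several patterns are tried in order.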
val formats = List(
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"),
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSSSS'Z'"),
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss'Z'"),
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSSS'Z'"),
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSSS'Z'"),
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'"),
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.SS'Z'"),
JodaFormat.forPattern("yyyy-MM-dd'T'HH:mm:ss.S'Z'"))
def write(x: DateTime) = JsString(x.toString(formats.head))
def read(json: JsValue) = json match {
case JsString(x) =>
def loop(h: DateTimeFormatter, t: List[DateTimeFormatter]): DateTime = Try(h.parseDateTime(x)) match {
case Success(x) => x.withZoneRetainFields(DateTimeZone.UTC)
case Failure(x) => if (t.nonEmpty) loop(t.head, t.tail) else throw x
}
loop(formats.head, formats.tail)
case _ => deserializationError("Date expected")
}
}
implicit object NodeStateFormat extends JsonFormat[NodeState] {
def write(obj: NodeState) = JsString(obj.toString)
def read(json: JsValue) = json match {
case JsString(x) => NodeState(x)
case _ => deserializationError("NodeState expected")
}
}
implicit object MemberInfoFormat extends JsonFormat[MemberInfo] {
private val MappingFormat = jsonFormat21(Mapping)
def read(json: JsValue) = {
val m = MappingFormat.read(json)
MemberInfo(
instanceId = m.instanceId,
timestamp = m.timeStamp,
state = m.state,
isAlive = m.isAlive,
internalTcp = m.internalTcpIp :: m.internalTcpPort,
externalTcp = m.externalTcpIp :: m.externalTcpPort,
internalSecureTcp = m.internalTcpIp :: m.internalSecureTcpPort,
externalSecureTcp = m.externalTcpIp :: m.externalSecureTcpPort,
internalHttp = m.internalHttpIp :: m.internalHttpPort,
externalHttp = m.externalHttpIp :: m.externalHttpPort,
lastCommitPosition = m.lastCommitPosition,
writerCheckpoint = m.writerCheckpoint,
chaserCheckpoint = m.chaserCheckpoint,
epochPosition = m.epochPosition,
epochNumber = m.epochNumber,
epochId = m.epochId,
nodePriority = m.nodePriority)
}
def write(x: MemberInfo) = {
val m = Mapping(
instanceId = x.instanceId,
timeStamp = x.timestamp,
state = x.state,
isAlive = x.isAlive,
internalTcpIp = x.internalTcp.getHostString,
internalTcpPort = x.internalTcp.getPort,
internalSecureTcpPort = x.internalSecureTcp.getPort,
externalTcpIp = x.externalTcp.getHostString,
externalTcpPort = x.externalTcp.getPort,
externalSecureTcpPort = x.externalSecureTcp.getPort,
internalHttpIp = x.internalHttp.getHostString,
internalHttpPort = x.internalHttp.getPort,
externalHttpIp = x.externalHttp.getHostString,
externalHttpPort = x.externalHttp.getPort,
lastCommitPosition = x.lastCommitPosition,
writerCheckpoint = x.writerCheckpoint,
chaserCheckpoint = x.chaserCheckpoint,
epochPosition = x.epochPosition,
epochNumber = x.epochNumber,
epochId = x.epochId,
nodePriority = x.nodePriority)
MappingFormat.write(m)
}
case class Mapping(
instanceId: Uuid,
timeStamp: DateTime,
state: NodeState,
isAlive: Boolean,
internalTcpIp: String,
internalTcpPort: Int,
internalSecureTcpPort: Int,
externalTcpIp: String,
externalTcpPort: Int,
externalSecureTcpPort: Int,
internalHttpIp: String,
internalHttpPort: Int,
externalHttpIp: String,
externalHttpPort: Int,
lastCommitPosition: Long,
writerCheckpoint: Long,
chaserCheckpoint: Long,
epochPosition: Long,
epochNumber: Int,
epochId: Uuid,
nodePriority: Int)
}
implicit object ClusterInfoFormat extends RootJsonFormat[ClusterInfo] {
private val MappingFormat = jsonFormat3(Mapping.apply)
def read(json: JsValue) = {
val m = MappingFormat.read(json)
ClusterInfo(
serverAddress = m.serverIp :: m.serverPort,
members = m.members)
}
def write(x: ClusterInfo) = {
val m = Mapping(
members = x.members,
serverIp = x.serverAddress.getHostString,
serverPort = x.serverAddress.getPort)
MappingFormat.write(m)
}
private case class Mapping(members: List[MemberInfo], serverIp: String, serverPort: Int)
}
} | pawelkaczor/EventStore.JVM | src/main/scala/eventstore/cluster/ClusterProtocol.scala | Scala | bsd-3-clause | 5,222 |
package view
import java.util.Date
import org.specs2.mutable._
import service.RequestCache
import model.Account
import service.SystemSettingsService.SystemSettings
import twirl.api.Html
class AvatarImageProviderSpec extends Specification {
implicit val context = app.Context("", None, "", null)
"getAvatarImageHtml" should {
"show Gravatar image for no image account if gravatar integration is enabled" in {
val provider = new AvatarImageProviderImpl(Some(createAccount(None)), createSystemSettings(true))
provider.toHtml("user", 20).toString mustEqual
"<img src=\\"https://www.gravatar.com/avatar/d41d8cd98f00b204e9800998ecf8427e?s=20\\" class=\\"avatar\\" style=\\"width: 20px; height: 20px;\\" />"
}
"show uploaded image even if gravatar integration is enabled" in {
val provider = new AvatarImageProviderImpl(Some(createAccount(Some("icon.png"))), createSystemSettings(true))
provider.toHtml("user", 20).toString mustEqual
"<img src=\\"/user/_avatar\\" class=\\"avatar\\" style=\\"width: 20px; height: 20px;\\" />"
}
"show local image for no image account if gravatar integration is disabled" in {
val provider = new AvatarImageProviderImpl(Some(createAccount(None)), createSystemSettings(false))
provider.toHtml("user", 20).toString mustEqual
"<img src=\\"/user/_avatar\\" class=\\"avatar\\" style=\\"width: 20px; height: 20px;\\" />"
}
"show Gravatar image for specified mail address if gravatar integration is enabled" in {
val provider = new AvatarImageProviderImpl(None, createSystemSettings(true))
provider.toHtml("user", 20, "[email protected]").toString mustEqual
"<img src=\\"https://www.gravatar.com/avatar/4712f9b0e63f56ad952ad387eaa23b9c?s=20\\" class=\\"avatar\\" style=\\"width: 20px; height: 20px;\\" />"
}
"show unknown image for unknown user if gravatar integration is enabled" in {
val provider = new AvatarImageProviderImpl(None, createSystemSettings(true))
provider.toHtml("user", 20).toString mustEqual
"<img src=\\"/_unknown/_avatar\\" class=\\"avatar\\" style=\\"width: 20px; height: 20px;\\" />"
}
"show unknown image for specified mail address if gravatar integration is disabled" in {
val provider = new AvatarImageProviderImpl(None, createSystemSettings(false))
provider.toHtml("user", 20, "[email protected]").toString mustEqual
"<img src=\\"/_unknown/_avatar\\" class=\\"avatar\\" style=\\"width: 20px; height: 20px;\\" />"
}
"add tooltip if it's enabled" in {
val provider = new AvatarImageProviderImpl(None, createSystemSettings(false))
provider.toHtml("user", 20, "[email protected]", true).toString mustEqual
"<img src=\\"/_unknown/_avatar\\" class=\\"avatar\\" style=\\"width: 20px; height: 20px;\\" data-toggle=\\"tooltip\\" title=\\"user\\"/>"
}
}
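  // Builds a minimal Account fixture with the given avatar image.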
private def createAccount(image: Option[String]) =
Account(
userName = "user",
fullName = "user@localhost",
mailAddress = "",
password = "",
isAdmin = false,
url = None,
registeredDate = new Date(),
updatedDate = new Date(),
lastLoginDate = None,
image = image,
isGroupAccount = false,
isRemoved = false)
private def createSystemSettings(useGravatar: Boolean) =
SystemSettings(
allowAccountRegistration = false,
gravatar = useGravatar,
notification = false,
smtp = None,
ldapAuthentication = false,
ldap = None)
/**
* Adapter to test AvatarImageProviderImpl.
*/
class AvatarImageProviderImpl(account: Option[Account], settings: SystemSettings)
extends AvatarImageProvider with RequestCache {
def toHtml(userName: String, size: Int, mailAddress: String = "", tooltip: Boolean = false)
(implicit context: app.Context): Html = getAvatarImageHtml(userName, size, mailAddress, tooltip)
override def getAccountByMailAddress(mailAddress: String)(implicit context: app.Context): Option[Account] = account
override def getAccountByUserName(userName: String)(implicit context: app.Context): Option[Account] = account
override def getSystemSettings()(implicit context: app.Context): SystemSettings = settings
}
}
| loveshell/gitbucket | src/test/scala/view/AvatarImageProviderSpec.scala | Scala | apache-2.0 | 4,358 |
package object scalaFP extends Base
| aaronvargo/scalaFP | base/src/main/scala/package.scala | Scala | bsd-3-clause | 36 |
package org.jetbrains.plugins.cbt.settings
import com.intellij.openapi.externalSystem.service.settings.{AbstractExternalProjectSettingsControl, AbstractExternalSystemConfigurable}
import com.intellij.openapi.externalSystem.util.{ExternalSystemSettingsControl, PaintAwarePanel}
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.cbt.project.CbtProjectSystem
import org.jetbrains.plugins.cbt.project.settings.{CbtProjectSettings, CbtProjectSettingsListener, CbtSystemSettings, CbtSystemSettingsControl}
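/** Configurable for the CBT external system; the project-level settings control contributes no extra UI fields. */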
class CbtExternalSystemConfigurable(project: Project)
extends AbstractExternalSystemConfigurable[CbtProjectSettings, CbtProjectSettingsListener,
CbtSystemSettings](project, CbtProjectSystem.Id) {
override def newProjectSettings(): CbtProjectSettings = new CbtProjectSettings
override def createSystemSettingsControl(settings: CbtSystemSettings): CbtSystemSettingsControl =
new CbtSystemSettingsControl(settings)
override def createProjectSettingsControl(settings: CbtProjectSettings): AbstractExternalProjectSettingsControl[CbtProjectSettings] =
new AbstractExternalProjectSettingsControl[CbtProjectSettings](settings) {
override def applyExtraSettings(settings: CbtProjectSettings): Unit = {}
override def resetExtraSettings(isDefaultModuleCreation: Boolean): Unit = {}
override def fillExtraControls(content: PaintAwarePanel, indentLevel: Int): Unit = {}
override def isExtraSettingModified: Boolean = false
override def validate(settings: CbtProjectSettings): Boolean = true
}
override def getId: String = "cbt.project.settings.configurable"
}
| triplequote/intellij-scala | cbt/src/org/jetbrains/plugins/cbt/settings/CbtExternalSystemConfigurable.scala | Scala | apache-2.0 | 1,637 |
package com.twitter.example
import java.net.InetSocketAddress
import org.apache.thrift.protocol.TBinaryProtocol
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.Service
import com.twitter.finagle.thrift.{ThriftClientFramedCodec, ThriftClientRequest}
import thrift.EchoService
object EchoClient {
def main(args: Array[String]) {
// Create a raw Thrift client service. This implements the
// ThriftClientRequest => Future[Array[Byte]] interface.
val service: Service[ThriftClientRequest, Array[Byte]] = ClientBuilder()
.hosts(new InetSocketAddress(EchoServer.port))
.codec(ThriftClientFramedCodec())
.hostConnectionLimit(1)
.build()
// Wrap the raw Thrift service in a Client decorator. The client
// provides a convenient procedural interface for accessing the Thrift
// server.
val client = new EchoService.ServiceToClient(service, new TBinaryProtocol.Factory())
client.echo("hello") onSuccess { response =>
println("Received response: " + response)
} ensure {
service.release()
}
}
}
| twitter/iago | examples/echo/src/main/scala/com/twitter/example/EchoClient.scala | Scala | apache-2.0 | 1,098 |
package com.github.j5ik2o.wicket.pages
import org.apache.wicket.ajax.AjaxRequestTarget
import org.apache.wicket.ajax.markup.html.AjaxFallbackLink
import org.apache.wicket.markup.html.basic.Label
import org.apache.wicket.model.Model
class AjaxCounterPage extends BasePage {
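  // A model whose value increments on every read, i.e. each time the label is (re)rendered.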
val model = new Model[java.lang.Integer]() {
private var counter = 0
override def getObject(): java.lang.Integer = {
counter += 1
counter
}
}
val label = new Label("counter", model)
label.setOutputMarkupId(true)
add(new AjaxFallbackLink("link") {
override def onClick(target: AjaxRequestTarget): Unit = {
      // add the components that need to be updated to the target
      if (target != null) {
        target.add(label)
      }
}
})
add(label)
}
| j5ik2o/scala-wicket-sample | application/src/main/scala/com/github/j5ik2o/wicket/pages/AjaxCounterPage.scala | Scala | mit | 787 |
package fpgatidbits.dma
import Chisel._
import fpgatidbits.ocm._
class RespDeinterleaverIF(numPipes: Int, p: MemReqParams) extends Bundle {
// interleaved responses in
val rspIn = Decoupled(new GenericMemoryResponse(p)).flip
// deinterleaved responses out
val rspOut = Vec.fill(numPipes) {Decoupled(new GenericMemoryResponse(p))}
  // number of decode errors (channel ID with no matching pipe)
val decodeErrors = UInt(OUTPUT, width = 32)
override def clone = {
new RespDeinterleaverIF(numPipes, p).asInstanceOf[this.type]
}
}
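// Deinterleaver wrapper that adds a depth-n FPGAQueue in front of each output pipe.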
class QueuedDeinterleaver(numPipes: Int, p: MemReqParams, n: Int,
routeFxn: GenericMemoryResponse => UInt = {x: GenericMemoryResponse => x.channelID}
) extends Module {
val io = new RespDeinterleaverIF(numPipes,p)
val deint = Module(new RespDeinterleaver(numPipes, p, routeFxn)).io
deint.rspIn <> io.rspIn
io.decodeErrors := deint.decodeErrors
for(i <- 0 until numPipes) {
val rspQ = Module(new FPGAQueue(new GenericMemoryResponse(p), n)).io
rspQ.deq <> io.rspOut(i)
rspQ.enq <> deint.rspOut(i)
}
}
class RespDeinterleaver(numPipes: Int, p: MemReqParams,
routeFxn: GenericMemoryResponse => UInt = {x: GenericMemoryResponse => x.channelID}
) extends Module {
val io = new RespDeinterleaverIF(numPipes, p)
val regDecodeErrors = Reg(init = UInt(0, 32))
// TODO the current implementation is likely to cause timing problems
// due to high-fanout signals and combinational paths
// - to avoid high-fanout signals: implement decoding as e.g shiftreg
// - to avoid combinational paths, pipeline the deinterleaver
for(i <- 0 until numPipes) {
io.rspOut(i).bits := io.rspIn.bits
io.rspOut(i).valid := Bool(false)
}
io.rspIn.ready := Bool(false)
io.decodeErrors := regDecodeErrors
val destPipe = routeFxn(io.rspIn.bits)
val invalidChannel = (destPipe >= UInt(numPipes))
val canProceed = io.rspIn.valid && io.rspOut(destPipe).ready
when (invalidChannel) {
// do not let the entire pipe stall because head of line has invalid dest
// increment error counter and move on
regDecodeErrors := regDecodeErrors + UInt(1)
io.rspIn.ready := Bool(true)
printf("RespDeinterleaver decode error! chanID = %d dest = %d \\n",
io.rspIn.bits.channelID, destPipe
)
}
.elsewhen (canProceed) {
io.rspIn.ready := Bool(true)
io.rspOut(destPipe).valid := Bool(true)
}
}
| maltanar/fpga-tidbits | src/main/scala/fpgatidbits/dma/RespDeinterleaver.scala | Scala | bsd-2-clause | 2,393 |
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package scalanlp.serialization;
import org.scalatest.FunSuite;
import org.scalatest.junit.JUnitRunner;
import org.junit.runner.RunWith;
package typedexamples {
case class MyCaseClass1(b : String);
object MyCaseClass1 extends TypedCompanion1[String,MyCaseClass1] {
prepare();
}
case class MyCaseClass2(a : Int, b : String)
object MyCaseClass2 extends TypedCompanion2[Int,String,MyCaseClass2] {
prepare();
}
case class MyCompoundCaseClass1(a : (Int,Double));
object MyCompoundCaseClass1 extends TypedCompanion1[(Int,Double),MyCompoundCaseClass1] {
prepare();
}
case class MyCompoundCaseClass2(a : Int, b : MyCaseClass2)
object MyCompoundCaseClass2 extends TypedCompanion2[Int,MyCaseClass2,MyCompoundCaseClass2] {
prepare();
}
case class MyCompoundCaseClass3(a : List[Double]);
object MyCompoundCaseClass3 extends TypedCompanion1[List[Double],MyCompoundCaseClass3] {
prepare();
}
trait SubtypedRoot;
object SubtypedRoot extends SubtypedCompanion[SubtypedRoot] {
prepare();
register[SubtypedOption1]("SubtypedOption1");
register[SubtypedOption2]("SubtypedOption2");
}
case class SubtypedOption1(value : Int) extends SubtypedRoot;
object SubtypedOption1 extends TypedCompanion1[Int,SubtypedOption1] {
prepare();
}
case class SubtypedOption2(str : String) extends SubtypedRoot;
object SubtypedOption2 extends TypedCompanion1[String,SubtypedOption2] {
prepare();
}
}
@RunWith(classOf[JUnitRunner])
class TypedCaseCompanionTest extends FunSuite {
import typedexamples._;
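  /** Round-trips a value through TextSerialization and asserts the parsed result equals the original. */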
def loop[T:TextSerialization.ReadWritable](value : T) = {
val string = TextSerialization.toString(value);
val parsed = TextSerialization.fromString[T](string);
assert(parsed === value, "Failure on "+string);
}
test("Check case class to and from string") {
loop(MyCaseClass1("hi"));
loop(MyCaseClass2(1,"hi"));
loop(MyCompoundCaseClass1((1,2.0)));
loop(MyCompoundCaseClass2(1,MyCaseClass2(2,"yo")));
loop(MyCompoundCaseClass3(List(1.0,2.0)));
}
test("Check SubtypedCompanion") {
val o1 = SubtypedOption1(-1);
val o2 = SubtypedOption2("hi");
loop(o1);
loop(o2);
assert(TextSerialization.fromString[SubtypedRoot](TextSerialization.toString(o1)) === o1);
assert(TextSerialization.fromString[SubtypedRoot](TextSerialization.toString(o2)) === o2);
}
}
| MLnick/scalanlp-core | data/src/test/scala/scalanlp/serialization/TypedCompanionTest.scala | Scala | apache-2.0 | 2,960 |
package circlepuzzles.geometry
import circlepuzzles.math.FixedPoint
/**
* Angles that can specify rotations.
* @param radians Value of the angle in radians. Must be in the range [0,2*pi).
*/
class Angle(val radians: FixedPoint) {
/**
* The sine of this angle.
* @return `sin(radians)`.
*/
def sin: FixedPoint = FixedPoint.sin(radians)
/**
* The cosine of this angle.
* @return `cos(radians)`.
*/
def cos: FixedPoint = FixedPoint.cos(radians)
/**
* Angle addition.
* @param that Angle to add
* @return Sum of the angles `this + that`.
*/
def +(that: Angle): Angle = {
new Angle(FixedPoint.mod2Pi(radians + that.radians))
}
/**
* The explement of this angle, i.e. the angle that can be added to this to give a full circle.
* @return Explement of this angle.
*/
def explement: Angle = {
new Angle(FixedPoint.TwoPi - radians)
}
/**
* The supplement of this angle, i.e. the angle that can be added to this to give a half circle.
* @return Supplement of this angle.
*/
def supplement: Angle = {
new Angle(FixedPoint.mod2Pi(FixedPoint.Pi - radians))
}
/**
* Memoize this angle.
* @return A memoized angle equivalent to `this`.
*/
def memoized: MemoizedAngle = {
new MemoizedAngle(radians)
}
/**
* Tests if `that` is an equal `Angle`. Two angles are equal if and only if their values in radians are equal.
* @param that Object to compare to.
* @return True if and only if `this` and `that` are equal angles.
*/
override def equals(that: Any): Boolean = {
that match {
case thatAngle: Angle => radians == thatAngle.radians
case _ => false
}
}
/**
* Returns a hash code that satisfies the [[Object]] contract, which is to say that the hash depends only on
* `radians`.
* @return A hash code depending only on `radians`.
*/
override def hashCode: Int = radians.hashCode
/**
* Returns a decimal representation of this angle's measure, in radians.
* @return A string representation of this angle.
*/
override def toString: String = s"Angle($radians)"
}
/**
* Angles that memoize the corresponding sine and cosine values.
* @param radians Value of the angle in radians. Must be in the range `[0,2*pi)`.
*/
class MemoizedAngle(radians: FixedPoint) extends Angle(radians) {
/**
* The sine of this angle. Memoized.
*/
override val sin = super.sin
/**
* The cosine of this angle. Memoized.
*/
override val cos = super.cos
override def memoized: MemoizedAngle = this
}
| wkretschmer/CirclePuzzles | src/main/scala/circlepuzzles/geometry/Angle.scala | Scala | mit | 2,603 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.catalyst.{FunctionIdentifier, TableIdentifier}
import org.apache.spark.sql.catalyst.analysis.{AnalysisTest, UnresolvedAlias, UnresolvedAttribute, UnresolvedRelation, UnresolvedStar}
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogTableType}
import org.apache.spark.sql.catalyst.expressions.{Ascending, Concat, SortOrder}
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, RepartitionByExpression, Sort}
import org.apache.spark.sql.execution.command._
import org.apache.spark.sql.execution.datasources.{CreateTable, RefreshResource}
import org.apache.spark.sql.internal.{HiveSerDe, SQLConf}
import org.apache.spark.sql.types.{IntegerType, LongType, StringType, StructType}
/**
* Parser test cases for rules defined in [[SparkSqlParser]].
*
* See [[org.apache.spark.sql.catalyst.parser.PlanParserSuite]] for rules
* defined in the Catalyst module.
*/
class SparkSqlParserSuite extends AnalysisTest {
val newConf = new SQLConf
private lazy val parser = new SparkSqlParser(newConf)
/**
* Normalizes plans:
* - CreateTable the createTime in tableDesc will replaced by -1L.
*/
override def normalizePlan(plan: LogicalPlan): LogicalPlan = {
plan match {
case CreateTable(tableDesc, mode, query) =>
val newTableDesc = tableDesc.copy(createTime = -1L)
CreateTable(newTableDesc, mode, query)
case _ => plan // Don't transform
}
}
private def assertEqual(sqlCommand: String, plan: LogicalPlan): Unit = {
val normalized1 = normalizePlan(parser.parsePlan(sqlCommand))
val normalized2 = normalizePlan(plan)
comparePlans(normalized1, normalized2)
}
private def intercept(sqlCommand: String, messages: String*): Unit = {
val e = intercept[ParseException](parser.parsePlan(sqlCommand))
messages.foreach { message =>
assert(e.message.contains(message))
}
}
test("refresh resource") {
assertEqual("REFRESH prefix_path", RefreshResource("prefix_path"))
assertEqual("REFRESH /", RefreshResource("/"))
assertEqual("REFRESH /path///a", RefreshResource("/path///a"))
assertEqual("REFRESH pat1h/112/_1a", RefreshResource("pat1h/112/_1a"))
assertEqual("REFRESH pat1h/112/_1a/a-1", RefreshResource("pat1h/112/_1a/a-1"))
assertEqual("REFRESH path-with-dash", RefreshResource("path-with-dash"))
assertEqual("REFRESH \\'path with space\\'", RefreshResource("path with space"))
assertEqual("REFRESH \\"path with space 2\\"", RefreshResource("path with space 2"))
intercept("REFRESH a b", "REFRESH statements cannot contain")
intercept("REFRESH a\\tb", "REFRESH statements cannot contain")
intercept("REFRESH a\\nb", "REFRESH statements cannot contain")
intercept("REFRESH a\\rb", "REFRESH statements cannot contain")
intercept("REFRESH a\\r\\nb", "REFRESH statements cannot contain")
intercept("REFRESH @ $a$", "REFRESH statements cannot contain")
intercept("REFRESH ", "Resource paths cannot be empty in REFRESH statements")
intercept("REFRESH", "Resource paths cannot be empty in REFRESH statements")
}
test("show functions") {
assertEqual("show functions", ShowFunctionsCommand(None, None, true, true))
assertEqual("show all functions", ShowFunctionsCommand(None, None, true, true))
assertEqual("show user functions", ShowFunctionsCommand(None, None, true, false))
assertEqual("show system functions", ShowFunctionsCommand(None, None, false, true))
intercept("show special functions", "SHOW special FUNCTIONS")
assertEqual("show functions foo",
ShowFunctionsCommand(None, Some("foo"), true, true))
assertEqual("show functions foo.bar",
ShowFunctionsCommand(Some("foo"), Some("bar"), true, true))
assertEqual("show functions 'foo\\\\\\\\.*'",
ShowFunctionsCommand(None, Some("foo\\\\.*"), true, true))
intercept("show functions foo.bar.baz", "Unsupported function name")
}
test("describe function") {
assertEqual("describe function bar",
DescribeFunctionCommand(FunctionIdentifier("bar", database = None), isExtended = false))
assertEqual("describe function extended bar",
DescribeFunctionCommand(FunctionIdentifier("bar", database = None), isExtended = true))
assertEqual("describe function foo.bar",
DescribeFunctionCommand(
FunctionIdentifier("bar", database = Some("foo")), isExtended = false))
assertEqual("describe function extended f.bar",
DescribeFunctionCommand(FunctionIdentifier("bar", database = Some("f")), isExtended = true))
}
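  // Builds the expected CreateTable plan for a data-source (USING) table.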
private def createTableUsing(
table: String,
database: Option[String] = None,
tableType: CatalogTableType = CatalogTableType.MANAGED,
storage: CatalogStorageFormat = CatalogStorageFormat.empty,
schema: StructType = new StructType,
provider: Option[String] = Some("parquet"),
partitionColumnNames: Seq[String] = Seq.empty,
bucketSpec: Option[BucketSpec] = None,
mode: SaveMode = SaveMode.ErrorIfExists,
query: Option[LogicalPlan] = None): CreateTable = {
CreateTable(
CatalogTable(
identifier = TableIdentifier(table, database),
tableType = tableType,
storage = storage,
schema = schema,
provider = provider,
partitionColumnNames = partitionColumnNames,
bucketSpec = bucketSpec
), mode, query
)
}
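  // Builds the expected CreateTable plan for a Hive-format table (textfile storage with LazySimpleSerDe by default).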
private def createTable(
table: String,
database: Option[String] = None,
tableType: CatalogTableType = CatalogTableType.MANAGED,
storage: CatalogStorageFormat = CatalogStorageFormat.empty.copy(
inputFormat = HiveSerDe.sourceToSerDe("textfile").get.inputFormat,
outputFormat = HiveSerDe.sourceToSerDe("textfile").get.outputFormat,
serde = Some("org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe")),
schema: StructType = new StructType,
provider: Option[String] = Some("hive"),
partitionColumnNames: Seq[String] = Seq.empty,
comment: Option[String] = None,
mode: SaveMode = SaveMode.ErrorIfExists,
query: Option[LogicalPlan] = None): CreateTable = {
CreateTable(
CatalogTable(
identifier = TableIdentifier(table, database),
tableType = tableType,
storage = storage,
schema = schema,
provider = provider,
partitionColumnNames = partitionColumnNames,
comment = comment
), mode, query
)
}
test("create table - schema") {
assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING)",
createTable(
table = "my_tab",
schema = (new StructType)
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType)
)
)
assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) " +
"PARTITIONED BY (c INT, d STRING COMMENT 'test2')",
createTable(
table = "my_tab",
schema = (new StructType)
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType)
.add("c", IntegerType)
.add("d", StringType, nullable = true, "test2"),
partitionColumnNames = Seq("c", "d")
)
)
assertEqual("CREATE TABLE my_tab(id BIGINT, nested STRUCT<col1: STRING,col2: INT>)",
createTable(
table = "my_tab",
schema = (new StructType)
.add("id", LongType)
.add("nested", (new StructType)
.add("col1", StringType)
.add("col2", IntegerType)
)
)
)
// Partitioned by a StructType should be accepted by `SparkSqlParser` but will fail an analyze
// rule in `AnalyzeCreateTable`.
assertEqual("CREATE TABLE my_tab(a INT COMMENT 'test', b STRING) " +
"PARTITIONED BY (nested STRUCT<col1: STRING,col2: INT>)",
createTable(
table = "my_tab",
schema = (new StructType)
.add("a", IntegerType, nullable = true, "test")
.add("b", StringType)
.add("nested", (new StructType)
.add("col1", StringType)
.add("col2", IntegerType)
),
partitionColumnNames = Seq("nested")
)
)
intercept("CREATE TABLE my_tab(a: INT COMMENT 'test', b: STRING)",
"no viable alternative at input")
}
test("SPARK-17328 Fix NPE with EXPLAIN DESCRIBE TABLE") {
assertEqual("describe t",
DescribeTableCommand(TableIdentifier("t"), Map.empty, isExtended = false))
assertEqual("describe table t",
DescribeTableCommand(TableIdentifier("t"), Map.empty, isExtended = false))
assertEqual("describe table extended t",
DescribeTableCommand(TableIdentifier("t"), Map.empty, isExtended = true))
assertEqual("describe table formatted t",
DescribeTableCommand(TableIdentifier("t"), Map.empty, isExtended = true))
}
test("describe query") {
val query = "SELECT * FROM t"
assertEqual("DESCRIBE QUERY " + query, DescribeQueryCommand(query, parser.parsePlan(query)))
assertEqual("DESCRIBE " + query, DescribeQueryCommand(query, parser.parsePlan(query)))
}
test("describe table column") {
assertEqual("DESCRIBE t col",
DescribeColumnCommand(
TableIdentifier("t"), Seq("col"), isExtended = false))
assertEqual("DESCRIBE t `abc.xyz`",
DescribeColumnCommand(
TableIdentifier("t"), Seq("abc.xyz"), isExtended = false))
assertEqual("DESCRIBE t abc.xyz",
DescribeColumnCommand(
TableIdentifier("t"), Seq("abc", "xyz"), isExtended = false))
assertEqual("DESCRIBE t `a.b`.`x.y`",
DescribeColumnCommand(
TableIdentifier("t"), Seq("a.b", "x.y"), isExtended = false))
assertEqual("DESCRIBE TABLE t col",
DescribeColumnCommand(
TableIdentifier("t"), Seq("col"), isExtended = false))
assertEqual("DESCRIBE TABLE EXTENDED t col",
DescribeColumnCommand(
TableIdentifier("t"), Seq("col"), isExtended = true))
assertEqual("DESCRIBE TABLE FORMATTED t col",
DescribeColumnCommand(
TableIdentifier("t"), Seq("col"), isExtended = true))
intercept("DESCRIBE TABLE t PARTITION (ds='1970-01-01') col",
"DESC TABLE COLUMN for a specific partition is not supported")
}
test("analyze table statistics") {
assertEqual("analyze table t compute statistics",
AnalyzeTableCommand(TableIdentifier("t"), noscan = false))
assertEqual("analyze table t compute statistics noscan",
AnalyzeTableCommand(TableIdentifier("t"), noscan = true))
assertEqual("analyze table t partition (a) compute statistics nOscAn",
AnalyzePartitionCommand(TableIdentifier("t"), Map("a" -> None), noscan = true))
// Partitions specified
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr=11) COMPUTE STATISTICS",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = false,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> Some("11"))))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr=11) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> Some("11"))))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09') COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> Some("2008-04-09"))))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr) COMPUTE STATISTICS",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = false,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> None)))
assertEqual("ANALYZE TABLE t PARTITION(ds='2008-04-09', hr) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> Some("2008-04-09"), "hr" -> None)))
assertEqual("ANALYZE TABLE t PARTITION(ds, hr=11) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> None, "hr" -> Some("11"))))
assertEqual("ANALYZE TABLE t PARTITION(ds, hr) COMPUTE STATISTICS",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = false,
partitionSpec = Map("ds" -> None, "hr" -> None)))
assertEqual("ANALYZE TABLE t PARTITION(ds, hr) COMPUTE STATISTICS noscan",
AnalyzePartitionCommand(TableIdentifier("t"), noscan = true,
partitionSpec = Map("ds" -> None, "hr" -> None)))
intercept("analyze table t compute statistics xxxx",
"Expected `NOSCAN` instead of `xxxx`")
intercept("analyze table t partition (a) compute statistics xxxx",
"Expected `NOSCAN` instead of `xxxx`")
}
test("analyze table column statistics") {
intercept("ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS", "")
assertEqual("ANALYZE TABLE t COMPUTE STATISTICS FOR COLUMNS key, value",
AnalyzeColumnCommand(TableIdentifier("t"), Option(Seq("key", "value")), allColumns = false))
// Partition specified - should be ignored
assertEqual("ANALYZE TABLE t PARTITION(ds='2017-06-10') " +
"COMPUTE STATISTICS FOR COLUMNS key, value",
AnalyzeColumnCommand(TableIdentifier("t"), Option(Seq("key", "value")), allColumns = false))
// Partition specified should be ignored in case of COMPUTE STATISTICS FOR ALL COLUMNS
assertEqual("ANALYZE TABLE t PARTITION(ds='2017-06-10') " +
"COMPUTE STATISTICS FOR ALL COLUMNS",
AnalyzeColumnCommand(TableIdentifier("t"), None, allColumns = true))
intercept("ANALYZE TABLE t COMPUTE STATISTICS FOR ALL COLUMNS key, value",
"mismatched input 'key' expecting <EOF>")
intercept("ANALYZE TABLE t COMPUTE STATISTICS FOR ALL",
"missing 'COLUMNS' at '<EOF>'")
}
test("query organization") {
// Test all valid combinations of order by/sort by/distribute by/cluster by/limit/windows
val baseSql = "select * from t"
val basePlan =
Project(Seq(UnresolvedStar(None)), UnresolvedRelation(TableIdentifier("t")))
assertEqual(s"$baseSql distribute by a, b",
RepartitionByExpression(UnresolvedAttribute("a") :: UnresolvedAttribute("b") :: Nil,
basePlan,
numPartitions = newConf.numShufflePartitions))
assertEqual(s"$baseSql distribute by a sort by b",
Sort(SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
global = false,
RepartitionByExpression(UnresolvedAttribute("a") :: Nil,
basePlan,
numPartitions = newConf.numShufflePartitions)))
assertEqual(s"$baseSql cluster by a, b",
Sort(SortOrder(UnresolvedAttribute("a"), Ascending) ::
SortOrder(UnresolvedAttribute("b"), Ascending) :: Nil,
global = false,
RepartitionByExpression(UnresolvedAttribute("a") :: UnresolvedAttribute("b") :: Nil,
basePlan,
numPartitions = newConf.numShufflePartitions)))
}
test("pipeline concatenation") {
val concat = Concat(
Concat(UnresolvedAttribute("a") :: UnresolvedAttribute("b") :: Nil) ::
UnresolvedAttribute("c") ::
Nil
)
assertEqual(
"SELECT a || b || c FROM t",
Project(UnresolvedAlias(concat) :: Nil, UnresolvedRelation(TableIdentifier("t"))))
}
test("database and schema tokens are interchangeable") {
assertEqual("CREATE DATABASE foo", parser.parsePlan("CREATE SCHEMA foo"))
assertEqual("DROP DATABASE foo", parser.parsePlan("DROP SCHEMA foo"))
assertEqual("ALTER DATABASE foo SET DBPROPERTIES ('x' = 'y')",
parser.parsePlan("ALTER SCHEMA foo SET DBPROPERTIES ('x' = 'y')"))
assertEqual("DESC DATABASE foo", parser.parsePlan("DESC SCHEMA foo"))
}
}
| LantaoJin/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/SparkSqlParserSuite.scala | Scala | apache-2.0 | 16,577 |
package org.tearne.crosser.config
import java.nio.file.Path
import scala.util.Try
import org.slf4j.LoggerFactory
import com.typesafe.config.{ConfigFactory => TypesafeConfigFactory}
import java.net.URL
import scala.io.Source
import com.typesafe.config.{Config => TypesafeConfig}
object ConfigFactory {
val log = LoggerFactory.getLogger(this.getClass())
def fromPath(path: Path): Config = {
val tConf = TypesafeConfigFactory.parseFile(path.toFile())
fromTypesafeConfig(tConf)
}
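	/** Loads a config from a URL whose content is expected to be a single line. */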
def fromURL(url: String): Config = {
val stream = new URL(url).openStream()
val lines = Source.fromInputStream(stream).getLines
val tConf = TypesafeConfigFactory.parseString(lines.next)
assert(!lines.hasNext, "Expected only one line at config url")
fromTypesafeConfig(tConf)
}
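	/** Parses the given Typesafe config, trying the human-readable format first and falling back to the web format. */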
def fromTypesafeConfig(tConf: TypesafeConfig) = {
Try{
log.debug("Try parsing human format config")
new HumanConfig(tConf)
}.getOrElse{
log.debug("Try parsing web format config")
new WebConfig(tConf)
}
}
} | tearne/Crosser | src/main/scala/org/tearne/crosser/config/ConfigFactory.scala | Scala | apache-2.0 | 1,005 |
package dotty.tools.dotc
package core
package tasty
import dotty.tools.tasty.{TastyBuffer, TastyReader}
import TastyBuffer.NameRef
import Contexts._, Decorators._
import Names.{Name, TermName}
import StdNames.nme
import TastyUnpickler._
import util.Spans.offsetToInt
import printing.Highlighting._
/** Reads the package and class name of the class contained in this TASTy */
class TastyClassName(bytes: Array[Byte]) {
val unpickler: TastyUnpickler = new TastyUnpickler(bytes)
import unpickler.{nameAtRef, unpickle}
/** Returns a tuple with the package and class names */
def readName(): Option[(TermName, TermName)] = unpickle(new TreeSectionUnpickler)
class TreeSectionUnpickler extends SectionUnpickler[(TermName, TermName)](TreePickler.sectionName) {
import dotty.tools.tasty.TastyFormat._
def unpickle(reader: TastyReader, tastyName: NameTable): (TermName, TermName) = {
import reader._
def readName() = {
val idx = readNat()
nameAtRef(NameRef(idx))
}
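      // Walks the tree section: skips IMPORT/VALDEF entries, descends into packages (updating the
      // current package name at package refs) and stops at the first TYPEDEF, which names the class.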
def readNames(packageName: TermName): (TermName, TermName) = {
val tag = readByte()
if (tag >= firstLengthTreeTag) {
val len = readNat()
val end = currentAddr + len
tag match {
case TYPEDEF =>
val className = readName()
goto(end)
(packageName, className)
case IMPORT | VALDEF =>
goto(end)
readNames(packageName)
case PACKAGE =>
readNames(packageName)
}
}
else tag match {
case TERMREFpkg | TYPEREFpkg =>
val subPackageName = readName()
readNames(subPackageName)
case _ =>
readNames(packageName)
}
}
readNames(nme.EMPTY_PACKAGE)
}
}
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/core/tasty/TastyClassName.scala | Scala | apache-2.0 | 1,828 |
package io.taig.android.soap.operation
import android.content.{ Intent, SharedPreferences }
import android.os.Bundle
import cats.syntax.either._
import io.circe.Decoder
import io.circe.parser._
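/** Reads JSON-encoded values from an Android container (Bundle, Intent or SharedPreferences) by key. */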
sealed trait reader[C] {
def container: C
def read[V: Decoder]( key: String ): Option[V]
}
object reader {
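  /** Decodes a JSON string into a `V`, returning `None` for null input or parse failures. */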
@inline
private def parse[V: Decoder]( json: String ): Option[V] = {
Option( json ).flatMap { json ⇒
decode[V]( json ).toOption
}
}
final case class bundle( container: Bundle ) extends reader[Bundle] {
override def read[V: Decoder]( key: String ): Option[V] = {
parse( container.getString( key ) )
}
}
final case class intent( container: Intent ) extends reader[Intent] {
override def read[V: Decoder]( key: String ): Option[V] = {
parse( container.getStringExtra( key ) )
}
}
final case class sharedPreferences( container: SharedPreferences )
extends reader[SharedPreferences] {
override def read[V: Decoder]( key: String ): Option[V] = {
parse( container.getString( key, null ) )
}
}
} | Taig/Soap | src/main/scala/io/taig/android/soap/operation/reader.scala | Scala | mit | 1,165 |
package org.scalaide.core.internal.formatter
import org.eclipse.jdt.internal.corext.fix.CodeFormatFix
import org.eclipse.jdt.core.ICompilationUnit
import org.eclipse.jdt.core.refactoring.CompilationUnitChange
import org.eclipse.jdt.ui.cleanup.ICleanUpFix
import org.eclipse.jface.text.Document
import org.eclipse.jface.text.TextUtilities
import org.eclipse.text.edits.MultiTextEdit
import scalariform.formatter.ScalaFormatter
import scalariform.parser.ScalaParserException
import scala.tools.eclipse.contribution.weaving.jdt.ui.javaeditor.formatter.IFormatterCleanUpProvider
import org.scalaide.util.eclipse.EclipseUtils.asEclipseTextEdit
import org.eclipse.jdt.internal.ui.javaeditor.DocumentAdapter
class ScalaFormatterCleanUpProvider extends IFormatterCleanUpProvider {
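  /** Formats the whole compilation unit with Scalariform and wraps the resulting edits in a CodeFormatFix; a parse error yields no edits. */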
def createCleanUp(cu: ICompilationUnit): ICleanUpFix = {
val document = cu.getBuffer match {
case adapter: DocumentAdapter => adapter.getDocument
case _ => new Document(cu.getBuffer.getContents)
}
val lineDelimiter = TextUtilities.getDefaultLineDelimiter(document)
val preferences = FormatterPreferences.getPreferences(cu.getJavaProject)
val edits =
try ScalaFormatter.formatAsEdits(cu.getSource, preferences, Some(lineDelimiter))
catch { case _: ScalaParserException => List() }
val multiEdit = new MultiTextEdit
multiEdit.addChildren(edits.map(asEclipseTextEdit).toArray)
val change = new CompilationUnitChange("Formatting", cu)
change.setEdit(multiEdit)
new CodeFormatFix(change)
}
}
| scala-ide/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/formatter/ScalaFormatterCleanUpProvider.scala | Scala | bsd-3-clause | 1,534 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.cluster
import java.nio.ByteBuffer
import kafka.utils.Logging
import org.apache.kafka.common.protocol.SecurityProtocol
import org.junit.Test
import scala.collection.mutable
class BrokerEndPointTest extends Logging {
@Test
def testSerDe() = {
val endpoint = new EndPoint("myhost", 9092, SecurityProtocol.PLAINTEXT)
val listEndPoints = Map(SecurityProtocol.PLAINTEXT -> endpoint)
val origBroker = new Broker(1, listEndPoints)
val brokerBytes = ByteBuffer.allocate(origBroker.sizeInBytes)
origBroker.writeTo(brokerBytes)
val newBroker = Broker.readFrom(brokerBytes.flip().asInstanceOf[ByteBuffer])
assert(origBroker == newBroker)
}
@Test
def testHashAndEquals() = {
val endpoint1 = new EndPoint("myhost", 9092, SecurityProtocol.PLAINTEXT)
val endpoint2 = new EndPoint("myhost", 9092, SecurityProtocol.PLAINTEXT)
val endpoint3 = new EndPoint("myhost", 1111, SecurityProtocol.PLAINTEXT)
val endpoint4 = new EndPoint("other", 1111, SecurityProtocol.PLAINTEXT)
val broker1 = new Broker(1, Map(SecurityProtocol.PLAINTEXT -> endpoint1))
val broker2 = new Broker(1, Map(SecurityProtocol.PLAINTEXT -> endpoint2))
val broker3 = new Broker(2, Map(SecurityProtocol.PLAINTEXT -> endpoint3))
val broker4 = new Broker(1, Map(SecurityProtocol.PLAINTEXT -> endpoint4))
assert(broker1 == broker2)
assert(broker1 != broker3)
assert(broker1 != broker4)
assert(broker1.hashCode() == broker2.hashCode())
assert(broker1.hashCode() != broker3.hashCode())
assert(broker1.hashCode() != broker4.hashCode())
val hashmap = new mutable.HashMap[Broker, Int]()
hashmap.put(broker1, 1)
assert(hashmap.getOrElse(broker1, -1) == 1)
}
@Test
def testFromJSON() = {
val brokerInfoStr = "{\\"version\\":2," +
"\\"host\\":\\"localhost\\"," +
"\\"port\\":9092," +
"\\"jmx_port\\":9999," +
"\\"timestamp\\":\\"1416974968782\\"," +
"\\"endpoints\\":[\\"PLAINTEXT://localhost:9092\\"]}"
val broker = Broker.createBroker(1, brokerInfoStr)
assert(broker.id == 1)
assert(broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).host == "localhost")
assert(broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).port == 9092)
}
@Test
def testFromOldJSON() = {
val brokerInfoStr = "{\\"jmx_port\\":-1,\\"timestamp\\":\\"1420485325400\\",\\"host\\":\\"172.16.8.243\\",\\"version\\":1,\\"port\\":9091}"
val broker = Broker.createBroker(1, brokerInfoStr)
assert(broker.id == 1)
assert(broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).host == "172.16.8.243")
assert(broker.getBrokerEndPoint(SecurityProtocol.PLAINTEXT).port == 9091)
}
@Test
def testBrokerEndpointFromURI() = {
var connectionString = "localhost:9092"
var endpoint = BrokerEndPoint.createBrokerEndPoint(1, connectionString)
assert(endpoint.host == "localhost")
assert(endpoint.port == 9092)
// also test for ipv6
connectionString = "[::1]:9092"
endpoint = BrokerEndPoint.createBrokerEndPoint(1, connectionString)
assert(endpoint.host == "::1")
assert(endpoint.port == 9092)
// add test for uppercase in hostname
connectionString = "MyHostname:9092"
endpoint = BrokerEndPoint.createBrokerEndPoint(1, connectionString)
assert(endpoint.host == "MyHostname")
assert(endpoint.port == 9092)
}
@Test
def testEndpointFromURI() = {
var connectionString = "PLAINTEXT://localhost:9092"
var endpoint = EndPoint.createEndPoint(connectionString)
assert(endpoint.host == "localhost")
assert(endpoint.port == 9092)
assert(endpoint.connectionString == "PLAINTEXT://localhost:9092")
// also test for default bind
connectionString = "PLAINTEXT://:9092"
endpoint = EndPoint.createEndPoint(connectionString)
assert(endpoint.host == null)
assert(endpoint.port == 9092)
assert(endpoint.connectionString == "PLAINTEXT://:9092")
// also test for ipv6
connectionString = "PLAINTEXT://[::1]:9092"
endpoint = EndPoint.createEndPoint(connectionString)
assert(endpoint.host == "::1")
assert(endpoint.port == 9092)
assert(endpoint.connectionString == "PLAINTEXT://[::1]:9092")
// test hostname
connectionString = "PLAINTEXT://MyHostname:9092"
endpoint = EndPoint.createEndPoint(connectionString)
assert(endpoint.host == "MyHostname")
assert(endpoint.port == 9092)
assert(endpoint.connectionString == "PLAINTEXT://MyHostname:9092")
}
}
| eljefe6a/kafka | core/src/test/scala/unit/kafka/cluster/BrokerEndPointTest.scala | Scala | apache-2.0 | 5,366 |
package com.thinkbiganalytics.spark.metadata
import com.thinkbiganalytics.spark.SparkContextService
import com.thinkbiganalytics.spark.dataprofiler.Profiler
import com.thinkbiganalytics.spark.rest.model.TransformResponse
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.slf4j.LoggerFactory
/** Wraps a transform script into a function that can be evaluated.
*
  * @param destination         the name of the destination Hive table
  * @param profiler            the data profiler applied to the transformation results
  * @param sqlContext          the Spark SQL context
  * @param sparkContextService service used to convert the cached DataFrame into a data set for the response
*/
abstract class TransformScript20(destination: String, profiler: Profiler, sqlContext: SQLContext, sparkContextService: SparkContextService) extends TransformScript(destination, profiler) {
private[this] val log = LoggerFactory.getLogger(classOf[TransformScript])
/** Evaluates this transform script and stores the result in a Hive table. */
def run(): QueryResultCallable = {
new QueryResultCallable20
}
/** Evaluates the transform script.
*
* @return the transformation result
*/
protected[metadata] def dataFrame: DataFrame
/** Fetches or re-generates the results of the parent transformation, if available.
*
* @return the parent results
*/
protected def parent: DataFrame = {
try {
sqlContext.read.table(parentTable)
}
catch {
case e: Exception =>
log.trace("Exception reading parent table: {}", e.toString)
log.debug("Parent table not found: {}", parentTable)
parentDataFrame
}
}
/** Re-generates the parent transformation.
*
* @return the parent transformation
*/
protected override def parentDataFrame: DataFrame = {
throw new UnsupportedOperationException
}
  /** Caches the `DataFrame`, registers it as a temporary table, and returns the result as a [[com.thinkbiganalytics.spark.rest.model.TransformResponse]]. */
private class QueryResultCallable20 extends QueryResultCallable {
override def call(): TransformResponse = {
// Cache data frame
val cache = dataFrame.cache
cache.registerTempTable(destination)
// Build response object
toResponse(sparkContextService.toDataSet(cache))
}
}
}
| rashidaligee/kylo | integrations/spark/spark-shell-client/spark-shell-client-v2/src/main/scala/com/thinkbiganalytics/spark/metadata/TransformScript20.scala | Scala | apache-2.0 | 2,270 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.streams.stage.simple
import akka.stream.testkit.scaladsl.TestSink
import com.github.dnvriend.streams.TestSpec
class FilterStageTest extends TestSpec {
/**
* Only pass on those elements that satisfy the given predicate.
*
* - Emits when: the given predicate returns true for the element
* - Backpressures when: the given predicate returns true for the element and downstream backpressures
* - Completes when: upstream completes
* - Cancels when: downstream cancels
*/
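  // Illustrative aside: the same predicate applied to a plain collection,
  // e.g. (0 to 9).filter(_ % 2 == 0), keeps exactly 0, 2, 4, 6, 8, which is
  // what the stream assertion below expects.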
"Filter a sequence of numbers for even numbers" should "emit only even numbers" in {
withIterator() { src ⇒
src.take(10)
.filter(_ % 2 == 0)
.runWith(TestSink.probe[Int])
.request(Integer.MAX_VALUE)
.expectNext(0, 2, 4, 6, 8)
.expectComplete()
}
}
}
| dnvriend/intro-to-akka-streams | src/test/scala/com/github/dnvriend/streams/stage/simple/FilterStageTest.scala | Scala | apache-2.0 | 1,429 |
package bazooka.server.service
import com.google.inject._
import com.google.gwt.user.client.rpc._
import com.google.gwt.user.server.rpc._
@Singleton
class BazookaRemoteServiceServlet extends RemoteServiceServlet {
@Inject private var injector: Injector = _
override def processCall(payload: String) = {
try {
val request = RPC.decodeRequest(payload, null, this)
      val service = getServiceInstance(request.getMethod.getDeclaringClass)
RPC.invokeAndEncodeResponse(service, request.getMethod, request.getParameters, request.getSerializationPolicy)
}
catch {
case ex: IncompatibleRemoteServiceException =>
log("IncompatibleRemoteServiceException in the processCall(String) method", ex)
        RPC.encodeResponseForFailure(null, ex)
}
}
private def getServiceInstance(serviceClass: Class[_]) = injector.getInstance(serviceClass)
} | tiagofernandez/bazooka | src/main/scala/bazooka/server/servlet/BazookaRemoteServiceServlet.scala | Scala | mit | 888 |
/*
* Licensed to The Apereo Foundation under one or more contributor license
* agreements. See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* The Apereo Foundation licenses this file to you under the Apache License,
* Version 2.0, (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tle.web.controls.universal.handlers.fileupload.details
import com.tle.beans.item.attachments.Attachment
import com.tle.common.Check
import com.tle.common.filesystem.FileEntry
import com.tle.core.services.ZipProgress
import com.tle.web.controls.universal.handlers.fileupload.{AttachmentDelete, WebFileUploads}
import com.tle.web.controls.universal.{ControlContext, DialogRenderOptions}
import com.tle.web.sections.SectionInfo
import com.tle.web.sections.events.RenderContext
import com.tle.web.sections.render.{Label, SectionRenderable}
import com.tle.web.sections.standard.TextField
import com.tle.web.viewurl.ViewableResource
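/** Operations for treating an uploaded attachment as a zip archive: running the unzip,
  * tracking its progress, inspecting the extracted entries and removing them again. */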
trait ZipHandler {
def zipProgress: Option[ZipProgress]
def selectedAttachments: Map[String, Attachment]
def unzip: ZipProgress
def removeUnzipped(): Unit
def unzipped: Boolean
def unzippedEntries: Seq[FileEntry]
}
trait ViewerHandler {
def viewableResource(info: SectionInfo): ViewableResource
def viewerListModel: ViewersListModel
}
trait EditingHandler {
def editingArea: String
def syncEdits(filename: String): Unit
}
object DetailsPage {
val LABEL_ERROR_BLANK = WebFileUploads.label("handlers.abstract.error.blank")
}
import com.tle.web.controls.universal.handlers.fileupload.details.DetailsPage._
trait DetailsPage {
def editingAttachment: SectionInfo => Attachment
def previewable: Boolean
def renderDetails(context: RenderContext): (SectionRenderable, DialogRenderOptions => Unit)
def prepareUI(info: SectionInfo): Unit
def editAttachment(info: SectionInfo,
a: Attachment,
ctx: ControlContext): (Attachment, Option[AttachmentDelete])
def validate(info: SectionInfo): Boolean
def displayName: TextField
def validateDisplayName(info: SectionInfo): Option[(String, Label)] = {
if (Check.isEmpty(displayName.getValue(info))) {
Some("displayName" -> LABEL_ERROR_BLANK)
} else None
}
}
| equella/Equella | Source/Plugins/Core/com.equella.core/scalasrc/com/tle/web/controls/universal/handlers/fileupload/details/DetailsPage.scala | Scala | apache-2.0 | 2,750 |
package ore.models.project
import scala.language.higherKinds
import java.time.{Instant, OffsetDateTime, ZoneOffset}
import java.util.Locale
import ore.data.project.{Category, FlagReason, ProjectNamespace}
import ore.db._
import ore.db.access._
import ore.db.impl.OrePostgresDriver.api._
import ore.db.impl.common._
import ore.db.impl.schema._
import ore.db.impl.{DefaultModelCompanion, OrePostgresDriver}
import ore.member.{Joinable, MembershipDossier}
import ore.models.admin.ProjectVisibilityChange
import ore.models.api.ProjectApiKey
import ore.models.project.Project.ProjectSettings
import ore.models.statistic.ProjectView
import ore.models.user.role.ProjectUserRole
import ore.models.user.{User, UserOwned}
import ore.permission.role.Role
import ore.permission.scope.HasScope
import ore.syntax._
import ore.util.StringLocaleFormatterUtils
import cats.syntax.all._
import cats.{Functor, Monad, MonadError, Parallel}
import io.circe.Json
import io.circe.generic.JsonCodec
import io.circe.syntax._
import slick.lifted
import slick.lifted.{Rep, TableQuery}
/**
* Represents an Ore package.
*
* <p>Note: As a general rule, do not handle actions / results in model classes</p>
*
* @param pluginId Plugin ID
* @param ownerName The owner Author for this project
* @param ownerId User ID of Project owner
* @param name Name of plugin
* @param slug URL slug
* @param recommendedVersionId The ID of this project's recommended version
* @param topicId ID of forum topic
* @param postId ID of forum topic post ID
* @param isTopicDirty Whether this project's forum topic needs to be updated
* @param visibility Whether this project is visible to the default user
* @param notes JSON notes
*/
case class Project(
pluginId: String,
ownerName: String,
ownerId: DbRef[User],
name: String,
slug: String,
recommendedVersionId: Option[DbRef[Version]] = None,
category: Category = Category.Undefined,
description: Option[String],
topicId: Option[Int] = None,
postId: Option[Int] = None,
visibility: Visibility = Visibility.Public,
notes: Json = Json.obj(),
settings: ProjectSettings = ProjectSettings()
) extends Named
with Describable
with Visitable {
def namespace: ProjectNamespace = ProjectNamespace(ownerName, slug)
/**
* Returns the base URL for this Project.
*
* @return Base URL for project
*/
override def url: String = namespace.toString
/**
    * Returns all notes stored in this project's JSON notes field.
    * @return the decoded notes, or an empty sequence if none exist
*/
def decodeNotes: Seq[Note] =
notes.hcursor.getOrElse[Seq[Note]]("messages")(Nil).toTry.get //Should be safe. If it's not we got bigger problems
def isOwner(user: Model[User]): Boolean = user.id.value == ownerId
}
/**
  * This model is needed to convert the JSON
*/
@JsonCodec case class Note(message: String, user: DbRef[User], time: Long = System.currentTimeMillis()) {
def printTime(implicit locale: Locale): String =
StringLocaleFormatterUtils.prettifyDateAndTime(OffsetDateTime.ofInstant(Instant.ofEpochMilli(time), ZoneOffset.UTC))
}
object Project extends DefaultModelCompanion[Project, ProjectTable](TableQuery[ProjectTable]) {
case class ProjectSettings(
keywords: List[String] = Nil,
homepage: Option[String] = None,
issues: Option[String] = None,
source: Option[String] = None,
support: Option[String] = None,
licenseName: Option[String] = None,
licenseUrl: Option[String] = None,
forumSync: Boolean = true
)
implicit val query: ModelQuery[Project] = ModelQuery.from(this)
implicit val assocWatchersQuery: AssociationQuery[ProjectWatchersTable, Project, User] =
AssociationQuery.from[ProjectWatchersTable, Project, User](TableQuery[ProjectWatchersTable])(_.projectId, _.userId)
implicit val hasScope: HasScope[Model[Project]] = HasScope.projectScope(_.id)
private def queryRoleForTrust(projectId: Rep[DbRef[Project]], userId: Rep[DbRef[User]]) = {
val q = for {
m <- TableQuery[ProjectMembersTable] if m.projectId === projectId && m.userId === userId
r <- TableQuery[ProjectRoleTable] if m.userId === r.userId && r.projectId === projectId
} yield r.roleType
q.to[Set]
}
lazy val roleForTrustQuery = lifted.Compiled(queryRoleForTrust _)
implicit def projectHideable[F[_]](
implicit service: ModelService[F],
F: Monad[F],
parallel: Parallel[F]
): Hideable.Aux[F, Project, ProjectVisibilityChange, ProjectVisibilityChangeTable] = new Hideable[F, Project] {
override type MVisibilityChange = ProjectVisibilityChange
override type MVisibilityChangeTable = ProjectVisibilityChangeTable
override def visibility(m: Project): Visibility = m.visibility
/**
* Sets whether this project is visible.
*
* @param visibility True if visible
*/
override def setVisibility(m: Model[Project])(
visibility: Visibility,
comment: String,
creator: DbRef[User]
): F[(Model[Project], Model[ProjectVisibilityChange])] = {
val updateOldChange = lastVisibilityChange(m)(ModelView.now(ProjectVisibilityChange))
.semiflatMap { vc =>
service.update(vc)(
_.copy(
resolvedAt = Some(OffsetDateTime.now()),
resolvedBy = Some(creator)
)
)
}
.cata((), _ => ())
val createNewChange = service.insert(
ProjectVisibilityChange(
Some(creator),
m.id,
comment,
None,
None,
visibility
)
)
val updateProject = service.update(m)(
_.copy(
visibility = visibility
)
)
updateOldChange *> (updateProject, createNewChange).parTupled
}
/**
* Get VisibilityChanges
*/
override def visibilityChanges[V[_, _]: QueryView](m: Model[Project])(
view: V[ProjectVisibilityChangeTable, Model[ProjectVisibilityChange]]
): V[ProjectVisibilityChangeTable, Model[ProjectVisibilityChange]] = view.filterView(_.projectId === m.id.value)
}
implicit val isUserOwned: UserOwned[Project] = (a: Project) => a.ownerId
implicit def projectJoinable[F[_]](
implicit service: ModelService[F],
F: MonadError[F, Throwable],
par: Parallel[F]
): Joinable.Aux[F, Project, ProjectUserRole, ProjectRoleTable] = new Joinable[F, Project] {
type RoleType = ProjectUserRole
type RoleTypeTable = ProjectRoleTable
override def transferOwner(m: Model[Project])(newOwner: DbRef[User]): F[Model[Project]] = {
// Down-grade current owner to "Developer"
val oldOwner = m.ownerId
for {
newOwnerUser <- ModelView
.now(User)
.get(newOwner)
.getOrElseF(F.raiseError(new Exception("Could not find user to transfer owner to")))
t2 <- (this.memberships.getRoles(m)(oldOwner), this.memberships.getRoles(m)(newOwner)).parTupled
(ownerRoles, userRoles) = t2
setOwner <- setOwner(m)(newOwnerUser)
_ <- ownerRoles
.filter(_.role == Role.ProjectOwner)
.toVector
.parTraverse(role => service.update(role)(_.copy(role = Role.ProjectDeveloper)))
_ <- userRoles.toVector.parTraverse(role => service.update(role)(_.copy(role = Role.ProjectOwner)))
} yield setOwner
}
private def setOwner(m: Model[Project])(user: Model[User]): F[Model[Project]] = {
service.update(m)(
_.copy(
ownerId = user.id
)
)
}
override def memberships: MembershipDossier.Aux[F, Project, RoleType, RoleTypeTable] =
MembershipDossier.projectHasMemberships
override def userOwned: UserOwned[Project] = isUserOwned
}
implicit class ProjectModelOps(private val self: Model[Project]) extends AnyVal {
/**
      * Returns ModelAccess to the users who are watching this project.
*
* @return Users watching project
*/
def watchers[F[_]: ModelService: Functor]
: ParentAssociationAccess[ProjectWatchersTable, Project, User, ProjectTable, UserTable, F] =
new ModelAssociationAccessImpl(OrePostgresDriver)(Project, User).applyParent(self.id)
/**
* Returns [[ore.db.access.ChildAssociationAccess]] to [[User]]s who have starred this
* project.
*
* @return Users who have starred this project
*/
def stars[F[_]: ModelService: Functor]
: ChildAssociationAccess[ProjectStarsTable, User, Project, UserTable, ProjectTable, F] =
new ModelAssociationAccessImpl[ProjectStarsTable, User, Project, UserTable, ProjectTable, F](
OrePostgresDriver
)(
User,
Project
).applyChild(self.id)
/**
* Returns this Project's recommended version.
*
* @return Recommended version
*/
def recommendedVersion[QOptRet, SRet[_]](
view: ModelView[QOptRet, SRet, VersionTable, Model[Version]]
): Option[QOptRet] =
self.recommendedVersionId.map(versions(view).get)
/**
* Sets the "starred" state of this Project for the specified User.
*
* @param user User to set starred state of
*/
def toggleStarredBy[F[_]](
user: Model[User]
)(implicit service: ModelService[F], F: Monad[F]): F[Project] =
for {
contains <- self.stars.contains(user.id)
_ <- if (contains)
self.stars.removeAssoc(user.id)
else
self.stars.addAssoc(user.id)
} yield self
/**
* Returns all flags on this project.
*
* @return Flags on project
*/
def flags[V[_, _]: QueryView](view: V[FlagTable, Model[Flag]]): V[FlagTable, Model[Flag]] =
view.filterView(_.projectId === self.id.value)
/**
* Submits a flag on this project for the specified user.
*
* @param user Flagger
* @param reason Reason for flagging
*/
def flagFor[F[_]](user: Model[User], reason: FlagReason, comment: String)(
implicit service: ModelService[F]
): F[Model[Flag]] = {
val userId = user.id.value
require(userId != self.ownerId, "cannot flag own project")
service.insert(Flag(self.id, user.id, reason, comment))
}
/**
* Returns the Channels in this Project.
*
* @return Channels in project
*/
def channels[V[_, _]: QueryView](view: V[ChannelTable, Model[Channel]]): V[ChannelTable, Model[Channel]] =
view.filterView(_.projectId === self.id.value)
/**
* Returns all versions in this project.
*
* @return Versions in project
*/
def versions[V[_, _]: QueryView](view: V[VersionTable, Model[Version]]): V[VersionTable, Model[Version]] =
view.filterView(_.projectId === self.id.value)
/**
* Returns the pages in this Project.
*
* @return Pages in project
*/
def pages[V[_, _]: QueryView](view: V[PageTable, Model[Page]]): V[PageTable, Model[Page]] =
view.filterView(_.projectId === self.id.value)
/**
* Returns the parentless, root, pages for this project.
*
* @return Root pages of project
*/
def rootPages[V[_, _]: QueryView](view: V[PageTable, Model[Page]]): V[PageTable, Model[Page]] =
view.sortView(_.name).filterView(p => p.projectId === self.id.value && p.parentId.isEmpty)
def apiKeys[V[_, _]: QueryView](
view: V[ProjectApiKeyTable, Model[ProjectApiKey]]
): V[ProjectApiKeyTable, Model[ProjectApiKey]] =
view.filterView(_.projectId === self.id.value)
/**
* Add new note
*/
def addNote[F[_]](message: Note)(implicit service: ModelService[F]): F[Model[Project]] = {
val messages = self.decodeNotes :+ message
service.update(self)(
_.copy(
notes = Json.obj(
"messages" := messages
)
)
)
}
}
}
| SpongePowered/Ore | models/src/main/scala/ore/models/project/Project.scala | Scala | mit | 12,017 |
package scala.pickling.json.list.t.`new`
import org.scalatest.FunSuite
import scala.pickling._, scala.pickling.Defaults._, json._
class JsonListTNewTest extends FunSuite {
test("main") {
val pickle = List(1, 2, 3).pickle
assert(pickle.toString === """
|JSONPickle({
| "tpe": "scala.collection.immutable.$colon$colon[scala.Int]",
| "head": 1,
| "tl": {
| "tpe": "scala.collection.immutable.$colon$colon[scala.Int]",
| "head": 2,
| "tl": {
| "tpe": "scala.collection.immutable.$colon$colon[scala.Int]",
| "head": 3,
| "tl": {
| "tpe": "scala.collection.immutable.Nil.type"
| }
| }
| }
|})
""".stripMargin.trim)
assert(pickle.unpickle[List[Int]] === List(1, 2, 3))
}
}
| eed3si9n/pickling-historical | core/src/test/scala/pickling/run/json-list-t-new.scala | Scala | bsd-3-clause | 821 |
/*
* (c) Copyright 2014 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.linkedin.rookboom.web.dust.loader
/**
* @author Sergey Skrobotov, [email protected]
*/
trait TemplateLoader {
def loadTemplates(): Map[String, String]
def loadTemplates(since: Long): Map[String, String]
} | linkedin/RookBoom | web/src/main/scala/com/linkedin/rookboom/web/dust/loader/TemplateLoader.scala | Scala | apache-2.0 | 857 |
package com.eclipsesource.schema
import com.eclipsesource.schema.drafts.Version7
import com.eclipsesource.schema.test.JsonSpec
import org.specs2.mutable.Specification
class ConstSpec extends Specification with JsonSpec {
"const draft7" in {
import Version7._
implicit val validator = SchemaValidator(Some(Version7))
validate("const", "draft7")
}
}
| eclipsesource/play-json-schema-validator | src/test/scala/com/eclipsesource/schema/ConstSpec.scala | Scala | apache-2.0 | 368 |
package revenueAnalysis
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.joda.time.DateTime
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{ UserDefinedFunction, DataFrame, SQLContext, GroupedData }
import java.lang.Math
import java.util.concurrent.TimeUnit
object DataToParquet extends App {
val sparkConf: SparkConf = Common.getSparkConf("DataToParquet")
val sparkContext: SparkContext = new SparkContext(sparkConf)
val sqlContext: SQLContext = new SQLContext(sparkContext)
private val dataDir: String = "/Users/kellypet/Desktop/PredictionIO/Ecommerce-Price-Predict-Project/fb/"
private val usersCSV: String = dataDir + "users.csv"
private val viewsCSV: String = dataDir + "views.csv"
private val itemsCSV: String = dataDir + "items.csv"
private val purchasesCSV: String = dataDir + "conversions.csv"
private val adsCSV: String = dataDir + "users_ads.csv"
import sqlContext.implicits._
case class Users(
userId: String,
timestamp: String,
signupTime: Double,
registerCountry: String)
extends Serializable
val usersDF: DataFrame = sparkContext.textFile(usersCSV, 750)
.map(line => {
val fields: Array[String] = line.split(",")
val numFields: Int = fields.size
val userId: String = fields(0)
val timestamp: String = fields(numFields - 1)
val timeDate: DateTime = DateTime.parse(timestamp)
val signupTime: Double = timeDate.getMillis.toDouble / 1000
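      // Re-join the remaining middle fields so register-country values that themselves
      // contain commas survive the split above instead of being truncated.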
val registerCountry: String = fields
.slice(1, numFields - 1)
.mkString(",")
Users(userId, timestamp, signupTime, registerCountry)
})
.toDF
.dropDuplicates(Seq("userId"))
usersDF.write.parquet("data/users.parquet")
case class Conversions(
userId: String,
itemId: String,
itemPrice: Double,
quantity: Double,
conversionTime: Double)
extends Serializable
val conversionDF: DataFrame = sparkContext.textFile(purchasesCSV, 750)
.map(line => {
val fields: Array[String] = line.split(",")
val numFields: Int = fields.size
val userId: String = fields(0)
val itemId: String = fields(1)
val itemPrice: Double = fields(2).trim.toDouble
val quantity: Double = fields(3).trim.toInt
val timestamp: String = fields(4)
val timeDate: DateTime = DateTime.parse(timestamp)
val conversionTime: Double = timeDate.getMillis.toDouble / 1000
Conversions(userId, itemId, itemPrice, quantity, conversionTime)
})
.toDF
.dropDuplicates(Seq("userId"))
conversionDF.write.parquet("data/conversions.parquet")
case class Views(
userId: String,
viewTime: Double)
extends Serializable
val viewsDF: DataFrame = sparkContext.textFile(viewsCSV, 750)
.map(line => {
val fields: Array[String] = line.split(",")
val numFields: Int = fields.size
val userId: String = fields(0)
val timestamp: String = fields(2)
val timeDate: DateTime = DateTime.parse(timestamp)
val viewTime: Double = timeDate.getMillis.toDouble / 1000
Views(userId, viewTime)
})
.toDF
.dropDuplicates(Seq("userId"))
viewsDF.write.parquet("data/views.parquet")
case class UsersAds(
userId: String,
utmSource: String,
utmCampaign: String,
utmMedium: String,
utmTerm: String,
utmContent: String)
val usersAds: DataFrame = sparkContext.textFile(adsCSV, 750)
.map(line => {
val fields = line.split(",")
UsersAds(
userId = fields(0),
utmSource = fields(1),
utmCampaign = fields(2),
utmMedium = fields(3),
utmTerm = fields(4),
utmContent = fields(5))
})
.toDF
.dropDuplicates(Seq("userId"))
usersAds.write.parquet("data/ads.parquet")
//var conversionDF = usersDF.join(usersConvDF, "userId")
// var usersDF:DataFrame = sqlContext.read.parquet("data/users.parquet")
// var userAndPurchaseDF = usersDF.join(conversionDF, "userId")
// var viewsDF: DataFrame = sqlContext.read.parquet("data/views.parquet")
// var activityDF = viewsDF.join(userAndPurchaseDF, "userId")
// activityDF.write.parquet("data/userActivity.parquet")
}
| PredictionIO/open-academy | KellyPeterson/revenueAnalysis/src/main/scala/revenueAnalysis/dataToParquet.scala | Scala | apache-2.0 | 4,198 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.calculations.IntangibleAssetsCalculator
import uk.gov.hmrc.ct.accounts.frs102.retriever.{AbridgedAccountsBoxRetriever, Frs102AccountsBoxRetriever, FullAccountsBoxRetriever}
import uk.gov.hmrc.ct.box._
case class AC121(value: Option[Int]) extends CtBoxIdentifier(name = "Amortisation at [POA END]")
with CtOptionalInteger
with Input
with ValidatableBox[Frs102AccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value, min = 0)
)
}
}
object AC121 extends Calculated[AC121, Frs102AccountsBoxRetriever]
with IntangibleAssetsCalculator {
override def calculate(boxRetriever: Frs102AccountsBoxRetriever): AC121 = {
boxRetriever match {
case x: AbridgedAccountsBoxRetriever => calculateAbridgedAC121(x.ac118(), x.ac119(), x.ac120(), x.ac211())
case x: FullAccountsBoxRetriever => calculateFullAC121(x.ac121A(), x.ac121B())
}
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC121.scala | Scala | apache-2.0 | 1,673 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.apigateway.connector.impl
import javax.inject.Singleton
import uk.gov.hmrc.play.audit.http.config.LoadAuditingConfig
import uk.gov.hmrc.play.audit.http.connector.AuditConnector
import uk.gov.hmrc.play.config.RunMode
@Singleton
class MicroserviceAuditConnector extends AuditConnector with RunMode {
override lazy val auditingConfig = LoadAuditingConfig(s"$env.auditing")
}
| hmrc/api-gateway | app/uk/gov/hmrc/apigateway/connector/impl/MicroserviceAuditConnector.scala | Scala | apache-2.0 | 1,002 |
package io.github.meln1k.vkApi.models.wall
import play.api.libs.json.Json
case class PostId(post_id: Long)
object PostId {
implicit val postIdReads = Json.reads[PostId]
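  // Example (illustrative): a wall-post response body such as {"post_id": 42}
  // can be decoded with Json.parse("""{"post_id": 42}""").as[PostId].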
}
| meln1k/vk-scala-api | src/main/scala/io/github/meln1k/vkApi/models/wall/PostId.scala | Scala | apache-2.0 | 177 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.oap.expression
import com.google.common.collect.Lists
import com.intel.oap.ColumnarPluginConfig
import org.apache.arrow.gandiva.evaluator._
import org.apache.arrow.gandiva.exceptions.GandivaException
import org.apache.arrow.gandiva.expression._
import org.apache.arrow.vector.types.pojo.ArrowType
import org.apache.arrow.vector.types.pojo.Field
import org.apache.arrow.vector.types.DateUnit
import org.apache.arrow.vector.types.pojo.ArrowType.ArrowTypeID
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._
import scala.collection.mutable.ListBuffer
/**
 * Columnar version of Spark's And expression that generates a Gandiva expression tree node.
*/
class ColumnarAnd(left: Expression, right: Expression, original: Expression)
extends And(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
val (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val resultType = new ArrowType.Bool()
val funcNode = TreeBuilder.makeAnd(Lists.newArrayList(left_node, right_node))
(funcNode, resultType)
}
}
class ColumnarOr(left: Expression, right: Expression, original: Expression)
extends Or(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
val (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val resultType = new ArrowType.Bool()
val funcNode = TreeBuilder.makeOr(Lists.newArrayList(left_node, right_node))
(funcNode, resultType)
}
}
class ColumnarEndsWith(left: Expression, right: Expression, original: Expression)
extends EndsWith(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
val (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val resultType = new ArrowType.Bool()
val funcNode =
TreeBuilder.makeFunction("ends_with", Lists.newArrayList(left_node, right_node), resultType)
(funcNode, resultType)
}
}
class ColumnarStartsWith(left: Expression, right: Expression, original: Expression)
extends StartsWith(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
val (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val resultType = new ArrowType.Bool()
val funcNode =
TreeBuilder.makeFunction("starts_with", Lists.newArrayList(left_node, right_node), resultType)
(funcNode, resultType)
}
}
class ColumnarLike(left: Expression, right: Expression, original: Expression)
extends Like(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
val (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val resultType = new ArrowType.Bool()
val funcNode =
TreeBuilder.makeFunction("like", Lists.newArrayList(left_node, right_node), resultType)
(funcNode, resultType)
}
}
class ColumnarContains(left: Expression, right: Expression, original: Expression)
extends Contains(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
val (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val (right_node, right_type): (TreeNode, ArrowType) =
(TreeBuilder.makeStringLiteral(right.toString()), new ArrowType.Utf8())
val resultType = new ArrowType.Bool()
val funcNode =
TreeBuilder.makeFunction("is_substr", Lists.newArrayList(left_node, right_node), resultType)
(funcNode, resultType)
}
}
class ColumnarEqualTo(left: Expression, right: Expression, original: Expression)
extends EqualTo(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val unifiedType = CodeGeneration.getResultType(left_type, right_type)
if (!left_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
left_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(left_node), unifiedType)
}
if (!right_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
right_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(right_node), unifiedType)
}
var function = "equal"
val nanCheck = ColumnarPluginConfig.getConf.enableColumnarNaNCheck
if (nanCheck) {
unifiedType match {
case t: ArrowType.FloatingPoint =>
function = "equal_with_nan"
case _ =>
}
}
val resultType = new ArrowType.Bool()
val funcNode =
TreeBuilder.makeFunction(function, Lists.newArrayList(left_node, right_node), resultType)
(funcNode, resultType)
}
}
class ColumnarEqualNull(left: Expression, right: Expression, original: Expression)
extends EqualNullSafe(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val unifiedType = CodeGeneration.getResultType(left_type, right_type)
if (!left_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
left_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(left_node), unifiedType)
}
if (!right_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
right_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(right_node), unifiedType)
}
var function = "equal"
val nanCheck = ColumnarPluginConfig.getConf.enableColumnarNaNCheck
if (nanCheck) {
unifiedType match {
case t: ArrowType.FloatingPoint =>
function = "equal_with_nan"
case _ =>
}
}
val resultType = new ArrowType.Bool()
val funcNode =
TreeBuilder.makeFunction(function, Lists.newArrayList(left_node, right_node), resultType)
(funcNode, resultType)
}
}
class ColumnarLessThan(left: Expression, right: Expression, original: Expression)
extends LessThan(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val unifiedType = CodeGeneration.getResultType(left_type, right_type)
if (!left_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
left_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(left_node), unifiedType)
}
if (!right_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
right_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(right_node), unifiedType)
}
var function = "less_than"
val nanCheck = ColumnarPluginConfig.getConf.enableColumnarNaNCheck
if (nanCheck) {
unifiedType match {
case t: ArrowType.FloatingPoint =>
function = "less_than_with_nan"
case _ =>
}
}
val resultType = new ArrowType.Bool()
val funcNode =
TreeBuilder.makeFunction(function, Lists.newArrayList(left_node, right_node), resultType)
(funcNode, resultType)
}
}
class ColumnarLessThanOrEqual(left: Expression, right: Expression, original: Expression)
extends LessThanOrEqual(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val unifiedType = CodeGeneration.getResultType(left_type, right_type)
if (!left_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
left_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(left_node), unifiedType)
}
if (!right_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
right_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(right_node), unifiedType)
}
var function = "less_than_or_equal_to"
val nanCheck = ColumnarPluginConfig.getConf.enableColumnarNaNCheck
if (nanCheck) {
unifiedType match {
case t: ArrowType.FloatingPoint =>
function = "less_than_or_equal_to_with_nan"
case _ =>
}
}
val resultType = new ArrowType.Bool()
val funcNode = TreeBuilder.makeFunction(
function,
Lists.newArrayList(left_node, right_node),
resultType)
(funcNode, resultType)
}
}
class ColumnarGreaterThan(left: Expression, right: Expression, original: Expression)
extends GreaterThan(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val unifiedType = CodeGeneration.getResultType(left_type, right_type)
if (!left_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
left_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(left_node), unifiedType)
}
if (!right_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
right_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(right_node), unifiedType)
}
var function = "greater_than"
val nanCheck = ColumnarPluginConfig.getConf.enableColumnarNaNCheck
if (nanCheck) {
unifiedType match {
case t: ArrowType.FloatingPoint =>
function = "greater_than_with_nan"
case _ =>
}
}
val resultType = new ArrowType.Bool()
val funcNode = TreeBuilder.makeFunction(
function,
Lists.newArrayList(left_node, right_node),
resultType)
(funcNode, resultType)
}
}
class ColumnarGreaterThanOrEqual(left: Expression, right: Expression, original: Expression)
extends GreaterThanOrEqual(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: java.lang.Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
val unifiedType = CodeGeneration.getResultType(left_type, right_type)
if (!left_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
left_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(left_node), unifiedType)
}
if (!right_type.equals(unifiedType)) {
val func_name = CodeGeneration.getCastFuncName(unifiedType)
right_node =
TreeBuilder.makeFunction(func_name, Lists.newArrayList(right_node), unifiedType)
}
var function = "greater_than_or_equal_to"
val nanCheck = ColumnarPluginConfig.getConf.enableColumnarNaNCheck
if (nanCheck) {
unifiedType match {
case t: ArrowType.FloatingPoint =>
function = "greater_than_or_equal_to_with_nan"
case _ =>
}
}
val resultType = new ArrowType.Bool()
val funcNode = TreeBuilder.makeFunction(
function,
Lists.newArrayList(left_node, right_node),
resultType)
(funcNode, resultType)
}
}
class ColumnarShiftLeft(left: Expression, right: Expression, original: Expression)
extends ShiftLeft(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
if (right_type.getTypeID != ArrowTypeID.Int) {
throw new IllegalArgumentException("shiftleft requires for int type on second parameter")
}
val resultType = left_type
val funcNode = TreeBuilder.makeFunction(
"shift_left",
Lists.newArrayList(left_node, right_node),
resultType)
(funcNode, resultType)
}
}
class ColumnarShiftRight(left: Expression, right: Expression, original: Expression)
extends ShiftRight(left: Expression, right: Expression)
with ColumnarExpression
with Logging {
override def doColumnarCodeGen(args: Object): (TreeNode, ArrowType) = {
var (left_node, left_type): (TreeNode, ArrowType) =
left.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
var (right_node, right_type): (TreeNode, ArrowType) =
right.asInstanceOf[ColumnarExpression].doColumnarCodeGen(args)
if (right_type.getTypeID != ArrowTypeID.Int) {
throw new IllegalArgumentException("shiftright requires for int type on second parameter")
}
val resultType = left_type
val funcNode = TreeBuilder.makeFunction(
"shift_right",
Lists.newArrayList(left_node, right_node),
resultType)
(funcNode, resultType)
}
}
object ColumnarBinaryOperator {
def create(left: Expression, right: Expression, original: Expression): Expression =
original match {
case a: And =>
new ColumnarAnd(left, right, a)
case o: Or =>
new ColumnarOr(left, right, o)
case e: EqualTo =>
new ColumnarEqualTo(left, right, e)
case e: EqualNullSafe =>
new ColumnarEqualNull(left, right, e)
case l: LessThan =>
new ColumnarLessThan(left, right, l)
case l: LessThanOrEqual =>
new ColumnarLessThanOrEqual(left, right, l)
case g: GreaterThan =>
new ColumnarGreaterThan(left, right, g)
case g: GreaterThanOrEqual =>
new ColumnarGreaterThanOrEqual(left, right, g)
case e: EndsWith =>
new ColumnarEndsWith(left, right, e)
case s: StartsWith =>
new ColumnarStartsWith(left, right, s)
case c: Contains =>
new ColumnarContains(left, right, c)
case l: Like =>
new ColumnarLike(left, right, l)
case s: ShiftLeft =>
new ColumnarShiftLeft(left, right, s)
case s: ShiftRight =>
new ColumnarShiftRight(left, right, s)
case other =>
throw new UnsupportedOperationException(s"not currently supported: $other.")
}
}
| Intel-bigdata/OAP | oap-native-sql/core/src/main/scala/com/intel/oap/expression/ColumnarBinaryOperator.scala | Scala | apache-2.0 | 17,856 |
package domala
import org.seasar.doma
package object jdbc {
// Alias of Doma type
type SqlLogType = doma.jdbc.SqlLogType
object SqlLogType {
val RAW = doma.jdbc.SqlLogType.RAW
val FORMATTED = doma.jdbc.SqlLogType.FORMATTED
val NONE = doma.jdbc.SqlLogType.NONE
}
type Naming = doma.jdbc.Naming
object Naming {
val NONE: Naming = doma.jdbc.Naming.NONE
val LOWER_CASE: Naming = doma.jdbc.Naming.LOWER_CASE
val UPPER_CASE: Naming = doma.jdbc.Naming.UPPER_CASE
val SNAKE_LOWER_CASE: Naming = doma.jdbc.Naming.SNAKE_LOWER_CASE
val SNAKE_UPPER_CASE: Naming = doma.jdbc.Naming.SNAKE_UPPER_CASE
val LENIENT_SNAKE_UPPER_CASE: Naming = doma.jdbc.Naming.LENIENT_SNAKE_UPPER_CASE
}
}
| bakenezumi/domala | core/src/main/scala/domala/jdbc/package.scala | Scala | apache-2.0 | 724 |
package com.arcusys.valamis.model
/** The scope at which a setting or entity applies: instance, site, page, or player */
object ScopeType extends Enumeration {
type ScopeType = Value
val Instance = Value("instance")
val Site = Value("site")
val Page = Value("page")
val Player = Value("player")
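  // Example (illustrative): ScopeType.withName("site") resolves back to ScopeType.Site.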
} | ViLPy/Valamis | valamis-core/src/main/scala/com/arcusys/valamis/model/ScopeType.scala | Scala | lgpl-3.0 | 272 |
package skinny.validator
import org.scalatest._
class ErrorsSpec extends FlatSpec with Matchers {
behavior of "Errors"
it should "be available" in {
val instance = new Errors(Map())
instance should not be null
}
}
| seratch/skinny-framework | validator/src/test/scala/skinny/validator/ErrorsSpec.scala | Scala | mit | 233 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.io.{File, IOException}
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicBoolean
import com.yammer.metrics.core.Gauge
import kafka.api._
import kafka.cluster.{BrokerEndPoint, Partition, Replica}
import kafka.common._
import kafka.controller.KafkaController
import kafka.log.{LogAppendInfo, LogManager}
import kafka.message.{ByteBufferMessageSet, MessageSet}
import kafka.metrics.KafkaMetricsGroup
import kafka.utils._
import org.I0Itec.zkclient.ZkClient
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.utils.{Time => JTime}
import scala.collection._
/*
* Result metadata of a log append operation on the log
*/
case class LogAppendResult(info: LogAppendInfo, error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
}
/*
* Result metadata of a log read operation on the log
* @param info @FetchDataInfo returned by the @Log read
* @param hw high watermark of the local replica
* @param readSize amount of data that was read from the log i.e. size of the fetch
* @param isReadFromLogEnd true if the request read up to the log end offset snapshot
* when the read was initiated, false otherwise
* @param error Exception if error encountered while reading from the log
*/
case class LogReadResult(info: FetchDataInfo,
hw: Long,
readSize: Int,
isReadFromLogEnd : Boolean,
error: Option[Throwable] = None) {
def errorCode = error match {
case None => ErrorMapping.NoError
case Some(e) => ErrorMapping.codeFor(e.getClass.asInstanceOf[Class[Throwable]])
}
override def toString = {
"Fetch Data: [%s], HW: [%d], readSize: [%d], isReadFromLogEnd: [%b], error: [%s]"
.format(info, hw, readSize, isReadFromLogEnd, error)
}
}
object LogReadResult {
val UnknownLogReadResult = LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata,
MessageSet.Empty),
-1L,
-1,
false)
}
case class BecomeLeaderOrFollowerResult(responseMap: collection.Map[(String, Int), Short],
updatedLeaders: Set[Partition],
updatedFollowers: Set[Partition],
errorCode: Short) {
override def toString = {
"updated leaders: [%s], updated followers: [%s], update results: [%s], global error: [%d]"
.format(updatedLeaders, updatedFollowers, responseMap, errorCode)
}
}
object ReplicaManager {
val HighWatermarkFilename = "replication-offset-checkpoint"
}
class ReplicaManager(val config: KafkaConfig,
metrics: Metrics,
time: Time,
jTime: JTime,
val zkUtils: ZkUtils,
scheduler: Scheduler,
val logManager: LogManager,
val isShuttingDown: AtomicBoolean,
threadNamePrefix: Option[String] = None) extends Logging with KafkaMetricsGroup {
/* epoch of the controller that last changed the leader */
@volatile var controllerEpoch: Int = KafkaController.InitialControllerEpoch - 1
private val localBrokerId = config.brokerId
private val allPartitions = new Pool[(String, Int), Partition]
private val replicaStateChangeLock = new Object
val replicaFetcherManager = new ReplicaFetcherManager(config, this, metrics, jTime, threadNamePrefix)
private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false)
val highWatermarkCheckpoints = config.logDirs.map(dir => (new File(dir).getAbsolutePath, new OffsetCheckpoint(new File(dir, ReplicaManager.HighWatermarkFilename)))).toMap
private var hwThreadInitialized = false
this.logIdent = "[Replica Manager on Broker " + localBrokerId + "]: "
val stateChangeLogger = KafkaController.stateChangeLogger
private val isrChangeSet: mutable.Set[TopicAndPartition] = new mutable.HashSet[TopicAndPartition]()
val delayedProducePurgatory = new DelayedOperationPurgatory[DelayedProduce](
purgatoryName = "Produce", config.brokerId, config.producerPurgatoryPurgeIntervalRequests)
val delayedFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch](
purgatoryName = "Fetch", config.brokerId, config.fetchPurgatoryPurgeIntervalRequests)
newGauge(
"LeaderCount",
new Gauge[Int] {
def value = {
getLeaderPartitions().size
}
}
)
newGauge(
"PartitionCount",
new Gauge[Int] {
def value = allPartitions.size
}
)
newGauge(
"UnderReplicatedPartitions",
new Gauge[Int] {
def value = underReplicatedPartitionCount()
}
)
val isrExpandRate = newMeter("IsrExpandsPerSec", "expands", TimeUnit.SECONDS)
val isrShrinkRate = newMeter("IsrShrinksPerSec", "shrinks", TimeUnit.SECONDS)
def underReplicatedPartitionCount(): Int = {
getLeaderPartitions().count(_.isUnderReplicated)
}
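  // Starts the periodic high-watermark checkpointing task at most once, even when
  // called from multiple threads, guarded by the compare-and-set on the started flag.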
def startHighWaterMarksCheckPointThread() = {
if(highWatermarkCheckPointThreadStarted.compareAndSet(false, true))
scheduler.schedule("highwatermark-checkpoint", checkpointHighWatermarks, period = config.replicaHighWatermarkCheckpointIntervalMs, unit = TimeUnit.MILLISECONDS)
}
def recordIsrChange(topicAndPartition: TopicAndPartition) {
isrChangeSet synchronized {
isrChangeSet += topicAndPartition
}
}
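  /**
   * If any ISR changes have been recorded since the last run, propagate them to ZooKeeper
   * so that the controller and other brokers can pick them up, then clear the pending set.
   */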
def maybePropagateIsrChanges() {
isrChangeSet synchronized {
if (isrChangeSet.nonEmpty) {
ReplicationUtils.propagateIsrChanges(zkUtils, isrChangeSet)
isrChangeSet.clear()
}
}
}
/**
* Try to complete some delayed produce requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for acks = -1)
* 2. A follower replica's fetch operation is received (for acks > 1)
*/
def tryCompleteDelayedProduce(key: DelayedOperationKey) {
val completed = delayedProducePurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d producer requests.".format(key.keyLabel, completed))
}
/**
* Try to complete some delayed fetch requests with the request key;
* this can be triggered when:
*
* 1. The partition HW has changed (for regular fetch)
* 2. A new message set is appended to the local log (for follower fetch)
*/
def tryCompleteDelayedFetch(key: DelayedOperationKey) {
val completed = delayedFetchPurgatory.checkAndComplete(key)
debug("Request key %s unblocked %d fetch requests.".format(key.keyLabel, completed))
}
def startup() {
// start ISR expiration thread
scheduler.schedule("isr-expiration", maybeShrinkIsr, period = config.replicaLagTimeMaxMs, unit = TimeUnit.MILLISECONDS)
scheduler.schedule("isr-change-propagation", maybePropagateIsrChanges, period = 5000, unit = TimeUnit.MILLISECONDS)
}
def stopReplica(topic: String, partitionId: Int, deletePartition: Boolean): Short = {
stateChangeLogger.trace("Broker %d handling stop replica (delete=%s) for partition [%s,%d]".format(localBrokerId,
deletePartition.toString, topic, partitionId))
val errorCode = ErrorMapping.NoError
getPartition(topic, partitionId) match {
case Some(partition) =>
if(deletePartition) {
val removedPartition = allPartitions.remove((topic, partitionId))
if (removedPartition != null)
removedPartition.delete() // this will delete the local log
}
case None =>
// Delete log and corresponding folders in case replica manager doesn't hold them anymore.
// This could happen when topic is being deleted while broker is down and recovers.
if(deletePartition) {
val topicAndPartition = TopicAndPartition(topic, partitionId)
if(logManager.getLog(topicAndPartition).isDefined) {
logManager.deleteLog(topicAndPartition)
}
}
stateChangeLogger.trace("Broker %d ignoring stop replica (delete=%s) for partition [%s,%d] as replica doesn't exist on broker"
.format(localBrokerId, deletePartition, topic, partitionId))
}
stateChangeLogger.trace("Broker %d finished handling stop replica (delete=%s) for partition [%s,%d]"
.format(localBrokerId, deletePartition, topic, partitionId))
errorCode
}
def stopReplicas(stopReplicaRequest: StopReplicaRequest): (mutable.Map[TopicAndPartition, Short], Short) = {
replicaStateChangeLock synchronized {
val responseMap = new collection.mutable.HashMap[TopicAndPartition, Short]
if(stopReplicaRequest.controllerEpoch < controllerEpoch) {
stateChangeLogger.warn("Broker %d received stop replica request from an old controller epoch %d."
.format(localBrokerId, stopReplicaRequest.controllerEpoch) +
" Latest known controller epoch is %d " + controllerEpoch)
(responseMap, ErrorMapping.StaleControllerEpochCode)
} else {
controllerEpoch = stopReplicaRequest.controllerEpoch
// First stop fetchers for all partitions, then stop the corresponding replicas
replicaFetcherManager.removeFetcherForPartitions(stopReplicaRequest.partitions.map(r => TopicAndPartition(r.topic, r.partition)))
for(topicAndPartition <- stopReplicaRequest.partitions){
val errorCode = stopReplica(topicAndPartition.topic, topicAndPartition.partition, stopReplicaRequest.deletePartitions)
responseMap.put(topicAndPartition, errorCode)
}
(responseMap, ErrorMapping.NoError)
}
}
}
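  // Uses putIfNotExists so that concurrent callers racing to create the same partition
  // all observe a single Partition instance from the pool.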
def getOrCreatePartition(topic: String, partitionId: Int): Partition = {
var partition = allPartitions.get((topic, partitionId))
if (partition == null) {
allPartitions.putIfNotExists((topic, partitionId), new Partition(topic, partitionId, time, this))
partition = allPartitions.get((topic, partitionId))
}
partition
}
def getPartition(topic: String, partitionId: Int): Option[Partition] = {
val partition = allPartitions.get((topic, partitionId))
if (partition == null)
None
else
Some(partition)
}
def getReplicaOrException(topic: String, partition: Int): Replica = {
val replicaOpt = getReplica(topic, partition)
if(replicaOpt.isDefined)
replicaOpt.get
else
throw new ReplicaNotAvailableException("Replica %d is not available for partition [%s,%d]".format(config.brokerId, topic, partition))
}
def getLeaderReplicaIfLocal(topic: String, partitionId: Int): Replica = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None =>
throw new UnknownTopicOrPartitionException("Partition [%s,%d] doesn't exist on %d".format(topic, partitionId, config.brokerId))
case Some(partition) =>
partition.leaderReplicaIfLocal match {
case Some(leaderReplica) => leaderReplica
case None =>
throw new NotLeaderForPartitionException("Leader not local for partition [%s,%d] on broker %d"
.format(topic, partitionId, config.brokerId))
}
}
}
def getReplica(topic: String, partitionId: Int, replicaId: Int = config.brokerId): Option[Replica] = {
val partitionOpt = getPartition(topic, partitionId)
partitionOpt match {
case None => None
case Some(partition) => partition.getReplica(replicaId)
}
}
/**
* Append messages to leader replicas of the partition, and wait for them to be replicated to other replicas;
   * the callback function will be triggered either when the timeout is reached or the required acks are satisfied
*/
def appendMessages(timeout: Long,
requiredAcks: Short,
internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
responseCallback: Map[TopicAndPartition, ProducerResponseStatus] => Unit) {
if (isValidRequiredAcks(requiredAcks)) {
val sTime = SystemTime.milliseconds
val localProduceResults = appendToLocalLog(internalTopicsAllowed, messagesPerPartition, requiredAcks)
debug("Produce to local log in %d ms".format(SystemTime.milliseconds - sTime))
val produceStatus = localProduceResults.map { case (topicAndPartition, result) =>
topicAndPartition ->
ProducePartitionStatus(
result.info.lastOffset + 1, // required offset
ProducerResponseStatus(result.errorCode, result.info.firstOffset)) // response status
}
if (delayedRequestRequired(requiredAcks, messagesPerPartition, localProduceResults)) {
// create delayed produce operation
val produceMetadata = ProduceMetadata(requiredAcks, produceStatus)
val delayedProduce = new DelayedProduce(timeout, produceMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed produce operation
val producerRequestKeys = messagesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory
// this is because while the delayed produce operation is being created, new
// requests may arrive and hence make this operation completable.
delayedProducePurgatory.tryCompleteElseWatch(delayedProduce, producerRequestKeys)
} else {
// we can respond immediately
val produceResponseStatus = produceStatus.mapValues(status => status.responseStatus)
responseCallback(produceResponseStatus)
}
} else {
// If required.acks is outside accepted range, something is wrong with the client
// Just return an error and don't handle the request at all
val responseStatus = messagesPerPartition.map {
case (topicAndPartition, messageSet) =>
(topicAndPartition ->
ProducerResponseStatus(Errors.INVALID_REQUIRED_ACKS.code,
LogAppendInfo.UnknownLogAppendInfo.firstOffset))
}
responseCallback(responseStatus)
}
}
  // If all the following conditions are true, we need to create a delayed produce operation and wait for replication to complete
//
// 1. required acks = -1
// 2. there is data to append
// 3. at least one partition append was successful (fewer errors than partitions)
private def delayedRequestRequired(requiredAcks: Short, messagesPerPartition: Map[TopicAndPartition, MessageSet],
localProduceResults: Map[TopicAndPartition, LogAppendResult]): Boolean = {
requiredAcks == -1 &&
messagesPerPartition.size > 0 &&
localProduceResults.values.count(_.error.isDefined) < messagesPerPartition.size
}
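  // Accepted acks settings: 0 (no acknowledgement), 1 (leader has written the data) and -1 (wait for the full ISR)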
private def isValidRequiredAcks(requiredAcks: Short): Boolean = {
requiredAcks == -1 || requiredAcks == 1 || requiredAcks == 0
}
/**
* Append the messages to the local replica logs
*/
private def appendToLocalLog(internalTopicsAllowed: Boolean,
messagesPerPartition: Map[TopicAndPartition, MessageSet],
requiredAcks: Short): Map[TopicAndPartition, LogAppendResult] = {
trace("Append [%s] to local log ".format(messagesPerPartition))
messagesPerPartition.map { case (topicAndPartition, messages) =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).totalProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalProduceRequestRate.mark()
// reject appending to internal topics if it is not allowed
if (Topic.InternalTopics.contains(topicAndPartition.topic) && !internalTopicsAllowed) {
(topicAndPartition, LogAppendResult(
LogAppendInfo.UnknownLogAppendInfo,
Some(new InvalidTopicException("Cannot append to internal topic %s".format(topicAndPartition.topic)))))
} else {
try {
val partitionOpt = getPartition(topicAndPartition.topic, topicAndPartition.partition)
val info = partitionOpt match {
case Some(partition) =>
partition.appendMessagesToLeader(messages.asInstanceOf[ByteBufferMessageSet], requiredAcks)
case None => throw new UnknownTopicOrPartitionException("Partition %s doesn't exist on %d"
.format(topicAndPartition, localBrokerId))
}
val numAppendedMessages =
if (info.firstOffset == -1L || info.lastOffset == -1L)
0
else
info.lastOffset - info.firstOffset + 1
// update stats for successfully appended bytes and messages as bytesInRate and messageInRate
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerAllTopicsStats.bytesInRate.mark(messages.sizeInBytes)
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).messagesInRate.mark(numAppendedMessages)
BrokerTopicStats.getBrokerAllTopicsStats.messagesInRate.mark(numAppendedMessages)
trace("%d bytes written to log %s-%d beginning at offset %d and ending at offset %d"
.format(messages.sizeInBytes, topicAndPartition.topic, topicAndPartition.partition, info.firstOffset, info.lastOffset))
(topicAndPartition, LogAppendResult(info))
} catch {
          // NOTE: Failed produce requests metric is not incremented for known exceptions
          // since it is supposed to indicate unexpected failures of a broker in handling a produce request
case e: KafkaStorageException =>
fatal("Halting due to unrecoverable I/O error while handling produce request: ", e)
Runtime.getRuntime.halt(1)
(topicAndPartition, null)
case utpe: UnknownTopicOrPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(utpe)))
case nle: NotLeaderForPartitionException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(nle)))
case mtle: MessageSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mtle)))
case mstle: MessageSetSizeTooLargeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(mstle)))
case imse : InvalidMessageSizeException =>
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(imse)))
case t: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).failedProduceRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats.failedProduceRequestRate.mark()
error("Error processing append operation on partition %s".format(topicAndPartition), t)
(topicAndPartition, LogAppendResult(LogAppendInfo.UnknownLogAppendInfo, Some(t)))
}
}
}
}
/**
* Fetch messages from the leader replica, and wait until enough data can be fetched and return;
* the callback function will be triggered either when timeout or required fetch info is satisfied
*/
def fetchMessages(timeout: Long,
replicaId: Int,
fetchMinBytes: Int,
fetchInfo: immutable.Map[TopicAndPartition, PartitionFetchInfo],
responseCallback: Map[TopicAndPartition, FetchResponsePartitionData] => Unit) {
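    // replicaId >= 0 identifies a fetch coming from a follower broker; negative ids are reserved for consumers,
    // which may only read committed data (i.e. messages below the high watermark)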
val isFromFollower = replicaId >= 0
val fetchOnlyFromLeader: Boolean = replicaId != Request.DebuggingConsumerId
val fetchOnlyCommitted: Boolean = ! Request.isValidBrokerId(replicaId)
// read from local logs
val logReadResults = readFromLocalLog(fetchOnlyFromLeader, fetchOnlyCommitted, fetchInfo)
// if the fetch comes from the follower,
// update its corresponding log end offset
if(Request.isValidBrokerId(replicaId))
updateFollowerLogReadResults(replicaId, logReadResults)
// check if this fetch request can be satisfied right away
val bytesReadable = logReadResults.values.map(_.info.messageSet.sizeInBytes).sum
val errorReadingData = logReadResults.values.foldLeft(false) ((errorIncurred, readResult) =>
errorIncurred || (readResult.errorCode != ErrorMapping.NoError))
// respond immediately if 1) fetch request does not want to wait
// 2) fetch request does not require any data
// 3) has enough data to respond
// 4) some error happens while reading data
if(timeout <= 0 || fetchInfo.size <= 0 || bytesReadable >= fetchMinBytes || errorReadingData) {
val fetchPartitionData = logReadResults.mapValues(result =>
FetchResponsePartitionData(result.errorCode, result.hw, result.info.messageSet))
responseCallback(fetchPartitionData)
} else {
// construct the fetch results from the read results
val fetchPartitionStatus = logReadResults.map { case (topicAndPartition, result) =>
(topicAndPartition, FetchPartitionStatus(result.info.fetchOffsetMetadata, fetchInfo.get(topicAndPartition).get))
}
val fetchMetadata = FetchMetadata(fetchMinBytes, fetchOnlyFromLeader, fetchOnlyCommitted, isFromFollower, fetchPartitionStatus)
val delayedFetch = new DelayedFetch(timeout, fetchMetadata, this, responseCallback)
// create a list of (topic, partition) pairs to use as keys for this delayed fetch operation
val delayedFetchKeys = fetchPartitionStatus.keys.map(new TopicPartitionOperationKey(_)).toSeq
// try to complete the request immediately, otherwise put it into the purgatory;
// this is because while the delayed fetch operation is being created, new requests
// may arrive and hence make this operation completable.
delayedFetchPurgatory.tryCompleteElseWatch(delayedFetch, delayedFetchKeys)
}
}
/**
   * Read from the local log of each requested topic/partition at its given offset, up to fetchSize bytes per partition
*/
def readFromLocalLog(fetchOnlyFromLeader: Boolean,
readOnlyCommitted: Boolean,
readPartitionInfo: Map[TopicAndPartition, PartitionFetchInfo]): Map[TopicAndPartition, LogReadResult] = {
readPartitionInfo.map { case (TopicAndPartition(topic, partition), PartitionFetchInfo(offset, fetchSize)) =>
BrokerTopicStats.getBrokerTopicStats(topic).totalFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().totalFetchRequestRate.mark()
val partitionDataAndOffsetInfo =
try {
trace("Fetching log segment for topic %s, partition %d, offset %d, size %d".format(topic, partition, offset, fetchSize))
// decide whether to only fetch from leader
val localReplica = if (fetchOnlyFromLeader)
getLeaderReplicaIfLocal(topic, partition)
else
getReplicaOrException(topic, partition)
// decide whether to only fetch committed data (i.e. messages below high watermark)
val maxOffsetOpt = if (readOnlyCommitted)
Some(localReplica.highWatermark.messageOffset)
else
None
/* Read the LogOffsetMetadata prior to performing the read from the log.
* We use the LogOffsetMetadata to determine if a particular replica is in-sync or not.
* Using the log end offset after performing the read can lead to a race condition
* where data gets appended to the log immediately after the replica has consumed from it
* This can cause a replica to always be out of sync.
*/
val initialLogEndOffset = localReplica.logEndOffset
val logReadInfo = localReplica.log match {
case Some(log) =>
log.read(offset, fetchSize, maxOffsetOpt)
case None =>
error("Leader for partition [%s,%d] does not have a local log".format(topic, partition))
FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty)
}
val readToEndOfLog = initialLogEndOffset.messageOffset - logReadInfo.fetchOffsetMetadata.messageOffset <= 0
LogReadResult(logReadInfo, localReplica.highWatermark.messageOffset, fetchSize, readToEndOfLog, None)
} catch {
// NOTE: Failed fetch requests metric is not incremented for known exceptions since it
          // is supposed to indicate unexpected failure of a broker in handling a fetch request
case utpe: UnknownTopicOrPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(utpe))
case nle: NotLeaderForPartitionException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(nle))
case rnae: ReplicaNotAvailableException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(rnae))
case oor : OffsetOutOfRangeException =>
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(oor))
case e: Throwable =>
BrokerTopicStats.getBrokerTopicStats(topic).failedFetchRequestRate.mark()
BrokerTopicStats.getBrokerAllTopicsStats().failedFetchRequestRate.mark()
error("Error processing fetch operation on partition [%s,%d] offset %d".format(topic, partition, offset))
LogReadResult(FetchDataInfo(LogOffsetMetadata.UnknownOffsetMetadata, MessageSet.Empty), -1L, fetchSize, false, Some(e))
}
(TopicAndPartition(topic, partition), partitionDataAndOffsetInfo)
}
}
def maybeUpdateMetadataCache(updateMetadataRequest: UpdateMetadataRequest, metadataCache: MetadataCache) {
replicaStateChangeLock synchronized {
if(updateMetadataRequest.controllerEpoch < controllerEpoch) {
val stateControllerEpochErrorMessage = ("Broker %d received update metadata request with correlation id %d from an " +
"old controller %d with epoch %d. Latest known controller epoch is %d").format(localBrokerId,
updateMetadataRequest.correlationId, updateMetadataRequest.controllerId, updateMetadataRequest.controllerEpoch,
controllerEpoch)
stateChangeLogger.warn(stateControllerEpochErrorMessage)
throw new ControllerMovedException(stateControllerEpochErrorMessage)
} else {
metadataCache.updateCache(updateMetadataRequest, localBrokerId, stateChangeLogger)
controllerEpoch = updateMetadataRequest.controllerEpoch
}
}
}
def becomeLeaderOrFollower(leaderAndISRRequest: LeaderAndIsrRequest): BecomeLeaderOrFollowerResult = {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.trace("Broker %d received LeaderAndIsr request %s correlation id %d from controller %d epoch %d for partition [%s,%d]"
.format(localBrokerId, stateInfo, leaderAndISRRequest.correlationId,
leaderAndISRRequest.controllerId, leaderAndISRRequest.controllerEpoch, topic, partition))
}
replicaStateChangeLock synchronized {
val responseMap = new mutable.HashMap[(String, Int), Short]
if (leaderAndISRRequest.controllerEpoch < controllerEpoch) {
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partition), stateInfo) =>
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d since " +
"its controller epoch %d is old. Latest known controller epoch is %d").format(localBrokerId, leaderAndISRRequest.controllerId,
leaderAndISRRequest.correlationId, leaderAndISRRequest.controllerEpoch, controllerEpoch))
}
BecomeLeaderOrFollowerResult(responseMap, Set.empty[Partition], Set.empty[Partition], ErrorMapping.StaleControllerEpochCode)
} else {
val controllerId = leaderAndISRRequest.controllerId
val correlationId = leaderAndISRRequest.correlationId
controllerEpoch = leaderAndISRRequest.controllerEpoch
// First check partition's leader epoch
val partitionState = new mutable.HashMap[Partition, PartitionStateInfo]()
leaderAndISRRequest.partitionStateInfos.foreach { case ((topic, partitionId), partitionStateInfo) =>
val partition = getOrCreatePartition(topic, partitionId)
val partitionLeaderEpoch = partition.getLeaderEpoch()
// If the leader epoch is valid record the epoch of the controller that made the leadership decision.
// This is useful while updating the isr to maintain the decision maker controller's epoch in the zookeeper path
if (partitionLeaderEpoch < partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch) {
if(partitionStateInfo.allReplicas.contains(config.brokerId))
partitionState.put(partition, partitionStateInfo)
else {
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] as itself is not in assigned replica list %s")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.allReplicas.mkString(",")))
}
} else {
// Otherwise record the error code in response
stateChangeLogger.warn(("Broker %d ignoring LeaderAndIsr request from controller %d with correlation id %d " +
"epoch %d for partition [%s,%d] since its associated leader epoch %d is old. Current leader epoch is %d")
.format(localBrokerId, controllerId, correlationId, leaderAndISRRequest.controllerEpoch,
topic, partition.partitionId, partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leaderEpoch, partitionLeaderEpoch))
responseMap.put((topic, partitionId), ErrorMapping.StaleLeaderEpochCode)
}
}
val partitionsTobeLeader = partitionState.filter { case (partition, partitionStateInfo) =>
partitionStateInfo.leaderIsrAndControllerEpoch.leaderAndIsr.leader == config.brokerId
}
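        // Every partition that passed the leader epoch check but is not led by this broker becomes a follower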
val partitionsToBeFollower = (partitionState -- partitionsTobeLeader.keys)
val partitionsBecomeLeader = if (!partitionsTobeLeader.isEmpty)
makeLeaders(controllerId, controllerEpoch, partitionsTobeLeader, leaderAndISRRequest.correlationId, responseMap)
else
Set.empty[Partition]
val partitionsBecomeFollower = if (!partitionsToBeFollower.isEmpty)
makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, leaderAndISRRequest.leaders, leaderAndISRRequest.correlationId, responseMap)
else
Set.empty[Partition]
        // We initialize the highwatermark thread after the first LeaderAndIsr request. This ensures that all the partitions
        // have been completely populated before starting the checkpointing, thereby avoiding weird race conditions.
if (!hwThreadInitialized) {
startHighWaterMarksCheckPointThread()
hwThreadInitialized = true
}
replicaFetcherManager.shutdownIdleFetcherThreads()
BecomeLeaderOrFollowerResult(responseMap, partitionsBecomeLeader, partitionsBecomeFollower, ErrorMapping.NoError)
}
}
}
/*
* Make the current broker to become leader for a given set of partitions by:
*
* 1. Stop fetchers for these partitions
* 2. Update the partition metadata in cache
* 3. Add these partitions to the leader partitions set
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it
* TODO: the above may need to be fixed later
*/
private def makeLeaders(controllerId: Int,
epoch: Int,
partitionState: Map[Partition, PartitionStateInfo],
correlationId: Int,
responseMap: mutable.Map[(String, Int), Short]): Set[Partition] = {
partitionState.foreach(state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId))))
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
try {
// First stop fetchers for all the partitions
replicaFetcherManager.removeFetcherForPartitions(partitionState.keySet.map(new TopicAndPartition(_)))
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-leader request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
// Update the partition information to be the leader
partitionState.foreach{ case (partition, partitionStateInfo) =>
partition.makeLeader(controllerId, partitionStateInfo, correlationId)}
} catch {
case e: Throwable =>
partitionState.foreach { state =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request correlationId %d received from controller %d" +
" epoch %d for partition %s").format(localBrokerId, correlationId, controllerId, epoch,
TopicAndPartition(state._1.topic, state._1.partitionId))
stateChangeLogger.error(errorMsg, e)
}
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-leader transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
partitionState.keySet
}
/*
* Make the current broker to become follower for a given set of partitions by:
*
* 1. Remove these partitions from the leader partitions set.
* 2. Mark the replicas as followers so that no more data can be added from the producer clients.
* 3. Stop fetchers for these partitions so that no more data can be added by the replica fetcher threads.
* 4. Truncate the log and checkpoint offsets for these partitions.
* 5. If the broker is not shutting down, add the fetcher to the new leaders.
*
   * The ordering of these steps ensures that the replicas in transition will not
   * take any more messages before checkpointing offsets, so that all messages before the checkpoint
   * are guaranteed to be flushed to disk
*
* If an unexpected error is thrown in this function, it will be propagated to KafkaApis where
* the error message will be set on each partition since we do not know which partition caused it
*/
private def makeFollowers(controllerId: Int,
epoch: Int,
partitionState: Map[Partition, PartitionStateInfo],
leaders: Set[BrokerEndPoint],
correlationId: Int,
responseMap: mutable.Map[(String, Int), Short]) : Set[Partition] = {
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d handling LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"starting the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
for (partition <- partitionState.keys)
responseMap.put((partition.topic, partition.partitionId), ErrorMapping.NoError)
val partitionsToMakeFollower: mutable.Set[Partition] = mutable.Set()
try {
// TODO: Delete leaders from LeaderAndIsrRequest
partitionState.foreach{ case (partition, partitionStateInfo) =>
val leaderIsrAndControllerEpoch = partitionStateInfo.leaderIsrAndControllerEpoch
val newLeaderBrokerId = leaderIsrAndControllerEpoch.leaderAndIsr.leader
leaders.find(_.id == newLeaderBrokerId) match {
// Only change partition state when the leader is available
case Some(leaderBroker) =>
if (partition.makeFollower(controllerId, partitionStateInfo, correlationId))
partitionsToMakeFollower += partition
else
stateChangeLogger.info(("Broker %d skipped the become-follower state change after marking its partition as follower with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since the new leader %d is the same as the old leader")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
case None =>
// The leader broker should always be present in the leaderAndIsrRequest.
// If not, we should record the error message and abort the transition process for this partition
stateChangeLogger.error(("Broker %d received LeaderAndIsrRequest with correlation id %d from controller" +
" %d epoch %d for partition [%s,%d] but cannot become follower since the new leader %d is unavailable.")
.format(localBrokerId, correlationId, controllerId, leaderIsrAndControllerEpoch.controllerEpoch,
partition.topic, partition.partitionId, newLeaderBrokerId))
// Create the local replica even if the leader is unavailable. This is required to ensure that we include
// the partition's high watermark in the checkpoint file (see KAFKA-1647)
partition.getOrCreateReplica()
}
}
replicaFetcherManager.removeFetcherForPartitions(partitionsToMakeFollower.map(new TopicAndPartition(_)))
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d stopped fetchers as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition %s")
.format(localBrokerId, controllerId, epoch, correlationId, TopicAndPartition(partition.topic, partition.partitionId)))
}
logManager.truncateTo(partitionsToMakeFollower.map(partition => (new TopicAndPartition(partition), partition.getOrCreateReplica().highWatermark.messageOffset)).toMap)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d truncated logs and checkpointed recovery boundaries for partition [%s,%d] as part of " +
"become-follower request with correlation id %d from controller %d epoch %d").format(localBrokerId,
partition.topic, partition.partitionId, correlationId, controllerId, epoch))
}
if (isShuttingDown.get()) {
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d skipped the adding-fetcher step of the become-follower state change with correlation id %d from " +
"controller %d epoch %d for partition [%s,%d] since it is shutting down").format(localBrokerId, correlationId,
controllerId, epoch, partition.topic, partition.partitionId))
}
}
else {
// we do not need to check if the leader exists again since this has been done at the beginning of this process
val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map(partition =>
new TopicAndPartition(partition) -> BrokerAndInitialOffset(
leaders.find(_.id == partition.leaderReplicaIdOpt.get).get,
partition.getReplica().get.logEndOffset.messageOffset)).toMap
replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset)
partitionsToMakeFollower.foreach { partition =>
stateChangeLogger.trace(("Broker %d started fetcher to new leader as part of become-follower request from controller " +
"%d epoch %d with correlation id %d for partition [%s,%d]")
.format(localBrokerId, controllerId, epoch, correlationId, partition.topic, partition.partitionId))
}
}
} catch {
case e: Throwable =>
val errorMsg = ("Error on broker %d while processing LeaderAndIsr request with correlationId %d received from controller %d " +
"epoch %d").format(localBrokerId, correlationId, controllerId, epoch)
stateChangeLogger.error(errorMsg, e)
// Re-throw the exception for it to be caught in KafkaApis
throw e
}
partitionState.foreach { state =>
stateChangeLogger.trace(("Broker %d completed LeaderAndIsr request correlationId %d from controller %d epoch %d " +
"for the become-follower transition for partition %s")
.format(localBrokerId, correlationId, controllerId, epoch, TopicAndPartition(state._1.topic, state._1.partitionId)))
}
partitionsToMakeFollower
}
private def maybeShrinkIsr(): Unit = {
trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR")
allPartitions.values.foreach(partition => partition.maybeShrinkIsr(config.replicaLagTimeMaxMs))
}
private def updateFollowerLogReadResults(replicaId: Int, readResults: Map[TopicAndPartition, LogReadResult]) {
debug("Recording follower broker %d log read results: %s ".format(replicaId, readResults))
readResults.foreach { case (topicAndPartition, readResult) =>
getPartition(topicAndPartition.topic, topicAndPartition.partition) match {
case Some(partition) =>
partition.updateReplicaLogReadResult(replicaId, readResult)
          // for producer requests with acks = -1, we need to check
          // if they can be unblocked after some follower's log end offsets have moved
tryCompleteDelayedProduce(new TopicPartitionOperationKey(topicAndPartition))
case None =>
warn("While recording the replica LEO, the partition %s hasn't been created.".format(topicAndPartition))
}
}
}
private def getLeaderPartitions() : List[Partition] = {
allPartitions.values.filter(_.leaderReplicaIfLocal().isDefined).toList
}
// Flushes the highwatermark value for all partitions to the highwatermark file
def checkpointHighWatermarks() {
val replicas = allPartitions.values.map(_.getReplica(config.brokerId)).collect{case Some(replica) => replica}
val replicasByDir = replicas.filter(_.log.isDefined).groupBy(_.log.get.dir.getParentFile.getAbsolutePath)
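    // One checkpoint file is kept per data directory, so write the high watermarks grouped by the log's parent directory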
for((dir, reps) <- replicasByDir) {
val hwms = reps.map(r => (new TopicAndPartition(r) -> r.highWatermark.messageOffset)).toMap
try {
highWatermarkCheckpoints(dir).write(hwms)
} catch {
case e: IOException =>
fatal("Error writing to highwatermark file: ", e)
Runtime.getRuntime().halt(1)
}
}
}
  // High watermarks are checkpointed on shutdown; this is skipped only under unit tests
def shutdown(checkpointHW: Boolean = true) {
info("Shutting down")
replicaFetcherManager.shutdown()
delayedFetchPurgatory.shutdown()
delayedProducePurgatory.shutdown()
if (checkpointHW)
checkpointHighWatermarks()
info("Shut down completely")
}
}
| vkroz/kafka | core/src/main/scala/kafka/server/ReplicaManager.scala | Scala | apache-2.0 | 45,229 |
/**
* Copyright (C) 2019 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.fr.persistence.test
import org.orbeon.oxf.fr.persistence.db.Connect
import org.orbeon.oxf.fr.persistence.http.HttpCall
import org.orbeon.oxf.fr.persistence.relational.Version.Specific
import org.orbeon.oxf.http.HttpMethod.POST
import org.orbeon.oxf.test.{DocumentTestBase, ResourceManagerSupport, XFormsSupport}
import org.orbeon.oxf.util.{CoreCrossPlatformSupport, IndentedLogger, LoggerFactory}
import org.orbeon.oxf.xml.dom.Converter._
import org.scalatest.funspec.AnyFunSpecLike
class SearchTest
extends DocumentTestBase
with XFormsSupport
with ResourceManagerSupport
with AnyFunSpecLike {
private implicit val Logger = new IndentedLogger(LoggerFactory.createLogger(classOf[SearchTest]), true)
private implicit val coreCrossPlatformSupport = CoreCrossPlatformSupport
describe("Search API") {
it("returns an empty result when there are no documents") {
withTestExternalContext { implicit externalContext =>
Connect.withOrbeonTables("form definition") { (connection, provider) =>
val FormURL = HttpCall.crudURLPrefix (provider) + "form/form.xhtml"
val DataURL = HttpCall.crudURLPrefix (provider) + "data/123/data.xml"
val SearchURL = HttpCall.searchURLPrefix(provider)
val data =
<form>
<my-section>
<my-field>42</my-field>
</my-section>
</form>.toDocument
val searchRequest =
<search>
<query/>
<drafts>include</drafts>
<page-size>10</page-size>
<page-number>1</page-number>
<lang>en</lang>
</search>.toDocument
val searchResult =
<documents search-total="0"/>.toDocument
HttpCall.assertCall(
HttpCall.SolicitedRequest(
path = SearchURL,
version = Specific(1),
method = POST,
body = Some(HttpCall.XML(searchRequest))
),
HttpCall.ExpectedResponse(
code = 200,
body = Some(HttpCall.XML(searchResult))
)
)
}
}
}
}
}
| orbeon/orbeon-forms | form-runner/jvm/src/db/scala/org/orbeon/oxf/fr/persistence/test/SearchTest.scala | Scala | lgpl-2.1 | 2,860 |
package org.genericConfig.admin.models.config
import org.genericConfig.admin.controllers.websocket.WebClient
import org.genericConfig.admin.models.CommonFunction
import org.genericConfig.admin.models.common.ConfigIdHashNotExist
import org.genericConfig.admin.shared.Actions
import org.genericConfig.admin.shared.config.{ConfigDTO, ConfigParamsDTO}
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeAfterAll
import play.api.Logger
import play.api.libs.json.{JsResult, Json}
/**
* Copyright (C) 2016 Gennadi Heimann [email protected]
*
* Created by Gennadi Heimann 02.05.2018
*/
//@RunWith(classOf[JUnitRunner])
class DeleteConfigWithDefectIdSpecs extends Specification
with BeforeAfterAll
with CommonFunction{
val wC: WebClient = WebClient.init
val username = "user_v016_5"
var deleteConfigResult : JsResult[ConfigDTO] = _
def beforeAll(): Unit = {
before()
}
def afterAll(): Unit = {
// Logger.info("Deleting Configs : " + deleteAllConfigs(this.username))
}
"Der Binutzer versucht die Konfiguration mit defkten Id zu loeschen" >> {
"action = DELETE_CONFIG" >> {
deleteConfigResult.get.action === Actions.DELETE_CONFIG
}
"result.errors = ConfigIdHashNotExist" >> {
deleteConfigResult.get.result.get.errors.get.head.name === ConfigIdHashNotExist().name
}
}
private def before(): Unit = {
val userId : String = createUser(username, wC)
Logger.info(userId)
val configId : String = createConfig(userId,"//http://contig1/user_v016_5")
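    // The request below deliberately uses a bogus configId ("1111"), so the API is expected to answer with ConfigIdHashNotExist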
val deleteConfigParams = Json.toJson(
ConfigDTO(
action = Actions.DELETE_CONFIG,
params = Some(ConfigParamsDTO(
userId = None,
configId = Some("1111"),
configUrl = None,
configurationCourse = None,
update = None
)),
result = None
)
)
Logger.info("<- " + deleteConfigParams)
deleteConfigResult = Json.fromJson[ConfigDTO](wC.handleMessage(deleteConfigParams))
Logger.info("-> " + deleteConfigResult)
}
} | gennadij/admin | server/test/org/genericConfig/admin/models/config/DeleteConfigWithDefectIdSpecs.scala | Scala | apache-2.0 | 2,123 |
/** Copyright (C) 2012 Edge System Design, LLC. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* Author(s): Ricky Elrod <[email protected]>
*/
package com.edgesysdesign.simpleradio.tests
import com.edgesysdesign.simpleradio._
import junit.framework.Assert._
import _root_.android.test.AndroidTestCase
import _root_.android.test.ActivityInstrumentationTestCase2
class AndroidTests extends AndroidTestCase {
def testPackageIsCorrect() {
assertEquals("com.edgesysdesign.simpleradio", getContext.getPackageName)
}
}
class ActivityTests extends ActivityInstrumentationTestCase2(classOf[MainActivity]) {
def testHelloWorldIsShown() {
val activity = getActivity
val textview = activity.findView(TR.textview)
assertEquals(textview.getText, "hello, world!")
}
}
| edge-sys-design/simple-radio | tests/src/main/scala/Tests.scala | Scala | gpl-2.0 | 1,441 |
package ca.uqam.euler.nicolas
/**
* Starting with the number 1 and moving to the right
* in a clockwise direction a 5 by 5 spiral is formed
* as follows:
*
* 21 22 23 24 25
* 20 7 8 9 10
* 19 6 1 2 11
* 18 5 4 3 12
* 17 16 15 14 13
*
* It can be verified that the sum of the numbers on the
* diagonals is 101. What is the sum of the numbers on
* the diagonals in a 1001 by 1001 spiral formed in the
* same way?
*
*/
object Problem028 {
case class V(x: Int, y: Int) { def +(v: V) = V(x + v.x, y + v.y) }
type M = IndexedSeq[IndexedSeq[Int]]
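  // Infinite stream that cycles through the four unit direction vectors used to walk the spiral outward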
def dirs = {
def f(unused: Seq[V], used: Seq[V]): Stream[V] =
if (unused.nonEmpty)
unused.head #:: f(unused.tail, unused.head +: used)
else
f(used.reverse, Nil)
f(Seq(V(-1, 0), V(0, 1), V(1, 0), V(0, -1)), Nil)
}
def makeSpiral(size: Int) = {
assert(size % 2 == 1)
def look(m: M, p: V) =
if (m.isDefinedAt(p.x))
if (m(p.x).isDefinedAt(p.y))
Some(m(p.x)(p.y))
else None
else None
def updated(m: M, pos: V, i: Int) =
m.updated(pos.x, m(pos.x).updated(pos.y, i))
def f(i: Int, p: V, dirs: Stream[V], m: M): M = {
val q = p + dirs.tail.head
if (look(m, q) == Some(0))
f(i + 1, q, dirs.tail, updated(m, p, i))
else if (look(m, p) == None)
m
else
f(i + 1, p + dirs.head, dirs, updated(m, p, i))
}
f(1, V(size / 2, size / 2), dirs,
IndexedSeq.tabulate(size, size)((x, y) => 0))
}
def diagSum(m: M) =
(0 until m.size).map { x =>
m(x)(x) + m(x)(m.size - x - 1)
}.sum - 1
def main(args: Array[String]) = Answer {
diagSum(makeSpiral(1001))
}
} | nicolaspayette/project-euler | src/main/scala/ca/uqam/euler/nicolas/Problem028.scala | Scala | mit | 1,702 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.utils
import kafka.cluster.{Broker, Cluster}
import kafka.consumer.TopicCount
import org.I0Itec.zkclient.{IZkDataListener, ZkClient}
import org.I0Itec.zkclient.exception.{ZkNodeExistsException, ZkNoNodeException, ZkMarshallingError}
import org.I0Itec.zkclient.serialize.ZkSerializer
import scala.collection._
import kafka.api.LeaderAndIsr
import mutable.HashMap
import org.apache.zookeeper.data.Stat
import java.util.concurrent.locks.{ReentrantLock, Condition}
import kafka.admin._
import kafka.common.{TopicAndPartition, KafkaException, NoEpochForPartitionException}
import kafka.controller.{LeaderIsrAndControllerEpoch, PartitionAndReplica, ReassignedPartitionsContext}
object ZkUtils extends Logging {
val ConsumersPath = "/consumers"
val BrokerIdsPath = "/brokers/ids"
val BrokerTopicsPath = "/brokers/topics"
val ControllerPath = "/controller"
val ControllerEpochPath = "/controllerEpoch"
val ReassignPartitionsPath = "/admin/reassign_partitions"
val PreferredReplicaLeaderElectionPath = "/admin/preferred_replica_election"
def getTopicPath(topic: String): String ={
BrokerTopicsPath + "/" + topic
}
def getTopicPartitionsPath(topic: String): String ={
getTopicPath(topic) + "/partitions"
}
def getController(zkClient: ZkClient): Int= {
readDataMaybeNull(zkClient, ControllerPath)._1 match {
case Some(controller) => controller.toInt
case None => throw new KafkaException("Controller doesn't exist")
}
}
def getTopicPartitionPath(topic: String, partitionId: Int): String ={
getTopicPartitionsPath(topic) + "/" + partitionId
}
def getTopicPartitionLeaderAndIsrPath(topic: String, partitionId: Int): String ={
getTopicPartitionPath(topic, partitionId) + "/" + "leaderAndISR"
}
def getSortedBrokerList(zkClient: ZkClient): Seq[String] ={
ZkUtils.getChildren(zkClient, ZkUtils.BrokerIdsPath).sorted
}
def getAllLiveBrokerIds(zkClient: ZkClient): Set[Int] = {
ZkUtils.getChildren(zkClient, BrokerIdsPath).map(_.toInt).toSet
}
def getAllBrokersInCluster(zkClient: ZkClient): Seq[Broker] = {
val brokerIds = ZkUtils.getChildren(zkClient, ZkUtils.BrokerIdsPath).sorted
brokerIds.map(_.toInt).map(getBrokerInfo(zkClient, _)).filter(_.isDefined).map(_.get)
}
def getLeaderIsrAndEpochForPartition(zkClient: ZkClient, topic: String, partition: Int):Option[LeaderIsrAndControllerEpoch] = {
val leaderAndIsrPath = getTopicPartitionLeaderAndIsrPath(topic, partition)
val leaderAndIsrInfo = readDataMaybeNull(zkClient, leaderAndIsrPath)
val leaderAndIsrOpt = leaderAndIsrInfo._1
val stat = leaderAndIsrInfo._2
leaderAndIsrOpt match {
case Some(leaderAndIsrStr) => parseLeaderAndIsr(leaderAndIsrStr, topic, partition, stat)
case None => None
}
}
def getLeaderAndIsrForPartition(zkClient: ZkClient, topic: String, partition: Int):Option[LeaderAndIsr] = {
getLeaderIsrAndEpochForPartition(zkClient, topic, partition).map(_.leaderAndIsr)
}
def parseLeaderAndIsr(leaderAndIsrStr: String, topic: String, partition: Int, stat: Stat)
: Option[LeaderIsrAndControllerEpoch] = {
Json.parseFull(leaderAndIsrStr) match {
case Some(m) =>
val leader = m.asInstanceOf[Map[String, String]].get("leader").get.toInt
val epoch = m.asInstanceOf[Map[String, String]].get("leaderEpoch").get.toInt
val isrString = m.asInstanceOf[Map[String, String]].get("ISR").get
val controllerEpoch = m.asInstanceOf[Map[String, String]].get("controllerEpoch").get.toInt
val isr = Utils.parseCsvList(isrString).map(r => r.toInt)
val zkPathVersion = stat.getVersion
debug("Leader %d, Epoch %d, Isr %s, Zk path version %d for topic %s and partition %d".format(leader, epoch,
isr.toString(), zkPathVersion, topic, partition))
Some(LeaderIsrAndControllerEpoch(LeaderAndIsr(leader, epoch, isr.toList, zkPathVersion), controllerEpoch))
case None => None
}
}
def getLeaderForPartition(zkClient: ZkClient, topic: String, partition: Int): Option[Int] = {
val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
leaderAndIsrOpt match {
case Some(leaderAndIsr) =>
Json.parseFull(leaderAndIsr) match {
case Some(m) =>
Some(m.asInstanceOf[Map[String, String]].get("leader").get.toInt)
case None => None
}
case None => None
}
}
/**
* This API should read the epoch in the ISR path. It is sufficient to read the epoch in the ISR path, since if the
* leader fails after updating epoch in the leader path and before updating epoch in the ISR path, effectively some
* other broker will retry becoming leader with the same new epoch value.
*/
def getEpochForPartition(zkClient: ZkClient, topic: String, partition: Int): Int = {
val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
leaderAndIsrOpt match {
case Some(leaderAndIsr) =>
Json.parseFull(leaderAndIsr) match {
case None => throw new NoEpochForPartitionException("No epoch, leaderAndISR data for topic %s partition %d is invalid".format(topic, partition))
case Some(m) => m.asInstanceOf[Map[String, String]].get("leaderEpoch").get.toInt
}
case None => throw new NoEpochForPartitionException("No epoch, ISR path for topic %s partition %d is empty"
.format(topic, partition))
}
}
/**
* Gets the in-sync replicas (ISR) for a specific topic and partition
*/
def getInSyncReplicasForPartition(zkClient: ZkClient, topic: String, partition: Int): Seq[Int] = {
val leaderAndIsrOpt = readDataMaybeNull(zkClient, getTopicPartitionLeaderAndIsrPath(topic, partition))._1
leaderAndIsrOpt match {
case Some(leaderAndIsr) =>
Json.parseFull(leaderAndIsr) match {
case Some(m) =>
val isrString = m.asInstanceOf[Map[String, String]].get("ISR").get
Utils.parseCsvList(isrString).map(r => r.toInt)
case None => Seq.empty[Int]
}
case None => Seq.empty[Int]
}
}
/**
* Gets the assigned replicas (AR) for a specific topic and partition
*/
def getReplicasForPartition(zkClient: ZkClient, topic: String, partition: Int): Seq[Int] = {
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
Json.parseFull(jsonPartitionMap) match {
case Some(m) => m.asInstanceOf[Map[String, List[String]]].get(partition.toString) match {
case None => Seq.empty[Int]
case Some(seq) => seq.map(_.toInt)
}
case None => Seq.empty[Int]
}
case None => Seq.empty[Int]
}
}
def isPartitionOnBroker(zkClient: ZkClient, topic: String, partition: Int, brokerId: Int): Boolean = {
val replicas = getReplicasForPartition(zkClient, topic, partition)
debug("The list of replicas for topic %s, partition %d is %s".format(topic, partition, replicas))
replicas.contains(brokerId.toString)
}
def registerBrokerInZk(zkClient: ZkClient, id: Int, host: String, port: Int) {
val brokerIdPath = ZkUtils.BrokerIdsPath + "/" + id
val broker = new Broker(id, host, port)
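    // Registration is an ephemeral znode, so it disappears automatically when this broker's ZooKeeper session expires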
try {
createEphemeralPathExpectConflict(zkClient, brokerIdPath, broker.getZkString)
} catch {
case e: ZkNodeExistsException =>
throw new RuntimeException("A broker is already registered on the path " + brokerIdPath + ". This probably " + "indicates that you either have configured a brokerid that is already in use, or " + "else you have shutdown this broker and restarted it faster than the zookeeper " + "timeout so it appears to be re-registering.")
}
info("Registering broker " + brokerIdPath + " succeeded with " + broker)
}
def getConsumerPartitionOwnerPath(group: String, topic: String, partition: Int): String = {
val topicDirs = new ZKGroupTopicDirs(group, topic)
topicDirs.consumerOwnerDir + "/" + partition
}
def leaderAndIsrZkData(leaderAndIsr: LeaderAndIsr, controllerEpoch: Int): String = {
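    // The field names written here must stay in sync with the ones read back in parseLeaderAndIsr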
val jsonDataMap = new HashMap[String, String]
jsonDataMap.put("leader", leaderAndIsr.leader.toString)
jsonDataMap.put("leaderEpoch", leaderAndIsr.leaderEpoch.toString)
jsonDataMap.put("ISR", leaderAndIsr.isr.mkString(","))
jsonDataMap.put("controllerEpoch", controllerEpoch.toString)
Utils.stringMapToJson(jsonDataMap)
}
/**
* make sure a persistent path exists in ZK. Create the path if not exist.
*/
def makeSurePersistentPathExists(client: ZkClient, path: String) {
if (!client.exists(path))
client.createPersistent(path, true) // won't throw NoNodeException or NodeExistsException
}
/**
* create the parent path
*/
private def createParentPath(client: ZkClient, path: String): Unit = {
val parentDir = path.substring(0, path.lastIndexOf('/'))
if (parentDir.length != 0)
client.createPersistent(parentDir, true)
}
/**
* Create an ephemeral node with the given path and data. Create parents if necessary.
*/
private def createEphemeralPath(client: ZkClient, path: String, data: String): Unit = {
try {
client.createEphemeral(path, data)
} catch {
case e: ZkNoNodeException => {
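        // the parent path does not exist yet; create it and retry the ephemeral node creation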
createParentPath(client, path)
client.createEphemeral(path, data)
}
}
}
/**
* Create an ephemeral node with the given path and data.
* Throw NodeExistException if node already exists.
*/
def createEphemeralPathExpectConflict(client: ZkClient, path: String, data: String): Unit = {
try {
createEphemeralPath(client, path, data)
} catch {
case e: ZkNodeExistsException => {
// this can happen when there is connection loss; make sure the data is what we intend to write
var storedData: String = null
try {
storedData = readData(client, path)._1
} catch {
          case e1: ZkNoNodeException => // the node disappeared; treat as if the node existed and let the caller handle this
case e2 => throw e2
}
if (storedData == null || storedData != data) {
info("conflict in " + path + " data: " + data + " stored data: " + storedData)
throw e
} else {
// otherwise, the creation succeeded, return normally
info(path + " exists with value " + data + " during connection loss; this is ok")
}
}
case e2 => throw e2
}
}
/**
* Create an persistent node with the given path and data. Create parents if necessary.
*/
def createPersistentPath(client: ZkClient, path: String, data: String = ""): Unit = {
try {
client.createPersistent(path, data)
} catch {
case e: ZkNoNodeException => {
createParentPath(client, path)
client.createPersistent(path, data)
}
}
}
def createSequentialPersistentPath(client: ZkClient, path: String, data: String = ""): String = {
client.createPersistentSequential(path, data)
}
/**
* Update the value of a persistent node with the given path and data.
   * create parent directory if necessary. Never throw NodeExistsException.
* Return the updated path zkVersion
*/
def updatePersistentPath(client: ZkClient, path: String, data: String): Int = {
var stat: Stat = null
try {
stat = client.writeData(path, data)
return stat.getVersion
} catch {
case e: ZkNoNodeException => {
createParentPath(client, path)
try {
client.createPersistent(path, data)
// When the new path is created, its zkVersion always starts from 0
return 0
} catch {
case e: ZkNodeExistsException =>
stat = client.writeData(path, data)
return stat.getVersion
case e2 => throw e2
}
}
case e2 => throw e2
}
}
/**
* Conditional update the persistent path data, return (true, newVersion) if it succeeds, otherwise (the path doesn't
* exist, the current version is not the expected version, etc.) return (false, -1)
*/
def conditionalUpdatePersistentPath(client: ZkClient, path: String, data: String, expectVersion: Int): (Boolean, Int) = {
try {
val stat = client.writeData(path, data, expectVersion)
info("Conditional update of zkPath %s with value %s and expected version %d succeeded, returning the new version: %d"
.format(path, data, expectVersion, stat.getVersion))
(true, stat.getVersion)
} catch {
case e: Exception =>
error("Conditional update of zkPath %s with data %s and expected version %d failed".format(path, data,
expectVersion), e)
(false, -1)
}
}
/**
* Conditional update the persistent path data, return (true, newVersion) if it succeeds, otherwise (the current
* version is not the expected version, etc.) return (false, -1). If path doesn't exist, throws ZkNoNodeException
*/
def conditionalUpdatePersistentPathIfExists(client: ZkClient, path: String, data: String, expectVersion: Int): (Boolean, Int) = {
try {
val stat = client.writeData(path, data, expectVersion)
info("Conditional update of zkPath %s with value %s and expected version %d succeeded, returning the new version: %d"
.format(path, data, expectVersion, stat.getVersion))
(true, stat.getVersion)
} catch {
case nne: ZkNoNodeException => throw nne
case e: Exception =>
error("Conditional update of zkPath %s with data %s and expected version %d failed".format(path, data,
expectVersion), e)
(false, -1)
}
}
/**
   * Update the value of an ephemeral node with the given path and data.
   * create parent directory if necessary. Never throw NodeExistsException.
*/
def updateEphemeralPath(client: ZkClient, path: String, data: String): Unit = {
try {
client.writeData(path, data)
} catch {
case e: ZkNoNodeException => {
createParentPath(client, path)
client.createEphemeral(path, data)
}
case e2 => throw e2
}
}
def deletePath(client: ZkClient, path: String): Boolean = {
try {
client.delete(path)
} catch {
case e: ZkNoNodeException =>
// this can happen during a connection loss event, return normally
info(path + " deleted during connection loss; this is ok")
false
case e2 => throw e2
}
}
def deletePathRecursive(client: ZkClient, path: String) {
try {
client.deleteRecursive(path)
} catch {
case e: ZkNoNodeException =>
// this can happen during a connection loss event, return normally
info(path + " deleted during connection loss; this is ok")
case e2 => throw e2
}
}
def maybeDeletePath(zkUrl: String, dir: String) {
try {
val zk = new ZkClient(zkUrl, 30*1000, 30*1000, ZKStringSerializer)
zk.deleteRecursive(dir)
zk.close()
} catch {
case _ => // swallow
}
}
def readData(client: ZkClient, path: String): (String, Stat) = {
val stat: Stat = new Stat()
val dataStr: String = client.readData(path, stat)
(dataStr, stat)
}
def readDataMaybeNull(client: ZkClient, path: String): (Option[String], Stat) = {
val stat: Stat = new Stat()
val dataAndStat = try {
(Some(client.readData(path, stat)), stat)
} catch {
case e: ZkNoNodeException =>
(None, stat)
case e2 => throw e2
}
dataAndStat
}
def getChildren(client: ZkClient, path: String): Seq[String] = {
import scala.collection.JavaConversions._
// triggers implicit conversion from java list to scala Seq
client.getChildren(path)
}
def getChildrenParentMayNotExist(client: ZkClient, path: String): Seq[String] = {
import scala.collection.JavaConversions._
// triggers implicit conversion from java list to scala Seq
try {
client.getChildren(path)
} catch {
case e: ZkNoNodeException => return Nil
case e2 => throw e2
}
}
/**
* Check if the given path exists
*/
def pathExists(client: ZkClient, path: String): Boolean = {
client.exists(path)
}
def getLastPart(path : String) : String = path.substring(path.lastIndexOf('/') + 1)
def getCluster(zkClient: ZkClient) : Cluster = {
val cluster = new Cluster
val nodes = getChildrenParentMayNotExist(zkClient, BrokerIdsPath)
for (node <- nodes) {
val brokerZKString = readData(zkClient, BrokerIdsPath + "/" + node)._1
cluster.add(Broker.createBroker(node.toInt, brokerZKString))
}
cluster
}
def getReplicaAssignmentForTopics(zkClient: ZkClient, topics: Iterator[String]):
mutable.Map[(String, Int), Seq[Int]] = {
val ret = new mutable.HashMap[(String, Int), Seq[Int]]
topics.foreach { topic =>
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
Json.parseFull(jsonPartitionMap) match {
case Some(m) =>
val replicaMap = m.asInstanceOf[Map[String, Seq[String]]]
for((partition, replicas) <- replicaMap){
ret.put((topic, partition.toInt), replicas.map(_.toInt))
debug("Replicas assigned to topic [%s], partition [%s] are [%s]".format(topic, partition, replicas))
}
case None =>
}
case None =>
}
}
ret
}
def getPartitionLeaderAndIsrForTopics(zkClient: ZkClient, topics: Seq[String]):
mutable.Map[TopicAndPartition, LeaderIsrAndControllerEpoch] = {
val ret = new mutable.HashMap[TopicAndPartition, LeaderIsrAndControllerEpoch]
val partitionsForTopics = getPartitionsForTopics(zkClient, topics)
for((topic, partitions) <- partitionsForTopics) {
for(partition <- partitions) {
ZkUtils.getLeaderIsrAndEpochForPartition(zkClient, topic, partition.toInt) match {
case Some(leaderIsrAndControllerEpoch) => ret.put(TopicAndPartition(topic, partition.toInt), leaderIsrAndControllerEpoch)
case None =>
}
}
}
ret
}
def getReplicaAssignmentForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[TopicAndPartition, Seq[Int]] = {
val ret = new mutable.HashMap[TopicAndPartition, Seq[Int]]
topics.foreach { topic =>
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
Json.parseFull(jsonPartitionMap) match {
case Some(m) =>
val replicaMap = m.asInstanceOf[Map[String, Seq[String]]]
for((partition, replicas) <- replicaMap){
ret.put(TopicAndPartition(topic, partition.toInt), replicas.map(_.toInt))
debug("Replicas assigned to topic [%s], partition [%s] are [%s]".format(topic, partition, replicas))
}
case None =>
}
case None =>
}
}
ret
}
def getPartitionAssignmentForTopics(zkClient: ZkClient, topics: Seq[String]):
mutable.Map[String, collection.Map[Int, Seq[Int]]] = {
val ret = new mutable.HashMap[String, Map[Int, Seq[Int]]]()
topics.foreach{ topic =>
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, getTopicPath(topic))._1
val partitionMap = jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
Json.parseFull(jsonPartitionMap) match {
case Some(m) =>
val m1 = m.asInstanceOf[Map[String, Seq[String]]]
m1.map(p => (p._1.toInt, p._2.map(_.toInt)))
case None => Map[Int, Seq[Int]]()
}
case None => Map[Int, Seq[Int]]()
}
debug("Partition map for /brokers/topics/%s is %s".format(topic, partitionMap))
ret += (topic -> partitionMap)
}
ret
}
def getReplicaAssignmentFromPartitionAssignment(topicPartitionAssignment: mutable.Map[String, collection.Map[Int, Seq[Int]]]):
mutable.Map[(String, Int), Seq[Int]] = {
val ret = new mutable.HashMap[(String, Int), Seq[Int]]
for((topic, partitionAssignment) <- topicPartitionAssignment){
for((partition, replicaAssignment) <- partitionAssignment){
ret.put((topic, partition), replicaAssignment)
}
}
ret
}
def getPartitionsForTopics(zkClient: ZkClient, topics: Seq[String]): mutable.Map[String, Seq[Int]] = {
getPartitionAssignmentForTopics(zkClient, topics).map
{ topicAndPartitionMap =>
val topic = topicAndPartitionMap._1
val partitionMap = topicAndPartitionMap._2
debug("partition assignment of /brokers/topics/%s is %s".format(topic, partitionMap))
(topic -> partitionMap.keys.toSeq.sortWith((s,t) => s < t))
}
}
def getPartitionsAssignedToBroker(zkClient: ZkClient, topics: Seq[String], brokerId: Int):
Seq[(String, Int)] = {
val topicsAndPartitions = getPartitionAssignmentForTopics(zkClient, topics)
topicsAndPartitions.map { topicAndPartitionMap =>
val topic = topicAndPartitionMap._1
val partitionMap = topicAndPartitionMap._2
val relevantPartitionsMap = partitionMap.filter( m => m._2.contains(brokerId) )
val relevantPartitions = relevantPartitionsMap.map(_._1)
for(relevantPartition <- relevantPartitions) yield {
(topic, relevantPartition)
}
}.flatten[(String, Int)].toSeq
}
def getPartitionsBeingReassigned(zkClient: ZkClient): Map[TopicAndPartition, ReassignedPartitionsContext] = {
// read the partitions and their new replica list
val jsonPartitionMapOpt = readDataMaybeNull(zkClient, ReassignPartitionsPath)._1
jsonPartitionMapOpt match {
case Some(jsonPartitionMap) =>
val reassignedPartitions = parsePartitionReassignmentData(jsonPartitionMap)
reassignedPartitions.map { p =>
val newReplicas = p._2
(p._1 -> new ReassignedPartitionsContext(newReplicas))
}
case None => Map.empty[TopicAndPartition, ReassignedPartitionsContext]
}
}
def parsePartitionReassignmentData(jsonData: String):Map[TopicAndPartition, Seq[Int]] = {
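    // Each key of the JSON map is a "topic,partition" string; the value is the new replica list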
Json.parseFull(jsonData) match {
case Some(m) =>
val replicaMap = m.asInstanceOf[Map[String, Seq[String]]]
replicaMap.map { reassignedPartitions =>
val topic = reassignedPartitions._1.split(",").head
val partition = reassignedPartitions._1.split(",").last.toInt
val newReplicas = reassignedPartitions._2.map(_.toInt)
TopicAndPartition(topic, partition) -> newReplicas
}
case None => Map.empty[TopicAndPartition, Seq[Int]]
}
}
def updatePartitionReassignmentData(zkClient: ZkClient, partitionsToBeReassigned: Map[TopicAndPartition, Seq[Int]]) {
val zkPath = ZkUtils.ReassignPartitionsPath
partitionsToBeReassigned.size match {
case 0 => // need to delete the /admin/reassign_partitions path
deletePath(zkClient, zkPath)
info("No more partitions need to be reassigned. Deleting zk path %s".format(zkPath))
case _ =>
val jsonData = Utils.mapToJson(partitionsToBeReassigned.map(p =>
("%s,%s".format(p._1.topic, p._1.partition)) -> p._2.map(_.toString)))
try {
updatePersistentPath(zkClient, zkPath, jsonData)
info("Updated partition reassignment path with %s".format(jsonData))
}catch {
case nne: ZkNoNodeException =>
ZkUtils.createPersistentPath(zkClient, zkPath, jsonData)
debug("Created path %s with %s for partition reassignment".format(zkPath, jsonData))
case e2 => throw new AdministrationException(e2.toString)
}
}
}
def getAllReplicasOnBroker(zkClient: ZkClient, topics: Seq[String], brokerIds: Seq[Int]): Set[PartitionAndReplica] = {
Set.empty[PartitionAndReplica] ++ brokerIds.map { brokerId =>
      // read all the partitions and their assigned replicas into a map organized by
      // { replica id -> (partition 1, partition 2, ...) }
val partitionsAssignedToThisBroker = getPartitionsAssignedToBroker(zkClient, topics, brokerId)
if(partitionsAssignedToThisBroker.size == 0)
info("No state transitions triggered since no partitions are assigned to brokers %s".format(brokerIds.mkString(",")))
partitionsAssignedToThisBroker.map(p => new PartitionAndReplica(p._1, p._2, brokerId))
}.flatten
}
def getPartitionsUndergoingPreferredReplicaElection(zkClient: ZkClient): Set[TopicAndPartition] = {
// read the partitions and their new replica list
val jsonPartitionListOpt = readDataMaybeNull(zkClient, PreferredReplicaLeaderElectionPath)._1
jsonPartitionListOpt match {
case Some(jsonPartitionList) => parsePreferredReplicaElectionData(jsonPartitionList)
case None => Set.empty[TopicAndPartition]
}
}
def parsePreferredReplicaElectionData(jsonData: String):Set[TopicAndPartition] = {
Json.parseFull(jsonData) match {
case Some(m) =>
val topicAndPartitions = m.asInstanceOf[Array[Map[String, String]]]
val partitions = topicAndPartitions.map { p =>
val topicPartitionMap = p.asInstanceOf[Map[String, String]]
val topic = topicPartitionMap.get("topic").get
val partition = topicPartitionMap.get("partition").get.toInt
TopicAndPartition(topic, partition)
}
Set.empty[TopicAndPartition] ++ partitions
case None => Set.empty[TopicAndPartition]
}
}
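  // Illustrative sketch (not part of the original source): the preferred replica election JSON parsed
  // above is an array of topic/partition entries, e.g. (hypothetical values)
  //   [{"topic": "my-topic", "partition": "0"}, {"topic": "my-topic", "partition": "1"}]
  // which becomes Set(TopicAndPartition("my-topic", 0), TopicAndPartition("my-topic", 1)).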
def deletePartition(zkClient : ZkClient, brokerId: Int, topic: String) {
val brokerIdPath = BrokerIdsPath + "/" + brokerId
zkClient.delete(brokerIdPath)
val brokerPartTopicPath = BrokerTopicsPath + "/" + topic + "/" + brokerId
zkClient.delete(brokerPartTopicPath)
}
def getConsumersInGroup(zkClient: ZkClient, group: String): Seq[String] = {
val dirs = new ZKGroupDirs(group)
getChildren(zkClient, dirs.consumerRegistryDir)
}
def getConsumersPerTopic(zkClient: ZkClient, group: String) : mutable.Map[String, List[String]] = {
val dirs = new ZKGroupDirs(group)
val consumers = getChildrenParentMayNotExist(zkClient, dirs.consumerRegistryDir)
val consumersPerTopicMap = new mutable.HashMap[String, List[String]]
for (consumer <- consumers) {
val topicCount = TopicCount.constructTopicCount(group, consumer, zkClient)
for ((topic, consumerThreadIdSet) <- topicCount.getConsumerThreadIdsPerTopic) {
for (consumerThreadId <- consumerThreadIdSet)
consumersPerTopicMap.get(topic) match {
case Some(curConsumers) => consumersPerTopicMap.put(topic, consumerThreadId :: curConsumers)
case _ => consumersPerTopicMap.put(topic, List(consumerThreadId))
}
}
}
for ( (topic, consumerList) <- consumersPerTopicMap )
consumersPerTopicMap.put(topic, consumerList.sortWith((s,t) => s < t))
consumersPerTopicMap
}
/**
* This API takes in a broker id, queries zookeeper for the broker metadata and returns the metadata for that broker
   * or returns None if the broker dies before the query to zookeeper finishes
* @param brokerId The broker id
* @param zkClient The zookeeper client connection
* @return An optional Broker object encapsulating the broker metadata
*/
def getBrokerInfo(zkClient: ZkClient, brokerId: Int): Option[Broker] = {
ZkUtils.readDataMaybeNull(zkClient, ZkUtils.BrokerIdsPath + "/" + brokerId)._1 match {
case Some(brokerInfo) => Some(Broker.createBroker(brokerId, brokerInfo))
case None => None
}
}
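  // Usage sketch (not part of the original source; the broker id is hypothetical): callers typically
  // pattern match on the returned Option, e.g.
  //   ZkUtils.getBrokerInfo(zkClient, 0) match {
  //     case Some(broker) => info("Broker 0 is registered as " + broker)
  //     case None         => warn("Broker 0 is not registered in zookeeper")
  //   }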
def getAllTopics(zkClient: ZkClient): Seq[String] = {
val topics = ZkUtils.getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
if(topics == null) Seq.empty[String]
else topics
}
def getAllPartitions(zkClient: ZkClient): Set[TopicAndPartition] = {
val topics = ZkUtils.getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
if(topics == null) Set.empty[TopicAndPartition]
else {
topics.map { topic =>
getChildren(zkClient, getTopicPartitionsPath(topic)).map(_.toInt).map(TopicAndPartition(topic, _))
}.flatten.toSet
}
}
}
class LeaderExistsOrChangedListener(topic: String,
partition: Int,
leaderLock: ReentrantLock,
leaderExistsOrChanged: Condition,
oldLeaderOpt: Option[Int] = None,
zkClient: ZkClient = null) extends IZkDataListener with Logging {
@throws(classOf[Exception])
def handleDataChange(dataPath: String, data: Object) {
val t = dataPath.split("/").takeRight(3).head
val p = dataPath.split("/").takeRight(2).head.toInt
leaderLock.lock()
try {
if(t == topic && p == partition){
if(oldLeaderOpt == None){
trace("In leader existence listener on partition [%s, %d], leader has been created".format(topic, partition))
leaderExistsOrChanged.signal()
}
else {
val newLeaderOpt = ZkUtils.getLeaderForPartition(zkClient, t, p)
if(newLeaderOpt.isDefined && newLeaderOpt.get != oldLeaderOpt.get){
trace("In leader change listener on partition [%s, %d], leader has been moved from %d to %d".format(topic, partition, oldLeaderOpt.get, newLeaderOpt.get))
leaderExistsOrChanged.signal()
}
}
}
}
finally {
leaderLock.unlock()
}
}
@throws(classOf[Exception])
def handleDataDeleted(dataPath: String) {
leaderLock.lock()
try {
leaderExistsOrChanged.signal()
}finally {
leaderLock.unlock()
}
}
}
object ZKStringSerializer extends ZkSerializer {
@throws(classOf[ZkMarshallingError])
def serialize(data : Object) : Array[Byte] = data.asInstanceOf[String].getBytes("UTF-8")
@throws(classOf[ZkMarshallingError])
def deserialize(bytes : Array[Byte]) : Object = {
if (bytes == null)
null
else
new String(bytes, "UTF-8")
}
}
class ZKGroupDirs(val group: String) {
def consumerDir = ZkUtils.ConsumersPath
def consumerGroupDir = consumerDir + "/" + group
def consumerRegistryDir = consumerGroupDir + "/ids"
}
class ZKGroupTopicDirs(group: String, topic: String) extends ZKGroupDirs(group) {
def consumerOffsetDir = consumerGroupDir + "/offsets/" + topic
def consumerOwnerDir = consumerGroupDir + "/owners/" + topic
}
class ZKConfig(props: VerifiableProperties) {
/** ZK host string */
val zkConnect = props.getString("zk.connect", null)
/** zookeeper session timeout */
val zkSessionTimeoutMs = props.getInt("zk.sessiontimeout.ms", 6000)
/** the max time that the client waits to establish a connection to zookeeper */
val zkConnectionTimeoutMs = props.getInt("zk.connectiontimeout.ms",zkSessionTimeoutMs)
/** how far a ZK follower can be behind a ZK leader */
val zkSyncTimeMs = props.getInt("zk.synctime.ms", 2000)
}
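// Usage sketch (not part of the original source; host and timeout values are hypothetical, and wrapping
// a java.util.Properties in VerifiableProperties is an assumption): a ZKConfig is typically built from
// the same properties object that configures the broker or consumer, e.g.
//   val props = new java.util.Properties()
//   props.put("zk.connect", "localhost:2181")
//   props.put("zk.connectiontimeout.ms", "10000")
//   val zkConfig = new ZKConfig(new VerifiableProperties(props))
// zk.sessiontimeout.ms falls back to 6000 ms and zk.connectiontimeout.ms defaults to the session timeout.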
| dchenbecker/kafka-sbt | core/src/main/scala/kafka/utils/ZkUtils.scala | Scala | apache-2.0 | 32,076 |
package com.arcusys.learn.liferay.update
import com.arcusys.learn.liferay.LiferayClasses.LUpgradeProcess
class DBUpdater220 extends LUpgradeProcess with SQLRunner {
override def getThreshold = 220
override def doUpgrade() {
System.out.println("Updating to 2.2")
// update tincan table columns length
runSQLScript("""alter table Learn_LFTincanManifestAct alter column "tincanid" TYPE VARCHAR(2000);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanManifestAct alter column "description" TYPE VARCHAR(2000);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanManifestAct alter column "resourceid" TYPE VARCHAR(2000);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanManifestAct alter column "activitytype" TYPE VARCHAR(2000);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanManifestAct alter column "launch" TYPE VARCHAR(2000);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanManifestAct alter column "name" TYPE VARCHAR(2000);""".stripMargin)
// rename table passinglimit -> lessonlimit
runSQLScript("""ALTER TABLE Learn_LFPassingLimit RENAME TO Learn_LFLessonLimit;""".stripMargin)
// rename table Learn_LFCertificateTincanStatement -> Learn_LFCertTCStmnt
runSQLScript("""ALTER TABLE Learn_LFCertificateTincanStatement RENAME TO Learn_LFCertTCStmnt;""".stripMargin)
// rename table Learn_LFGlobalObjectiveState -> Learn_LFGlblObjectiveState
runSQLScript("""ALTER TABLE Learn_LFGlobalObjectiveState RENAME TO Learn_LFGlblObjectiveState;""".stripMargin)
// rename table Learn_LFQuizQuestionCategory -> Learn_LFQuizQuestCat
runSQLScript("""ALTER TABLE Learn_LFQuizQuestionCategory RENAME TO Learn_LFQuizQuestCat;""".stripMargin)
// rename table Learn_LFSequencingPermissions -> Learn_LFSeqPermissions
runSQLScript("""ALTER TABLE Learn_LFSequencingPermissions RENAME TO Learn_LFSeqPermissions;""".stripMargin)
// rename table Learn_LFTincanClientApiStorage -> Learn_LFTCClntApiStorage
runSQLScript("""ALTER TABLE Learn_LFTincanClientApiStorage RENAME TO Learn_LFTCClntApiStorage;""".stripMargin)
// rename table Learn_LFTincanLrsAgentProfile -> Learn_LFTCLrsAgentProfile
runSQLScript("""ALTER TABLE Learn_LFTincanLrsAgentProfile RENAME TO Learn_LFTCLrsAgentProfile;""".stripMargin)
// rename table Learn_LFTincanLrsStatementRef -> Learn_LFTCLrsStmntRef
runSQLScript("""ALTER TABLE Learn_LFTincanLrsStatementRef RENAME TO Learn_LFTCLrsStmntRef;""".stripMargin)
// rename table Learn_LFTincanLrsSubStatement -> Learn_LFTCLrsSubStmnt
runSQLScript("""ALTER TABLE Learn_LFTincanLrsSubStatement RENAME TO Learn_LFTCLrsSubStmnt;""".stripMargin)
// add columns to lessonlimit
runSQLScript("""ALTER TABLE Learn_lflessonlimit RENAME column itemValue TO passingLimit;""".stripMargin)
runSQLScript("""ALTER TABLE Learn_lflessonlimit ADD COLUMN rerunInterval INTEGER null;""".stripMargin)
runSQLScript("""ALTER TABLE Learn_lflessonlimit ADD COLUMN rerunIntervalType VARCHAR(75) null;""".stripMargin)
// add columns to quiz
runSQLScript("""alter table Learn_lfquiz add column maxDuration INTEGER null;""".stripMargin)
runSQLScript("""alter table Learn_lfquizquestion add column groupId INTEGER null;""".stripMargin)
//Learn_LFCertTCStmnt
runSQLScript("""alter table Learn_LFCertTCStmnt alter column "verb" TYPE VARCHAR(1024);""".stripMargin)
runSQLScript("""alter table Learn_LFCertTCStmnt alter column "object" TYPE VARCHAR(4096);""".stripMargin)
runSQLScript("""alter table Learn_LFCertTCStmnt alter column "periodType" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFAchievement
runSQLScript("""alter table Learn_LFAchievement alter column "title" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFBigDecimal
runSQLScript("""alter table Learn_LFBigDecimal alter column "text_" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFCertificate
runSQLScript("""alter table Learn_LFCertificate alter column "logo" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFCertificate alter column "shortDescription" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFCertificate alter column "state_" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFCertificate alter column "emails" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFCertificate alter column "validPeriodType" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFCertificateActivity
runSQLScript("""alter table Learn_LFCertificateActivity alter column "activityName" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFCertificateActivity alter column "periodType" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFCertificateCourse
runSQLScript("""alter table Learn_LFCertificateCourse alter column "periodType" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFConfig
runSQLScript("""alter table Learn_LFConfig alter column "dataKey" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFConfig alter column "dataValue" TYPE VARCHAR(512);""".stripMargin)
    //Learn_LFLRSToActivitySetting
runSQLScript("""alter table Learn_LFLRSToActivitySetting alter column "title" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFLessonLimit
runSQLScript("""alter table Learn_LFLessonLimit alter column "itemType" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFLessonLimit alter column "rerunIntervalType" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFPackageComment
runSQLScript("""alter table Learn_LFPackageComment alter column "comment_" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFPackageGradeStorage
runSQLScript("""alter table Learn_LFPackageGradeStorage alter column "grade" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFPackageGradeStorage alter column "comment_" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFQuiz
runSQLScript("""alter table Learn_LFQuiz alter column "logo" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFQuizTreeElement
runSQLScript("""alter table Learn_LFQuizTreeElement alter column "elementID" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFQuizTreeElement alter column "parentID" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFRequiredActivity
runSQLScript("""alter table Learn_LFRequiredActivity alter column "activityClassName" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFRole
runSQLScript("""alter table Learn_LFRole alter column "permission" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFSiteDependentConfig
runSQLScript("""alter table Learn_LFSiteDependentConfig alter column "dataKey" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFSiteDependentConfig alter column "dataValue" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFSocialPackageTag
runSQLScript("""alter table Learn_LFSocialPackageTag alter column "name" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanActProfile
runSQLScript("""alter table Learn_LFTincanActProfile alter column "activityId" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanActProfile alter column "profileId" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanActivity
runSQLScript("""alter table Learn_LFTincanActivity alter column "objectType" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanActor
runSQLScript("""alter table Learn_LFTincanActor alter column "tincanID" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanActor alter column "objectType" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTCClntApiStorage
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "name" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "description" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "secret" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "url" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "redirectUrl" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "scope" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "iconUrl" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "token" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTCClntApiStorage alter column "code_" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTCLrsAgentProfile
runSQLScript("""alter table Learn_LFTCLrsAgentProfile alter column "profileId" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanLrsContext
runSQLScript("""alter table Learn_LFTincanLrsContext alter column "registration" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanLrsDocument
runSQLScript("""alter table Learn_LFTincanLrsDocument alter column "documentId" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanLrsResult
runSQLScript("""alter table Learn_LFTincanLrsResult alter column "duration" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanLrsState
runSQLScript("""alter table Learn_LFTincanLrsState alter column "stateId" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanLrsState alter column "documentId" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanLrsState alter column "activityId" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanLrsStatement
runSQLScript("""alter table Learn_LFTincanLrsStatement alter column "tincanID" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTCLrsStmntRef
runSQLScript("""alter table Learn_LFTCLrsStmntRef alter column "uuid_" TYPE VARCHAR(512);""".stripMargin)
//Learn_LFTincanURI
runSQLScript("""alter table Learn_LFTincanURI alter column "objID" TYPE VARCHAR(512);""".stripMargin)
runSQLScript("""alter table Learn_LFTincanURI alter column "objType" TYPE VARCHAR(512);""".stripMargin)
}
}
| ViLPy/Valamis | learn-portlet/src/main/scala/com/arcusys/learn/liferay/update/DBUpdater220.scala | Scala | lgpl-3.0 | 10,313 |
package util
import javax.crypto.SecretKeyFactory
import javax.crypto.spec.PBEKeySpec
import org.joda.time.DateTime
/**
 * Shared utility for password hashing.
*/
object PasswordDigestUtil {
/**
   * Creates a hashed password string.
   * @param passwordStr password string entered by the user
   * @param timeStamp date/time associated with the password (used to build the salt)
   * @return hashed string
*/
def createHashPassword(passwordStr: String, timeStamp: DateTime): String = {
val passChars = passwordStr.toCharArray
    val salt = (timeStamp.toString(ConfigHelper.soltFormat) + ConfigHelper.pepper).getBytes
    val keySpec = new PBEKeySpec(passChars, salt, ConfigHelper.stretching, 256)
val skf = SecretKeyFactory.getInstance("PBKDF2WithHmacSHA512")
val secretKey = skf.generateSecret(keySpec)
secretKey.getEncoded.map("%02X" format _).mkString
}
}
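/**
 * Illustrative usage sketch, not part of the original source. Because PBKDF2 is deterministic for the
 * same password, salt and iteration count, the timestamp captured when the password was created must be
 * stored alongside the hash and reused to verify a later login attempt. Names here are hypothetical.
 */
object PasswordDigestUtilUsageSketch {
  def verify(inputPassword: String, storedHash: String, storedCreatedAt: DateTime): Boolean =
    PasswordDigestUtil.createHashPassword(inputPassword, storedCreatedAt) == storedHash
}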
| nemuzuka/vss-kanban | src/main/scala/util/PasswordDigestUtil.scala | Scala | mit | 854 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.models
import com.intel.analytics.bigdl.models.inception._
import com.intel.analytics.bigdl.nn.{Graph, Input}
import com.intel.analytics.bigdl.numeric.NumericFloat
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.{T, Table}
import org.scalatest.{FlatSpec, Matchers}
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class InceptionSpec extends FlatSpec with Matchers {
"Inception_Layer_V1 graph" should "be correct" in {
val batchSize = 8
RNG.setSeed(1000)
val model = Inception_Layer_v1(2, T(T(4), T(96, 128), T(16, 32), T(32)), "conv")
RNG.setSeed(1000)
val input = Input()
val f1 = Inception_Layer_v1(input, 2, T(T(4), T(96, 128), T(16, 32), T(32)), "conv")
val graphModel = Graph(input, f1)
val inputData = Tensor(batchSize, 2, 4, 4).rand()
val gradOutput = Tensor(batchSize, 256, 4, 4).rand()
val output1 = model.forward(inputData).toTensor[Float]
val output2 = graphModel.forward(inputData).toTensor[Float]
output1 should be(output2)
val gradInput1 = model.backward(inputData, gradOutput).toTensor[Float]
val gradInput2 = graphModel.backward(inputData, gradOutput).toTensor[Float]
gradInput1 should be(gradInput2)
model.getParametersTable()[Table]("conv1x1")[Tensor[Float]]("gradWeight") should be(
graphModel.getParametersTable()[Table]("conv1x1")[Tensor[Float]]("gradWeight")
)
model.getParametersTable()[Table]("conv3x3_reduce")[Tensor[Float]]("gradWeight") should be(
graphModel.getParametersTable()[Table]("conv3x3_reduce")[Tensor[Float]]("gradWeight")
)
model.getParametersTable()[Table]("conv3x3")[Tensor[Float]]("gradWeight") should be(
graphModel.getParametersTable()[Table]("conv3x3")[Tensor[Float]]("gradWeight")
)
model.getParametersTable()[Table]("conv5x5_reduce")[Tensor[Float]]("gradWeight") should be(
graphModel.getParametersTable()[Table]("conv5x5_reduce")[Tensor[Float]]("gradWeight")
)
model.getParametersTable()[Table]("conv5x5")[Tensor[Float]]("gradWeight") should be(
graphModel.getParametersTable()[Table]("conv5x5")[Tensor[Float]]("gradWeight")
)
model.getParametersTable()[Table]("convpool_proj")[Tensor[Float]]("gradWeight") should be(
graphModel.getParametersTable()[Table]("convpool_proj")[Tensor[Float]]("gradWeight")
)
}
"Inception graph" should "be correct" in {
val batchSize = 2
RNG.setSeed(1000)
val model = Inception_v1_NoAuxClassifier(1000, false)
RNG.setSeed(1000)
val graphModel = Inception_v1_NoAuxClassifier.graph(1000, false)
val input = Tensor[Float](batchSize, 3, 224, 224).apply1(e => Random.nextFloat())
val gradOutput = Tensor[Float](batchSize, 1000).apply1(e => Random.nextFloat())
val output1 = model.forward(input).toTensor[Float]
val output2 = graphModel.forward(input).toTensor[Float]
output1 should be(output2)
val gradInput1 = model.backward(input, gradOutput).toTensor[Float]
val gradInput2 = graphModel.backward(input, gradOutput).toTensor[Float]
gradInput1 should be(gradInput2)
val table1 = model.getParametersTable()
val table2 = graphModel.getParametersTable()
table1.keySet.foreach(key => {
table1(key).asInstanceOf[Table]("weight").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("weight").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("bias").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("bias").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("gradWeight").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("gradWeight").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("gradBias").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("gradBias").asInstanceOf[Tensor[Float]]
})
}
"Inception_v1 graph" should "be correct" in {
val batchSize = 1
RNG.setSeed(1000)
val model = Inception_v1(1000, false)
RNG.setSeed(1000)
val graphModel = Inception_v1.graph(1000, false)
val input = Tensor[Float](batchSize, 3, 224, 224).apply1(e => Random.nextFloat())
val gradOutput = Tensor[Float](batchSize, 3000).apply1(e => Random.nextFloat())
val output1 = model.forward(input).toTensor[Float]
val output2 = graphModel.forward(input).toTensor[Float]
output1 should be(output2)
val gradInput1 = model.backward(input, gradOutput)
val gradInput2 = graphModel.backward(input, gradOutput)
gradInput1 should be(gradInput2)
}
"Inception_Layer_V2 graph" should "be correct" in {
val batchSize = 8
RNG.setSeed(1000)
val model = Inception_Layer_v2(2, T(T(4), T(96, 128), T(16, 32), T("avg", 32)), "conv")
RNG.setSeed(1000)
val input1 = Input()
val f1 = Inception_Layer_v2(input1, 2, T(T(4), T(96, 128), T(16, 32), T("avg", 32)), "conv")
val graphModel = Graph(input1, f1)
val input = Tensor(batchSize, 2, 4, 4).rand()
val gradOutput = Tensor(batchSize, 256, 4, 4).rand()
val output1 = model.forward(input).toTensor[Float]
val output2 = graphModel.forward(input).toTensor[Float]
output1 should be(output2)
val gradInput1 = model.backward(input, gradOutput).toTensor[Float]
val gradInput2 = graphModel.backward(input, gradOutput).toTensor[Float]
gradInput1 should be(gradInput2)
val table1 = model.getParametersTable()
val table2 = graphModel.getParametersTable()
table1.keySet.foreach(key => {
table1(key).asInstanceOf[Table]("weight").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("weight").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("bias").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("bias").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("gradWeight").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("gradWeight").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("gradBias").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("gradBias").asInstanceOf[Tensor[Float]]
})
}
"Inception_v2_NoAuxClassifier graph" should "be correct" in {
val batchSize = 2
RNG.setSeed(1000)
val model = Inception_v2_NoAuxClassifier(1000)
RNG.setSeed(1000)
val graphModel = Inception_v2_NoAuxClassifier.graph(1000)
val input = Tensor[Float](batchSize, 3, 224, 224).apply1(e => Random.nextFloat())
val gradOutput = Tensor[Float](batchSize, 1000).apply1(e => Random.nextFloat())
val output1 = model.forward(input).toTensor[Float]
val output2 = graphModel.forward(input).toTensor[Float]
output1 should be(output2)
val gradInput1 = model.backward(input, gradOutput).toTensor[Float]
val gradInput2 = graphModel.backward(input, gradOutput).toTensor[Float]
gradInput1 should be(gradInput2)
val table1 = model.getParametersTable()
val table2 = graphModel.getParametersTable()
table1.keySet.foreach(key => {
table1(key).asInstanceOf[Table]("weight").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("weight").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("bias").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("bias").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("gradWeight").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("gradWeight").asInstanceOf[Tensor[Float]]
table1(key).asInstanceOf[Table]("gradBias").asInstanceOf[Tensor[Float]] should be
table2(key).asInstanceOf[Table]("gradBias").asInstanceOf[Tensor[Float]]
})
}
"Inception_v2 graph" should "be correct" in {
val batchSize = 2
RNG.setSeed(1000)
val model = Inception_v2(1000)
RNG.setSeed(1000)
val graphModel = Inception_v2.graph(1000)
val input = Tensor[Float](batchSize, 3, 224, 224).apply1(e => Random.nextFloat())
val gradOutput = Tensor[Float](batchSize, 3000).apply1(e => Random.nextFloat())
val output1 = model.forward(input).toTensor[Float]
val output2 = graphModel.forward(input).toTensor[Float]
output1 should be(output2)
val gradInput1 = model.updateGradInput(input, gradOutput)
val gradInput2 = graphModel.updateGradInput(input, gradOutput)
gradInput1 should be(gradInput2)
}
}
| zhangxiaoli73/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/models/InceptionSpec.scala | Scala | apache-2.0 | 9,135 |
/** Copyright 2015 TappingStone, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.prediction.data.storage.elasticsearch
import grizzled.slf4j.Logging
import org.elasticsearch.client.Client
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.transport.InetSocketTransportAddress
import org.elasticsearch.transport.ConnectTransportException
import io.prediction.data.storage.BaseStorageClient
import io.prediction.data.storage.StorageClientConfig
import io.prediction.data.storage.StorageClientException
class StorageClient(val config: StorageClientConfig) extends BaseStorageClient
with Logging {
override val prefix = "ES"
val client = try {
val transportClient = new TransportClient()
(config.hosts zip config.ports) foreach { hp =>
transportClient.addTransportAddress(
new InetSocketTransportAddress(hp._1, hp._2))
}
transportClient
} catch {
case e: ConnectTransportException =>
throw new StorageClientException(e.getMessage)
}
}
| nvoron23/PredictionIO | data/src/main/scala/io/prediction/data/storage/elasticsearch/StorageClient.scala | Scala | apache-2.0 | 1,569 |
/*
* Copyright (C) 2012 Pavel Fatin <http://pavelfatin.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.pavelfatin.fs
package manager
private case class Step(index: Int, name: String)
| pavelfatin/toyfs | src/main/scala/com/pavelfatin/fs/manager/Step.scala | Scala | gpl-3.0 | 808 |
package app.agent
import app.server.ProbeSummaryResponse
import app.ui.BigSpinner
import im.mange.jetboot._
import im.mange.jetpac._
case class ChecksSummaryAgent() extends Renderable {
private val holder = div(Some("checksSummary")).classes("hidden").styles(marginTop("5px"))
def render = holder.render
def requestSummary = holder.show & holder.fill(BigSpinner("checksSummarySpinner", "Loading checks summary..."))
def show(response: ProbeSummaryResponse) = holder.fill(ChecksConfigPresentation(response.probes))
def hide = holder.empty & holder.hide
} | alltonp/reprobate | src/main/scala/app/agent/ChecksSummaryAgent.scala | Scala | apache-2.0 | 566 |
class Position(val foo: Int) extends AnyVal {
def orElse(that: Position) =
if (foo != 0) this else that
}
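// Usage sketch (not part of the original test): `new Position(0).orElse(new Position(3))` yields the
// fallback because foo == 0, while `new Position(1).orElse(new Position(3))` keeps the receiver.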
| som-snytt/dotty | tests/pos/vcif.scala | Scala | apache-2.0 | 112 |
import slick.driver.JdbcProfile
import scala.concurrent.{Future, Await}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import slick.backend.{DatabaseConfig, StaticDatabaseConfig}
@StaticDatabaseConfig("#tsql")
object TypedSQL extends App {
val dc = DatabaseConfig.forAnnotation[JdbcProfile]
import dc.driver.api._
def getSuppliers(id: Int): DBIO[Seq[(Int, String, String, String, String, String)]] =
tsql"select * from suppliers where id > $id"
val db = dc.db
try {
val a: DBIO[Unit] =
getSuppliers(50).map { s =>
println("All suppliers > 50:")
s.foreach(println)
}
val f: Future[Unit] = db.run(a)
Await.result(f, Duration.Inf)
} finally db.close
}
| nafg/slick | samples/slick-plainsql/src/main/scala/TypedSQL.scala | Scala | bsd-2-clause | 764 |
package org.jetbrains.plugins.scala.lang.psi.stubs.index
import com.intellij.openapi.project.Project
import com.intellij.psi.search.GlobalSearchScope
import com.intellij.psi.stubs.StringStubIndexExtension
import org.jetbrains.plugins.scala.finder.ScalaSourceFilterScope
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScObject
/**
* @author Alexander Podkhalyuzin
*/
class ScImplicitObjectKey extends StringStubIndexExtension[ScObject] {
override def get(fqn: String, project: Project, scope: GlobalSearchScope): java.util.Collection[ScObject] =
super.get(fqn, project, new ScalaSourceFilterScope(scope, project))
def getKey = ScImplicitObjectKey.KEY
}
object ScImplicitObjectKey {
val KEY = ScalaIndexKeys.IMPLICIT_OBJECT_KEY
} | LPTK/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/index/ScImplicitObjectKey.scala | Scala | apache-2.0 | 763 |
trait V {
// ok
// error: java.lang.IllegalArgumentException: Could not find proxy for val f: Function1 in List(value f, value v, trait V, package <empty>, package <root>) (currentOwner= value <local V$class> )
val v = { val f = (x: Int) => x + 1; f(2) }
// ok
// assertion failed:
// Trying to access the this of another class: tree.symbol = trait V, class symbol = object V$class compilation unit: fields.scala
val developmentVersion =
for {
v <- scalaPropOrNone("maven.version.number")
if v endsWith "-SNAPSHOT"
ov <- scalaPropOrNone("version.number")
} yield ov
def scalaPropOrNone(name: String): Option[String] = ???
}
object O extends V
| scala/scala | test/files/pos/trait_fields_owners.scala | Scala | apache-2.0 | 690 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.path
import org.scalatest._
import SharedHelpers._
import org.scalatest.path.{ FunSpec => PathFunSpec }
import org.scalatest.events._
import org.scalatest.exceptions.TestFailedException
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.exceptions.DuplicateTestNameException
import org.scalatest.exceptions.TestRegistrationClosedException
import org.scalatest.exceptions.NotAllowedException
import java.lang.annotation.AnnotationFormatError
import java.awt.AWTError
import java.nio.charset.CoderMalfunctionError
import javax.xml.parsers.FactoryConfigurationError
import javax.xml.transform.TransformerFactoryConfigurationError
class FunSpecSpec extends org.scalatest.FreeSpec with GivenWhenThen {
"A path.FunSpec" - {
"should return the test names in registration order from testNames" in {
class AFunSpec extends PathFunSpec {
it("should test this") {}
it("should test that") {}
override def newInstance = new AFunSpec
}
val a = new AFunSpec
assertResult(List("should test this", "should test that")) {
a.testNames.iterator.toList
}
val b = new PathFunSpec {}
assertResult(List[String]()) {
b.testNames.iterator.toList
}
class CFunSpec extends PathFunSpec {
it("should test that") {}
it("should test this") {}
override def newInstance = new CFunSpec
}
val c = new CFunSpec
assertResult(List("should test that", "should test this")) {
c.testNames.iterator.toList
}
class DFunSpec extends PathFunSpec {
describe("A Tester") {
it("should test that") {}
it("should test this") {}
}
override def newInstance = new DFunSpec
}
val d = new DFunSpec
assertResult(List("A Tester should test that", "A Tester should test this")) {
d.testNames.iterator.toList
}
class EFunSpec extends PathFunSpec {
describe("A Tester") {
it("should test this") {}
it("should test that") {}
}
override def newInstance = new EFunSpec
}
val e = new EFunSpec
assertResult(List("A Tester should test this", "A Tester should test that")) {
e.testNames.iterator.toList
}
}
"should throw DuplicateTestNameException if a duplicate test name registration is attempted" in {
intercept[DuplicateTestNameException] {
class AFunSpec extends PathFunSpec {
it("should test this") {}
it("should test this") {}
override def newInstance = new AFunSpec
}
(new AFunSpec).tags // Must call a method to get it to attempt to register the second test
}
intercept[DuplicateTestNameException] {
class AFunSpec extends PathFunSpec {
it("should test this") {}
ignore("should test this") {}
override def newInstance = new AFunSpec
}
(new AFunSpec).tags
}
intercept[DuplicateTestNameException] {
class AFunSpec extends PathFunSpec {
ignore("should test this") {}
ignore("should test this") {}
override def newInstance = new AFunSpec
}
(new AFunSpec).tags
}
intercept[DuplicateTestNameException] {
class AFunSpec extends PathFunSpec {
ignore("should test this") {}
it("should test this") {}
override def newInstance = new AFunSpec
}
(new AFunSpec).tags
}
}
"should create new instance as FunSpecLike" in {
val spec = new ExampleFunSpecLike
assert(spec.newInstance.isInstanceOf[FunSpecLike])
}
"(with info calls)" - {
class InfoInsideTestSpec extends PathFunSpec {
val msg = "hi there, dude"
val testName = "test name"
it(testName) {
info(msg)
}
override def newInstance = new InfoInsideTestSpec
}
      // In a Spec, any InfoProvided's fired during the test should be cached and sent out after the test has
      // succeeded or failed. This makes the report look nicer, because the info is tucked under the "specifier"
      // text for that test.
"should, when the info appears in the code of a successful test, report the info in the TestSucceeded" in {
val spec = new InfoInsideTestSpec
val (testStartingIndex, testSucceededIndex) =
getIndexesForTestInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(testStartingIndex < testSucceededIndex)
}
class InfoBeforeTestSpec extends PathFunSpec {
val msg = "hi there, dude"
val testName = "test name"
info(msg)
it(testName) {}
}
"should, when the info appears in the body before a test, report the info before the test" in {
val spec = new InfoBeforeTestSpec
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(infoProvidedIndex < testStartingIndex)
assert(testStartingIndex < testSucceededIndex)
}
"should, when the info appears in the body after a test, report the info after the test runs" in {
val msg = "hi there, dude"
val testName = "test name"
class MySpec extends PathFunSpec {
it(testName) {}
info(msg)
}
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(new MySpec, testName, msg)
assert(testStartingIndex < testSucceededIndex)
assert(testSucceededIndex < infoProvidedIndex)
}
"should print to stdout when info is called by a method invoked after the suite has been executed" in {
class MySpec extends PathFunSpec {
callInfo() // This should work fine
def callInfo() {
info("howdy")
}
it("howdy also") {
callInfo() // This should work fine
}
override def newInstance = new MySpec
}
val spec = new MySpec
val myRep = new EventRecordingReporter
spec.run(None, Args(myRep))
spec.callInfo() // TODO: Actually test that This prints to stdout
}
"should send an InfoProvided with an IndentedText formatter with level 1 when called outside a test" in {
val spec = new InfoBeforeTestSpec
val indentedText = getIndentedTextFromInfoProvided(spec)
assert(indentedText === IndentedText("+ " + spec.msg, spec.msg, 0))
}
"should send an InfoProvided with an IndentedText formatter with level 2 when called within a test" in {
val spec = new InfoInsideTestSpec
val indentedText = getIndentedTextFromTestInfoProvided(spec)
assert(indentedText === IndentedText(" + " + spec.msg, spec.msg, 1))
}
}
"(when a nesting rule has been violated)" - {
"should, if they call a describe from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends PathFunSpec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
}
}
override def newInstance = new MySpec
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a describe with a nested it from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends PathFunSpec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
it("should never run") {
assert(1 === 1)
}
}
}
override def newInstance = new MySpec
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested it from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends PathFunSpec {
it("should blow up") {
it("should never run") {
assert(1 === 1)
}
}
override def newInstance = new MySpec
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends PathFunSpec {
it("should blow up") {
it("should never run", mytags.SlowAsMolasses) {
assert(1 === 1)
}
}
override def newInstance = new MySpec
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a describe with a nested ignore from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends PathFunSpec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
ignore("should never run") {
assert(1 === 1)
}
}
}
override def newInstance = new MySpec
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends PathFunSpec {
it("should blow up") {
ignore("should never run") {
assert(1 === 1)
}
}
override def newInstance = new MySpec
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends PathFunSpec {
it("should blow up") {
ignore("should never run", mytags.SlowAsMolasses) {
assert(1 === 1)
}
}
override def newInstance = new MySpec
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
}
"should run tests registered via the 'it should behave like' syntax" in {
trait SharedSpecTests { this: PathFunSpec =>
def nonEmptyStack(s: String)(i: Int) {
it("should be that I am shared") {}
}
}
class MySpec extends PathFunSpec with SharedSpecTests {
it should behave like nonEmptyStack("hi")(1)
override def newInstance = new MySpec
}
val suite = new MySpec
val reporter = new EventRecordingReporter
suite.run(None, Args(reporter))
val indexedList = reporter.eventsReceived
val testStartingOption = indexedList.find(_.isInstanceOf[TestStarting])
assert(testStartingOption.isDefined)
assert(testStartingOption.get.asInstanceOf[TestStarting].testName === "should be that I am shared")
}
"should throw NullPointerException if a null test tag is provided" in {
// it
intercept[NullPointerException] {
new PathFunSpec {
it("hi", null) {}
}
}
val caught = intercept[NullPointerException] {
new PathFunSpec {
it("hi", mytags.SlowAsMolasses, null) {}
}
}
assert(caught.getMessage === "a test tag was null")
intercept[NullPointerException] {
new PathFunSpec {
it("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {}
}
}
// ignore
intercept[NullPointerException] {
new PathFunSpec {
ignore("hi", null) {}
}
}
val caught2 = intercept[NullPointerException] {
new PathFunSpec {
ignore("hi", mytags.SlowAsMolasses, null) {}
}
}
assert(caught2.getMessage === "a test tag was null")
intercept[NullPointerException] {
new PathFunSpec {
ignore("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {}
}
}
}
case class TestWasCalledCounts(var theTestThisCalled: Boolean, var theTestThatCalled: Boolean)
class TestWasCalledSuite(val counts: TestWasCalledCounts) extends PathFunSpec {
def this() { this(TestWasCalledCounts(false, false)) }
it("should run this") { counts.theTestThisCalled = true }
it("should run that, maybe") { counts.theTestThatCalled = true }
override def newInstance = new TestWasCalledSuite(counts)
}
"should execute all tests when run is called with testName None" in {
val b = new TestWasCalledSuite
b.run(None, Args(SilentReporter))
assert(b.counts.theTestThisCalled)
assert(b.counts.theTestThatCalled)
}
"should execute one test when run is called with a defined testName" in {
val a = new TestWasCalledSuite
val rep = new EventRecordingReporter
a.run(Some("should run this"), Args(rep))
assert(a.counts.theTestThisCalled)
assert(a.counts.theTestThatCalled) // In a path trait, this gets executed, but not reported
val tse = rep.testSucceededEventsReceived
assert(tse.size == 1)
assert(tse(0).testName === "should run this")
val tfe = rep.testFailedEventsReceived
assert(tfe.size === 0)
val tste = rep.testStartingEventsReceived
assert(tste.size === 1)
}
"should report as ignored, and not run, tests marked ignored" in {
class AFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
it("test this") { counts.theTestThisCalled = true }
it("test that") { counts.theTestThatCalled = true }
override def newInstance = new AFunSpec(counts)
}
val a = new AFunSpec(TestWasCalledCounts(false, false))
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.counts.theTestThisCalled)
assert(a.counts.theTestThatCalled)
class BFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
ignore("test this") { counts.theTestThisCalled = true }
it("test that") { counts.theTestThatCalled = true }
override def newInstance = new BFunSpec(counts)
}
val b = new BFunSpec(TestWasCalledCounts(false, false))
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB))
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!b.counts.theTestThisCalled)
assert(b.counts.theTestThatCalled)
class CFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
it("test this") { counts.theTestThisCalled = true }
ignore("test that") { counts.theTestThatCalled = true }
override def newInstance = new CFunSpec(counts)
}
val c = new CFunSpec(TestWasCalledCounts(false, false))
val repC = new TestIgnoredTrackingReporter
c.run(None, Args(repC))
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(c.counts.theTestThisCalled)
assert(!c.counts.theTestThatCalled)
// The order I want is order of appearance in the file.
// Will try and implement that tomorrow. Subtypes will be able to change the order.
class DFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
ignore("test this") { counts.theTestThisCalled = true }
ignore("test that") { counts.theTestThatCalled = true }
override def newInstance = new DFunSpec(counts)
}
val d = new DFunSpec(TestWasCalledCounts(false, false))
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD))
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!d.counts.theTestThisCalled)
assert(!d.counts.theTestThatCalled)
}
"should ignore a test marked as ignored if run is invoked with that testName" in {
// If I provide a specific testName to run, then it should ignore an Ignore on that test
// method and actually invoke it.
class EFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
def this() { this(TestWasCalledCounts(false, false)) }
ignore("test this") { counts.theTestThisCalled = true }
it("test that") { counts.theTestThatCalled = true }
override def newInstance = new EFunSpec(counts)
}
val e = new EFunSpec
val repE = new EventRecordingReporter
e.run(Some("test this"), Args(repE))
assert(repE.testIgnoredEventsReceived.size === 1)
assert(!e.counts.theTestThisCalled)
assert(e.counts.theTestThatCalled) // In a path trait, tests other than the Some(testName) get executed, but not reported
val tste = repE.testStartingEventsReceived
assert(tste.size === 0)
}
"should run only those tests selected by the tags to include and exclude sets" in {
// Nothing is excluded
class AFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
it("test this", mytags.SlowAsMolasses) { counts.theTestThisCalled = true }
it("test that") { counts.theTestThatCalled = true }
override def newInstance = new AFunSpec(counts)
}
val a = new AFunSpec(TestWasCalledCounts(false, false))
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.counts.theTestThisCalled)
assert(a.counts.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
class BFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
it("test this", mytags.SlowAsMolasses) { counts.theTestThisCalled = true }
it("test that") { counts.theTestThatCalled = true }
override def newInstance = new BFunSpec(counts)
}
val b = new BFunSpec(TestWasCalledCounts(false, false))
val repB = new EventRecordingReporter
b.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repB.testIgnoredEventsReceived.isEmpty)
assert(b.counts.theTestThisCalled)
assert(b.counts.theTestThatCalled)
assert(repB.testStartingEventsReceived.size === 1)
assert(repB.testStartingEventsReceived(0).testName == "test this")
// SlowAsMolasses is included, and both tests should be included
class CFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
it("test this", mytags.SlowAsMolasses) { counts.theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
override def newInstance = new CFunSpec(counts)
}
val c = new CFunSpec(TestWasCalledCounts(false, false))
val repC = new EventRecordingReporter
c.run(None, Args(repC, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repC.testIgnoredEventsReceived.isEmpty)
assert(c.counts.theTestThisCalled)
assert(c.counts.theTestThatCalled)
assert(repC.testStartingEventsReceived.size === 2)
// SlowAsMolasses is included. both tests should be included but one ignored
class DFunSpec(val counts: TestWasCalledCounts) extends PathFunSpec {
ignore("test this", mytags.SlowAsMolasses) { counts.theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
override def newInstance = new DFunSpec(counts)
}
val d = new DFunSpec(TestWasCalledCounts(false, false))
val repD = new EventRecordingReporter
d.run(None, Args(repD, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repD.testIgnoredEventsReceived.size === 1)
assert(!d.counts.theTestThisCalled)
assert(d.counts.theTestThatCalled)
assert(repD.testStartingEventsReceived.size === 1)
assert(repD.testStartingEventsReceived(0).testName === "test that")
case class ThreeCounts(var theTestThisCalled: Boolean, var theTestThatCalled: Boolean, var theTestTheOtherCalled: Boolean)
// SlowAsMolasses included, FastAsLight excluded
class EFunSpec(val counts: ThreeCounts) extends PathFunSpec {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { counts.theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
it("test the other") { counts.theTestTheOtherCalled = true }
override def newInstance = new EFunSpec(counts)
}
val e = new EFunSpec(ThreeCounts(false, false, false))
val repE = new EventRecordingReporter
e.run(None, Args(repE, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(repE.testIgnoredEventsReceived.isEmpty)
assert(e.counts.theTestThisCalled)
assert(e.counts.theTestThatCalled)
assert(e.counts.theTestTheOtherCalled)
assert(repE.testStartingEventsReceived.size === 1)
assert(repE.testStartingEventsReceived(0).testName === "test that")
// An Ignored test that was both included and excluded should not generate a TestIgnored event
class FFunSpec(val counts: ThreeCounts) extends PathFunSpec {
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { counts.theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
it("test the other") { counts.theTestTheOtherCalled = true }
override def newInstance = new FFunSpec(counts)
}
val f = new FFunSpec(ThreeCounts(false, false, false))
val repF = new EventRecordingReporter
f.run(None, Args(repF, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(repF.testIgnoredEventsReceived.isEmpty)
assert(!f.counts.theTestThisCalled)
assert(f.counts.theTestThatCalled)
assert(f.counts.theTestTheOtherCalled)
      assert(repF.testStartingEventsReceived.size === 1)
      assert(repF.testStartingEventsReceived(0).testName === "test that")
// An Ignored test that was not included should not generate a TestIgnored event
class GFunSpec(val counts: ThreeCounts) extends PathFunSpec {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { counts.theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
ignore("test the other") { counts.theTestTheOtherCalled = true }
override def newInstance = new GFunSpec(counts)
}
val g = new GFunSpec(ThreeCounts(false, false, false))
val repG = new EventRecordingReporter
g.run(None, Args(repG, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(repG.testIgnoredEventsReceived.isEmpty)
assert(g.counts.theTestThisCalled)
assert(g.counts.theTestThatCalled)
assert(!g.counts.theTestTheOtherCalled)
assert(repG.testStartingEventsReceived.size === 1)
assert(repG.testStartingEventsReceived(0).testName === "test that")
// No tagsToInclude set, FastAsLight excluded
class HFunSpec(val counts: ThreeCounts) extends PathFunSpec {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { counts.theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
it("test the other") { counts.theTestTheOtherCalled = true }
override def newInstance = new HFunSpec(counts)
}
val h = new HFunSpec(ThreeCounts(false, false, false))
val repH = new EventRecordingReporter
h.run(None, Args(repH, Stopper.default, Filter(None, Set("org.scalatest.FastAsLight")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repH.testIgnoredEventsReceived.isEmpty)
assert(h.counts.theTestThisCalled)
assert(h.counts.theTestThatCalled)
assert(h.counts.theTestTheOtherCalled)
assert(repH.testStartingEventsReceived.size === 2)
assert(repH.testStartingEventsReceived.exists(_.testName == "test that"))
assert(repH.testStartingEventsReceived.exists(_.testName == "test the other"))
// No tagsToInclude set, mytags.SlowAsMolasses excluded
class IFunSpec(val counts: ThreeCounts) extends PathFunSpec {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { counts.theTestThisCalled = true }
it("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
it("test the other") { counts.theTestTheOtherCalled = true }
override def newInstance = new IFunSpec(counts)
}
val i = new IFunSpec(ThreeCounts(false, false, false))
val repI = new EventRecordingReporter
i.run(None, Args(repI, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repI.testIgnoredEventsReceived.isEmpty)
assert(i.counts.theTestThisCalled)
assert(i.counts.theTestThatCalled)
assert(i.counts.theTestTheOtherCalled)
assert(repI.testStartingEventsReceived.size === 1)
assert(repI.testStartingEventsReceived(0).testName === "test the other")
// No tagsToInclude set, mytags.SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
class JFunSpec(val counts: ThreeCounts) extends PathFunSpec {
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { counts.theTestThisCalled = true }
ignore("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
it("test the other") { counts.theTestTheOtherCalled = true }
override def newInstance = new JFunSpec(counts)
}
val j = new JFunSpec(ThreeCounts(false, false, false))
val repJ = new TestIgnoredTrackingReporter
j.run(None, Args(repJ, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repJ.testIgnoredReceived)
assert(!j.counts.theTestThisCalled)
assert(!j.counts.theTestThatCalled)
assert(j.counts.theTestTheOtherCalled)
// Same as previous, except Ignore specifically mentioned in excludes set
class KFunSpec(val counts: ThreeCounts) extends PathFunSpec {
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { counts.theTestThisCalled = true }
ignore("test that", mytags.SlowAsMolasses) { counts.theTestThatCalled = true }
ignore("test the other") { counts.theTestTheOtherCalled = true }
override def newInstance = new KFunSpec(counts)
}
val k = new KFunSpec(ThreeCounts(false, false, false))
val repK = new TestIgnoredTrackingReporter
k.run(None, Args(repK, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repK.testIgnoredReceived)
assert(!k.counts.theTestThisCalled)
assert(!k.counts.theTestThatCalled)
assert(!k.counts.theTestTheOtherCalled)
}
"should return the correct test count from its expectedTestCount method" in {
class AFunSpec extends PathFunSpec {
it("test this") {}
it("test that") {}
override def newInstance = new AFunSpec
}
val a = new AFunSpec
assert(a.expectedTestCount(Filter()) === 2)
class BFunSpec extends PathFunSpec {
ignore("test this") {}
it("test that") {}
override def newInstance = new BFunSpec
}
val b = new BFunSpec
assert(b.expectedTestCount(Filter()) === 1)
class CFunSpec extends PathFunSpec {
it("test this", mytags.FastAsLight) {}
it("test that") {}
override def newInstance = new CFunSpec
}
val c = new CFunSpec
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) === 1)
class DFunSpec extends PathFunSpec {
it("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {}
it("test that", mytags.SlowAsMolasses) {}
it("test the other thing") {}
override def newInstance = new DFunSpec
}
val d = new DFunSpec
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 1)
assert(d.expectedTestCount(Filter()) === 3)
class EFunSpec extends PathFunSpec {
it("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {}
it("test that", mytags.SlowAsMolasses) {}
ignore("test the other thing") {}
// ignore("test the other thing") {}
override def newInstance = new EFunSpec
}
val e = new EFunSpec
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) === 1)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) === 1)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) === 0)
assert(e.expectedTestCount(Filter()) === 2)
val f = new Suites(a, b, c, d, e)
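      // a:2 + b:1 (one ignored) + c:2 + d:3 + e:2 (one ignored) = 10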
assert(f.expectedTestCount(Filter()) === 10)
}
"should generate a TestPending message when the test body is (pending)" in {
class AFunSpec extends PathFunSpec {
it("should do this") (pending)
it("should do that") {
assert(2 + 2 === 4)
}
it("should do something else") {
assert(2 + 2 === 4)
pending
}
override def newInstance = new AFunSpec
}
val a = new AFunSpec
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val tp = rep.testPendingEventsReceived
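      // "should do this" is declared with (pending) and "should do something else" ends in pending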
assert(tp.size === 2)
    }
    "should generate a test failure if a Throwable, or an Error other than the direct Error subtypes known in JDK 1.5 (excluding AssertionError), is thrown" in {
class AFunSpec extends PathFunSpec {
it("throws AssertionError") { throw new AssertionError }
it("throws plain old Error") { throw new Error }
it("throws Throwable") { throw new Throwable }
override def newInstance = new AFunSpec
}
val a = new AFunSpec
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val tf = rep.testFailedEventsReceived
assert(tf.size === 3)
}
"should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than AssertionError, causing Suites and Runs to abort." in {
class AFunSpec extends PathFunSpec {
        it("throws OutOfMemoryError") { throw new OutOfMemoryError }
override def newInstance = new AFunSpec
}
// val a = new AFunSpec
intercept[OutOfMemoryError] {
new AFunSpec
// a.run(None, Args(SilentReporter))
}
}
/*
"should send InfoProvided events with aboutAPendingTest set to true for info calls made from a test that is pending" in {
class AFunSpec extends PathFunSpec with GivenWhenThen {
it("should do something else") {
given("two integers")
          when("one is subtracted from the other")
then("the result is the difference between the two numbers")
pending
}
override def newInstance = new AFunSpec
}
val a = new AFunSpec
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val testPending = rep.testPendingEventsReceived
assert(testPending.size === 1)
val recordedEvents = testPending(0).recordedEvents
assert(recordedEvents.size === 3)
for (event <- recordedEvents) {
val ip = event.asInstanceOf[InfoProvided]
assert(ip.aboutAPendingTest.isDefined && ip.aboutAPendingTest.get)
}
}
"should send InfoProvided events with aboutAPendingTest set to false for info calls made from a test that is not pending" in {
class AFunSpec extends PathFunSpec with GivenWhenThen {
it("should do something else") {
given("two integers")
          when("one is subtracted from the other")
then("the result is the difference between the two numbers")
assert(1 + 1 === 2)
}
override def newInstance = new AFunSpec
}
val a = new AFunSpec
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val testSucceeded = rep.testSucceededEventsReceived
assert(testSucceeded.size === 1)
val recordedEvents = testSucceeded(0).recordedEvents
assert(recordedEvents.size === 3)
for (event <- recordedEvents) {
val ip = event.asInstanceOf[InfoProvided]
assert(ip.aboutAPendingTest.isDefined && !ip.aboutAPendingTest.get)
}
}
*/
}
"when failure happens" - {
"should fire TestFailed event with correct stack depth info when test failed" in {
class TestSpec extends FunSpec {
it("fail scenario") {
assert(1 === 2)
}
describe("a feature") {
it("nested fail scenario") {
assert(1 === 2)
}
}
override def newInstance = new TestSpec
}
val rep = new EventRecordingReporter
val s1 = new TestSpec
s1.run(None, Args(rep))
assert(rep.testFailedEventsReceived.size === 2)
assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get === "FunSpecSpec.scala")
assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get === thisLineNumber - 14)
assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get === "FunSpecSpec.scala")
assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get === thisLineNumber - 12)
    }
    "should generate TestRegistrationClosedException with correct stack depth info when it has an it nested inside an it" in {
class TestSpec extends FunSpec {
describe("a feature") {
it("a scenario") {
it("nested scenario") {
assert(1 === 2)
}
}
}
override def newInstance = new TestSpec
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size === 1)
assert(testFailedEvents(0).throwable.get.getClass() === classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" === trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get === thisLineNumber - 15)
assert(trce.message == Some("An it clause may not appear inside another it or they clause."))
    }
    "should generate TestRegistrationClosedException with correct stack depth info when it has an ignore nested inside an it" in {
class TestSpec extends FunSpec {
describe("a feature") {
it("a scenario") {
ignore("nested scenario") {
assert(1 === 2)
}
}
}
override def newInstance = new TestSpec
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size === 1)
assert(testFailedEvents(0).throwable.get.getClass() === classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" === trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get === thisLineNumber - 15)
assert(trce.message == Some("An ignore clause may not appear inside an it or a they clause."))
    }
    "should generate TestRegistrationClosedException with correct stack depth info when it has a they nested inside a they" in {
class TestSpec extends FunSpec {
describe("a feature") {
they("a scenario") {
they("nested scenario") {
assert(1 === 2)
}
}
}
override def newInstance = new TestSpec
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size === 1)
assert(testFailedEvents(0).throwable.get.getClass() === classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" === trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get === thisLineNumber - 15)
assert(trce.message == Some("A they clause may not appear inside another it or they clause."))
    }
    "should generate TestRegistrationClosedException with correct stack depth info when it has an ignore nested inside a they" in {
class TestSpec extends FunSpec {
describe("a feature") {
they("a scenario") {
ignore("nested scenario") {
assert(1 === 2)
}
}
}
override def newInstance = new TestSpec
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size === 1)
assert(testFailedEvents(0).throwable.get.getClass() === classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" === trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get === thisLineNumber - 15)
assert(trce.message == Some("An ignore clause may not appear inside an it or a they clause."))
}
"should generate NotAllowedException wrapping a TestFailedException when assert fails in scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
val a = 1
assert(a == 2)
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("FunSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 3)
assert(e.message == Some(FailureMessages.assertionShouldBePutInsideItOrTheyClauseNotDescribeClause))
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(causeThrowable.isInstanceOf[TestFailedException])
val cause = causeThrowable.asInstanceOf[TestFailedException]
assert("FunSpecSpec.scala" == cause.failedCodeFileName.get)
assert(cause.failedCodeLineNumber.get == thisLineNumber - 15)
assert(cause.message == Some(FailureMessages.didNotEqual(1, 2)))
}
"should generate NotAllowedException wrapping a TestCanceledException when assume fails in scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
val a = 1
assume(a == 2)
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("FunSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 3)
assert(e.message == Some(FailureMessages.assertionShouldBePutInsideItOrTheyClauseNotDescribeClause))
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(causeThrowable.isInstanceOf[TestCanceledException])
val cause = causeThrowable.asInstanceOf[TestCanceledException]
assert("FunSpecSpec.scala" == cause.failedCodeFileName.get)
assert(cause.failedCodeLineNumber.get == thisLineNumber - 15)
assert(cause.message == Some(FailureMessages.didNotEqual(1, 2)))
    }
    "should generate NotAllowedException wrapping a non-fatal RuntimeException thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new RuntimeException("on purpose")
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("FunSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 3)
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(e.message == Some(FailureMessages.exceptionWasThrownInDescribeClause(UnquotedString(causeThrowable.getClass.getName), "a feature")))
assert(causeThrowable.isInstanceOf[RuntimeException])
val cause = causeThrowable.asInstanceOf[RuntimeException]
assert(cause.getMessage == "on purpose")
}
"should propagate AnnotationFormatError when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new AnnotationFormatError("on purpose")
}
}
val e = intercept[AnnotationFormatError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate AWTError when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new AWTError("on purpose")
}
}
val e = intercept[AWTError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate CoderMalfunctionError when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new CoderMalfunctionError(new RuntimeException("on purpose"))
}
}
val e = intercept[CoderMalfunctionError] {
new TestSpec
}
assert(e.getMessage == "java.lang.RuntimeException: on purpose")
}
"should propagate FactoryConfigurationError when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new FactoryConfigurationError("on purpose")
}
}
val e = intercept[FactoryConfigurationError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate LinkageError when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new LinkageError("on purpose")
}
}
val e = intercept[LinkageError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate ThreadDeath when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new ThreadDeath
}
}
val e = intercept[ThreadDeath] {
new TestSpec
}
assert(e.getMessage == null)
}
"should propagate TransformerFactoryConfigurationError when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new TransformerFactoryConfigurationError("on purpose")
}
}
val e = intercept[TransformerFactoryConfigurationError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate VirtualMachineError when it is thrown inside scope" in {
class TestSpec extends FunSpec {
describe("a feature") {
throw new VirtualMachineError("on purpose") {}
}
}
val e = intercept[VirtualMachineError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
}
}
| SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/path/FunSpecSpec.scala | Scala | apache-2.0 | 45,423 |
// Generated by <a href="http://scalaxb.org/">scalaxb</a>.
package xmlschema
import masked.scalaxb
/**
Part 1 version: Id: structures.xsd,v 1.2 2004/01/15 11:34:25 ht Exp
Part 2 version: Id: datatypes.xsd,v 1.3 2004/01/23 18:11:13 ht Exp
*/
/**
This type is extended by almost all schema types
to allow attributes from other namespaces to be
added to user schemas.
*/
trait XOpenAttrsable {
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
/**
This type is extended by almost all schema types
to allow attributes from other namespaces to be
added to user schemas.
*/
case class XOpenAttrs(attributes: Map[String, scalaxb.DataRecord[Any]]) extends XOpenAttrsable
/**
This type is extended by all types which allow annotation
other than <schema> itself
*/
trait XAnnotatedable extends XOpenAttrsable {
val annotation: Option[xmlschema.XAnnotation]
val id: Option[String]
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
/**
This type is extended by all types which allow annotation
other than <schema> itself
*/
case class XAnnotated(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable
trait XFormChoice
object XFormChoice {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XFormChoice = value match {
case "qualified" => XQualified
case "unqualified" => XUnqualified
}
}
case object XQualified extends XFormChoice { override def toString = "qualified" }
case object XUnqualified extends XFormChoice { override def toString = "unqualified" }
trait XReducedDerivationControl
object XReducedDerivationControl {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XReducedDerivationControl = value match {
case "extension" => XExtensionValue2
case "restriction" => XRestrictionValue3
}
}
case object XExtensionValue2 extends XReducedDerivationControl { override def toString = "extension" }
case object XRestrictionValue3 extends XReducedDerivationControl { override def toString = "restriction" }
trait XTypeDerivationControl
object XTypeDerivationControl {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XTypeDerivationControl = value match {
case "extension" => XExtensionValue
case "restriction" => XRestrictionValue2
case "list" => XListValue2
case "union" => XUnionValue2
}
}
case object XExtensionValue extends XTypeDerivationControl { override def toString = "extension" }
case object XRestrictionValue2 extends XTypeDerivationControl { override def toString = "restriction" }
case object XListValue2 extends XTypeDerivationControl { override def toString = "list" }
case object XUnionValue2 extends XTypeDerivationControl { override def toString = "union" }
case class XSchema(xschemaoption: Seq[scalaxb.DataRecord[xmlschema.XSchemaOption]] = Nil,
xschemasequence1: Seq[xmlschema.XSchemaSequence1] = Nil,
targetNamespace: Option[java.net.URI] = None,
version: Option[String] = None,
finalDefault: String,
blockDefault: String,
attributeFormDefault: xmlschema.XFormChoice,
elementFormDefault: xmlschema.XFormChoice,
id: Option[String] = None,
xmllang: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XOpenAttrsable
trait XSchemaOption
case class XSchemaSequence1(xSchemaTopOption1: scalaxb.DataRecord[Any],
annotation: Seq[xmlschema.XAnnotation] = Nil)
trait XUse
object XUse {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XUse = value match {
case "prohibited" => XProhibited
case "optional" => XOptional
case "required" => XRequired
}
}
case object XProhibited extends XUse { override def toString = "prohibited" }
case object XOptional extends XUse { override def toString = "optional" }
case object XRequired extends XUse { override def toString = "required" }
trait XAttributable extends XAnnotatedable with XAttrDeclsOption1 {
val annotation: Option[xmlschema.XAnnotation]
val simpleType: Option[xmlschema.XLocalSimpleType]
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val typeValue: Option[javax.xml.namespace.QName]
val use: xmlschema.XUse
val default: Option[String]
val fixed: Option[String]
val form: Option[xmlschema.XFormChoice]
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XAttribute(annotation: Option[xmlschema.XAnnotation] = None,
simpleType: Option[xmlschema.XLocalSimpleType] = None,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
typeValue: Option[javax.xml.namespace.QName] = None,
use: xmlschema.XUse,
default: Option[String] = None,
fixed: Option[String] = None,
form: Option[xmlschema.XFormChoice] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAttributable
case class XTopLevelAttribute(annotation: Option[xmlschema.XAnnotation] = None,
simpleType: Option[xmlschema.XLocalSimpleType] = None,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
typeValue: Option[javax.xml.namespace.QName] = None,
use: xmlschema.XUse,
default: Option[String] = None,
fixed: Option[String] = None,
form: Option[xmlschema.XFormChoice] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAttributable with XSchemaTopOption
trait XComplexType extends XAnnotatedable {
val annotation: Option[xmlschema.XAnnotation]
val xComplexTypeModelOption3: scalaxb.DataRecord[xmlschema.XComplexTypeModelOption]
val id: Option[String]
val name: Option[String]
val mixed: Boolean
val abstractValue: Boolean
val finalValue: Option[String]
val block: Option[String]
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XTopLevelComplexType(annotation: Option[xmlschema.XAnnotation] = None,
xComplexTypeModelOption3: scalaxb.DataRecord[xmlschema.XComplexTypeModelOption],
id: Option[String] = None,
name: Option[String] = None,
mixed: Boolean,
abstractValue: Boolean,
finalValue: Option[String] = None,
block: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XComplexType with XRedefinableOption
case class XLocalComplexType(annotation: Option[xmlschema.XAnnotation] = None,
xComplexTypeModelOption3: scalaxb.DataRecord[xmlschema.XComplexTypeModelOption],
id: Option[String] = None,
name: Option[String] = None,
mixed: Boolean,
abstractValue: Boolean,
finalValue: Option[String] = None,
block: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XComplexType with XElementOption with XTopLevelElementOption with XLocalElementableOption with XNarrowMaxMinOption
trait XRestrictionTypable extends XAnnotatedable {
val annotation: Option[xmlschema.XAnnotation]
val xrestrictiontypableoption: Option[scalaxb.DataRecord[Any]]
val xAttrDeclsSequence4: xmlschema.XAttrDeclsSequence
val id: Option[String]
val base: javax.xml.namespace.QName
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XRestrictionType(annotation: Option[xmlschema.XAnnotation] = None,
xrestrictiontypableoption: Option[scalaxb.DataRecord[Any]] = None,
xAttrDeclsSequence4: xmlschema.XAttrDeclsSequence,
id: Option[String] = None,
base: javax.xml.namespace.QName,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XRestrictionTypable
trait XRestrictionTypableOption
case class XComplexRestrictionType(annotation: Option[xmlschema.XAnnotation] = None,
xrestrictiontypableoption: Option[scalaxb.DataRecord[Any]] = None,
xAttrDeclsSequence4: xmlschema.XAttrDeclsSequence,
id: Option[String] = None,
base: javax.xml.namespace.QName,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XRestrictionTypable with XComplexContentOption
trait XComplexRestrictionTypeOption
trait XExtensionTypable extends XAnnotatedable with XComplexContentOption {
val annotation: Option[xmlschema.XAnnotation]
val xTypeDefParticleOption3: Option[scalaxb.DataRecord[xmlschema.XTypeDefParticleOption]]
val xAttrDeclsSequence4: xmlschema.XAttrDeclsSequence
val id: Option[String]
val base: javax.xml.namespace.QName
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XExtensionType(annotation: Option[xmlschema.XAnnotation] = None,
xTypeDefParticleOption3: Option[scalaxb.DataRecord[xmlschema.XTypeDefParticleOption]] = None,
xAttrDeclsSequence4: xmlschema.XAttrDeclsSequence,
id: Option[String] = None,
base: javax.xml.namespace.QName,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XExtensionTypable
case class XComplexContent(annotation: Option[xmlschema.XAnnotation] = None,
xcomplexcontentoption: scalaxb.DataRecord[xmlschema.XComplexContentOption],
id: Option[String] = None,
mixed: Option[Boolean] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XComplexTypeModelOption
trait XComplexContentOption
case class XSimpleRestrictionType(annotation: Option[xmlschema.XAnnotation] = None,
xrestrictiontypableoption: Option[scalaxb.DataRecord[Any]] = None,
xAttrDeclsSequence4: xmlschema.XAttrDeclsSequence,
id: Option[String] = None,
base: javax.xml.namespace.QName,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XRestrictionTypable with XSimpleContentOption
trait XSimpleRestrictionTypeOption
case class XSimpleExtensionType(annotation: Option[xmlschema.XAnnotation] = None,
xTypeDefParticleOption3: Option[scalaxb.DataRecord[xmlschema.XTypeDefParticleOption]] = None,
xAttrDeclsSequence4: xmlschema.XAttrDeclsSequence,
id: Option[String] = None,
base: javax.xml.namespace.QName,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XExtensionTypable with XSimpleContentOption
case class XSimpleContent(annotation: Option[xmlschema.XAnnotation] = None,
xsimplecontentoption: scalaxb.DataRecord[xmlschema.XSimpleContentOption],
id: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XComplexTypeModelOption
trait XSimpleContentOption
/**
The element element can be used either
at the top level to define an element-type binding globally,
or within a content model to either reference a globally-defined
element or type or declare an element-type binding locally.
The ref form is not allowed at the top level.
*/
trait XElement extends XAnnotatedable {
val annotation: Option[xmlschema.XAnnotation]
val xelementoption: Option[scalaxb.DataRecord[xmlschema.XElementOption]]
val xIdentityConstraintOption4: Seq[scalaxb.DataRecord[xmlschema.XIdentityConstraintOption]]
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val typeValue: Option[javax.xml.namespace.QName]
val substitutionGroup: Option[javax.xml.namespace.QName]
val minOccurs: BigInt
val maxOccurs: String
val default: Option[String]
val fixed: Option[String]
val nillable: Boolean
val abstractValue: Boolean
val finalValue: Option[String]
val block: Option[String]
val form: Option[xmlschema.XFormChoice]
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
trait XElementOption
case class XTopLevelElement(annotation: Option[xmlschema.XAnnotation] = None,
xelementoption: Option[scalaxb.DataRecord[xmlschema.XElementOption]] = None,
xIdentityConstraintOption4: Seq[scalaxb.DataRecord[xmlschema.XIdentityConstraintOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
typeValue: Option[javax.xml.namespace.QName] = None,
substitutionGroup: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
default: Option[String] = None,
fixed: Option[String] = None,
nillable: Boolean,
abstractValue: Boolean,
finalValue: Option[String] = None,
block: Option[String] = None,
form: Option[xmlschema.XFormChoice] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XElement with XSchemaTopOption
trait XTopLevelElementOption
trait XLocalElementable extends XElement with XNestedParticleOption with XParticleOption {
val annotation: Option[xmlschema.XAnnotation]
val xelementoption: Option[scalaxb.DataRecord[xmlschema.XElementOption]]
val xIdentityConstraintOption4: Seq[scalaxb.DataRecord[xmlschema.XIdentityConstraintOption]]
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val typeValue: Option[javax.xml.namespace.QName]
val substitutionGroup: Option[javax.xml.namespace.QName]
val minOccurs: BigInt
val maxOccurs: String
val default: Option[String]
val fixed: Option[String]
val nillable: Boolean
val abstractValue: Boolean
val finalValue: Option[String]
val block: Option[String]
val form: Option[xmlschema.XFormChoice]
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XLocalElement(annotation: Option[xmlschema.XAnnotation] = None,
xelementoption: Option[scalaxb.DataRecord[xmlschema.XElementOption]] = None,
xIdentityConstraintOption4: Seq[scalaxb.DataRecord[xmlschema.XIdentityConstraintOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
typeValue: Option[javax.xml.namespace.QName] = None,
substitutionGroup: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
default: Option[String] = None,
fixed: Option[String] = None,
nillable: Boolean,
abstractValue: Boolean,
finalValue: Option[String] = None,
block: Option[String] = None,
form: Option[xmlschema.XFormChoice] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XLocalElementable
trait XLocalElementableOption
/**
group type for explicit groups, named top-level groups and
group references
*/
trait XGroup extends XAnnotatedable {
val annotation: Option[xmlschema.XAnnotation]
val xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]]
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val minOccurs: BigInt
val maxOccurs: String
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
trait XRealGroupable extends XGroup {
val annotation: Option[xmlschema.XAnnotation]
val xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]]
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val minOccurs: BigInt
val maxOccurs: String
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XRealGroup(annotation: Option[xmlschema.XAnnotation] = None,
xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XRealGroupable
trait XRealGroupableOption
case class XAll(annotation: Option[xmlschema.XAnnotation] = None,
xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAllable with XNamedGroupOption
case class XNamedGroup(annotation: Option[xmlschema.XAnnotation] = None,
xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XRealGroupable with XRedefinableOption
trait XNamedGroupOption
case class XGroupRef(annotation: Option[xmlschema.XAnnotation] = None,
xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XRealGroupable with XTypeDefParticleOption with XNestedParticleOption with XParticleOption
/**
group type for the three kinds of group
*/
trait XExplicitGroupable extends XGroup with XTypeDefParticleOption with XNestedParticleOption with XParticleOption with XRealGroupableOption {
val annotation: Option[xmlschema.XAnnotation]
val xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]]
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val minOccurs: BigInt
val maxOccurs: String
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
/**
group type for the three kinds of group
*/
case class XExplicitGroup(annotation: Option[xmlschema.XAnnotation] = None,
xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XExplicitGroupable
case class XSimpleExplicitGroup(annotation: Option[xmlschema.XAnnotation] = None,
xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XExplicitGroupable with XNamedGroupOption
trait XMinOccurs
object XMinOccurs {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XMinOccurs = value match {
case "0" => X0
case "1" => X1
}
}
case object X0 extends XMinOccurs { override def toString = "0" }
case object X1 extends XMinOccurs { override def toString = "1" }
trait XMaxOccurs
object XMaxOccurs {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XMaxOccurs = value match {
case "0" => X0Value
case "1" => X1Value
}
}
case object X0Value extends XMaxOccurs { override def toString = "0" }
case object X1Value extends XMaxOccurs { override def toString = "1" }
/** restricted max/min
*/
case class XNarrowMaxMin(annotation: Option[xmlschema.XAnnotation] = None,
xelementoption: Option[scalaxb.DataRecord[xmlschema.XElementOption]] = None,
xIdentityConstraintOption4: Seq[scalaxb.DataRecord[xmlschema.XIdentityConstraintOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
typeValue: Option[javax.xml.namespace.QName] = None,
substitutionGroup: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
default: Option[String] = None,
fixed: Option[String] = None,
nillable: Boolean,
abstractValue: Boolean,
finalValue: Option[String] = None,
block: Option[String] = None,
form: Option[xmlschema.XFormChoice] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XLocalElementable with XAllModelOption1
trait XNarrowMaxMinOption
trait XMinOccursType
object XMinOccursType {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XMinOccursType = value match {
case "0" => X0Value2
case "1" => X1Value2
}
}
case object X0Value2 extends XMinOccursType { override def toString = "0" }
case object X1Value2 extends XMinOccursType { override def toString = "1" }
trait XMaxOccursType
object XMaxOccursType {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XMaxOccursType = value match {
case "1" => X1Value3
}
}
case object X1Value3 extends XMaxOccursType { override def toString = "1" }
/**
Only elements allowed inside
*/
trait XAllable extends XExplicitGroupable with XTypeDefParticleOption with XParticleOption with XRealGroupableOption {
val annotation: Option[xmlschema.XAnnotation]
val xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]]
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val minOccurs: BigInt
val maxOccurs: String
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
/**
Only elements allowed inside
*/
case class XAllType(annotation: Option[xmlschema.XAnnotation] = None,
xParticleOption3: Seq[scalaxb.DataRecord[xmlschema.XParticleOption]] = Nil,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAllable
trait XProcessContents
object XProcessContents {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XProcessContents = value match {
case "skip" => XSkip
case "lax" => XLax
case "strict" => XStrict
}
}
case object XSkip extends XProcessContents { override def toString = "skip" }
case object XLax extends XProcessContents { override def toString = "lax" }
case object XStrict extends XProcessContents { override def toString = "strict" }
trait XWildcardable extends XAnnotatedable {
val annotation: Option[xmlschema.XAnnotation]
val id: Option[String]
val namespace: String
val processContents: xmlschema.XProcessContents
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XWildcard(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
namespace: String,
processContents: xmlschema.XProcessContents,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XWildcardable
case class XAny(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
namespace: String,
processContents: xmlschema.XProcessContents,
minOccurs: BigInt,
maxOccurs: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XWildcardable with XNestedParticleOption with XParticleOption
trait XAttributeGroup extends XAnnotatedable {
val annotation: Option[xmlschema.XAnnotation]
val xAttrDeclsSequence3: xmlschema.XAttrDeclsSequence
val id: Option[String]
val name: Option[String]
val ref: Option[javax.xml.namespace.QName]
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XNamedAttributeGroup(annotation: Option[xmlschema.XAnnotation] = None,
xAttrDeclsSequence3: xmlschema.XAttrDeclsSequence,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAttributeGroup with XRedefinableOption
case class XAttributeGroupRef(annotation: Option[xmlschema.XAnnotation] = None,
xAttrDeclsSequence3: xmlschema.XAttrDeclsSequence,
id: Option[String] = None,
name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAttributeGroup with XAttrDeclsOption1
case class XInclude(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
schemaLocation: java.net.URI,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XSchemaOption
case class XRedefine(xredefineoption: Seq[scalaxb.DataRecord[Any]] = Nil,
schemaLocation: java.net.URI,
id: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XOpenAttrsable with XSchemaOption
trait XRedefineOption
case class XImport(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
namespace: Option[java.net.URI] = None,
schemaLocation: Option[java.net.URI] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XSchemaOption
case class XSelector(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
xpath: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable
case class XField(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
xpath: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable
trait XKeybasable extends XAnnotatedable with XIdentityConstraintOption {
val annotation: Option[xmlschema.XAnnotation]
val selector: xmlschema.XSelector
val field: Seq[xmlschema.XField]
val id: Option[String]
val name: String
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XKeybase(annotation: Option[xmlschema.XAnnotation] = None,
selector: xmlschema.XSelector,
field: Seq[xmlschema.XField] = Nil,
id: Option[String] = None,
name: String,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XKeybasable
case class XKeyref(annotation: Option[xmlschema.XAnnotation] = None,
selector: xmlschema.XSelector,
field: Seq[xmlschema.XField] = Nil,
id: Option[String] = None,
name: String,
refer: javax.xml.namespace.QName,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XKeybasable with XIdentityConstraintOption
case class XNotation(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
name: String,
public: Option[String] = None,
system: Option[java.net.URI] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XSchemaTopOption
case class XAppinfo(mixed: Seq[scalaxb.DataRecord[Any]] = Nil,
source: Option[java.net.URI] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotationOption
case class XDocumentation(mixed: Seq[scalaxb.DataRecord[Any]] = Nil,
source: Option[java.net.URI] = None,
xmllang: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotationOption
case class XAnnotation(xannotationoption: Seq[scalaxb.DataRecord[xmlschema.XAnnotationOption]] = Nil,
id: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XOpenAttrsable with XSchemaOption with XRedefineOption
trait XAnnotationOption
/**
Not the real urType, but as close an approximation as we can
get in the XML representation
*/
case class XAnyType(mixed: Seq[scalaxb.DataRecord[Any]] = Nil,
attributes: Map[String, scalaxb.DataRecord[Any]])
trait XDerivationControl
object XDerivationControl {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XDerivationControl = value match {
case "substitution" => XSubstitution
case "extension" => XExtension
case "restriction" => XRestrictionValue
case "list" => XListValue
case "union" => XUnionValue
}
}
case object XSubstitution extends XDerivationControl { override def toString = "substitution" }
case object XExtension extends XDerivationControl { override def toString = "extension" }
case object XRestrictionValue extends XDerivationControl { override def toString = "restriction" }
case object XListValue extends XDerivationControl { override def toString = "list" }
case object XUnionValue extends XDerivationControl { override def toString = "union" }
trait XSimpleType extends XAnnotatedable {
val annotation: Option[xmlschema.XAnnotation]
val xSimpleDerivationOption3: scalaxb.DataRecord[xmlschema.XSimpleDerivationOption]
val id: Option[String]
val finalValue: Option[String]
val name: Option[String]
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XTopLevelSimpleType(annotation: Option[xmlschema.XAnnotation] = None,
xSimpleDerivationOption3: scalaxb.DataRecord[xmlschema.XSimpleDerivationOption],
id: Option[String] = None,
finalValue: Option[String] = None,
name: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XSimpleType with XRedefinableOption
case class XLocalSimpleType(annotation: Option[xmlschema.XAnnotation] = None,
xSimpleDerivationOption3: scalaxb.DataRecord[xmlschema.XSimpleDerivationOption],
id: Option[String] = None,
finalValue: Option[String] = None,
name: Option[String] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XSimpleType with XElementOption with XTopLevelElementOption with XLocalElementableOption with XNarrowMaxMinOption
/**
base attribute and simpleType child are mutually
exclusive, but one or other is required
*/
case class XRestriction(annotation: Option[xmlschema.XAnnotation] = None,
xSimpleRestrictionModelSequence3: xmlschema.XSimpleRestrictionModelSequence,
id: Option[String] = None,
base: Option[javax.xml.namespace.QName] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XSimpleDerivationOption
/**
itemType attribute and simpleType child are mutually
exclusive, but one or other is required
*/
case class XList(annotation: Option[xmlschema.XAnnotation] = None,
simpleType: Option[xmlschema.XLocalSimpleType] = None,
id: Option[String] = None,
itemType: Option[javax.xml.namespace.QName] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XSimpleDerivationOption
/**
memberTypes attribute must be non-empty or there must be
at least one simpleType child
*/
case class XUnion(annotation: Option[xmlschema.XAnnotation] = None,
simpleType: Seq[xmlschema.XLocalSimpleType] = Nil,
id: Option[String] = None,
memberTypes: Option[Seq[javax.xml.namespace.QName]] = None,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XAnnotatedable with XSimpleDerivationOption
trait XFacetable extends XAnnotatedable with XFacetsOption {
val annotation: Option[xmlschema.XAnnotation]
val id: Option[String]
val value: String
val fixed: Boolean
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XFacet(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
value: String,
fixed: Boolean,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XFacetable
trait XNoFixedFacetable extends XFacetable with XFacetsOption {
val annotation: Option[xmlschema.XAnnotation]
val id: Option[String]
val value: String
val fixed: Boolean
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XNoFixedFacet(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
value: String,
fixed: Boolean,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XNoFixedFacetable
trait XNumFacetable extends XFacetable with XFacetsOption {
val annotation: Option[xmlschema.XAnnotation]
val id: Option[String]
val value: String
val fixed: Boolean
val attributes: Map[String, scalaxb.DataRecord[Any]]
}
case class XNumFacet(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
value: String,
fixed: Boolean,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XNumFacetable
case class XTotalDigits(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
value: String,
fixed: Boolean,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XNumFacetable with XFacetsOption
trait XValue
object XValue {
def fromString(value: String, scope: scala.xml.NamespaceBinding): XValue = value match {
case "preserve" => XPreserve
case "replace" => XReplace
case "collapse" => XCollapse
}
}
case object XPreserve extends XValue { override def toString = "preserve" }
case object XReplace extends XValue { override def toString = "replace" }
case object XCollapse extends XValue { override def toString = "collapse" }
case class XWhiteSpace(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
value: String,
fixed: Boolean,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XFacetable with XFacetsOption
case class XPattern(annotation: Option[xmlschema.XAnnotation] = None,
id: Option[String] = None,
value: String,
fixed: Boolean,
attributes: Map[String, scalaxb.DataRecord[Any]]) extends XNoFixedFacetable with XFacetsOption
case class XSimpleRestrictionModelSequence(simpleType: Option[xmlschema.XLocalSimpleType] = None,
xFacetsOption2: Seq[scalaxb.DataRecord[xmlschema.XFacetsOption]] = Nil)
trait XFacetsOption
trait XSimpleDerivationOption
trait XIdentityConstraintOption
case class XAllModelSequence(annotation: Option[xmlschema.XAnnotation] = None,
xallmodeloption1: Seq[scalaxb.DataRecord[xmlschema.XNarrowMaxMin]] = Nil)
trait XAllModelOption1
trait XComplexTypeModelOption
case class XComplexTypeModelSequence1(xTypeDefParticleOption1: Option[scalaxb.DataRecord[xmlschema.XTypeDefParticleOption]] = None,
xAttrDeclsSequence2: xmlschema.XAttrDeclsSequence) extends XComplexTypeModelOption
case class XAttrDeclsSequence(xattrdeclsoption1: Seq[scalaxb.DataRecord[xmlschema.XAttrDeclsOption1]] = Nil,
anyAttribute: Option[xmlschema.XWildcardable] = None)
trait XAttrDeclsOption1
trait XParticleOption
trait XNestedParticleOption
trait XTypeDefParticleOption
trait XRedefinableOption
trait XSchemaTopOption
/**
for element, group and attributeGroup,
which both define and reference
*/
case class XDefRef(name: Option[String] = None,
ref: Option[javax.xml.namespace.QName] = None)
/**
for all particles
*/
case class XOccurs(minOccurs: BigInt,
maxOccurs: String)
| eed3si9n/scalaxb | cli/src_managed/xmlschema/xmlschema.scala | Scala | mit | 33,431 |
package c1.w6
object numberOfQueens extends App {
def queens(n: Int): Set[List[Int]] = {
def placeQueen(row: Int): Set[List[Int]] = {
if (row == 0) Set(Nil)
else {
for {
          candidate <- placeQueen(row - 1) // all safe placements for the first row - 1 rows
col <- 0 until n // for each column
if isSafe(row, col, candidate)
} yield col :: candidate
}
}
placeQueen(n)
}
def isSafe(cr: Int, col: Int, queens: List[Int]): Boolean = {
val row = queens.length
val queensWithRow = (row - 1 to 0 by -1) zip queens
val b = queensWithRow forall {
case (r, c) => col != c && math.abs(col - c) != row - r
}
println(s"row=$cr col=$col isSafe=$b queens=$queens")
b
}
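  // Worked example of isSafe: with queens = List(1) (a single queen already placed in
  // column 1), a queen for the next row is rejected in column 1 (same column) and in
  // columns 0 and 2 (diagonals), but accepted in column 3.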
def show(queens: List[Int]): String = {
val lines = for (col <- queens.reverse)
      yield Vector.fill(queens.length)("*").updated(col, "X ").mkString
    "\n" + (lines mkString "\n")
}
queens(4) foreach (q => println(show(q)))
} | lwo/lwo.github.io | src/main/scala/c1/w6/numberOfQueens.scala | Scala | gpl-3.0 | 982 |
package com.github.j5ik2o.reactive.redis.pool
import java.util.UUID
import akka.NotUsed
import akka.stream.scaladsl.Flow
import com.github.j5ik2o.reactive.redis.command.CommandRequestBase
import com.github.j5ik2o.reactive.redis.{ PeerConfig, RedisConnection }
import io.github.andrebeat.pool.Lease
import monix.eval.Task
import monix.execution.Scheduler
private[redis] final case class ScalaPoolConnection(underlying: Lease[RedisConnection]) extends RedisConnection {
private val underlyingCon = underlying.get()
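  // the leased connection is resolved once here; every member below delegates to it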
override def id: UUID = underlyingCon.id
override def peerConfig: PeerConfig = underlyingCon.peerConfig
override def shutdown(): Unit = underlyingCon.shutdown()
override def toFlow[C <: CommandRequestBase](parallelism: Int)(
implicit scheduler: Scheduler
): Flow[C, C#Response, NotUsed] = underlyingCon.toFlow(parallelism)
override def send[C <: CommandRequestBase](cmd: C): Task[cmd.Response] = underlyingCon.send(cmd)
}
| j5ik2o/reactive-redis | pool-scala/src/main/scala/com/github/j5ik2o/reactive/redis/pool/ScalaPoolConnection.scala | Scala | mit | 965 |
package org.stanoq.crawler
import java.util.Collections
import java.util.concurrent.{ConcurrentHashMap, TimeUnit}
import akka.actor.ActorSystem
import akka.event.Logging
import org.jsoup.nodes.Document
import org.jsoup.{Connection, HttpStatusException, Jsoup}
import org.stanoq.crawler.model.{ConfigProperties, Page}
import scala.collection.JavaConverters._
import scala.util.Try
class Crawler(config:ConfigProperties){
val logger = Logging(ActorSystem(), getClass)
val visitedPages = createSet[String]
private val domain:String = config.getDomain
val root: Page = new Page(domain, domain,0,0,0,createSet[Page])
def process(url: String="") = {
logger.info("Processing " + config.url + url)
crawl(config.url + url, 1, root)
root.statusCode = 200
this
}
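  // Usage sketch (ConfigProperties comes from the crawler model and carries the start url,
  // domain and depth limit):
  //   val crawler = new Crawler(config).process()
  //   crawler.root          // tree of crawled pages rooted at the configured domain
  //   crawler.visitedPages  // URLs that were fetched (or attempted)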
private def crawl(url: String, depth: Int, prev: Page) {
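    // skip URLs already seen; the first check also matches the URL minus its last character,
    // so e.g. ".../page" and ".../page/" are treated as the same page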
if (visitedPages.contains(url.substring(0,url.length-1)) || !visitedPages.add(url)) return
val (page,links) = getPage(url,prev)
if(page.statusCode!=200) return
prev.addChild(page)
logger.info(url + " "+links.size)
if(depth > config.depthLimit) return
links.par.foreach(link => crawl(link, depth + 1, page))
}
private def createSet[T] = Collections.newSetFromMap(new ConcurrentHashMap[T, java.lang.Boolean]).asScala
private def getPage(url: String, prev:Page): (Page,List[String]) = {
def parseLinksToVisit(doc: Document): List[String] ={
def predicate(l: String) = (!(l.trim.length<7 || l.startsWith("mailto"))) && l.contains(domain)
doc.select("a").iterator().asScala.toStream.map(_.attr("abs:href")).filter(predicate).toList
}
def getDocument(con: Connection):(Page,List[String]) = {
val time = System.nanoTime()
val res = con.execute()
val pageSize = res.bodyAsBytes().size
val timeToLoad = TimeUnit.MILLISECONDS.convert(System.nanoTime()-time, TimeUnit.NANOSECONDS)
val doc = res.parse()
      val links = parseLinksToVisit(doc)
      (new Page(url, doc.title(), 200, timeToLoad, pageSize, createSet[Page]), links)
}
(Try(Jsoup.connect(url).userAgent("Mozilla/5.0").timeout(30 * 1000)).map(getDocument).recover {
case e: HttpStatusException => logger.error(e.getStatusCode + " :: on " + url);
val errPage = new Page(url,e.getMessage,e.getStatusCode,4040,9000,createSet[Page])
prev.addChild(errPage); (errPage, List())
case e: Exception => logger.error(e.getMessage + " :: on " + url);
val errPage = new Page(url,e.getMessage,500,5000,9000,createSet[Page])
prev.addChild(errPage);(errPage, List())
}).get
}
}
| olka/stanoq | src/main/scala/org/stanoq/crawler/Crawler.scala | Scala | mit | 2,620 |
package lib
sealed abstract class SessionAttribute(val key: String)
object SessionAttribute {
case object LoginUser extends SessionAttribute("login_user")
case object Ref extends SessionAttribute("ref")
case object UploadPolicy extends SessionAttribute("upload_policy")
case object ResetPasswordToken extends SessionAttribute("reset_password_token")
} | atware/sharedocs | src/main/scala/lib/SessionAttribute.scala | Scala | mit | 361 |
package org.jetbrains.plugins.scala.failed.annotator
import com.intellij.lang.annotation.AnnotationHolder
import com.intellij.psi.{PsiErrorElement, PsiReference}
import org.jetbrains.plugins.scala.annotator.AnnotatorHolderMock
import org.jetbrains.plugins.scala.annotator.quickfix.ReportHighlightingErrorQuickFix
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScTypeElement, ScTypeElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScBlockExpr, ScExpression}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScPatternDefinition
import org.jetbrains.plugins.scala.lang.psi.types.ScTypeExt
import org.jetbrains.plugins.scala.lang.psi.types.api.ScTypePresentation
import org.jetbrains.plugins.scala.{PerfCycleTests, ScalaBundle}
import org.junit.Assert._
import org.junit.experimental.categories.Category
/**
  * User: Dmitry.Naydanov
  * Date: 23.03.16.
  */
@Category(Array(classOf[PerfCycleTests]))
class OverloadingTest extends ScalaLightCodeInsightFixtureTestAdapter {
  //TODO this class contains a fair amount of copy-pasted code, but refactoring isn't practical here as the class is to be removed soon
import org.jetbrains.plugins.scala.extensions._
protected def collectMessages(fileText: String) = {
myFixture.configureByText("dummy.scala", fileText)
val file = myFixture.getFile
val mock = new AnnotatorHolderMock(file)
assertEquals(Nil, file.depthFirst().filterByType[PsiErrorElement].map(_.getText).toList)
assertEquals(Nil, file.depthFirst().filterByType[PsiReference]
.filter(_.resolve == null).map(_.getElement.getText).toList)
file.depthFirst().foreach {
case it: ScPatternDefinition => annotate(it, mock, typeAware = true)
case _ =>
}
mock.annotations
}
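  // Each test below feeds a snippet to collectMessages and asserts the result is empty,
  // i.e. overload resolution produces no type-mismatch annotations for the pattern definitions.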
protected def annotate(element: ScPatternDefinition, holder: AnnotationHolder, typeAware: Boolean): Unit = {
for (expr <- element.expr; element <- element.children.findByType[ScTypeElement])
checkConformance(expr, element, holder)
}
private def checkConformance(expression: ScExpression, typeElement: ScTypeElement, holder: AnnotationHolder) {
expression.getTypeAfterImplicitConversion().tr.foreach {actual =>
val expected = typeElement.calcType
if (!actual.conforms(expected)) {
val expr = expression match {
case b: ScBlockExpr => b.getRBrace.map(_.getPsi).getOrElse(b)
case _ => expression
}
val (actualText, expText) = ScTypePresentation.different(actual, expected)
val annotation = holder.createErrorAnnotation(expr,
ScalaBundle.message("type.mismatch.found.required", actualText, expText))
annotation.registerFix(ReportHighlightingErrorQuickFix)
}
}
}
def testSCL9908(): Unit = assert(
collectMessages(
"""
|class Test {
| def foo(s: String, args: Any*) = println("foo(s, args)")
| def foo(x: Any) = println("foo(x)")
|
| def func(args: Array[String]) = {
| foo("Hello") // red code; 'foo(s, args)' with scalac
| }
|}
""".stripMargin).isEmpty
)
def testSCL7442(): Unit = assert(
collectMessages(
"""
|class Test {
| def set(value: Any) : Unit = {
| val (a, b, c, d) = value.asInstanceOf[(Int, Int, Int, Int)]
| set(a, b, c, d)
| }
| def set(aValue: Int, bValue: Int, cValue: Int, dValue: Int) = {
| //...
| }
| (set _).tupled((1, 2, 3, 4))
|}
""".stripMargin).isEmpty
)
def testSCL10158(): Unit = assert(
collectMessages(
"""
|class Test {
| val lock = new AnyRef
| class Test {
| def run: Unit = this.synchronized(println("sync"))
| def synchronized[T](exec: => T): Unit = lock.synchronized(exec)
| }
|}
""".stripMargin).isEmpty
)
def testSCL10183(): Unit = assert(
collectMessages(
"""
|class MyClass {
| def foo[T](): T = ???
|
| val value = foo[MyTrait]
| value.get _
|}
|
|trait MyTrait {
| def get() = ???
| def get[A](arg: Any => Any) = ???
|}
""".stripMargin).isEmpty
)
def testSCL10295(): Unit = assert(
collectMessages(
"""
|import java.lang.reflect.Field
|import scala.collection.mutable
|
|class Test {
|
| def instanceFieldsOf(v: AnyRef): Array[Field] = ???
| def instanceFieldsOf(v: AnyRef,
| cache: mutable.Map[Class[_], Array[Field]],
| newFieldsHandler: Field => Unit = v => ())
| : Array[Field] = ???
|
| def valueAndInstanceFieldTuplesOf(v: AnyRef,
| cache: mutable.Map[Class[_], Array[Field]],
| newFieldsHandler: Field => Unit = v => ())
| : Stream[(AnyRef, Field)] = {
| val fields: Array[Field] = this.instanceFieldsOf(v, cache, newFieldsHandler)
| fields.toStream.map { f => (f.get(v), f) }
| }
|}
""".stripMargin).isEmpty
)
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/failed/annotator/OverloadingTest.scala | Scala | apache-2.0 | 5,378 |
package net.mm.composer
import com.twitter.finatra.{Controller, ResponseBuilder}
import com.twitter.logging.Logger
import com.twitter.util.Future
import net.mm.composer.properties.Property
import net.mm.composer.relations.RelationJsonComposer
trait CompositionResponseBuilder {
self: Controller =>
protected def relationComposer: RelationJsonComposer
implicit class CompositionSupport(render: ResponseBuilder) {
def composedJson[T](obj: Any)(implicit properties: Seq[Property], m: Manifest[T]): Future[ResponseBuilder] = composedJson(obj, m.runtimeClass)
def composedJson(obj: Any, clazz: Class[_])(implicit properties: Seq[Property]): Future[ResponseBuilder] = {
relationComposer.compose(obj, clazz)(properties)
.map(render.json)
.onFailure(Logger.get.warning(_, "Relation composition failed"))
}
}
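  // Hypothetical route usage (the route, User type and loadUser are illustrative only; an
  // implicit Seq[Property] describing the requested fields must be in scope):
  //   get("/users/:id") { request =>
  //     render.composedJson[User](loadUser(request.routeParams("id")))
  //   }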
}
| mosche/finatra-composition-proxy | composition-proxy/src/main/scala/net/mm/composer/CompositionResponseBuilder.scala | Scala | mit | 851 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.descriptors
import org.apache.flink.table.descriptors.StreamTableDescriptorValidator.{UPDATE_MODE, UPDATE_MODE_VALUE_APPEND, UPDATE_MODE_VALUE_RETRACT, UPDATE_MODE_VALUE_UPSERT}
import org.apache.flink.util.Preconditions
import org.junit.Assert.assertEquals
import org.junit.Test
import scala.collection.JavaConverters._
abstract class DescriptorTestBase {
/**
* Returns a set of valid descriptors.
* This method is implemented in both Scala and Java.
*/
def descriptors(): java.util.List[Descriptor]
/**
* Returns a set of properties for each valid descriptor.
* This code is implemented in both Scala and Java.
*/
def properties(): java.util.List[java.util.Map[String, String]]
/**
* Returns a validator that can validate all valid descriptors.
*/
def validator(): DescriptorValidator
@Test
def testValidation(): Unit = {
val d = descriptors().asScala
val p = properties().asScala
Preconditions.checkArgument(d.length == p.length)
d.zip(p).foreach { case (desc, props) =>
verifyProperties(desc, props.asScala.toMap)
}
}
def verifyProperties(descriptor: Descriptor, expected: Map[String, String]): Unit = {
val normProps = new DescriptorProperties
descriptor.addProperties(normProps)
// test produced properties
assertEquals(expected, normProps.asMap.asScala.toMap)
// test validation logic
validator().validate(normProps)
}
def addPropertyAndVerify(
descriptor: Descriptor,
property: String,
invalidValue: String): Unit = {
val properties = new DescriptorProperties
descriptor.addProperties(properties)
properties.unsafePut(property, invalidValue)
validator().validate(properties)
}
def removePropertyAndVerify(descriptor: Descriptor, removeProperty: String): Unit = {
val properties = new DescriptorProperties
descriptor.addProperties(properties)
properties.unsafeRemove(removeProperty)
validator().validate(properties)
}
}
class TestTableDescriptor(connector: ConnectorDescriptor)
extends TableDescriptor
with SchematicDescriptor[TestTableDescriptor]
with StreamableDescriptor[TestTableDescriptor] {
private var formatDescriptor: Option[FormatDescriptor] = None
private var schemaDescriptor: Option[Schema] = None
private var updateMode: Option[String] = None
override private[flink] def addProperties(properties: DescriptorProperties): Unit = {
connector.addProperties(properties)
formatDescriptor.foreach(_.addProperties(properties))
schemaDescriptor.foreach(_.addProperties(properties))
updateMode.foreach(mode => properties.putString(UPDATE_MODE, mode))
}
override def withFormat(format: FormatDescriptor): TestTableDescriptor = {
this.formatDescriptor = Some(format)
this
}
override def withSchema(schema: Schema): TestTableDescriptor = {
this.schemaDescriptor = Some(schema)
this
}
override def inAppendMode(): TestTableDescriptor = {
updateMode = Some(UPDATE_MODE_VALUE_APPEND)
this
}
override def inRetractMode(): TestTableDescriptor = {
updateMode = Some(UPDATE_MODE_VALUE_RETRACT)
this
}
override def inUpsertMode(): TestTableDescriptor = {
updateMode = Some(UPDATE_MODE_VALUE_UPSERT)
this
}
}
| zhangminglei/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/descriptors/DescriptorTestBase.scala | Scala | apache-2.0 | 4,122 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.legacy
import wvlet.airspec.AirSpec
import wvlet.log.LogSupport
import wvlet.airframe._
/**
*/
class AssistedInjectionTest extends AirSpec {
import AssistedInjectionTest._
test("support assisted injection") {
newSilentDesign
.bind[MyService].toInstance("hello")
.withSession { session =>
val p = session.build[NamedServiceProvider]
val a1 = p.provider("A1", session)
val a2 = p.provider("A2", session)
a1.name shouldBe "A1"
a2.name shouldBe "A2"
a1.service shouldBe "hello"
a2.service shouldBe "hello"
val a3 = assistedInjector("A3", session)
a3.name shouldBe "A3"
a3.service shouldBe "hello"
}
}
}
object AssistedInjectionTest extends LogSupport {
type MyService = String
trait NamedService {
val name: String
val service = bind[MyService]
}
trait NamedServiceProvider {
val provider = (givenName: String, ss: Session) =>
new NamedService with DISupport {
override def session: Session = ss
val name: String = givenName
}
}
def assistedInjector(serviceName: String, ss: Session): NamedService =
new NamedService with DISupport {
override def session: Session = ss
val name: String = serviceName
}
}
| wvlet/airframe | airframe-di/src/test/scala-2/wvlet/airframe/legacy/AssistedInjectionTest.scala | Scala | apache-2.0 | 1,904 |
package io.hydrosphere.mist.jobs.resolvers
import java.nio.file.{Files, Paths}
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.scaladsl.Flow
import org.apache.commons.codec.digest.DigestUtils
import org.scalatest.{FunSuite, Matchers}
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, Promise}
class MavenArtifactResolverTest extends FunSuite with Matchers {
test("maven artifact") {
val remote = MavenArtifact("io.hydrosphere", "mist", "0.0.1")
remote.jarName shouldBe "mist-0.0.1.jar"
remote.jarPath shouldBe "io/hydrosphere/mist/0.0.1/mist-0.0.1.jar"
}
test("construct resolver from path") {
val path = "mvn://http://localhost:8081/artifactory/releases :: io.hydrosphere % mist_2.10 % 0.0.1"
val resolver = MavenArtifactResolver.fromPath(path)
resolver.repoUrl shouldBe "http://localhost:8081/artifactory/releases"
resolver.artifact shouldBe MavenArtifact("io.hydrosphere", "mist_2.10", "0.0.1")
}
test("resolver over http") {
import akka.http.scaladsl.model.StatusCodes._
// maven-like repository mock
val routes = Flow[HttpRequest].map { request =>
val uri = request.uri.toString()
if (uri.endsWith(".jar")) {
HttpResponse(status = OK, entity = "JAR CONTENT")
} else if (uri.endsWith(".sha1")) {
val data = DigestUtils.sha1Hex("JAR CONTENT")
HttpResponse(status = OK, entity = data)
} else {
HttpResponse(status = NotFound, entity = s"Not found ${request.uri}")
}
}
val future = MockHttpServer.onServer(routes, binding => {
val port = binding.localAddress.getPort
val url = s"http://localhost:$port/artifactory/libs-release-local"
val artifact = MavenArtifact("mist_examples", "mist_examples_2.10", "0.10.0")
val resolver = MavenArtifactResolver(url, artifact, "target")
resolver.resolve()
})
val file = Await.result(future, Duration.Inf)
file.exists() shouldBe true
val content = Files.readAllBytes(Paths.get(file.getAbsolutePath))
new String(content) shouldBe "JAR CONTENT"
}
object MockHttpServer {
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.stream.ActorMaterializer
import akka.util.Timeout
import scala.concurrent.duration._
def onServer[A](
routes: Flow[HttpRequest, HttpResponse, Unit],
f: (Http.ServerBinding) => A): Future[A] = {
implicit val system = ActorSystem("mock-http-cli")
implicit val materializer = ActorMaterializer()
implicit val executionContext = system.dispatcher
implicit val timeout = Timeout(1.seconds)
val binding = Http().bindAndHandle(routes, "localhost", 0)
val close = Promise[Http.ServerBinding]
close.future
.flatMap(binding => binding.unbind())
.onComplete(_ => {
system.shutdown()
system.awaitTermination()
})
val result = binding.flatMap(binding => {
try{
Future.successful(f(binding))
} catch {
case e: Throwable =>
Future.failed(e)
} finally {
close.success(binding)
}
})
result
}
}
}
| KineticCookie/mist | src/test/scala/io/hydrosphere/mist/jobs/resolvers/MavenArtifactResolverTest.scala | Scala | apache-2.0 | 3,236 |
package com.mesosphere.http
import cats.instances.list._
import cats.instances.try_._
import cats.syntax.traverse._
import scala.util.Success
import scala.util.Try
object CompoundMediaTypeParser {
def parse(s: String): Try[CompoundMediaType] = {
s.split(',').toList.filterNot(_.trim.isEmpty) match {
case Nil => Success(new CompoundMediaType(Set.empty))
case mts =>
mts.map(MediaType.parse)
.sequence
.map { mediaTypes =>
CompoundMediaType(backfillParams(mediaTypes)._2.toSet)
}
}
}
/**
* This method implements the logic necessary to "propagate parameters" to all MediaTypes specified as part of
* an Accept header. Since we have a concrete type that represents Media Type we need to "back fill" the parameters
* specified on media types that come later in the string to those earlier in the string where those earlier
* media types do not have parameters defined.
*
*
   * For example, given the Accept header value of `application/xml;q=0.8,application/json,application/x-protobuf;q=0.9`
* We will end up with three media types:
* * application/xml;q=0.8
* * application/json;q=0.9
* * application/x-protobuf;q=0.9
*
* @see https://tools.ietf.org/html/rfc7231#section-5.3.2 for full details on the spec for the Accept header
* and content negotiation.
*/
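  // Illustrative walk-through of the back-filling described above (a sketch, not part of the
  // original sources):
  //   parse("application/xml;q=0.8,application/json,application/x-protobuf;q=0.9")
  //   // backfillParams walks the list right to left:
  //   //   application/x-protobuf;q=0.9 -> keeps its own params, passes Map("q" -> "0.9") up
  //   //   application/json             -> has no params, inherits Map("q" -> "0.9")
  //   //   application/xml;q=0.8        -> keeps its own params, passes Map("q" -> "0.8") up
  //   // which yields the three media types listed in the Scaladoc above.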
private[this] def backfillParams(
mts: List[MediaType]
): (Map[String, String], List[MediaType]) = {
mts match {
case Nil => (Map.empty, Nil)
case x :: xs =>
// walk down the list of media types
backfillParams(xs) match {
case (m, l) =>
if (x.parameters.isEmpty) {
// if the current media type doesn't have parameters set on it set it to be the parameters returned
// from the next media type in the list
m -> (x.copy(parameters = m) :: l)
} else {
// if the current media type does have parameters set on it, leave them intact and pass
// up for previous media type in the list
x.parameters -> (x :: l)
}
}
}
}
}
| dcos/cosmos | cosmos-common/src/main/scala/com/mesosphere/http/CompoundMediaTypeParser.scala | Scala | apache-2.0 | 2,196 |
/*
Copyright 2010 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package net.gumbix.hl7dsl.DSL
import org.hl7.rim.{RimObjectFactory, Supply}
import org.hl7.types._
/**
* Wrapper Class for RIM Class "Supply"
* @author Ahmet Gül ([email protected])
*/
class SupplyDSL(supply: Supply) extends ActDSL(supply) {
def this() = {
this (RimObjectFactory.getInstance.createRimObject("Supply").asInstanceOf[Supply])
}
/**
* @return PQ
*/
def quantity: PQ = supply.getQuantity
def quantity_=(v: PQ) {
supply.setQuantity(v)
}
/**
* @return IVL[TS]
*/
def expectedUseTime: IVL[TS] = supply.getExpectedUseTime
def expectedUseTime_=(v: IVL[TS]) {
supply.setExpectedUseTime(v)
}
} | markusgumbel/dshl7 | core/src/main/scala/net/gumbix/hl7dsl/DSL/SupplyDSL.scala | Scala | apache-2.0 | 1,263 |
package decoupledwithfactory
trait MessageProvider {
def getMessage: String
} | BBK-PiJ-2015-67/sdp-portfolio | exercises/week02/di-scala/src/decoupledwithfactory/MessageProvider.scala | Scala | unlicense | 80 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.elastic6
import java.nio.ByteBuffer
import com.datamountaineer.streamreactor.connect.json.SimpleJsonConverter
import com.fasterxml.jackson.databind.JsonNode
import com.landoop.connect.sql.StructSql._
import com.landoop.json.sql.JacksonJson
import com.landoop.json.sql.JsonSql._
import com.landoop.sql.Field
import com.typesafe.scalalogging.StrictLogging
import org.apache.kafka.connect.data.{Schema, Struct}
import scala.util.{Failure, Success, Try}
private object TransformAndExtractPK extends StrictLogging {
lazy val simpleJsonConverter = new SimpleJsonConverter()
def apply(fields: Seq[Field],
ignoredFields: Seq[Field],
primaryKeysPaths: Seq[Vector[String]],
schema: Schema,
value: Any,
withStructure: Boolean): (JsonNode, Seq[Any]) = {
def raiseException(msg: String, t: Throwable) = throw new IllegalArgumentException(msg, t)
if (value == null) {
if (schema == null || !schema.isOptional) {
raiseException("Null value is not allowed.", null)
}
else null
} else {
if (schema != null) {
schema.`type`() match {
case Schema.Type.BYTES =>
//we expected to be json
val array = value match {
case a: Array[Byte] => a
case b: ByteBuffer => b.array()
              case other => raiseException(s"Invalid payload: $other for schema Schema.BYTES.", null)
}
Try(JacksonJson.mapper.readTree(array)) match {
case Failure(e) => raiseException("Invalid json.", e)
case Success(json) =>
Try(json.sql(fields, !withStructure)) match {
case Failure(e) => raiseException(s"A KCQL exception occurred. ${e.getMessage}", e)
case Success(jn) =>
(jn, primaryKeysPaths.map(PrimaryKeyExtractor.extract(json, _)))
}
}
case Schema.Type.STRING =>
//we expected to be json
Try(JacksonJson.asJson(value.asInstanceOf[String])) match {
case Failure(e) => raiseException("Invalid json", e)
case Success(json) =>
Try(json.sql(fields, !withStructure)) match {
case Success(jn) => (jn, primaryKeysPaths.map(PrimaryKeyExtractor.extract(json, _)))
case Failure(e) => raiseException(s"A KCQL exception occurred.${e.getMessage}", e)
}
}
case Schema.Type.STRUCT =>
val struct = value.asInstanceOf[Struct]
Try(struct.sql(fields, !withStructure)) match {
case Success(s) =>
(simpleJsonConverter.fromConnectData(s.schema(), s), primaryKeysPaths.map(PrimaryKeyExtractor.extract(struct, _)))
case Failure(e) => raiseException(s"A KCQL error occurred.${e.getMessage}", e)
}
          case other => raiseException(s"Can't transform Schema type: $other.", null)
}
} else {
//we can handle java.util.Map (this is what JsonConverter can spit out)
value match {
case m: java.util.Map[_, _] =>
val map = m.asInstanceOf[java.util.Map[String, Any]]
val jsonNode: JsonNode = JacksonJson.mapper.valueToTree(map)
Try(jsonNode.sql(fields, !withStructure)) match {
case Success(j) => (j, primaryKeysPaths.map(PrimaryKeyExtractor.extract(jsonNode, _)))
case Failure(e) => raiseException(s"A KCQL exception occurred.${e.getMessage}", e)
}
case s: String =>
Try(JacksonJson.asJson(value.asInstanceOf[String])) match {
case Failure(e) => raiseException("Invalid json.", e)
case Success(json) =>
Try(json.sql(fields, !withStructure)) match {
case Success(jn) => (jn, primaryKeysPaths.map(PrimaryKeyExtractor.extract(json, _)))
case Failure(e) => raiseException(s"A KCQL exception occurred.${e.getMessage}", e)
}
}
case b: Array[Byte] =>
Try(JacksonJson.mapper.readTree(b)) match {
case Failure(e) => raiseException("Invalid json.", e)
case Success(json) =>
Try(json.sql(fields, !withStructure)) match {
case Failure(e) => raiseException(s"A KCQL exception occurred. ${e.getMessage}", e)
case Success(jn) => (jn, primaryKeysPaths.map(PrimaryKeyExtractor.extract(json, _)))
}
}
//we take it as String
case other => raiseException(s"Value:$other is not handled!", null)
}
}
}
}
}
| datamountaineer/stream-reactor | kafka-connect-elastic6/src/main/scala/com/datamountaineer/streamreactor/connect/elastic6/TransformAndExtractPK.scala | Scala | apache-2.0 | 5,351 |
package sativum
import peapod.Task
abstract class Dag {
lazy val name: String = this.getClass.getName
protected val sativum: Sativum
var endpoints: List[Task[_]] = Nil
protected val waitTime = 60000
def endpoint (t: Task[_]) = {
endpoints = endpoints :+ t
}
/**
   * Returns whether all peas in this Dag are ready; this can only be false when the Dag contains Sensor Tasks.
*
*/
def ready(): Boolean = {
endpoints.map(sativum(_)).forall{
case s: SativumPea[_] => s.ready()
case _ => true
}
}
def run() {
endpoints.map(sativum(_))
while(! ready()) {
Thread.sleep(waitTime)
}
endpoints.flatMap(_.parents).foreach(_.delete())
endpoints.par.map(sativum(_).get())
}
def view(): String = {
peapod.Util.mindfulmachinesDotLink(sativum.dotFormatDiagram())
}
}
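// Hypothetical concrete Dag (a sketch, not part of the original sources; `MySativum`,
// `extractTask` and `loadTask` are invented names for a Sativum instance and two Tasks):
//   class DailyLoad extends Dag {
//     protected val sativum = MySativum
//     endpoint(loadTask) // loadTask is assumed to depend on extractTask via Task parents
//   }
//   // `new DailyLoad().run()` blocks until all SativumPea endpoints report ready(),
//   // deletes the parents' previous output, then materialises each endpoint in parallel.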
| teachingmachines/sativum | src/main/scala/sativum/Dag.scala | Scala | mit | 840 |
package breeze.linalg
import breeze.benchmark.{MyRunner, BreezeBenchmark}
import breeze.linalg.operators.DenseVectorSupportMethods
import spire.syntax.cfor._
/**
* Created by dlwh on 8/14/15.
*/
class DenseAxpyBenchmark extends BreezeBenchmark {
assert(usingNatives)
val dv, dv2 = DenseVector.rand(5)
def timeSmallDVAxpy(reps: Int) = {
var sum = 0.0
cforRange(0 until reps) { rep =>
axpy(0.042, dv, dv2)
}
dv2
}
def timeSmallDVInlineRange(reps: Int) = {
cforRange(0 until reps) { rep =>
val ad = dv.data
val bd = dv2.data
cforRange(0 until dv.length) { i =>
bd(i) += 0.042 * ad(i)
}
}
dv2
}
def timeSmallDVScaleAddInline(reps: Int) = {
cforRange(0 until reps) { rep =>
dv(0) += dv2(0) * 0.042
dv(1) += dv2(1) * 0.042
dv(2) += dv2(2) * 0.042
dv(3) += dv2(3) * 0.042
dv(4) += dv2(4) * 0.042
}
dv
}
val largeDV, largeDV2 = DenseVector.rand(400)
val largeDM, largeDM2 = DenseMatrix.rand(20, 20)
def timeLargeDMAddInPlace(reps: Int) = {
cforRange(0 until reps) { rep =>
largeDM += largeDM2
}
}
def timeLargeDVAddInPlace(reps: Int) = {
cforRange(0 until reps) { rep =>
largeDV += largeDV2
}
}
}
object DenseAxpyBenchmark extends MyRunner(classOf[DenseAxpyBenchmark])
object DenseAxpyX {
def main(args: Array[String]):Unit = {
(new DenseAxpyBenchmark).timeSmallDVScaleAddInline(44400000)
}
}
| wstcpyt/breeze | benchmark/src/main/scala/breeze/linalg/DenseAxpyBenchmark.scala | Scala | apache-2.0 | 1,470 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.io.IOException
import java.net._
import java.nio.channels._
import java.nio.channels.{Selector => NSelector}
import java.util.concurrent._
import java.util.concurrent.atomic._
import com.yammer.metrics.core.Gauge
import kafka.cluster.{BrokerEndPoint, EndPoint}
import kafka.common.KafkaException
import kafka.metrics.KafkaMetricsGroup
import kafka.security.CredentialProvider
import kafka.server.KafkaConfig
import kafka.utils._
import org.apache.kafka.common.Reconfigurable
import org.apache.kafka.common.memory.{MemoryPool, SimpleMemoryPool}
import org.apache.kafka.common.metrics._
import org.apache.kafka.common.metrics.stats.Meter
import org.apache.kafka.common.network.{ChannelBuilder, ChannelBuilders, KafkaChannel, ListenerName, Selectable, Send, Selector => KSelector}
import org.apache.kafka.common.requests.{RequestContext, RequestHeader}
import org.apache.kafka.common.security.auth.SecurityProtocol
import org.apache.kafka.common.utils.{KafkaThread, LogContext, Time}
import org.slf4j.event.Level
import scala.collection._
import JavaConverters._
import scala.collection.mutable.{ArrayBuffer, Buffer}
import scala.util.control.ControlThrowable
/**
* An NIO socket server. The threading model is
* 1 Acceptor thread that handles new connections
* Acceptor has N Processor threads that each have their own selector and read requests from sockets
* M Handler threads that handle requests and produce responses back to the processor threads for writing.
*/
class SocketServer(val config: KafkaConfig, val metrics: Metrics, val time: Time, val credentialProvider: CredentialProvider) extends Logging with KafkaMetricsGroup {
private val maxQueuedRequests = config.queuedMaxRequests
private val maxConnectionsPerIp = config.maxConnectionsPerIp
private val maxConnectionsPerIpOverrides = config.maxConnectionsPerIpOverrides
private val logContext = new LogContext(s"[SocketServer brokerId=${config.brokerId}] ")
this.logIdent = logContext.logPrefix
private val memoryPoolSensor = metrics.sensor("MemoryPoolUtilization")
private val memoryPoolDepletedPercentMetricName = metrics.metricName("MemoryPoolAvgDepletedPercent", "socket-server-metrics")
private val memoryPoolDepletedTimeMetricName = metrics.metricName("MemoryPoolDepletedTimeTotal", "socket-server-metrics")
memoryPoolSensor.add(new Meter(TimeUnit.MILLISECONDS, memoryPoolDepletedPercentMetricName, memoryPoolDepletedTimeMetricName))
private val memoryPool = if (config.queuedMaxBytes > 0) new SimpleMemoryPool(config.queuedMaxBytes, config.socketRequestMaxBytes, false, memoryPoolSensor) else MemoryPool.NONE
val requestChannel = new RequestChannel(maxQueuedRequests)
private val processors = new ConcurrentHashMap[Int, Processor]()
private var nextProcessorId = 0
private[network] val acceptors = new ConcurrentHashMap[EndPoint, Acceptor]()
private var connectionQuotas: ConnectionQuotas = _
private var stoppedProcessingRequests = false
/**
* Start the socket server. Acceptors for all the listeners are started. Processors
* are started if `startupProcessors` is true. If not, processors are only started when
* [[kafka.network.SocketServer#startProcessors()]] is invoked. Delayed starting of processors
* is used to delay processing client connections until server is fully initialized, e.g.
* to ensure that all credentials have been loaded before authentications are performed.
* Acceptors are always started during `startup` so that the bound port is known when this
* method completes even when ephemeral ports are used. Incoming connections on this server
* are processed when processors start up and invoke [[org.apache.kafka.common.network.Selector#poll]].
*
* @param startupProcessors Flag indicating whether `Processor`s must be started.
*/
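  // Illustrative (hypothetical) start-up sequence for the delayed-processor case described above;
  // `loadCredentials()` is an invented placeholder for broker initialisation work:
  //   socketServer.startup(startupProcessors = false) // bind listener ports only
  //   loadCredentials()                               // finish initialisation
  //   socketServer.startProcessors()                  // begin processing client connections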
def startup(startupProcessors: Boolean = true) {
this.synchronized {
connectionQuotas = new ConnectionQuotas(maxConnectionsPerIp, maxConnectionsPerIpOverrides)
createAcceptorAndProcessors(config.numNetworkThreads, config.listeners)
if (startupProcessors) {
startProcessors()
}
}
newGauge("NetworkProcessorAvgIdlePercent",
new Gauge[Double] {
def value = SocketServer.this.synchronized {
val ioWaitRatioMetricNames = processors.values.asScala.map { p =>
metrics.metricName("io-wait-ratio", "socket-server-metrics", p.metricTags)
}
ioWaitRatioMetricNames.map { metricName =>
Option(metrics.metric(metricName)).fold(0.0)(_.value)
}.sum / processors.size
}
}
)
newGauge("MemoryPoolAvailable",
new Gauge[Long] {
def value = memoryPool.availableMemory()
}
)
newGauge("MemoryPoolUsed",
new Gauge[Long] {
def value = memoryPool.size() - memoryPool.availableMemory()
}
)
info("Started " + acceptors.size + " acceptor threads")
}
/**
* Starts processors of all the acceptors of this server if they have not already been started.
* This method is used for delayed starting of processors if [[kafka.network.SocketServer#startup]]
* was invoked with `startupProcessors=false`.
*/
def startProcessors(): Unit = synchronized {
acceptors.values.asScala.foreach { _.startProcessors() }
info(s"Started processors for ${acceptors.size} acceptors")
}
private def endpoints = config.listeners.map(l => l.listenerName -> l).toMap
private def createAcceptorAndProcessors(processorsPerListener: Int,
endpoints: Seq[EndPoint]): Unit = synchronized {
val sendBufferSize = config.socketSendBufferBytes
val recvBufferSize = config.socketReceiveBufferBytes
val brokerId = config.brokerId
endpoints.foreach { endpoint =>
val listenerName = endpoint.listenerName
val securityProtocol = endpoint.securityProtocol
val acceptor = new Acceptor(endpoint, sendBufferSize, recvBufferSize, brokerId, connectionQuotas)
KafkaThread.nonDaemon(s"kafka-socket-acceptor-$listenerName-$securityProtocol-${endpoint.port}", acceptor).start()
acceptor.awaitStartup()
acceptors.put(endpoint, acceptor)
addProcessors(acceptor, endpoint, processorsPerListener)
}
}
private def addProcessors(acceptor: Acceptor, endpoint: EndPoint, newProcessorsPerListener: Int): Unit = synchronized {
val listenerName = endpoint.listenerName
val securityProtocol = endpoint.securityProtocol
val listenerProcessors = new ArrayBuffer[Processor]()
for (_ <- 0 until newProcessorsPerListener) {
val processor = newProcessor(nextProcessorId, connectionQuotas, listenerName, securityProtocol, memoryPool)
listenerProcessors += processor
requestChannel.addProcessor(processor)
nextProcessorId += 1
}
listenerProcessors.foreach(p => processors.put(p.id, p))
acceptor.addProcessors(listenerProcessors)
}
/**
* Stop processing requests and new connections.
*/
def stopProcessingRequests() = {
info("Stopping socket server request processors")
this.synchronized {
acceptors.asScala.values.foreach(_.shutdown())
processors.asScala.values.foreach(_.shutdown())
requestChannel.clear()
stoppedProcessingRequests = true
}
info("Stopped socket server request processors")
}
def resizeThreadPool(oldNumNetworkThreads: Int, newNumNetworkThreads: Int): Unit = synchronized {
info(s"Resizing network thread pool size for each listener from $oldNumNetworkThreads to $newNumNetworkThreads")
if (newNumNetworkThreads > oldNumNetworkThreads) {
acceptors.asScala.foreach { case (endpoint, acceptor) =>
addProcessors(acceptor, endpoint, newNumNetworkThreads - oldNumNetworkThreads)
}
} else if (newNumNetworkThreads < oldNumNetworkThreads)
acceptors.asScala.values.foreach(_.removeProcessors(oldNumNetworkThreads - newNumNetworkThreads, requestChannel))
}
/**
* Shutdown the socket server. If still processing requests, shutdown
* acceptors and processors first.
*/
def shutdown() = {
info("Shutting down socket server")
this.synchronized {
if (!stoppedProcessingRequests)
stopProcessingRequests()
requestChannel.shutdown()
}
info("Shutdown completed")
}
def boundPort(listenerName: ListenerName): Int = {
try {
acceptors.get(endpoints(listenerName)).serverChannel.socket.getLocalPort
} catch {
case e: Exception =>
throw new KafkaException("Tried to check server's port before server was started or checked for port of non-existing protocol", e)
}
}
def addListeners(listenersAdded: Seq[EndPoint]): Unit = synchronized {
info(s"Adding listeners for endpoints $listenersAdded")
createAcceptorAndProcessors(config.numNetworkThreads, listenersAdded)
startProcessors()
}
def removeListeners(listenersRemoved: Seq[EndPoint]): Unit = synchronized {
info(s"Removing listeners for endpoints $listenersRemoved")
listenersRemoved.foreach { endpoint =>
acceptors.asScala.remove(endpoint).foreach(_.shutdown())
}
}
/* `protected` for test usage */
protected[network] def newProcessor(id: Int, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
securityProtocol: SecurityProtocol, memoryPool: MemoryPool): Processor = {
new Processor(id,
time,
config.socketRequestMaxBytes,
requestChannel,
connectionQuotas,
config.connectionsMaxIdleMs,
listenerName,
securityProtocol,
config,
metrics,
credentialProvider,
memoryPool,
logContext
)
}
/* For test usage */
private[network] def connectionCount(address: InetAddress): Int =
Option(connectionQuotas).fold(0)(_.get(address))
/* For test usage */
private[network] def processor(index: Int): Processor = processors.get(index)
}
/**
* A base class with some helper variables and methods
*/
private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQuotas) extends Runnable with Logging {
private val startupLatch = new CountDownLatch(1)
// `shutdown()` is invoked before `startupComplete` and `shutdownComplete` if an exception is thrown in the constructor
// (e.g. if the address is already in use). We want `shutdown` to proceed in such cases, so we first assign an open
// latch and then replace it in `startupComplete()`.
@volatile private var shutdownLatch = new CountDownLatch(0)
private val alive = new AtomicBoolean(true)
def wakeup(): Unit
/**
* Initiates a graceful shutdown by signaling to stop and waiting for the shutdown to complete
*/
def shutdown(): Unit = {
if (alive.getAndSet(false))
wakeup()
shutdownLatch.await()
}
/**
* Wait for the thread to completely start up
*/
def awaitStartup(): Unit = startupLatch.await
/**
* Record that the thread startup is complete
*/
protected def startupComplete(): Unit = {
// Replace the open latch with a closed one
shutdownLatch = new CountDownLatch(1)
startupLatch.countDown()
}
/**
* Record that the thread shutdown is complete
*/
protected def shutdownComplete(): Unit = shutdownLatch.countDown()
/**
* Is the server still running?
*/
protected def isRunning: Boolean = alive.get
/**
* Close `channel` and decrement the connection count.
*/
def close(channel: SocketChannel): Unit = {
if (channel != null) {
debug("Closing connection from " + channel.socket.getRemoteSocketAddress())
connectionQuotas.dec(channel.socket.getInetAddress)
CoreUtils.swallow(channel.socket().close(), this, Level.ERROR)
CoreUtils.swallow(channel.close(), this, Level.ERROR)
}
}
}
/**
* Thread that accepts and configures new connections. There is one of these per endpoint.
*/
private[kafka] class Acceptor(val endPoint: EndPoint,
val sendBufferSize: Int,
val recvBufferSize: Int,
brokerId: Int,
connectionQuotas: ConnectionQuotas) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup {
private val nioSelector = NSelector.open()
val serverChannel = openServerSocket(endPoint.host, endPoint.port)
private val processors = new ArrayBuffer[Processor]()
private val processorsStarted = new AtomicBoolean
private[network] def addProcessors(newProcessors: Buffer[Processor]): Unit = synchronized {
processors ++= newProcessors
if (processorsStarted.get)
startProcessors(newProcessors)
}
private[network] def startProcessors(): Unit = synchronized {
if (!processorsStarted.getAndSet(true)) {
startProcessors(processors)
}
}
private def startProcessors(processors: Seq[Processor]): Unit = synchronized {
processors.foreach { processor =>
KafkaThread.nonDaemon(s"kafka-network-thread-$brokerId-${endPoint.listenerName}-${endPoint.securityProtocol}-${processor.id}",
processor).start()
}
}
private[network] def removeProcessors(removeCount: Int, requestChannel: RequestChannel): Unit = synchronized {
// Shutdown `removeCount` processors. Remove them from the processor list first so that no more
// connections are assigned. Shutdown the removed processors, closing the selector and its connections.
// The processors are then removed from `requestChannel` and any pending responses to these processors are dropped.
val toRemove = processors.takeRight(removeCount)
processors.remove(processors.size - removeCount, removeCount)
toRemove.foreach(_.shutdown())
toRemove.foreach(processor => requestChannel.removeProcessor(processor.id))
}
override def shutdown(): Unit = {
super.shutdown()
synchronized {
processors.foreach(_.shutdown())
}
}
/**
* Accept loop that checks for new connection attempts
*/
def run() {
serverChannel.register(nioSelector, SelectionKey.OP_ACCEPT)
startupComplete()
try {
var currentProcessor = 0
while (isRunning) {
try {
val ready = nioSelector.select(500)
if (ready > 0) {
val keys = nioSelector.selectedKeys()
val iter = keys.iterator()
while (iter.hasNext && isRunning) {
try {
val key = iter.next
iter.remove()
if (key.isAcceptable) {
val processor = synchronized {
currentProcessor = currentProcessor % processors.size
processors(currentProcessor)
}
accept(key, processor)
} else
throw new IllegalStateException("Unrecognized key state for acceptor thread.")
// round robin to the next processor thread, mod(numProcessors) will be done later
currentProcessor = currentProcessor + 1
} catch {
case e: Throwable => error("Error while accepting connection", e)
}
}
}
}
catch {
// We catch all the throwables to prevent the acceptor thread from exiting on exceptions due
// to a select operation on a specific channel or a bad request. We don't want
// the broker to stop responding to requests from other clients in these scenarios.
case e: ControlThrowable => throw e
case e: Throwable => error("Error occurred", e)
}
}
} finally {
debug("Closing server socket and selector.")
CoreUtils.swallow(serverChannel.close(), this, Level.ERROR)
CoreUtils.swallow(nioSelector.close(), this, Level.ERROR)
shutdownComplete()
}
}
/*
* Create a server socket to listen for connections on.
*/
private def openServerSocket(host: String, port: Int): ServerSocketChannel = {
val socketAddress =
if(host == null || host.trim.isEmpty)
new InetSocketAddress(port)
else
new InetSocketAddress(host, port)
val serverChannel = ServerSocketChannel.open()
serverChannel.configureBlocking(false)
if (recvBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
serverChannel.socket().setReceiveBufferSize(recvBufferSize)
try {
serverChannel.socket.bind(socketAddress)
info("Awaiting socket connections on %s:%d.".format(socketAddress.getHostString, serverChannel.socket.getLocalPort))
} catch {
case e: SocketException =>
throw new KafkaException("Socket server failed to bind to %s:%d: %s.".format(socketAddress.getHostString, port, e.getMessage), e)
}
serverChannel
}
/*
* Accept a new connection
*/
def accept(key: SelectionKey, processor: Processor) {
val serverSocketChannel = key.channel().asInstanceOf[ServerSocketChannel]
val socketChannel = serverSocketChannel.accept()
try {
connectionQuotas.inc(socketChannel.socket().getInetAddress)
socketChannel.configureBlocking(false)
socketChannel.socket().setTcpNoDelay(true)
socketChannel.socket().setKeepAlive(true)
if (sendBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
socketChannel.socket().setSendBufferSize(sendBufferSize)
debug("Accepted connection from %s on %s and assigned it to processor %d, sendBufferSize [actual|requested]: [%d|%d] recvBufferSize [actual|requested]: [%d|%d]"
.format(socketChannel.socket.getRemoteSocketAddress, socketChannel.socket.getLocalSocketAddress, processor.id,
socketChannel.socket.getSendBufferSize, sendBufferSize,
socketChannel.socket.getReceiveBufferSize, recvBufferSize))
processor.accept(socketChannel)
} catch {
case e: TooManyConnectionsException =>
info("Rejected connection from %s, address already has the configured maximum of %d connections.".format(e.ip, e.count))
close(socketChannel)
}
}
/**
* Wakeup the thread for selection.
*/
  override def wakeup(): Unit = nioSelector.wakeup()
}
private[kafka] object Processor {
val IdlePercentMetricName = "IdlePercent"
val NetworkProcessorMetricTag = "networkProcessor"
val ListenerMetricTag = "listener"
}
/**
* Thread that processes all requests from a single connection. There are N of these running in parallel
* each of which has its own selector
*/
private[kafka] class Processor(val id: Int,
time: Time,
maxRequestSize: Int,
requestChannel: RequestChannel,
connectionQuotas: ConnectionQuotas,
connectionsMaxIdleMs: Long,
listenerName: ListenerName,
securityProtocol: SecurityProtocol,
config: KafkaConfig,
metrics: Metrics,
credentialProvider: CredentialProvider,
memoryPool: MemoryPool,
logContext: LogContext) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup {
import Processor._
private object ConnectionId {
def fromString(s: String): Option[ConnectionId] = s.split("-") match {
case Array(local, remote, index) => BrokerEndPoint.parseHostPort(local).flatMap { case (localHost, localPort) =>
BrokerEndPoint.parseHostPort(remote).map { case (remoteHost, remotePort) =>
ConnectionId(localHost, localPort, remoteHost, remotePort, Integer.parseInt(index))
}
}
case _ => None
}
}
private[network] case class ConnectionId(localHost: String, localPort: Int, remoteHost: String, remotePort: Int, index: Int) {
override def toString: String = s"$localHost:$localPort-$remoteHost:$remotePort-$index"
}
private val newConnections = new ConcurrentLinkedQueue[SocketChannel]()
private val inflightResponses = mutable.Map[String, RequestChannel.Response]()
private val responseQueue = new LinkedBlockingDeque[RequestChannel.Response]()
private[kafka] val metricTags = mutable.LinkedHashMap(
ListenerMetricTag -> listenerName.value,
NetworkProcessorMetricTag -> id.toString
).asJava
newGauge(IdlePercentMetricName,
new Gauge[Double] {
def value = {
Option(metrics.metric(metrics.metricName("io-wait-ratio", "socket-server-metrics", metricTags))).fold(0.0)(_.value)
}
},
// for compatibility, only add a networkProcessor tag to the Yammer Metrics alias (the equivalent Selector metric
// also includes the listener name)
Map(NetworkProcessorMetricTag -> id.toString)
)
private val selector = createSelector(
ChannelBuilders.serverChannelBuilder(listenerName,
listenerName == config.interBrokerListenerName,
securityProtocol,
config,
credentialProvider.credentialCache,
credentialProvider.tokenCache))
// Visible to override for testing
protected[network] def createSelector(channelBuilder: ChannelBuilder): KSelector = {
channelBuilder match {
case reconfigurable: Reconfigurable => config.addReconfigurable(reconfigurable)
case _ =>
}
new KSelector(
maxRequestSize,
connectionsMaxIdleMs,
metrics,
time,
"socket-server",
metricTags,
false,
true,
channelBuilder,
memoryPool,
logContext)
}
// Connection ids have the format `localAddr:localPort-remoteAddr:remotePort-index`. The index is a
// non-negative incrementing value that ensures that even if remotePort is reused after a connection is
// closed, connection ids are not reused while requests from the closed connection are being processed.
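  // e.g. "192.168.1.10:9092-10.0.0.7:53543-0" (an illustrative value, not taken from these sources).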
private var nextConnectionIndex = 0
override def run() {
startupComplete()
try {
while (isRunning) {
try {
// setup any new connections that have been queued up
configureNewConnections()
// register any new responses for writing
processNewResponses()
poll()
processCompletedReceives()
processCompletedSends()
processDisconnected()
} catch {
// We catch all the throwables here to prevent the processor thread from exiting. We do this because
// letting a processor exit might cause a bigger impact on the broker. This behavior might need to be
// reviewed if we see an exception that needs the entire broker to stop. Usually the exceptions thrown would
// be either associated with a specific socket channel or a bad request. These exceptions are caught and
// processed by the individual methods above which close the failing channel and continue processing other
// channels. So this catch block should only ever see ControlThrowables.
case e: Throwable => processException("Processor got uncaught exception.", e)
}
}
} finally {
debug("Closing selector - processor " + id)
CoreUtils.swallow(closeAll(), this, Level.ERROR)
shutdownComplete()
}
}
private def processException(errorMessage: String, throwable: Throwable) {
throwable match {
case e: ControlThrowable => throw e
case e => error(errorMessage, e)
}
}
private def processChannelException(channelId: String, errorMessage: String, throwable: Throwable) {
if (openOrClosingChannel(channelId).isDefined) {
error(s"Closing socket for $channelId because of error", throwable)
close(channelId)
}
processException(errorMessage, throwable)
}
private def processNewResponses() {
var curr: RequestChannel.Response = null
while ({curr = dequeueResponse(); curr != null}) {
val channelId = curr.request.context.connectionId
try {
curr.responseAction match {
case RequestChannel.NoOpAction =>
// There is no response to send to the client, we need to read more pipelined requests
// that are sitting in the server's socket buffer
updateRequestMetrics(curr)
trace("Socket server received empty response to send, registering for read: " + curr)
openOrClosingChannel(channelId).foreach(c => selector.unmute(c.id))
case RequestChannel.SendAction =>
val responseSend = curr.responseSend.getOrElse(
throw new IllegalStateException(s"responseSend must be defined for SendAction, response: $curr"))
sendResponse(curr, responseSend)
case RequestChannel.CloseConnectionAction =>
updateRequestMetrics(curr)
trace("Closing socket connection actively according to the response code.")
close(channelId)
}
} catch {
case e: Throwable =>
processChannelException(channelId, s"Exception while processing response for $channelId", e)
}
}
}
/* `protected` for test usage */
protected[network] def sendResponse(response: RequestChannel.Response, responseSend: Send) {
val connectionId = response.request.context.connectionId
trace(s"Socket server received response to send to $connectionId, registering for write and sending data: $response")
// `channel` can be None if the connection was closed remotely or if selector closed it for being idle for too long
if (channel(connectionId).isEmpty) {
warn(s"Attempting to send response via channel for which there is no open connection, connection id $connectionId")
response.request.updateRequestMetrics(0L, response)
}
// Invoke send for closingChannel as well so that the send is failed and the channel closed properly and
// removed from the Selector after discarding any pending staged receives.
// `openOrClosingChannel` can be None if the selector closed the connection because it was idle for too long
if (openOrClosingChannel(connectionId).isDefined) {
selector.send(responseSend)
inflightResponses += (connectionId -> response)
}
}
private def poll() {
try selector.poll(300)
catch {
case e @ (_: IllegalStateException | _: IOException) =>
// The exception is not re-thrown and any completed sends/receives/connections/disconnections
// from this poll will be processed.
error(s"Processor $id poll failed", e)
}
}
private def processCompletedReceives() {
selector.completedReceives.asScala.foreach { receive =>
try {
openOrClosingChannel(receive.source) match {
case Some(channel) =>
val header = RequestHeader.parse(receive.payload)
val context = new RequestContext(header, receive.source, channel.socketAddress,
channel.principal, listenerName, securityProtocol)
val req = new RequestChannel.Request(processor = id, context = context,
startTimeNanos = time.nanoseconds, memoryPool, receive.payload, requestChannel.metrics)
requestChannel.sendRequest(req)
selector.mute(receive.source)
case None =>
// This should never happen since completed receives are processed immediately after `poll()`
throw new IllegalStateException(s"Channel ${receive.source} removed from selector before processing completed receive")
}
} catch {
// note that even though we got an exception, we can assume that receive.source is valid.
// Issues with constructing a valid receive object were handled earlier
case e: Throwable =>
processChannelException(receive.source, s"Exception while processing request from ${receive.source}", e)
}
}
}
private def processCompletedSends() {
selector.completedSends.asScala.foreach { send =>
try {
val resp = inflightResponses.remove(send.destination).getOrElse {
throw new IllegalStateException(s"Send for ${send.destination} completed, but not in `inflightResponses`")
}
updateRequestMetrics(resp)
selector.unmute(send.destination)
} catch {
case e: Throwable => processChannelException(send.destination,
s"Exception while processing completed send to ${send.destination}", e)
}
}
}
private def updateRequestMetrics(response: RequestChannel.Response) {
val request = response.request
val networkThreadTimeNanos = openOrClosingChannel(request.context.connectionId).fold(0L)(_.getAndResetNetworkThreadTimeNanos())
request.updateRequestMetrics(networkThreadTimeNanos, response)
}
private def processDisconnected() {
selector.disconnected.keySet.asScala.foreach { connectionId =>
try {
val remoteHost = ConnectionId.fromString(connectionId).getOrElse {
throw new IllegalStateException(s"connectionId has unexpected format: $connectionId")
}.remoteHost
inflightResponses.remove(connectionId).foreach(updateRequestMetrics)
// the channel has been closed by the selector but the quotas still need to be updated
connectionQuotas.dec(InetAddress.getByName(remoteHost))
} catch {
case e: Throwable => processException(s"Exception while processing disconnection of $connectionId", e)
}
}
}
/**
* Close the connection identified by `connectionId` and decrement the connection count.
* The channel will be immediately removed from the selector's `channels` or `closingChannels`
* and no further disconnect notifications will be sent for this channel by the selector.
* If responses are pending for the channel, they are dropped and metrics is updated.
* If the channel has already been removed from selector, no action is taken.
*/
private def close(connectionId: String): Unit = {
openOrClosingChannel(connectionId).foreach { channel =>
debug(s"Closing selector connection $connectionId")
val address = channel.socketAddress
if (address != null)
connectionQuotas.dec(address)
selector.close(connectionId)
inflightResponses.remove(connectionId).foreach(response => updateRequestMetrics(response))
}
}
/**
* Queue up a new connection for reading
*/
def accept(socketChannel: SocketChannel) {
newConnections.add(socketChannel)
wakeup()
}
/**
* Register any new connections that have been queued up
*/
private def configureNewConnections() {
while (!newConnections.isEmpty) {
val channel = newConnections.poll()
try {
debug(s"Processor $id listening to new connection from ${channel.socket.getRemoteSocketAddress}")
selector.register(connectionId(channel.socket), channel)
} catch {
// We explicitly catch all exceptions and close the socket to avoid a socket leak.
case e: Throwable =>
val remoteAddress = channel.socket.getRemoteSocketAddress
// need to close the channel here to avoid a socket leak.
close(channel)
processException(s"Processor $id closed connection from $remoteAddress", e)
}
}
}
/**
* Close the selector and all open connections
*/
private def closeAll() {
selector.channels.asScala.foreach { channel =>
close(channel.id)
}
selector.close()
removeMetric(IdlePercentMetricName, Map(NetworkProcessorMetricTag -> id.toString))
}
  // `protected` to allow override for testing
protected[network] def connectionId(socket: Socket): String = {
val localHost = socket.getLocalAddress.getHostAddress
val localPort = socket.getLocalPort
val remoteHost = socket.getInetAddress.getHostAddress
val remotePort = socket.getPort
val connId = ConnectionId(localHost, localPort, remoteHost, remotePort, nextConnectionIndex).toString
nextConnectionIndex = if (nextConnectionIndex == Int.MaxValue) 0 else nextConnectionIndex + 1
connId
}
private[network] def enqueueResponse(response: RequestChannel.Response): Unit = {
responseQueue.put(response)
wakeup()
}
private def dequeueResponse(): RequestChannel.Response = {
val response = responseQueue.poll()
if (response != null)
response.request.responseDequeueTimeNanos = Time.SYSTEM.nanoseconds
response
}
private[network] def responseQueueSize = responseQueue.size
// Only for testing
private[network] def inflightResponseCount: Int = inflightResponses.size
// Visible for testing
// Only methods that are safe to call on a disconnected channel should be invoked on 'openOrClosingChannel'.
private[network] def openOrClosingChannel(connectionId: String): Option[KafkaChannel] =
Option(selector.channel(connectionId)).orElse(Option(selector.closingChannel(connectionId)))
/* For test usage */
private[network] def channel(connectionId: String): Option[KafkaChannel] =
Option(selector.channel(connectionId))
// Visible for testing
private[network] def numStagedReceives(connectionId: String): Int =
openOrClosingChannel(connectionId).map(c => selector.numStagedReceives(c)).getOrElse(0)
/**
* Wakeup the thread for selection.
*/
override def wakeup() = selector.wakeup()
override def shutdown(): Unit = {
super.shutdown()
removeMetric("IdlePercent", Map("networkProcessor" -> id.toString))
}
}
class ConnectionQuotas(val defaultMax: Int, overrideQuotas: Map[String, Int]) {
private val overrides = overrideQuotas.map { case (host, count) => (InetAddress.getByName(host), count) }
private val counts = mutable.Map[InetAddress, Int]()
def inc(address: InetAddress) {
counts.synchronized {
val count = counts.getOrElseUpdate(address, 0)
counts.put(address, count + 1)
val max = overrides.getOrElse(address, defaultMax)
if (count >= max)
throw new TooManyConnectionsException(address, max)
}
}
def dec(address: InetAddress) {
counts.synchronized {
val count = counts.getOrElse(address,
throw new IllegalArgumentException(s"Attempted to decrease connection count for address with no connections, address: $address"))
if (count == 1)
counts.remove(address)
else
counts.put(address, count - 1)
}
}
def get(address: InetAddress): Int = counts.synchronized {
counts.getOrElse(address, 0)
}
}
class TooManyConnectionsException(val ip: InetAddress, val count: Int) extends KafkaException("Too many connections from %s (maximum = %d)".format(ip, count))
| sebadiaz/kafka | core/src/main/scala/kafka/network/SocketServer.scala | Scala | apache-2.0 | 35,445 |
package leo.modules.output.logger
import leo.modules.output.Output
/**
* Simple implementation of the [[Logging]] trait
 * with message logging to System.err (FD 2).
*
* The verbosity (logging level threshold) is set
* by the `v` flag from the command-line arguments.
* @see [[leo.Configuration]]
*/
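// Hypothetical usage (a sketch, not part of the original sources; `info` is assumed to be
// provided by the Logging trait):
//   Out.output("% SZS status Theorem") // raw result line, always printed
//   Out.info("starting proof search")  // log line, subject to the configured verbosity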
object Out extends Logging {
override protected val loggerName = "Console"
override protected val useParentLoggers = false
import java.util.logging.{ConsoleHandler, LogRecord, Formatter}
addLogHandler(
new ConsoleHandler {
setLevel(defaultLogLevel)
setFormatter(new Formatter {
def format(record: LogRecord) = {
val lines = record.getMessage.linesWithSeparators
if (lines.hasNext) {
val msg = lines.next() + lines.map(str => "% " + str).mkString("")
s"% [${record.getLevel.getLocalizedName}] \\t $msg \\n"
} else {
""
}
}
})
override def publish(record: LogRecord): Unit = {
super.publish(record)
flush()
}
}
)
def output(msg: Output): Unit = { println(msg.apply()) }
def output(msg: String): Unit = { println(msg) }
def comment(msg: String): Unit = {println(msg.linesWithSeparators.map(str => "% "+str).mkString(""))}
}
| lex-lex/Leo-III | src/main/scala/leo/modules/output/logger/Out.scala | Scala | bsd-3-clause | 1,293 |
package dx.core.languages.wdl
import wdlTools.types.{TypedAbstractSyntax => TAT, WdlTypes}
case class Bundle(primaryCallable: Option[TAT.Callable],
allCallables: Map[String, TAT.Callable],
typeAliases: Map[String, WdlTypes.T])
| dnanexus-rnd/dxWDL | src/main/scala/dx/core/languages/wdl/Bundle.scala | Scala | apache-2.0 | 265 |
package com.openquant.quoter.quotedb
import org.specs2.specification.Scope
import java.io.File
trait TempDBScope extends Scope {
val tmpPath = File.createTempFile("quoteDB_tests_", ".db")
tmpPath.deleteOnExit()
val quoteDB = QuoteDB(s"sqlite://$tmpPath")
} | openquant/quoter | src/test/scala/com/larroy/quoter/quotedb/TempDBScope.scala | Scala | lgpl-3.0 | 265 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <[email protected]>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.util.reflect
import scala.reflect.runtime.{universe => ru}
// TODO: `tpe` should ultimately be private.
final class ScalaType(val tpe: ru.Type) {
/**
* Return the associated JVM runtime class.
*/
lazy val runtimeClass: Class[_] = ReflectUtils.classForType(tpe)
def isA[T: ru.TypeTag]: Boolean = tpe =:= ru.typeOf[T]
def isLike[T: ru.TypeTag]: Boolean = tpe <:< ru.typeOf[T]
def args: Seq[ScalaType] = tpe.typeArgs.map(new ScalaType(_))
def baseType[T: ru.TypeTag]: ScalaType = new ScalaType(tpe.baseType(ru.typeOf[T].typeSymbol))
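  // Illustrative usage (a sketch, not part of the original sources):
  //   val t = new ScalaType(ru.typeOf[List[Int]])
  //   t.isA[List[Int]]     // true  - exact type equality (=:=)
  //   t.isLike[Seq[Int]]   // true  - conformance (<:<)
  //   t.args.head.isA[Int] // true  - first type argument
  //   t.baseType[Seq[_]]   // the Seq view of List[Int]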
/**
* Return whether this field is an optional field.
*/
def isOption: Boolean = tpe <:< ScalaType.OPTION
def isUnit: Boolean = tpe =:= ScalaType.UNIT
override def toString: String = tpe.toString
override def hashCode: Int = tpe.hashCode()
override def equals(obj: scala.Any): Boolean =
obj match {
case scalaType: ScalaType => scalaType.tpe =:= tpe
case _ => false
}
}
object ScalaType {
private[reflect] val OPTION = ru.typeOf[Option[_]]
private[reflect] val UNIT = ru.typeOf[Unit]
}
| privamov/accio | accio/java/fr/cnrs/liris/util/reflect/ScalaType.scala | Scala | gpl-3.0 | 1,876 |
/*******************************************************************************
Copyright (c) 2013, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models
import kr.ac.kaist.jsaf.analysis.typing.domain.PropValue
sealed abstract class AbsProperty
case class AbsBuiltinFunc(id: String, length: Double) extends AbsProperty
case class AbsBuiltinFuncAftercall(id: String, length: Double) extends AbsProperty
case class AbsBuiltinFuncCallback(id: String, length: Double) extends AbsProperty
case class AbsInternalFunc(id: String) extends AbsProperty
case class AbsConstValue(v: PropValue) extends AbsProperty
| daejunpark/jsaf | src/kr/ac/kaist/jsaf/analysis/typing/models/AbsProperty.scala | Scala | bsd-3-clause | 830 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package testpkg
import java.io.{ InputStream, OutputStream, PrintStream }
import java.util.concurrent.{ LinkedBlockingQueue, TimeUnit, TimeoutException }
import sbt.internal.client.NetworkClient
import sbt.internal.util.Util
import scala.collection.mutable
object ClientTest extends AbstractServerTest {
override val testDirectory: String = "client"
object NullInputStream extends InputStream {
override def read(): Int = {
try this.synchronized(this.wait)
catch { case _: InterruptedException => }
-1
}
}
val NullPrintStream = new PrintStream(_ => {}, false)
class CachingPrintStream extends { val cos = new CachingOutputStream }
with PrintStream(cos, true) {
def lines = cos.lines
}
class CachingOutputStream extends OutputStream {
private val byteBuffer = new mutable.ArrayBuffer[Byte]
override def write(i: Int) = Util.ignoreResult(byteBuffer += i.toByte)
def lines = new String(byteBuffer.toArray, "UTF-8").linesIterator.toSeq
}
class FixedInputStream(keys: Char*) extends InputStream {
var i = 0
override def read(): Int = {
if (i < keys.length) {
val res = keys(i).toInt
i += 1
res
} else -1
}
}
private[this] def background[R](f: => R): R = {
val result = new LinkedBlockingQueue[R]
val thread = new Thread("client-bg-thread") {
setDaemon(true)
start()
override def run(): Unit = result.put(f)
}
result.poll(1, TimeUnit.MINUTES) match {
case null =>
thread.interrupt()
thread.join(5000)
throw new TimeoutException
case r => r
}
}
private def client(args: String*): Int = {
background(
NetworkClient.client(
testPath.toFile,
args.toArray,
NullInputStream,
NullPrintStream,
NullPrintStream,
false
)
)
}
// This ensures that the completion command will send a tab that triggers
// sbt to call definedTestNames or discoveredMainClasses if there hasn't
// been a necessary compilation
  def tabs = new FixedInputStream('\t', '\t')
private def complete(completionString: String): Seq[String] = {
val cps = new CachingPrintStream
background(
NetworkClient.complete(
testPath.toFile,
Array(s"--completions=sbtn $completionString"),
false,
tabs,
cps
)
)
cps.lines
}
test("exit success") { c =>
assert(client("willSucceed") == 0)
}
test("exit failure") { _ =>
assert(client("willFail") == 1)
}
test("two commands") { _ =>
assert(client("compile;willSucceed") == 0)
}
test("two commands with failing second") { _ =>
assert(client("compile;willFail") == 1)
}
test("two commands with leading failure") { _ =>
assert(client("willFail;willSucceed") == 1)
}
test("three commands") { _ =>
assert(client("compile;clean;willSucceed") == 0)
}
test("three commands with middle failure") { _ =>
assert(client("compile;willFail;willSucceed") == 1)
}
test("compi completions") { _ =>
val expected = Vector(
"compile",
"compile:",
"compileAnalysisFile",
"compileAnalysisFilename",
"compileAnalysisTargetRoot",
"compileEarly",
"compileIncSetup",
"compileIncremental",
"compileJava",
"compileOrder",
"compileOutputs",
"compileProgress",
"compileScalaBackend",
"compileSplit",
"compilerCache",
"compilers",
)
assert(complete("compi").toVector == expected)
}
test("testOnly completions") { _ =>
val testOnlyExpected = Vector(
"testOnly",
"testOnly/",
"testOnly::",
"testOnly;",
)
assert(complete("testOnly") == testOnlyExpected)
val testOnlyOptionsExpected = Vector("--", ";", "test.pkg.FooSpec")
assert(complete("testOnly ") == testOnlyOptionsExpected)
}
test("quote with semi") { _ =>
assert(complete("\\"compile; fooB") == Vector("compile; fooBar"))
}
}
| sbt/sbt | server-test/src/test/scala/testpkg/ClientTest.scala | Scala | apache-2.0 | 4,149 |
1
case class Values(ds: Double*)
object MatchValues {
def main(args: Array[String]) {
val vals = Values(1.5, -2.3, 4.5)
println(vals match {
case Values(ds@_*) => /*start*/(0.0 /: ds)(_ + _)/*end*/ // wrong errors here
})
}
}
//Double | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL3123.scala | Scala | apache-2.0 | 259 |
case class i1() extends AnyRef
| lampepfl/dotty | tests/pos/i5001.scala | Scala | apache-2.0 | 31 |
/**
* Copyright (c) 2012 Petr Kozelek <[email protected]>
*
* The full copyright and license information is presented
* in the file LICENSE that was distributed with this source code.
*/
package mql.model.semantic
trait SqlExpression extends SqlConvertible | footcha/MQL | src/main/scala/mql/model/semantic/SqlExpression.scala | Scala | bsd-3-clause | 267 |
package mesosphere.marathon
import java.net.InetSocketAddress
import java.util
import com.google.inject.Module
import com.twitter.common.quantity.{ Amount, Time }
import com.twitter.common.zookeeper.ZooKeeperClient
import mesosphere.chaos.App
import mesosphere.chaos.http.{ HttpModule, HttpService }
import mesosphere.chaos.metrics.MetricsModule
import mesosphere.marathon.api.MarathonRestModule
import mesosphere.marathon.core.CoreGuiceModule
import mesosphere.marathon.event.EventModule
import mesosphere.marathon.event.http.HttpEventModule
import mesosphere.marathon.metrics.{ MetricsReporterModule, MetricsReporterService }
import scala.concurrent.{ Future, Await }
import org.apache.zookeeper.KeeperException
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
import scala.collection.JavaConverters._
class MarathonApp extends App {
val log = LoggerFactory.getLogger(getClass.getName)
lazy val zk: ZooKeeperClient = {
require(
conf.zooKeeperSessionTimeout() < Integer.MAX_VALUE,
"ZooKeeper timeout too large!"
)
val client = new ZooKeeperLeaderElectionClient(
Amount.of(conf.zooKeeperSessionTimeout().toInt, Time.MILLISECONDS),
conf.zooKeeperHostAddresses.asJavaCollection
)
// Marathon can't do anything useful without a ZK connection
// so we wait to proceed until one is available
var connectedToZk = false
while (!connectedToZk) {
try {
log.info("Connecting to ZooKeeper...")
client.get
connectedToZk = true
}
catch {
case t: Throwable =>
log.warn("Unable to connect to ZooKeeper, retrying...")
}
}
client
}
def modules(): Seq[Module] = {
Seq(
new HttpModule(conf),
new MetricsModule,
new MetricsReporterModule(conf),
new MarathonModule(conf, conf, zk),
new MarathonRestModule,
new EventModule(conf),
new DebugModule(conf),
new CoreGuiceModule
) ++ getEventsModule
}
def getEventsModule: Option[Module] = {
conf.eventSubscriber.get.flatMap {
case "http_callback" =>
log.info("Using HttpCallbackEventSubscriber for event notification")
Some(new HttpEventModule(conf))
case _ =>
log.info("Event notification disabled.")
None
}
}
override lazy val conf = new AllConf(args)
def runDefault(): Unit = {
setConcurrentContextDefaults()
log.info(s"Starting Marathon ${BuildInfo.version} with ${args.mkString(" ")}")
run(
classOf[HttpService],
classOf[MarathonSchedulerService],
classOf[MetricsReporterService]
)
}
/**
* Make sure that we have more than one thread -- otherwise some unmarked blocking operations might cause trouble.
*
* See
* [The Global Execution
* Context](http://docs.scala-lang.org/overviews/core/futures.html#the-global-execution-context)
* in the scala documentation.
*
* Here is the relevant excerpt in case the link gets broken:
*
* # The Global Execution Context
*
* ExecutionContext.global is an ExecutionContext backed by a ForkJoinPool. It should be sufficient for most
* situations but requires some care. A ForkJoinPool manages a limited amount of threads (the maximum amount of
   * threads being referred to as parallelism level). The number of concurrently blocking computations can exceed the
* parallelism level only if each blocking call is wrapped inside a blocking call (more on that below). Otherwise,
* there is a risk that the thread pool in the global execution context is starved, and no computation can proceed.
*
* By default the ExecutionContext.global sets the parallelism level of its underlying fork-join pool to the amount
   * of available processors (Runtime.availableProcessors). This configuration can be overridden by setting one
* (or more) of the following VM attributes:
*
* scala.concurrent.context.minThreads - defaults to Runtime.availableProcessors
* scala.concurrent.context.numThreads - can be a number or a multiplier (N) in the form ‘xN’ ;
* defaults to Runtime.availableProcessors
* scala.concurrent.context.maxThreads - defaults to Runtime.availableProcessors
*
* The parallelism level will be set to numThreads as long as it remains within [minThreads; maxThreads].
*
* As stated above the ForkJoinPool can increase the amount of threads beyond its parallelismLevel in the
* presence of blocking computation.
*/
private[this] def setConcurrentContextDefaults(): Unit = {
def setIfNotDefined(property: String, value: String): Unit = {
if (!sys.props.contains(property)) {
sys.props += property -> value
}
}
setIfNotDefined("scala.concurrent.context.minThreads", "5")
setIfNotDefined("scala.concurrent.context.numThreads", "x2")
setIfNotDefined("scala.concurrent.context.maxThreads", "64")
}
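  // A ZooKeeperClient that never retries on ZooKeeper exceptions: the error is logged and the JVM
  // is shut down (or halted outright if the graceful exit does not complete within five seconds)
  // to avoid running with invalid ZooKeeper state.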
class ZooKeeperLeaderElectionClient(sessionTimeout: Amount[Integer, Time],
zooKeeperServers: java.lang.Iterable[InetSocketAddress])
extends ZooKeeperClient(sessionTimeout, zooKeeperServers) {
override def shouldRetry(e: KeeperException): Boolean = {
log.error("Got ZooKeeper exception", e)
log.error("Committing suicide to avoid invalidating ZooKeeper state")
val f = Future {
// scalastyle:off magic.number
Runtime.getRuntime.exit(9)
// scalastyle:on
}(scala.concurrent.ExecutionContext.global)
try {
Await.result(f, 5.seconds)
}
catch {
case _: Throwable =>
log.error("Finalization failed, killing JVM.")
// scalastyle:off magic.number
Runtime.getRuntime.halt(1)
// scalastyle:on
}
false
}
}
}
object Main extends MarathonApp {
runDefault()
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/Main.scala | Scala | apache-2.0 | 5,929 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IndexSink, StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScPackaging
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.packaging.ScPackagingImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScPackagingStubImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.index.ScalaIndexKeys
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaNamesUtil
/**
* @author ilyas
*/
class ScPackagingElementType extends ScStubElementType[ScPackagingStub, ScPackaging]("packaging") {
override def serialize(stub: ScPackagingStub, dataStream: StubOutputStream): Unit = {
dataStream.writeName(stub.parentPackageName)
dataStream.writeName(stub.packageName)
dataStream.writeBoolean(stub.isExplicit)
}
override def deserialize(dataStream: StubInputStream, parentStub: StubElement[_ <: PsiElement]): ScPackagingStub =
new ScPackagingStubImpl(parentStub.asInstanceOf[StubElement[PsiElement]], this,
parentPackageNameRef = dataStream.readName,
packageNameRef = dataStream.readName,
isExplicit = dataStream.readBoolean)
override def createStub(packaging: ScPackaging, parentStub: StubElement[_ <: PsiElement]): ScPackagingStub =
new ScPackagingStubImpl(parentStub, this,
parentPackageNameRef = fromString(packaging.parentPackageName),
packageNameRef = fromString(packaging.packageName),
isExplicit = packaging.isExplicit)
override def indexStub(stub: ScPackagingStub, sink: IndexSink): Unit = {
val prefix = stub.parentPackageName
var ownNamePart = stub.packageName
def append(postfix: String) =
ScalaNamesUtil.cleanFqn(if (prefix.length > 0) prefix + "." + postfix else postfix)
var i = 0
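    // Register an index occurrence for the full package FQN and for each shorter prefix
    // obtained by dropping trailing segments of this packaging's own name.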
do {
sink.occurrence[ScPackaging, java.lang.Integer](ScalaIndexKeys.PACKAGE_FQN_KEY, append(ownNamePart).hashCode)
i = ownNamePart.lastIndexOf(".")
if (i > 0) {
ownNamePart = ownNamePart.substring(0, i)
}
} while (i > 0)
}
override def createElement(node: ASTNode): ScPackaging = new ScPackagingImpl(node)
override def createPsi(stub: ScPackagingStub): ScPackaging = new ScPackagingImpl(stub)
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScPackagingElementType.scala | Scala | apache-2.0 | 2,424 |