column      type            values
code        stringlengths   5 – 1M
repo_name   stringlengths   5 – 109
path        stringlengths   6 – 208
language    stringclasses   1 value
license     stringclasses   15 values
size        int64           5 – 1M
package com.sksamuel.scapegoat.inspections.string

import com.sksamuel.scapegoat.{ Inspection, InspectionContext, Inspector, Levels }

/** @author Stephen Samuel */
class LooksLikeInterpolatedString extends Inspection {

  final val regex1 = "\\\\$\\\\{[a-z][.a-zA-Z0-9_]*\\\\}".r
  final val regex2 = "\\\\$[a-z][.a-zA-Z0-9_]*".r

  def inspector(context: InspectionContext): Inspector = new Inspector(context) {
    override def postTyperTraverser = Some apply new context.Traverser {
      import context.global._

      override def inspect(tree: Tree): Unit = {
        tree match {
          case Literal(Constant(str: String)) =>
            val possibles1 = regex1.findAllIn(str).toList.filterNot(_.contains("$anonfun"))
            val possibles2 = regex2.findAllIn(str).toList.filterNot(_.contains("$anonfun"))
            if ((possibles1 ++ possibles2).nonEmpty) {
              context.warn("Looks Like Interpolated String", tree.pos, Levels.Warning, str, LooksLikeInterpolatedString.this)
            }
          case _ => continue(tree)
        }
      }
    }
  }
}
pwwpche/scalac-scapegoat-plugin
src/main/scala/com/sksamuel/scapegoat/inspections/string/LooksLikeInterpolatedString.scala
Scala
apache-2.0
1,138
/*
 * Copyright 2017 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package uk.gov.hmrc.apigateway.connector.impl

import javax.inject.{Inject, Singleton}

import play.api.libs.ws.WSClient
import uk.gov.hmrc.apigateway.cache.CacheManager
import uk.gov.hmrc.apigateway.connector.ServiceConnector
import uk.gov.hmrc.apigateway.exception.GatewayError.{NotFound, InvalidSubscription}
import uk.gov.hmrc.apigateway.model._
import uk.gov.hmrc.apigateway.play.binding.PlayBindings._
import uk.gov.hmrc.apigateway.util.HttpHeaders.X_SERVER_TOKEN

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.Future._

@Singleton
class ThirdPartyApplicationConnector @Inject() (wsClient: WSClient, cache: CacheManager)
  extends ServiceConnector(wsClient, cache, serviceName = "third-party-application") {

  def getApplicationByServerToken(serverToken: String): Future[Application] =
    get[Application](
      key = s"$serviceName-$serverToken",
      urlPath = "application",
      headers = Seq(X_SERVER_TOKEN -> serverToken)
    )

  def getApplicationByClientId(clientId: String): Future[Application] =
    get[Application](
      key = s"$serviceName-$clientId",
      urlPath = s"application?clientId=$clientId"
    )

  def validateSubscription(applicationId: String, apiIdentifier: ApiIdentifier): Future[Unit] = {
    get[ApiIdentifier](
      key = s"$serviceName-$applicationId-${apiIdentifier.context}-${apiIdentifier.version}",
      urlPath = s"application/$applicationId/subscription/${apiIdentifier.context}/${apiIdentifier.version}"
    ) map { _ => ()} recoverWith {
      case NotFound() => failed(InvalidSubscription())
    }
  }
}
hmrc/api-gateway
app/uk/gov/hmrc/apigateway/connector/impl/ThirdPartyApplicationConnector.scala
Scala
apache-2.0
2,235
package dotty.tools
package dotc
package util

import scala.language.unsafeNulls

import printing.{Showable, Printer}
import printing.Texts._
import core.Contexts.Context
import Spans.{Span, NoSpan}
import scala.annotation.internal.sharable

/** A source position is comprised of a span and a source file */
case class SourcePosition(source: SourceFile, span: Span, outer: SourcePosition = NoSourcePosition)
extends SrcPos, interfaces.SourcePosition, Showable {

  def sourcePos(using Context) = this

  /** Is `that` a source position contained in this source position ?
   *  `outer` is not taken into account.
   */
  def contains(that: SourcePosition): Boolean =
    this.source == that.source && this.span.contains(that.span)

  def exists: Boolean = span.exists

  def lineContent: String = source.lineContent(point)

  def point: Int = span.point

  def line: Int = source.offsetToLine(point)

  /** Extracts the lines from the underlying source file as `Array[Char]`*/
  def linesSlice: Array[Char] =
    source.content.slice(source.startOfLine(start), source.nextLine(end))

  /** The lines of the position */
  def lines: Range = {
    val startOffset = source.offsetToLine(start)
    val endOffset = source.offsetToLine(end - 1) // -1 to drop a line if no chars in it form part of the position
    if (startOffset >= endOffset) line to line
    else startOffset to endOffset
  }

  def lineOffsets: List[Int] =
    lines.toList.map(source.lineToOffset(_))

  def beforeAndAfterPoint: (List[Int], List[Int]) =
    lineOffsets.partition(_ <= point)

  def column: Int = source.column(point)

  def start: Int = span.start
  def startLine: Int = source.offsetToLine(start)
  def startColumn: Int = source.column(start)
  def startColumnPadding: String = source.startColumnPadding(start)

  def end: Int = span.end
  def endLine: Int = source.offsetToLine(end)
  def endColumn: Int = source.column(end)

  def withOuter(outer: SourcePosition): SourcePosition = SourcePosition(source, span, outer)
  def withSpan(range: Span) = SourcePosition(source, range, outer)

  def startPos: SourcePosition = withSpan(span.startPos)
  def endPos : SourcePosition = withSpan(span.endPos)
  def focus : SourcePosition = withSpan(span.focus)
  def toSynthetic: SourcePosition = withSpan(span.toSynthetic)

  def outermost: SourcePosition =
    if outer == null || outer == NoSourcePosition then this else outer.outermost

  /** Inner most position that is contained within the `outermost` position.
   *  Most precise position that comes from the call site.
   */
  def nonInlined: SourcePosition = {
    val om = outermost
    def rec(self: SourcePosition): SourcePosition =
      if om.contains(self) then self else rec(self.outer)
    rec(this)
  }

  override def toString: String =
    s"${if (source.exists) source.file.toString else "(no source)"}:$span"

  def toText(printer: Printer): Text = printer.toText(this)
}

/** A sentinel for a non-existing source position */
@sharable object NoSourcePosition extends SourcePosition(NoSource, NoSpan, null) {
  override def line: Int = -1
  override def column: Int = -1
  override def toString: String = "?"
  override def withOuter(outer: SourcePosition): SourcePosition = outer
}

/** Things that can produce a source position and a span */
trait SrcPos:
  def sourcePos(using ctx: Context): SourcePosition
  def span: Span
  def startPos(using ctx: Context): SourcePosition = sourcePos.startPos
  def endPos(using ctx: Context): SourcePosition = sourcePos.endPos
  def focus(using ctx: Context): SourcePosition = sourcePos.focus
  def line(using ctx: Context): Int = sourcePos.line
dotty-staging/dotty
compiler/src/dotty/tools/dotc/util/SourcePosition.scala
Scala
apache-2.0
3,633
package dotty.tools.dotc.printing

import dotty.tools.dotc.ast.Trees._
import dotty.tools.dotc.ast.untpd.{Tree, PackageDef, Template, TypeDef}
import dotty.tools.dotc.ast.{Trees, untpd}
import dotty.tools.dotc.printing.Texts._
import dotty.tools.dotc.core.Contexts._
import dotty.tools.dotc.core.StdNames.nme
import dotty.tools.dotc.core.Flags._
import dotty.tools.dotc.core.Symbols._
import dotty.tools.dotc.core.StdNames._

class DecompilerPrinter(_ctx: Context) extends RefinedPrinter(_ctx) {

  override protected def dropAnnotForModText(sym: Symbol): Boolean =
    super.dropAnnotForModText(sym) || sym == defn.SourceFileAnnot

  override protected def blockToText[T >: Untyped](block: Block[T]): Text =
    block match {
      case Block(DefDef(_, _, _, Trees.If(cond, Trees.Block(body :: Nil, _), _)) :: Nil, y) if y.symbol.name == nme.WHILE_PREFIX =>
        keywordText("while") ~ " (" ~ toText(cond) ~ ")" ~ toText(body)
      case Block(DefDef(_, _, _, Trees.Block(body :: Nil, Trees.If(cond, _, _))) :: Nil, y) if y.symbol.name == nme.DO_WHILE_PREFIX =>
        keywordText("do") ~ toText(body) ~ keywordText("while") ~ " (" ~ toText(cond) ~ ")"
      case Block((meth @ DefDef(nme.ANON_FUN, _, _, _)) :: Nil, _: Closure[T]) =>
        withEnclosingDef(meth) {
          addParamssText("", meth.termParamss) ~ " => " ~ toText(meth.rhs)
        }
      case _ =>
        super.blockToText(block)
    }

  override protected def packageDefText(tree: PackageDef): Text = {
    val stats = tree.stats.filter {
      case vdef: ValDef[?] => !vdef.symbol.is(Module)
      case _ => true
    }
    val statsText = stats match {
      case (pdef: PackageDef) :: Nil => toText(pdef)
      case _ => Fluid(toTextGlobal(stats, "\\n") :: Nil)
    }
    val bodyText =
      if (tree.pid.symbol.isEmptyPackage) statsText
      else if (currentPrecedence == TopLevelPrec) "\\n" ~ statsText
      else " {" ~ statsText ~ "}"
    (keywordStr("package ") ~ toTextPackageId(tree.pid)).provided(!tree.symbol.isEmptyPackage) ~ bodyText
  }

  override protected def templateText(tree: TypeDef, impl: Template): Text = {
    val decl =
      if (!tree.mods.is(Module))
        modText(tree.mods, tree.symbol, keywordStr(if (tree.mods.is(Trait)) "trait" else "class"), isType = true)
      else
        modText(tree.mods, tree.symbol, keywordStr("object"), isType = false)
    decl ~~ typeText(nameIdText(tree)) ~ withEnclosingDef(tree) { toTextTemplate(impl) } ~ ""
  }

  override protected def toTextTemplate(impl: Template, ofNew: Boolean = false): Text = {
    def isSynthetic(parent: Tree): Boolean = {
      val sym = parent.symbol
      sym.maybeOwner == defn.ObjectClass ||
      (sym == defn.ProductClass && impl.symbol.owner.is(Case)) ||
      (sym == defn.SerializableClass && impl.symbol.owner.is(Case))
    }
    val parents = impl.parents.filterNot(isSynthetic)
    val body = impl.body.filterNot(_.symbol.is(ParamAccessor))

    // We don't print self type and constructor for objects
    val isObject = impl.constr.symbol.owner.is(Module)
    if (isObject) {
      val parentsText = keywordText(" extends") ~~ Text(parents.map(constrText), keywordStr(" with "))
      val bodyText = " {" ~~ toTextGlobal(impl.body, "\\n") ~ "}"
      parentsText.provided(parents.nonEmpty) ~ bodyText
    }
    else super.toTextTemplate(untpd.cpy.Template(impl)(parents = parents, body = body), ofNew)
  }

  override protected def typeApplyText[T >: Untyped](tree: TypeApply[T]): Text =
    if (tree.symbol eq defn.QuotedRuntime_exprQuote) "'"
    else super.typeApplyText(tree)
}
dotty-staging/dotty
compiler/src/dotty/tools/dotc/printing/DecompilerPrinter.scala
Scala
apache-2.0
3,553
package wow.auth.protocol.packets

import wow.auth.protocol.AuthResults.AuthResult
import wow.common.codecs._
import wow.auth.protocol.{AuthResults, OpCodes, ServerPacket}
import scodec._
import scodec.codecs._

/**
  * Packet values for successful challenge.
  */
case class ServerLogonChallengeSuccess(serverKey: BigInt, g: Int, N: BigInt, salt: BigInt, unk3: BigInt)

object ServerLogonChallengeSuccess {
  implicit val codec: Codec[ServerLogonChallengeSuccess] = {
    ("serverKey" | fixedUBigIntL(32)) ::
      constantE(1)(uint8L) ::
      ("g" | uint8L) ::
      constantE(32)(uint8L) ::
      ("N" | fixedUBigIntL(32)) ::
      ("salt" | fixedUBigIntL(32)) ::
      ("unk3" | fixedUBigIntL(16)) ::
      constantE(0)(uint8L)
  }.as[ServerLogonChallengeSuccess]
}

/**
  * Server logon challenge
  */
case class ServerLogonChallenge(authResult: AuthResult, success: Option[ServerLogonChallengeSuccess]) extends ServerPacket {
  require((authResult == AuthResults.Success) == success.nonEmpty)
}

object ServerLogonChallenge {
  implicit val codec: Codec[ServerLogonChallenge] = {
    constantE(OpCodes.LogonChallenge) ::
      constantE(0)(uint8L) :: // no errors
      (("authResult" | Codec[AuthResult]) >>:~ { authResult =>
        ("success" | conditional(authResult == AuthResults.Success, Codec[ServerLogonChallengeSuccess])).hlist
      })
  }.as[ServerLogonChallenge]
}
SKNZ/SpinaciCore
wow/core/src/main/scala/wow/auth/protocol/packets/ServerLogonChallenge.scala
Scala
mit
1,418
package com.twitter.scalding

import cascading.pipe.joiner._
import org.specs._

class StarJoinJob(args : Args) extends Job(args) {
  val in0 = Tsv("input0").read.mapTo((0,1) -> ('x0, 'a)) { input : (Int, Int) => input }
  val in1 = Tsv("input1").read.mapTo((0,1) -> ('x1, 'b)) { input : (Int, Int) => input }
  val in2 = Tsv("input2").read.mapTo((0,1) -> ('x2, 'c)) { input : (Int, Int) => input }
  val in3 = Tsv("input3").read.mapTo((0,1) -> ('x3, 'd)) { input : (Int, Int) => input }

  in0.coGroupBy('x0) {
    _.coGroup('x1, in1, OuterJoinMode)
      .coGroup('x2, in2, OuterJoinMode)
      .coGroup('x3, in3, OuterJoinMode)
  }
  .project('x0, 'a, 'b, 'c, 'd)
  .write(Tsv("output"))
}

class CoGroupTest extends Specification with TupleConversions {
  noDetailedDiffs()

  "A StarJoinJob" should {
    JobTest("com.twitter.scalding.StarJoinJob")
      .source(Tsv("input0"), List((0, 1), (1, 1), (2, 1), (3, 2)))
      .source(Tsv("input1"), List((0, 1), (2, 5), (3, 2)))
      .source(Tsv("input2"), List((1, 1), (2, 8)))
      .source(Tsv("input3"), List((0, 9), (2, 11)))
      .sink[(Int, Int, Int, Int, Int)](Tsv("output")) { outputBuf =>
        "be able to work" in {
          val out = outputBuf.toSet
          val expected = Set((0,1,1,0,9), (1,1,0,1,0), (2,1,5,8,11), (3,2,2,0,0))
          out must_== expected
        }
      }
      .run
      .finish
  }
}
AoJ/scalding
src/test/scala/com/twitter/scalding/CoGroupTest.scala
Scala
apache-2.0
1,380
/** * Copyright (C) 2011 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.xforms import java.util.{Collection β‡’ JCollection} import org.apache.commons.lang3.StringUtils import org.orbeon.oxf.util.CollectionUtils._ import org.orbeon.oxf.xforms.analysis.ElementAnalysis import org.orbeon.oxf.xforms.analysis.controls._ import org.orbeon.oxf.xforms.analysis.model.{Instance, Model} import org.orbeon.oxf.xforms.event.EventHandler import org.orbeon.oxf.xforms.xbl._ import org.orbeon.oxf.xml.SAXStore import org.orbeon.xforms.XFormsId import scala.collection.JavaConverters._ trait PartGlobalOps { // Global def getMark(prefixedId: String): Option[SAXStore#Mark] // Models def getModelsForScope(scope: Scope): Seq[Model] def jGetModelsForScope(scope: Scope) = getModelsForScope(scope).asJava def getInstances(modelPrefixedId: String): JCollection[Instance] // Controls def getControlAnalysis(prefixedId: String): ElementAnalysis def findControlAnalysis(prefixedId: String): Option[ElementAnalysis] def hasControlByName(controlName: String): Boolean def controlsByName(controlName: String): Traversable[ElementAnalysis] // Events def hasHandlerForEvent(eventName: String): Boolean def hasHandlerForEvent(eventName: String, includeAllEvents: Boolean): Boolean def keypressHandlers: Seq[EventHandler] // XBL def getBinding(prefixedId: String): Option[ConcreteBinding] def getGlobals: collection.Seq[XBLBindings#Global] def allBindingsMaybeDuplicates: Iterable[AbstractBinding] // Return the scope associated with the given prefixed id (the scope is directly associated with the prefix of the id) def containingScope(prefixedId: String): Scope def scopeForPrefixedId(prefixedId: String): Scope // Repeats def repeats: Traversable[RepeatControl] def getRepeatHierarchyString(ns: String): String // AVTs def hasAttributeControl(prefixedForAttribute: String): Boolean def getAttributeControl(prefixedForAttribute: String, attributeName: String): AttributeControl // Client-side resources def scriptsByPrefixedId: Map[String, StaticScript] def uniqueJsScripts: List[ShareableScript] // Functions derived from getControlAnalysis def getControlElement(prefixedId: String) = findControlAnalysis(prefixedId) map (_.element) orNull def hasBinding(prefixedId: String) = findControlAnalysis(prefixedId) exists (_.hasBinding) def getControlPosition(prefixedId: String) = findControlAnalysis(prefixedId) collect { case viewTrait: ViewTrait β‡’ viewTrait.index } def getSelect1Analysis(prefixedId: String) = findControlAnalysis(prefixedId) match { case Some(selectionControl: SelectionControlTrait) β‡’ selectionControl case _ β‡’ null } def isValueControl(effectiveId: String) = findControlAnalysis(XFormsId.getPrefixedId(effectiveId)) exists (_.isInstanceOf[ValueTrait]) def appendClasses(sb: java.lang.StringBuilder, prefixedId: String) = findControlAnalysis(prefixedId) foreach { controlAnalysis β‡’ val controlClasses = controlAnalysis.classes if 
(StringUtils.isNotEmpty(controlClasses)) { if (sb.length > 0) sb.append(' ') sb.append(controlClasses) } } // LHHA def getLHH(prefixedId: String, lhha: LHHA) = collectByErasedType[StaticLHHASupport](getControlAnalysis(prefixedId)) flatMap (_.lhh(lhha)) orNull def getAlerts(prefixedId: String) = collectByErasedType[StaticLHHASupport](getControlAnalysis(prefixedId)).toList flatMap (_.alerts) def hasLHHA(prefixedId: String, lhha: LHHA) = collectByErasedType[StaticLHHASupport](getControlAnalysis(prefixedId)) exists (_.hasLHHA(lhha)) }
brunobuzzi/orbeon-forms
xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/PartGlobalOps.scala
Scala
lgpl-2.1
4,253
/* __ *\\ ** ________ ___ / / ___ __ ____ Scala.js API ** ** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL ** ** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-lang.org/ ** ** /____/\\___/_/ |_/____/_/ | |__/ /____/ ** ** |/____/ ** \\* */ /** * All doc-comments marked as "MDN" are by Mozilla Contributors, * distributed under the Creative Commons Attribution-ShareAlike license from * https://developer.mozilla.org/en-US/docs/Web/Reference/API */ package scala.scalajs.js /** * Math is a built-in object that has properties and methods for mathematical * constants and functions. Not a function object. * * MDN */ object Math extends Object { /** * Euler's constant and the base of natural logarithms, approximately 2.718. * * MDN */ val E: Double = ??? /** * Natural logarithm of 10, approximately 2.303. * * MDN */ val LN10: Double = ??? /** * Natural logarithm of 2, approximately 0.693. * * MDN */ val LN2: Double = ??? /** * Base 2 logarithm of E, approximately 1.443. * * MDN */ val LOG2E: Double = ??? /** * Base 10 logarithm of E, approximately 0.434. * * MSN */ val LOG10E: Double = ??? /** * Ratio of the circumference of a circle to its diameter, approximately 3.14159. * * MDN */ val PI: Double = ??? /** * Square root of 1/2; equivalently, 1 over the square root of 2, approximately 0.707. * * MDN */ val SQRT1_2: Double = ??? /** * Square root of 2, approximately 1.414. * * MDN */ val SQRT2: Double = ??? /** * Returns the absolute value of a number. * * Passing a non-numeric string or undefined/empty variable returns NaN. * Passing null returns 0. * * MDN */ def abs(x: Int): Int = ??? /** * Returns the absolute value of a number. * * Passing a non-numeric string or undefined/empty variable returns NaN. * Passing null returns 0. * * MDN */ def abs(x: Double): Double = ??? /** * The Math.acos() function returns the arccosine (in radians) of a number. * * The acos method returns a numeric value between 0 and pi radians for x * between -1 and 1. If the value of number is outside this range, it returns NaN. * * MDN */ def acos(x: Double): Double = ??? /** * The Math.asin() function returns the arcsine (in radians) of a number. * * The asin method returns a numeric value between -pi/2 and pi/2 radians for x * between -1 and 1. If the value of number is outside this range, it returns NaN. * * MDN */ def asin(x: Double): Double = ??? /** * The Math.atan() function returns the arctangent (in radians) of a number. * * The atan method returns a numeric value between -pi/2 and pi/2 radians. * * MDN */ def atan(x: Double): Double = ??? /** * The Math.atan2() function returns the arctangent of the quotient of its * arguments. * * The atan2 method returns a numeric value between -pi and pi representing * the angle theta of an (x,y) point. This is the counterclockwise angle, * measured in radians, between the positive X axis, and the point (x,y). * Note that the arguments to this function pass the y-coordinate first and * the x-coordinate second. * * atan2 is passed separate x and y arguments, and atan is passed the ratio * of those two arguments. * * MDN */ def atan2(y: Double, x: Double): Double = ??? /** * The Math.ceil() function returns the smallest integer greater than or * equal to a number. * * MDN */ def ceil(x: Double): Double = ??? /** * The Math.cos() function returns the cosine of a number. * * The cos method returns a numeric value between -1 and 1, which represents * the cosine of the angle. * * MDN */ def cos(x: Double): Double = ??? 
/** * The Math.exp() function returns E^x, where x is the argument, and E is * Euler's constant, the base of the natural logarithms. * * MDN */ def exp(x: Double): Double = ??? /** * The Math.floor() function returns the largest integer less than or equal * to a number. * * MDN */ def floor(x: Double): Double = ??? /** * The Math.log() function returns the natural logarithm (base E) of a number. * * If the value of number is negative, the return value is always NaN. * * MDN */ def log(x: Double): Double = ??? /** * The Math.max() function returns the largest of zero or more numbers. * * MDN */ def max(value1: Int, values: Int*): Int = ??? /** * The Math.max() function returns the largest of zero or more numbers. * * If no arguments are given, the result is - Infinity. * * If at least one of arguments cannot be converted to a number, the result is NaN. * * MDN */ def max(values: Double*): Double = ??? /** * The Math.min() function returns the smallest of zero or more numbers. * * MDN */ def min(value1: Int, values: Int*): Int = ??? /** * The Math.min() function returns the smallest of zero or more numbers. * * If no arguments are given, the result is Infinity. * * If at least one of arguments cannot be converted to a number, the result is NaN. * * MDN */ def min(values: Double*): Double = ??? /** * The Math.pow() function returns the base to the exponent Power, that is, base^^exponent. * * MDN */ def pow(x: Double, y: Double): Double = ??? /** * The Math.random() function returns a floating-point, pseudo-random number in * the range [0, 1) that is, from 0 (inclusive) up to but not including 1 * (exclusive), which you can then scale to your desired range. * * The random number generator is seeded from the current time, as in Java. * * MDN */ def random(): Double = ??? /** * The Math.round() function returns the value of a number rounded to the * nearest integer. * * If the fractional portion of number is .5 or greater, the argument is * rounded to the next higher integer. If the fractional portion of number * is less than .5, the argument is rounded to the next lower integer. * * MDN */ def round(x: Double): Double = ??? /** * The Math.sin() function returns the sine of a number. * * The sin method returns a numeric value between -1 and 1, which represents * the sine of the angle given in radians. * * MDN */ def sin(x: Double): Double = ??? /** * The Math.sqrt() function returns the square root (x\\sqrt{x}) of a number. * * If the value of number is negative, sqrt returns NaN. * * MDN */ def sqrt(x: Double): Double = ??? /** * The Math.tan() function returns the tangent of a number. * * The tan method returns a numeric value that represents the tangent of the angle. * * MDN */ def tan(x: Double): Double = ??? }
swhgoon/scala-js
library/src/main/scala/scala/scalajs/js/Math.scala
Scala
bsd-3-clause
7,203
package almhirt.akkax import scala.concurrent.duration._ import scala.concurrent.ExecutionContext import akka.actor.ActorRef import akka.pattern._ import almhirt.common._ import almhirt.almfuture.all._ sealed trait ComponentState object ComponentState { case object Startup extends ComponentState { override def toString = "Startup" } case object WaitingForStartSignal extends ComponentState { override def toString = "WaitingForStartSignal" } case object Running extends ComponentState { override def toString = "Running" } case object PreparingForPause extends ComponentState { override def toString = "PreparingForPause" } case object Paused extends ComponentState { override def toString = "Paused" } final case class Error(cause: almhirt.problem.ProblemCause) extends ComponentState { override def toString = s"Error: ${cause.message}" } case object PreparingForShutdown extends ComponentState { override def toString = "PreparingForShutdown" } case object ReadyForShutdown extends ComponentState { override def toString = "ReadyForShutdown" } implicit class ComponentStateOps(val self: ComponentState) extends AnyVal { def parsableString: String = self.toString } def fromString(toParse: String): AlmValidation[ComponentState] = { toParse.split(":") match { case Array(onePart) β‡’ onePart.toLowerCase match { case "startup" β‡’ scalaz.Success(Startup) case "running" β‡’ scalaz.Success(Running) case "preparingforpause" β‡’ scalaz.Success(PreparingForPause) case "paused" β‡’ scalaz.Success(Paused) case "preparingforshutdown" β‡’ scalaz.Success(PreparingForShutdown) case "readyforshutdown" β‡’ scalaz.Success(ReadyForShutdown) case x β‡’ scalaz.Failure(ParsingProblem(s""""x" is not a valid component state.""")) } case Array(first, second) β‡’ if (first.toLowerCase() == "error") { scalaz.Success(Error(UnspecifiedProblem(second.trim()))) } else { scalaz.Failure(ParsingProblem(s""""x" is not a valid component state.""")) } case _ β‡’ scalaz.Failure(ParsingProblem(s""""x" is not a valid component state.""")) } } } trait ComponentControl { def supports(action: ComponentControlAction): Boolean def changeState(command: almhirt.akkax.ActorMessages.ComponentControlCommand): Unit def state(timeout: FiniteDuration)(implicit executor: ExecutionContext): AlmFuture[ComponentState] } trait LocalComponentControl extends ComponentControl { def supportedActions: Set[ComponentControlAction] def blockedChangeStateActions: Set[ComponentControlAction] } object ComponentControl { def apply(actor: ActorRef, supportedStateChangeActions: Set[ComponentControlAction], logMsg: Option[(β‡’ String) β‡’ Unit]): ComponentControl = new ComponentControl { def supports(action: ComponentControlAction): Boolean = supportedStateChangeActions(action) def changeState(command: almhirt.akkax.ActorMessages.ComponentControlCommand): Unit = if (supports(command.action)) actor ! command else logMsg.foreach(_(s"Attempt to execute an unsupportet state change command: $command")) def state(timeout: FiniteDuration)(implicit executor: ExecutionContext): AlmFuture[ComponentState] = (actor ? 
ActorMessages.ReportComponentState)(timeout).mapCastTo[ComponentState] } } object LocalComponentControl { def apply(actor: ActorRef, theSupportedStateChangeActions: Set[ComponentControlAction], logMsg: Option[(β‡’ String) β‡’ Unit]): LocalComponentControl = LocalComponentControl(actor, theSupportedStateChangeActions, Set.empty, logMsg) def apply(actor: ActorRef, theSupportedStateChangeActions: Set[ComponentControlAction], theBlockedChangeStateActions: Set[ComponentControlAction], logMsg: Option[(β‡’ String) β‡’ Unit]): LocalComponentControl = new LocalComponentControl { val supportedActions = theSupportedStateChangeActions val blockedChangeStateActions = theBlockedChangeStateActions def supports(action: ComponentControlAction): Boolean = theSupportedStateChangeActions(action) def changeState(command: almhirt.akkax.ActorMessages.ComponentControlCommand): Unit = if (supports(command.action) && !blockedChangeStateActions.contains(command.action)) actor ! command else logMsg.foreach(_(s"Attempt to execute an unsupportet or blocked state change command: $command")) def state(timeout: FiniteDuration)(implicit executor: ExecutionContext): AlmFuture[ComponentState] = (actor ? ActorMessages.ReportComponentState)(timeout).mapCastTo[ComponentState] } implicit class LocalComponentControlOps(val self: LocalComponentControl) extends AnyVal { def toRestrictedComponentControl: ComponentControl = new ComponentControl { def supports(action: ComponentControlAction): Boolean = self.supports(action) && !self.blockedChangeStateActions(action) def changeState(command: almhirt.akkax.ActorMessages.ComponentControlCommand): Unit = self.changeState(command) def state(timeout: FiniteDuration)(implicit executor: ExecutionContext): AlmFuture[ComponentState] = self.state(timeout) } } }
chridou/almhirt
almhirt-core/src/main/scala/almhirt/akkax/ComponentControl.scala
Scala
apache-2.0
5,283
import scala.tools.partest.instrumented.Instrumentation._

object Test {
  def main(args: Array[String]): Unit = {
    startProfiling()

    // tests optimization in Cleanup for varargs reference arrays
    Array("")

    Array(true)
    Array(true, false)
    Array(1: Byte)
    Array(1: Byte, 2: Byte)
    Array(1: Short)
    Array(1: Short, 2: Short)
    Array(1)
    Array(1, 2)
    Array(1L)
    Array(1L, 2L)
    Array(1d)
    Array(1d, 2d)
    Array(1f)
    Array(1f, 2f)

    /* Not currently optimized:
       Array[Int](1, 2) etc
       Array(())
       Array((), ()) */

    stopProfiling()
    printStatistics()
  }
}
lrytz/scala
test/files/instrumented/t6611.scala
Scala
apache-2.0
626
package org.wartremover.contrib.warts

import org.wartremover.{ WartTraverser, WartUniverse }

object RefinedClasstag extends WartTraverser {
  def ctMessage(typeName: String): String =
    s"Refined types should not be used in Classtags since only the first type will be checked at runtime. Type found: $typeName"
  def mfMessage(typeName: String): String =
    s"Refined types should not be used in Manifests since only the first type will be checked at runtime. Type found: $typeName"

  def apply(u: WartUniverse): u.Traverser = {
    import u.universe._

    object RefinedTypeTree {
      def unapply(typ: Tree): Option[Type] = {
        typ.tpe match {
          case RefinedType(_, _) => Some(typ.tpe)
          case _ => None
        }
      }
    }

    def checkIfRefined(typeArgs: List[Tree], msg: String => String): Unit = {
      typeArgs.foreach {
        case arg @ RefinedTypeTree(tpe) =>
          error(u)(arg.pos, msg(tpe.toString))
        case _ =>
      }
    }

    val classTag = TermName("ClassTag")
    val applyMethod = TermName("apply")
    val manifestFactory = TermName("ManifestFactory")
    val intersectionType = TermName("intersectionType")

    new u.Traverser {
      override def traverse(tree: Tree): Unit = {
        tree match {
          // Ignore trees marked by SuppressWarnings
          case t if hasWartAnnotation(u)(t) =>
          case TypeApply(Select(Ident(`classTag`), `applyMethod`), args) =>
            checkIfRefined(args, ctMessage)
            super.traverse(tree)
          case TypeApply(Select(Select(_, `manifestFactory`), `intersectionType`), args) =>
            checkIfRefined(args, mfMessage)
            super.traverse(tree)
          case _ =>
            super.traverse(tree)
        }
      }
    }
  }
}
wartremover/wartremover-contrib
core/src/main/scala/wartremover/contrib/warts/RefinedClasstag.scala
Scala
apache-2.0
1,774
package notebook.core.parser.impl

import java.io.StringWriter

import net.java.textilej.parser.MarkupParser
import net.java.textilej.parser.builder.HtmlDocumentBuilder
import net.java.textilej.parser.markup.mediawiki.MediaWikiDialect

import notebook.core.parser.Parser

private[core] object MediaWikiParser extends Parser {

  override def parseImpl(text: String): String = {
    val sw = new StringWriter
    val builder = new HtmlDocumentBuilder(sw)
    builder.setEmitAsDocument(false)

    val parser = new MarkupParser(new MediaWikiDialect)
    parser.setBuilder(builder)
    parser.parse(text)

    sw.toString
  }
}
fkmt-disk/notebook
src/main/scala/notebook/core/parser/impl/MediaWikiParser.scala
Scala
mit
641
/* * Copyright (C) 2014 - 2019 Dennis Vriend <https://github.com/dnvriend> * Copyright (C) 2019 - 2021 Lightbend Inc. <https://www.lightbend.com> */ package akka.persistence.jdbc.config import akka.persistence.jdbc.util.ConfigOps._ import com.typesafe.config.Config import scala.concurrent.duration._ object ConfigKeys { val useSharedDb = "use-shared-db" } class SlickConfiguration(config: Config) { val jndiName: Option[String] = config.asStringOption("jndiName") val jndiDbName: Option[String] = config.asStringOption("jndiDbName") override def toString: String = s"SlickConfiguration($jndiName,$jndiDbName)" } class LegacyJournalTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.legacy_journal.columnNames") val ordering: String = cfg.getString("ordering") val deleted: String = cfg.getString("deleted") val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val created: String = cfg.getString("created") val tags: String = cfg.getString("tags") val message: String = cfg.getString("message") override def toString: String = s"JournalTableColumnNames($persistenceId,$sequenceNumber,$created,$tags,$message)" } class EventJournalTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.event_journal.columnNames") val ordering: String = cfg.getString("ordering") val deleted: String = cfg.getString("deleted") val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val writer: String = cfg.getString("writer") val writeTimestamp: String = cfg.getString("writeTimestamp") val adapterManifest: String = cfg.getString("adapterManifest") val eventPayload: String = cfg.getString("eventPayload") val eventSerId: String = cfg.getString("eventSerId") val eventSerManifest: String = cfg.getString("eventSerManifest") val metaPayload: String = cfg.getString("metaPayload") val metaSerId: String = cfg.getString("metaSerId") val metaSerManifest: String = cfg.getString("metaSerManifest") } class EventTagTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.event_tag.columnNames") val eventId: String = cfg.getString("eventId") val tag: String = cfg.getString("tag") } class LegacyJournalTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.legacy_journal") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: LegacyJournalTableColumnNames = new LegacyJournalTableColumnNames(config) override def toString: String = s"LegacyJournalTableConfiguration($tableName,$schemaName,$columnNames)" } class EventJournalTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.event_journal") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: EventJournalTableColumnNames = new EventJournalTableColumnNames(config) override def toString: String = s"EventJournalTableConfiguration($tableName,$schemaName,$columnNames)" } class EventTagTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.event_tag") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: EventTagTableColumnNames = new EventTagTableColumnNames(config) } class LegacySnapshotTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.legacy_snapshot.columnNames") 
val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val created: String = cfg.getString("created") val snapshot: String = cfg.getString("snapshot") override def toString: String = s"SnapshotTableColumnNames($persistenceId,$sequenceNumber,$created,$snapshot)" } class SnapshotTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.snapshot.columnNames") val persistenceId: String = cfg.getString("persistenceId") val sequenceNumber: String = cfg.getString("sequenceNumber") val created: String = cfg.getString("created") val snapshotPayload: String = cfg.getString("snapshotPayload") val snapshotSerId: String = cfg.getString("snapshotSerId") val snapshotSerManifest: String = cfg.getString("snapshotSerManifest") val metaPayload: String = cfg.getString("metaPayload") val metaSerId: String = cfg.getString("metaSerId") val metaSerManifest: String = cfg.getString("metaSerManifest") } class LegacySnapshotTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.legacy_snapshot") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: LegacySnapshotTableColumnNames = new LegacySnapshotTableColumnNames(config) override def toString: String = s"LegacySnapshotTableConfiguration($tableName,$schemaName,$columnNames)" } class SnapshotTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.snapshot") val tableName: String = cfg.getString("tableName") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: SnapshotTableColumnNames = new SnapshotTableColumnNames(config) override def toString: String = s"SnapshotTableConfiguration($tableName,$schemaName,$columnNames)" } class JournalPluginConfig(config: Config) { val tagSeparator: String = config.getString("tagSeparator") val dao: String = config.getString("dao") override def toString: String = s"JournalPluginConfig($tagSeparator,$dao)" } class BaseDaoConfig(config: Config) { val bufferSize: Int = config.getInt("bufferSize") val batchSize: Int = config.getInt("batchSize") val replayBatchSize: Int = config.getInt("replayBatchSize") val parallelism: Int = config.getInt("parallelism") val logicalDelete: Boolean = config.getBoolean("logicalDelete") override def toString: String = s"BaseDaoConfig($bufferSize,$batchSize,$parallelism,$logicalDelete)" } class ReadJournalPluginConfig(config: Config) { val tagSeparator: String = config.getString("tagSeparator") val dao: String = config.getString("dao") override def toString: String = s"ReadJournalPluginConfig($tagSeparator,$dao)" } class SnapshotPluginConfig(config: Config) { val dao: String = config.getString("dao") override def toString: String = s"SnapshotPluginConfig($dao)" } // aggregations class JournalConfig(config: Config) { val journalTableConfiguration = new LegacyJournalTableConfiguration(config) val eventJournalTableConfiguration = new EventJournalTableConfiguration(config) val eventTagTableConfiguration = new EventTagTableConfiguration(config) val pluginConfig = new JournalPluginConfig(config) val daoConfig = new BaseDaoConfig(config) val useSharedDb: Option[String] = config.asStringOption(ConfigKeys.useSharedDb) override def toString: String = s"JournalConfig($journalTableConfiguration,$pluginConfig,$useSharedDb)" } class SnapshotConfig(config: Config) { val legacySnapshotTableConfiguration = new LegacySnapshotTableConfiguration(config) val snapshotTableConfiguration = new 
SnapshotTableConfiguration(config) val pluginConfig = new SnapshotPluginConfig(config) val useSharedDb: Option[String] = config.asStringOption(ConfigKeys.useSharedDb) override def toString: String = s"SnapshotConfig($snapshotTableConfiguration,$pluginConfig,$useSharedDb)" } object JournalSequenceRetrievalConfig { def apply(config: Config): JournalSequenceRetrievalConfig = JournalSequenceRetrievalConfig( batchSize = config.getInt("journal-sequence-retrieval.batch-size"), maxTries = config.getInt("journal-sequence-retrieval.max-tries"), queryDelay = config.asFiniteDuration("journal-sequence-retrieval.query-delay"), maxBackoffQueryDelay = config.asFiniteDuration("journal-sequence-retrieval.max-backoff-query-delay"), askTimeout = config.asFiniteDuration("journal-sequence-retrieval.ask-timeout")) } case class JournalSequenceRetrievalConfig( batchSize: Int, maxTries: Int, queryDelay: FiniteDuration, maxBackoffQueryDelay: FiniteDuration, askTimeout: FiniteDuration) class ReadJournalConfig(config: Config) { val journalTableConfiguration = new LegacyJournalTableConfiguration(config) val eventJournalTableConfiguration = new EventJournalTableConfiguration(config) val eventTagTableConfiguration = new EventTagTableConfiguration(config) val journalSequenceRetrievalConfiguration = JournalSequenceRetrievalConfig(config) val pluginConfig = new ReadJournalPluginConfig(config) val refreshInterval: FiniteDuration = config.asFiniteDuration("refresh-interval") val maxBufferSize: Int = config.getInt("max-buffer-size") val addShutdownHook: Boolean = config.getBoolean("add-shutdown-hook") val includeDeleted: Boolean = config.getBoolean("includeLogicallyDeleted") override def toString: String = s"ReadJournalConfig($journalTableConfiguration,$pluginConfig,$refreshInterval,$maxBufferSize,$addShutdownHook,$includeDeleted)" } class DurableStateTableColumnNames(config: Config) { private val cfg = config.getConfig("tables.durable_state.columnNames") val globalOffset: String = cfg.getString("globalOffset") val persistenceId: String = cfg.getString("persistenceId") val revision: String = cfg.getString("revision") val statePayload: String = cfg.getString("statePayload") val stateSerId: String = cfg.getString("stateSerId") val stateSerManifest: String = cfg.getString("stateSerManifest") val tag: String = cfg.getString("tag") val stateTimestamp: String = cfg.getString("stateTimestamp") } class DurableStateTableConfiguration(config: Config) { private val cfg = config.getConfig("tables.durable_state") val tableName: String = cfg.getString("tableName") val refreshInterval: FiniteDuration = config.asFiniteDuration("refreshInterval") val batchSize: Int = config.getInt("batchSize") val schemaName: Option[String] = cfg.asStringOption("schemaName") val columnNames: DurableStateTableColumnNames = new DurableStateTableColumnNames(config) val stateSequenceConfig = DurableStateSequenceRetrievalConfig(config) override def toString: String = s"DurableStateTableConfiguration($tableName,$schemaName,$columnNames)" } object DurableStateSequenceRetrievalConfig { def apply(config: Config): DurableStateSequenceRetrievalConfig = DurableStateSequenceRetrievalConfig( batchSize = config.getInt("durable-state-sequence-retrieval.batch-size"), maxTries = config.getInt("durable-state-sequence-retrieval.max-tries"), queryDelay = config.asFiniteDuration("durable-state-sequence-retrieval.query-delay"), maxBackoffQueryDelay = config.asFiniteDuration("durable-state-sequence-retrieval.max-backoff-query-delay"), askTimeout = 
config.asFiniteDuration("durable-state-sequence-retrieval.ask-timeout"), revisionCacheCapacity = config.getInt("durable-state-sequence-retrieval.revision-cache-capacity")) } case class DurableStateSequenceRetrievalConfig( batchSize: Int, maxTries: Int, queryDelay: FiniteDuration, maxBackoffQueryDelay: FiniteDuration, askTimeout: FiniteDuration, revisionCacheCapacity: Int)
dnvriend/akka-persistence-jdbc
core/src/main/scala/akka/persistence/jdbc/config/AkkaPersistenceConfig.scala
Scala
apache-2.0
11,408
/*
 * File MessageHandler.scala is part of JsonRecipes.
 * JsonRecipes is opensource Minecraft mod(released under LGPLv3), created by anti344.
 * Full licence information can be found in LICENCE and LICENCE.LESSER files in jar-file of the mod.
 * Copyright © 2014, anti344
 */
package net.anti344.jsonrecipes.handlers

import net.anti344.jsonrecipes.config.Config
import cpw.mods.fml.common.eventhandler.SubscribeEvent
import cpw.mods.fml.common.gameevent.TickEvent
import collection.mutable.ArrayBuffer
import net.minecraft.client.Minecraft.{getMinecraft => mc}
import net.minecraft.util._

object MessageHandler {

  private var initialized: Boolean = false
  private val messages: ArrayBuffer[IChatComponent] = ArrayBuffer()

  @SubscribeEvent
  def onTick(e: TickEvent.ClientTickEvent) =
    if (e.phase == TickEvent.Phase.END && mc.inGameHasFocus && !initialized) {
      initialized = true
      messages.foreach(mc.thePlayer.addChatMessage)
      messages.clear()
    }

  def error(obj: AnyRef) = {
    Log.debug("[CHAT_ERR]" + String.valueOf(obj))
    if (Config.errorOutput.getBoolean)
      msg("[ERROR]" + String.valueOf(obj), '4')
  }

  def msg(msg: IChatComponent, color: Char = '7') = {
    val style = new ChatStyle().setColor(EnumChatFormatting.values()("0123456789abcdef".indexOf(color)))
    val _msg = msg.createCopy().setChatStyle(style)
    if(initialized && mc.thePlayer != null)
      mc.thePlayer.addChatMessage(_msg)
    else
      messages += _msg
  }

  implicit def str2chat(str: String): IChatComponent =
    new ChatComponentTranslation(str)

  implicit def str2chat_args(t: (String, Seq[AnyRef])): IChatComponent =
    new ChatComponentTranslation(t._1, t._2:_*)
}
mc-anti344/JsonRecipes
src/main/scala/net/anti344/jsonrecipes/handlers/MessageHandler.scala
Scala
gpl-3.0
1,699
/*
 * MUSIT is a museum database to archive natural and cultural history data.
 * Copyright (C) 2016 MUSIT Norway, part of www.uio.no (University of Oslo)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License,
 * or any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

package no.uio.musit.models

import play.api.libs.json.{JsNumber, Reads, Writes, _}

case class OrgId(underlying: Long) extends MusitId

object OrgId {

  implicit val reads: Reads[OrgId] = __.read[Long].map(OrgId.apply)

  implicit val writes: Writes[OrgId] = Writes(oid => JsNumber(oid.underlying))

  implicit def fromLong(l: Long): OrgId = OrgId(l)

  implicit def toLong(oid: OrgId): Long = oid.underlying

  implicit def fromOptLong(ml: Option[Long]): Option[OrgId] = ml.map(fromLong)

  implicit def toOptLong(moid: Option[OrgId]): Option[Long] = moid.map(toLong)
}
kpmeen/musit
musit-models/src/main/scala/no/uio/musit/models/OrgId.scala
Scala
gpl-2.0
1,436
package chandu0101.scalajs.react.components.materialui import japgolly.scalajs.react._ import materialui.Mui import scala.scalajs.js /** * key: PropTypes.string, style: PropTypes.js.Any, ref: PropTypes.String, inset: React.PropTypes.bool, */ object MuiListDivider { def apply(key: js.UndefOr[String] = js.undefined, style: js.UndefOr[js.Any] = js.undefined, ref: js.UndefOr[String] = js.undefined, inset: js.UndefOr[Boolean] = js.undefined) = { val p = js.Dynamic.literal() key.foreach(v => p.updateDynamic("key")(v)) style.foreach(v => p.updateDynamic("style")(v)) ref.foreach(v => p.updateDynamic("ref")(v)) inset.foreach(v => p.updateDynamic("inset")(v)) val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.ListDivider) f(p).asInstanceOf[ReactComponentU_] } } /** * key: PropTypes.string, style: PropTypes.js.Any, ref: PropTypes.String, disableTouchTap: React.PropTypes.bool, insetChildren: React.PropTypes.bool, leftAvatar: React.PropTypes.element, leftCheckbox: React.PropTypes.element, leftIcon: React.PropTypes.element, onMouseOut: React.PropTypes.ReactEvent => Unit, onMouseOver: React.PropTypes.ReactEvent => Unit, rightAvatar: React.PropTypes.element, rightIcon: React.PropTypes.element, rightToggle: React.PropTypes.element, secondaryText: React.PropTypes.node, secondaryTextLines: React.PropTypes.number */ case class MuiListItem(secondaryText: js.UndefOr[ReactElement] = js.undefined, style: js.UndefOr[js.Any] = js.undefined, disableTouchTap: js.UndefOr[Boolean] = js.undefined, insetChildren: js.UndefOr[Boolean] = js.undefined, ref: js.UndefOr[String] = js.undefined, rightAvatar: js.UndefOr[ReactElement] = js.undefined, leftAvatar: js.UndefOr[ReactElement] = js.undefined, key: js.UndefOr[String] = js.undefined, onMouseOver: js.UndefOr[ReactEvent => Unit] = js.undefined, onMouseOut: js.UndefOr[ReactEvent => Unit] = js.undefined, secondaryTextLines: js.UndefOr[Int] = js.undefined, leftIcon: js.UndefOr[ReactElement] = js.undefined, rightIcon: js.UndefOr[ReactElement] = js.undefined, rightToggle: js.UndefOr[ReactElement] = js.undefined, leftCheckbox: js.UndefOr[ReactElement] = js.undefined) { def toJS = { val p = js.Dynamic.literal() secondaryText.foreach(v => p.updateDynamic("secondaryText")(v)) style.foreach(v => p.updateDynamic("style")(v)) disableTouchTap.foreach(v => p.updateDynamic("disableTouchTap")(v)) insetChildren.foreach(v => p.updateDynamic("insetChildren")(v)) ref.foreach(v => p.updateDynamic("ref")(v)) rightAvatar.foreach(v => p.updateDynamic("rightAvatar")(v)) leftAvatar.foreach(v => p.updateDynamic("leftAvatar")(v)) key.foreach(v => p.updateDynamic("key")(v)) onMouseOver.foreach(v => p.updateDynamic("onMouseOver")(v)) onMouseOut.foreach(v => p.updateDynamic("onMouseOut")(v)) secondaryTextLines.foreach(v => p.updateDynamic("secondaryTextLines")(v)) leftIcon.foreach(v => p.updateDynamic("leftIcon")(v)) rightIcon.foreach(v => p.updateDynamic("rightIcon")(v)) rightToggle.foreach(v => p.updateDynamic("rightToggle")(v)) leftCheckbox.foreach(v => p.updateDynamic("leftCheckbox")(v)) p } def apply(children: ReactNode*) = { val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.ListItem) f(toJS, children.toJsArray).asInstanceOf[ReactComponentU_] } } /** * key: PropTypes.string, style: PropTypes.js.Any, ref: PropTypes.String, insetSubheader: React.PropTypes.bool, subheader: React.PropTypes.string, subheaderStyle: React.PropTypes.js.Any * */ case class MuiList(subheaderStyle: js.UndefOr[js.Any] = js.undefined, insetSubheader: js.UndefOr[Boolean] = js.undefined, style: js.UndefOr[js.Any] = 
js.undefined, ref: js.UndefOr[String] = js.undefined, key: js.UndefOr[String] = js.undefined, subheader: js.UndefOr[String] = js.undefined) { def toJS = { val p = js.Dynamic.literal() subheaderStyle.foreach(v => p.updateDynamic("subheaderStyle")(v)) insetSubheader.foreach(v => p.updateDynamic("insetSubheader")(v)) style.foreach(v => p.updateDynamic("style")(v)) ref.foreach(v => p.updateDynamic("ref")(v)) key.foreach(v => p.updateDynamic("key")(v)) subheader.foreach(v => p.updateDynamic("subheader")(v)) p } def apply(children: ReactNode*) = { val f = React.asInstanceOf[js.Dynamic].createFactory(Mui.List) f(toJS, children.toJsArray).asInstanceOf[ReactComponentU_] } }
coreyauger/scalajs-react-components
core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiLists.scala
Scala
apache-2.0
4,923
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.samza.storage.kv import java.io.File import org.apache.samza.SamzaException import org.apache.samza.container.SamzaContainerContext import org.apache.samza.metrics.MetricsRegistry import org.apache.samza.serializers.Serde import org.apache.samza.storage.{StorageEngine, StorageEngineFactory} import org.apache.samza.system.SystemStreamPartition import org.apache.samza.task.MessageCollector /** * A key value storage engine factory implementation * * This trait encapsulates all the steps needed to create a key value storage engine. It is meant to be extended * by the specific key value store factory implementations which will in turn override the getKVStore method. */ trait BaseKeyValueStorageEngineFactory[K, V] extends StorageEngineFactory[K, V] { /** * Return a KeyValueStore instance for the given store name, * which will be used as the underlying raw store * * @param storeName Name of the store * @param storeDir The directory of the store * @param registry MetricsRegistry to which to publish store specific metrics. * @param changeLogSystemStreamPartition Samza stream partition from which to receive the changelog. * @param containerContext Information about the container in which the task is executing. * @return A valid KeyValueStore instance */ def getKVStore( storeName: String, storeDir: File, registry: MetricsRegistry, changeLogSystemStreamPartition: SystemStreamPartition, containerContext: SamzaContainerContext): KeyValueStore[Array[Byte], Array[Byte]] /** * Constructs a key-value StorageEngine and returns it to the caller * * @param storeName The name of the storage engine. * @param storeDir The directory of the storage engine. * @param keySerde The serializer to use for serializing keys when reading or writing to the store. * @param msgSerde The serializer to use for serializing messages when reading or writing to the store. * @param collector MessageCollector the storage engine uses to persist changes. * @param registry MetricsRegistry to which to publish storage-engine specific metrics. * @param changeLogSystemStreamPartition Samza stream partition from which to receive the changelog. * @param containerContext Information about the container in which the task is executing. **/ def getStorageEngine( storeName: String, storeDir: File, keySerde: Serde[K], msgSerde: Serde[V], collector: MessageCollector, registry: MetricsRegistry, changeLogSystemStreamPartition: SystemStreamPartition, containerContext: SamzaContainerContext): StorageEngine = { val storageConfig = containerContext.config.subset("stores." 
+ storeName + ".", true) val batchSize = storageConfig.getInt("write.batch.size", 500) val cacheSize = storageConfig.getInt("object.cache.size", math.max(batchSize, 1000)) val enableCache = cacheSize > 0 if (cacheSize > 0 && cacheSize < batchSize) { throw new SamzaException("A store's cache.size cannot be less than batch.size as batched values reside in cache.") } if (keySerde == null) { throw new SamzaException("Must define a key serde when using key value storage.") } if (msgSerde == null) { throw new SamzaException("Must define a message serde when using key value storage.") } val rawStore = getKVStore(storeName, storeDir, registry, changeLogSystemStreamPartition, containerContext) // maybe wrap with logging val maybeLoggedStore = if (changeLogSystemStreamPartition == null) { rawStore } else { val loggedStoreMetrics = new LoggedStoreMetrics(storeName, registry) new LoggedStore(rawStore, changeLogSystemStreamPartition, collector, loggedStoreMetrics) } // wrap with serialization val serializedMetrics = new SerializedKeyValueStoreMetrics(storeName, registry) val serialized = new SerializedKeyValueStore[K, V](maybeLoggedStore, keySerde, msgSerde, serializedMetrics) // maybe wrap with caching val maybeCachedStore = if (enableCache) { val cachedStoreMetrics = new CachedStoreMetrics(storeName, registry) new CachedStore(serialized, cacheSize, batchSize, cachedStoreMetrics) } else { serialized } // wrap with null value checking val nullSafeStore = new NullSafeKeyValueStore(maybeCachedStore) // create the storage engine and return // TODO: Decide if we should use raw bytes when restoring val keyValueStorageEngineMetrics = new KeyValueStorageEngineMetrics(storeName, registry) new KeyValueStorageEngine(nullSafeStore, rawStore, keyValueStorageEngineMetrics, batchSize) } }
vjagadish/samza-clone
samza-kv/src/main/scala/org/apache/samza/storage/kv/BaseKeyValueStorageEngineFactory.scala
Scala
apache-2.0
5,672
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.feature import org.apache.spark.annotation.Since import org.apache.spark.ml.Transformer import org.apache.spark.ml.attribute.AttributeGroup import org.apache.spark.ml.param._ import org.apache.spark.ml.param.shared.{HasInputCol, HasOutputCol} import org.apache.spark.ml.util._ import org.apache.spark.mllib.feature import org.apache.spark.sql.{DataFrame, Dataset} import org.apache.spark.sql.functions.{col, udf} import org.apache.spark.sql.types.{ArrayType, StructType} /** * Maps a sequence of terms to their term frequencies using the hashing trick. * Currently we use Austin Appleby's MurmurHash 3 algorithm (MurmurHash3_x86_32) * to calculate the hash code value for the term object. * Since a simple modulo is used to transform the hash function to a column index, * it is advisable to use a power of two as the numFeatures parameter; * otherwise the features will not be mapped evenly to the columns. */ @Since("1.2.0") class HashingTF @Since("1.4.0") (@Since("1.4.0") override val uid: String) extends Transformer with HasInputCol with HasOutputCol with DefaultParamsWritable { @Since("1.2.0") def this() = this(Identifiable.randomUID("hashingTF")) /** @group setParam */ @Since("1.4.0") def setInputCol(value: String): this.type = set(inputCol, value) /** @group setParam */ @Since("1.4.0") def setOutputCol(value: String): this.type = set(outputCol, value) /** * Number of features. Should be greater than 0. * (default = 2^18^) * @group param */ @Since("1.2.0") val numFeatures = new IntParam(this, "numFeatures", "number of features (> 0)", ParamValidators.gt(0)) /** * Binary toggle to control term frequency counts. * If true, all non-zero counts are set to 1. This is useful for discrete probabilistic * models that model binary events rather than integer counts. * (default = false) * @group param */ @Since("2.0.0") val binary = new BooleanParam(this, "binary", "If true, all non zero counts are set to 1. " + "This is useful for discrete probabilistic models that model binary events rather " + "than integer counts") setDefault(numFeatures -> (1 << 18), binary -> false) /** @group getParam */ @Since("1.2.0") def getNumFeatures: Int = $(numFeatures) /** @group setParam */ @Since("1.2.0") def setNumFeatures(value: Int): this.type = set(numFeatures, value) /** @group getParam */ @Since("2.0.0") def getBinary: Boolean = $(binary) /** @group setParam */ @Since("2.0.0") def setBinary(value: Boolean): this.type = set(binary, value) @Since("2.0.0") override def transform(dataset: Dataset[_]): DataFrame = { val outputSchema = transformSchema(dataset.schema) val hashingTF = new feature.HashingTF($(numFeatures)).setBinary($(binary)) // TODO: Make the hashingTF.transform natively in ml framework to avoid extra conversion. 
val t = udf { terms: Seq[_] => hashingTF.transform(terms).asML } val metadata = outputSchema($(outputCol)).metadata dataset.select(col("*"), t(col($(inputCol))).as($(outputCol), metadata)) } @Since("1.4.0") override def transformSchema(schema: StructType): StructType = { val inputType = schema($(inputCol)).dataType require(inputType.isInstanceOf[ArrayType], s"The input column must be ${ArrayType.simpleString}, but got ${inputType.catalogString}.") val attrGroup = new AttributeGroup($(outputCol), $(numFeatures)) SchemaUtils.appendColumn(schema, attrGroup.toStructField()) } @Since("1.4.1") override def copy(extra: ParamMap): HashingTF = defaultCopy(extra) } @Since("1.6.0") object HashingTF extends DefaultParamsReadable[HashingTF] { @Since("1.6.0") override def load(path: String): HashingTF = super.load(path) }
WindCanDie/spark
mllib/src/main/scala/org/apache/spark/ml/feature/HashingTF.scala
Scala
apache-2.0
4,582
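A minimal usage sketch for the HashingTF transformer above (not part of the original file). It assumes a DataFrame `df` carrying an array-of-strings column named "words"; the column name and the helper object are illustrative. It follows the scaladoc's advice to use a power of two for numFeatures.

import org.apache.spark.ml.feature.HashingTF
import org.apache.spark.sql.DataFrame

object HashingTFExample {
  // `df` is assumed to carry a column "words" of type Array[String].
  def featurize(df: DataFrame): DataFrame = {
    val hashingTF = new HashingTF()
      .setInputCol("words")
      .setOutputCol("rawFeatures")
      .setNumFeatures(1 << 10) // power of two, as the scaladoc above recommends

    hashingTF.transform(df)
  }
}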
// Copyright 2017 Dennis Vriend
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.github.dnvriend.ops

import java.nio.ByteBuffer

import scala.language.implicitConversions

object ByteBufferOps extends ByteBufferOps

trait ByteBufferOps {
  implicit def toByteBufferOps(that: ByteBuffer): ByteBufferOpsImpl = new ByteBufferOpsImpl(that)
}

class ByteBufferOpsImpl(that: ByteBuffer) {
  def toByteArray: Array[Byte] = {
    that.array()
  }
}
dnvriend/serverless-test
aws-lambda-handler/src/main/scala/com/github/dnvriend/ops/ByteBufferOps.scala
Scala
apache-2.0
965
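A small sketch of the implicit enrichment above; the wrapping object is illustrative and everything else comes from the JDK and the trait as defined.

import java.nio.ByteBuffer

import com.github.dnvriend.ops.ByteBufferOps._

object ByteBufferOpsExample extends App {
  val buffer: ByteBuffer = ByteBuffer.wrap(Array[Byte](1, 2, 3))
  // The implicit conversion wraps the buffer in ByteBufferOpsImpl.
  val bytes: Array[Byte] = buffer.toByteArray
  println(bytes.mkString(","))
}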
import sbt._
import Keys._
import com.typesafe.sbt.SbtMultiJvm
import com.typesafe.sbt.SbtMultiJvm.MultiJvmKeys.MultiJvm

object ApplicationBuild extends Build {

  val appName = "akka-raft"
  val appVersion = "1.0-SNAPSHOT"

  import Dependencies._

  val debugInUse = SettingKey[Boolean]("debug-in-use", "debug is used")

  lazy val akkaRaft = Project(appName, file("."))
    .configs(MultiJvm)
    .settings(multiJvmSettings: _*)
    .settings(
      libraryDependencies ++= generalDependencies
    )

  lazy val multiJvmSettings = SbtMultiJvm.multiJvmSettings ++ Seq(
    // make sure that MultiJvm tests are compiled by the default test compilation
    compile in MultiJvm <<= (compile in MultiJvm) triggeredBy (compile in Test),
    // disable parallel tests
    parallelExecution in Test := false,
    // make sure that MultiJvm tests are executed by the default test target
    executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)).map {
      case (outputOfTests, outputOfMultiJVMTests) =>
        Tests.Output(
          Seq(outputOfTests.overall, outputOfMultiJVMTests.overall).sorted.reverse.head,
          outputOfTests.events ++ outputOfMultiJVMTests.events,
          outputOfTests.summaries ++ outputOfMultiJVMTests.summaries)
    }
  )
}

object Dependencies {
  val akkaVersion = "2.4.4"

  val generalDependencies = Seq(
    "com.typesafe.akka" %% "akka-actor" % akkaVersion,
    "com.typesafe.akka" %% "akka-slf4j" % akkaVersion,
    "com.typesafe.akka" %% "akka-cluster" % akkaVersion,
    "com.typesafe.akka" %% "akka-persistence" % akkaVersion,
    "com.typesafe.akka" %% "akka-testkit" % akkaVersion % "test",
    "com.typesafe.akka" %% "akka-multi-node-testkit" % akkaVersion % "test",
    "org.iq80.leveldb" % "leveldb" % "0.7",
    "org.fusesource.leveldbjni" % "leveldbjni-all" % "1.8",
    "org.mockito" % "mockito-core" % "1.9.5" % "test",
    "org.scalatest" %% "scalatest" % "2.2.1" % "test"
  )
}
ktoso/akka-raft
project/Build.scala
Scala
apache-2.0
2,027
object Test {
  def ser[T](s: Seq[T]): Unit = {
    val bos = new java.io.ByteArrayOutputStream()
    val oos = new java.io.ObjectOutputStream(bos)
    oos.writeObject(s)

    val ois = new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(bos.toByteArray))
    val obj = ois.readObject()
    println(obj)
    println(obj.asInstanceOf[Seq[T]].toList)
  }

  def main(args: Array[String]): Unit = {
    ser(Stream(1, 2, 3))
    ser(Stream(1))
    ser(Stream())

    ser(LazyList(1, 2, 3))
    ser(LazyList(1))
    ser(LazyList())
  }
}
som-snytt/dotty
tests/run/serialize-stream.scala
Scala
apache-2.0
546
/***********************************************************************
 * Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0 which
 * accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 *************************************************************************/

package org.locationtech.geomesa.utils

import org.geotools.data.FeatureReader
import org.geotools.data.collection.DelegateFeatureReader
import org.geotools.feature.collection.DelegateFeatureIterator
import org.geotools.referencing.CRS
import org.geotools.referencing.crs.DefaultGeographicCRS
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}

import scala.util.Try

package object geotools {

  // use the epsg jar if it's available (e.g. in geoserver), otherwise use the less-rich constant
  val CRS_EPSG_4326 = try { CRS.decode("EPSG:4326") } catch { case t: Throwable => DefaultGeographicCRS.WGS84 }

  type FR = FeatureReader[SimpleFeatureType, SimpleFeature]
  type DFR = DelegateFeatureReader[SimpleFeatureType, SimpleFeature]
  type DFI = DelegateFeatureIterator[SimpleFeature]
}
drackaer/geomesa
geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/geotools/package.scala
Scala
apache-2.0
1,266
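A brief sketch of how the constant and type aliases above are typically pulled in; the object and method names are illustrative only.

import org.locationtech.geomesa.utils.geotools._

object GeoToolsAliasesExample {
  // WGS84, decoded from the EPSG database when the epsg jar is on the classpath
  val crs = CRS_EPSG_4326

  // FR stands in for FeatureReader[SimpleFeatureType, SimpleFeature]
  def typeNameOf(reader: FR): String = reader.getFeatureType.getTypeName
}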
package lara.epfl.scalasca.rules import lara.epfl.scalasca.core._ import scala.tools.nsc._ import reflect.runtime.universe.Transformer import scala.tools.reflect.ToolBox case class BlockConstantPropagationMapper(symbolTable: Map[Global#Symbol, SymbolImage]) extends RuleResult with SymbolMapper { override def getMapping(): Map[Global#Symbol, SymbolImage] = symbolTable override def warning = Notice("GEN_BLOCK_CONST_PROP", "Propagating constants inside syntactic blocks (simple operations)", Console.GREEN + "No constants to be propagated" + Console.RESET, GeneralCategory()) override def toString: String = if (symbolTable.size > 0) warning.formattedWarning + Console.BLUE + " " + symbolTable.size + " values evaluated as constants" + Console.RESET else warning.formattedDefaultMessage override def isSuccess: Boolean = symbolTable.size == 0 } /** * GEN_BLOCK_CONST_PROP * * Considers: * - Values in simple expression blocks */ case class BlockConstantPropagationTraversalState(map: Map[Global#Symbol, Any]) extends TraversalState class BlockConstantPropagation[T <: Global](val global: T, inputResults: List[RuleResult] = List()) extends ASTRule with ConstantPropagationEvaluator { import global._ type TS = BlockConstantPropagationTraversalState type RR = BlockConstantPropagationMapper override val ruleName = "GEN_BLOCK_CONST_PROP" override def getRuleResult(state: TS): RR = BlockConstantPropagationMapper(state.map.map({ case (k, v) => (k, LiteralImage(v)) })) override def getDefaultState(): TS = BlockConstantPropagationTraversalState(Map()) override def mergeStates(s1: TS, s2: TS): TS = BlockConstantPropagationTraversalState(s1.map ++ s2.map) override def step(tree: Global#Tree, state: TS): List[(Option[TT], TS)] = tree match { case q"package $ref { ..$stats }" => goto(stats, state) //Ignores class fields // Quasiquote throws weird match error in some cases? 
// case q"$mods class $tpname[..$targs] $ctorMods(...$paramss) extends { ..$early } with ..$parents { $self => ..$stats }" => { case ClassDef(mods, name, tparams, Template(parents, self, stats)) => { goto(stats, state, stat => stat match { case q"$mods def $tname[..$targs](...$paramss): $tpt = $expr" => true case _ => false}) } //Ignores object fields case q"$mods object $tname extends { ..$early } with ..$parents { $self => ..$body }" => { goto(body, state, member => member match { case q"$mods def $tname[..$targs](...$paramss): $tpt = $expr" => true case _ => false}) } //Ignores trait fields case q"$mods trait $tpname[..$tparams] extends { ..$earlydefns } with ..$parents { $self => ..$stats }" => { goto(stats, state, stat => stat match { case q"$mods def $tname[..$targs](...$paramss): $tpt = $expr" => true case _ => false}) } //Functions, provided they are more than a mere literal case functionDefinition @ q"$mods def $tname[..$targs](...$paramss): $tpt = $expr" => expr match { case Block(_, _) => goto(expr, state) case _ => goto(Nil, state) } //Regular if case q"if ($cond) $thenP else $elseP" => goto(List(cond, thenP, elseP), state) case constantVal @ q"$mods val $tname: $tpt = $expr" => expr match { //Constant val literal case cst @ Literal(Constant(constant)) => gotoLeaf(state.copy(map = state.map + (constantVal.symbol -> constant))) case application @ q"$fun(...$args)" => evaluateToConstant(application)(global)(state.map) match { case Some(constant) => { if (constant.isInstanceOf[Int]) gotoLeaf(state.copy(map = state.map + (constantVal.symbol -> constant.asInstanceOf[Int]))) else if (constant.isInstanceOf[Double]) gotoLeaf(state.copy(map = state.map + (constantVal.symbol -> constant.asInstanceOf[Double]))) else if (constant.isInstanceOf[Boolean]) gotoLeaf(state.copy(map = state.map + (constantVal.symbol -> constant.asInstanceOf[Boolean]))) else if (constant.isInstanceOf[String]) gotoLeaf(state.copy(map = state.map + (constantVal.symbol -> constant.asInstanceOf[String]))) else gotoLeaf(state) } case c => gotoLeaf(state) } case anyOther => goto(anyOther.children, state) } case block @ q"{ ..$stats }" => stats match { case stat :: Nil => goto(stat.children, state) case s :: rest => goto(block.children, state) case Nil => gotoLeaf(state) } case anyOther => goto(anyOther.children, state) } override def apply(syntaxTree: Tree): RR = { ASTRule.apply(global)(syntaxTree, List(this)) match { case result :: rest => result match { case b @ BlockConstantPropagationMapper(_) => b case _ => BlockConstantPropagationMapper(Map()) } case _ => BlockConstantPropagationMapper(Map()) } } }
jean-andre-gauthier/scalasca
src/main/scala/lara/epfl/scalasca/rules/BlockConstantPropagation.scala
Scala
bsd-3-clause
4,939
/* Copyright 2010 the original author or authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package net.gumbix.hl7dsl.DSL import org.hl7.rim.{RimObjectFactory, Person} import org.hl7.types._ import net.gumbix.hl7dsl.helper.ImplicitDef._ import org.hl7.rim.impl.PersonImpl /** * Wrapper class for the RIM Class "Person" * @author Ahmet GΓΌl ([email protected]) */ class PersonDSL(person: Person) extends LivingSubjectDSL(person) { /** * @param cloneName Required to navigate through the object graph. */ def this() = { this (RimObjectFactory.getInstance.createRimObject("Person").asInstanceOf[Person]) } /** * @return BAG[AD] */ def addr: BAG[AD] = person.getAddr def addr_=(v: BAG[AD]) { person.setAddr(v) } /** * @return CE */ def maritalStatusCode: CE = person.getMaritalStatusCode def maritalStatusCode_=(v: CE) { person.setMaritalStatusCode(v) } /** * @return CE */ def educationLevelCode: CE = person.getEducationLevelCode def educationLevelCode_=(v: CE) { person.setEducationLevelCode(v) } /** * @return SET[CE] */ def raceCode: SET[CE] = person.getRaceCode def raceCode_=(v: SET[CE]) { person.setRaceCode(v) } /** * @return SET[CE] */ def disabilityCode: SET[CE] = person.getDisabilityCode def disabilityCode_=(v: SET[CE]) { person.setDisabilityCode(v) } /** * @return CE */ def livingArrangementCode: CE = person.getLivingArrangementCode def livingArrangementCode_=(v: CE) { person.setLivingArrangementCode(v) } /** * @return CE */ def religiousAffiliationCode: CE = person.getReligiousAffiliationCode def religiousAffiliationCode_=(v: CE) { person.setReligiousAffiliationCode(v) } /** * @return SET[CE] */ def ethnicGroupCode: SET[CE] = person.getEthnicGroupCode def ethnicGroupCode_=(v: SET[CE]) { person.setEthnicGroupCode(v) } override def clone() = { val clonedPerson = person.asInstanceOf[PersonImpl].clone() val newPerson = new PersonDSL(clonedPerson.asInstanceOf[Person]) newPerson } /** * @return Person */ // def getPerson: Person = entity }
markusgumbel/dshl7
core/src/main/scala/net/gumbix/hl7dsl/DSL/PersonDSL.scala
Scala
apache-2.0
2,687
package com.wavesplatform.state

import com.typesafe.config.Config
import net.ceedubs.ficus.Ficus._

case class Settings(
    networkConfigFile: String,
    aliasesFile: String,
    restTxsFile: String,
    blocksFile: String,
    accountsFile: String,
    assetsFile: String,
    dataFile: String
)

object Settings {
  def fromConfig(config: Config): Settings = {
    import net.ceedubs.ficus.readers.ArbitraryTypeReader._

    config.as[Settings]("waves.benchmark.state")
  }
}
wavesplatform/Waves
benchmark/src/main/scala/com/wavesplatform/state/Settings.scala
Scala
mit
480
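A hedged sketch of a configuration that `Settings.fromConfig` above would accept; the file names are placeholders, and the keys are assumed to match the case class field names exactly since no custom Ficus name mapper is imported in the file.

import com.typesafe.config.ConfigFactory
import com.wavesplatform.state.Settings

object SettingsExample extends App {
  // Hypothetical benchmark configuration.
  val config = ConfigFactory.parseString(
    """waves.benchmark.state {
      |  networkConfigFile = "waves.conf"
      |  aliasesFile = "aliases.csv"
      |  restTxsFile = "rest-txs.csv"
      |  blocksFile = "blocks.csv"
      |  accountsFile = "accounts.csv"
      |  assetsFile = "assets.csv"
      |  dataFile = "data.csv"
      |}""".stripMargin)

  println(Settings.fromConfig(config))
}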
/*
 * Scala (https://www.scala-lang.org)
 *
 * Copyright EPFL and Lightbend, Inc.
 *
 * Licensed under Apache License 2.0
 * (http://www.apache.org/licenses/LICENSE-2.0).
 *
 * See the NOTICE file distributed with this work for
 * additional information regarding copyright ownership.
 */

package scala
package sys

import scala.collection.mutable

/** The internal implementation of scala.sys.Prop. */
private[sys] class PropImpl[+T](val key: String, valueFn: String => T) extends Prop[T] {
  def value: T = if (isSet) valueFn(get) else zero
  def isSet = underlying contains key
  def set(newValue: String): String = {
    val old = if (isSet) get else null
    underlying(key) = newValue
    old
  }
  def setValue[T1 >: T](newValue: T1): T = {
    val old = value
    if (newValue == null) set(null)
    else set("" + newValue)
    old
  }
  def get: String =
    if (isSet) underlying.getOrElse(key, "")
    else ""

  def clear(): Unit = underlying -= key
  def option: Option[T] = if (isSet) Some(value) else None
  def or[T1 >: T](alt: => T1): T1 = if (isSet) value else alt

  /** The underlying property map, in our case always sys.props */
  protected def underlying: mutable.Map[String, String] = scala.sys.props
  protected def zero: T = null.asInstanceOf[T]
  private def getString = if (isSet) "currently: " + get else "unset"
  override def toString = "%s (%s)".format(key, getString)
}

private[sys] abstract class CreatorImpl[+T](f: String => T) extends Prop.Creator[T] {
  def apply(key: String): Prop[T] = new PropImpl[T](key, f)
}
scala/scala
src/library/scala/sys/PropImpl.scala
Scala
apache-2.0
1,558
package controllers import play.api.mvc._ import play.api.libs.json._ import play.api.libs.json.Reads._ import play.api.libs.functional.syntax._ import play.api.cache._ import javax.inject.Inject import closeness.core.vertex.Graph import closeness.core.vertex.Edge //import closeness.core.vertex.Node class Application @Inject() (cache: CacheApi) extends Controller { case class EdgePresenter(fromNode: String, toNode: String) def graph: Graph[String] = cache.getOrElse("graph")(new Graph[String]().add("1", "6").add("6", "3").add("1", "2")) implicit val edgePresenterReads: Reads[EdgePresenter] = ( (__ \ "fromNode").read[String](minLength[String](1)) and (__ \ "toNode").read[String](minLength[String](1)))(EdgePresenter.apply _) def showGraph = Action { Ok(Json.toJson(Json.obj( "nodes" -> graph.nodes.map { x => Json.obj("name" -> x.value) }, "edges" -> graph.edges.map { x => Json.obj( "nodeIn" -> x.nodeIn.value, "nodeOut" -> x.nodeOut.value) }))) } def showNodes = Action { Ok(Json.toJson(Json.obj( "nodes" -> graph.nodes.map { x => Json.obj("name" -> x.value) }))) } def showEdges = Action { Ok(Json.toJson(Json.obj( "edges" -> graph.edges.map { x => Json.obj( "fromNode" -> x.nodeIn.value, "toNode" -> x.nodeOut.value) }))) } def showRanking = Action { Ok(Json.toJson(Json.obj( "ranking" -> graph.closenessRanking().map { x => Json.obj( "node" -> x.node.value, "value" -> x.position) }))) } def saveEdges = Action(BodyParsers.parse.json) { request => val placeResult = request.body.validate[EdgePresenter] placeResult.fold( errors => { BadRequest(Json.obj("status" -> "KO", "message" -> JsError.toJson(errors))) }, edge => { cache.set("graph", graph.add(edge.fromNode, edge.toNode)) Ok(Json.obj("status" -> "OK", "message" -> ("Edge '" + edge + "' saved."))) }) } }
restarac/close2center
closeness-web/app/controllers/Application.scala
Scala
gpl-3.0
2,050
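For the `saveEdges` endpoint above, a request body must satisfy the `edgePresenterReads` validation (both fields non-empty strings). A hedged sketch of building such a payload with play-json; the wrapping object and the node values are invented.

import play.api.libs.json.Json

object SaveEdgesPayloadExample extends App {
  // Both fields must be non-empty strings to pass edgePresenterReads validation.
  val payload = Json.obj("fromNode" -> "1", "toNode" -> "6")
  // e.g. POST this JSON to the route bound to Application.saveEdges
  println(Json.prettyPrint(payload))
}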
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.python import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import org.apache.spark.api.python.PythonEvalType import org.apache.spark.sql.AnalysisException import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.catalyst.rules.Rule /** * Extracts all the Python UDFs in logical aggregate, which depends on aggregate expression or * grouping key, or doesn't depend on any above expressions, evaluate them after aggregate. */ object ExtractPythonUDFFromAggregate extends Rule[LogicalPlan] { /** * Returns whether the expression could only be evaluated within aggregate. */ private def belongAggregate(e: Expression, agg: Aggregate): Boolean = { e.isInstanceOf[AggregateExpression] || PythonUDF.isGroupedAggPandasUDF(e) || agg.groupingExpressions.exists(_.semanticEquals(e)) } private def hasPythonUdfOverAggregate(expr: Expression, agg: Aggregate): Boolean = { expr.find { e => PythonUDF.isScalarPythonUDF(e) && (e.references.isEmpty || e.find(belongAggregate(_, agg)).isDefined) }.isDefined } private def extract(agg: Aggregate): LogicalPlan = { val projList = new ArrayBuffer[NamedExpression]() val aggExpr = new ArrayBuffer[NamedExpression]() agg.aggregateExpressions.foreach { expr => if (hasPythonUdfOverAggregate(expr, agg)) { // Python UDF can only be evaluated after aggregate val newE = expr transformDown { case e: Expression if belongAggregate(e, agg) => val alias = e match { case a: NamedExpression => a case o => Alias(e, "agg")() } aggExpr += alias alias.toAttribute } projList += newE.asInstanceOf[NamedExpression] } else { aggExpr += expr projList += expr.toAttribute } } // There is no Python UDF over aggregate expression Project(projList, agg.copy(aggregateExpressions = aggExpr)) } def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case agg: Aggregate if agg.aggregateExpressions.exists(hasPythonUdfOverAggregate(_, agg)) => extract(agg) } } /** * Extracts PythonUDFs in logical aggregate, which are used in grouping keys, evaluate them * before aggregate. * This must be executed after `ExtractPythonUDFFromAggregate` rule and before `ExtractPythonUDFs`. 
*/ object ExtractGroupingPythonUDFFromAggregate extends Rule[LogicalPlan] { private def hasScalarPythonUDF(e: Expression): Boolean = { e.find(PythonUDF.isScalarPythonUDF).isDefined } private def extract(agg: Aggregate): LogicalPlan = { val projList = new ArrayBuffer[NamedExpression]() val groupingExpr = new ArrayBuffer[Expression]() val attributeMap = mutable.HashMap[PythonUDF, NamedExpression]() agg.groupingExpressions.foreach { expr => if (hasScalarPythonUDF(expr)) { val newE = expr transformDown { case p: PythonUDF => // This is just a sanity check, the rule PullOutNondeterministic should // already pull out those nondeterministic expressions. assert(p.udfDeterministic, "Non-determinstic PythonUDFs should not appear " + "in grouping expression") val canonicalized = p.canonicalized.asInstanceOf[PythonUDF] if (attributeMap.contains(canonicalized)) { attributeMap(canonicalized) } else { val alias = Alias(p, "groupingPythonUDF")() projList += alias attributeMap += ((canonicalized, alias.toAttribute)) alias.toAttribute } } groupingExpr += newE } else { groupingExpr += expr } } val aggExpr = agg.aggregateExpressions.map { expr => expr.transformUp { // PythonUDF over aggregate was pull out by ExtractPythonUDFFromAggregate. // PythonUDF here should be either // 1. Argument of an aggregate function. // CheckAnalysis guarantees the arguments are deterministic. // 2. PythonUDF in grouping key. Grouping key must be deterministic. // 3. PythonUDF not in grouping key. It is either no arguments or with grouping key // in its arguments. Such PythonUDF was pull out by ExtractPythonUDFFromAggregate, too. case p: PythonUDF if p.udfDeterministic => val canonicalized = p.canonicalized.asInstanceOf[PythonUDF] attributeMap.getOrElse(canonicalized, p) }.asInstanceOf[NamedExpression] } agg.copy( groupingExpressions = groupingExpr, aggregateExpressions = aggExpr, child = Project(projList ++ agg.child.output, agg.child)) } def apply(plan: LogicalPlan): LogicalPlan = plan transformUp { case agg: Aggregate if agg.groupingExpressions.exists(hasScalarPythonUDF(_)) => extract(agg) } } /** * Extracts PythonUDFs from operators, rewriting the query plan so that the UDF can be evaluated * alone in a batch. * * Only extracts the PythonUDFs that could be evaluated in Python (the single child is PythonUDFs * or all the children could be evaluated in JVM). * * This has the limitation that the input to the Python UDF is not allowed include attributes from * multiple child operators. */ object ExtractPythonUDFs extends Rule[LogicalPlan] with PredicateHelper { private type EvalType = Int private type EvalTypeChecker = EvalType => Boolean private def hasScalarPythonUDF(e: Expression): Boolean = { e.find(PythonUDF.isScalarPythonUDF).isDefined } private def canEvaluateInPython(e: PythonUDF): Boolean = { e.children match { // single PythonUDF child could be chained and evaluated in Python case Seq(u: PythonUDF) => e.evalType == u.evalType && canEvaluateInPython(u) // Python UDF can't be evaluated directly in JVM case children => !children.exists(hasScalarPythonUDF) } } private def collectEvaluableUDFsFromExpressions(expressions: Seq[Expression]): Seq[PythonUDF] = { // If fisrt UDF is SQL_SCALAR_PANDAS_ITER_UDF, then only return this UDF, // otherwise check if subsequent UDFs are of the same type as the first UDF. 
(since we can only // extract UDFs of the same eval type) var firstVisitedScalarUDFEvalType: Option[Int] = None def canChainUDF(evalType: Int): Boolean = { if (evalType == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF) { false } else { evalType == firstVisitedScalarUDFEvalType.get } } def collectEvaluableUDFs(expr: Expression): Seq[PythonUDF] = expr match { case udf: PythonUDF if PythonUDF.isScalarPythonUDF(udf) && canEvaluateInPython(udf) && firstVisitedScalarUDFEvalType.isEmpty => firstVisitedScalarUDFEvalType = Some(udf.evalType) Seq(udf) case udf: PythonUDF if PythonUDF.isScalarPythonUDF(udf) && canEvaluateInPython(udf) && canChainUDF(udf.evalType) => Seq(udf) case e => e.children.flatMap(collectEvaluableUDFs) } expressions.flatMap(collectEvaluableUDFs) } def apply(plan: LogicalPlan): LogicalPlan = plan match { // SPARK-26293: A subquery will be rewritten into join later, and will go through this rule // eventually. Here we skip subquery, as Python UDF only needs to be extracted once. case _: Subquery => plan case _ => plan transformUp { // A safe guard. `ExtractPythonUDFs` only runs once, so we will not hit `BatchEvalPython` and // `ArrowEvalPython` in the input plan. However if we hit them, we must skip them, as we can't // extract Python UDFs from them. case p: BatchEvalPython => p case p: ArrowEvalPython => p case plan: LogicalPlan => extract(plan) } } /** * Extract all the PythonUDFs from the current operator and evaluate them before the operator. */ private def extract(plan: LogicalPlan): LogicalPlan = { val udfs = collectEvaluableUDFsFromExpressions(plan.expressions) // ignore the PythonUDF that come from second/third aggregate, which is not used .filter(udf => udf.references.subsetOf(plan.inputSet)) if (udfs.isEmpty) { // If there aren't any, we are done. plan } else { val attributeMap = mutable.HashMap[PythonUDF, Expression]() // Rewrite the child that has the input required for the UDF val newChildren = plan.children.map { child => // Pick the UDF we are going to evaluate val validUdfs = udfs.filter { udf => // Check to make sure that the UDF can be evaluated with only the input of this child. udf.references.subsetOf(child.outputSet) } if (validUdfs.nonEmpty) { require( validUdfs.forall(PythonUDF.isScalarPythonUDF), "Can only extract scalar vectorized udf or sql batch udf") val resultAttrs = validUdfs.zipWithIndex.map { case (u, i) => AttributeReference(s"pythonUDF$i", u.dataType)() } val evalTypes = validUdfs.map(_.evalType).toSet if (evalTypes.size != 1) { throw new AnalysisException( s"Expected udfs have the same evalType but got different evalTypes: " + s"${evalTypes.mkString(",")}") } val evalType = evalTypes.head val evaluation = evalType match { case PythonEvalType.SQL_BATCHED_UDF => BatchEvalPython(validUdfs, resultAttrs, child) case PythonEvalType.SQL_SCALAR_PANDAS_UDF | PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF => ArrowEvalPython(validUdfs, resultAttrs, child, evalType) case _ => throw new AnalysisException("Unexcepted UDF evalType") } attributeMap ++= validUdfs.zip(resultAttrs) evaluation } else { child } } // Other cases are disallowed as they are ambiguous or would require a cartesian // product. 
udfs.filterNot(attributeMap.contains).foreach { udf => sys.error(s"Invalid PythonUDF $udf, requires attributes from more than one child.") } val rewritten = plan.withNewChildren(newChildren).transformExpressions { case p: PythonUDF if attributeMap.contains(p) => attributeMap(p) } // extract remaining python UDFs recursively val newPlan = extract(rewritten) if (newPlan.output != plan.output) { // Trim away the new UDF value if it was only used for filtering or something. Project(plan.output, newPlan) } else { newPlan } } } }
pgandhi999/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFs.scala
Scala
apache-2.0
11,616
package com.sksamuel.scapegoat.inspections.string

import com.sksamuel.scapegoat.{Inspection, InspectionContext, Inspector, Levels}

/**
 * @author
 *   Stephen Samuel
 */
class SubstringZero
    extends Inspection(
      text = "String.substring(0)",
      defaultLevel = Levels.Info,
      description = "Checks for String.substring(0).",
      explanation = "Use of String.substring(0) will always return the same string."
    ) {

  def inspector(context: InspectionContext): Inspector =
    new Inspector(context) {
      override def postTyperTraverser: context.Traverser =
        new context.Traverser {

          import context.global._

          private val Substring = TermName("substring")
          private val StringType = typeOf[String]

          override def inspect(tree: Tree): Unit = {
            tree match {
              case Apply(Select(lhs, Substring), List(Literal(Constant(0)))) if lhs.tpe <:< StringType =>
                context.warn(tree.pos, self, tree.toString.take(100))
              case _ => continue(tree)
            }
          }
        }
    }
}
sksamuel/scapegoat
src/main/scala/com/sksamuel/scapegoat/inspections/string/SubstringZero.scala
Scala
apache-2.0
1,092
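A small snippet of the pattern this inspection flags; the object and value names are purely illustrative.

object SubstringZeroExample extends App {
  val greeting = "hello"

  // Flagged by the inspection: substring(0) always returns the receiver unchanged.
  val same = greeting.substring(0)

  // Equivalent, without the redundant call.
  val better = greeting

  println(same == better) // true
}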
package demo.akka.stream

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source, Flow}

import scala.concurrent.Future

object Demo1 extends App {

  implicit val system = ActorSystem()
  import system.dispatcher
  implicit val mat = ActorMaterializer()

  val source: Source[Int, Unit] = Source(1 to 10000000)

  val flow = Flow[Int].map(_ + 1).filter(_ % 2 == 0)

  val sink: Sink[Int, Future[Unit]] = Sink.foreach(println)

  source.via(flow).runWith(sink).onComplete(_ => system.shutdown)
}
MartinSeeler/akka-stream-introduction
src/main/scala/demo/akka/stream/Demo1.scala
Scala
mit
552
package models import java.util.Date import com.mongodb.casbah.Imports._ import org.bson.types.ObjectId import models.mongoContext._ import play.api.Play.current import play.api.libs.json._ import play.api.libs.json.Json._ import libs.json._ import com.novus.salat.annotations.raw.Salat import com.novus.salat.dao.{SalatDAO, ModelCompanion} import se.radley.plugin.salat._ import play.api.libs.json.JsString import play.api.libs.json.JsObject import play.api.libs.json.JsNumber import play.api.mvc.MultipartFormData.FilePart import play.api.libs.Files.TemporaryFile import com.mongodb.casbah.gridfs.{GridFSDBFile, GridFSInputFile} import play.api.Logger import java.io.{ByteArrayOutputStream} @Salat sealed trait ExpenseStatusValues case class Draft() extends ExpenseStatusValues case class Submitted() extends ExpenseStatusValues object ExpenseStatus { lazy val DRAFT:ExpenseStatusValues=Draft() lazy val SUBMITTED:ExpenseStatusValues=Submitted() implicit object ExpenseStatusJson extends Format[ExpenseStatusValues] { def reads(value: JsValue): ExpenseStatusValues = { value.as[String] match { case "Draft()" => Draft() case "Submitted()" => Submitted() } } def writes(status: ExpenseStatusValues) = { toJson(status.toString) } } } import ExpenseStatus._ case class ExpenseReport(id: ObjectId, from: Date, to: Date, userId: ObjectId, _lines: Seq[ExpenseLine],status:Option[ExpenseStatusValues]) { def subtotals:Seq[Expense]={ val expensesByQualifier: Map[String, Seq[Expense]] = lines.map(_.expense).groupBy(_.qualifier) val subtotalsByQualifier=expensesByQualifier.map({ case (qualifier, expenses) => (qualifier, expenses.map(_.amount).foldLeft(0.0)(_ + _)) }) subtotalsByQualifier.map(Expense.tupleToExpense).toSeq } lazy val lines = _lines def addLine(valueDate: Date, account: String, description: String, expense: Expense, evidences:Seq[ObjectId]) = { lazy val newParent: ExpenseReport = ExpenseReport(this.id, this.from, this.to, this.userId, line +: this.lines, this.status) lazy val line: ExpenseLine = ExpenseLine(valueDate, account, description, expense,evidences) newParent } def total = { lines.map(l => l.expense.amount).sum } def save() { ExpenseReport.save(this) } } case class ExpenseLine(valueDate: Date, account: String, description: String, expense: Expense, evidences:Seq[ObjectId]) { } object Evidence { import libs.mongo._ def save(part: FilePart[TemporaryFile]): Option[ObjectId] = { val newFile: GridFSInputFile = gridFS("default").createFile(part.ref.file) newFile.filename=part.filename part.contentType.map(contentType => newFile.contentType = contentType) newFile.save() newFile._id } def findById(id:ObjectId): FilePart[Array[Byte]] ={ val file: GridFSDBFile = gridFS("default").find(id) Logger.info(file.filename.toString +" "+file.size) val byteStream = new ByteArrayOutputStream() file.writeTo(byteStream) FilePart(file.filename.getOrElse("file"), file.filename.getOrElse(""),file.contentType,byteStream.toByteArray) } } object ExpenseLine { implicit object ExpenseLineFormat extends Format[ExpenseLine] { def reads(value: JsValue): ExpenseLine = { ExpenseLine( (value \\ "valueDate").as[Date], (value \\ "account").as[String], (value \\ "description").as[String], ((value \\ "expenseType").as[String], (value \\ "expense").as[Double]), (value \\ "evidences").as[Seq[String]].map({new ObjectId(_)}) ) } def writes(line: ExpenseLine) = { JsObject( Seq( "valueDate" -> toJson(line.valueDate), "account" -> JsString(line.account), "description" -> JsString(line.description), "expense" -> JsNumber(line.expense.amount), 
"expenseType" -> JsString(line.expense.qualifier), "evidences" -> JsArray(line.evidences.map(x => JsString(x.toString()))) ) ) } } } object ExpenseReport extends ModelCompanion[ExpenseReport, ObjectId] { def dao={ new SalatDAO[ExpenseReport,ObjectId](collection = mongoCollection("expenses")) {} } def findAllByUserId(userId: ObjectId): List[ExpenseReport] = { find(MongoDBObject("userId"->userId)).sort(MongoDBObject("from" -> -1, "to"-> -1)).toList } def findByIdAndUserID(id: ObjectId, userId: ObjectId): Option[ExpenseReport] = { findOne(MongoDBObject("userId" -> userId,"_id" -> id)) } def findById(id: ObjectId): Option[ExpenseReport] = { findOneById(id) } } object ExpenseFormat { implicit object ExpenseReportWrites extends Writes[ExpenseReport] { def writes(report: ExpenseReport) = { toJson( Map( "id" -> toJson(report.id.toString), "userId" -> toJson(report.userId.toString), "startDate" -> toJson(report.from), "endDate" -> toJson(report.to), "total" -> toJson(report.total), "lines" -> toJson(report.lines) , "status" -> toJson(report.status) ) ) } } implicit object ExpenseReportReads extends Reads[User => ExpenseReport] { def reads(json: JsValue) = { user => val expenseReportId: ObjectId = (json \\ "id").asOpt[String].map(new ObjectId(_)).getOrElse(new ObjectId()) ExpenseReport( expenseReportId, (json \\ "startDate").as[Date], (json \\ "endDate").as[Date], user.id, (json \\ "lines").as[Seq[ExpenseLine]], (json \\ "status").asOpt[ExpenseStatusValues].orElse(Some(ExpenseStatus.DRAFT)) ) } } } object Expense { implicit def tupleToExpense(tuple: (String, Double)): Expense = { val (qualifier, amount) = tuple qualifier match { case "Lodging" => Lodging(amount) case "Transportation" => Transportation(amount) case "Gas" => Gas(amount) case "Meal" => Meal(amount) case "Phone" => Phone(amount) case "Internet" => Internet(amount) case "Other" => Other(amount) } } } @Salat sealed trait Expense { val amount: Double val qualifier: String } case class Lodging(amount: Double) extends Expense { val qualifier = "Lodging" } case class Transportation(amount: Double) extends Expense { val qualifier = "Transportation" } case class Gas(amount: Double) extends Expense { val qualifier = "Gas" } case class Meal(amount: Double) extends Expense { val qualifier = "Meal" } case class Phone(amount: Double) extends Expense { val qualifier = "Phone" } case class Internet(amount: Double) extends Expense { val qualifier = "Internet" } case class Other(amount: Double) extends Expense { val qualifier = "Other" }
xebia-france/xndf
app/models/domain.scala
Scala
mit
6,811
package com.esri

import com.vividsolutions.jts.geom._

/**
  * Factory helpers around a shared JTS GeometryFactory with a fixed precision model.
  */
object GeomFact extends Serializable {

  val geomFact = new GeometryFactory(new PrecisionModel(1000000.0))

  def createPoint(x: Double, y: Double) = {
    geomFact.createPoint(new Coordinate(x, y))
  }

  def createMultiPolygons(polygons: Array[Polygon]): Geometry = {
    geomFact.createMultiPolygon(polygons)
  }

  def createPolygon(shell: LinearRing, holes: Array[LinearRing]): Polygon = {
    geomFact.createPolygon(shell, holes)
  }
}
mraad/spark-pip
src/main/scala/com/esri/GeomFact.scala
Scala
apache-2.0
508
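A tiny sketch calling the helper above; the coordinates and the wrapping object are arbitrary.

import com.esri.GeomFact
import com.vividsolutions.jts.geom.Point

object GeomFactExample extends App {
  // Coordinates are snapped to the factory's fixed precision model (6 decimal places).
  val p: Point = GeomFact.createPoint(-118.4079, 33.9434)
  println(p.toText) // e.g. POINT (-118.4079 33.9434)
}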
package org.helianto.message.service import org.helianto.message.config.{MailerProperties, SmsProperties} import org.helianto.message.domain.{Message, MessageLog, TotalVoiceSmsMessage} import org.helianto.message.repository.MessageLogRepository import org.slf4j.{Logger, LoggerFactory} import org.springframework.beans.factory.annotation.Qualifier import org.springframework.http.{HttpEntity, HttpHeaders, HttpMethod, MediaType} import org.springframework.stereotype.Service import org.springframework.web.client.RestTemplate import scala.util.{Failure, Success, Try} @Service @Qualifier("localSMS") class TotalVoiceService (val restTemplate: RestTemplate , val smsProperties: SmsProperties , val messageLogRepository: MessageLogRepository) { private[service] val logger: Logger = LoggerFactory.getLogger(classOf[TotalVoiceService]) private[service] def createEntity(message: TotalVoiceSmsMessage):HttpEntity[TotalVoiceSmsMessage] = { val headers: HttpHeaders = new HttpHeaders headers.setContentType(MediaType.APPLICATION_JSON) headers.set("Access-Token", smsProperties.accessToken) new HttpEntity[TotalVoiceSmsMessage](message, headers) } // TODO message log def send(message: TotalVoiceSmsMessage) = { logger.debug("Sending {}", message) logger.info("Props {}", smsProperties) val uri = smsProperties.serviceUri logger.debug("To {}", uri) Try(restTemplate.exchange(uri, HttpMethod.POST, createEntity(message), classOf[String])) match { case Success(responseEntity) => val body = new String(responseEntity.getBody.getBytes("ISO-8859-1"),"UTF-8") //responseEntity.getBody logger.debug("Response body {}", body) // messageLogRepository.saveAndFlush(new MessageLog(message, body, responseEntity.getStatusCode.value)) message case Failure(e) => logger.error("Unable to send message.", e) throw new IllegalArgumentException } } }
iservport/helianto-spring
src/main/scala/org/helianto/message/service/TotalVoiceService.scala
Scala
apache-2.0
1,947
package demy.mllib.feature

import org.apache.spark.ml.Transformer
import org.apache.spark.ml.param.{Param, ParamMap}
import org.apache.spark.ml.util.{Identifiable, DefaultParamsWritable, DefaultParamsReadable}
import org.apache.spark.ml.linalg.SparseVector
import org.apache.spark.sql.{Dataset, DataFrame}
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.{udf, col}
import scala.util.hashing.MurmurHash3
import org.apache.spark.ml.attribute.AttributeGroup

class ArrayHasher(override val uid: String) extends Transformer with DefaultParamsWritable {

  final val inputCol = new Param[String](this, "inputCol", "The input column")
  final val outputCol = new Param[String](this, "outputCol", "The output column")
  final val numFeatures = new Param[Int](this, "numFeatures", "The number of components of output vectors")

  def setInputCol(value: String): this.type = set(inputCol, value)
  def setOutputCol(value: String): this.type = set(outputCol, value)
  def setNumFeatures(value: Int): this.type = set(numFeatures, value)

  override def transform(dataset: Dataset[_]): DataFrame = {
    dataset.withColumn(get(outputCol).get, udf((words: Seq[String]) => {
      val size = get(numFeatures).get
      // bucket index = |murmur3(word) % size|, then count occurrences per bucket
      val pairs = words.map(w => (MurmurHash3.stringHash(w, MurmurHash3.stringSeed) % size) match { case x => if (x < 0) -x else x })
        .groupBy(id => id)
        .map(p => p match { case (id, ids) => (id, ids.size) })
        .toArray.sortWith((p1, p2) => (p1, p2) match { case ((id1, count1), (id2, count2)) => id1 < id2 })
      new SparseVector(size = size, indices = pairs.map(p => p._1), values = pairs.map(p => p._2.toDouble))
    }).apply(col(get(inputCol).get)))
  }

  override def transformSchema(schema: StructType): StructType = {
    schema.add(new AttributeGroup(name = get(outputCol).get).toStructField)
  }

  def copy(extra: ParamMap): ArrayHasher = {
    defaultCopy(extra)
  }

  def this() = this(Identifiable.randomUID("ArrayHasher"))
}

object ArrayHasher extends DefaultParamsReadable[ArrayHasher] {
}
forchard-epi/demy
mllib/src/main/scala/feature/ArrayHasher.scala
Scala
bsd-3-clause
2,079
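A usage sketch mirroring the HashingTF example earlier in this collection; the DataFrame `df`, its "tokens" column and the wrapping object are assumptions, not part of the original file.

import demy.mllib.feature.ArrayHasher
import org.apache.spark.sql.DataFrame

object ArrayHasherExample {
  // `df` is assumed to carry a column "tokens" of type Array[String].
  def hash(df: DataFrame): DataFrame = {
    new ArrayHasher()
      .setInputCol("tokens")
      .setOutputCol("features")
      .setNumFeatures(1 << 12)
      .transform(df)
  }
}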
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openwhisk.core.database.test.behavior import java.time.Instant import akka.stream.ActorMaterializer import common.{StreamLogging, WskActorSystem} import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures} import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, FlatSpec, Matchers} import spray.json.{JsObject, JsValue} import org.apache.openwhisk.common.{TransactionId, WhiskInstants} import org.apache.openwhisk.core.database.memory.MemoryAttachmentStore import org.apache.openwhisk.core.database.test.DbUtils import org.apache.openwhisk.core.database.test.behavior.ArtifactStoreTestUtil.storeAvailable import org.apache.openwhisk.core.database.{ArtifactStore, AttachmentStore, StaleParameter} import org.apache.openwhisk.core.entity._ import org.apache.openwhisk.core.entity.size._ import org.apache.openwhisk.utils.JsHelpers import scala.util.{Random, Try} trait ArtifactStoreBehaviorBase extends FlatSpec with ScalaFutures with Matchers with StreamLogging with DbUtils with WskActorSystem with IntegrationPatience with BeforeAndAfterEach with BeforeAndAfterAll with WhiskInstants { //Bring in sync the timeout used by ScalaFutures and DBUtils implicit override val patienceConfig: PatienceConfig = PatienceConfig(timeout = dbOpTimeout) protected implicit val materializer: ActorMaterializer = ActorMaterializer() protected val prefix = s"artifactTCK_${Random.alphanumeric.take(4).mkString}" def authStore: ArtifactStore[WhiskAuth] def entityStore: ArtifactStore[WhiskEntity] def activationStore: ArtifactStore[WhiskActivation] def storeType: String override def afterEach(): Unit = { cleanup() stream.reset() } override def afterAll(): Unit = { assertAttachmentStoreIsEmpty() println("Shutting down store connections") authStore.shutdown() entityStore.shutdown() activationStore.shutdown() super.afterAll() assertAttachmentStoresAreClosed() } override protected def withFixture(test: NoArgTest) = { assume(storeAvailable(storeAvailableCheck), s"$storeType not configured or available") val outcome = super.withFixture(test) if (outcome.isFailed) { println(logLines.mkString("\n")) } outcome } protected def storeAvailableCheck: Try[Any] = Try(true) //~----------------------------------------< utility methods > protected def query[A <: WhiskEntity]( db: ArtifactStore[A], table: String, startKey: List[Any], endKey: List[Any], skip: Int = 0, limit: Int = 0, includeDocs: Boolean = false, descending: Boolean = true, reduce: Boolean = false, stale: StaleParameter = StaleParameter.No)(implicit transid: TransactionId): List[JsObject] = { db.query(table, startKey, endKey, skip, limit, includeDocs, descending, reduce, stale).futureValue } protected def count[A <: WhiskEntity]( db: ArtifactStore[A], table: String, startKey: List[Any], 
endKey: List[Any], skip: Int = 0, stale: StaleParameter = StaleParameter.No)(implicit transid: TransactionId): Long = { db.count(table, startKey, endKey, skip, stale).futureValue } protected def getWhiskAuth(doc: DocInfo)(implicit transid: TransactionId) = { authStore.get[WhiskAuth](doc).futureValue } protected def newAuth() = { val subject = Subject() val namespaces = Set(wskNS("foo")) WhiskAuth(subject, namespaces) } protected def wskNS(name: String) = { val uuid = UUID() WhiskNamespace(Namespace(EntityName(name), uuid), BasicAuthenticationAuthKey(uuid, Secret())) } private val exec = BlackBoxExec(ExecManifest.ImageName("image"), None, None, native = false, binary = false) protected def newAction(ns: EntityPath): WhiskAction = { WhiskAction(ns, aname(), exec) } protected def newActivation(ns: String, actionName: String, start: Long): WhiskActivation = { WhiskActivation( EntityPath(ns), EntityName(actionName), Subject(), ActivationId.generate(), Instant.ofEpochMilli(start), Instant.ofEpochMilli(start + 1000)) } protected def aname() = EntityName(s"${prefix}_name_${randomString()}") protected def newNS() = EntityPath(s"${prefix}_ns_${randomString()}") private def randomString() = Random.alphanumeric.take(5).mkString protected def getJsObject(js: JsObject, fields: String*): JsObject = { JsHelpers.getFieldPath(js, fields: _*).get.asJsObject } protected def getJsField(js: JsObject, subObject: String, fieldName: String): JsValue = { js.fields(subObject).asJsObject().fields(fieldName) } protected def getAttachmentStore(store: ArtifactStore[_]): Option[AttachmentStore] protected def getAttachmentCount(store: AttachmentStore): Option[Int] = store match { case s: MemoryAttachmentStore => Some(s.attachmentCount) case _ => None } protected def getAttachmentSizeForTest(store: ArtifactStore[_]): Int = { val mb = getAttachmentStore(store).map(_ => 5.MB).getOrElse(maxAttachmentSizeWithoutAttachmentStore) mb.toBytes.toInt } protected def maxAttachmentSizeWithoutAttachmentStore: ByteSize = 5.MB private def assertAttachmentStoreIsEmpty(): Unit = { Seq(authStore, entityStore, activationStore).foreach { s => for { as <- getAttachmentStore(s) count <- getAttachmentCount(as) } require(count == 0, s"AttachmentStore not empty after all runs - $count") } } private def assertAttachmentStoresAreClosed(): Unit = { Seq(authStore, entityStore, activationStore).foreach { s => getAttachmentStore(s).foreach { case s: MemoryAttachmentStore => require(s.isClosed, "AttachmentStore was not closed") case _ => } } } }
jasonpet/openwhisk
tests/src/test/scala/org/apache/openwhisk/core/database/test/behavior/ArtifactStoreBehaviorBase.scala
Scala
apache-2.0
6,615
package org.loklak.data

import akka.actor._

/**
  * Created by Scott on 6/4/16.
  */
case object ClearESCount

class DataAccessActor extends Actor with ActorLogging {
  var count = 0

  def receive = {
    case (id: String, json: String) =>
      DAO.store(id, json)
      count = count + 1
    case ClearESCount =>
      count = 0
  }
}
DengYiping/loklak-scala
src/main/scala/org/loklak/data/DataAccessActor.scala
Scala
mit
316
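A hedged sketch of driving the actor above; the system name, actor name and message contents are invented, and it assumes the loklak `DAO` is configured since each (id, json) tuple is persisted through `DAO.store`.

import akka.actor.{ActorSystem, Props}

import org.loklak.data.{ClearESCount, DataAccessActor}

object DataAccessExample extends App {
  val system = ActorSystem("loklak-demo")
  val writer = system.actorOf(Props[DataAccessActor], "data-access")

  // Each (id, json) tuple is stored via DAO.store and bumps the internal counter.
  writer ! ("tweet-42", """{"text":"hello"}""")

  // Reset the counter.
  writer ! ClearESCount
}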
package org.apache.spark.core.utils

import com.typesafe.config.Config
import org.apache.spark.SparkConf

import scala.collection.JavaConverters._

object ContextUtils {

  def configToSparkConf(config: Config, contextName: String): SparkConf = {
    val sparkConf = new SparkConf()
      .setAppName(contextName)
      .setJars(config.getStringList("context.jars").asScala)

    for (x <- config.entrySet().asScala if x.getKey.startsWith("spark.")) {
      sparkConf.set(x.getKey, x.getValue.unwrapped().toString)
    }

    sparkConf
  }
}
linzhe/matrix
src/main/scala/org/apache/spark/core/utils/ContextUtils.scala
Scala
apache-2.0
537
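A sketch of a configuration that `configToSparkConf` above understands; the jar path, settings values and wrapping object are placeholders.

import com.typesafe.config.ConfigFactory
import org.apache.spark.core.utils.ContextUtils

object ContextUtilsExample extends App {
  val config = ConfigFactory.parseString(
    """context.jars = ["/tmp/job.jar"]
      |spark.master = "local[2]"
      |spark.executor.memory = "1g"
      |""".stripMargin)

  // The context name becomes the app name; every "spark.*" entry is copied onto the SparkConf.
  val sparkConf = ContextUtils.configToSparkConf(config, "demo-context")
  println(sparkConf.toDebugString)
}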
package org.elasticsearch.spark.serialization import scala.collection.Map import scala.collection.immutable.Nil import org.elasticsearch.hadoop.serialization.Generator import org.elasticsearch.hadoop.serialization.builder.JdkValueWriter import org.elasticsearch.hadoop.serialization.builder.ValueWriter.Result import org.elasticsearch.spark.serialization.{ ReflectionUtils => RU } import org.elasticsearch.spark.rdd.CompatUtils import org.elasticsearch.hadoop.EsHadoopIllegalArgumentException class ScalaValueWriter(writeUnknownTypes: Boolean = false) extends JdkValueWriter(writeUnknownTypes) { def this() { this(false) } override def write(value: AnyRef, generator: Generator): Result = { doWrite(value, generator, true) } private def doWrite(value: Any, generator: Generator, acceptsJavaBeans: Boolean): Result = { value match { case null | None | Unit => generator.writeNull() case Nil => generator.writeBeginArray(); generator.writeEndArray() case Some(s: AnyRef) => return doWrite(s, generator, false) case m: Map[_, _] => { generator.writeBeginObject() for ((k, v) <- m) { if (shouldKeep(generator.getParentPath(), k.toString())) { generator.writeFieldName(k.toString()) val result = doWrite(v, generator, false) if (!result.isSuccesful()) { return result } } } generator.writeEndObject() } case i: Traversable[_] => { generator.writeBeginArray() for (v <- i) { val result = doWrite(v, generator, false) if (!result.isSuccesful()) { return result } } generator.writeEndArray() } case i: Array[_] => { generator.writeBeginArray() for (v <- i) { val result = doWrite(v, generator, false) if (!result.isSuccesful()) { return result } } generator.writeEndArray() } case p: Product => { // handle case class if (RU.isCaseClass(p)) { val result = doWrite(RU.caseClassValues(p), generator, false) if (!result.isSuccesful()) { return result } } // normal product - treat it as a list/array else { generator.writeBeginArray() for (t <- p.productIterator) { val result = doWrite(t.asInstanceOf[AnyRef], generator, false) if (!result.isSuccesful()) { return result } } generator.writeEndArray() } } case _ => { // check if it's called by accident on a DataFrame/SchemaRDD (happens) if (value.getClass().getName().startsWith("org.apache.spark.sql.")) { throw new EsHadoopIllegalArgumentException("Spark SQL types are not handled through basic RDD saveToEs() calls; typically this is a mistake(as the SQL schema will be ignored). Use 'org.elasticsearch.spark.sql' package instead") } // normal JDK types failed, try the JavaBean last val result = super.write(value, generator) if (!result.isSuccesful()) { if (acceptsJavaBeans && RU.isJavaBean(value)) { return doWrite(RU.javaBeanAsMap(value), generator, false) } else return result } } } Result.SUCCESFUL() } }
girirajsharma/elasticsearch-hadoop
spark/core/main/scala/org/elasticsearch/spark/serialization/ScalaValueWriter.scala
Scala
apache-2.0
3,431
package example

import cucumber.api.scala.{EN, ScalaDsl}
import org.scalatest.Matchers
import org.scalatest.concurrent.{Eventually, ScalaFutures}

object ServiceSteps {
  lazy val defaultStartedService = {
    CalculatorServer.start(8080)
  }
}

class ServiceSteps extends ScalaDsl with EN with Matchers with ScalaFutures with Eventually {

  var lastResult = Int.MinValue
  var client: CalculatorClient = null

  /**
    * This assumes a running service mapped against the host machine at the given location
    */
  Given("""^a calculator client against (.+)$""") { hostPort: String =>
    client = CalculatorClient(hostPort)

    // prove connectivity eagerly within this step
    client.add(0, 0) shouldBe 0
  }

  Given("""^a remote request to add (.+) and (.+)$""") { (lhs: Int, rhs: Int) =>
    lastResult = client.add(lhs, rhs)
  }

  Given("""^a remote request to subtract (.+) from (.+)$""") { (rhs: Int, lhs: Int) =>
    lastResult = client.subtract(lhs, rhs)
  }

  Then("""^The response should be ([-0-9]+)$""") { (expected: Int) =>
    lastResult shouldBe expected
  }
}
Tapad/sbt-docker-compose
examples/basic-with-tests-cucumber/src/test/scala/ServiceSteps.scala
Scala
bsd-3-clause
1,085
package com.sksamuel.elastic4s.admin import com.sksamuel.elastic4s.{Indexes, IndexesAndTypes} trait IndexAdminApi { def refreshIndex(first: String, rest: String*): RefreshIndexDefinition = refreshIndex(first +: rest) def refreshIndex(indexes: Iterable[String]): RefreshIndexDefinition = refreshIndex(Indexes(indexes)) def refreshIndex(indexes: Indexes): RefreshIndexDefinition = RefreshIndexDefinition(indexes.values) def indexStats(indexes: Indexes): IndicesStatsDefinition = IndicesStatsDefinition(indexes) def indexStats(first: String, rest: String*): IndicesStatsDefinition = indexStats(first +: rest) def typesExist(indexesAndTypes: IndexesAndTypes) = TypesExistsDefinition(indexesAndTypes.indexes, indexesAndTypes.types) def typesExist(types: String*): TypesExistExpectsIn = typesExist(types) def typesExist(types: Iterable[String]): TypesExistExpectsIn = new TypesExistExpectsIn(types) class TypesExistExpectsIn(types: Iterable[String]) { def in(indexes: String*): TypesExistsDefinition = TypesExistsDefinition(indexes, types.toSeq) } def closeIndex(first: String, rest: String*): CloseIndexDefinition = CloseIndexDefinition(first +: rest) def openIndex(first: String, rest: String*): OpenIndexDefinition = OpenIndexDefinition(first +: rest) def getSegments(indexes: Indexes): GetSegmentsDefinition = GetSegmentsDefinition(indexes) def getSegments(first: String, rest: String*): GetSegmentsDefinition = getSegments(first +: rest) def flushIndex(indexes: Iterable[String]): FlushIndexDefinition = FlushIndexDefinition(indexes.toSeq) def flushIndex(indexes: String*): FlushIndexDefinition = flushIndex(indexes) def indexExists(index: String): IndexExistsDefinition = IndexExistsDefinition(index) def aliasExists(alias: String): AliasExistsDefinition = AliasExistsDefinition(alias) def clearCache(first: String, rest: String*): ClearCacheDefinition = clearCache(first +: rest) def clearCache(indexes: Iterable[String]): ClearCacheDefinition = ClearCacheDefinition(indexes.toSeq) def clearIndex(first: String, rest: String*): ClearCacheDefinition = clearIndex(first +: rest) def clearIndex(indexes: Iterable[String]): ClearCacheDefinition = ClearCacheDefinition(indexes.toSeq) def rollover(alias: String): RolloverDefinition = RolloverDefinition(alias) def shrink(source: String, target: String): ShrinkDefinition = ShrinkDefinition(source, target) def updateIndexLevelSettings(first: String, rest: String*): UpdateIndexLevelSettingsDefinition = updateIndexLevelSettings(first +: rest) def updateIndexLevelSettings(indexes: Iterable[String]): UpdateIndexLevelSettingsDefinition = updateIndexLevelSettings(Indexes(indexes)) def updateIndexLevelSettings(indexes: Indexes): UpdateIndexLevelSettingsDefinition = UpdateIndexLevelSettingsDefinition(indexes.values) def indexShardStores(first: String, rest: String*): IndexShardStoreDefinition = indexShardStores(first +: rest) def indexShardStores(indexes: Iterable[String]): IndexShardStoreDefinition = indexShardStores(Indexes(indexes)) def indexShardStores(indexes: Indexes): IndexShardStoreDefinition = IndexShardStoreDefinition(indexes) }
tyth/elastic4s
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/admin/IndexAdminApi.scala
Scala
apache-2.0
3,171
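A brief sketch of the admin DSL calls defined above; index and alias names are invented, and mixing in the trait is just one way to bring the methods into scope.

import com.sksamuel.elastic4s.admin.IndexAdminApi

object AdminRequestsExample extends IndexAdminApi {
  val refresh = refreshIndex("users", "orders")
  val exists  = typesExist("user") in "users"
  val stores  = indexShardStores("users")
  val shrunk  = shrink("users", "users-small")
}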
package manu.tron.service.impl import manu.tron.service.MinimaxService import manu.tron.common.Vocabulary._ /** * Created by manu on 2/6/14. */ trait MinimaxServiceComponent { val minimaxService: MinimaxService class MinimaxServiceImpl extends MinimaxService { override def minimax(status: GameStatus, childsFinder: (GameStatus => Map[Direction, GameStatus]), heuristic: ((GameStatus, PlayerId) => Int), depth: Int): Direction = { val pointOfView = status.nextPlayerToPlay match { case None => throw new IllegalArgumentException( "Couldn't decide a move, no player was expected to play" ) case Some(p) => p } sealed trait Res{ val rate: Int } case class ResWithMove(rate: Int, move: Direction) extends Res case class ResWithoutMove(rate: Int) extends Res def recurse(status: GameStatus, depth: Int): Res = if(depth == 0) ResWithoutMove(heuristic(status, pointOfView)) else { val childsMap = childsFinder(status) if(childsMap.isEmpty) ResWithoutMove(heuristic(status, pointOfView)) else { val childsAndRates = childsMap.map { case (move, child) => (move, recurse(child, depth - 1).rate) } val pick = if(status.nextPlayerToPlay.head == pointOfView) childsAndRates.maxBy(_._2) else childsAndRates.minBy(_._2) ResWithMove(pick._2, pick._1) } } recurse(status, depth) match { case ResWithMove(_, move) => move case ResWithoutMove(_) => throw new IllegalArgumentException( "Couldn't decide a move, you either provided a depth of 0 or gave a status where the game was already over without any child nodes" ) } } } }
implicitdef/tron
src/main/scala/manu/tron/service/impl/MinimaxServiceComponent.scala
Scala
mit
1,964
package scapi.sigma.damgardjurik.product import java.math.BigInteger import akka.actor.{Actor, ActorSystem, Props} import edu.biu.scapi.midLayer.asymmetricCrypto.encryption.{DJKeyGenParameterSpec, ScDamgardJurikEnc} import edu.biu.scapi.midLayer.asymmetricCrypto.keys.{DamgardJurikPrivateKey, DamgardJurikPublicKey} import edu.biu.scapi.midLayer.ciphertext.BigIntegerCiphertext import edu.biu.scapi.midLayer.plaintext.BigIntegerPlainText import scapi.sigma.rework.SigmaProtocolFunctions.StartInteraction import scapi.sigma.damgardjurik.product.Prover.SendFirstMessage class Dealer extends Actor { lazy val protocolParams = ProtocolParams(soundness = 40, lengthParameter = 1) lazy val commonInput = CommonInput(pubKey, c1, c2, c3) lazy val proverInput = ProverInput(privKey, x1, x2) val b1 = new BigInteger("1000") val b2 = new BigInteger("2000") val b3 = new BigInteger("2000000") val x1 = new BigIntegerPlainText(b1) val x2 = new BigIntegerPlainText(b2) val x3 = new BigIntegerPlainText(b3) val djEncScheme = new ScDamgardJurikEnc() val keyPair = djEncScheme.generateKey(new DJKeyGenParameterSpec()) djEncScheme.setKey(keyPair.getPublic, keyPair.getPrivate) val pubKey = keyPair.getPublic.asInstanceOf[DamgardJurikPublicKey] val privKey = keyPair.getPrivate.asInstanceOf[DamgardJurikPrivateKey] val c1 = djEncScheme.encrypt(x1).asInstanceOf[BigIntegerCiphertext] val c2 = djEncScheme.encrypt(x2).asInstanceOf[BigIntegerCiphertext] val c3 = djEncScheme.encrypt(x3).asInstanceOf[BigIntegerCiphertext] val verifier = context.actorOf(Props(classOf[Verifier], protocolParams, commonInput)) val prover = context.actorOf(Props(classOf[Prover], protocolParams, commonInput, proverInput, verifier)) override def receive: Receive = { case StartInteraction => prover ! SendFirstMessage } } object Launcher extends App { val actorSystem = ActorSystem() val dealer = actorSystem.actorOf(Props[Dealer]) dealer ! StartInteraction }
kushti/scala-scapi
src/main/scala/scapi/sigma/damgardjurik/product/Dealer.scala
Scala
cc0-1.0
1,983
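The dealer's constants encode the statement the sigma protocol is meant to prove: the third plaintext is the product of the first two (1000 * 2000 = 2000000). A small standalone check of that relation, using only the standard BigInteger API:

import java.math.BigInteger

// The product relation over the plaintexts behind c1, c2 and c3.
val b1 = new BigInteger("1000")
val b2 = new BigInteger("2000")
val b3 = new BigInteger("2000000")
assert(b1.multiply(b2) == b3) // 1000 * 2000 == 2000000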
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.execution.columnar import org.apache.commons.lang3.StringUtils import org.apache.spark.{SparkEnv, TaskContext} import org.apache.spark.network.util.JavaUtils import org.apache.spark.rdd.RDD import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation import org.apache.spark.sql.catalyst.expressions._ import org.apache.spark.sql.catalyst.plans.{logical, QueryPlan} import org.apache.spark.sql.catalyst.plans.logical.{ColumnStat, LogicalPlan, Statistics} import org.apache.spark.sql.catalyst.util.truncatedString import org.apache.spark.sql.columnar.{CachedBatch, CachedBatchSerializer, SimpleMetricsCachedBatch, SimpleMetricsCachedBatchSerializer} import org.apache.spark.sql.execution.{ColumnarToRowTransition, InputAdapter, QueryExecution, SparkPlan, WholeStageCodegenExec} import org.apache.spark.sql.execution.vectorized.{OffHeapColumnVector, OnHeapColumnVector, WritableColumnVector} import org.apache.spark.sql.internal.{SQLConf, StaticSQLConf} import org.apache.spark.sql.types.{BooleanType, ByteType, DoubleType, FloatType, IntegerType, LongType, ShortType, StructType, UserDefinedType} import org.apache.spark.sql.vectorized.{ColumnarBatch, ColumnVector} import org.apache.spark.storage.{RDDBlockId, StorageLevel} import org.apache.spark.util.{LongAccumulator, Utils} /** * The default implementation of CachedBatch. * * @param numRows The total number of rows in this batch * @param buffers The buffers for serialized columns * @param stats The stat of columns */ case class DefaultCachedBatch(numRows: Int, buffers: Array[Array[Byte]], stats: InternalRow) extends SimpleMetricsCachedBatch /** * The default implementation of CachedBatchSerializer. 
*/ class DefaultCachedBatchSerializer extends SimpleMetricsCachedBatchSerializer { override def supportsColumnarInput(schema: Seq[Attribute]): Boolean = false override def convertColumnarBatchToCachedBatch( input: RDD[ColumnarBatch], schema: Seq[Attribute], storageLevel: StorageLevel, conf: SQLConf): RDD[CachedBatch] = throw new IllegalStateException("Columnar input is not supported") override def convertInternalRowToCachedBatch( input: RDD[InternalRow], schema: Seq[Attribute], storageLevel: StorageLevel, conf: SQLConf): RDD[CachedBatch] = { val batchSize = conf.columnBatchSize val useCompression = conf.useCompression convertForCacheInternal(input, schema, batchSize, useCompression) } def convertForCacheInternal( input: RDD[InternalRow], output: Seq[Attribute], batchSize: Int, useCompression: Boolean): RDD[CachedBatch] = { input.mapPartitionsInternal { rowIterator => new Iterator[DefaultCachedBatch] { def next(): DefaultCachedBatch = { val columnBuilders = output.map { attribute => ColumnBuilder(attribute.dataType, batchSize, attribute.name, useCompression) }.toArray var rowCount = 0 var totalSize = 0L while (rowIterator.hasNext && rowCount < batchSize && totalSize < ColumnBuilder.MAX_BATCH_SIZE_IN_BYTE) { val row = rowIterator.next() // Added for SPARK-6082. This assertion can be useful for scenarios when something // like Hive TRANSFORM is used. The external data generation script used in TRANSFORM // may result malformed rows, causing ArrayIndexOutOfBoundsException, which is somewhat // hard to decipher. assert( row.numFields == columnBuilders.length, s"Row column number mismatch, expected ${output.size} columns, " + s"but got ${row.numFields}." + s"\\nRow content: $row") var i = 0 totalSize = 0 while (i < row.numFields) { columnBuilders(i).appendFrom(row, i) totalSize += columnBuilders(i).columnStats.sizeInBytes i += 1 } rowCount += 1 } val stats = InternalRow.fromSeq( columnBuilders.flatMap(_.columnStats.collectedStatistics).toSeq) DefaultCachedBatch(rowCount, columnBuilders.map { builder => JavaUtils.bufferToArray(builder.build()) }, stats) } def hasNext: Boolean = rowIterator.hasNext } } } override def supportsColumnarOutput(schema: StructType): Boolean = schema.fields.forall(f => f.dataType match { // More types can be supported, but this is to match the original implementation that // only supported primitive types "for ease of review" case BooleanType | ByteType | ShortType | IntegerType | LongType | FloatType | DoubleType => true case _ => false }) override def vectorTypes(attributes: Seq[Attribute], conf: SQLConf): Option[Seq[String]] = Option(Seq.fill(attributes.length)( if (!conf.offHeapColumnVectorEnabled) { classOf[OnHeapColumnVector].getName } else { classOf[OffHeapColumnVector].getName } )) override def convertCachedBatchToColumnarBatch( input: RDD[CachedBatch], cacheAttributes: Seq[Attribute], selectedAttributes: Seq[Attribute], conf: SQLConf): RDD[ColumnarBatch] = { val offHeapColumnVectorEnabled = conf.offHeapColumnVectorEnabled val outputSchema = StructType.fromAttributes(selectedAttributes) val columnIndices = selectedAttributes.map(a => cacheAttributes.map(o => o.exprId).indexOf(a.exprId)).toArray def createAndDecompressColumn(cb: CachedBatch): ColumnarBatch = { val cachedColumnarBatch = cb.asInstanceOf[DefaultCachedBatch] val rowCount = cachedColumnarBatch.numRows val taskContext = Option(TaskContext.get()) val columnVectors = if (!offHeapColumnVectorEnabled || taskContext.isEmpty) { OnHeapColumnVector.allocateColumns(rowCount, outputSchema) } else { 
OffHeapColumnVector.allocateColumns(rowCount, outputSchema) } val columnarBatch = new ColumnarBatch(columnVectors.asInstanceOf[Array[ColumnVector]]) columnarBatch.setNumRows(rowCount) for (i <- selectedAttributes.indices) { ColumnAccessor.decompress( cachedColumnarBatch.buffers(columnIndices(i)), columnarBatch.column(i).asInstanceOf[WritableColumnVector], outputSchema.fields(i).dataType, rowCount) } taskContext.foreach(_.addTaskCompletionListener[Unit](_ => columnarBatch.close())) columnarBatch } input.map(createAndDecompressColumn) } override def convertCachedBatchToInternalRow( input: RDD[CachedBatch], cacheAttributes: Seq[Attribute], selectedAttributes: Seq[Attribute], conf: SQLConf): RDD[InternalRow] = { // Find the ordinals and data types of the requested columns. val (requestedColumnIndices, requestedColumnDataTypes) = selectedAttributes.map { a => cacheAttributes.map(_.exprId).indexOf(a.exprId) -> a.dataType }.unzip val columnTypes = requestedColumnDataTypes.map { case udt: UserDefinedType[_] => udt.sqlType case other => other }.toArray input.mapPartitionsInternal { cachedBatchIterator => val columnarIterator = GenerateColumnAccessor.generate(columnTypes) columnarIterator.initialize(cachedBatchIterator.asInstanceOf[Iterator[DefaultCachedBatch]], columnTypes, requestedColumnIndices.toArray) columnarIterator } } } private[sql] case class CachedRDDBuilder( serializer: CachedBatchSerializer, storageLevel: StorageLevel, @transient cachedPlan: SparkPlan, tableName: Option[String]) { @transient @volatile private var _cachedColumnBuffers: RDD[CachedBatch] = null @transient @volatile private var _cachedColumnBuffersAreLoaded: Boolean = false val sizeInBytesStats: LongAccumulator = cachedPlan.session.sparkContext.longAccumulator val rowCountStats: LongAccumulator = cachedPlan.session.sparkContext.longAccumulator val cachedName = tableName.map(n => s"In-memory table $n") .getOrElse(StringUtils.abbreviate(cachedPlan.toString, 1024)) def cachedColumnBuffers: RDD[CachedBatch] = { if (_cachedColumnBuffers == null) { synchronized { if (_cachedColumnBuffers == null) { _cachedColumnBuffers = buildBuffers() } } } _cachedColumnBuffers } def clearCache(blocking: Boolean = false): Unit = { if (_cachedColumnBuffers != null) { synchronized { if (_cachedColumnBuffers != null) { _cachedColumnBuffers.unpersist(blocking) _cachedColumnBuffers = null } } } } def isCachedColumnBuffersLoaded: Boolean = { _cachedColumnBuffers != null && isCachedRDDLoaded } def isCachedRDDLoaded: Boolean = { _cachedColumnBuffersAreLoaded || { val bmMaster = SparkEnv.get.blockManager.master val rddLoaded = _cachedColumnBuffers.partitions.forall { partition => bmMaster.getBlockStatus(RDDBlockId(_cachedColumnBuffers.id, partition.index), false) .exists { case(_, blockStatus) => blockStatus.isCached } } if (rddLoaded) { _cachedColumnBuffersAreLoaded = rddLoaded } rddLoaded } } private def buildBuffers(): RDD[CachedBatch] = { val cb = if (cachedPlan.supportsColumnar && serializer.supportsColumnarInput(cachedPlan.output)) { serializer.convertColumnarBatchToCachedBatch( cachedPlan.executeColumnar(), cachedPlan.output, storageLevel, cachedPlan.conf) } else { serializer.convertInternalRowToCachedBatch( cachedPlan.execute(), cachedPlan.output, storageLevel, cachedPlan.conf) } val cached = cb.map { batch => sizeInBytesStats.add(batch.sizeInBytes) rowCountStats.add(batch.numRows) batch }.persist(storageLevel) cached.setName(cachedName) cached } } object InMemoryRelation { private[this] var ser: Option[CachedBatchSerializer] = None private[this] 
def getSerializer(sqlConf: SQLConf): CachedBatchSerializer = synchronized { if (ser.isEmpty) { val serName = sqlConf.getConf(StaticSQLConf.SPARK_CACHE_SERIALIZER) val serClass = Utils.classForName(serName) val instance = serClass.getConstructor().newInstance().asInstanceOf[CachedBatchSerializer] ser = Some(instance) } ser.get } /* Visible for testing */ private[columnar] def clearSerializer(): Unit = synchronized { ser = None } def convertToColumnarIfPossible(plan: SparkPlan): SparkPlan = plan match { case gen: WholeStageCodegenExec => gen.child match { case c2r: ColumnarToRowTransition => c2r.child match { case ia: InputAdapter => ia.child case _ => plan } case _ => plan } case c2r: ColumnarToRowTransition => // This matches when whole stage code gen is disabled. c2r.child case _ => plan } def apply( storageLevel: StorageLevel, qe: QueryExecution, tableName: Option[String]): InMemoryRelation = { val optimizedPlan = qe.optimizedPlan val serializer = getSerializer(optimizedPlan.conf) val child = if (serializer.supportsColumnarInput(optimizedPlan.output)) { convertToColumnarIfPossible(qe.executedPlan) } else { qe.executedPlan } val cacheBuilder = CachedRDDBuilder(serializer, storageLevel, child, tableName) val relation = new InMemoryRelation(child.output, cacheBuilder, optimizedPlan.outputOrdering) relation.statsOfPlanToCache = optimizedPlan.stats relation } /** * This API is intended only to be used for testing. */ def apply( serializer: CachedBatchSerializer, storageLevel: StorageLevel, child: SparkPlan, tableName: Option[String], optimizedPlan: LogicalPlan): InMemoryRelation = { val cacheBuilder = CachedRDDBuilder(serializer, storageLevel, child, tableName) val relation = new InMemoryRelation(child.output, cacheBuilder, optimizedPlan.outputOrdering) relation.statsOfPlanToCache = optimizedPlan.stats relation } def apply(cacheBuilder: CachedRDDBuilder, qe: QueryExecution): InMemoryRelation = { val optimizedPlan = qe.optimizedPlan val newBuilder = if (cacheBuilder.serializer.supportsColumnarInput(optimizedPlan.output)) { cacheBuilder.copy(cachedPlan = convertToColumnarIfPossible(qe.executedPlan)) } else { cacheBuilder.copy(cachedPlan = qe.executedPlan) } val relation = new InMemoryRelation( newBuilder.cachedPlan.output, newBuilder, optimizedPlan.outputOrdering) relation.statsOfPlanToCache = optimizedPlan.stats relation } def apply( output: Seq[Attribute], cacheBuilder: CachedRDDBuilder, outputOrdering: Seq[SortOrder], statsOfPlanToCache: Statistics): InMemoryRelation = { val relation = InMemoryRelation(output, cacheBuilder, outputOrdering) relation.statsOfPlanToCache = statsOfPlanToCache relation } } case class InMemoryRelation( output: Seq[Attribute], @transient cacheBuilder: CachedRDDBuilder, override val outputOrdering: Seq[SortOrder]) extends logical.LeafNode with MultiInstanceRelation { @volatile var statsOfPlanToCache: Statistics = null override def innerChildren: Seq[SparkPlan] = Seq(cachedPlan) override def doCanonicalize(): logical.LogicalPlan = copy(output = output.map(QueryPlan.normalizeExpressions(_, cachedPlan.output)), cacheBuilder, outputOrdering) @transient val partitionStatistics = new PartitionStatistics(output) def cachedPlan: SparkPlan = cacheBuilder.cachedPlan private[sql] def updateStats( rowCount: Long, newColStats: Map[Attribute, ColumnStat]): Unit = this.synchronized { val newStats = statsOfPlanToCache.copy( rowCount = Some(rowCount), attributeStats = AttributeMap((statsOfPlanToCache.attributeStats ++ newColStats).toSeq) ) statsOfPlanToCache = newStats } override def 
computeStats(): Statistics = { if (!cacheBuilder.isCachedColumnBuffersLoaded) { // Underlying columnar RDD hasn't been materialized, use the stats from the plan to cache. statsOfPlanToCache } else { statsOfPlanToCache.copy( sizeInBytes = cacheBuilder.sizeInBytesStats.value.longValue, rowCount = Some(cacheBuilder.rowCountStats.value.longValue) ) } } def withOutput(newOutput: Seq[Attribute]): InMemoryRelation = InMemoryRelation(newOutput, cacheBuilder, outputOrdering, statsOfPlanToCache) override def newInstance(): this.type = { InMemoryRelation( output.map(_.newInstance()), cacheBuilder, outputOrdering, statsOfPlanToCache).asInstanceOf[this.type] } // override `clone` since the default implementation won't carry over mutable states. override def clone(): LogicalPlan = { val cloned = this.copy() cloned.statsOfPlanToCache = this.statsOfPlanToCache cloned } override def simpleString(maxFields: Int): String = s"InMemoryRelation [${truncatedString(output, ", ", maxFields)}], ${cacheBuilder.storageLevel}" }
mahak/spark
sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryRelation.scala
Scala
apache-2.0
16,087
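The serializer used by the cached relation above is resolved once per JVM from a static SQL conf (see `getSerializer` and `StaticSQLConf.SPARK_CACHE_SERIALIZER`). A hedged sketch of how a session might opt into a specific `CachedBatchSerializer`; the class name shown is the built-in default and is purely illustrative, and since the conf is static it must be set before the session is created.

import org.apache.spark.sql.SparkSession

// Illustrative only: choose the cached-batch serializer at session build time.
val spark = SparkSession.builder()
  .appName("cache-serializer-sketch")
  .master("local[*]")
  .config("spark.sql.cache.serializer",
    "org.apache.spark.sql.execution.columnar.DefaultCachedBatchSerializer")
  .getOrCreate()

spark.range(1000L).cache().count() // cached through the configured serializer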
package com.project.akka.guice import org.scalatest.FeatureSpecLike import com.google.inject.Guice import scala.concurrent.duration._ import akka.util.Timeout import akka.actor.{ActorRef, Props, ActorSystem, Actor} import javax.inject.Inject import com.project.akka.guice.PerformanceTest._ import akka.pattern.ask import com.google.inject.util.Modules import scala.concurrent.Await class PerformanceTest extends FeatureSpecLike { implicit val timeout = Timeout(10.seconds) feature("Create the instance with a Child injector") { scenario("No parameter") { val system = ActorSystem("test") val injector = Guice.createInjector(Modules.EMPTY_MODULE) val props = injector.getInstance(classOf[InjectedProps]) val request = Request(Props(classOf[NoParamsActor]), props(classOf[NoParamsActor])) val time = Await.result((system.actorOf(Props[BothRunner]) ? request).mapTo[Result], 10.seconds) println("No Parameter:" + time) } scenario("Parameter") { val system = ActorSystem("test") val injector = Guice.createInjector(Modules.EMPTY_MODULE) val props = injector.getInstance(classOf[InjectedProps]) val request = Request(Props(classOf[ParamActor], ""), props(classOf[ParamActor], "")) val time = Await.result((system.actorOf(Props[BothRunner]) ? request).mapTo[Result], 10.seconds) println("Parameter:" + time) } } } object PerformanceTest { val Cycles = 10000 class BothRunner extends Actor { var propsSender: ActorRef = _ val nativeRunner = context.actorOf(Props[Runner]) val injectedRunner = context.actorOf(Props[Runner]) var nativeTime: Long = 0 var injectedTime: Long = 0 def receive = { case Request(native, injected) => nativeRunner ! SingleRequest(native, native = true) injectedRunner ! SingleRequest(injected, native = false) nativeTime = 0 injectedTime = 0 propsSender = sender() case NativeResult(time) => nativeTime = time sendResultIfComplete() case InjectedResult(time) => injectedTime = time sendResultIfComplete() } def sendResultIfComplete() = if (nativeTime != 0 && injectedTime != 0) propsSender ! Result(nativeTime, injectedTime) } case class NativeResult(time: Long) case class InjectedResult(time: Long) class Runner extends Actor { var props: Props = _ var propsSender: ActorRef = _ var time: Long = 0 var counter = 0 var isNative = true def receive = { case SingleRequest(prop, native) => this.props = prop propsSender = sender() isNative = native time = 0 counter = 0 context.actorOf(prop) ! InitCounter() case response: InitCounted => counter += 1 time += response.duration if (counter != Cycles) { context.actorOf(props) ! InitCounter() } else { propsSender ! (if (isNative) NativeResult(time) else InjectedResult(time)) } } } def time = System.currentTimeMillis() class NoParamsActor extends Actor { val end = time def receive = { case counter: InitCounter => sender() ! InitCounted(end - counter.start) } } class ParamActor @Inject() (hello: String) extends NoParamsActor case class Request(nativeProps: Props, injectedProps: Props) case class SingleRequest(props: Props, native: Boolean) case class InitCounter() { val start = time } case class InitCounted(duration: Long) case class Result(native: Long, injected: Long) { override def toString: String = s""" |Native: total $native mills |Injected: total $injected mills""".stripMargin } }
alessandrosimi/akka-guice
src/test/scala/com/project/akka/guice/PerformanceTest.scala
Scala
apache-2.0
3,766
package lamdheal import lamdheal.TypeSystem._ import java.io.FileReader import scala.io.Source /* Copyright 2013 Davi Pereira dos Santos This file is part of Lamdheal. Lamdheal is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Lamdheal is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Lamdheal. If not, see <http://www.gnu.org/licenses/>.*/ object CompilingToScala { var with_runtime = false def scalaType(typ: Type): String = typ match { case FunctionT(from, to) => "((" + scalaType(from) + ") => " + scalaType(to) + ")" case ListT(eT) => "L" + "[" + scalaType(eT) + "]" case NumberT => "Double" case BooleanT => "Boolean" case CharT => "Char" case EmptyT => "Unit" case VariableT(_) => "Any" } def run(ex: Expr): String = { ex match { // case ApplyE(f, a) => if (f.t != null && f.t.toString.startsWith("[")) run(f) + ".map(" + run(a) + ")" else run(f) + "(" + run(a) + ")" case ApplyE(f, a) => run(f) + "(" + run(a) + ")" case AssignE(id, e) => "val " + id + "=" + run(e) case BlockE(l) => "{" + l.map(run).mkString("\\n") + "}" //Block: { } ou ( ) ? case BooleanE(b) => b case CharE(c) => "'" + c + "'" case EmptyE => "Unit" case IdentE(id) => id match { case "`" => "println" case "`+" => "print" case "`]" => "prtln_as_list" case "`]+" => "prt_as_list" case "(*)" => "((x:Double) => (y:Double) => x*y)" case "(/)" => "((x:Double) => (y:Double) => x/y)" case "(\\\\)" => "((x:Double) => (y:Double) => math.round(x/y))" case "(%)" => "((x:Double) => (y:Double) => x%y)" case "(+)" => "((x:Double) => (y:Double) => x+y)" case "(-)" => "((x:Double) => (y:Double) => x-y)" case "(>=)" => "((x:Double) => (y:Double) => x>=y)" case "(<=)" => "((x:Double) => (y:Double) => x<=y)" case "(>)" => "((x:Double) => (y:Double) => x>y)" case "(<)" => "((x:Double) => (y:Double) => x<y)" case "(==)" => "((x:Any) => (y:Any) => x==y)" case "(!=)" => "((x:Any) => (y:Any) => x!=y)" case x => x } case lambda@LambdaE(arg, body) => "(" + arg + ":" + scalaType(lambda.t.asInstanceOf[FunctionT].from) + ") => {" + run(body) + "}" case liste@ListE(l) => val eT = liste.t.asInstanceOf[ListT].elem_type " new " + scalaType(liste.t) + "(List(" + l.map(run).mkString(",") + "))" case NumberE(n) => n case TypeE(t) => {with_runtime = true; "interpret(\\"" + t + "\\")"} } } def compile_and_run(expr: Expr) { val source = Source.fromFile("Runtime.scala").toList.dropRight(1).mkString + "\\n" + run(expr) + "\\n" + "}\\n//Runtime.main(Array())" // "//AntiBug.main(Array())" // println(source) // ScalaCompiler.interpret(source) ScalaCompiler.external_run(source) } // def executa_shell(code: String) = { // val gera = Process(code) // gera.!! //run() // ListExpr(gera.lines.toArray.map(x => ListExpr(x.toCharArray map CharacterExpr))) // } }
lamdheal/lamdheal-j
src/lamdheal/CompilingToScala.scala
Scala
gpl-3.0
3,713
import com.julianpeeters.toolbox.provider._
import models.{ ClassData, FieldData }

import com.novus.salat._
import com.novus.salat.global._
import com.mongodb.casbah.Imports._

import org.specs2._
import mutable._
import specification._

class IntSpec extends mutable.Specification {

  val valueMembers: List[FieldData] = List(FieldData("a","Int"))
  val classData = ClassData(Some("intspec"), "MyRecord_IntSpec", valueMembers)
  val tbcc = new ToolBoxCaseClass(classData)
  val typeTemplate = tbcc.runtimeInstance
  type MyRecord = typeTemplate.type

  ctx.registerClassLoader(tbcc.loader)

  val dbo = grater[MyRecord].asDBObject(typeTemplate)
  println(dbo)
  val obj = grater[MyRecord].asObject(dbo)
  println(obj)

  "given a dynamically generated case class MyRecord_IntSpecs(a: Int) as a type parameter, a grater" should {
    "serialize and deserialize correctly" in {
      typeTemplate === obj
    }
  }
}
julianpeeters/toolbox-type-provider
src/test/scala/singleField/IntSpec.scala
Scala
apache-2.0
925
/***********************************************************************
 * Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Apache License, Version 2.0
 * which accompanies this distribution and is available at
 * http://www.opensource.org/licenses/apache2.0.php.
 ***********************************************************************/

package org.locationtech.geomesa.cassandra.tools.commands

import com.beust.jcommander.Parameters
import org.locationtech.geomesa.cassandra.data.CassandraDataStore
import org.locationtech.geomesa.cassandra.tools.CassandraDataStoreCommand
import org.locationtech.geomesa.cassandra.tools.CassandraDataStoreCommand.CassandraDataStoreParams
import org.locationtech.geomesa.cassandra.tools.commands.CassandraExplainCommand.CassandraExplainParams
import org.locationtech.geomesa.tools.status.{ExplainCommand, ExplainParams}

class CassandraExplainCommand extends ExplainCommand[CassandraDataStore] with CassandraDataStoreCommand {
  override val params = new CassandraExplainParams()
}

object CassandraExplainCommand {
  @Parameters(commandDescription = "Explain how a GeoMesa query will be executed")
  class CassandraExplainParams extends ExplainParams with CassandraDataStoreParams
}
elahrvivaz/geomesa
geomesa-cassandra/geomesa-cassandra-tools/src/main/scala/org/locationtech/geomesa/cassandra/tools/commands/CassandraExplainCommand.scala
Scala
apache-2.0
1,347
/* * sbt-haxe * Copyright 2014 ζ·±εœ³ε²‚ε‡‘η½‘η»œζœ‰ι™ε…¬εΈ (Shenzhen QiFun Network Corp., LTD) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.thoughtworks.microbuilder.sbtHaxe import sbt._ import Keys._ import HaxeKeys._ import HaxeConfigurations._ import sbt.AutoPlugin /** * A Plugin used to compile Haxe sources to Java sources. */ final object HaxeJavaPlugin extends AutoPlugin { override final def requires = BaseHaxePlugin override final def trigger = allRequirements private def javaLibOptions(injectConfiguration: Configuration) = { haxeNativeDependencyOptions in injectConfiguration := (for { path <- (dependencyClasspath in injectConfiguration).value if path.data.exists } yield { Seq(s"-${(haxePlatformName in injectConfiguration).value}-lib", path.data.toString) }).flatten } override final lazy val projectSettings: Seq[Setting[_]] = super.projectSettings ++ sbt.addArtifact(artifact in packageBin in HaxeJava, packageBin in HaxeJava) ++ inConfig(HaxeJava)(SbtHaxe.baseHaxeSettings) ++ inConfig(HaxeJava)(SbtHaxe.extendSettings) ++ inConfig(TestHaxeJava)(SbtHaxe.baseHaxeSettings) ++ inConfig(TestHaxeJava)(SbtHaxe.extendTestSettings) ++ SbtHaxe.injectSettings(HaxeJava, Compile) ++ SbtHaxe.injectSettings(TestHaxeJava, Test) ++ (for { injectConfiguration <- Seq(Compile, Test) setting <- Seq( haxePlatformName in injectConfiguration := "java", target in haxe in injectConfiguration := (sourceManaged in injectConfiguration).value, haxeOutputPath in injectConfiguration := None ) } yield setting) ++ Seq( javaLibOptions(Compile), javaLibOptions(Test), haxeXmls in Compile ++= (haxeXml in Compile).value, haxeXmls in Test ++= (haxeXml in Test).value, haxeOptions in Compile ++= Seq("-D", "no-compilation"), doxRegex in Compile := SbtHaxe.buildDoxRegex((sourceDirectories in HaxeJava).value), doxRegex in Test := SbtHaxe.buildDoxRegex((sourceDirectories in TestHaxeJava).value), ivyConfigurations += Haxe, ivyConfigurations += TestHaxe, ivyConfigurations += HaxeJava, ivyConfigurations += TestHaxeJava) }
ThoughtWorksInc/sbt-haxe
src/main/scala/com/thoughtworks/microbuilder/sbtHaxe/HaxeJavaPlugin.scala
Scala
apache-2.0
2,808
package org.jetbrains.plugins.scala
package codeInspection
package internal

import org.jetbrains.plugins.scala.codeInspection.collections.{OperationOnCollectionInspection, Qualified, Simplification, SimplificationType, invocation, invocationText}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScGenericCall}
import org.jetbrains.plugins.scala.project.ProjectExt

import scala.collection.immutable.ArraySeq

class InstanceOfInspection extends OperationOnCollectionInspection {
  override def possibleSimplificationTypes: ArraySeq[SimplificationType] =
    ArraySeq(InstanceOfShouldBeIsInspection)
}

object InstanceOfShouldBeIsInspection extends SimplificationType() {
  override val hint: String = ScalaInspectionBundle.message("replace.with.is")

  private val `.isInstanceOf`: Qualified = invocation("isInstanceOf")

  override def getSimplification(expr: ScExpression): Option[Simplification] = expr match {
    case _ if !expr.getProject.isIntellijScalaPluginProject && !isUnitTestMode =>
      None
    case `.isInstanceOf`(base) && ScGenericCall(_, Seq(castType))
      if base.`type`().map(_.widen).exists(castType.calcType.conforms) =>
      Some(replace(expr).withText(invocationText(base, "is") + s"[${castType.getText}]").highlightRef)
    case _ => None
  }
}
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/codeInspection/internal/InstanceOfInspection.scala
Scala
apache-2.0
1,337
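For context, the rewrite this inspection offers looks roughly like the sketch below; note that it only fires inside the Scala plugin's own codebase, `is` is the plugin's own extension method (from `org.jetbrains.plugins.scala.extensions`), and the surrounding method names are made up for illustration.

// Before: flagged when the checked type is meaningful for the expression's static type.
def looksLikeGenericCall(element: ScExpression): Boolean = element.isInstanceOf[ScGenericCall]

// After applying the suggested simplification:
def looksLikeGenericCallSimplified(element: ScExpression): Boolean = element.is[ScGenericCall]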
/* * (c) Copyright 2016 Hewlett Packard Enterprise Development LP * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cogx.compiler.codegenerator.opencl.hyperkernels import cogx.compiler.codegenerator.opencl.fragments.{TensorElementAddressing, HyperKernel} import cogx.platform.types.{VirtualFieldRegister, Opcode, FieldType} import cogx.compiler.parser.op.ReplicateOp /** Replicate the contents of an input scalar field as each tensor * of an output matrix or vector field. * * A ReplicateHyperKernel takes a 2D scalar field and replicates * the scalar field to create a matrix field with each matrix holding the * contents of the original scalar field. Alternatively, the ReplicateHyperKernel * takes a 1D scalar field and replicates the scalar field to create a vector * field with each vector holding the contents of the original scalar field. * See Cog programming manual for a description of this operation. * * @author Greg Snider and Dick Carter * * @param in The virtual field register of the ScalarField to be replicated to * each tensor of the output. * @param operation The ReplicateOp with its result fieldShape parameter. * @param resultType The FieldType of the result of this kernel. */ private[cogx] class ReplicateHyperKernel private (in: Array[VirtualFieldRegister], operation: Opcode, resultType: FieldType) extends HyperKernel(operation, in, resultType, TensorElementAddressing) { val scalarFieldType = in(0).fieldType val scalarFieldDim = scalarFieldType.dimensions val code = new StringBuilder scalarFieldDim match { case 2 => code.append("row = _tensorElement / " + scalarFieldType.columns + ";\n") code.append("column = _tensorElement - row * " + scalarFieldType.columns + ";\n") case 1 => code.append("column = _tensorElement;\n") case _ => throw new RuntimeException("Illegal scalar field input dimension: " + scalarFieldDim) } code.append(" @out0 = readNonlocal(@in0);\n") addCode(code.toString()) // debugCompile() } /** Factory object for creating kernels of this type. */ private[cogx] object ReplicateHyperKernel { /** * Create a hyperkernel that performs the Replicate function. * * @param in The virtual field register of the ScalarField to be replicated * to each tensor of the output. * @param operation The ReplicateOp with its result fieldShape parameter. * @param resultType The FieldType of the result of this kernel. * @return The synthesized hyperkernel. */ def apply(in: Array[VirtualFieldRegister], operation: ReplicateOp, resultType: FieldType): HyperKernel = { require(in.length == 1) val expectedResultFieldShape = operation.resultShape val scalarFieldType = in(0).fieldType require(scalarFieldType.tensorOrder == 0) require(scalarFieldType.dimensions <= 2) val expectedResultType = new FieldType(expectedResultFieldShape, scalarFieldType.fieldShape, scalarFieldType.elementType) require(expectedResultType == resultType) new ReplicateHyperKernel(in, operation, resultType) } }
hpe-cct/cct-core
src/main/scala/cogx/compiler/codegenerator/opencl/hyperkernels/ReplicateHyperKernel.scala
Scala
apache-2.0
3,725
package io.finch import java.util.UUID import cats.Show import cats.effect.IO import com.twitter.finagle.http.{FileElement, RequestBuilder, SimpleElement} import com.twitter.finagle.http.exp.Multipart import com.twitter.io.Buf import io.finch.data.Foo class MultipartSpec extends FinchSpec { behavior of "multipart*" def withFileUpload(name: String, value: Buf): Input = Input.fromRequest(RequestBuilder() .url("http://example.com") .add(FileElement(name, value, Some("image/gif"), Some("dealwithit.gif"))) .buildFormPost(multipart = true) ) def withAttribute[A : Show](first: (String, A), rest: (String, A)*): Input = { val req = RequestBuilder() .url("http://example.com") .add(SimpleElement(first._1, Show[A].show(first._2))) Input.fromRequest( rest.foldLeft(req)((builder, attr) => builder.add(SimpleElement(attr._1, Show[A].show(attr._2))) ).buildFormPost(multipart = true) ) } checkAll("Attribute[String]", EntityEndpointLaws[IO, String](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll("Attribute[Int]", EntityEndpointLaws[IO, Int](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll("Attribute[Long]", EntityEndpointLaws[IO, Long](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll("Attribute[Boolean]", EntityEndpointLaws[IO, Boolean](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll("Attribute[Float]", EntityEndpointLaws[IO, Float](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll("Attribute[Double]", EntityEndpointLaws[IO, Double](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll("Attribute[UUID]", EntityEndpointLaws[IO, UUID](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll("Attribute[Foo]", EntityEndpointLaws[IO, Foo](multipartAttributeOption("x"))(a => withAttribute("x" -> a)).evaluating) checkAll( "EvaluatingAttribute[String]", EvaluatingEndpointLaws[IO, String](implicit de => multipartAttribute("foo")).all ) it should "file upload (single)" in { check { b: Buf => val i = withFileUpload("foo", b) val fu = multipartFileUpload("foo").apply(i).awaitValueUnsafe() val fuo = multipartFileUploadOption("foo").apply(i).awaitValueUnsafe().flatten fu.map(_.asInstanceOf[Multipart.InMemoryFileUpload].content) === Some(b) && fuo.map(_.asInstanceOf[Multipart.InMemoryFileUpload].content) === Some(b) } } it should "fail when attribute is missing" in { an[Error.NotPresent] should be thrownBy { multipartAttribute("foo").apply(Input.get("/")).awaitValueUnsafe() } } it should "return None for when attribute is missing for optional endpoint" in { multipartAttributeOption("foo").apply(Input.get("/")).awaitValueUnsafe().flatten shouldBe None } it should "fail when attributes are missing" in { an[Error.NotPresent] should be thrownBy { multipartAttributesNel("foo").apply(Input.get("/")).awaitValueUnsafe() } } it should "return empty sequence when attributes are missing for seq endpoint" in { multipartAttributes("foo").apply(Input.get("/")).awaitValueUnsafe() === Some(Seq()) } it should "fail when attribute is malformed" in { an[Error.NotParsed] should be thrownBy { multipartAttribute[Int]("foo").apply(withAttribute("foo" -> "bar")).awaitValueUnsafe() } } }
ImLiar/finch
core/src/test/scala/io/finch/MultipartSpec.scala
Scala
apache-2.0
3,571
package com.danielasfregola.twitter4s.entities

final case class TweetUpdate(status: String,
                             in_reply_to_status_id: Option[Long] = None,
                             possibly_sensitive: Boolean = false,
                             lat: Option[Long] = None,
                             long: Option[Long] = None,
                             place_id: Option[String] = None,
                             display_coordinates: Boolean = false,
                             trim_user: Boolean = false,
                             media_ids: Seq[Long] = Seq.empty)
DanielaSfregola/twitter4s
src/main/scala/com/danielasfregola/twitter4s/entities/TweetUpdate.scala
Scala
apache-2.0
592
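Since only `status` lacks a default, constructing an update typically names just the fields that differ from the defaults; the values below are invented purely for illustration.

import com.danielasfregola.twitter4s.entities.TweetUpdate

// Illustrative values only.
val reply = TweetUpdate(
  status = "Thanks for the report!",
  in_reply_to_status_id = Some(123456789L),
  trim_user = true
)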
package org.jetbrains.plugins.scala.dfa.lattice

import org.jetbrains.plugins.scala.dfa.lattice.MeetSemiLatticeOps.MeetSemiLatticeExt
import org.jetbrains.plugins.scala.dfa.latticeTop

import scala.language.implicitConversions

/**
 * A meet-semi-lattice is a [[SemiLattice]] with a reflexive `meet` operation
 * where every pair of elements has exactly one infimum.
 *
 *   A   B
 *    \\ /
 *    \\ /
 *     Y
 *
 * Every pair of elements (A, B) has one infimum Y == (A meet B), which is the greatest element so that Y <= A and Y <= B.
 * If `A <= B` then `A meet B == A`
 *
 * Because of this condition there is always a bottom element so that every element in the lattice >= bottom
 * (technically not for lattices that have an infinite height... but we are not using them here).
 *
 * @tparam L the type to implement the meet-semi-lattice for
 */
trait MeetSemiLattice[L] extends SemiLattice[L] with HasBottom[L] {
  def meet(lhs: L, rhs: L): L

  def meetAll(first: L, others: IterableOnce[L]): L =
    others.iterator.foldLeft(first)(meet)
}

trait MeetSemiLatticeOps {
  implicit final def meetSemiLatticeExt[L](element: L): MeetSemiLatticeExt[L] =
    new MeetSemiLatticeExt(element)

  final def meet[L: MeetSemiLattice](first: L, others: L*): L = first.meetAll(others)

  final def meet[L: MeetSemiLattice : HasTop](elements: IterableOnce[L]): L = {
    val it = elements.iterator
    if (it.hasNext) it.next().meetAll(it)
    else latticeTop
  }
}

object MeetSemiLatticeOps extends MeetSemiLatticeOps {
  final class MeetSemiLatticeExt[L](private val element: L) extends AnyVal {
    def meet[LL >: L](other: LL)(implicit lattice: MeetSemiLattice[LL]): LL =
      lattice.meet(element, other)

    def &[LL >: L](other: LL)(implicit lattice: MeetSemiLattice[LL]): LL =
      lattice.meet(element, other)

    def meetAll[LL >: L](others: IterableOnce[LL])(implicit lattice: MeetSemiLattice[LL]): LL =
      lattice.meetAll(element, others)
  }
}
JetBrains/intellij-scala
scala/dfa/src/org/jetbrains/plugins/scala/dfa/lattice/MeetSemiLattice.scala
Scala
apache-2.0
1,975
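A small usage sketch of the ops defined above; it assumes an implicit `MeetSemiLattice[L]` instance is already in scope for the chosen `L` (as the dfa module provides for its own lattice types), and the helper name is hypothetical.

import org.jetbrains.plugins.scala.dfa.lattice.MeetSemiLatticeOps._

import scala.language.implicitConversions

// Hypothetical helper: the infimum of three lattice values.
def infimumOfThree[L: MeetSemiLattice](a: L, b: L, c: L): L =
  (a meet b) & c // `meet` and `&` are the same operation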
package com.sksamuel.scrimage.metadata

import com.sksamuel.scrimage.{Tag, ImageMetadata}
import org.scalatest.{ Matchers, WordSpec }

/** @author Stephen Samuel */
class ImageMetadataTest extends WordSpec with Matchers {

  private val stream = getClass.getResourceAsStream("/vossen.jpg")

  "metadata" should {
    "read EXIF" in {
      val meta = ImageMetadata.fromStream(stream)
      meta.tags should contain(Tag("ISO Speed Ratings", 34855, "2500", "2500"))
      meta.tags should contain(Tag("Image Width", 256, "4928", "4928 pixels"))
      meta.tags should contain(Tag("White Balance Mode", 41987, "0", "Auto white balance"))
    }
  }
}
carlosFattor/scrimage
scrimage-core/src/test/scala/com/sksamuel/scrimage/metadata/ImageMetadataTest.scala
Scala
apache-2.0
646
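Outside of a test, reading the same metadata might look like the sketch below; the resource path is reused from the spec above and the printing is purely illustrative.

import com.sksamuel.scrimage.ImageMetadata

// Dump every EXIF tag of the bundled sample image.
val metadata = ImageMetadata.fromStream(getClass.getResourceAsStream("/vossen.jpg"))
metadata.tags.foreach(println)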
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ui.jobs import java.net.URLEncoder import java.nio.charset.StandardCharsets.UTF_8 import java.util.{Date, Locale} import javax.servlet.http.HttpServletRequest import scala.collection.mutable.ListBuffer import scala.xml._ import org.apache.commons.text.StringEscapeUtils import org.apache.spark.JobExecutionStatus import org.apache.spark.internal.config.SCHEDULER_MODE import org.apache.spark.scheduler._ import org.apache.spark.status.AppStatusStore import org.apache.spark.status.api.v1 import org.apache.spark.ui._ import org.apache.spark.util.Utils /** Page showing list of all ongoing and recently finished jobs */ private[ui] class AllJobsPage(parent: JobsTab, store: AppStatusStore) extends WebUIPage("") { import ApiHelper._ private val JOBS_LEGEND = <div class="legend-area"><svg width="150px" height="85px"> <rect class="succeeded-job-legend" x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect> <text x="35px" y="17px">Succeeded</text> <rect class="failed-job-legend" x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect> <text x="35px" y="42px">Failed</text> <rect class="running-job-legend" x="5px" y="55px" width="20px" height="15px" rx="2px" ry="2px"></rect> <text x="35px" y="67px">Running</text> </svg></div>.toString.filter(_ != '\\n') private val EXECUTORS_LEGEND = <div class="legend-area"><svg width="150px" height="55px"> <rect class="executor-added-legend" x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect> <text x="35px" y="17px">Added</text> <rect class="executor-removed-legend" x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect> <text x="35px" y="42px">Removed</text> </svg></div>.toString.filter(_ != '\\n') private def makeJobEvent(jobs: Seq[v1.JobData]): Seq[String] = { jobs.filter { job => job.status != JobExecutionStatus.UNKNOWN && job.submissionTime.isDefined }.map { job => val jobId = job.jobId val status = job.status val (_, lastStageDescription) = lastStageNameAndDescription(store, job) val jobDescription = UIUtils.makeDescription( job.description.getOrElse(lastStageDescription), "", plainText = true).text val submissionTime = job.submissionTime.get.getTime() val completionTime = job.completionTime.map(_.getTime()).getOrElse(System.currentTimeMillis()) val classNameByStatus = status match { case JobExecutionStatus.SUCCEEDED => "succeeded" case JobExecutionStatus.FAILED => "failed" case JobExecutionStatus.RUNNING => "running" case JobExecutionStatus.UNKNOWN => "unknown" } // The timeline library treats contents as HTML, so we have to escape them. We need to add // extra layers of escaping in order to embed this in a JavaScript string literal. 
val escapedDesc = Utility.escape(jobDescription) val jsEscapedDescForTooltip = StringEscapeUtils.escapeEcmaScript(Utility.escape(escapedDesc)) val jsEscapedDescForLabel = StringEscapeUtils.escapeEcmaScript(escapedDesc) val jobEventJsonAsStr = s""" |{ | 'className': 'job application-timeline-object ${classNameByStatus}', | 'group': 'jobs', | 'start': new Date(${submissionTime}), | 'end': new Date(${completionTime}), | 'content': '<div class="application-timeline-content"' + | 'data-html="true" data-placement="top" data-toggle="tooltip"' + | 'data-title="${jsEscapedDescForTooltip} (Job ${jobId})<br>' + | 'Status: ${status}<br>' + | 'Submitted: ${UIUtils.formatDate(new Date(submissionTime))}' + | '${ if (status != JobExecutionStatus.RUNNING) { s"""<br>Completed: ${UIUtils.formatDate(new Date(completionTime))}""" } else { "" } }">' + | '${jsEscapedDescForLabel} (Job ${jobId})</div>' |} """.stripMargin jobEventJsonAsStr } } private def makeExecutorEvent(executors: Seq[v1.ExecutorSummary]): Seq[String] = { val events = ListBuffer[String]() executors.foreach { e => val addedEvent = s""" |{ | 'className': 'executor added', | 'group': 'executors', | 'start': new Date(${e.addTime.getTime()}), | 'content': '<div class="executor-event-content"' + | 'data-toggle="tooltip" data-placement="top"' + | 'data-title="Executor ${e.id}<br>' + | 'Added at ${UIUtils.formatDate(e.addTime)}"' + | 'data-html="true">Executor ${e.id} added</div>' |} """.stripMargin events += addedEvent e.removeTime.foreach { removeTime => val removedEvent = s""" |{ | 'className': 'executor removed', | 'group': 'executors', | 'start': new Date(${removeTime.getTime()}), | 'content': '<div class="executor-event-content"' + | 'data-toggle="tooltip" data-placement="top"' + | 'data-title="Executor ${e.id}<br>' + | 'Removed at ${UIUtils.formatDate(removeTime)}' + | '${ e.removeReason.map { reason => s"""<br>Reason: ${StringEscapeUtils.escapeEcmaScript( reason.replace("\\n", " "))}""" }.getOrElse("") }"' + | 'data-html="true">Executor ${e.id} removed</div>' |} """.stripMargin events += removedEvent } } events.toSeq } private def makeTimeline( jobs: Seq[v1.JobData], executors: Seq[v1.ExecutorSummary], startTime: Long): Seq[Node] = { val jobEventJsonAsStrSeq = makeJobEvent(jobs) val executorEventJsonAsStrSeq = makeExecutorEvent(executors) val groupJsonArrayAsStr = s""" |[ | { | 'id': 'executors', | 'content': '<div>Executors</div>${EXECUTORS_LEGEND}', | }, | { | 'id': 'jobs', | 'content': '<div>Jobs</div>${JOBS_LEGEND}', | } |] """.stripMargin val eventArrayAsStr = (jobEventJsonAsStrSeq ++ executorEventJsonAsStrSeq).mkString("[", ",", "]") <span class="expand-application-timeline"> <span class="expand-application-timeline-arrow arrow-closed"></span> <a data-toggle="tooltip" title={ToolTips.JOB_TIMELINE} data-placement="top"> Event Timeline </a> </span> ++ <div id="application-timeline" class="collapsed"> <div class="control-panel"> <div id="application-timeline-zoom-lock"> <input type="checkbox"></input> <span>Enable zooming</span> </div> </div> </div> ++ <script type="text/javascript"> {Unparsed(s"drawApplicationTimeline(${groupJsonArrayAsStr}," + s"${eventArrayAsStr}, ${startTime}, ${UIUtils.getTimeZoneOffset()});")} </script> } private def jobsTable( request: HttpServletRequest, tableHeaderId: String, jobTag: String, jobs: Seq[v1.JobData], killEnabled: Boolean): Seq[Node] = { val someJobHasJobGroup = jobs.exists(_.jobGroup.isDefined) val jobIdTitle = if (someJobHasJobGroup) "Job Id (Job Group)" else "Job Id" val jobPage = 
Option(request.getParameter(jobTag + ".page")).map(_.toInt).getOrElse(1) try { new JobPagedTable( request, store, jobs, tableHeaderId, jobTag, UIUtils.prependBaseUri(request, parent.basePath), "jobs", // subPath killEnabled, jobIdTitle ).table(jobPage) } catch { case e @ (_ : IllegalArgumentException | _ : IndexOutOfBoundsException) => <div class="alert alert-error"> <p>Error while rendering job table:</p> <pre> {Utils.exceptionString(e)} </pre> </div> } } def render(request: HttpServletRequest): Seq[Node] = { val appInfo = store.applicationInfo() val startTime = appInfo.attempts.head.startTime.getTime() val endTime = appInfo.attempts.head.endTime.getTime() val activeJobs = new ListBuffer[v1.JobData]() val completedJobs = new ListBuffer[v1.JobData]() val failedJobs = new ListBuffer[v1.JobData]() store.jobsList(null).foreach { job => job.status match { case JobExecutionStatus.SUCCEEDED => completedJobs += job case JobExecutionStatus.FAILED => failedJobs += job case _ => activeJobs += job } } val activeJobsTable = jobsTable(request, "active", "activeJob", activeJobs.toSeq, killEnabled = parent.killEnabled) val completedJobsTable = jobsTable(request, "completed", "completedJob", completedJobs.toSeq, killEnabled = false) val failedJobsTable = jobsTable(request, "failed", "failedJob", failedJobs.toSeq, killEnabled = false) val shouldShowActiveJobs = activeJobs.nonEmpty val shouldShowCompletedJobs = completedJobs.nonEmpty val shouldShowFailedJobs = failedJobs.nonEmpty val appSummary = store.appSummary() val completedJobNumStr = if (completedJobs.size == appSummary.numCompletedJobs) { s"${completedJobs.size}" } else { s"${appSummary.numCompletedJobs}, only showing ${completedJobs.size}" } // SPARK-33991 Avoid enumeration conversion error. val schedulingMode = store.environmentInfo().sparkProperties.toMap .get(SCHEDULER_MODE.key) .map { mode => SchedulingMode.withName(mode.toUpperCase(Locale.ROOT)).toString } .getOrElse("Unknown") val summary: NodeSeq = <div> <ul class="list-unstyled"> <li> <strong>User:</strong> {parent.getSparkUser} </li> <li> <strong>Total Uptime:</strong> { if (endTime < 0 && parent.sc.isDefined) { UIUtils.formatDuration(System.currentTimeMillis() - startTime) } else if (endTime > 0) { UIUtils.formatDuration(endTime - startTime) } } </li> <li> <strong>Scheduling Mode: </strong> {schedulingMode} </li> { if (shouldShowActiveJobs) { <li> <a href="#active"><strong>Active Jobs:</strong></a> {activeJobs.size} </li> } } { if (shouldShowCompletedJobs) { <li id="completed-summary"> <a href="#completed"><strong>Completed Jobs:</strong></a> {completedJobNumStr} </li> } } { if (shouldShowFailedJobs) { <li> <a href="#failed"><strong>Failed Jobs:</strong></a> {failedJobs.size} </li> } } </ul> </div> var content = summary content ++= makeTimeline((activeJobs ++ completedJobs ++ failedJobs).toSeq, store.executorList(false), startTime) if (shouldShowActiveJobs) { content ++= <span id="active" class="collapse-aggregated-activeJobs collapse-table" onClick="collapseTable('collapse-aggregated-activeJobs','aggregated-activeJobs')"> <h4> <span class="collapse-table-arrow arrow-open"></span> <a>Active Jobs ({activeJobs.size})</a> </h4> </span> ++ <div class="aggregated-activeJobs collapsible-table"> {activeJobsTable} </div> } if (shouldShowCompletedJobs) { content ++= <span id="completed" class="collapse-aggregated-completedJobs collapse-table" onClick="collapseTable('collapse-aggregated-completedJobs','aggregated-completedJobs')"> <h4> <span class="collapse-table-arrow arrow-open"></span> 
<a>Completed Jobs ({completedJobNumStr})</a> </h4> </span> ++ <div class="aggregated-completedJobs collapsible-table"> {completedJobsTable} </div> } if (shouldShowFailedJobs) { content ++= <span id ="failed" class="collapse-aggregated-failedJobs collapse-table" onClick="collapseTable('collapse-aggregated-failedJobs','aggregated-failedJobs')"> <h4> <span class="collapse-table-arrow arrow-open"></span> <a>Failed Jobs ({failedJobs.size})</a> </h4> </span> ++ <div class="aggregated-failedJobs collapsible-table"> {failedJobsTable} </div> } val helpText = """A job is triggered by an action, like count() or saveAsTextFile().""" + " Click on a job to see information about the stages of tasks inside it." UIUtils.headerSparkPage(request, "Spark Jobs", content, parent, helpText = Some(helpText)) } } private[ui] class JobTableRowData( val jobData: v1.JobData, val lastStageName: String, val lastStageDescription: String, val duration: Long, val formattedDuration: String, val submissionTime: Long, val formattedSubmissionTime: String, val jobDescription: NodeSeq, val detailUrl: String) private[ui] class JobDataSource( store: AppStatusStore, jobs: Seq[v1.JobData], basePath: String, pageSize: Int, sortColumn: String, desc: Boolean) extends PagedDataSource[JobTableRowData](pageSize) { import ApiHelper._ // Convert JobUIData to JobTableRowData which contains the final contents to show in the table // so that we can avoid creating duplicate contents during sorting the data private val data = jobs.map(jobRow).sorted(ordering(sortColumn, desc)) override def dataSize: Int = data.size override def sliceData(from: Int, to: Int): Seq[JobTableRowData] = data.slice(from, to) private def jobRow(jobData: v1.JobData): JobTableRowData = { val duration: Option[Long] = JobDataUtil.getDuration(jobData) val formattedDuration = JobDataUtil.getFormattedDuration(jobData) val submissionTime = jobData.submissionTime val formattedSubmissionTime = JobDataUtil.getFormattedSubmissionTime(jobData) val (lastStageName, lastStageDescription) = lastStageNameAndDescription(store, jobData) val jobDescription = UIUtils.makeDescription( jobData.description.getOrElse(lastStageDescription), basePath, plainText = false) val detailUrl = "%s/jobs/job/?id=%s".format(basePath, jobData.jobId) new JobTableRowData( jobData, lastStageName, lastStageDescription, duration.getOrElse(-1), formattedDuration, submissionTime.map(_.getTime()).getOrElse(-1L), formattedSubmissionTime, jobDescription, detailUrl ) } /** * Return Ordering according to sortColumn and desc */ private def ordering(sortColumn: String, desc: Boolean): Ordering[JobTableRowData] = { val ordering: Ordering[JobTableRowData] = sortColumn match { case "Job Id" | "Job Id (Job Group)" => Ordering.by(_.jobData.jobId) case "Description" => Ordering.by(x => (x.lastStageDescription, x.lastStageName)) case "Submitted" => Ordering.by(_.submissionTime) case "Duration" => Ordering.by(_.duration) case "Stages: Succeeded/Total" | "Tasks (for all stages): Succeeded/Total" => throw new IllegalArgumentException(s"Unsortable column: $sortColumn") case unknownColumn => throw new IllegalArgumentException(s"Unknown column: $unknownColumn") } if (desc) { ordering.reverse } else { ordering } } } private[ui] class JobPagedTable( request: HttpServletRequest, store: AppStatusStore, data: Seq[v1.JobData], tableHeaderId: String, jobTag: String, basePath: String, subPath: String, killEnabled: Boolean, jobIdTitle: String ) extends PagedTable[JobTableRowData] { private val (sortColumn, desc, pageSize) = 
getTableParameters(request, jobTag, jobIdTitle) private val parameterPath = basePath + s"/$subPath/?" + getParameterOtherTable(request, jobTag) private val encodedSortColumn = URLEncoder.encode(sortColumn, UTF_8.name()) override def tableId: String = jobTag + "-table" override def tableCssClass: String = "table table-bordered table-sm table-striped table-head-clickable table-cell-width-limited" override def pageSizeFormField: String = jobTag + ".pageSize" override def pageNumberFormField: String = jobTag + ".page" override val dataSource = new JobDataSource( store, data, basePath, pageSize, sortColumn, desc) override def pageLink(page: Int): String = { parameterPath + s"&$pageNumberFormField=$page" + s"&$jobTag.sort=$encodedSortColumn" + s"&$jobTag.desc=$desc" + s"&$pageSizeFormField=$pageSize" + s"#$tableHeaderId" } override def goButtonFormPath: String = s"$parameterPath&$jobTag.sort=$encodedSortColumn&$jobTag.desc=$desc#$tableHeaderId" override def headers: Seq[Node] = { // Information for each header: title, sortable, tooltip val jobHeadersAndCssClasses: Seq[(String, Boolean, Option[String])] = Seq( (jobIdTitle, true, None), ("Description", true, None), ("Submitted", true, None), ("Duration", true, Some("Elapsed time since the job was submitted " + "until execution completion of all its stages.")), ("Stages: Succeeded/Total", false, None), ("Tasks (for all stages): Succeeded/Total", false, None) ) isSortColumnValid(jobHeadersAndCssClasses, sortColumn) headerRow(jobHeadersAndCssClasses, desc, pageSize, sortColumn, parameterPath, jobTag, tableHeaderId) } override def row(jobTableRow: JobTableRowData): Seq[Node] = { val job = jobTableRow.jobData val killLink = if (killEnabled) { val confirm = s"if (window.confirm('Are you sure you want to kill job ${job.jobId} ?')) " + "{ this.parentNode.submit(); return true; } else { return false; }" // SPARK-6846 this should be POST-only but YARN AM won't proxy POST /* val killLinkUri = s"$basePathUri/jobs/job/kill/" <form action={killLinkUri} method="POST" style="display:inline"> <input type="hidden" name="id" value={job.jobId.toString}/> <a href="#" onclick={confirm} class="kill-link">(kill)</a> </form> */ val killLinkUri = s"$basePath/jobs/job/kill/?id=${job.jobId}" <a href={killLinkUri} onclick={confirm} class="kill-link">(kill)</a> } else { Seq.empty } <tr id={"job-" + job.jobId}> <td> {job.jobId} {job.jobGroup.map(id => s"($id)").getOrElse("")} </td> <td> {jobTableRow.jobDescription} {killLink} <a href={jobTableRow.detailUrl} class="name-link">{jobTableRow.lastStageName}</a> </td> <td> {jobTableRow.formattedSubmissionTime} </td> <td>{jobTableRow.formattedDuration}</td> <td class="stage-progress-cell"> {job.numCompletedStages}/{job.stageIds.size - job.numSkippedStages} {if (job.numFailedStages > 0) s"(${job.numFailedStages} failed)"} {if (job.numSkippedStages > 0) s"(${job.numSkippedStages} skipped)"} </td> <td class="progress-cell"> {UIUtils.makeProgressBar(started = job.numActiveTasks, completed = job.numCompletedIndices, failed = job.numFailedTasks, skipped = job.numSkippedTasks, reasonToNumKilled = job.killedTasksSummary, total = job.numTasks - job.numSkippedTasks)} </td> </tr> } }
witgo/spark
core/src/main/scala/org/apache/spark/ui/jobs/AllJobsPage.scala
Scala
apache-2.0
20,858
import org.scalatest.BeforeAndAfterAll
import org.scalatestplus.play.{OneAppPerSuite, PlaySpec}
import play.api.Logger
import play.api.test.FakeApplication
import play.api.test.Helpers._

class BranchTests extends PlaySpec with OneAppPerSuite with BeforeAndAfterAll {

  implicit val context = play.api.libs.concurrent.Execution.Implicits.defaultContext

  lazy val logger = Logger(classOf[BranchTests])

  val gitlabAPI = GitlabHelper.gitlabAPI
  val projectName = GitlabHelper.projectName

  var projectId = -1
  var commitSHA = ""

  override def beforeAll(): Unit = {
    running(FakeApplication()) {
      GitlabHelper.createTestSSHKey
      projectId = GitlabHelper.createTestProject
      logger.debug("Starting Branch Tests")
    }
  }

  override def afterAll() {
    running(FakeApplication()) {
      val response = await(gitlabAPI.deleteBranch(projectId, "branch_name"))
      GitlabHelper.checkDeleteAfterTest(response, BRANCH)
      GitlabHelper.deleteTestSSHKey()
      GitlabHelper.deleteTestProject()
      logger.debug("End of Branch Tests")
      Thread.sleep(1000L)
    }
  }

  "GitlabAPI must manage project branches" should {

    "get all branches" in {
      val response = await(gitlabAPI.getBranches(projectId))
      response.status must be(200)
    }

    "get one branch" in {
      val response = await(gitlabAPI.getBranch(projectId, "master"))
      response.status must be(200)
      commitSHA = (response.json \\ "commit" \\ "id").as[String]
    }

    "protect a branch" in {
      await(gitlabAPI.protectBranch(projectId, "master")).status must be(200)
    }

    "unprotect a branch" in {
      await(gitlabAPI.unprotectBranch(projectId, "master")).status must be(200)
    }

    "create a new branch" in {
      val response = await(gitlabAPI.createBranch(projectId, "test_branch_name", commitSHA))
      response.status must be(201)
    }

    "delete a branch" in {
      val response = await(gitlabAPI.deleteBranch(projectId, "test_branch_name"))
      response.status must be(200)
      response.json must not be null
    }
  }
}
thomas-tosoni/scala-gitlab-api
test/BranchTests.scala
Scala
mit
2,068
package ru.org.codingteam.horta

import java.nio.file.Paths

import akka.actor.{ActorSystem, Props}
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.StrictLogging
import org.jivesoftware.smack.SmackConfiguration
import ru.org.codingteam.horta.configuration.Configuration
import ru.org.codingteam.horta.core.Core
import ru.org.codingteam.horta.events.{EventCollector, TwitterEndpoint}
import ru.org.codingteam.horta.plugins.helper.HelperPlugin
import ru.org.codingteam.horta.plugins.bash.BashPlugin
import ru.org.codingteam.horta.plugins.diag.DiagnosticPlugin
import ru.org.codingteam.horta.plugins.dice.DiceRoller
import ru.org.codingteam.horta.plugins.karma.KarmaPlugin
import ru.org.codingteam.horta.plugins.log.LogPlugin
import ru.org.codingteam.horta.plugins.loglist.LogListPlugin
import ru.org.codingteam.horta.plugins.mail.MailPlugin
import ru.org.codingteam.horta.plugins.markov.MarkovPlugin
import ru.org.codingteam.horta.plugins.pet.PetPlugin
import ru.org.codingteam.horta.plugins.visitor.VisitorPlugin
import ru.org.codingteam.horta.plugins.wtf.WtfPlugin
import ru.org.codingteam.horta.plugins.{AccessPlugin, FortunePlugin, TwitterPlugin, VersionPlugin}
import ru.org.codingteam.horta.protocol.jabber.JabberProtocol
import scalikejdbc.GlobalSettings

object Application extends App with StrictLogging {

  // TODO: This should eventually be handled by some kind of dependency injection
  val eventEndpoints = List(
    new TwitterEndpoint()
  )

  val plugins = List(
    Props[DiagnosticPlugin],
    Props[FortunePlugin],
    Props[AccessPlugin],
    Props[LogPlugin],
    Props[VisitorPlugin],
    Props[WtfPlugin],
    Props[MailPlugin],
    Props[PetPlugin],
    Props[MarkovPlugin],
    Props[VersionPlugin],
    Props[BashPlugin],
    Props[DiceRoller],
    // Props[HtmlReaderPlugin], // TODO: Disabled for security reasons, see #366
    Props[HelperPlugin],
    Props[KarmaPlugin],
    Props[LogListPlugin],
    Props[TwitterPlugin]
  )

  val protocols = List(Props[JabberProtocol])

  initializeConfiguration(args)

  SmackConfiguration.setPacketReplyTimeout(Configuration.xmppTimeout.toMillis.toInt)

  val system = ActorSystem("CodingteamSystem", ConfigFactory.parseResources("application.conf"))
  val core = system.actorOf(Props(new Core(plugins, protocols)), "core")
  val eventCollector = system.actorOf(Props(classOf[EventCollector], eventEndpoints))

  private def initializeConfiguration(args: Array[String]) {
    GlobalSettings.loggingSQLAndTime = GlobalSettings.loggingSQLAndTime.copy(singleLineMode = true)
    val configPath = args match {
      case Array(config, _*) => config
      case _ => "horta.properties"
    }
    Configuration.initialize(Paths.get(configPath))
  }
}
codingteam/horta-hell
src/main/scala/ru/org/codingteam/horta/Application.scala
Scala
mit
2,740
package services.migration

import akka.actor._
import play.Logger

import scala.util.Random
import akka.util.Timeout
import scala.concurrent.duration._
import akka.pattern.ask
import scala.concurrent.Future
import akka.contrib.throttle.Throttler._
import akka.contrib.throttle.TimerBasedThrottler

trait ThrottleControl{

  private val random = new Random()

  /**
   * Returns a future for the given work.
   * It will only be performed once the throttle is happy.
   */
  def throttler[T](throttle : Throttle)(work : => T) : Future[T] = {
    smallRandomDelay() // randomise the behaviour so we don't all arrive at the same time
    throttle.performThrottledWork[T](() => work)
  }

  import scala.concurrent.ExecutionContext.Implicits.global

  /**
   * The given future work will only be performed once the throttle is happy,
   * and the two futures are combined into one
   */
  def futureWorkThrottler[T](throttle : Throttle)(work : => Future[T]) : Future[T] =
    throttler[Future[T]](throttle)(work).flatMap(identity)

  private def smallRandomDelay(max : Int = 30) {
    Thread.sleep(random.nextInt(max))
  }
}

object ThrottleControl extends ThrottleControl{

  private val system = ActorSystem("throttleSystem")

  val defaultRate = 10 msgsPer 1.second

  def throttle(rate : Rate = defaultRate) = new Throttle(system, rate)

  private val flexThrottle = throttle(10 msgsPer 1.second)
  private val r2Throttle = throttle(20 msgsPer 1.second)

  def flexThrottler[T] = throttler[T](flexThrottle) _
  def r2Throttler[T] = throttler[T](r2Throttle) _

  def flexThrottlerFt[T] = futureWorkThrottler[T](flexThrottle) _
  def r2ThrottlerFt[T] = futureWorkThrottler[T](r2Throttle) _
}

// A simple actor that invokes whatever function it receives and returns the result to the sender
class FunctionInvokerActor extends Actor {
  def receive = {
    case functionMsg : FunctionMessage[_] => {
      val response = functionMsg.theFunction.apply()
      sender ! response
    }
    case x : Any => throw new RuntimeException(s"Invoked with unrecognised message type : ${x}")
  }
}

case class FunctionMessage[T](theFunction : () => T)

class DeadLetterActor extends Actor{
  override def receive: Receive = {
    case d : DeadLetter => {
      Logger.error(s"Throttler dead message! ${d.message.toString}")
      //TODO: return this to the asker?
    }
  }
}

class Throttle(system : ActorSystem, rate : Rate) {

  import scala.language.postfixOps

  implicit val timeout = Timeout(5 minute) // this value should be larger than the calling client's timeout

  private val throttledInvoker = throttledActor(system.actorOf(Props[FunctionInvokerActor]))

  private def throttledActor(underlying : ActorRef) = {
    val throttledActor = system.actorOf(Props(classOf[TimerBasedThrottler], rate))
    throttledActor ! SetTarget(Some(underlying))
    throttledActor
  }

  system.eventStream.subscribe(system.actorOf(Props[DeadLetterActor]), classOf[DeadLetter])

  def performThrottledWork[T]( work : () => T) : Future[T] = {
    val msg = FunctionMessage[T](work)
    ask( throttledInvoker, msg).asInstanceOf[Future[T]]
  }
}
guardian/flex-content-migrator
app/services/migration/Throttle.scala
Scala
mit
3,129
package org.ninjatasks import org.scalatest.{Matchers, FlatSpec} /** * * Created by Gilad Ber on 6/14/2014. */ abstract class UnitSpec extends FlatSpec with Matchers
giladber/ninja-tasks
src/test/scala/org/ninjatasks/UnitSpec.scala
Scala
apache-2.0
170
package com.varunvats.practice.array import com.varunvats.practice.sorting.UnitSpec class SelfCrossingSpec extends UnitSpec { val theSequenceOfNumbers = afterWord("the sequence of numbers") "A sequence of points" must { "not be self-crossing" when theSequenceOfNumbers { "has fewer than or equal to 3 entries" in { val emptyArray = Array.empty[Int] SelfCrossing(emptyArray) shouldBe false val oneElem = Array(2) SelfCrossing(oneElem) shouldBe false val twoElems = Array(2, 1) SelfCrossing(twoElems) shouldBe false val threeElems = Array(3, 1, 2) SelfCrossing(threeElems) shouldBe false } "forms a monotonically expanding spiral" in { val seq = Array(1, 1, 2, 2, 3, 3, 4, 4) SelfCrossing(seq) shouldBe false } "forms an expanding spiral that contracts in the last step" in { val seq = Array(1, 1, 2, 2, 3, 4, 3) SelfCrossing(seq) shouldBe false } "forms a monotonically contracting spiral" in { val seq = Array(4, 4, 3, 3, 2, 2, 1, 1) SelfCrossing(seq) shouldBe false } } "be self crossing" when theSequenceOfNumbers { "forms a non-monotonically contracting spiral" in { val seq = Array(4, 4, 3, 3, 2, 2, 2) SelfCrossing(seq) shouldBe true } "forms a contracting spiral that expands" in { val seq = Array(4, 4, 3, 3, 2, 2, 3) SelfCrossing(seq) shouldBe true } "forms a box" in { val box1 = Array(1, 1, 2, 1, 1) SelfCrossing(box1) shouldBe true val box2 = Array(1, 1, 1, 1) SelfCrossing(box2) shouldBe true } } } }
varunvats/practice
jvm/src/test/scala/com/varunvats/practice/array/SelfCrossingSpec.scala
Scala
mit
1,707
package model

import play.api.libs.json._

/**
  * Represents the Swagger definition for ExtensionClassContainerImpl1.
  * @param additionalProperties Any additional properties this model may have.
  */
@javax.annotation.Generated(value = Array("org.openapitools.codegen.languages.ScalaPlayFrameworkServerCodegen"), date = "2022-02-13T02:38:35.589632Z[Etc/UTC]")
case class ExtensionClassContainerImpl1(
  `class`: Option[String],
  links: Option[ExtensionClassContainerImpl1links],
  map: Option[ExtensionClassContainerImpl1map],
  additionalProperties: JsObject // typed to match the JsObject built by the Format below
)

object ExtensionClassContainerImpl1 {
  implicit lazy val extensionClassContainerImpl1JsonFormat: Format[ExtensionClassContainerImpl1] = {
    val realJsonFormat = Json.format[ExtensionClassContainerImpl1]
    // JSON keys as produced by Json.format; backticks are Scala-only escaping, so the key is "class"
    val declaredPropNames = Set("class", "links", "map")
    Format(
      Reads {
        case JsObject(xs) =>
          val declaredProps = xs.filterKeys(declaredPropNames)
          val additionalProps = JsObject(xs -- declaredPropNames)
          val restructuredProps = declaredProps + ("additionalProperties" -> additionalProps)
          val newObj = JsObject(restructuredProps)
          realJsonFormat.reads(newObj)
        case _ =>
          JsError("error.expected.jsobject")
      },
      Writes { extensionClassContainerImpl1 =>
        val jsObj = realJsonFormat.writes(extensionClassContainerImpl1)
        val additionalProps = jsObj.value("additionalProperties").as[JsObject]
        val declaredProps = jsObj - "additionalProperties"
        val newObj = declaredProps ++ additionalProps
        newObj
      }
    )
  }
}
cliffano/swaggy-jenkins
clients/scala-play-server/generated/app/model/ExtensionClassContainerImpl1.scala
Scala
mit
1,616
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.tools import java.util.Properties import kafka.integration.KafkaServerTestHarness import kafka.server.KafkaConfig import kafka.utils.{Exit, Logging, TestUtils} import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.kafka.common.serialization.StringSerializer import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.{BeforeEach, Test} class GetOffsetShellTest extends KafkaServerTestHarness with Logging { private val topicCount = 4 private val offsetTopicPartitionCount = 4 override def generateConfigs: collection.Seq[KafkaConfig] = TestUtils.createBrokerConfigs(1, zkConnect) .map { p => p.put(KafkaConfig.OffsetsTopicPartitionsProp, Int.box(offsetTopicPartitionCount)) p }.map(KafkaConfig.fromProps) @BeforeEach def createTestAndInternalTopics(): Unit = { Range(1, topicCount + 1).foreach(i => createTopic(topicName(i), i)) val props = new Properties() props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokerList) props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer]) props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, classOf[StringSerializer]) // Send X messages to each partition of topicX val producer = new KafkaProducer[String, String](props) Range(1, topicCount + 1).foreach(i => Range(0, i*i) .foreach(msgCount => producer.send(new ProducerRecord[String, String](topicName(i), msgCount % i, null, "val" + msgCount)))) producer.close() TestUtils.createOffsetsTopic(zkClient, servers) } @Test def testNoFilterOptions(): Unit = { val offsets = executeAndParse(Array()) assertEquals(expectedOffsetsWithInternal(), offsets) } @Test def testInternalExcluded(): Unit = { val offsets = executeAndParse(Array("--exclude-internal-topics")) assertEquals(expectedTestTopicOffsets(), offsets) } @Test def testTopicNameArg(): Unit = { Range(1, topicCount + 1).foreach(i => { val offsets = executeAndParse(Array("--topic", topicName(i))) assertEquals(expectedOffsetsForTopic(i), offsets, () => "Offset output did not match for " + topicName(i)) }) } @Test def testTopicPatternArg(): Unit = { val offsets = executeAndParse(Array("--topic", "topic.*")) assertEquals(expectedTestTopicOffsets(), offsets) } @Test def testPartitionsArg(): Unit = { val offsets = executeAndParse(Array("--partitions", "0,1")) assertEquals(expectedOffsetsWithInternal().filter { case (_, partition, _) => partition <= 1 }, offsets) } @Test def testTopicPatternArgWithPartitionsArg(): Unit = { val offsets = executeAndParse(Array("--topic", "topic.*", "--partitions", "0,1")) assertEquals(expectedTestTopicOffsets().filter { case (_, partition, _) => partition <= 1 }, offsets) } @Test def testTopicPartitionsArg(): 
Unit = { val offsets = executeAndParse(Array("--topic-partitions", "topic1:0,topic2:1,topic(3|4):2,__.*:3")) assertEquals( List( ("__consumer_offsets", 3, Some(0)), ("topic1", 0, Some(1)), ("topic2", 1, Some(2)), ("topic3", 2, Some(3)), ("topic4", 2, Some(4)) ), offsets ) } @Test def testTopicPartitionsArgWithInternalExcluded(): Unit = { val offsets = executeAndParse(Array("--topic-partitions", "topic1:0,topic2:1,topic(3|4):2,__.*:3", "--exclude-internal-topics")) assertEquals( List( ("topic1", 0, Some(1)), ("topic2", 1, Some(2)), ("topic3", 2, Some(3)), ("topic4", 2, Some(4)) ), offsets ) } @Test def testTopicPartitionsNotFoundForNonExistentTopic(): Unit = { assertExitCodeIsOne(Array("--topic", "some_nonexistent_topic")) } @Test def testTopicPartitionsNotFoundForExcludedInternalTopic(): Unit = { assertExitCodeIsOne(Array("--topic", "some_nonexistent_topic:*")) } @Test def testTopicPartitionsNotFoundForNonMatchingTopicPartitionPattern(): Unit = { assertExitCodeIsOne(Array("--topic-partitions", "__consumer_offsets", "--exclude-internal-topics")) } @Test def testTopicPartitionsFlagWithTopicFlagCauseExit(): Unit = { assertExitCodeIsOne(Array("--topic-partitions", "__consumer_offsets", "--topic", "topic1")) } @Test def testTopicPartitionsFlagWithPartitionsFlagCauseExit(): Unit = { assertExitCodeIsOne(Array("--topic-partitions", "__consumer_offsets", "--partitions", "0")) } private def expectedOffsetsWithInternal(): List[(String, Int, Option[Long])] = { Range(0, offsetTopicPartitionCount).map(i => ("__consumer_offsets", i, Some(0L))).toList ++ expectedTestTopicOffsets() } private def expectedTestTopicOffsets(): List[(String, Int, Option[Long])] = { Range(1, topicCount + 1).flatMap(i => expectedOffsetsForTopic(i)).toList } private def expectedOffsetsForTopic(i: Int): List[(String, Int, Option[Long])] = { val name = topicName(i) Range(0, i).map(p => (name, p, Some(i.toLong))).toList } private def topicName(i: Int): String = "topic" + i private def assertExitCodeIsOne(args: Array[String]): Unit = { var exitStatus: Option[Int] = None Exit.setExitProcedure { (status, _) => exitStatus = Some(status) throw new RuntimeException } try { GetOffsetShell.main(addBootstrapServer(args)) } catch { case e: RuntimeException => } finally { Exit.resetExitProcedure() } assertEquals(Some(1), exitStatus) } private def executeAndParse(args: Array[String]): List[(String, Int, Option[Long])] = { val output = executeAndGrabOutput(args) output.split(System.lineSeparator()) .map(_.split(":")) .filter(_.length >= 2) .map { line => val topic = line(0) val partition = line(1).toInt val timestamp = if (line.length == 2 || line(2).isEmpty) None else Some(line(2).toLong) (topic, partition, timestamp) } .toList } private def executeAndGrabOutput(args: Array[String]): String = { TestUtils.grabConsoleOutput(GetOffsetShell.main(addBootstrapServer(args))) } private def addBootstrapServer(args: Array[String]): Array[String] = { args ++ Array("--bootstrap-server", brokerList) } }
guozhangwang/kafka
core/src/test/scala/kafka/tools/GetOffsetShellTest.scala
Scala
apache-2.0
7,151
// See the LICENCE.txt file distributed with this work for additional // information regarding copyright ownership. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package scray.core.service import java.net.InetSocketAddress import com.esotericsoftware.kryo.Kryo import scala.collection.JavaConversions._ import com.twitter.finagle.ListeningServer import com.twitter.finagle.Thrift import com.twitter.util.Await import com.twitter.util.Duration import com.twitter.util.Time import com.twitter.util.TimerTask import com.twitter.util.JavaTimer import scray.querying.description._ import scray.querying.caching.serialization._ import scray.common.serialization.KryoPoolSerialization import scray.common.serialization.numbers.KryoSerializerNumber import scray.common.properties.ScrayProperties import scray.common.properties.IntProperty import scray.common.properties.predefined.PredefinedProperties import scray.service.qservice.thrifscala.ScrayMetaTService import scray.service.qservice.thrifscala.ScrayTServiceEndpoint import scray.core.service.properties.ScrayServicePropertiesRegistration import scray.service.qmodel.thrifscala.ScrayUUID import com.typesafe.scalalogging.LazyLogging trait KryoPoolRegistration { def registerSerializers = RegisterRowCachingSerializers() } abstract class ScrayStatefulTServer extends AbstractScrayTServer { val server = Thrift.server.serveIface(SCRAY_QUERY_LISTENING_ENDPOINT, ScrayStatefulTServiceImpl()) override def getQueryServer: ListeningServer = server override def getVersion: String = "1.8.2" } abstract class ScrayStatelessTServer extends AbstractScrayTServer { val server = Thrift.server.serveIface(SCRAY_QUERY_LISTENING_ENDPOINT, ScrayStatelessTServiceImpl()) override def getQueryServer: ListeningServer = server override def getVersion: String = "0.9.2" } abstract class AbstractScrayTServer extends KryoPoolRegistration with App with LazyLogging { // abstract functions to be customized def initializeResources: Unit def destroyResources: Unit def getQueryServer: ListeningServer def getVersion: String def configureProperties configureProperties // kryo pool registrars registerSerializers // custom init initializeResources // the meta service is always part of the scray server val metaServer: ListeningServer = Thrift.server.serveIface(SCRAY_META_LISTENING_ENDPOINT, ScrayMetaTServiceImpl) // endpoint registration refresh timer private val refreshTimer = new JavaTimer(isDaemon = false) // refresh task handle private var refreshTask: Option[TimerTask] = None // this endpoint val endpoint = ScrayTServiceEndpoint(SCRAY_QUERY_HOST_ENDPOINT.getHostString, SCRAY_QUERY_HOST_ENDPOINT.getPort) val refreshPeriod = EXPIRATION * 2 / 3 def addrStr(): String = s"${SCRAY_QUERY_HOST_ENDPOINT.getHostString}:${SCRAY_QUERY_HOST_ENDPOINT.getPort}/${SCRAY_META_HOST_ENDPOINT.getPort}" // register this endpoint with all seeds and schedule regular refresh // the refresh loop keeps the server running SCRAY_SEEDS.map(inetAddr2EndpointString(_)).foreach { seedAddr => val client = 
Thrift.client.newIface[ScrayMetaTService.FutureIface](seedAddr) if (Await.result(client.ping())) { logger.info(s"$addrStr adding local service endpoint ($endpoint) to $seedAddr.") val _ep = Await.result(client.addServiceEndpoint(endpoint)) refreshTask = Some(refreshTimer.schedule(refreshPeriod.fromNow, refreshPeriod)(refresh(_ep.endpointId.get))) } } println(s"Scray Server Version ${getVersion} started on ${addrStr}. Waiting for client requests...") /** * Refresh the registry entry */ def refresh(id: ScrayUUID): Unit = { SCRAY_SEEDS.map(inetAddr2EndpointString(_)).foreach { seedAddr => try { val client = Thrift.client.newIface[ScrayMetaTService.FutureIface](seedAddr) if (Await.result(client.ping())) { logger.trace(s"$addrStr refreshing service endpoint ($id).") client.refreshServiceEndpoint(id) } } catch { case ex: Exception => logger.warn(s"Endpoint refresh failed: $ex") } } } override def finalize = { destroyResources } }
scray/scray
scray-service/src/main/scala/scray/core/service/scrayTServer.scala
Scala
apache-2.0
4,681
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql import org.apache.spark.sql.test.SharedSQLContext class SubquerySuite extends QueryTest with SharedSQLContext { import testImplicits._ setupTestData() val row = identity[(java.lang.Integer, java.lang.Double)](_) lazy val l = Seq( row(1, 2.0), row(1, 2.0), row(2, 1.0), row(2, 1.0), row(3, 3.0), row(null, null), row(null, 5.0), row(6, null)).toDF("a", "b") lazy val r = Seq( row(2, 3.0), row(2, 3.0), row(3, 2.0), row(4, 1.0), row(null, null), row(null, 5.0), row(6, null)).toDF("c", "d") lazy val t = r.filter($"c".isNotNull && $"d".isNotNull) protected override def beforeAll(): Unit = { super.beforeAll() l.createOrReplaceTempView("l") r.createOrReplaceTempView("r") t.createOrReplaceTempView("t") } test("SPARK-18854 numberedTreeString for subquery") { val df = sql("select * from range(10) where id not in " + "(select id from range(2) union all select id from range(2))") // The depth first traversal of the plan tree val dfs = Seq("Project", "Filter", "Union", "Project", "Range", "Project", "Range", "Range") val numbered = df.queryExecution.analyzed.numberedTreeString.split("\\n") // There should be 8 plan nodes in total assert(numbered.size == dfs.size) for (i <- dfs.indices) { val node = df.queryExecution.analyzed(i) assert(node.nodeName == dfs(i)) assert(numbered(i).contains(node.nodeName)) } } test("SPARK-15791: rdd deserialization does not crash") { sql("select (select 1 as b) as b").rdd.count() } test("simple uncorrelated scalar subquery") { checkAnswer( sql("select (select 1 as b) as b"), Array(Row(1)) ) checkAnswer( sql("select (select (select 1) + 1) + 1"), Array(Row(3)) ) // string type checkAnswer( sql("select (select 's' as s) as b"), Array(Row("s")) ) } test("define CTE in CTE subquery") { checkAnswer( sql( """ | with t2 as (with t1 as (select 1 as b, 2 as c) select b, c from t1) | select a from (select 1 as a union all select 2 as a) t | where a = (select max(b) from t2) """.stripMargin), Array(Row(1)) ) checkAnswer( sql( """ | with t2 as (with t1 as (select 1 as b, 2 as c) select b, c from t1), | t3 as ( | with t4 as (select 1 as d, 3 as e) | select * from t4 cross join t2 where t2.b = t4.d | ) | select a from (select 1 as a union all select 2 as a) | where a = (select max(d) from t3) """.stripMargin), Array(Row(1)) ) } test("uncorrelated scalar subquery in CTE") { checkAnswer( sql("with t2 as (select 1 as b, 2 as c) " + "select a from (select 1 as a union all select 2 as a) t " + "where a = (select max(b) from t2) "), Array(Row(1)) ) } test("uncorrelated scalar subquery should return null if there is 0 rows") { checkAnswer( sql("select (select 's' as s limit 0) as b"), Array(Row(null)) ) } test("runtime error when the number of rows is greater than 1") { val error2 = 
intercept[RuntimeException] { sql("select (select a from (select 1 as a union all select 2 as a) t) as b").collect() } assert(error2.getMessage.contains( "more than one row returned by a subquery used as an expression") ) } test("uncorrelated scalar subquery on a DataFrame generated query") { val df = Seq((1, "one"), (2, "two"), (3, "three")).toDF("key", "value") df.createOrReplaceTempView("subqueryData") checkAnswer( sql("select (select key from subqueryData where key > 2 order by key limit 1) + 1"), Array(Row(4)) ) checkAnswer( sql("select -(select max(key) from subqueryData)"), Array(Row(-3)) ) checkAnswer( sql("select (select value from subqueryData limit 0)"), Array(Row(null)) ) checkAnswer( sql("select (select min(value) from subqueryData" + " where key = (select max(key) from subqueryData) - 1)"), Array(Row("two")) ) } test("SPARK-15677: Queries against local relations with scalar subquery in Select list") { withTempView("t1", "t2") { Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t1") Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t2") checkAnswer( sql("SELECT (select 1 as col) from t1"), Row(1) :: Row(1) :: Nil) checkAnswer( sql("SELECT (select max(c1) from t2) from t1"), Row(2) :: Row(2) :: Nil) checkAnswer( sql("SELECT 1 + (select 1 as col) from t1"), Row(2) :: Row(2) :: Nil) checkAnswer( sql("SELECT c1, (select max(c1) from t2) + c2 from t1"), Row(1, 3) :: Row(2, 4) :: Nil) checkAnswer( sql("SELECT c1, (select max(c1) from t2 where t1.c2 = t2.c2) from t1"), Row(1, 1) :: Row(2, 2) :: Nil) } } test("SPARK-14791: scalar subquery inside broadcast join") { val df = sql("select a, sum(b) as s from l group by a having a > (select avg(a) from l)") val expected = Row(3, 2.0, 3, 3.0) :: Row(6, null, 6, null) :: Nil (1 to 10).foreach { _ => checkAnswer(r.join(df, $"c" === $"a"), expected) } } test("EXISTS predicate subquery") { checkAnswer( sql("select * from l where exists (select * from r where l.a = r.c)"), Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil) checkAnswer( sql("select * from l where exists (select * from r where l.a = r.c) and l.a <= 2"), Row(2, 1.0) :: Row(2, 1.0) :: Nil) } test("NOT EXISTS predicate subquery") { checkAnswer( sql("select * from l where not exists (select * from r where l.a = r.c)"), Row(1, 2.0) :: Row(1, 2.0) :: Row(null, null) :: Row(null, 5.0) :: Nil) checkAnswer( sql("select * from l where not exists (select * from r where l.a = r.c and l.b < r.d)"), Row(1, 2.0) :: Row(1, 2.0) :: Row(3, 3.0) :: Row(null, null) :: Row(null, 5.0) :: Row(6, null) :: Nil) } test("EXISTS predicate subquery within OR") { checkAnswer( sql("select * from l where exists (select * from r where l.a = r.c)" + " or exists (select * from r where l.a = r.c)"), Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil) checkAnswer( sql("select * from l where not exists (select * from r where l.a = r.c and l.b < r.d)" + " or not exists (select * from r where l.a = r.c)"), Row(1, 2.0) :: Row(1, 2.0) :: Row(3, 3.0) :: Row(null, null) :: Row(null, 5.0) :: Row(6, null) :: Nil) } test("IN predicate subquery") { checkAnswer( sql("select * from l where l.a in (select c from r)"), Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil) checkAnswer( sql("select * from l where l.a in (select c from r where l.b < r.d)"), Row(2, 1.0) :: Row(2, 1.0) :: Nil) checkAnswer( sql("select * from l where l.a in (select c from r) and l.a > 2 and l.b is not null"), Row(3, 3.0) :: Nil) } test("NOT IN predicate subquery") { checkAnswer( sql("select 
* from l where a not in (select c from r)"), Nil) checkAnswer( sql("select * from l where a not in (select c from r where c is not null)"), Row(1, 2.0) :: Row(1, 2.0) :: Nil) checkAnswer( sql("select * from l where (a, b) not in (select c, d from t) and a < 4"), Row(1, 2.0) :: Row(1, 2.0) :: Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Nil) // Empty sub-query checkAnswer( sql("select * from l where (a, b) not in (select c, d from r where c > 10)"), Row(1, 2.0) :: Row(1, 2.0) :: Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(null, null) :: Row(null, 5.0) :: Row(6, null) :: Nil) } test("IN predicate subquery within OR") { checkAnswer( sql("select * from l where l.a in (select c from r)" + " or l.a in (select c from r where l.b < r.d)"), Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Row(6, null) :: Nil) intercept[AnalysisException] { sql("select * from l where a not in (select c from r)" + " or a not in (select c from r where c is not null)") } } test("complex IN predicate subquery") { checkAnswer( sql("select * from l where (a, b) not in (select c, d from r)"), Nil) checkAnswer( sql("select * from l where (a, b) not in (select c, d from t) and (a + b) is not null"), Row(1, 2.0) :: Row(1, 2.0) :: Row(2, 1.0) :: Row(2, 1.0) :: Row(3, 3.0) :: Nil) } test("same column in subquery and outer table") { checkAnswer( sql("select a from l l1 where a in (select a from l where a < 3 group by a)"), Row(1) :: Row(1) :: Row(2) :: Row(2) :: Nil ) } test("having with function in subquery") { checkAnswer( sql("select a from l group by 1 having exists (select 1 from r where d < min(b))"), Row(null) :: Row(1) :: Row(3) :: Nil) } test("SPARK-15832: Test embedded existential predicate sub-queries") { withTempView("t1", "t2", "t3", "t4", "t5") { Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t1") Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t2") Seq((1, 1), (2, 2), (1, 2)).toDF("c1", "c2").createOrReplaceTempView("t3") checkAnswer( sql( """ | select c1 from t1 | where c2 IN (select c2 from t2) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where c2 NOT IN (select c2 from t2) | """.stripMargin), Nil) checkAnswer( sql( """ | select c1 from t1 | where EXISTS (select c2 from t2) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where NOT EXISTS (select c2 from t2) | """.stripMargin), Nil) checkAnswer( sql( """ | select c1 from t1 | where NOT EXISTS (select c2 from t2) and | c2 IN (select c2 from t3) | """.stripMargin), Nil) checkAnswer( sql( """ | select c1 from t1 | where (case when c2 IN (select 1 as one) then 1 | else 2 end) = c1 | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where (case when c2 IN (select 1 as one) then 1 | else 2 end) | IN (select c2 from t2) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where (case when c2 IN (select c2 from t2) then 1 | else 2 end) | IN (select c2 from t3) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where (case when c2 IN (select c2 from t2) then 1 | when c2 IN (select c2 from t3) then 2 | else 3 end) | IN (select c2 from t1) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where (c1, (case when c2 IN (select c2 from t2) then 1 | when c2 IN (select c2 from t3) then 2 | else 3 end)) | IN (select c1, c2 from t1) | """.stripMargin), Row(1) :: Nil) checkAnswer( sql( """ | select c1 from t3 | where 
((case when c2 IN (select c2 from t2) then 1 else 2 end), | (case when c2 IN (select c2 from t3) then 2 else 3 end)) | IN (select c1, c2 from t3) | """.stripMargin), Row(1) :: Row(2) :: Row(1) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where ((case when EXISTS (select c2 from t2) then 1 else 2 end), | (case when c2 IN (select c2 from t3) then 2 else 3 end)) | IN (select c1, c2 from t3) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where (case when c2 IN (select c2 from t2) then 3 | else 2 end) | NOT IN (select c2 from t3) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where ((case when c2 IN (select c2 from t2) then 1 else 2 end), | (case when NOT EXISTS (select c2 from t3) then 2 | when EXISTS (select c2 from t2) then 3 | else 3 end)) | NOT IN (select c1, c2 from t3) | """.stripMargin), Row(1) :: Row(2) :: Nil) checkAnswer( sql( """ | select c1 from t1 | where (select max(c1) from t2 where c2 IN (select c2 from t3)) | IN (select c2 from t2) | """.stripMargin), Row(1) :: Row(2) :: Nil) } } test("correlated scalar subquery in where") { checkAnswer( sql("select * from l where b < (select max(d) from r where a = c)"), Row(2, 1.0) :: Row(2, 1.0) :: Nil) } test("correlated scalar subquery in select") { checkAnswer( sql("select a, (select sum(b) from l l2 where l2.a = l1.a) sum_b from l l1"), Row(1, 4.0) :: Row(1, 4.0) :: Row(2, 2.0) :: Row(2, 2.0) :: Row(3, 3.0) :: Row(null, null) :: Row(null, null) :: Row(6, null) :: Nil) } test("correlated scalar subquery in select (null safe)") { checkAnswer( sql("select a, (select sum(b) from l l2 where l2.a <=> l1.a) sum_b from l l1"), Row(1, 4.0) :: Row(1, 4.0) :: Row(2, 2.0) :: Row(2, 2.0) :: Row(3, 3.0) :: Row(null, 5.0) :: Row(null, 5.0) :: Row(6, null) :: Nil) } test("correlated scalar subquery in aggregate") { checkAnswer( sql("select a, (select sum(d) from r where a = c) sum_d from l l1 group by 1, 2"), Row(1, null) :: Row(2, 6.0) :: Row(3, 2.0) :: Row(null, null) :: Row(6, null) :: Nil) } test("SPARK-18504 extra GROUP BY column in correlated scalar subquery is not permitted") { withTempView("t") { Seq((1, 1), (1, 2)).toDF("c1", "c2").createOrReplaceTempView("t") val errMsg = intercept[AnalysisException] { sql("select (select sum(-1) from t t2 where t1.c2 = t2.c1 group by t2.c2) sum from t t1") } assert(errMsg.getMessage.contains( "A GROUP BY clause in a scalar correlated subquery cannot contain non-correlated columns:")) } } test("non-aggregated correlated scalar subquery") { val msg1 = intercept[AnalysisException] { sql("select a, (select b from l l2 where l2.a = l1.a) sum_b from l l1") } assert(msg1.getMessage.contains("Correlated scalar subqueries must be aggregated")) val msg2 = intercept[AnalysisException] { sql("select a, (select b from l l2 where l2.a = l1.a group by 1) sum_b from l l1") } assert(msg2.getMessage.contains( "The output of a correlated scalar subquery must be aggregated")) } test("non-equal correlated scalar subquery") { val msg1 = intercept[AnalysisException] { sql("select a, (select sum(b) from l l2 where l2.a < l1.a) sum_b from l l1") } assert(msg1.getMessage.contains( "Correlated column is not allowed in a non-equality predicate:")) } test("disjunctive correlated scalar subquery") { checkAnswer( sql(""" |select a |from l |where (select count(*) | from r | where (a = c and d = 2.0) or (a = c and d = 1.0)) > 0 """.stripMargin), Row(3) :: Nil) } test("SPARK-15370: COUNT bug in WHERE clause (Filter)") { // Case 1: Canonical example of 
the COUNT bug checkAnswer( sql("select l.a from l where (select count(*) from r where l.a = r.c) < l.a"), Row(1) :: Row(1) :: Row(3) :: Row(6) :: Nil) // Case 2: count(*) = 0; could be rewritten to NOT EXISTS but currently uses // a rewrite that is vulnerable to the COUNT bug checkAnswer( sql("select l.a from l where (select count(*) from r where l.a = r.c) = 0"), Row(1) :: Row(1) :: Row(null) :: Row(null) :: Nil) // Case 3: COUNT bug without a COUNT aggregate checkAnswer( sql("select l.a from l where (select sum(r.d) is null from r where l.a = r.c)"), Row(1) :: Row(1) ::Row(null) :: Row(null) :: Row(6) :: Nil) } test("SPARK-15370: COUNT bug in SELECT clause (Project)") { checkAnswer( sql("select a, (select count(*) from r where l.a = r.c) as cnt from l"), Row(1, 0) :: Row(1, 0) :: Row(2, 2) :: Row(2, 2) :: Row(3, 1) :: Row(null, 0) :: Row(null, 0) :: Row(6, 1) :: Nil) } test("SPARK-15370: COUNT bug in HAVING clause (Filter)") { checkAnswer( sql("select l.a as grp_a from l group by l.a " + "having (select count(*) from r where grp_a = r.c) = 0 " + "order by grp_a"), Row(null) :: Row(1) :: Nil) } test("SPARK-15370: COUNT bug in Aggregate") { checkAnswer( sql("select l.a as aval, sum((select count(*) from r where l.a = r.c)) as cnt " + "from l group by l.a order by aval"), Row(null, 0) :: Row(1, 0) :: Row(2, 4) :: Row(3, 1) :: Row(6, 1) :: Nil) } test("SPARK-15370: COUNT bug negative examples") { // Case 1: Potential COUNT bug case that was working correctly prior to the fix checkAnswer( sql("select l.a from l where (select sum(r.d) from r where l.a = r.c) is null"), Row(1) :: Row(1) :: Row(null) :: Row(null) :: Row(6) :: Nil) // Case 2: COUNT aggregate but no COUNT bug due to > 0 test. checkAnswer( sql("select l.a from l where (select count(*) from r where l.a = r.c) > 0"), Row(2) :: Row(2) :: Row(3) :: Row(6) :: Nil) // Case 3: COUNT inside aggregate expression but no COUNT bug. 
checkAnswer( sql("select l.a from l where (select count(*) + sum(r.d) from r where l.a = r.c) = 0"), Nil) } test("SPARK-15370: COUNT bug in subquery in subquery in subquery") { checkAnswer( sql("""select l.a from l |where ( | select cntPlusOne + 1 as cntPlusTwo from ( | select cnt + 1 as cntPlusOne from ( | select sum(r.c) s, count(*) cnt from r where l.a = r.c having cnt = 0 | ) | ) |) = 2""".stripMargin), Row(1) :: Row(1) :: Row(null) :: Row(null) :: Nil) } test("SPARK-15370: COUNT bug with nasty predicate expr") { checkAnswer( sql("select l.a from l where " + "(select case when count(*) = 1 then null else count(*) end as cnt " + "from r where l.a = r.c) = 0"), Row(1) :: Row(1) :: Row(null) :: Row(null) :: Nil) } test("SPARK-15370: COUNT bug with attribute ref in subquery input and output ") { checkAnswer( sql( """ |select l.b, (select (r.c + count(*)) is null |from r |where l.a = r.c group by r.c) from l """.stripMargin), Row(1.0, false) :: Row(1.0, false) :: Row(2.0, true) :: Row(2.0, true) :: Row(3.0, false) :: Row(5.0, true) :: Row(null, false) :: Row(null, true) :: Nil) } test("SPARK-16804: Correlated subqueries containing LIMIT - 1") { withTempView("onerow") { Seq(1).toDF("c1").createOrReplaceTempView("onerow") checkAnswer( sql( """ | select c1 from onerow t1 | where exists (select 1 from onerow t2 where t1.c1=t2.c1) | and exists (select 1 from onerow LIMIT 1)""".stripMargin), Row(1) :: Nil) } } test("SPARK-16804: Correlated subqueries containing LIMIT - 2") { withTempView("onerow") { Seq(1).toDF("c1").createOrReplaceTempView("onerow") checkAnswer( sql( """ | select c1 from onerow t1 | where exists (select 1 | from (select c1 from onerow t2 LIMIT 1) t2 | where t1.c1=t2.c1)""".stripMargin), Row(1) :: Nil) } } test("SPARK-17337: Incorrect column resolution leads to incorrect results") { withTempView("t1", "t2") { Seq(1, 2).toDF("c1").createOrReplaceTempView("t1") Seq(1).toDF("c2").createOrReplaceTempView("t2") checkAnswer( sql( """ | select * | from (select t2.c2+1 as c3 | from t1 left join t2 on t1.c1=t2.c2) t3 | where c3 not in (select c2 from t2)""".stripMargin), Row(2) :: Nil) } } test("SPARK-17348: Correlated subqueries with non-equality predicate (good case)") { withTempView("t1", "t2") { Seq((1, 1)).toDF("c1", "c2").createOrReplaceTempView("t1") Seq((1, 1), (2, 0)).toDF("c1", "c2").createOrReplaceTempView("t2") // Simple case checkAnswer( sql( """ | select c1 | from t1 | where c1 in (select t2.c1 | from t2 | where t1.c2 >= t2.c2)""".stripMargin), Row(1) :: Nil) // More complex case with OR predicate checkAnswer( sql( """ | select t1.c1 | from t1, t1 as t3 | where t1.c1 = t3.c1 | and (t1.c1 in (select t2.c1 | from t2 | where t1.c2 >= t2.c2 | or t3.c2 < t2.c2) | or t1.c2 >= 0)""".stripMargin), Row(1) :: Nil) } } test("SPARK-17348: Correlated subqueries with non-equality predicate (error case)") { withTempView("t1", "t2", "t3", "t4") { Seq((1, 1)).toDF("c1", "c2").createOrReplaceTempView("t1") Seq((1, 1), (2, 0)).toDF("c1", "c2").createOrReplaceTempView("t2") Seq((2, 1)).toDF("c1", "c2").createOrReplaceTempView("t3") Seq((1, 1), (2, 2)).toDF("c1", "c2").createOrReplaceTempView("t4") // Simplest case intercept[AnalysisException] { sql( """ | select t1.c1 | from t1 | where t1.c1 in (select max(t2.c1) | from t2 | where t1.c2 >= t2.c2)""".stripMargin).collect() } // Add a HAVING on top and augmented within an OR predicate intercept[AnalysisException] { sql( """ | select t1.c1 | from t1 | where t1.c1 in (select max(t2.c1) | from t2 | where t1.c2 >= t2.c2 | having count(*) > 0 ) | or 
t1.c2 >= 0""".stripMargin).collect() } // Add a HAVING on top and augmented within an OR predicate intercept[AnalysisException] { sql( """ | select t1.c1 | from t1, t1 as t3 | where t1.c1 = t3.c1 | and (t1.c1 in (select max(t2.c1) | from t2 | where t1.c2 = t2.c2 | or t3.c2 = t2.c2) | )""".stripMargin).collect() } // In Window expression: changing the data set to // demonstrate if this query ran, it would return incorrect result. intercept[AnalysisException] { sql( """ | select c1 | from t3 | where c1 in (select max(t4.c1) over () | from t4 | where t3.c2 >= t4.c2)""".stripMargin).collect() } } } // This restriction applies to // the permutation of { LOJ, ROJ, FOJ } x { EXISTS, IN, scalar subquery } // where correlated predicates appears in right operand of LOJ, // or in left operand of ROJ, or in either operand of FOJ. // The test cases below cover the representatives of the patterns test("Correlated subqueries in outer joins") { withTempView("t1", "t2", "t3") { Seq(1).toDF("c1").createOrReplaceTempView("t1") Seq(2).toDF("c1").createOrReplaceTempView("t2") Seq(1).toDF("c1").createOrReplaceTempView("t3") // Left outer join (LOJ) in IN subquery context intercept[AnalysisException] { sql( """ | select t1.c1 | from t1 | where 1 IN (select 1 | from t3 left outer join | (select c1 from t2 where t1.c1 = 2) t2 | on t2.c1 = t3.c1)""".stripMargin).collect() } // Right outer join (ROJ) in EXISTS subquery context intercept[AnalysisException] { sql( """ | select t1.c1 | from t1 | where exists (select 1 | from (select c1 from t2 where t1.c1 = 2) t2 | right outer join t3 | on t2.c1 = t3.c1)""".stripMargin).collect() } // SPARK-18578: Full outer join (FOJ) in scalar subquery context intercept[AnalysisException] { sql( """ | select (select max(1) | from (select c1 from t2 where t1.c1 = 2 and t1.c1=t2.c1) t2 | full join t3 | on t2.c1=t3.c1) | from t1""".stripMargin).collect() } } } // Generate operator test("Correlated subqueries in LATERAL VIEW") { withTempView("t1", "t2") { Seq((1, 1), (2, 0)).toDF("c1", "c2").createOrReplaceTempView("t1") Seq[(Int, Array[Int])]((1, Array(1, 2)), (2, Array(-1, -3))) .toDF("c1", "arr_c2").createTempView("t2") checkAnswer( sql( """ | SELECT c2 | FROM t1 | WHERE EXISTS (SELECT * | FROM t2 LATERAL VIEW explode(arr_c2) q AS c2 WHERE t1.c1 = t2.c1)""".stripMargin), Row(1) :: Row(0) :: Nil) val msg1 = intercept[AnalysisException] { sql( """ | SELECT c1 | FROM t2 | WHERE EXISTS (SELECT * | FROM t1 LATERAL VIEW explode(t2.arr_c2) q AS c2 | WHERE t1.c1 = t2.c1) """.stripMargin) } assert(msg1.getMessage.contains( "Expressions referencing the outer query are not supported outside of WHERE/HAVING")) } } test("SPARK-19933 Do not eliminate top-level aliases in sub-queries") { withTempView("t1", "t2") { spark.range(4).createOrReplaceTempView("t1") checkAnswer( sql("select * from t1 where id in (select id as id from t1)"), Row(0) :: Row(1) :: Row(2) :: Row(3) :: Nil) spark.range(2).createOrReplaceTempView("t2") checkAnswer( sql("select * from t1 where id in (select id as id from t2)"), Row(0) :: Row(1) :: Nil) } } test("ListQuery and Exists should work even no correlated references") { checkAnswer( sql("select * from l, r where l.a = r.c AND (r.d in (select d from r) OR l.a >= 1)"), Row(2, 1.0, 2, 3.0) :: Row(2, 1.0, 2, 3.0) :: Row(2, 1.0, 2, 3.0) :: Row(2, 1.0, 2, 3.0) :: Row(3.0, 3.0, 3, 2.0) :: Row(6, null, 6, null) :: Nil) checkAnswer( sql("select * from l, r where l.a = r.c + 1 AND (exists (select * from r) OR l.a = r.c)"), Row(3, 3.0, 2, 3.0) :: Row(3, 3.0, 2, 3.0) :: Nil) } 
test("SPARK-20688: correctly check analysis for scalar sub-queries") { withTempView("t") { Seq(1 -> "a").toDF("i", "j").createOrReplaceTempView("t") val e = intercept[AnalysisException](sql("SELECT (SELECT count(*) FROM t WHERE a = 1)")) assert(e.message.contains("cannot resolve '`a`' given input columns: [t.i, t.j]")) } } }
nilsgrabbert/spark
sql/core/src/test/scala/org/apache/spark/sql/SubquerySuite.scala
Scala
apache-2.0
29,648
package com.realizationtime.btdogg.commons import java.nio.file.Path import scala.util.{Failure, Success, Try} case class ParsingResult[+T](key: TKey, path: Path, result: Try[T]) { def copyFailed[R](): ParsingResult[R] = { result match { case Failure(t) => ParsingResult[R](key, path, Failure(t)) case _ => throw new IllegalArgumentException(s"copyFailed method available only for ParsingResults with failed result. $this") } } def copyTyped[R](newResult: Try[R]): ParsingResult[R] = ParsingResult(key, path, newResult) def copyTyped[R](newResult: R): ParsingResult[R] = ParsingResult(key, path, Success(newResult)) }
bwrega/btdogg
src/main/scala/com/realizationtime/btdogg/parsing/ParsingResult.scala
Scala
mit
651
/** * Copyright 2013 Netflix, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package rx.lang.scala /** * Emitted by Observables returned by [[rx.lang.scala.Observable.materialize]]. */ sealed trait Notification[+T] { private [scala] val asJavaNotification: rx.Notification[_ <: T] override def equals(that: Any): Boolean = that match { case other: Notification[_] => asJavaNotification.equals(other.asJavaNotification) case _ => false } override def hashCode(): Int = asJavaNotification.hashCode() /** * Invokes the function corresponding to the notification. * * @param onNext * The function to invoke for an [[rx.lang.scala.Notification.OnNext]] notification. * @param onError * The function to invoke for an [[rx.lang.scala.Notification.OnError]] notification. * @param onCompleted * The function to invoke for an [[rx.lang.scala.Notification.OnCompleted]] notification. */ def accept[R](onNext: T=>R, onError: Throwable=>R, onCompleted: ()=>R): R = { this match { case Notification.OnNext(value) => onNext(value) case Notification.OnError(error) => onError(error) case Notification.OnCompleted => onCompleted() } } def apply[R](onNext: T=>R, onError: Throwable=>R, onCompleted: ()=>R): R = accept(onNext, onError, onCompleted) /** * Invokes the observer corresponding to the notification * * @param observer * The observer that to observe the notification */ def accept(observer: Observer[T]): Unit = { this match { case Notification.OnNext(value) => observer.onNext(value) case Notification.OnError(error) => observer.onError(error) case Notification.OnCompleted => observer.onCompleted() } } def apply(observer: Observer[T]): Unit = accept(observer) } /** * Provides pattern matching support and constructors for Notifications. * * Example: * {{{ * import Notification._ * Observable.just(1, 2, 3).materialize.subscribe(n => n match { * case OnNext(v) => println("Got value " + v) * case OnCompleted => println("Completed") * case OnError(err) => println("Error: " + err.getMessage) * }) * }}} */ object Notification { private [scala] def apply[T](n: rx.Notification[_ <: T]): Notification[T] = n.getKind match { case rx.Notification.Kind.OnNext => new OnNext(n) case rx.Notification.Kind.OnCompleted => OnCompleted case rx.Notification.Kind.OnError => new OnError(n) } // OnNext, OnError, OnCompleted are not case classes because we don't want pattern matching // to extract the rx.Notification object OnNext { /** * Constructor for onNext notifications. * * @param value * The item passed to the onNext method. */ def apply[T](value: T): Notification[T] = { Notification(rx.Notification.createOnNext[T](value)) } /** * Extractor for onNext notifications. * @param notification * The [[rx.lang.scala.Notification]] to be destructed. * @return * The item contained in this notification. 
     */
    def unapply[U](notification: Notification[U]): Option[U] = notification match {
      case onNext: OnNext[U] => Some(onNext.value)
      case _ => None
    }
  }

  class OnNext[+T] private[scala] (val asJavaNotification: rx.Notification[_ <: T]) extends Notification[T] {
    def value: T = asJavaNotification.getValue
    override def toString = s"OnNext($value)"
  }

  object OnError {

    /**
     * Constructor for onError notifications.
     *
     * @param error
     * The exception passed to the onError method.
     */
    def apply[T](error: Throwable): Notification[T] = {
      Notification(rx.Notification.createOnError[T](error))
    }

    /**
     * Destructor for onError notifications.
     *
     * @param notification
     * The [[rx.lang.scala.Notification]] to be deconstructed.
     * @return
     * The `java.lang.Throwable` value contained in this notification.
     */
    def unapply[U](notification: Notification[U]): Option[Throwable] = notification match {
      case onError: OnError[U] => Some(onError.error)
      case _ => None
    }
  }

  class OnError[+T] private[scala] (val asJavaNotification: rx.Notification[_ <: T]) extends Notification[T] {
    def error: Throwable = asJavaNotification.getThrowable
    override def toString = s"OnError($error)"
  }

  object OnCompleted extends Notification[Nothing] {
    override def toString = "OnCompleted"
    val asJavaNotification = rx.Notification.createOnCompleted[Nothing]()
  }

}
zjrstar/RxScala
src/main/scala/rx/lang/scala/Notification.scala
Scala
apache-2.0
5,182
package com.henryp.sparkfinance import com.henryp.sparkfinance.feeds._ import com.henryp.sparkfinance.logging.Logging import org.apache.spark.SparkContext import org.apache.spark.mllib.stat.Statistics import org.apache.spark.rdd.RDD import scala.reflect.ClassTag package object sparkjobs extends Logging { /** * @param all An RDD of key to content. Typically, filename to file content if loaded via SparkContext.wholeTextFiles. */ def aggregate[T: ClassTag](all: RDD[(String, String)], isNotMeta: String => Boolean, toDomain: (String, String) => T ): RDD[T] = { def toRDD(ticker: String, text: String): TraversableOnce[T] = { val lines = text.lines.filter(isNotMeta(_)) lines.map { case (line) => toDomain(ticker, line) } } all.flatMap { case(ticker, text) => toRDD(ticker, text) } } def data[T: ClassTag](raw: RDD[String], isNotMeta: String => Boolean, toDomain: String => T): RDD[T] = raw.filter(isNotMeta(_)).map(toDomain(_)) def pearsonCorrelationValue[K: ClassTag](keyVal1: RDD[(K, Double)], keyVal2: RDD[(K, Double)]): Double = { val joined: RDD[(K, (Double, Double))] = keyVal1.join(keyVal2) val series1 = joined map { case(key, forKey) => forKey._1 } val series2 = joined map { case(key, forKey) => forKey._2 } val algorithm = "pearson" Statistics.corr(series1, series2, algorithm) } def seriesFor[D <: Tuple2[(U, String), Double], U: ClassTag](aggregated: RDD[D], ticker: String, toDatePrice: (D) => (U, Double)): RDD[(U, Double)] = aggregated.filter(matchesTicker[D](ticker, _)).map(toDatePrice) def joinByDate[T: ClassTag](tickers: Seq[String], domain: RDD[((T, String), Double)], toFeature: (Tuple2[(T, String), Double] => (T, Double))): RDD[(T, Seq[Double])] = { val dependent = seriesFor(domain, tickers.head, toFeature) var joined = dependent.map(kv => (kv._1, Seq(kv._2))) for (independentTicker <- tickers.tail) { val independent = seriesFor(domain, independentTicker, toFeature) joined = joined.join(independent).map(kv => (kv._1, kv._2._1 :+ kv._2._2)) } joined } def waitForKeyThenStop(context: SparkContext): Unit = { info("Finished. Press any key to end app") Console.in.read context.stop() } }
PhillHenry/SparkFinance
sparkjobs/src/main/scala/com/henryp/sparkfinance/sparkjobs/package.scala
Scala
apache-2.0
2,587
/* * Licensed to Cloudera, Inc. under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. Cloudera, Inc. licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.cloudera.hue.livy.server import com.cloudera.hue.livy.LivyConf import com.cloudera.hue.livy.sessions.{State, Success} import org.json4s.JsonAST.{JNothing, JValue} import org.scalatest.{FlatSpec, Matchers} import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} class SessionManagerSpec extends FlatSpec with Matchers { class MockSession(val id: Int) extends Session { override def stop(): Future[Unit] = Future.successful(()) override def logLines(): IndexedSeq[String] = IndexedSeq() override def state: State = Success(0) } class MockSessionFactory extends SessionFactory[MockSession] { override def create(id: Int, createRequest: JValue): MockSession = new MockSession(id) } it should "garbage collect old sessions" in { val livyConf = new LivyConf() livyConf.set(SessionManager.LIVY_SERVER_SESSION_TIMEOUT, "100") val manager = new SessionManager(livyConf, new MockSessionFactory) val session = manager.create(JNothing) manager.get(session.id).isDefined should be(true) Await.result(manager.collectGarbage(), Duration.Inf) manager.get(session.id).isEmpty should be(true) } }
hdinsight/hue
apps/spark/java/livy-server/src/test/scala/com/cloudera/hue/livy/server/SessionManagerSpec.scala
Scala
apache-2.0
1,973
package pl.touk.nussknacker.engine.util.config import com.typesafe.config.ConfigFactory import org.scalatest.{FunSuite, Matchers} import java.net.URI import java.nio.file.Files import scala.jdk.CollectionConverters.mapAsJavaMapConverter class ConfigFactoryExtSpec extends FunSuite with Matchers { test("loads in correct order") { val conf1 = writeToTemp(Map("f1" -> "default", "f2" ->"not so default", "akka.http.server.request-timeout" -> "300s")) val conf2 = writeToTemp(Map("f1" -> "I win!")) val result = ConfigFactoryExt.parseConfigFallbackChain(List(conf1, conf2, URI.create("classpath:someConfig.conf")), getClass.getClassLoader) result.getString("f1") shouldBe "I win!" result.getString("f2") shouldBe "not so default" result.getString("f4") shouldBe "fromClasspath" result.hasPath("f5") shouldBe false result.getString("akka.http.server.request-timeout") shouldBe "300s" } def writeToTemp(map: Map[String, Any]): URI = { val temp = Files.createTempFile("ConfigFactoryExt", ".conf") temp.toFile.deleteOnExit() Files.write(temp, ConfigFactory.parseMap(map.asJava).root().render().getBytes) temp.toUri } }
TouK/nussknacker
utils/utils-internal/src/test/scala/pl/touk/nussknacker/engine/util/config/ConfigFactoryExtSpec.scala
Scala
apache-2.0
1,177
package poly.util.progressbar import java.time._ import poly.util.logging._ /** * A simple console-based progress bar. * @param task Name of the progress bar. * @param initialMax Initial estimation of the number of steps when the task is complete. * @param length The length of the progress bar shown in console. Default value is 25 characters. * * @author Tongfei Chen ([email protected]). */ class ProgressBar(val task: String, val initialMax: Int, val length: Int = 25) extends HasLogger { private[this] var current = 0 private[this] var max = initialMax private[this] var startTime: LocalDateTime = null private[this] var lastTime: LocalDateTime = null private[this] def repeat(x: Char, n: Int): String = { new String(Array.fill[Char](n)(x)) } private[this] def progress: Int = { if (max == 0) 0 else math.round(current.toDouble / max * length).toInt } private[this] def formatDuration(d: Duration): String = { val s = d.getSeconds "%d:%02d:%02d".format(s / 3600, (s % 3600) / 60, s % 60) } private[this] def eta(elapsed: Duration) = { if (max == 0) "?" else if (current == 0) "?" else formatDuration(elapsed.dividedBy(current).multipliedBy(max - current)) } private[this] def percentage: String = { val res = if (max == 0) "? %" else math.round(current.toDouble / max * 100).toString + "%" repeat(' ', 4 - res.length) + res // pad space before percentage } private[this] def forceShow(currentTime: LocalDateTime): Unit = { print('\r') val elapsed = Duration.between(startTime, currentTime) lastTime = currentTime print( task + s" $percentage" + " [" + repeat('=', progress) + repeat(' ', length - progress) + "] " + s"$current/$max " + "(" + formatDuration(elapsed) + " / " + eta(elapsed) + ") " ) } private[this] def show(): Unit = { val currentTime = LocalDateTime.now if (Duration.between(lastTime, currentTime).getSeconds < 1) return forceShow(currentTime) } /** * Starts the progress bar. */ def start() = { startTime = LocalDateTime.now lastTime = LocalDateTime.now logger.info(s"Task ($task) starts.") forceShow(lastTime) } /** * Advances the progress bar by a specific amount. * @param n Step size */ def stepBy(n: Int) = { current += n if (current > max) max = current show() } /** * Advances the progress bar by one step. */ def step() = { current += 1 if (current > max) max = current show() } /** * Gives a hint to the maximum value of the progress bar. * @param n Hint to the maximum value */ def maxHint(n: Int) = { max = n show() } /** * Stops the progress bar. */ def stop() = { forceShow(LocalDateTime.now) println() logger.info(s"Task ($task) is complete.") } }
ctongfei/poly-util
src/main/scala/poly/util/progressbar/ProgressBar.scala
Scala
mit
2,888
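A minimal usage sketch for the ProgressBar API above; the task name, item count and per-item work are made up, and the snippet assumes poly.util.logging.HasLogger (which the class mixes in) is available on the classpath.

import poly.util.progressbar.ProgressBar

object ProgressBarDemo extends App {
  // hypothetical task over roughly 1000 items; initialMax is only an estimate
  val bar = new ProgressBar("indexing", initialMax = 1000)
  bar.start()
  (1 to 950).foreach { _ =>
    Thread.sleep(1)  // stand-in for real per-item work
    bar.step()       // advance by one; the bar re-renders at most once per second
  }
  bar.maxHint(950)   // tighten the estimate once the true total is known
  bar.stop()         // prints the final state and logs task completion
}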
package com.wavesplatform.transaction.assets import com.wavesplatform.account.{AddressScheme, KeyPair, PrivateKey, PublicKey} import com.wavesplatform.crypto import com.wavesplatform.lang.ValidationError import com.wavesplatform.transaction.Asset.IssuedAsset import com.wavesplatform.transaction._ import com.wavesplatform.transaction.serialization.impl.BurnTxSerializer import com.wavesplatform.transaction.validation.TxValidator import com.wavesplatform.transaction.validation.impl.BurnTxValidator import monix.eval.Coeval import play.api.libs.json.JsObject import scala.util.Try final case class BurnTransaction( version: TxVersion, sender: PublicKey, asset: IssuedAsset, quantity: TxAmount, fee: TxAmount, timestamp: TxTimestamp, proofs: Proofs, chainId: Byte ) extends ProvenTransaction with VersionedTransaction with SigProofsSwitch with TxWithFee.InWaves with FastHashId with LegacyPBSwitch.V3 { override def builder: TransactionParser = BurnTransaction override val bodyBytes: Coeval[Array[Byte]] = BurnTxSerializer.bodyBytes(this) override val bytes: Coeval[Array[Byte]] = BurnTxSerializer.toBytes(this) override val json: Coeval[JsObject] = BurnTxSerializer.toJson(this) override def checkedAssets: Seq[IssuedAsset] = Seq(asset) } object BurnTransaction extends TransactionParser { type TransactionT = BurnTransaction override val typeId: TxType = 6: Byte override val supportedVersions: Set[TxVersion] = Set(1, 2, 3) implicit val validator: TxValidator[BurnTransaction] = BurnTxValidator implicit def sign(tx: BurnTransaction, privateKey: PrivateKey): BurnTransaction = tx.copy(proofs = Proofs(crypto.sign(privateKey, tx.bodyBytes()))) val serializer = BurnTxSerializer override def parseBytes(bytes: Array[TxVersion]): Try[BurnTransaction] = serializer.parseBytes(bytes) def create( version: TxVersion, sender: PublicKey, asset: IssuedAsset, quantity: Long, fee: Long, timestamp: Long, proofs: Proofs, chainId: Byte = AddressScheme.current.chainId ): Either[ValidationError, BurnTransaction] = BurnTransaction(version, sender, asset, quantity, fee, timestamp, proofs, chainId).validatedEither def signed( version: TxVersion, sender: PublicKey, asset: IssuedAsset, quantity: Long, fee: Long, timestamp: Long, signer: PrivateKey, chainId: Byte = AddressScheme.current.chainId ): Either[ValidationError, BurnTransaction] = create(version, sender, asset, quantity, fee, timestamp, Proofs.empty, chainId).map(_.signWith(signer)) def selfSigned( version: TxVersion, sender: KeyPair, asset: IssuedAsset, quantity: Long, fee: Long, timestamp: Long, chainId: Byte = AddressScheme.current.chainId ): Either[ValidationError, BurnTransaction] = { signed(version, sender.publicKey, asset, quantity, fee, timestamp, sender.privateKey, chainId) } }
wavesplatform/Waves
node/src/main/scala/com/wavesplatform/transaction/assets/BurnTransaction.scala
Scala
mit
3,040
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.h2o.sparkling.benchmarks import ai.h2o.sparkling.H2OFrame import ai.h2o.sparkling.ml.models.H2OMOJOModel import ai.h2o.sparkling.ml.utils.EstimatorCommonUtils import org.apache.spark.sql.{DataFrame, SaveMode} class TrainAlgorithmFromDataFrameViaCsvConversionBenchmark(context: BenchmarkContext, algorithmBundle: AlgorithmBundle) extends AlgorithmBenchmarkBase[DataFrame, H2OFrame](context, algorithmBundle) with EstimatorCommonUtils { override protected def initialize(): DataFrame = loadDataToDataFrame() override protected def convertInput(input: DataFrame): H2OFrame = { val className = this.getClass.getSimpleName val destination = context.workingDir.resolve(className) input.write.mode(SaveMode.Overwrite).csv(destination.toString) H2OFrame(destination, input.columns) } override protected def train(trainingFrame: H2OFrame): H2OMOJOModel = { val (name, params) = algorithmBundle.h2oAlgorithm val newParams = params ++ Map( "training_frame" -> trainingFrame.frameId, "response_column" -> context.datasetDetails.labelCol) trainAndGetMOJOModel(s"/3/ModelBuilders/$name", newParams) } override protected def cleanUpData(dataFrame: DataFrame, frame: H2OFrame): Unit = { removeFromCache(dataFrame) frame.delete() } }
h2oai/sparkling-water
benchmarks/src/main/scala/ai/h2o/sparkling/benchmarks/TrainAlgorithmFromDataFrameViaCsvConversionBenchmark.scala
Scala
apache-2.0
2,102
package io.slicker.core /** * Page request that allows to set limit/offset values for a request in terms of pagination * * @param page Page number. Should always be equal or greater than 1 * @param perPage Number of entities to get from request. Should always be equal or greater than 0 * @param sort Fields to sort by */ case class PageRequest(page: Int, perPage: Int, sort: Seq[(String, SortDirection)] = Seq.empty) { /** * Offset value for SQL queries */ def offset: Int = (page - 1) * perPage /** * Limit value for SQL queries */ def limit: Int = perPage } object PageRequest { /** * Requesting all pages */ val ALL = new PageRequest(1, Int.MaxValue) /** * Requesting first page */ val FIRSTPAGE = new PageRequest(1, 10) def apply(sort: Seq[(String, SortDirection)]): PageRequest = PageRequest(1, Int.MaxValue, sort) } sealed abstract class SortDirection(val name: String) { def isAsc: Boolean = this match { case SortDirection.Asc => true case SortDirection.Desc => false } def isDesc: Boolean = !isAsc } object SortDirection { case object Asc extends SortDirection("asc") case object Desc extends SortDirection("desc") def apply(name: String): SortDirection = name match { case Asc.name => Asc case Desc.name => Desc } }
ImLiar/slicker
slicker-core/src/main/scala/io/slicker/core/PageRequest.scala
Scala
apache-2.0
1,342
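A worked example of how PageRequest maps a page number onto SQL offset/limit, following only the arithmetic defined above; the page/perPage values and sort column are arbitrary.

import io.slicker.core.{PageRequest, SortDirection}

object PageRequestExample extends App {
  val page3 = PageRequest(page = 3, perPage = 20, sort = Seq("name" -> SortDirection.Asc))
  assert(page3.offset == 40)   // (3 - 1) * 20 rows skipped before this page
  assert(page3.limit == 20)    // rows fetched for this page
  assert(page3.sort.head._2.isAsc)
  assert(SortDirection("desc") == SortDirection.Desc)  // unknown names would hit a MatchError
}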
package se.culvertsoft.mgen.cpppack.generator.impl.classh import se.culvertsoft.mgen.api.model.ClassType import se.culvertsoft.mgen.compiler.util.SourceCodeBuffer object MkDefaultCtor { def apply(t: ClassType)(implicit txtBuffer: SourceCodeBuffer) { txtBuffer.tabs(1).textln(s"${t.shortName()}();") } }
culvertsoft/mgen
mgen-cppgenerator/src/main/scala/se/culvertsoft/mgen/cpppack/generator/impl/classh/MkDefaultCtor.scala
Scala
mit
316
/* * Copyright (c) 2009 Sony Pictures Imageworks Inc. * * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the * distribution. Neither the name of Sony Pictures Imageworks nor the * names of its contributors may be used to endorse or promote * products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. */ package com.imageworks.migration.tests.grant_and_revoke import com.imageworks.migration.{Limit, Migration, NotNull, PrimaryKey, Unique} class Migrate_200811241940_CreateUser extends Migration { def up(): Unit = { createTable("scala_migrations_location") { t => t.varbinary("pk_scala_migrations_location", PrimaryKey, Limit(16)) t.varchar("name", Unique, Limit(255), NotNull) } } def down(): Unit = { dropTable("scala_migrations_location") } }
azinman/scala-migrations
src/test/scala/com/imageworks/migration/tests/grant_and_revoke/Migrate_200811241940_CreateUser.scala
Scala
bsd-3-clause
2,203
package com.example

import akka.actor.Actor
import spray.routing._
import spray.http._
import MediaTypes._

// we don't implement our route structure directly in the service actor because
// we want to be able to test it independently, without having to spin up an actor
class MyServiceActor extends Actor with MyService {

  // the HttpService trait defines only one abstract member, which
  // connects the services environment to the enclosing actor or test
  def actorRefFactory = context

  // this actor only runs our route, but you could add
  // other things here, like request stream processing
  // or timeout handling
  def receive = runRoute(myRoute)
}

// this trait defines our service behavior independently from the service actor
trait MyService extends HttpService {

  val myRoute =
    pathPrefix("") {
      compressResponse() {
        getFromResourceDirectory("src/main/webapp")
      }
    } ~
    pathPrefix("api") {
      path("tasks") {
        get {
          respondWithMediaType(`application/json`) {
            complete {
              // placeholder payload so the route compiles and returns valid JSON;
              // replace with the real task list when it exists
              """{"tasks": []}"""
            }
          }
        }
      }
    }
}
weixing0/scala-lang
spray-test/src/main/scala/com/example/MyServiceActor.scala
Scala
apache-2.0
1,107
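A hedged route-test sketch for MyService using spray-testkit's Specs2 support (assumed to be on the test classpath); the expected body matches the placeholder JSON noted in the route above and would change with a real implementation.

import com.example.MyService
import org.specs2.mutable.Specification
import spray.testkit.Specs2RouteTest

class MyServiceSpec extends Specification with Specs2RouteTest with MyService {
  // wire the HttpService trait to the test kit's actor system
  def actorRefFactory = system

  "MyService" should {
    "serve /api/tasks as JSON" in {
      Get("/api/tasks") ~> myRoute ~> check {
        responseAs[String] === """{"tasks": []}"""
      }
    }
  }
}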
/* * Scala (https://www.scala-lang.org) * * Copyright EPFL and Lightbend, Inc. * * Licensed under Apache License 2.0 * (http://www.apache.org/licenses/LICENSE-2.0). * * See the NOTICE file distributed with this work for * additional information regarding copyright ownership. */ package scala.reflect.macros package compiler import scala.reflect.internal.Flags._ trait Validators { self: DefaultMacroCompiler => import global._ import analyzer._ import definitions._ import runDefinitions.Predef_??? trait Validator { self: MacroImplRefCompiler => def validateMacroImplRef() = { sanityCheck() if (macroImpl != Predef_???) checkMacroDefMacroImplCorrespondence() } private def sanityCheck() = { if (!macroImpl.isMethod) MacroImplReferenceWrongShapeError() if (macroImpl.typeParams.sizeCompare(targs) != 0) MacroImplWrongNumberOfTypeArgumentsError() if (!macroImpl.isPublic) MacroImplNotPublicError() if (macroImpl.isOverloaded) MacroImplOverloadedError() val implicitParams = aparamss.flatten filter (_.isImplicit) if (implicitParams.nonEmpty) MacroImplNonTagImplicitParameters(implicitParams) val effectiveOwner = if (isImplMethod) macroImplOwner else macroImplOwner.owner val effectivelyStatic = effectiveOwner.isStaticOwner || effectiveOwner.moduleClass.isStaticOwner val correctBundleness = if (isImplMethod) macroImplOwner.isModuleClass else macroImplOwner.isClass && !macroImplOwner.isModuleClass if (!effectivelyStatic || !correctBundleness) { val isReplClassBased = settings.Yreplclassbased.value && effectiveOwner.enclosingTopLevelClass.isInterpreterWrapper MacroImplReferenceWrongShapeError(isReplClassBased) } } private def checkMacroDefMacroImplCorrespondence() = { val atvars = atparams map freshVar def atpeToRtpe(atpe: Type) = atpe.substSym(aparamss.flatten, rparamss.flatten).instantiateTypeParams(atparams, atvars) // we only check strict correspondence between value parameterss // type parameters of macro defs and macro impls don't have to coincide with each other if (aparamss.sizeCompare(rparamss) != 0) MacroImplParamssMismatchError() foreach2(aparamss, rparamss)((aparams, rparams) => { if (aparams.sizeCompare(rparams) < 0) MacroImplMissingParamsError(aparams, rparams) if (rparams.sizeCompare(aparams) < 0) MacroImplExtraParamsError(aparams, rparams) }) try { // cannot fuse this map2 and the map2 above because if aparamss.flatten != rparamss.flatten // then `atpeToRtpe` is going to fail with an unsound substitution map2(aparamss.flatten, rparamss.flatten)((aparam, rparam) => { if (aparam.name != rparam.name && !rparam.isSynthetic) MacroImplParamNameMismatchError(aparam, rparam) if (isRepeated(aparam) ^ isRepeated(rparam)) MacroImplVarargMismatchError(aparam, rparam) val aparamtpe = aparam.tpe match { case MacroContextType(tpe) => tpe case tpe => tpe } checkMacroImplParamTypeMismatch(atpeToRtpe(aparamtpe), rparam) }) checkMacroImplResultTypeMismatch(atpeToRtpe(aret), rret) val maxLubDepth = lubDepth(aparamss.flatten map (_.tpe)) max lubDepth(rparamss.flatten map (_.tpe)) val atargs = solvedTypes(atvars, atparams, varianceInType(aret), upper = false, maxLubDepth) val boundsOk = typer.silent(_.infer.checkBounds(macroDdef, NoPrefix, NoSymbol, atparams, atargs, "")) boundsOk match { case SilentResultValue(true) => // do nothing, success case SilentResultValue(false) | _: SilentTypeError => MacroImplTargMismatchError(atargs, atparams) } } catch { case ex: NoInstance => MacroImplTparamInstantiationError(atparams, ex) } } // aXXX (e.g. 
aparamss) => characteristics of the actual macro impl signature extracted from the macro impl ("a" stands for "actual") // rXXX (e.g. rparamss) => characteristics of the reference macro impl signature synthesized from the macro def ("r" stands for "reference") // FIXME: cannot write this concisely because of scala/bug#7507 //lazy val MacroImplSig(atparams, aparamss, aret) = macroImplSig //lazy val MacroImplSig(_, rparamss, rret) = referenceMacroImplSig lazy val atparams = macroImplSig.tparams lazy val aparamss = macroImplSig.paramss lazy val aret = macroImplSig.ret lazy val rparamss = referenceMacroImplSig.paramss lazy val rret = referenceMacroImplSig.ret // Technically this can be just an alias to MethodType, but promoting it to a first-class entity // provides better encapsulation and convenient syntax for pattern matching. private case class MacroImplSig(tparams: List[Symbol], paramss: List[List[Symbol]], ret: Type) { private def tparams_s = if (tparams.isEmpty) "" else tparams.map(_.defString).mkString("[", ", ", "]") private def paramss_s = paramss map (ps => ps.map(s => s"${s.name}: ${s.tpe_*}").mkString("(", ", ", ")")) mkString "" override def toString = "MacroImplSig(" + tparams_s + paramss_s + ret + ")" } /** An actual macro implementation signature extracted from a macro implementation method. * * For the following macro impl: * def fooBar[T: c.WeakTypeTag] * (c: scala.reflect.macros.blackbox.Context) * (xs: c.Expr[List[T]]) * : c.Expr[T] = ... * * This function will return: * (c: scala.reflect.macros.blackbox.Context)(xs: c.Expr[List[T]])c.Expr[T] * * Note that type tag evidence parameters are not included into the result. * Type tag context bounds for macro impl tparams are optional. * Therefore compatibility checks ignore such parameters, and we don't need to bother about them here. * * This method cannot be reduced to just macroImpl.info, because macro implementations might * come in different shapes. If the implementation is an apply method of a *box.Macro-compatible object, * then it won't have (c: *box.Context) in its parameters, but will rather refer to *boxMacro.c. * * @param macroImpl The macro implementation symbol */ private lazy val macroImplSig: MacroImplSig = { val tparams = macroImpl.typeParams val paramss = transformTypeTagEvidenceParams(macroImplRef, (param, tparam) => NoSymbol) val ret = macroImpl.info.finalResultType MacroImplSig(tparams, paramss, ret) } /** A reference macro implementation signature extracted from a given macro definition. * * For the following macro def: * def foo[T](xs: List[T]): T = macro fooBar * * This function will return: * (c: scala.reflect.macros.blackbox.Context)(xs: c.Expr[List[T]])c.Expr[T] or * (c: scala.reflect.macros.whitebox.Context)(xs: c.Expr[List[T]])c.Expr[T] * * Note that type tag evidence parameters are not included into the result. * Type tag context bounds for macro impl tparams are optional. * Therefore compatibility checks ignore such parameters, and we don't need to bother about them here. * * Also note that we need a DefDef, not the corresponding MethodSymbol, because that symbol would be of no use for us. * Macro signatures are verified when typechecking macro defs, which means that at that moment inspecting macroDef.info * means asking for cyclic reference errors. * * We need macro implementation symbol as well, because the return type of the macro definition might be omitted, * and in that case we'd need to infer it from the return type of the macro implementation. 
Luckily for us, we can * use that symbol without a risk of running into cycles. * * @param typer Typechecker of `macroDdef` * @param macroDdef The macro definition tree * @param macroImpl The macro implementation symbol */ private lazy val referenceMacroImplSig: MacroImplSig = { // had to move method's body to an object because of the recursive dependencies between sigma and param object SigGenerator { val cache = scala.collection.mutable.Map[Symbol, Symbol]() val ctxTpe = if (isBlackbox) BlackboxContextClass.tpe else WhiteboxContextClass.tpe val ctxPrefix = if (isImplMethod) singleType(NoPrefix, makeParam(nme.macroContext, macroDdef.pos, ctxTpe, SYNTHETIC)) else singleType(ThisType(macroImpl.owner), macroImpl.owner.tpe.member(nme.c)) val paramss = if (isImplMethod) List(ctxPrefix.termSymbol) :: mmap(macroDdef.vparamss)(param) else mmap(macroDdef.vparamss)(param) val macroDefRet = if (!macroDdef.tpt.isEmpty) typer.typedType(macroDdef.tpt).tpe else AnyTpe val implReturnType = sigma(increaseMetalevel(ctxPrefix, macroDefRet)) object SigmaTypeMap extends TypeMap { def mapPrefix(pre: Type) = pre match { case ThisType(sym) if sym == macroDef.owner => singleType(singleType(ctxPrefix, MacroContextPrefix), ExprValue) case SingleType(NoPrefix, sym) => mfind(macroDdef.vparamss)(_.symbol == sym).fold(pre)(p => singleType(singleType(NoPrefix, param(p)), ExprValue)) case _ => mapOver(pre) } def apply(tp: Type): Type = tp match { case TypeRef(pre, sym, args) => val pre1 = mapPrefix(pre) val args1 = args mapConserve this if ((pre eq pre1) && (args eq args1)) tp else typeRef(pre1, sym, args1) case _ => mapOver(tp) } } def sigma(tpe: Type): Type = SigmaTypeMap(tpe) def makeParam(name: Name, pos: Position, tpe: Type, flags: Long) = macroDef.newValueParameter(name.toTermName, pos, flags) setInfo tpe def param(tree: Tree): Symbol = ( cache.getOrElseUpdate(tree.symbol, { val sym = tree.symbol assert(sym.isTerm, s"sym = $sym, tree = $tree") makeParam(sym.name, sym.pos, sigma(increaseMetalevel(ctxPrefix, sym.tpe)), sym.flags) }) ) } import SigGenerator._ macroLogVerbose(s"generating macroImplSigs for: $macroDdef") val result = MacroImplSig(macroDdef.tparams map (_.symbol), paramss, implReturnType) macroLogVerbose(s"result is: $result") result } } }
lrytz/scala
src/compiler/scala/reflect/macros/compiler/Validators.scala
Scala
apache-2.0
10,569
import sbt._ import Keys._ import sbtrelease._ import ReleaseStateTransformations._ import xerial.sbt.Sonatype._ import scoverage._ object BuildSettings { val buildOrganization = "com.github.snowgooseyk" val buildVersion = "0.1.3-SNAPSHOT" val buildScalaVersion = "2.11.6" val clossBuildScalaVersion = Seq("2.10.4","2.11.6") val buildSettings = Defaults.defaultSettings ++ ReleasePlugin.releaseSettings ++ sonatypeSettings ++ ScoverageSbtPlugin.projectSettings ++ Seq ( organization := buildOrganization, version := buildVersion, scalaVersion := buildScalaVersion, crossScalaVersions := clossBuildScalaVersion, licenses := Seq("MIT" -> url("http://opensource.org/licenses/MIT")), description := "Simple CSV library for Scala.", publishMavenStyle := true, publishTo <<= version { (v: String) => val nexus = "https://oss.sonatype.org/" if (v.trim.endsWith("SNAPSHOT")) Some("snapshots" at nexus + "content/repositories/snapshots") else Some("releases" at nexus + "service/local/staging/deploy/maven2") }, publishArtifact in Test := false, scmInfo := Some(ScmInfo( url("https://github.com/snowgooseyk/sscsv"), "scm:git:[email protected]:snowgooseyk/sscsv.git" )), pomExtra := ( <url>https://github.com/snowgooseyk/sscsv</url> <developers> <developer> <id>snowgooseyk</id> <name>snowgooseyk</name> <url>https://github.com/snowgooseyk</url> </developer> </developers> ), isSnapshot := true, scalacOptions ++= Seq("-feature","-deprecation","-language:_") ) } object Dependencies { val specsVersion = "3.6.2" val specsCore = "org.specs2" % "specs2-core" % specsVersion % "test" cross CrossVersion.fullMapped { case "2.10.4" => "2.10" case "2.11.6" => "2.11" } val specsJunit = "org.specs2" % "specs2-junit" % specsVersion % "test" cross CrossVersion.fullMapped { case "2.10.4" => "2.10" case "2.11.6" => "2.11" } val specsMock = "org.specs2" % "specs2-mock" % specsVersion % "test" cross CrossVersion.fullMapped { case "2.10.4" => "2.10" case "2.11.6" => "2.11" } val all = Seq ( specsCore, specsJunit, specsMock ) } object Resolvers { val m2local = Resolver.mavenLocal val sonatype = Resolver.sonatypeRepo("snapshots") val scalazBintray = "scalaz-bintray" at "http://dl.bintray.com/scalaz/releases" val all = Seq ( m2local, sonatype, scalazBintray ) } object SSCSV extends Build { import BuildSettings._ import Dependencies._ lazy val root = Project ( id = "sscsv", base = file("."), settings = buildSettings ++ Seq ( resolvers ++= Resolvers.all, libraryDependencies ++= Dependencies.all ) ) }
snowgooseyk/sscsv
project/Build.scala
Scala
mit
2,976
//package dsentric.operators // //import dsentric.contracts.ContractFor //import dsentric.failure._ //import dsentric.schema._ //import dsentric.{DArray, DCodec, DNull, DObject, Path, Raw, RawObject, StringCodec} // //import scala.util.matching.Regex // //trait Validators extends ValidatorOps { // // // // def >(x:Long): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path: Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r >= 0 // } yield NumericalFailure(contract, path, b, a, "greater than") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:IntegerDefinition => // n.copy(exclusiveMinimum = Some(x), minimum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => // m.remap(definition).asInstanceOf[D] // } // } // // def >(x:Double): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r >= 0 // } yield NumericalFailure(contract, path, b, a, "greater than") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:NumberDefinition => // n.copy(exclusiveMinimum = Some(x), minimum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => // m.remap(definition).asInstanceOf[D] // } // } // // def >=(x:Long): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r > 0 // } yield NumericalFailure(contract, path, b, a, "greater than or equal to") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:IntegerDefinition => n.copy(minimum = Some(x), exclusiveMinimum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // } // // def >=(x:Double): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r > 0 // } yield NumericalFailure(contract, path, b, a, "greater than or equal to") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:NumberDefinition => n.copy(minimum = Some(x), exclusiveMinimum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // } // // def <(x:Long): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r <= 0 // } yield NumericalFailure(contract, path, b, a, "less than") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:IntegerDefinition => n.copy(exclusiveMaximum = Some(x), maximum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // } // // def <(x:Double): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => 
Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r <= 0 // } yield NumericalFailure(contract, path, b, a, "less than") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:NumberDefinition => n.copy(exclusiveMaximum = Some(x), maximum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // } // // def <=(x:Long): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r < 0 // } yield NumericalFailure(contract, path, b, a, "less than or equal to") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:IntegerDefinition => n.copy(maximum = Some(x), exclusiveMaximum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // } // // def <=(x:Double): ValueValidator[Numeric] = // new ValueValidator[Numeric] { // def apply[S >: Numeric, D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // (r, a, b) <- compare(x, value).toList // if r < 0 // } yield NumericalFailure(contract, path, b, a, "less than or equal to") // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:NumberDefinition => n.copy(maximum = Some(x), exclusiveMaximum = None).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // } // //TODO Doesnt handle delta removal or addition // def minLength(x: Int): ValueValidator[Optionable[Length]] = // new ValueValidator[Optionable[Length]] { // def apply[S >: Optionable[Length], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = { // getLength(value, currentState) // .filter(_ < x) // .map(length => MinimumLengthFailure(contract, path, x, length)) // .toList // } // // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:StringDefinition => n.copy(minLength = Some(x)).asInstanceOf[D] // case n:ArrayDefinition => n.copy(minLength = Some(x)).asInstanceOf[D] // case o:ObjectDefinition => o.copy(minProperties = Some(x)).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // } // // def maxLength(x: Int): ValueValidator[Optionable[Length]] = // new ValueValidator[Optionable[Length]] { // def apply[S >: Optionable[Length], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // getLength(value, currentState) // .filter(_ > x) // .map(length => MaximumLengthFailure(contract, path, x, length)) // .toList // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:StringDefinition => n.copy(maxLength = Some(x)).asInstanceOf[D] // case n:ArrayDefinition => n.copy(maxLength = Some(x)).asInstanceOf[D] // case o:ObjectDefinition => o.copy(maxProperties = Some(x)).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // } // // def in[T](values:T*)(implicit codec:DCodec[T]): ValueValidator[Optionable[T]] = // new ValueValidator[Optionable[T]] { // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:StringDefinition => // n.copy(values.map(codec.apply).map(_.value).toList).asInstanceOf[D] // case 
n:IntegerDefinition => // n.copy(values.map(codec.apply).map(_.value).toList).asInstanceOf[D] // case n:NumberDefinition => // n.copy(values.map(codec.apply).map(_.value).toList).asInstanceOf[D] // case m:MultipleTypeDefinition => // m.remap(definition).asInstanceOf[D] // } // // def apply[S >: Optionable[T], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // t <- getT[T, S](value).toList // if !values.contains(t) // } yield InvalidValueFailure(contract, path, t) // } // // def nin[T](values:T*)(implicit codec:DCodec[T]): ValueValidator[Optionable[T]] = // new ValueValidator[Optionable[T]] { // // def apply[S >: Optionable[T], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // t <- getT[T, S](value).toList // if values.contains(t) // } yield InvalidValueFailure(contract, path, t) // } // // //maybe change to generic equality // def inCaseInsensitive(values:String*): ValueValidator[Optionable[String]] = // new ValueValidator[Optionable[String]] { // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case n:StringDefinition => n.copy(values.toList).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // def apply[S >: Optionable[String], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // t <- getString(value).toList // if !values.exists(_.equalsIgnoreCase(t)) // } yield InvalidValueFailure(contract, path, t) // } // // def ninCaseInsensitive(values:String*): ValueValidator[Optionable[String]] = // new ValueValidator[Optionable[String]] { // // def apply[S >: Optionable[String], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // t <- getString(value).toList // if values.exists(_.equalsIgnoreCase(t)) // } yield InvalidValueFailure(contract, path, t) // } // // val nonEmpty: ValueValidator[Optionable[Length]] = // minLength(1) // // val nonEmptyOrWhiteSpace: ValueValidator[Optionable[String]] = // new ValueValidator[Optionable[String]] { // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case s:StringDefinition => // s.copy(minLength = s.minLength.orElse(Some(1)), pattern = s.pattern.orElse(Some("[^\\\\s]"))).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // def apply[S >: Optionable[String], D <: DObject](contract:ContractFor[D], path: Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // t <- getString(value).toList // if t.trim().isEmpty // } yield NonEmptyOrWhitespaceFailure(contract, path) // } // // def custom[T](f: T => Boolean, message:T => String): ValueValidator[Optionable[T]] = // new ValueValidator[Optionable[T]] { // // def apply[S >: Optionable[T], D <: DObject](contract:ContractFor[D], path: Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // t <- getT[T, S](value).toList // if !f(t) // } yield CustomValidationFailure(contract, path, t, message(t)) // } // // def regex(r:Regex):ValueValidator[Optionable[String]] = // regex(r, s => s"String '$s' fails to match pattern '$r'.") // // def regex(r:Regex, message:String => String):ValueValidator[Optionable[String]] = // new ValueValidator[Optionable[String]] { // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case s:StringDefinition 
=> // s.copy(pattern = Some(r.pattern.pattern())).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // def apply[S >: Optionable[String], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // t <- getString(value).toList // if !r.pattern.matcher(t).matches // } yield RegexFailure(contract, path, r, t, message(t)) // } // // val noKeyRemoval:RawValidator[Optionable[Keyable]] = // new RawValidator[Optionable[Keyable]] { // // def apply[D <: DObject](contract:ContractFor[D], path: Path, value: Option[Raw], currentState: Option[Raw]): ValidationFailures = // currentState.fold(ValidationFailures.empty){ // case r:RawObject@unchecked => // val removed = value.fold(Set.empty[String]){ // case r2:RawObject@unchecked => // r2.collect{ case (k, DNull) => k}.toSet // case _ => Set.empty // }.intersect(r.keySet) // removed.map(k => KeyRemovalFailure(contract, path, k)).toList // } // } // // // def noKeyRemoval[T](implicit D:StringCodec[T]):RawValidator[Optionable[Map[T, Nothing]]] = // new RawValidator[Optionable[Map[T, Nothing]]] { // // def apply[D <: DObject](contract:ContractFor[D], path: Path, value: Option[Raw], currentState: Option[Raw]): ValidationFailures = { // currentState.fold(ValidationFailures.empty){ // case r:RawObject@unchecked => // val removed = value.fold(Set.empty[String]){ // case r2:RawObject@unchecked => // r2.collect{ case (k, DNull) => k}.toSet // case _ => Set.empty // }.intersect(r.keySet) // removed.map(k => KeyRemovalFailure(contract, path, k)).toList // } // } // } // // def valueValidator[K, V](validators:Validator[V]*)(implicit CV:DCodec[V]):RawValidator[Optionable[Map[K, V]]] = // new RawValidator[Optionable[Map[K, V]]] { // override def apply[D <: DObject](contract: ContractFor[D], path: Path, value: Option[Raw], currentState: Option[Raw]): ValidationFailures = { // val maybeCurrentState = currentState.collect{ case r:RawObject@unchecked => r} // value.fold(ValidationFailures.empty){ // case r:RawObject@unchecked => // validators.flatMap { // case validator:ValueValidator[V]@unchecked => // r.flatMap { // case (k, CV(v)) => // validator(contract, path \\ k, v, maybeCurrentState.flatMap(_.get(k)).flatMap(CV.unapply)) // case _ => // ValidationFailures.empty // } // case validator:RawValidator[V]@unchecked => // r.flatMap { case (k, v) => // validator(contract, path \\ k, Some(v), maybeCurrentState.flatMap(_.get(k))) // } // }.toList // case _ => // Nil // } // } // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case s:ObjectDefinition => // val newAdditionalProperties = s.additionalProperties.map{td => // validators.foldLeft(td)((a, e) => e.definition.lift(a).getOrElse(a)) // } // s.copy(additionalProperties = newAdditionalProperties).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // } // // def keyValidator(r:Regex, message:String):ValueValidator[Optionable[Keyable]] = // new ValueValidator[Optionable[Keyable]] { // // override def definition[D <: TypeDefinition]:PartialFunction[D, D] = { // case s:ObjectDefinition => // s.copy(propertyNames = Some(StringDefinition(pattern = Some(r.pattern.pattern())))).asInstanceOf[D] // case m:MultipleTypeDefinition => m.remap(definition).asInstanceOf[D] // } // // def apply[S >: Optionable[Keyable], D <: DObject](contract:ContractFor[D], path:Path, value: S, currentState: => Option[S]): ValidationFailures = // for { // ct <- 
getKeyable(value).toList // key <- ct.keys.toList if !r.pattern.matcher(key).matches() // } yield RegexFailure(contract, path, r, key, message) // } // //} // //trait ValidatorOps { // // protected def getLength[S >: Optionable[Length]](x:S, state:Option[S]): Option[Int] = // x match { // case Some(v) => // getLength(v, state) // case None => // None // case v => // getKeyable(v) match { // case Some(c) => // Some { // state.flatMap(getKeyable) // .fold(c.keys.size)(s => getLengthDif(c, s)) // } // case None => // x match { // case s: Seq[Any]@unchecked => // Some(s.size) // case a: Iterable[_] => // Some(a.size) // case s: String => // Some(s.size) // case d: DObject => // Some(d.size) // case d: DArray => // Some(d.value.size) // case _ => // None // } // } // } // // protected def getLengthDif[T](c:Map[String, T], v:Map[String, T]):Int = { // val (remove, add) = c.partition(_._2 == DNull) // ((v.keySet -- remove.keySet) ++ add.keySet).size // } // // protected def getString[S >: Optionable[String]](x:S):Option[String] = // x match { // case Some(s:String) => Some(s) // case s:String => Some(s) // case _ => None // } // // protected def getT[T, S >: Optionable[T]](t:S):Option[T] = // t match { // case Some(s: T@unchecked) => Some(s) // case None => None // case s: T@unchecked => Some(s) // } // // protected def getKeyable[S >: Optionable[Keyable]](t:S):Option[Map[String, Any]] = // t match { // case Some(s: Map[String, Any]@unchecked) => // Some(s) // case Some(d:DObject) => // Some(d.value) // case None => // None // case s: Map[String, Any]@unchecked => // Some(s) // case d:DObject => // Some(d.value) // case _ => // None // } // // protected def resolve[S >: Numeric](value:S, target:S):Option[(Int, Number, Number)] = // value match { // case i:Int => // compare(i, target) // case l:Long => // compare(l, target) // case f:Float => // compare(f, target) // case d:Double => // compare(d, target) // case Some(n) => // resolve(value, n) // case _ => // None // } // // protected def compare[S >: Numeric](value:Long, target:S):Option[(Int, Number, Number)] = // target match { // case i:Int => // Some((value.compare(i), value, i)) // case l:Long => // Some((value.compare(l), value, l)) // case f:Float => // Some((value.toDouble.compare(f), value, f)) // case d:Double => // Some((value.toDouble.compare(d), value, d)) // case Some(n) => // compare(value, n) // case _ => // None // } // // protected def compare[S >: Numeric](value:Double, target:S):Option[(Int, Number, Number)] = // target match { // case i:Int => // Some((value.compare(i), value, i)) // case l:Long => // Some((value.compare(l), value, l)) // case f:Float => // Some((value.compare(f), value, f)) // case d:Double => // Some((value.compare(d), value, d)) // case Some(n) => // compare(value, n) // case _ => // None // } //} // //object Validators extends Validators with ValidatorSanitizers //
HigherState/dsentric
maps/src/main/scala/dsentric/operators/Validators.scala
Scala
apache-2.0
19,591
package hasheq package immutable import hasheq.std.int._ class HashSetTest extends TestSuite { // test set of Int checkAll(SetRepr.properties[HashSet, Int]("SetRepr[HashSet, Int]")) // test set of Int modulo 10 object Mod10 implicit object IntEqMod10 extends Equiv[Int, Mod10.type] { def mod10(i: Int): Int = { val r = i % 10 if (r < 0) r + 10 else r } def equiv(a: Int, b: Int): Boolean = mod10(a) == mod10(b) } implicit object IntHashMod10 extends HashEq[Int, Mod10.type] { def hash(a: Int): Int = IntEqMod10.mod10(a) } checkAll(Setoid.genProperties[HashSetoid, Int, Mod10.type]("Setoid[HashSetoid, Int, Mod10]")) // check that the equivalence on HashSet is lawful checkAll(Equiv.properties[HashSet[Int]]()) // check that the hash function on HashSet is lawful checkAll(HashEq.properties[HashSet[Int]]()) // compilation check val x = HashSetoid(HashSetoid(HashSet(1))) }
TomasMikula/hasheq
src/test/scala/hasheq/immutable/HashSetTest.scala
Scala
bsd-3-clause
944
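The mod-10 arithmetic exercised above can be spot-checked directly; these assertions could sit inside HashSetTest's body, where the Mod10 instances are in scope, and the concrete numbers follow from the mod10 definition (negative remainders are shifted up by 10).

assert(IntEqMod10.mod10(-7) == 3)                           // -7 % 10 == -7, then + 10
assert(IntEqMod10.equiv(3, 13) && IntEqMod10.equiv(3, -7))  // 3, 13 and -7 all reduce to 3
assert(IntHashMod10.hash(13) == IntHashMod10.hash(3))       // equivalent values hash alike, as HashEq requires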
package scala.pickling.open.sum1 import org.scalatest.FunSuite import scala.pickling._, scala.pickling.Defaults._, json._ package outer { abstract class Person { val name: String val age: Int } case class Firefighter(val name: String, val age: Int, val since: Int) extends Person package inner { case class Employee(val name: String, val age: Int, val salary: Int) extends Person } } class OpenSum1Test extends FunSuite { test("main") { import outer._ val f: Person = new Firefighter( name = "Jeff", age = 45, since = 1990 ) val pickle = f.pickle assert(pickle.value.toString === """ |{ | "tpe": "scala.pickling.open.sum1.outer.Firefighter", | "name": "Jeff", | "age": 45, | "since": 1990 |} """.stripMargin.trim) assert(pickle.unpickle[Person] === f) } }
eed3si9n/pickling-historical
core/src/test/scala/pickling/run/open-sum1.scala
Scala
bsd-3-clause
875
package uk.gov.homeoffice.configuration import com.typesafe.config.ConfigFactory trait HasConfig extends ConfigFactorySupport { implicit val config = ConfigFactory.load }
UKHomeOffice/rtp-io-lib
src/main/scala/uk/gov/homeoffice/configuration/HasConfig.scala
Scala
mit
174
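A small sketch of mixing HasConfig into a service class; the configuration keys are hypothetical and only standard Typesafe Config accessors are used.

import uk.gov.homeoffice.configuration.HasConfig

class EmailSender extends HasConfig {
  // "email.*" keys are made up; `config` is the implicit ConfigFactory.load result from the trait
  val smtpHost: String = config.getString("email.smtp-host")
  val maxRetries: Int  = config.getInt("email.max-retries")
}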
package uk.org.nbn.nbnv.importer.darwin import uk.org.nbn.nbnv.importer.testing.BaseFunSuite import uk.org.nbn.nbnv.importer.Options import uk.org.nbn.nbnv.importer.utility.ResourceLoader class ReaderSuite extends BaseFunSuite with ResourceLoader { test("an empty value should be an empty string, not null") { // val archivePath = resource("/archives/valid.zip") // val options = Options(archivePath = archivePath.getFile, tempDir = tempDir, whatIf = true) // val am = new ArchiveManager() } }
JNCC-dev-team/nbn-importer
importer/src/test/scala/uk/org/nbn/nbnv/importer/darwin/ReaderSuite.scala
Scala
apache-2.0
511
package io.youi import io.youi.net.URL object Cache { var implementation: CacheImplementation = _ def cached(url: URL): String = { assert(implementation != null, "CacheImplementation must be set before Cache can be used.") implementation.cached(url) } } trait CacheImplementation { def cached(url: URL): String }
outr/youi
core/shared/src/main/scala/io/youi/Cache.scala
Scala
mit
332
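A minimal CacheImplementation sketch for wiring the Cache object above; the pass-through behaviour is illustrative only (a real implementation would return a cache-busted or locally cached URL string).

import io.youi.{Cache, CacheImplementation}
import io.youi.net.URL

object CacheWiring {
  // hands the URL back unchanged; real implementations would map it to a cached asset
  object PassThroughCache extends CacheImplementation {
    def cached(url: URL): String = url.toString
  }

  def install(): Unit = {
    // must run before the first Cache.cached call, otherwise its assertion fails
    Cache.implementation = PassThroughCache
  }
}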
package lore.compiler.feedback class LambdaReporter(f: Feedback => Unit) extends Reporter { private var _hasErrors = false override def report(feedback: Feedback): Unit = this.synchronized { if (feedback.isError) _hasErrors = true f(feedback) } override def hasErrors: Boolean = this.synchronized(_hasErrors) }
marcopennekamp/lore
compiler/src/lore/compiler/feedback/LambdaReporter.scala
Scala
mit
328
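A brief sketch of using LambdaReporter to collect feedback into a buffer; no Feedback value is constructed here because its shape is not shown above, so the report call stays commented as a hypothetical.

import scala.collection.mutable.ListBuffer
import lore.compiler.feedback.{Feedback, LambdaReporter}

object ReporterExample {
  val collected = ListBuffer.empty[Feedback]
  val reporter  = new LambdaReporter(collected += _)

  // reporter.report(someFeedback)  // someFeedback is hypothetical; an error flips hasErrors to true
  def failed: Boolean = reporter.hasErrors
}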
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.parser import scala.collection.mutable import scala.language.implicitConversions import org.apache.spark.sql.{CarbonToSparkAdapater, DeleteRecords, UpdateTable} import org.apache.spark.sql.catalyst.{CarbonDDLSqlParser, TableIdentifier} import org.apache.spark.sql.catalyst.CarbonTableIdentifierImplicit._ import org.apache.spark.sql.catalyst.plans.logical._ import org.apache.spark.sql.execution.command._ import org.apache.spark.sql.execution.command.datamap.{CarbonCreateDataMapCommand, CarbonDataMapRebuildCommand, CarbonDataMapShowCommand, CarbonDropDataMapCommand} import org.apache.spark.sql.execution.command.management._ import org.apache.spark.sql.execution.command.partition.{CarbonAlterTableDropPartitionCommand, CarbonAlterTableSplitPartitionCommand} import org.apache.spark.sql.execution.command.schema.{CarbonAlterTableAddColumnCommand, CarbonAlterTableDataTypeChangeCommand, CarbonAlterTableDropColumnCommand} import org.apache.spark.sql.execution.command.table.CarbonCreateTableCommand import org.apache.spark.sql.types.StructField import org.apache.spark.sql.CarbonExpressions.CarbonUnresolvedRelation import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation import org.apache.spark.sql.execution.command.stream.{CarbonCreateStreamCommand, CarbonDropStreamCommand, CarbonShowStreamsCommand} import org.apache.spark.sql.util.CarbonException import org.apache.spark.util.CarbonReflectionUtils import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException import org.apache.carbondata.core.constants.CarbonCommonConstants import org.apache.carbondata.spark.CarbonOption import org.apache.carbondata.spark.util.{CarbonScalaUtil, CommonUtil} /** * TODO remove the duplicate code and add the common methods to common class. * Parser for All Carbon DDL, DML cases in Unified context */ class CarbonSpark2SqlParser extends CarbonDDLSqlParser { override def parse(input: String): LogicalPlan = { synchronized { // Initialize the Keywords. 
initLexical phrase(start)(new lexical.Scanner(input)) match { case Success(plan, _) => CarbonScalaUtil.cleanParserThreadLocals() plan match { case x: CarbonLoadDataCommand => x.inputSqlString = input x case x: CarbonAlterTableCompactionCommand => x.alterTableModel.alterSql = input x case logicalPlan => logicalPlan } case failureOrError => CarbonScalaUtil.cleanParserThreadLocals() CarbonException.analysisException(failureOrError.toString) } } } protected lazy val start: Parser[LogicalPlan] = explainPlan | startCommand protected lazy val startCommand: Parser[LogicalPlan] = loadManagement | showLoads | alterTable | restructure | updateTable | deleteRecords | alterPartition | datamapManagement | alterTableFinishStreaming | stream protected lazy val loadManagement: Parser[LogicalPlan] = deleteLoadsByID | deleteLoadsByLoadDate | cleanFiles | loadDataNew protected lazy val restructure: Parser[LogicalPlan] = alterTableModifyDataType | alterTableDropColumn | alterTableAddColumns protected lazy val alterPartition: Parser[LogicalPlan] = alterAddPartition | alterSplitPartition | alterDropPartition protected lazy val datamapManagement: Parser[LogicalPlan] = createDataMap | dropDataMap | showDataMap | refreshDataMap protected lazy val stream: Parser[LogicalPlan] = createStream | dropStream | showStreams protected lazy val alterAddPartition: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ (ADD ~> PARTITION ~> "(" ~> repsep(stringLit, ",") <~ ")") <~ opt(";") ^^ { case dbName ~ table ~ addInfo => val alterTableAddPartitionModel = AlterTableSplitPartitionModel(dbName, table, "0", addInfo) CarbonAlterTableSplitPartitionCommand(alterTableAddPartitionModel) } protected lazy val alterSplitPartition: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ (SPLIT ~> PARTITION ~> "(" ~> numericLit <~ ")") ~ (INTO ~> "(" ~> repsep(stringLit, ",") <~ ")") <~ opt(";") ^^ { case dbName ~ table ~ partitionId ~ splitInfo => val alterTableSplitPartitionModel = AlterTableSplitPartitionModel(dbName, table, partitionId, splitInfo) if (partitionId == 0) { sys.error("Please use [Alter Table Add Partition] statement to split default partition!") } CarbonAlterTableSplitPartitionCommand(alterTableSplitPartitionModel) } protected lazy val alterDropPartition: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ (DROP ~> PARTITION ~> "(" ~> numericLit <~ ")") ~ (WITH ~> DATA).? <~ opt(";") ^^ { case dbName ~ table ~ partitionId ~ withData => val dropWithData = withData.getOrElse("NO") match { case "NO" => false case _ => true } val alterTableDropPartitionModel = AlterTableDropPartitionModel(dbName, table, partitionId, dropWithData) CarbonAlterTableDropPartitionCommand(alterTableDropPartitionModel) } protected lazy val alterTable: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ (COMPACT ~ stringLit) ~ (WHERE ~> (SEGMENT ~ "." ~ ID) ~> IN ~> "(" ~> repsep(segmentId, ",") <~ ")").? <~ opt(";") ^^ { case dbName ~ table ~ (compact ~ compactType) ~ segs => val altertablemodel = AlterTableModel(convertDbNameToLowerCase(dbName), table, None, compactType, Some(System.currentTimeMillis()), null, segs) CarbonAlterTableCompactionCommand(altertablemodel) } /** * The below syntax is used to change the status of the segment * from "streaming" to "streaming finish". * ALTER TABLE tableName FINISH STREAMING */ protected lazy val alterTableFinishStreaming: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? 
~ ident <~ FINISH <~ STREAMING <~ opt(";") ^^ { case dbName ~ table => CarbonAlterTableFinishStreaming(dbName, table) } /** * The syntax of CREATE STREAM * CREATE STREAM [IF NOT EXISTS] streamName ON TABLE [dbName.]tableName * [STMPROPERTIES('KEY'='VALUE')] * AS SELECT COUNT(COL1) FROM tableName */ protected lazy val createStream: Parser[LogicalPlan] = CREATE ~> STREAM ~> opt(IF ~> NOT ~> EXISTS) ~ ident ~ (ON ~> TABLE ~> (ident <~ ".").?) ~ ident ~ (STMPROPERTIES ~> "(" ~> repsep(loadOptions, ",") <~ ")").? ~ (AS ~> restInput) <~ opt(";") ^^ { case ifNotExists ~ streamName ~ dbName ~ tableName ~ options ~ query => val optionMap = options.getOrElse(List[(String, String)]()).toMap[String, String] CarbonCreateStreamCommand( streamName, dbName, tableName, ifNotExists.isDefined, optionMap, query) } /** * The syntax of DROP STREAM * DROP STREAM [IF EXISTS] streamName */ protected lazy val dropStream: Parser[LogicalPlan] = DROP ~> STREAM ~> opt(IF ~> EXISTS) ~ ident <~ opt(";") ^^ { case ifExists ~ streamName => CarbonDropStreamCommand(streamName, ifExists.isDefined) } /** * The syntax of SHOW STREAMS * SHOW STREAMS [ON TABLE dbName.tableName] */ protected lazy val showStreams: Parser[LogicalPlan] = SHOW ~> STREAMS ~> opt(ontable) <~ opt(";") ^^ { case tableIdent => CarbonShowStreamsCommand(tableIdent) } /** * The syntax of datamap creation is as follows. * CREATE DATAMAP IF NOT EXISTS datamapName [ON TABLE tableName] * USING 'DataMapProviderName' * [WITH DEFERRED REBUILD] * DMPROPERTIES('KEY'='VALUE') AS SELECT COUNT(COL1) FROM tableName */ protected lazy val createDataMap: Parser[LogicalPlan] = CREATE ~> DATAMAP ~> opt(IF ~> NOT ~> EXISTS) ~ ident ~ opt(ontable) ~ (USING ~> stringLit) ~ opt(WITH ~> DEFERRED ~> REBUILD) ~ (DMPROPERTIES ~> "(" ~> repsep(loadOptions, ",") <~ ")").? ~ (AS ~> restInput).? <~ opt(";") ^^ { case ifnotexists ~ dmname ~ tableIdent ~ dmProviderName ~ deferred ~ dmprops ~ query => val map = dmprops.getOrElse(List[(String, String)]()).toMap[String, String] CarbonCreateDataMapCommand(dmname, tableIdent, dmProviderName, map, query, ifnotexists.isDefined, deferred.isDefined) } protected lazy val ontable: Parser[TableIdentifier] = ON ~> TABLE ~> (ident <~ ".").? ~ ident ^^ { case dbName ~ tableName => TableIdentifier(tableName, dbName) } /** * The below syntax is used to drop the datamap. * DROP DATAMAP IF EXISTS datamapName ON TABLE tablename */ protected lazy val dropDataMap: Parser[LogicalPlan] = DROP ~> DATAMAP ~> opt(IF ~> EXISTS) ~ ident ~ opt(ontable) <~ opt(";") ^^ { case ifexists ~ dmname ~ tableIdent => CarbonDropDataMapCommand(dmname, ifexists.isDefined, tableIdent) } /** * The syntax of show datamap is used to show datamaps on the table * SHOW DATAMAP ON TABLE tableName */ protected lazy val showDataMap: Parser[LogicalPlan] = SHOW ~> DATAMAP ~> opt(ontable) <~ opt(";") ^^ { case tableIdent => CarbonDataMapShowCommand(tableIdent) } /** * The syntax of show datamap is used to show datamaps on the table * REBUILD DATAMAP datamapname [ON TABLE] tableName */ protected lazy val refreshDataMap: Parser[LogicalPlan] = REBUILD ~> DATAMAP ~> ident ~ opt(ontable) <~ opt(";") ^^ { case datamap ~ tableIdent => CarbonDataMapRebuildCommand(datamap, tableIdent) } protected lazy val deleteRecords: Parser[LogicalPlan] = (DELETE ~> FROM ~> aliasTable) ~ restInput.? 
<~ opt(";") ^^ { case table ~ rest => val tableName = getTableName(table._2) val relation: LogicalPlan = table._3 match { case Some(a) => DeleteRecords( "select tupleId from " + tableName + " " + table._3.getOrElse("") + rest.getOrElse(""), Some(table._3.get), table._1) case None => DeleteRecords( "select tupleId from " + tableName + " " + rest.getOrElse(""), None, table._1) } relation } protected lazy val updateTable: Parser[LogicalPlan] = UPDATE ~> aliasTable ~ (SET ~> "(" ~> repsep(element, ",") <~ ")") ~ ("=" ~> restInput) <~ opt(";") ^^ { case tab ~ columns ~ rest => val (sel, where) = splitQuery(rest) val (selectStmt, relation) = if (!sel.toLowerCase.startsWith("select ")) { if (sel.trim.isEmpty) { sys.error("At least one source column has to be specified ") } // only list of expression are given, need to convert that list of expressions into // select statement on destination table val relation : UnresolvedRelation = tab._1 match { case r@CarbonUnresolvedRelation(tableIdentifier) => tab._3 match { case Some(a) => updateRelation(r, tableIdentifier, tab._4, Some(tab._3.get)) case None => updateRelation(r, tableIdentifier, tab._4, None) } case _ => tab._1 } tab._3 match { case Some(a) => ("select " + sel + " from " + getTableName(tab._2) + " " + tab._3.get, relation) case None => ("select " + sel + " from " + getTableName(tab._2), relation) } } else { (sel, updateRelation(tab._1, tab._2, tab._4, tab._3)) } val rel = tab._3 match { case Some(a) => UpdateTable(relation, columns, selectStmt, Some(tab._3.get), where) case None => UpdateTable(relation, columns, selectStmt, Some(tab._1.tableIdentifier.table), where) } rel } private def updateRelation( r: UnresolvedRelation, tableIdent: Seq[String], tableIdentifier: TableIdentifier, alias: Option[String]): UnresolvedRelation = { alias match { case Some(_) => r case _ => val tableAlias = tableIdent match { case Seq(dbName, tableName) => Some(tableName) case Seq(tableName) => Some(tableName) } // Use Reflection to choose between Spark2.1 and Spark2.2 // Move UnresolvedRelation(tableIdentifier, tableAlias) to reflection. CarbonReflectionUtils.getUnresolvedRelation(tableIdentifier, tableAlias) } } protected lazy val element: Parser[String] = (ident <~ ".").? ~ ident ^^ { case table ~ column => column.toLowerCase } protected lazy val table: Parser[UnresolvedRelation] = { rep1sep(attributeName, ".") ~ opt(ident) ^^ { case tableIdent ~ alias => UnresolvedRelation(tableIdent) } } protected lazy val aliasTable: Parser[(UnresolvedRelation, List[String], Option[String], TableIdentifier)] = { rep1sep(attributeName, ".") ~ opt(ident) ^^ { case tableIdent ~ alias => val tableIdentifier: TableIdentifier = toTableIdentifier(tableIdent) // Use Reflection to choose between Spark2.1 and Spark2.2 // Move (UnresolvedRelation(tableIdent, alias), tableIdent, alias) to reflection. 
val unresolvedRelation = CarbonReflectionUtils.getUnresolvedRelation(tableIdentifier, alias) (unresolvedRelation, tableIdent, alias, tableIdentifier) } } private def splitQuery(query: String): (String, String) = { val stack = scala.collection.mutable.Stack[Char]() var foundSingleQuotes = false var foundDoubleQuotes = false var foundEscapeChar = false var ignoreChar = false var stop = false var bracketCount = 0 val (selectStatement, where) = query.span { ch => { if (stop) { false } else { ignoreChar = false if (foundEscapeChar && (ch == '\\'' || ch == '\\"' || ch == '\\\\')) { foundEscapeChar = false ignoreChar = true } // If escaped single or double quotes found, no need to consider if (!ignoreChar) { if (ch == '\\\\') { foundEscapeChar = true } else if (ch == '\\'') { foundSingleQuotes = !foundSingleQuotes } else if (ch == '\\"') { foundDoubleQuotes = !foundDoubleQuotes } else if (ch == '(' && !foundSingleQuotes && !foundDoubleQuotes) { bracketCount = bracketCount + 1 stack.push(ch) } else if (ch == ')' && !foundSingleQuotes && !foundDoubleQuotes) { bracketCount = bracketCount + 1 stack.pop() if (0 == stack.size) { stop = true } } } true } } } if (bracketCount == 0 || bracketCount % 2 != 0) { sys.error("Parsing error, missing bracket ") } val select = selectStatement.trim select.substring(1, select.length - 1).trim -> where.trim } protected lazy val attributeName: Parser[String] = acceptMatch("attribute name", { case lexical.Identifier(str) => str.toLowerCase case lexical.Keyword(str) if !lexical.delimiters.contains(str) => str.toLowerCase }) private def getTableName(tableIdentifier: Seq[String]): String = { if (tableIdentifier.size > 1) { tableIdentifier.head + "." + tableIdentifier(1) } else { tableIdentifier.head } } protected lazy val loadDataNew: Parser[LogicalPlan] = LOAD ~> DATA ~> opt(LOCAL) ~> INPATH ~> stringLit ~ opt(OVERWRITE) ~ (INTO ~> TABLE ~> (ident <~ ".").? ~ ident) ~ (PARTITION ~>"("~> repsep(partitions, ",") <~ ")").? ~ (OPTIONS ~> "(" ~> repsep(loadOptions, ",") <~ ")").? <~ opt(";") ^^ { case filePath ~ isOverwrite ~ table ~ partitions ~ optionsList => val (databaseNameOp, tableName) = table match { case databaseName ~ tableName => (databaseName, tableName.toLowerCase()) } if (optionsList.isDefined) { validateOptions(optionsList) } val optionsMap = optionsList.getOrElse(List.empty[(String, String)]).toMap val partitionSpec = partitions.getOrElse(List.empty[(String, Option[String])]).toMap CarbonLoadDataCommand( databaseNameOp = convertDbNameToLowerCase(databaseNameOp), tableName = tableName, factPathFromUser = filePath, dimFilesPath = Seq(), options = optionsMap, isOverwriteTable = isOverwrite.isDefined, inputSqlString = null, dataFrame = None, updateModel = None, tableInfoOp = None, internalOptions = Map.empty, partition = partitionSpec) } protected lazy val deleteLoadsByID: Parser[LogicalPlan] = DELETE ~> FROM ~ TABLE ~> (ident <~ ".").? ~ ident ~ (WHERE ~> (SEGMENT ~ "." ~ ID) ~> IN ~> "(" ~> repsep(segmentId, ",")) <~ ")" ~ opt(";") ^^ { case dbName ~ tableName ~ loadids => CarbonDeleteLoadByIdCommand(loadids, dbName, tableName.toLowerCase()) } protected lazy val deleteLoadsByLoadDate: Parser[LogicalPlan] = DELETE ~> FROM ~> TABLE ~> (ident <~ ".").? ~ ident ~ (WHERE ~> (SEGMENT ~ "." 
~ STARTTIME ~> BEFORE) ~ stringLit) <~ opt(";") ^^ { case database ~ table ~ condition => condition match { case dateField ~ dateValue => CarbonDeleteLoadByLoadDateCommand(convertDbNameToLowerCase(database), table.toLowerCase(), dateField, dateValue) } } protected lazy val cleanFiles: Parser[LogicalPlan] = CLEAN ~> FILES ~> FOR ~> TABLE ~> (ident <~ ".").? ~ ident <~ opt(";") ^^ { case databaseName ~ tableName => CarbonCleanFilesCommand( convertDbNameToLowerCase(databaseName), Option(tableName.toLowerCase())) } protected lazy val explainPlan: Parser[LogicalPlan] = (EXPLAIN ~> opt(EXTENDED)) ~ startCommand ^^ { case isExtended ~ logicalPlan => logicalPlan match { case _: CarbonCreateTableCommand => ExplainCommand(logicalPlan, extended = isExtended.isDefined) case _ => CarbonToSparkAdapater.getExplainCommandObj } } protected lazy val showLoads: Parser[LogicalPlan] = (SHOW ~> opt(HISTORY) <~ SEGMENTS <~ FOR <~ TABLE) ~ (ident <~ ".").? ~ ident ~ (LIMIT ~> numericLit).? <~ opt(";") ^^ { case showHistory ~ databaseName ~ tableName ~ limit => CarbonShowLoadsCommand( convertDbNameToLowerCase(databaseName), tableName.toLowerCase(), limit, showHistory.isDefined) } protected lazy val alterTableModifyDataType: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ CHANGE ~ ident ~ ident ~ ident ~ opt("(" ~> rep1sep(valueOptions, ",") <~ ")") <~ opt(";") ^^ { case dbName ~ table ~ change ~ columnName ~ columnNameCopy ~ dataType ~ values => // both the column names should be same if (!columnName.equalsIgnoreCase(columnNameCopy)) { throw new MalformedCarbonCommandException( "Column names provided are different. Both the column names should be same") } val alterTableChangeDataTypeModel = AlterTableDataTypeChangeModel(parseDataType(dataType.toLowerCase, values), convertDbNameToLowerCase(dbName), table.toLowerCase, columnName.toLowerCase, columnNameCopy.toLowerCase) CarbonAlterTableDataTypeChangeCommand(alterTableChangeDataTypeModel) } protected lazy val alterTableAddColumns: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ (ADD ~> COLUMNS ~> "(" ~> repsep(anyFieldDef, ",") <~ ")") ~ (TBLPROPERTIES ~> "(" ~> repsep(loadOptions, ",") <~ ")").? <~ opt(";") ^^ { case dbName ~ table ~ fields ~ tblProp => fields.foreach{ f => if (isComplexDimDictionaryExclude(f.dataType.get)) { throw new MalformedCarbonCommandException( s"Add column is unsupported for complex datatype column: ${f.column}") } } val tableProps = if (tblProp.isDefined) { tblProp.get.groupBy(_._1.toLowerCase).foreach(f => if (f._2.size > 1) { val name = f._1.toLowerCase val colName = name.substring(14) if (name.startsWith("default.value.") && fields.count(p => p.column.equalsIgnoreCase(colName)) == 1) { LOGGER.error(s"Duplicate default value exist for new column: ${ colName }") LOGGER.audit( s"Validation failed for Create/Alter Table Operation " + s"for ${ table }. 
" + s"Duplicate default value exist for new column: ${ colName }") sys.error(s"Duplicate default value exist for new column: ${ colName }") } } ) // default value should not be converted to lower case val tblProps = tblProp.get .map(f => if (CarbonCommonConstants.TABLE_BLOCKSIZE.equalsIgnoreCase(f._1) || CarbonCommonConstants.SORT_COLUMNS.equalsIgnoreCase(f._1) || CarbonCommonConstants.LOCAL_DICTIONARY_ENABLE.equalsIgnoreCase(f._1) || CarbonCommonConstants.LOCAL_DICTIONARY_THRESHOLD.equalsIgnoreCase(f._1)) { throw new MalformedCarbonCommandException( s"Unsupported Table property in add column: ${ f._1 }") } else if (f._1.toLowerCase.startsWith("default.value.")) { if (fields.count(field => checkFieldDefaultValue(field.column, f._1.toLowerCase)) == 1) { f._1 -> f._2 } else { throw new MalformedCarbonCommandException( s"Default.value property does not matches with the columns in ALTER command. " + s"Column name in property is: ${ f._1}") } } else { f._1 -> f._2.toLowerCase }) scala.collection.mutable.Map(tblProps: _*) } else { scala.collection.mutable.Map.empty[String, String] } val tableModel = prepareTableModel (false, convertDbNameToLowerCase(dbName), table.toLowerCase, fields.map(convertFieldNamesToLowercase), Seq.empty, tableProps, None, true) val alterTableAddColumnsModel = AlterTableAddColumnsModel( convertDbNameToLowerCase(dbName), table, tableProps.toMap, tableModel.dimCols, tableModel.msrCols, tableModel.highcardinalitydims.getOrElse(Seq.empty)) CarbonAlterTableAddColumnCommand(alterTableAddColumnsModel) } private def checkFieldDefaultValue(fieldName: String, defaultValueColumnName: String): Boolean = { defaultValueColumnName.equalsIgnoreCase("default.value." + fieldName) } private def convertFieldNamesToLowercase(field: Field): Field = { val name = field.column.toLowerCase field.copy(column = name, name = Some(name)) } protected lazy val alterTableDropColumn: Parser[LogicalPlan] = ALTER ~> TABLE ~> (ident <~ ".").? ~ ident ~ DROP ~ COLUMNS ~ ("(" ~> rep1sep(ident, ",") <~ ")") <~ opt(";") ^^ { case dbName ~ table ~ drop ~ columns ~ values => // validate that same column name is not repeated values.map(_.toLowerCase).groupBy(identity).collect { case (x, ys) if ys.lengthCompare(1) > 0 => throw new MalformedCarbonCommandException(s"$x is duplicate. Duplicate columns not " + s"allowed") } val alterTableDropColumnModel = AlterTableDropColumnModel(convertDbNameToLowerCase(dbName), table.toLowerCase, values.map(_.toLowerCase)) CarbonAlterTableDropColumnCommand(alterTableDropColumnModel) } def getFields(schema: Seq[StructField]): Seq[Field] = { schema.map { col => var columnComment: String = "" var plainComment: String = "" if (col.getComment().isDefined) { columnComment = " comment \\"" + col.getComment().get + "\\"" plainComment = col.getComment().get } val x = if (col.dataType.catalogString == "float") { '`' + col.name + '`' + " double" + columnComment } else { '`' + col.name + '`' + ' ' + col.dataType.catalogString + columnComment } val f: Field = anyFieldDef(new lexical.Scanner(x.toLowerCase)) match { case Success(field, _) => field.asInstanceOf[Field] case failureOrError => throw new MalformedCarbonCommandException( s"Unsupported data type: ${ col.dataType }") } // the data type of the decimal type will be like decimal(10,0) // so checking the start of the string and taking the precision and scale. 
// resetting the data type with decimal if (f.dataType.getOrElse("").startsWith("decimal")) { val (precision, scale) = CommonUtil.getScaleAndPrecision(col.dataType.catalogString) f.precision = precision f.scale = scale f.dataType = Some("decimal") } if (f.dataType.getOrElse("").startsWith("char")) { f.dataType = Some("char") } else if (f.dataType.getOrElse("").startsWith("float")) { f.dataType = Some("double") } f.rawSchema = x f.columnComment = plainComment f } } def addPreAggFunction(sql: String): String = { addPreAgg(new lexical.Scanner(sql.toLowerCase)) match { case Success(query, _) => query case _ => throw new MalformedCarbonCommandException(s"Unsupported query") } } def addPreAggLoadFunction(sql: String): String = { addPreAggLoad(new lexical.Scanner(sql.toLowerCase)) match { case Success(query, _) => query case _ => throw new MalformedCarbonCommandException(s"Unsupported query") } } def getBucketFields( properties: mutable.Map[String, String], fields: Seq[Field], options: CarbonOption): Option[BucketFields] = { if (!CommonUtil.validateTblProperties(properties, fields)) { throw new MalformedCarbonCommandException("Invalid table properties") } if (options.isBucketingEnabled) { if (options.bucketNumber.toString.contains("-") || options.bucketNumber.toString.contains("+") || options.bucketNumber == 0) { throw new MalformedCarbonCommandException("INVALID NUMBER OF BUCKETS SPECIFIED") } else { Some(BucketFields(options.bucketColumns.toLowerCase.split(",").map(_.trim), options.bucketNumber)) } } else { None } } }
sgururajshetty/carbondata
integration/spark2/src/main/scala/org/apache/spark/sql/parser/CarbonSpark2SqlParser.scala
Scala
apache-2.0
27,896
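For reference, a few statements of the shapes the parser rules above accept (loadDataNew, deleteLoadsByID, deleteLoadsByLoadDate, cleanFiles, showLoads, and the ALTER TABLE rules). This is an illustrative sketch only: the table, column, and path names are invented, and the OPTIONS('key'='value') keys follow CarbonData's convention defined elsewhere in the grammar, not in the excerpt above.

object CarbonSqlParserExamples {
  // Illustrative inputs only; identifiers and values are made up.
  val statements: Seq[String] = Seq(
    "LOAD DATA INPATH 'hdfs://host/path/sample.csv' INTO TABLE db1.t1 OPTIONS('DELIMITER'=',')",
    "DELETE FROM TABLE db1.t1 WHERE SEGMENT.ID IN (0, 1)",
    "DELETE FROM TABLE db1.t1 WHERE SEGMENT.STARTTIME BEFORE '2018-06-01 12:05:06'",
    "CLEAN FILES FOR TABLE db1.t1",
    "SHOW SEGMENTS FOR TABLE db1.t1 LIMIT 10",
    "ALTER TABLE db1.t1 CHANGE c1 c1 DECIMAL(10,2)",
    "ALTER TABLE db1.t1 ADD COLUMNS (c2 STRING) TBLPROPERTIES('DEFAULT.VALUE.c2'='abc')",
    "ALTER TABLE db1.t1 DROP COLUMNS (c2)"
  )
}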
package sampler.abc.actor.children.worker import org.scalatest.FreeSpec import org.scalatest.mockito.MockitoSugar class AborterTest extends FreeSpec with MockitoSugar { trait Setup { val instance = new Aborter {} val exception = new DetectedAbortionException() } "Aborter should /" - { "throw exceptions if and only if aborted" - { "checkIfAborted throws DetectedAbortionException if it has been aborted" in new Setup { instance.abort() intercept[DetectedAbortionException]{ instance.checkIfAborted } } "checkIfAborted doesn't throw exception if not aborted" in new Setup { instance.checkIfAborted } "resetting" in { fail("TODO") // abort, reset, check no exception } } "query abort status" - { "when not aborted" in fail("TODO") "when aborted" in fail("TODO") "when reset" in fail("TODO") } } }
tearne/Sampler
sampler-abc/src/test/scala/sampler/abc/actor/children/worker/AborterTest.scala
Scala
apache-2.0
936
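The production Aborter trait is not part of this row; a minimal sketch consistent with the two passing tests above (abort() sets a flag, checkIfAborted throws DetectedAbortionException once aborted) might look as follows. The AtomicBoolean flag, the reset() name, and the "Sketch" suffix are assumptions for illustration, not the sampler-abc implementation.

import java.util.concurrent.atomic.AtomicBoolean

class DetectedAbortionExceptionSketch extends RuntimeException("aborted")

trait AborterSketch {
  // Thread-safe flag: set by abort(), cleared by reset().
  private val aborted = new AtomicBoolean(false)
  def abort(): Unit = aborted.set(true)
  def reset(): Unit = aborted.set(false)          // implied by the "resetting" TODO test
  def checkIfAborted(): Unit =
    if (aborted.get()) throw new DetectedAbortionExceptionSketch
}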
package org.aprsdroid.app import _root_.android.Manifest import _root_.android.content.{Context, Intent} import _root_.android.location.{Location, LocationManager} import _root_.android.os.Bundle import _root_.android.content.SharedPreferences import _root_.android.content.SharedPreferences.OnSharedPreferenceChangeListener import _root_.android.preference.{PreferenceActivity, PreferenceManager} import _root_.android.widget.Toast class LocationPrefs extends PreferenceActivity with OnSharedPreferenceChangeListener with PermissionHelper { lazy val prefs = new PrefsWrapper(this) def loadXml() { addPreferencesFromResource(R.xml.location) addPreferencesFromResource(LocationSource.instanciatePrefsAct(prefs)) } override def onCreate(savedInstanceState: Bundle) { super.onCreate(savedInstanceState) loadXml() getPreferenceScreen().getSharedPreferences().registerOnSharedPreferenceChangeListener(this) } override def onDestroy() { super.onDestroy() getPreferenceScreen().getSharedPreferences().unregisterOnSharedPreferenceChangeListener(this) } override def onSharedPreferenceChanged(sp: SharedPreferences, key : String) { if (key == "loc_source" || key == "manual_lat" || key == "manual_lon") { setPreferenceScreen(null) loadXml() } } val REQUEST_GPS = 101 val REQUEST_MAP = 102 override def onNewIntent(i : Intent) { if (i != null && i.getDataString() != null) { i.getDataString() match { case "gps2manual" => checkPermissions(Array(Manifest.permission.ACCESS_FINE_LOCATION), REQUEST_GPS) case "chooseOnMap" => val mapmode = MapModes.defaultMapMode(this, prefs) startActivityForResult(new Intent(this, mapmode.viewClass).putExtra("info", R.string.p_source_from_map_save), REQUEST_MAP) case _ => // ignore } } } override def onActivityResult(reqCode : Int, resultCode : Int, data : Intent) { android.util.Log.d("LocationPrefs", "onActResult: request=" + reqCode + " result=" + resultCode + " " + data) if (resultCode == android.app.Activity.RESULT_OK && reqCode == REQUEST_MAP) { prefs.prefs.edit() .putString("manual_lat", data.getFloatExtra("lat", 0.0f).toString()) .putString("manual_lon", data.getFloatExtra("lon", 0.0f).toString()) .commit() } else super.onActivityResult(reqCode, resultCode, data) } override def getActionName(action: Int): Int = R.string.p_source_get_last override def onAllPermissionsGranted(action: Int): Unit = { val ls = getSystemService(Context.LOCATION_SERVICE).asInstanceOf[LocationManager] val l = ls.getLastKnownLocation(PeriodicGPS.bestProvider(ls)) if (l != null) { val pe = prefs.prefs.edit() pe.putString("manual_lat", l.getLatitude().toString()) pe.putString("manual_lon", l.getLongitude().toString()) pe.commit() } else Toast.makeText(this, getString(R.string.map_track_unknown, prefs.getCallsign()), Toast.LENGTH_SHORT).show() } }
ge0rg/aprsdroid
src/LocationPrefs.scala
Scala
gpl-2.0
2,898
/** * Copyright (C) 2015 Evgeny Igumnov http://evgeny.igumnov.com [email protected] * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU Affero General Public License, version 3, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Affero General Public License for more details. * * You should have received a copy of the GNU Affero General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package io.protectednet.model import java.util.Date import scala.concurrent.Future import com.websudos.phantom.dsl._ case class Network(id: String, adminId: String, blockDate: Date ) class Networks extends CassandraTable[ConcreteNetworks, Network] { object id extends StringColumn(this) with PartitionKey[String] object adminId extends StringColumn(this) object blockDate extends DateColumn(this) def fromRow(row: Row): Network = { Network( id(row), adminId(row), blockDate(row) ) } } abstract class ConcreteNetworks extends Networks with RootConnector { def store(network: Network): Future[ResultSet] = { insert.value(_.id, network.id) .value(_.adminId, network.adminId) .value(_.blockDate, network.blockDate) .consistencyLevel_=(ConsistencyLevel.ALL) .future() } def getById(id: String): Future[Option[Network]] = { select.where(_.id eqs id).one() } def getAll(): Future[List[Network]] = { select.fetch() } }
evgenyigumnov/protectednet
app/io/protectednet/model/Networks.scala
Scala
agpl-3.0
1,794
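A hedged usage sketch of the table defined above: storing a Network and reading it back through the store/getById methods. The keyspace and session wiring that RootConnector requires is omitted, and the ids used here are invented.

import java.util.Date
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
import io.protectednet.model.{ConcreteNetworks, Network}

object NetworksUsageSketch {
  // `networks` is assumed to be a ConcreteNetworks already bound to a session elsewhere.
  def blockAndFetch(networks: ConcreteNetworks): Future[Option[Network]] = {
    val net = Network(id = "net-42", adminId = "admin-1", blockDate = new Date())
    networks.store(net).flatMap(_ => networks.getById(net.id))
  }
}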
package beam.router import java.io.{File, PrintWriter} import beam.agentsim.infrastructure.taz.TAZ import beam.router.BeamSkimmer.{BeamSkimmerADT, BeamSkimmerKey, SkimInternal} import beam.router.Modes.BeamMode import org.matsim.api.core.v01.Id import org.scalatest.{BeforeAndAfter, FlatSpec} import scala.collection.concurrent.TrieMap import scala.util.Random class BeamSkimmerSpec extends FlatSpec with BeforeAndAfter { val beamSkimmerAsObject: BeamSkimmerADT = TrieMap( (23, BeamMode.CAR, Id.create(2, classOf[TAZ]), Id.create(1, classOf[TAZ])) -> SkimInternal( time = 205.0, generalizedTime = 215.0, cost = 6.491215096413, generalizedCost = 6.968992874190778, distance = 4478.644999999999, count = 1, energy = 1.4275908092571782E7 ), (7, BeamMode.WALK_TRANSIT, Id.create(1, classOf[TAZ]), Id.create(1, classOf[TAZ])) -> SkimInternal( time = 90.0, generalizedTime = 108.99999999999999, cost = 0.0, generalizedCost = 1.232222222222222, distance = 1166.869, count = 1, energy = 2908432.6946756938 ) ) private val beamSkimmerAsCsv = """hour,mode,origTaz,destTaz,travelTimeInS,generalizedTimeInS,cost,generalizedCost,distanceInM,numObservations,energy |23,CAR,2,1,205.0,215.0,6.491215096413,6.968992874190778,4478.644999999999,1,1.4275908092571782E7 |7,WALK_TRANSIT,1,1,90.0,108.99999999999999,0.0,1.232222222222222,1166.869,1,2908432.6946756938 |""".stripMargin it should "serialize not empty map to CSV" in { val csvContent = BeamSkimmer.toCsv(beamSkimmerAsObject).mkString assert(csvContent === beamSkimmerAsCsv) } it should "serialize empty map to CSV" in { val emptyMap = TrieMap.empty[BeamSkimmerKey, SkimInternal] val csvContent = BeamSkimmer.toCsv(emptyMap).mkString assert(csvContent === BeamSkimmer.CsvLineHeader) } it should "deserialize from a CSV file" in { val file = File.createTempFile(Random.alphanumeric.take(10).mkString, ".csv") try { writeToFile(file, beamSkimmerAsCsv) val history: BeamSkimmerADT = BeamSkimmer.fromCsv(file.getAbsolutePath) assert(history === beamSkimmerAsObject) } finally { file.delete() } } private def writeToFile(file: File, content: String): Unit = { val writer = new PrintWriter(file) try { writer.println(content) } finally { writer.close() } } }
colinsheppard/beam
src/test/scala/beam/router/BeamSkimmerSpec.scala
Scala
gpl-3.0
2,442
import scala.reflect.runtime.universe._ import scala.tools.reflect.Eval object Test extends dotty.runtime.LegacyApp { { var counter = 0 lazy val x = { counter += 1; counter } lazy val y = { counter += 1; counter } val code = reify { def foo = y // ensures that y is the first freevar we find val bar = reify { println(y * x) } bar.eval println(x) println(y) } code.eval } }
yusuke2255/dotty
tests/disabled/macro/run/reify_newimpl_52.scala
Scala
bsd-3-clause
433
/* * Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com> */ package com.lightbend.lagom.internal.client import java.net.{ URI, URLEncoder } import java.util.Locale import akka.NotUsed import akka.stream.Materializer import akka.stream.scaladsl.{ Sink, Source } import akka.util.ByteString import com.lightbend.lagom.internal.api.transport.LagomServiceApiBridge import play.api.http.HeaderNames import play.api.libs.streams.AkkaStreams import play.api.libs.ws.{ InMemoryBody, WSClient } import scala.collection.immutable import scala.concurrent.{ ExecutionContext, Future } private[lagom] abstract class ClientServiceCallInvoker[Request, Response]( ws: WSClient, serviceName: String, path: String, queryParams: Map[String, Seq[String]] )(implicit ec: ExecutionContext, mat: Materializer) extends LagomServiceApiBridge { val descriptor: Descriptor val serviceLocator: ServiceLocator val call: Call[Request, Response] def doInvoke(request: Request, requestHeaderHandler: RequestHeader => RequestHeader): Future[(ResponseHeader, Response)] = { serviceLocatorDoWithService(serviceLocator, descriptor, call, uri => { val queryString = if (queryParams.nonEmpty) { queryParams.flatMap { case (name, values) => values.map(value => URLEncoder.encode(name, "utf-8") + "=" + URLEncoder.encode(value, "utf-8")) }.mkString("?", "&", "") } else "" val url = s"$uri$path$queryString" val method = methodForCall(call) val requestSerializer = callRequestSerializer(call) val serializer = messageSerializerSerializerForRequest[Request, Nothing](requestSerializer) val responseSerializer = callResponseSerializer(call) val requestHeader = requestHeaderHandler(newRequestHeader(method, URI.create(url), negotiatedSerializerProtocol(serializer), messageSerializerAcceptResponseProtocols(responseSerializer), Option(newServicePrincipal(serviceName)), Map.empty)) val requestSerializerStreamed = messageSerializerIsStreamed(requestSerializer) val responseSerializerStreamed = messageSerializerIsStreamed(responseSerializer) val result: Future[(ResponseHeader, Response)] = (requestSerializerStreamed, responseSerializerStreamed) match { case (false, false) => makeStrictCall(requestHeader, requestSerializer.asInstanceOf[MessageSerializer[Request, ByteString]], responseSerializer.asInstanceOf[MessageSerializer[Response, ByteString]], request) case (false, true) => makeStreamedResponseCall(requestHeader, requestSerializer.asInstanceOf[MessageSerializer[Request, ByteString]], responseSerializer.asInstanceOf[MessageSerializer[Response, AkkaStreamsSource[ByteString, NotUsed]]], request) case (true, false) => makeStreamedRequestCall( requestHeader, requestSerializer.asInstanceOf[MessageSerializer[Request, AkkaStreamsSource[ByteString, NotUsed]]], responseSerializer.asInstanceOf[MessageSerializer[Response, ByteString]], request ) case (true, true) => makeStreamedCall( requestHeader, requestSerializer.asInstanceOf[MessageSerializer[Request, AkkaStreamsSource[ByteString, NotUsed]]], responseSerializer.asInstanceOf[MessageSerializer[Response, AkkaStreamsSource[ByteString, NotUsed]]], request ) } result }).map { case Some(response) => response case None => throw new IllegalStateException(s"Service ${descriptorName(descriptor)} was not found by service locator") } } /** * A call that has a strict request and a streamed response. * * Currently implemented using a WebSocket, and sending the request as the first and only message. 
*/ private def makeStreamedResponseCall( requestHeader: RequestHeader, requestSerializer: MessageSerializer[Request, ByteString], responseSerializer: MessageSerializer[_, AkkaStreamsSource[ByteString, NotUsed]], request: Request ): Future[(ResponseHeader, Response)] = { val serializer = messageSerializerSerializerForRequest[Request, ByteString](requestSerializer) val transportRequestHeader = headerFilterTransformClientRequest(descriptorHeaderFilter(descriptor), requestHeader) // We have a single source, followed by a maybe source (that is, a source that never produces any message, and // never terminates). The maybe source is necessary because we want the response stream to stay open. val requestAsStream = if (messageSerializerIsUsed(requestSerializer)) { Source.single(negotiatedSerializerSerialize(serializer, request)).concat(Source.maybe) } else { // If it's not used, don't send any message Source.maybe[ByteString].mapMaterializedValue(_ => NotUsed) } doMakeStreamedCall(requestAsStream, serializer, transportRequestHeader).map( (deserializeResponseStream(responseSerializer, requestHeader) _).tupled ) } /** * A call that has a streamed request and a strict response. * * Currently implemented using a WebSocket, that converts the first message received to the strict message. If no * message is received, it assumes the response is an empty message. */ private def makeStreamedRequestCall( requestHeader: RequestHeader, requestSerializer: MessageSerializer[_, AkkaStreamsSource[ByteString, NotUsed]], responseSerializer: MessageSerializer[Response, ByteString], request: Request ): Future[(ResponseHeader, Response)] = { val serializer = messageSerializerSerializerForRequest(requestSerializer.asInstanceOf[MessageSerializer[AkkaStreamsSource[Any, NotUsed], AkkaStreamsSource[ByteString, NotUsed]]]) val requestStream = negotiatedSerializerSerialize(serializer, request.asInstanceOf[AkkaStreamsSource[Any, NotUsed]]) val headerFilter = descriptorHeaderFilter(descriptor) val transportRequestHeader = headerFilterTransformClientRequest(headerFilter, requestHeader) for { (transportResponseHeader, responseStream) <- doMakeStreamedCall(akkaStreamsSourceAsScala(requestStream), serializer, transportRequestHeader) // We want to take the first element (if it exists), and then ignore all subsequent elements. Ignoring, rather // than cancelling the stream, is important, because this is a WebSocket connection, we want the upstream to // still remain open, but if we cancel the stream, the upstream will disconnect too. maybeResponse <- responseStream via AkkaStreams.ignoreAfterCancellation runWith Sink.headOption } yield { val bytes = maybeResponse.getOrElse(ByteString.empty) val responseHeader = headerFilterTransformClientResponse(headerFilter, transportResponseHeader, requestHeader) val deserializer = messageSerializerDeserializer(responseSerializer, messageHeaderProtocol(responseHeader)) responseHeader -> negotiatedDeserializerDeserialize(deserializer, bytes) } } /** * A call that is streamed in both directions. 
*/ private def makeStreamedCall( requestHeader: RequestHeader, requestSerializer: MessageSerializer[_, AkkaStreamsSource[ByteString, NotUsed]], responseSerializer: MessageSerializer[_, AkkaStreamsSource[ByteString, NotUsed]], request: Request ): Future[(ResponseHeader, Response)] = { val serializer = messageSerializerSerializerForRequest( requestSerializer.asInstanceOf[MessageSerializer[AkkaStreamsSource[Any, NotUsed], AkkaStreamsSource[ByteString, NotUsed]]] ) val requestStream = negotiatedSerializerSerialize(serializer, request.asInstanceOf[AkkaStreamsSource[Any, NotUsed]]) val transportRequestHeader = headerFilterTransformClientRequest(descriptorHeaderFilter(descriptor), requestHeader) doMakeStreamedCall(akkaStreamsSourceAsScala(requestStream), serializer, transportRequestHeader).map( (deserializeResponseStream(responseSerializer, requestHeader) _).tupled ) } private def deserializeResponseStream( responseSerializer: MessageSerializer[_, AkkaStreamsSource[ByteString, NotUsed]], requestHeader: RequestHeader )(transportResponseHeader: ResponseHeader, response: Source[ByteString, NotUsed]): (ResponseHeader, Response) = { val responseHeader = headerFilterTransformClientResponse(descriptorHeaderFilter(descriptor), transportResponseHeader, requestHeader) val deserializer = messageSerializerDeserializer( responseSerializer.asInstanceOf[MessageSerializer[AkkaStreamsSource[Any, NotUsed], AkkaStreamsSource[ByteString, NotUsed]]], messageHeaderProtocol(responseHeader) ) responseHeader -> negotiatedDeserializerDeserialize(deserializer, toAkkaStreamsSource(response)).asInstanceOf[Response] } protected def doMakeStreamedCall(requestStream: Source[ByteString, NotUsed], requestSerializer: NegotiatedSerializer[_, _], requestHeader: RequestHeader): Future[(ResponseHeader, Source[ByteString, NotUsed])] /** * A call that is strict in both directions. 
*/ private def makeStrictCall(requestHeader: RequestHeader, requestSerializer: MessageSerializer[Request, ByteString], responseSerializer: MessageSerializer[Response, ByteString], request: Request): Future[(ResponseHeader, Response)] = { val headerFilter = descriptorHeaderFilter(descriptor) val transportRequestHeader = headerFilterTransformClientRequest(headerFilter, requestHeader) val contentTypeHeader = messageProtocolToContentTypeHeader(messageHeaderProtocol(transportRequestHeader)).toSeq.map(HeaderNames.CONTENT_TYPE -> _) val requestHolder = ws.url(requestHeaderUri(requestHeader).toString) .withHttpHeaders(contentTypeHeader: _*) .withMethod(requestHeaderMethod(requestHeader)) val requestWithBody = if (messageSerializerIsUsed(requestSerializer)) { val serializer = messageSerializerSerializerForRequest(requestSerializer) val body = negotiatedSerializerSerialize(serializer, request) requestHolder.withBody(InMemoryBody(body)) } else requestHolder val requestHeaders = messageHeaderHeaders(transportRequestHeader).toSeq.collect { case (_, values) if values.nonEmpty => values.head._1 -> values.map(_._2).mkString(", ") } val acceptHeader = { val accept = requestHeaderAcceptedResponseProtocols(transportRequestHeader).flatMap { accept => messageProtocolToContentTypeHeader(accept) }.mkString(", ") if (accept.nonEmpty) Seq(HeaderNames.ACCEPT -> accept) else Nil } requestWithBody.withHttpHeaders(requestHeaders ++ acceptHeader: _*).execute().map { response => // Create the message header val protocol = messageProtocolFromContentTypeHeader(response.header(HeaderNames.CONTENT_TYPE)) val headers = response.allHeaders.map { case (key, values) => key.toLowerCase(Locale.ENGLISH) -> values.map(key -> _).to[immutable.Seq] } val transportResponseHeader = newResponseHeader(response.status, protocol, headers) val responseHeader = headerFilterTransformClientResponse(headerFilter, transportResponseHeader, requestHeader) if (response.status >= 400 && response.status <= 599) { throw exceptionSerializerDeserializeHttpException( descriptorExceptionSerializer(descriptor), response.status, protocol, response.bodyAsBytes ) } else { val deserializer = messageSerializerDeserializer(responseSerializer, messageHeaderProtocol(responseHeader)) responseHeader -> negotiatedDeserializerDeserialize(deserializer, response.bodyAsBytes) } } } }
edouardKaiser/lagom
service/core/client/src/main/scala/com/lightbend/lagom/internal/client/ClientServiceCallInvoker.scala
Scala
apache-2.0
11,708
package org.dsa.iot.ignition.spark import org.apache.spark.sql.DataFrame import org.dsa.iot.rx.RxMergerN import org.dsa.iot.scala.Having import com.ignition.frame.SparkRuntime import rx.lang.scala.Observable /** * Executes an SQL statement against the inputs. Each input is injected as a table * under the name "inputX" where X is the index of the input. */ class SQLQuery(implicit rt: SparkRuntime) extends RxMergerN[DataFrame, DataFrame] { def query(sql: String): SQLQuery = this having (query <~ sql) val query = Port[String]("query") protected def compute = query.in flatMap { cql => val sqlq = com.ignition.frame.SQLQuery(cql) sources.combinedIns map { dfs => dfs.zipWithIndex foreach { case (df, idx) => producer(df) --> sqlq.in(idx) } sqlq.output } } } /** * Factory for [[SQLQuery]] instances. */ object SQLQuery { /** * Creates a new SQLQuery instance. */ def apply()(implicit rt: SparkRuntime): SQLQuery = new SQLQuery }
IOT-DSA/dslink-scala-ignition
src/main/scala/org/dsa/iot/ignition/spark/SQLQuery.scala
Scala
apache-2.0
1,004
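A brief, hedged sketch of constructing the block above and setting its SQL. Attaching the upstream DataFrame producers is done through the ports inherited from RxMergerN, which are not shown in this file, so that wiring is omitted here; the query text itself is an invented example.

import com.ignition.frame.SparkRuntime
import org.dsa.iot.ignition.spark.SQLQuery

object SQLQuerySketch {
  // Inputs are exposed to the query as tables input0, input1, ... (see the scaladoc above).
  def build(implicit rt: SparkRuntime): SQLQuery =
    SQLQuery().query("SELECT * FROM input0 WHERE amount > 100")
}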
/** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package kafka.server import java.util.Properties import kafka.api.{ApiVersion, KAFKA_0_10_0_IV1} import kafka.cluster.EndPoint import kafka.consumer.ConsumerConfig import kafka.coordinator.OffsetConfig import kafka.message.{BrokerCompressionCodec, CompressionCodec, Message, MessageSet} import kafka.utils.CoreUtils import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.common.config.ConfigDef.ValidList import org.apache.kafka.common.config.SaslConfigs import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, SslConfigs} import org.apache.kafka.common.metrics.MetricsReporter import org.apache.kafka.common.protocol.SecurityProtocol import org.apache.kafka.common.record.TimestampType import scala.collection.{Map, immutable} import scala.collection.JavaConverters._ object Defaults { /** ********* Zookeeper Configuration ***********/ val ZkSessionTimeoutMs = 6000 val ZkSyncTimeMs = 2000 val ZkEnableSecureAcls = false /** ********* General Configuration ***********/ val BrokerIdGenerationEnable = true val MaxReservedBrokerId = 1000 val BrokerId = -1 val MessageMaxBytes = 1000000 + MessageSet.LogOverhead val NumNetworkThreads = 3 val NumIoThreads = 8 val BackgroundThreads = 10 val QueuedMaxRequests = 500 /************* Authorizer Configuration ***********/ val AuthorizerClassName = "" /** ********* Socket Server Configuration ***********/ val Port = 9092 val HostName: String = new String("") val SocketSendBufferBytes: Int = 100 * 1024 val SocketReceiveBufferBytes: Int = 100 * 1024 val SocketRequestMaxBytes: Int = 100 * 1024 * 1024 val MaxConnectionsPerIp: Int = Int.MaxValue val MaxConnectionsPerIpOverrides: String = "" val ConnectionsMaxIdleMs = 10 * 60 * 1000L val RequestTimeoutMs = 30000 /** ********* Log Configuration ***********/ val NumPartitions = 1 val LogDir = "/tmp/kafka-logs" val LogSegmentBytes = 1 * 1024 * 1024 * 1024 val LogRollHours = 24 * 7 val LogRollJitterHours = 0 val LogRetentionHours = 24 * 7 val LogRetentionBytes = -1L val LogCleanupIntervalMs = 5 * 60 * 1000L val Delete = "delete" val Compact = "compact" val LogCleanupPolicy = Delete val LogCleanerThreads = 1 val LogCleanerIoMaxBytesPerSecond = Double.MaxValue val LogCleanerDedupeBufferSize = 128 * 1024 * 1024L val LogCleanerIoBufferSize = 512 * 1024 val LogCleanerDedupeBufferLoadFactor = 0.9d val LogCleanerBackoffMs = 15 * 1000 val LogCleanerMinCleanRatio = 0.5d val LogCleanerEnable = true val LogCleanerDeleteRetentionMs = 24 * 60 * 60 * 1000L val LogCleanerMinCompactionLagMs = 0L val LogIndexSizeMaxBytes = 10 * 1024 * 1024 val LogIndexIntervalBytes = 4096 val LogFlushIntervalMessages = Long.MaxValue val LogDeleteDelayMs = 60000 val LogFlushSchedulerIntervalMs = Long.MaxValue val LogFlushOffsetCheckpointIntervalMs = 60000 val 
LogPreAllocateEnable = false // lazy val as `InterBrokerProtocolVersion` is defined later lazy val LogMessageFormatVersion = InterBrokerProtocolVersion val LogMessageTimestampType = "CreateTime" val LogMessageTimestampDifferenceMaxMs = Long.MaxValue val NumRecoveryThreadsPerDataDir = 1 val AutoCreateTopicsEnable = true val MinInSyncReplicas = 1 /** ********* Replication configuration ***********/ val ControllerSocketTimeoutMs = RequestTimeoutMs val ControllerMessageQueueSize = Int.MaxValue val DefaultReplicationFactor = 1 val ReplicaLagTimeMaxMs = 10000L val ReplicaSocketTimeoutMs = ConsumerConfig.SocketTimeout val ReplicaSocketReceiveBufferBytes = ConsumerConfig.SocketBufferSize val ReplicaFetchMaxBytes = ConsumerConfig.FetchSize val ReplicaFetchWaitMaxMs = 500 val ReplicaFetchMinBytes = 1 val ReplicaFetchResponseMaxBytes = 10 * 1024 * 1024 val NumReplicaFetchers = 1 val ReplicaFetchBackoffMs = 1000 val ReplicaHighWatermarkCheckpointIntervalMs = 5000L val FetchPurgatoryPurgeIntervalRequests = 1000 val ProducerPurgatoryPurgeIntervalRequests = 1000 val AutoLeaderRebalanceEnable = true val LeaderImbalancePerBrokerPercentage = 10 val LeaderImbalanceCheckIntervalSeconds = 300 val UncleanLeaderElectionEnable = true val InterBrokerSecurityProtocol = SecurityProtocol.PLAINTEXT.toString val InterBrokerProtocolVersion = ApiVersion.latestVersion.toString /** ********* Controlled shutdown configuration ***********/ val ControlledShutdownMaxRetries = 3 val ControlledShutdownRetryBackoffMs = 5000 val ControlledShutdownEnable = true /** ********* Group coordinator configuration ***********/ val GroupMinSessionTimeoutMs = 6000 val GroupMaxSessionTimeoutMs = 300000 /** ********* Offset management configuration ***********/ val OffsetMetadataMaxSize = OffsetConfig.DefaultMaxMetadataSize val OffsetsLoadBufferSize = OffsetConfig.DefaultLoadBufferSize val OffsetsTopicReplicationFactor = OffsetConfig.DefaultOffsetsTopicReplicationFactor val OffsetsTopicPartitions: Int = OffsetConfig.DefaultOffsetsTopicNumPartitions val OffsetsTopicSegmentBytes: Int = OffsetConfig.DefaultOffsetsTopicSegmentBytes val OffsetsTopicCompressionCodec: Int = OffsetConfig.DefaultOffsetsTopicCompressionCodec.codec val OffsetsRetentionMinutes: Int = 24 * 60 val OffsetsRetentionCheckIntervalMs: Long = OffsetConfig.DefaultOffsetsRetentionCheckIntervalMs val OffsetCommitTimeoutMs = OffsetConfig.DefaultOffsetCommitTimeoutMs val OffsetCommitRequiredAcks = OffsetConfig.DefaultOffsetCommitRequiredAcks /** ********* Quota Configuration ***********/ val ProducerQuotaBytesPerSecondDefault = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault val ConsumerQuotaBytesPerSecondDefault = ClientQuotaManagerConfig.QuotaBytesPerSecondDefault val NumQuotaSamples: Int = ClientQuotaManagerConfig.DefaultNumQuotaSamples val QuotaWindowSizeSeconds: Int = ClientQuotaManagerConfig.DefaultQuotaWindowSizeSeconds val NumReplicationQuotaSamples: Int = ReplicationQuotaManagerConfig.DefaultNumQuotaSamples val ReplicationQuotaWindowSizeSeconds: Int = ReplicationQuotaManagerConfig.DefaultQuotaWindowSizeSeconds val DeleteTopicEnable = false val CompressionType = "producer" /** ********* Kafka Metrics Configuration ***********/ val MetricNumSamples = 2 val MetricSampleWindowMs = 30000 val MetricReporterClasses = "" /** ********* SSL configuration ***********/ val PrincipalBuilderClass = SslConfigs.DEFAULT_PRINCIPAL_BUILDER_CLASS val SslProtocol = SslConfigs.DEFAULT_SSL_PROTOCOL val SslEnabledProtocols = SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS val SslKeystoreType = 
SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE val SslTruststoreType = SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE val SslKeyManagerAlgorithm = SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM val SslTrustManagerAlgorithm = SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM val SslClientAuthRequired = "required" val SslClientAuthRequested = "requested" val SslClientAuthNone = "none" val SslClientAuth = SslClientAuthNone /** ********* Sasl configuration ***********/ val SaslMechanismInterBrokerProtocol = SaslConfigs.DEFAULT_SASL_MECHANISM val SaslEnabledMechanisms = SaslConfigs.DEFAULT_SASL_ENABLED_MECHANISMS val SaslKerberosKinitCmd = SaslConfigs.DEFAULT_KERBEROS_KINIT_CMD val SaslKerberosTicketRenewWindowFactor = SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_WINDOW_FACTOR val SaslKerberosTicketRenewJitter = SaslConfigs.DEFAULT_KERBEROS_TICKET_RENEW_JITTER val SaslKerberosMinTimeBeforeRelogin = SaslConfigs.DEFAULT_KERBEROS_MIN_TIME_BEFORE_RELOGIN val SaslKerberosPrincipalToLocalRules = SaslConfigs.DEFAULT_SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES } object KafkaConfig { private val LogConfigPrefix = "log." def main(args: Array[String]) { System.out.println(configDef.toHtmlTable) } /** ********* Zookeeper Configuration ***********/ val ZkConnectProp = "zookeeper.connect" val ZkSessionTimeoutMsProp = "zookeeper.session.timeout.ms" val ZkConnectionTimeoutMsProp = "zookeeper.connection.timeout.ms" val ZkSyncTimeMsProp = "zookeeper.sync.time.ms" val ZkEnableSecureAclsProp = "zookeeper.set.acl" /** ********* General Configuration ***********/ val BrokerIdGenerationEnableProp = "broker.id.generation.enable" val MaxReservedBrokerIdProp = "reserved.broker.max.id" val BrokerIdProp = "broker.id" val MessageMaxBytesProp = "message.max.bytes" val NumNetworkThreadsProp = "num.network.threads" val NumIoThreadsProp = "num.io.threads" val BackgroundThreadsProp = "background.threads" val QueuedMaxRequestsProp = "queued.max.requests" val RequestTimeoutMsProp = CommonClientConfigs.REQUEST_TIMEOUT_MS_CONFIG /************* Authorizer Configuration ***********/ val AuthorizerClassNameProp = "authorizer.class.name" /** ********* Socket Server Configuration ***********/ val PortProp = "port" val HostNameProp = "host.name" val ListenersProp = "listeners" val AdvertisedHostNameProp: String = "advertised.host.name" val AdvertisedPortProp = "advertised.port" val AdvertisedListenersProp = "advertised.listeners" val SocketSendBufferBytesProp = "socket.send.buffer.bytes" val SocketReceiveBufferBytesProp = "socket.receive.buffer.bytes" val SocketRequestMaxBytesProp = "socket.request.max.bytes" val MaxConnectionsPerIpProp = "max.connections.per.ip" val MaxConnectionsPerIpOverridesProp = "max.connections.per.ip.overrides" val ConnectionsMaxIdleMsProp = "connections.max.idle.ms" /***************** rack configuration *************/ val RackProp = "broker.rack" /** ********* Log Configuration ***********/ val NumPartitionsProp = "num.partitions" val LogDirsProp = "log.dirs" val LogDirProp = "log.dir" val LogSegmentBytesProp = "log.segment.bytes" val LogRollTimeMillisProp = "log.roll.ms" val LogRollTimeHoursProp = "log.roll.hours" val LogRollTimeJitterMillisProp = "log.roll.jitter.ms" val LogRollTimeJitterHoursProp = "log.roll.jitter.hours" val LogRetentionTimeMillisProp = "log.retention.ms" val LogRetentionTimeMinutesProp = "log.retention.minutes" val LogRetentionTimeHoursProp = "log.retention.hours" val LogRetentionBytesProp = "log.retention.bytes" val LogCleanupIntervalMsProp = "log.retention.check.interval.ms" val LogCleanupPolicyProp = "log.cleanup.policy" 
val LogCleanerThreadsProp = "log.cleaner.threads" val LogCleanerIoMaxBytesPerSecondProp = "log.cleaner.io.max.bytes.per.second" val LogCleanerDedupeBufferSizeProp = "log.cleaner.dedupe.buffer.size" val LogCleanerIoBufferSizeProp = "log.cleaner.io.buffer.size" val LogCleanerDedupeBufferLoadFactorProp = "log.cleaner.io.buffer.load.factor" val LogCleanerBackoffMsProp = "log.cleaner.backoff.ms" val LogCleanerMinCleanRatioProp = "log.cleaner.min.cleanable.ratio" val LogCleanerEnableProp = "log.cleaner.enable" val LogCleanerDeleteRetentionMsProp = "log.cleaner.delete.retention.ms" val LogCleanerMinCompactionLagMsProp = "log.cleaner.min.compaction.lag.ms" val LogIndexSizeMaxBytesProp = "log.index.size.max.bytes" val LogIndexIntervalBytesProp = "log.index.interval.bytes" val LogFlushIntervalMessagesProp = "log.flush.interval.messages" val LogDeleteDelayMsProp = "log.segment.delete.delay.ms" val LogFlushSchedulerIntervalMsProp = "log.flush.scheduler.interval.ms" val LogFlushIntervalMsProp = "log.flush.interval.ms" val LogFlushOffsetCheckpointIntervalMsProp = "log.flush.offset.checkpoint.interval.ms" val LogPreAllocateProp = "log.preallocate" val LogMessageFormatVersionProp = LogConfigPrefix + "message.format.version" val LogMessageTimestampTypeProp = LogConfigPrefix + "message.timestamp.type" val LogMessageTimestampDifferenceMaxMsProp = LogConfigPrefix + "message.timestamp.difference.max.ms" val NumRecoveryThreadsPerDataDirProp = "num.recovery.threads.per.data.dir" val AutoCreateTopicsEnableProp = "auto.create.topics.enable" val MinInSyncReplicasProp = "min.insync.replicas" /** ********* Replication configuration ***********/ val ControllerSocketTimeoutMsProp = "controller.socket.timeout.ms" val DefaultReplicationFactorProp = "default.replication.factor" val ReplicaLagTimeMaxMsProp = "replica.lag.time.max.ms" val ReplicaSocketTimeoutMsProp = "replica.socket.timeout.ms" val ReplicaSocketReceiveBufferBytesProp = "replica.socket.receive.buffer.bytes" val ReplicaFetchMaxBytesProp = "replica.fetch.max.bytes" val ReplicaFetchWaitMaxMsProp = "replica.fetch.wait.max.ms" val ReplicaFetchMinBytesProp = "replica.fetch.min.bytes" val ReplicaFetchResponseMaxBytesProp = "replica.fetch.response.max.bytes" val ReplicaFetchBackoffMsProp = "replica.fetch.backoff.ms" val NumReplicaFetchersProp = "num.replica.fetchers" val ReplicaHighWatermarkCheckpointIntervalMsProp = "replica.high.watermark.checkpoint.interval.ms" val FetchPurgatoryPurgeIntervalRequestsProp = "fetch.purgatory.purge.interval.requests" val ProducerPurgatoryPurgeIntervalRequestsProp = "producer.purgatory.purge.interval.requests" val AutoLeaderRebalanceEnableProp = "auto.leader.rebalance.enable" val LeaderImbalancePerBrokerPercentageProp = "leader.imbalance.per.broker.percentage" val LeaderImbalanceCheckIntervalSecondsProp = "leader.imbalance.check.interval.seconds" val UncleanLeaderElectionEnableProp = "unclean.leader.election.enable" val InterBrokerSecurityProtocolProp = "security.inter.broker.protocol" val InterBrokerProtocolVersionProp = "inter.broker.protocol.version" /** ********* Controlled shutdown configuration ***********/ val ControlledShutdownMaxRetriesProp = "controlled.shutdown.max.retries" val ControlledShutdownRetryBackoffMsProp = "controlled.shutdown.retry.backoff.ms" val ControlledShutdownEnableProp = "controlled.shutdown.enable" /** ********* Group coordinator configuration ***********/ val GroupMinSessionTimeoutMsProp = "group.min.session.timeout.ms" val GroupMaxSessionTimeoutMsProp = "group.max.session.timeout.ms" /** ********* 
Offset management configuration ***********/ val OffsetMetadataMaxSizeProp = "offset.metadata.max.bytes" val OffsetsLoadBufferSizeProp = "offsets.load.buffer.size" val OffsetsTopicReplicationFactorProp = "offsets.topic.replication.factor" val OffsetsTopicPartitionsProp = "offsets.topic.num.partitions" val OffsetsTopicSegmentBytesProp = "offsets.topic.segment.bytes" val OffsetsTopicCompressionCodecProp = "offsets.topic.compression.codec" val OffsetsRetentionMinutesProp = "offsets.retention.minutes" val OffsetsRetentionCheckIntervalMsProp = "offsets.retention.check.interval.ms" val OffsetCommitTimeoutMsProp = "offsets.commit.timeout.ms" val OffsetCommitRequiredAcksProp = "offsets.commit.required.acks" /** ********* Quota Configuration ***********/ val ProducerQuotaBytesPerSecondDefaultProp = "quota.producer.default" val ConsumerQuotaBytesPerSecondDefaultProp = "quota.consumer.default" val NumQuotaSamplesProp = "quota.window.num" val NumReplicationQuotaSamplesProp = "replication.quota.window.num" val QuotaWindowSizeSecondsProp = "quota.window.size.seconds" val ReplicationQuotaWindowSizeSecondsProp = "replication.quota.window.size.seconds" val DeleteTopicEnableProp = "delete.topic.enable" val CompressionTypeProp = "compression.type" /** ********* Kafka Metrics Configuration ***********/ val MetricSampleWindowMsProp = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_CONFIG val MetricNumSamplesProp: String = CommonClientConfigs.METRICS_NUM_SAMPLES_CONFIG val MetricReporterClassesProp: String = CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG /** ********* SSL Configuration ****************/ val PrincipalBuilderClassProp = SslConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG val SslProtocolProp = SslConfigs.SSL_PROTOCOL_CONFIG val SslProviderProp = SslConfigs.SSL_PROVIDER_CONFIG val SslCipherSuitesProp = SslConfigs.SSL_CIPHER_SUITES_CONFIG val SslEnabledProtocolsProp = SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG val SslKeystoreTypeProp = SslConfigs.SSL_KEYSTORE_TYPE_CONFIG val SslKeystoreLocationProp = SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG val SslKeystorePasswordProp = SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG val SslKeyPasswordProp = SslConfigs.SSL_KEY_PASSWORD_CONFIG val SslTruststoreTypeProp = SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG val SslTruststoreLocationProp = SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG val SslTruststorePasswordProp = SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG val SslKeyManagerAlgorithmProp = SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG val SslTrustManagerAlgorithmProp = SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG val SslEndpointIdentificationAlgorithmProp = SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_CONFIG val SslSecureRandomImplementationProp = SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_CONFIG val SslClientAuthProp = SslConfigs.SSL_CLIENT_AUTH_CONFIG /** ********* SASL Configuration ****************/ val SaslMechanismInterBrokerProtocolProp = "sasl.mechanism.inter.broker.protocol" val SaslEnabledMechanismsProp = SaslConfigs.SASL_ENABLED_MECHANISMS val SaslKerberosServiceNameProp = SaslConfigs.SASL_KERBEROS_SERVICE_NAME val SaslKerberosKinitCmdProp = SaslConfigs.SASL_KERBEROS_KINIT_CMD val SaslKerberosTicketRenewWindowFactorProp = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR val SaslKerberosTicketRenewJitterProp = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER val SaslKerberosMinTimeBeforeReloginProp = SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN val SaslKerberosPrincipalToLocalRulesProp = SaslConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES /* Documentation */ /** ********* 
Zookeeper Configuration ***********/ val ZkConnectDoc = "Zookeeper host string" val ZkSessionTimeoutMsDoc = "Zookeeper session timeout" val ZkConnectionTimeoutMsDoc = "The max time that the client waits to establish a connection to zookeeper. If not set, the value in " + ZkSessionTimeoutMsProp + " is used" val ZkSyncTimeMsDoc = "How far a ZK follower can be behind a ZK leader" val ZkEnableSecureAclsDoc = "Set client to use secure ACLs" /** ********* General Configuration ***********/ val BrokerIdGenerationEnableDoc = s"Enable automatic broker id generation on the server. When enabled the value configured for $MaxReservedBrokerIdProp should be reviewed." val MaxReservedBrokerIdDoc = "Max number that can be used for a broker.id" val BrokerIdDoc = "The broker id for this server. If unset, a unique broker id will be generated." + "To avoid conflicts between zookeeper generated broker id's and user configured broker id's, generated broker ids " + "start from " + MaxReservedBrokerIdProp + " + 1." val MessageMaxBytesDoc = "The maximum size of message that the server can receive" val NumNetworkThreadsDoc = "the number of network threads that the server uses for handling network requests" val NumIoThreadsDoc = "The number of io threads that the server uses for carrying out network requests" val BackgroundThreadsDoc = "The number of threads to use for various background processing tasks" val QueuedMaxRequestsDoc = "The number of queued requests allowed before blocking the network threads" val RequestTimeoutMsDoc = CommonClientConfigs.REQUEST_TIMEOUT_MS_DOC /************* Authorizer Configuration ***********/ val AuthorizerClassNameDoc = "The authorizer class that should be used for authorization" /** ********* Socket Server Configuration ***********/ val PortDoc = "DEPRECATED: only used when `listeners` is not set. " + "Use `listeners` instead. \\n" + "the port to listen and accept connections on" val HostNameDoc = "DEPRECATED: only used when `listeners` is not set. " + "Use `listeners` instead. \\n" + "hostname of broker. If this is set, it will only bind to this address. If this is not set, it will bind to all interfaces" val ListenersDoc = "Listener List - Comma-separated list of URIs we will listen on and their protocols.\\n" + " Specify hostname as 0.0.0.0 to bind to all interfaces.\\n" + " Leave hostname empty to bind to default interface.\\n" + " Examples of legal listener lists:\\n" + " PLAINTEXT://myhost:9092,TRACE://:9091\\n" + " PLAINTEXT://0.0.0.0:9092, TRACE://localhost:9093\\n" val AdvertisedHostNameDoc = "DEPRECATED: only used when `advertised.listeners` or `listeners` are not set. " + "Use `advertised.listeners` instead. \\n" + "Hostname to publish to ZooKeeper for clients to use. In IaaS environments, this may " + "need to be different from the interface to which the broker binds. If this is not set, " + "it will use the value for `host.name` if configured. Otherwise " + "it will use the value returned from java.net.InetAddress.getCanonicalHostName()." val AdvertisedPortDoc = "DEPRECATED: only used when `advertised.listeners` or `listeners` are not set. " + "Use `advertised.listeners` instead. \\n" + "The port to publish to ZooKeeper for clients to use. In IaaS environments, this may " + "need to be different from the port to which the broker binds. If this is not set, " + "it will publish the same port that the broker binds to." val AdvertisedListenersDoc = "Listeners to publish to ZooKeeper for clients to use, if different than the listeners above." 
+ " In IaaS environments, this may need to be different from the interface to which the broker binds." + " If this is not set, the value for `listeners` will be used." val SocketSendBufferBytesDoc = "The SO_SNDBUF buffer of the socket sever sockets. If the value is -1, the OS default will be used." val SocketReceiveBufferBytesDoc = "The SO_RCVBUF buffer of the socket sever sockets. If the value is -1, the OS default will be used." val SocketRequestMaxBytesDoc = "The maximum number of bytes in a socket request" val MaxConnectionsPerIpDoc = "The maximum number of connections we allow from each ip address" val MaxConnectionsPerIpOverridesDoc = "Per-ip or hostname overrides to the default maximum number of connections" val ConnectionsMaxIdleMsDoc = "Idle connections timeout: the server socket processor threads close the connections that idle more than this" /************* Rack Configuration **************/ val RackDoc = "Rack of the broker. This will be used in rack aware replication assignment for fault tolerance. Examples: `RACK1`, `us-east-1d`" /** ********* Log Configuration ***********/ val NumPartitionsDoc = "The default number of log partitions per topic" val LogDirDoc = "The directory in which the log data is kept (supplemental for " + LogDirsProp + " property)" val LogDirsDoc = "The directories in which the log data is kept. If not set, the value in " + LogDirProp + " is used" val LogSegmentBytesDoc = "The maximum size of a single log file" val LogRollTimeMillisDoc = "The maximum time before a new log segment is rolled out (in milliseconds). If not set, the value in " + LogRollTimeHoursProp + " is used" val LogRollTimeHoursDoc = "The maximum time before a new log segment is rolled out (in hours), secondary to " + LogRollTimeMillisProp + " property" val LogRollTimeJitterMillisDoc = "The maximum jitter to subtract from logRollTimeMillis (in milliseconds). If not set, the value in " + LogRollTimeJitterHoursProp + " is used" val LogRollTimeJitterHoursDoc = "The maximum jitter to subtract from logRollTimeMillis (in hours), secondary to " + LogRollTimeJitterMillisProp + " property" val LogRetentionTimeMillisDoc = "The number of milliseconds to keep a log file before deleting it (in milliseconds), If not set, the value in " + LogRetentionTimeMinutesProp + " is used" val LogRetentionTimeMinsDoc = "The number of minutes to keep a log file before deleting it (in minutes), secondary to " + LogRetentionTimeMillisProp + " property. If not set, the value in " + LogRetentionTimeHoursProp + " is used" val LogRetentionTimeHoursDoc = "The number of hours to keep a log file before deleting it (in hours), tertiary to " + LogRetentionTimeMillisProp + " property" val LogRetentionBytesDoc = "The maximum size of the log before deleting it" val LogCleanupIntervalMsDoc = "The frequency in milliseconds that the log cleaner checks whether any log is eligible for deletion" val LogCleanupPolicyDoc = "The default cleanup policy for segments beyond the retention window. A comma separated list of valid policies. 
Valid policies are: \\"delete\\" and \\"compact\\"" val LogCleanerThreadsDoc = "The number of background threads to use for log cleaning" val LogCleanerIoMaxBytesPerSecondDoc = "The log cleaner will be throttled so that the sum of its read and write i/o will be less than this value on average" val LogCleanerDedupeBufferSizeDoc = "The total memory used for log deduplication across all cleaner threads" val LogCleanerIoBufferSizeDoc = "The total memory used for log cleaner I/O buffers across all cleaner threads" val LogCleanerDedupeBufferLoadFactorDoc = "Log cleaner dedupe buffer load factor. The percentage full the dedupe buffer can become. A higher value " + "will allow more log to be cleaned at once but will lead to more hash collisions" val LogCleanerBackoffMsDoc = "The amount of time to sleep when there are no logs to clean" val LogCleanerMinCleanRatioDoc = "The minimum ratio of dirty log to total log for a log to eligible for cleaning" val LogCleanerEnableDoc = "Enable the log cleaner process to run on the server. Should be enabled if using any topics with a cleanup.policy=compact including the internal offsets topic. If disabled those topics will not be compacted and continually grow in size." val LogCleanerDeleteRetentionMsDoc = "How long are delete records retained?" val LogCleanerMinCompactionLagMsDoc = "The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted." val LogIndexSizeMaxBytesDoc = "The maximum size in bytes of the offset index" val LogIndexIntervalBytesDoc = "The interval with which we add an entry to the offset index" val LogFlushIntervalMessagesDoc = "The number of messages accumulated on a log partition before messages are flushed to disk " val LogDeleteDelayMsDoc = "The amount of time to wait before deleting a file from the filesystem" val LogFlushSchedulerIntervalMsDoc = "The frequency in ms that the log flusher checks whether any log needs to be flushed to disk" val LogFlushIntervalMsDoc = "The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in " + LogFlushSchedulerIntervalMsProp + " is used" val LogFlushOffsetCheckpointIntervalMsDoc = "The frequency with which we update the persistent record of the last flush which acts as the log recovery point" val LogPreAllocateEnableDoc = "Should pre allocate file when create new segment? If you are using Kafka on Windows, you probably need to set it to true." val LogMessageFormatVersionDoc = "Specify the message format version the broker will use to append messages to the logs. The value should be a valid ApiVersion. " + "Some examples are: 0.8.2, 0.9.0.0, 0.10.0, check ApiVersion for more details. By setting a particular message format version, the " + "user is certifying that all the existing messages on disk are smaller or equal than the specified version. Setting this value incorrectly " + "will cause consumers with older versions to break as they will receive messages with a format that they don't understand." val LogMessageTimestampTypeDoc = "Define whether the timestamp in the message is message create time or log append time. The value should be either " + "`CreateTime` or `LogAppendTime`" val LogMessageTimestampDifferenceMaxMsDoc = "The maximum difference allowed between the timestamp when a broker receives " + "a message and the timestamp specified in the message. If log.message.timestamp.type=CreateTime, a message will be rejected " + "if the difference in timestamp exceeds this threshold. 
This configuration is ignored if log.message.timestamp.type=LogAppendTime." val NumRecoveryThreadsPerDataDirDoc = "The number of threads per data directory to be used for log recovery at startup and flushing at shutdown" val AutoCreateTopicsEnableDoc = "Enable auto creation of topic on the server" val MinInSyncReplicasDoc = "When a producer sets acks to \\"all\\" (or \\"-1\\"), " + "min.insync.replicas specifies the minimum number of replicas that must acknowledge " + "a write for the write to be considered successful. If this minimum cannot be met, " + "then the producer will raise an exception (either NotEnoughReplicas or " + "NotEnoughReplicasAfterAppend).<br>When used together, min.insync.replicas and acks " + "allow you to enforce greater durability guarantees. A typical scenario would be to " + "create a topic with a replication factor of 3, set min.insync.replicas to 2, and " + "produce with acks of \\"all\\". This will ensure that the producer raises an exception " + "if a majority of replicas do not receive a write." /** ********* Replication configuration ***********/ val ControllerSocketTimeoutMsDoc = "The socket timeout for controller-to-broker channels" val ControllerMessageQueueSizeDoc = "The buffer size for controller-to-broker-channels" val DefaultReplicationFactorDoc = "default replication factors for automatically created topics" val ReplicaLagTimeMaxMsDoc = "If a follower hasn't sent any fetch requests or hasn't consumed up to the leaders log end offset for at least this time," + " the leader will remove the follower from isr" val ReplicaSocketTimeoutMsDoc = "The socket timeout for network requests. Its value should be at least replica.fetch.wait.max.ms" val ReplicaSocketReceiveBufferBytesDoc = "The socket receive buffer for network requests" val ReplicaFetchMaxBytesDoc = "The number of bytes of messages to attempt to fetch for each partition. This is not an absolute maximum, " + "if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned " + "to ensure that progress can be made. The maximum message size accepted by the broker is defined via " + "<code>message.max.bytes</code> (broker config) or <code>max.message.bytes</code> (topic config)." val ReplicaFetchWaitMaxMsDoc = "max wait time for each fetcher request issued by follower replicas. This value should always be less than the " + "replica.lag.time.max.ms at all times to prevent frequent shrinking of ISR for low throughput topics" val ReplicaFetchMinBytesDoc = "Minimum bytes expected for each fetch response. If not enough bytes, wait up to replicaMaxWaitTimeMs" val ReplicaFetchResponseMaxBytesDoc = "Maximum bytes expected for the entire fetch response. This is not an absolute maximum, " + "if the first message in the first non-empty partition of the fetch is larger than this value, the message will still be returned " + "to ensure that progress can be made. The maximum message size accepted by the broker is defined via " + "<code>message.max.bytes</code> (broker config) or <code>max.message.bytes</code> (topic config)." val NumReplicaFetchersDoc = "Number of fetcher threads used to replicate messages from a source broker. " + "Increasing this value can increase the degree of I/O parallelism in the follower broker." val ReplicaFetchBackoffMsDoc = "The amount of time to sleep when fetch partition error occurs." 
val ReplicaHighWatermarkCheckpointIntervalMsDoc = "The frequency with which the high watermark is saved out to disk" val FetchPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the fetch request purgatory" val ProducerPurgatoryPurgeIntervalRequestsDoc = "The purge interval (in number of requests) of the producer request purgatory" val AutoLeaderRebalanceEnableDoc = "Enables auto leader balancing. A background thread checks and triggers leader balance if required at regular intervals" val LeaderImbalancePerBrokerPercentageDoc = "The ratio of leader imbalance allowed per broker. The controller would trigger a leader balance if it goes above this value per broker. The value is specified in percentage." val LeaderImbalanceCheckIntervalSecondsDoc = "The frequency with which the partition rebalance check is triggered by the controller" val UncleanLeaderElectionEnableDoc = "Indicates whether to enable replicas not in the ISR set to be elected as leader as a last resort, even though doing so may result in data loss" val InterBrokerSecurityProtocolDoc = "Security protocol used to communicate between brokers. Valid values are: " + s"${SecurityProtocol.nonTestingValues.asScala.toSeq.map(_.name).mkString(", ")}." val InterBrokerProtocolVersionDoc = "Specify which version of the inter-broker protocol will be used.\\n" + " This is typically bumped after all brokers were upgraded to a new version.\\n" + " Example of some valid values are: 0.8.0, 0.8.1, 0.8.1.1, 0.8.2, 0.8.2.0, 0.8.2.1, 0.9.0.0, 0.9.0.1 Check ApiVersion for the full list." /** ********* Controlled shutdown configuration ***********/ val ControlledShutdownMaxRetriesDoc = "Controlled shutdown can fail for multiple reasons. This determines the number of retries when such failure happens" val ControlledShutdownRetryBackoffMsDoc = "Before each retry, the system needs time to recover from the state that caused the previous failure (Controller fail over, replica lag etc). This config determines the amount of time to wait before retrying." val ControlledShutdownEnableDoc = "Enable controlled shutdown of the server" /** ********* Consumer coordinator configuration ***********/ val GroupMinSessionTimeoutMsDoc = "The minimum allowed session timeout for registered consumers. Shorter timeouts result in quicker failure detection at the cost of more frequent consumer heartbeating, which can overwhelm broker resources." val GroupMaxSessionTimeoutMsDoc = "The maximum allowed session timeout for registered consumers. Longer timeouts give consumers more time to process messages in between heartbeats at the cost of a longer time to detect failures." /** ********* Offset management configuration ***********/ val OffsetMetadataMaxSizeDoc = "The maximum size for a metadata entry associated with an offset commit" val OffsetsLoadBufferSizeDoc = "Batch size for reading from the offsets segments when loading offsets into the cache." val OffsetsTopicReplicationFactorDoc = "The replication factor for the offsets topic (set higher to ensure availability). " + "To ensure that the effective replication factor of the offsets topic is the configured value, " + "the number of alive brokers has to be at least the replication factor at the time of the " + "first request for the offsets topic. 
If not, either the offsets topic creation will fail or " + "it will get a replication factor of min(alive brokers, configured replication factor)" val OffsetsTopicPartitionsDoc = "The number of partitions for the offset commit topic (should not change after deployment)" val OffsetsTopicSegmentBytesDoc = "The offsets topic segment bytes should be kept relatively small in order to facilitate faster log compaction and cache loads" val OffsetsTopicCompressionCodecDoc = "Compression codec for the offsets topic - compression may be used to achieve \\"atomic\\" commits" val OffsetsRetentionMinutesDoc = "Log retention window in minutes for offsets topic" val OffsetsRetentionCheckIntervalMsDoc = "Frequency at which to check for stale offsets" val OffsetCommitTimeoutMsDoc = "Offset commit will be delayed until all replicas for the offsets topic receive the commit " + "or this timeout is reached. This is similar to the producer request timeout." val OffsetCommitRequiredAcksDoc = "The required acks before the commit can be accepted. In general, the default (-1) should not be overridden" /** ********* Quota Configuration ***********/ val ProducerQuotaBytesPerSecondDefaultDoc = "DEPRECATED: Used only when dynamic default quotas are not configured for <user>, <client-id> or <user, client-id> in Zookeeper. " + "Any producer distinguished by clientId will get throttled if it produces more bytes than this value per-second" val ConsumerQuotaBytesPerSecondDefaultDoc = "DEPRECATED: Used only when dynamic default quotas are not configured for <user, <client-id> or <user, client-id> in Zookeeper. " + "Any consumer distinguished by clientId/consumer group will get throttled if it fetches more bytes than this value per-second" val NumQuotaSamplesDoc = "The number of samples to retain in memory for client quotas" val NumReplicationQuotaSamplesDoc = "The number of samples to retain in memory for replication quotas" val QuotaWindowSizeSecondsDoc = "The time span of each sample for client quotas" val ReplicationQuotaWindowSizeSecondsDoc = "The time span of each sample for replication quotas" val DeleteTopicEnableDoc = "Enables delete topic. Delete topic through the admin tool will have no effect if this config is turned off" val CompressionTypeDoc = "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs " + "('gzip', 'snappy', 'lz4'). It additionally accepts 'uncompressed' which is equivalent to no compression; and " + "'producer' which means retain the original compression codec set by the producer." 
/** ********* Kafka Metrics Configuration ***********/ val MetricSampleWindowMsDoc = CommonClientConfigs.METRICS_SAMPLE_WINDOW_MS_DOC val MetricNumSamplesDoc = CommonClientConfigs.METRICS_NUM_SAMPLES_DOC val MetricReporterClassesDoc = CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC /** ********* SSL Configuration ****************/ val PrincipalBuilderClassDoc = SslConfigs.PRINCIPAL_BUILDER_CLASS_DOC val SslProtocolDoc = SslConfigs.SSL_PROTOCOL_DOC val SslProviderDoc = SslConfigs.SSL_PROVIDER_DOC val SslCipherSuitesDoc = SslConfigs.SSL_CIPHER_SUITES_DOC val SslEnabledProtocolsDoc = SslConfigs.SSL_ENABLED_PROTOCOLS_DOC val SslKeystoreTypeDoc = SslConfigs.SSL_KEYSTORE_TYPE_DOC val SslKeystoreLocationDoc = SslConfigs.SSL_KEYSTORE_LOCATION_DOC val SslKeystorePasswordDoc = SslConfigs.SSL_KEYSTORE_PASSWORD_DOC val SslKeyPasswordDoc = SslConfigs.SSL_KEY_PASSWORD_DOC val SslTruststoreTypeDoc = SslConfigs.SSL_TRUSTSTORE_TYPE_DOC val SslTruststorePasswordDoc = SslConfigs.SSL_TRUSTSTORE_PASSWORD_DOC val SslTruststoreLocationDoc = SslConfigs.SSL_TRUSTSTORE_LOCATION_DOC val SslKeyManagerAlgorithmDoc = SslConfigs.SSL_KEYMANAGER_ALGORITHM_DOC val SslTrustManagerAlgorithmDoc = SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_DOC val SslEndpointIdentificationAlgorithmDoc = SslConfigs.SSL_ENDPOINT_IDENTIFICATION_ALGORITHM_DOC val SslSecureRandomImplementationDoc = SslConfigs.SSL_SECURE_RANDOM_IMPLEMENTATION_DOC val SslClientAuthDoc = SslConfigs.SSL_CLIENT_AUTH_DOC /** ********* Sasl Configuration ****************/ val SaslMechanismInterBrokerProtocolDoc = "SASL mechanism used for inter-broker communication. Default is GSSAPI." val SaslEnabledMechanismsDoc = SaslConfigs.SASL_ENABLED_MECHANISMS_DOC val SaslKerberosServiceNameDoc = SaslConfigs.SASL_KERBEROS_SERVICE_NAME_DOC val SaslKerberosKinitCmdDoc = SaslConfigs.SASL_KERBEROS_KINIT_CMD_DOC val SaslKerberosTicketRenewWindowFactorDoc = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_WINDOW_FACTOR_DOC val SaslKerberosTicketRenewJitterDoc = SaslConfigs.SASL_KERBEROS_TICKET_RENEW_JITTER_DOC val SaslKerberosMinTimeBeforeReloginDoc = SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN_DOC val SaslKerberosPrincipalToLocalRulesDoc = SaslConfigs.SASL_KERBEROS_PRINCIPAL_TO_LOCAL_RULES_DOC private val configDef = { import ConfigDef.Importance._ import ConfigDef.Range._ import ConfigDef.Type._ import ConfigDef.ValidString._ new ConfigDef() /** ********* Zookeeper Configuration ***********/ .define(ZkConnectProp, STRING, HIGH, ZkConnectDoc) .define(ZkSessionTimeoutMsProp, INT, Defaults.ZkSessionTimeoutMs, HIGH, ZkSessionTimeoutMsDoc) .define(ZkConnectionTimeoutMsProp, INT, null, HIGH, ZkConnectionTimeoutMsDoc) .define(ZkSyncTimeMsProp, INT, Defaults.ZkSyncTimeMs, LOW, ZkSyncTimeMsDoc) .define(ZkEnableSecureAclsProp, BOOLEAN, Defaults.ZkEnableSecureAcls, HIGH, ZkEnableSecureAclsDoc) /** ********* General Configuration ***********/ .define(BrokerIdGenerationEnableProp, BOOLEAN, Defaults.BrokerIdGenerationEnable, MEDIUM, BrokerIdGenerationEnableDoc) .define(MaxReservedBrokerIdProp, INT, Defaults.MaxReservedBrokerId, atLeast(0), MEDIUM, MaxReservedBrokerIdDoc) .define(BrokerIdProp, INT, Defaults.BrokerId, HIGH, BrokerIdDoc) .define(MessageMaxBytesProp, INT, Defaults.MessageMaxBytes, atLeast(0), HIGH, MessageMaxBytesDoc) .define(NumNetworkThreadsProp, INT, Defaults.NumNetworkThreads, atLeast(1), HIGH, NumNetworkThreadsDoc) .define(NumIoThreadsProp, INT, Defaults.NumIoThreads, atLeast(1), HIGH, NumIoThreadsDoc) .define(BackgroundThreadsProp, INT, Defaults.BackgroundThreads, atLeast(1), HIGH, 
BackgroundThreadsDoc) .define(QueuedMaxRequestsProp, INT, Defaults.QueuedMaxRequests, atLeast(1), HIGH, QueuedMaxRequestsDoc) .define(RequestTimeoutMsProp, INT, Defaults.RequestTimeoutMs, HIGH, RequestTimeoutMsDoc) /************* Authorizer Configuration ***********/ .define(AuthorizerClassNameProp, STRING, Defaults.AuthorizerClassName, LOW, AuthorizerClassNameDoc) /** ********* Socket Server Configuration ***********/ .define(PortProp, INT, Defaults.Port, HIGH, PortDoc) .define(HostNameProp, STRING, Defaults.HostName, HIGH, HostNameDoc) .define(ListenersProp, STRING, null, HIGH, ListenersDoc) .define(AdvertisedHostNameProp, STRING, null, HIGH, AdvertisedHostNameDoc) .define(AdvertisedPortProp, INT, null, HIGH, AdvertisedPortDoc) .define(AdvertisedListenersProp, STRING, null, HIGH, AdvertisedListenersDoc) .define(SocketSendBufferBytesProp, INT, Defaults.SocketSendBufferBytes, HIGH, SocketSendBufferBytesDoc) .define(SocketReceiveBufferBytesProp, INT, Defaults.SocketReceiveBufferBytes, HIGH, SocketReceiveBufferBytesDoc) .define(SocketRequestMaxBytesProp, INT, Defaults.SocketRequestMaxBytes, atLeast(1), HIGH, SocketRequestMaxBytesDoc) .define(MaxConnectionsPerIpProp, INT, Defaults.MaxConnectionsPerIp, atLeast(1), MEDIUM, MaxConnectionsPerIpDoc) .define(MaxConnectionsPerIpOverridesProp, STRING, Defaults.MaxConnectionsPerIpOverrides, MEDIUM, MaxConnectionsPerIpOverridesDoc) .define(ConnectionsMaxIdleMsProp, LONG, Defaults.ConnectionsMaxIdleMs, MEDIUM, ConnectionsMaxIdleMsDoc) /************ Rack Configuration ******************/ .define(RackProp, STRING, null, MEDIUM, RackDoc) /** ********* Log Configuration ***********/ .define(NumPartitionsProp, INT, Defaults.NumPartitions, atLeast(1), MEDIUM, NumPartitionsDoc) .define(LogDirProp, STRING, Defaults.LogDir, HIGH, LogDirDoc) .define(LogDirsProp, STRING, null, HIGH, LogDirsDoc) .define(LogSegmentBytesProp, INT, Defaults.LogSegmentBytes, atLeast(Message.MinMessageOverhead), HIGH, LogSegmentBytesDoc) .define(LogRollTimeMillisProp, LONG, null, HIGH, LogRollTimeMillisDoc) .define(LogRollTimeHoursProp, INT, Defaults.LogRollHours, atLeast(1), HIGH, LogRollTimeHoursDoc) .define(LogRollTimeJitterMillisProp, LONG, null, HIGH, LogRollTimeJitterMillisDoc) .define(LogRollTimeJitterHoursProp, INT, Defaults.LogRollJitterHours, atLeast(0), HIGH, LogRollTimeJitterHoursDoc) .define(LogRetentionTimeMillisProp, LONG, null, HIGH, LogRetentionTimeMillisDoc) .define(LogRetentionTimeMinutesProp, INT, null, HIGH, LogRetentionTimeMinsDoc) .define(LogRetentionTimeHoursProp, INT, Defaults.LogRetentionHours, HIGH, LogRetentionTimeHoursDoc) .define(LogRetentionBytesProp, LONG, Defaults.LogRetentionBytes, HIGH, LogRetentionBytesDoc) .define(LogCleanupIntervalMsProp, LONG, Defaults.LogCleanupIntervalMs, atLeast(1), MEDIUM, LogCleanupIntervalMsDoc) .define(LogCleanupPolicyProp, LIST, Defaults.LogCleanupPolicy, ValidList.in(Defaults.Compact, Defaults.Delete), MEDIUM, LogCleanupPolicyDoc) .define(LogCleanerThreadsProp, INT, Defaults.LogCleanerThreads, atLeast(0), MEDIUM, LogCleanerThreadsDoc) .define(LogCleanerIoMaxBytesPerSecondProp, DOUBLE, Defaults.LogCleanerIoMaxBytesPerSecond, MEDIUM, LogCleanerIoMaxBytesPerSecondDoc) .define(LogCleanerDedupeBufferSizeProp, LONG, Defaults.LogCleanerDedupeBufferSize, MEDIUM, LogCleanerDedupeBufferSizeDoc) .define(LogCleanerIoBufferSizeProp, INT, Defaults.LogCleanerIoBufferSize, atLeast(0), MEDIUM, LogCleanerIoBufferSizeDoc) .define(LogCleanerDedupeBufferLoadFactorProp, DOUBLE, Defaults.LogCleanerDedupeBufferLoadFactor, MEDIUM, 
LogCleanerDedupeBufferLoadFactorDoc) .define(LogCleanerBackoffMsProp, LONG, Defaults.LogCleanerBackoffMs, atLeast(0), MEDIUM, LogCleanerBackoffMsDoc) .define(LogCleanerMinCleanRatioProp, DOUBLE, Defaults.LogCleanerMinCleanRatio, MEDIUM, LogCleanerMinCleanRatioDoc) .define(LogCleanerEnableProp, BOOLEAN, Defaults.LogCleanerEnable, MEDIUM, LogCleanerEnableDoc) .define(LogCleanerDeleteRetentionMsProp, LONG, Defaults.LogCleanerDeleteRetentionMs, MEDIUM, LogCleanerDeleteRetentionMsDoc) .define(LogCleanerMinCompactionLagMsProp, LONG, Defaults.LogCleanerMinCompactionLagMs, MEDIUM, LogCleanerMinCompactionLagMsDoc) .define(LogIndexSizeMaxBytesProp, INT, Defaults.LogIndexSizeMaxBytes, atLeast(4), MEDIUM, LogIndexSizeMaxBytesDoc) .define(LogIndexIntervalBytesProp, INT, Defaults.LogIndexIntervalBytes, atLeast(0), MEDIUM, LogIndexIntervalBytesDoc) .define(LogFlushIntervalMessagesProp, LONG, Defaults.LogFlushIntervalMessages, atLeast(1), HIGH, LogFlushIntervalMessagesDoc) .define(LogDeleteDelayMsProp, LONG, Defaults.LogDeleteDelayMs, atLeast(0), HIGH, LogDeleteDelayMsDoc) .define(LogFlushSchedulerIntervalMsProp, LONG, Defaults.LogFlushSchedulerIntervalMs, HIGH, LogFlushSchedulerIntervalMsDoc) .define(LogFlushIntervalMsProp, LONG, null, HIGH, LogFlushIntervalMsDoc) .define(LogFlushOffsetCheckpointIntervalMsProp, INT, Defaults.LogFlushOffsetCheckpointIntervalMs, atLeast(0), HIGH, LogFlushOffsetCheckpointIntervalMsDoc) .define(LogPreAllocateProp, BOOLEAN, Defaults.LogPreAllocateEnable, MEDIUM, LogPreAllocateEnableDoc) .define(NumRecoveryThreadsPerDataDirProp, INT, Defaults.NumRecoveryThreadsPerDataDir, atLeast(1), HIGH, NumRecoveryThreadsPerDataDirDoc) .define(AutoCreateTopicsEnableProp, BOOLEAN, Defaults.AutoCreateTopicsEnable, HIGH, AutoCreateTopicsEnableDoc) .define(MinInSyncReplicasProp, INT, Defaults.MinInSyncReplicas, atLeast(1), HIGH, MinInSyncReplicasDoc) .define(LogMessageFormatVersionProp, STRING, Defaults.LogMessageFormatVersion, MEDIUM, LogMessageFormatVersionDoc) .define(LogMessageTimestampTypeProp, STRING, Defaults.LogMessageTimestampType, in("CreateTime", "LogAppendTime"), MEDIUM, LogMessageTimestampTypeDoc) .define(LogMessageTimestampDifferenceMaxMsProp, LONG, Defaults.LogMessageTimestampDifferenceMaxMs, atLeast(0), MEDIUM, LogMessageTimestampDifferenceMaxMsDoc) /** ********* Replication configuration ***********/ .define(ControllerSocketTimeoutMsProp, INT, Defaults.ControllerSocketTimeoutMs, MEDIUM, ControllerSocketTimeoutMsDoc) .define(DefaultReplicationFactorProp, INT, Defaults.DefaultReplicationFactor, MEDIUM, DefaultReplicationFactorDoc) .define(ReplicaLagTimeMaxMsProp, LONG, Defaults.ReplicaLagTimeMaxMs, HIGH, ReplicaLagTimeMaxMsDoc) .define(ReplicaSocketTimeoutMsProp, INT, Defaults.ReplicaSocketTimeoutMs, HIGH, ReplicaSocketTimeoutMsDoc) .define(ReplicaSocketReceiveBufferBytesProp, INT, Defaults.ReplicaSocketReceiveBufferBytes, HIGH, ReplicaSocketReceiveBufferBytesDoc) .define(ReplicaFetchMaxBytesProp, INT, Defaults.ReplicaFetchMaxBytes, atLeast(0), MEDIUM, ReplicaFetchMaxBytesDoc) .define(ReplicaFetchWaitMaxMsProp, INT, Defaults.ReplicaFetchWaitMaxMs, HIGH, ReplicaFetchWaitMaxMsDoc) .define(ReplicaFetchBackoffMsProp, INT, Defaults.ReplicaFetchBackoffMs, atLeast(0), MEDIUM, ReplicaFetchBackoffMsDoc) .define(ReplicaFetchMinBytesProp, INT, Defaults.ReplicaFetchMinBytes, HIGH, ReplicaFetchMinBytesDoc) .define(ReplicaFetchResponseMaxBytesProp, INT, Defaults.ReplicaFetchResponseMaxBytes, atLeast(0), MEDIUM, ReplicaFetchResponseMaxBytesDoc) .define(NumReplicaFetchersProp, INT, 
Defaults.NumReplicaFetchers, HIGH, NumReplicaFetchersDoc) .define(ReplicaHighWatermarkCheckpointIntervalMsProp, LONG, Defaults.ReplicaHighWatermarkCheckpointIntervalMs, HIGH, ReplicaHighWatermarkCheckpointIntervalMsDoc) .define(FetchPurgatoryPurgeIntervalRequestsProp, INT, Defaults.FetchPurgatoryPurgeIntervalRequests, MEDIUM, FetchPurgatoryPurgeIntervalRequestsDoc) .define(ProducerPurgatoryPurgeIntervalRequestsProp, INT, Defaults.ProducerPurgatoryPurgeIntervalRequests, MEDIUM, ProducerPurgatoryPurgeIntervalRequestsDoc) .define(AutoLeaderRebalanceEnableProp, BOOLEAN, Defaults.AutoLeaderRebalanceEnable, HIGH, AutoLeaderRebalanceEnableDoc) .define(LeaderImbalancePerBrokerPercentageProp, INT, Defaults.LeaderImbalancePerBrokerPercentage, HIGH, LeaderImbalancePerBrokerPercentageDoc) .define(LeaderImbalanceCheckIntervalSecondsProp, LONG, Defaults.LeaderImbalanceCheckIntervalSeconds, HIGH, LeaderImbalanceCheckIntervalSecondsDoc) .define(UncleanLeaderElectionEnableProp, BOOLEAN, Defaults.UncleanLeaderElectionEnable, HIGH, UncleanLeaderElectionEnableDoc) .define(InterBrokerSecurityProtocolProp, STRING, Defaults.InterBrokerSecurityProtocol, MEDIUM, InterBrokerSecurityProtocolDoc) .define(InterBrokerProtocolVersionProp, STRING, Defaults.InterBrokerProtocolVersion, MEDIUM, InterBrokerProtocolVersionDoc) /** ********* Controlled shutdown configuration ***********/ .define(ControlledShutdownMaxRetriesProp, INT, Defaults.ControlledShutdownMaxRetries, MEDIUM, ControlledShutdownMaxRetriesDoc) .define(ControlledShutdownRetryBackoffMsProp, LONG, Defaults.ControlledShutdownRetryBackoffMs, MEDIUM, ControlledShutdownRetryBackoffMsDoc) .define(ControlledShutdownEnableProp, BOOLEAN, Defaults.ControlledShutdownEnable, MEDIUM, ControlledShutdownEnableDoc) /** ********* Group coordinator configuration ***********/ .define(GroupMinSessionTimeoutMsProp, INT, Defaults.GroupMinSessionTimeoutMs, MEDIUM, GroupMinSessionTimeoutMsDoc) .define(GroupMaxSessionTimeoutMsProp, INT, Defaults.GroupMaxSessionTimeoutMs, MEDIUM, GroupMaxSessionTimeoutMsDoc) /** ********* Offset management configuration ***********/ .define(OffsetMetadataMaxSizeProp, INT, Defaults.OffsetMetadataMaxSize, HIGH, OffsetMetadataMaxSizeDoc) .define(OffsetsLoadBufferSizeProp, INT, Defaults.OffsetsLoadBufferSize, atLeast(1), HIGH, OffsetsLoadBufferSizeDoc) .define(OffsetsTopicReplicationFactorProp, SHORT, Defaults.OffsetsTopicReplicationFactor, atLeast(1), HIGH, OffsetsTopicReplicationFactorDoc) .define(OffsetsTopicPartitionsProp, INT, Defaults.OffsetsTopicPartitions, atLeast(1), HIGH, OffsetsTopicPartitionsDoc) .define(OffsetsTopicSegmentBytesProp, INT, Defaults.OffsetsTopicSegmentBytes, atLeast(1), HIGH, OffsetsTopicSegmentBytesDoc) .define(OffsetsTopicCompressionCodecProp, INT, Defaults.OffsetsTopicCompressionCodec, HIGH, OffsetsTopicCompressionCodecDoc) .define(OffsetsRetentionMinutesProp, INT, Defaults.OffsetsRetentionMinutes, atLeast(1), HIGH, OffsetsRetentionMinutesDoc) .define(OffsetsRetentionCheckIntervalMsProp, LONG, Defaults.OffsetsRetentionCheckIntervalMs, atLeast(1), HIGH, OffsetsRetentionCheckIntervalMsDoc) .define(OffsetCommitTimeoutMsProp, INT, Defaults.OffsetCommitTimeoutMs, atLeast(1), HIGH, OffsetCommitTimeoutMsDoc) .define(OffsetCommitRequiredAcksProp, SHORT, Defaults.OffsetCommitRequiredAcks, HIGH, OffsetCommitRequiredAcksDoc) .define(DeleteTopicEnableProp, BOOLEAN, Defaults.DeleteTopicEnable, HIGH, DeleteTopicEnableDoc) .define(CompressionTypeProp, STRING, Defaults.CompressionType, HIGH, CompressionTypeDoc) /** ********* Kafka Metrics 
Configuration ***********/ .define(MetricNumSamplesProp, INT, Defaults.MetricNumSamples, atLeast(1), LOW, MetricNumSamplesDoc) .define(MetricSampleWindowMsProp, LONG, Defaults.MetricSampleWindowMs, atLeast(1), LOW, MetricSampleWindowMsDoc) .define(MetricReporterClassesProp, LIST, Defaults.MetricReporterClasses, LOW, MetricReporterClassesDoc) /** ********* Quota configuration ***********/ .define(ProducerQuotaBytesPerSecondDefaultProp, LONG, Defaults.ProducerQuotaBytesPerSecondDefault, atLeast(1), HIGH, ProducerQuotaBytesPerSecondDefaultDoc) .define(ConsumerQuotaBytesPerSecondDefaultProp, LONG, Defaults.ConsumerQuotaBytesPerSecondDefault, atLeast(1), HIGH, ConsumerQuotaBytesPerSecondDefaultDoc) .define(NumQuotaSamplesProp, INT, Defaults.NumQuotaSamples, atLeast(1), LOW, NumQuotaSamplesDoc) .define(NumReplicationQuotaSamplesProp, INT, Defaults.NumReplicationQuotaSamples, atLeast(1), LOW, NumReplicationQuotaSamplesDoc) .define(QuotaWindowSizeSecondsProp, INT, Defaults.QuotaWindowSizeSeconds, atLeast(1), LOW, QuotaWindowSizeSecondsDoc) .define(ReplicationQuotaWindowSizeSecondsProp, INT, Defaults.ReplicationQuotaWindowSizeSeconds, atLeast(1), LOW, ReplicationQuotaWindowSizeSecondsDoc) /** ********* SSL Configuration ****************/ .define(PrincipalBuilderClassProp, CLASS, Defaults.PrincipalBuilderClass, MEDIUM, PrincipalBuilderClassDoc) .define(SslProtocolProp, STRING, Defaults.SslProtocol, MEDIUM, SslProtocolDoc) .define(SslProviderProp, STRING, null, MEDIUM, SslProviderDoc) .define(SslEnabledProtocolsProp, LIST, Defaults.SslEnabledProtocols, MEDIUM, SslEnabledProtocolsDoc) .define(SslKeystoreTypeProp, STRING, Defaults.SslKeystoreType, MEDIUM, SslKeystoreTypeDoc) .define(SslKeystoreLocationProp, STRING, null, MEDIUM, SslKeystoreLocationDoc) .define(SslKeystorePasswordProp, PASSWORD, null, MEDIUM, SslKeystorePasswordDoc) .define(SslKeyPasswordProp, PASSWORD, null, MEDIUM, SslKeyPasswordDoc) .define(SslTruststoreTypeProp, STRING, Defaults.SslTruststoreType, MEDIUM, SslTruststoreTypeDoc) .define(SslTruststoreLocationProp, STRING, null, MEDIUM, SslTruststoreLocationDoc) .define(SslTruststorePasswordProp, PASSWORD, null, MEDIUM, SslTruststorePasswordDoc) .define(SslKeyManagerAlgorithmProp, STRING, Defaults.SslKeyManagerAlgorithm, MEDIUM, SslKeyManagerAlgorithmDoc) .define(SslTrustManagerAlgorithmProp, STRING, Defaults.SslTrustManagerAlgorithm, MEDIUM, SslTrustManagerAlgorithmDoc) .define(SslEndpointIdentificationAlgorithmProp, STRING, null, LOW, SslEndpointIdentificationAlgorithmDoc) .define(SslSecureRandomImplementationProp, STRING, null, LOW, SslSecureRandomImplementationDoc) .define(SslClientAuthProp, STRING, Defaults.SslClientAuth, in(Defaults.SslClientAuthRequired, Defaults.SslClientAuthRequested, Defaults.SslClientAuthNone), MEDIUM, SslClientAuthDoc) .define(SslCipherSuitesProp, LIST, null, MEDIUM, SslCipherSuitesDoc) /** ********* Sasl Configuration ****************/ .define(SaslMechanismInterBrokerProtocolProp, STRING, Defaults.SaslMechanismInterBrokerProtocol, MEDIUM, SaslMechanismInterBrokerProtocolDoc) .define(SaslEnabledMechanismsProp, LIST, Defaults.SaslEnabledMechanisms, MEDIUM, SaslEnabledMechanismsDoc) .define(SaslKerberosServiceNameProp, STRING, null, MEDIUM, SaslKerberosServiceNameDoc) .define(SaslKerberosKinitCmdProp, STRING, Defaults.SaslKerberosKinitCmd, MEDIUM, SaslKerberosKinitCmdDoc) .define(SaslKerberosTicketRenewWindowFactorProp, DOUBLE, Defaults.SaslKerberosTicketRenewWindowFactor, MEDIUM, SaslKerberosTicketRenewWindowFactorDoc) 
.define(SaslKerberosTicketRenewJitterProp, DOUBLE, Defaults.SaslKerberosTicketRenewJitter, MEDIUM, SaslKerberosTicketRenewJitterDoc) .define(SaslKerberosMinTimeBeforeReloginProp, LONG, Defaults.SaslKerberosMinTimeBeforeRelogin, MEDIUM, SaslKerberosMinTimeBeforeReloginDoc) .define(SaslKerberosPrincipalToLocalRulesProp, LIST, Defaults.SaslKerberosPrincipalToLocalRules, MEDIUM, SaslKerberosPrincipalToLocalRulesDoc) } def configNames() = configDef.names().asScala.toList.sorted def fromProps(props: Properties): KafkaConfig = fromProps(props, true) def fromProps(props: Properties, doLog: Boolean): KafkaConfig = new KafkaConfig(props, doLog) def fromProps(defaults: Properties, overrides: Properties): KafkaConfig = fromProps(defaults, overrides, true) def fromProps(defaults: Properties, overrides: Properties, doLog: Boolean): KafkaConfig = { val props = new Properties() props.putAll(defaults) props.putAll(overrides) fromProps(props, doLog) } def apply(props: java.util.Map[_, _]): KafkaConfig = new KafkaConfig(props, true) } class KafkaConfig(val props: java.util.Map[_, _], doLog: Boolean) extends AbstractConfig(KafkaConfig.configDef, props, doLog) { def this(props: java.util.Map[_, _]) = this(props, true) /** ********* Zookeeper Configuration ***********/ val zkConnect: String = getString(KafkaConfig.ZkConnectProp) val zkSessionTimeoutMs: Int = getInt(KafkaConfig.ZkSessionTimeoutMsProp) val zkConnectionTimeoutMs: Int = Option(getInt(KafkaConfig.ZkConnectionTimeoutMsProp)).map(_.toInt).getOrElse(getInt(KafkaConfig.ZkSessionTimeoutMsProp)) val zkSyncTimeMs: Int = getInt(KafkaConfig.ZkSyncTimeMsProp) val zkEnableSecureAcls: Boolean = getBoolean(KafkaConfig.ZkEnableSecureAclsProp) /** ********* General Configuration ***********/ val brokerIdGenerationEnable: Boolean = getBoolean(KafkaConfig.BrokerIdGenerationEnableProp) val maxReservedBrokerId: Int = getInt(KafkaConfig.MaxReservedBrokerIdProp) var brokerId: Int = getInt(KafkaConfig.BrokerIdProp) val numNetworkThreads = getInt(KafkaConfig.NumNetworkThreadsProp) val backgroundThreads = getInt(KafkaConfig.BackgroundThreadsProp) val queuedMaxRequests = getInt(KafkaConfig.QueuedMaxRequestsProp) val numIoThreads = getInt(KafkaConfig.NumIoThreadsProp) val messageMaxBytes = getInt(KafkaConfig.MessageMaxBytesProp) val requestTimeoutMs = getInt(KafkaConfig.RequestTimeoutMsProp) /************* Authorizer Configuration ***********/ val authorizerClassName: String = getString(KafkaConfig.AuthorizerClassNameProp) /** ********* Socket Server Configuration ***********/ val hostName = getString(KafkaConfig.HostNameProp) val port = getInt(KafkaConfig.PortProp) val advertisedHostName = Option(getString(KafkaConfig.AdvertisedHostNameProp)).getOrElse(hostName) val advertisedPort: java.lang.Integer = Option(getInt(KafkaConfig.AdvertisedPortProp)).getOrElse(port) val socketSendBufferBytes = getInt(KafkaConfig.SocketSendBufferBytesProp) val socketReceiveBufferBytes = getInt(KafkaConfig.SocketReceiveBufferBytesProp) val socketRequestMaxBytes = getInt(KafkaConfig.SocketRequestMaxBytesProp) val maxConnectionsPerIp = getInt(KafkaConfig.MaxConnectionsPerIpProp) val maxConnectionsPerIpOverrides: Map[String, Int] = getMap(KafkaConfig.MaxConnectionsPerIpOverridesProp, getString(KafkaConfig.MaxConnectionsPerIpOverridesProp)).map { case (k, v) => (k, v.toInt)} val connectionsMaxIdleMs = getLong(KafkaConfig.ConnectionsMaxIdleMsProp) /***************** rack configuration **************/ val rack = Option(getString(KafkaConfig.RackProp)) /** ********* Log Configuration ***********/ val 
autoCreateTopicsEnable = getBoolean(KafkaConfig.AutoCreateTopicsEnableProp) val numPartitions = getInt(KafkaConfig.NumPartitionsProp) val logDirs = CoreUtils.parseCsvList( Option(getString(KafkaConfig.LogDirsProp)).getOrElse(getString(KafkaConfig.LogDirProp))) val logSegmentBytes = getInt(KafkaConfig.LogSegmentBytesProp) val logFlushIntervalMessages = getLong(KafkaConfig.LogFlushIntervalMessagesProp) val logCleanerThreads = getInt(KafkaConfig.LogCleanerThreadsProp) val numRecoveryThreadsPerDataDir = getInt(KafkaConfig.NumRecoveryThreadsPerDataDirProp) val logFlushSchedulerIntervalMs = getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp) val logFlushOffsetCheckpointIntervalMs = getInt(KafkaConfig.LogFlushOffsetCheckpointIntervalMsProp).toLong val logCleanupIntervalMs = getLong(KafkaConfig.LogCleanupIntervalMsProp) val logCleanupPolicy = getList(KafkaConfig.LogCleanupPolicyProp) val offsetsRetentionMinutes = getInt(KafkaConfig.OffsetsRetentionMinutesProp) val offsetsRetentionCheckIntervalMs = getLong(KafkaConfig.OffsetsRetentionCheckIntervalMsProp) val logRetentionBytes = getLong(KafkaConfig.LogRetentionBytesProp) val logCleanerDedupeBufferSize = getLong(KafkaConfig.LogCleanerDedupeBufferSizeProp) val logCleanerDedupeBufferLoadFactor = getDouble(KafkaConfig.LogCleanerDedupeBufferLoadFactorProp) val logCleanerIoBufferSize = getInt(KafkaConfig.LogCleanerIoBufferSizeProp) val logCleanerIoMaxBytesPerSecond = getDouble(KafkaConfig.LogCleanerIoMaxBytesPerSecondProp) val logCleanerDeleteRetentionMs = getLong(KafkaConfig.LogCleanerDeleteRetentionMsProp) val logCleanerMinCompactionLagMs = getLong(KafkaConfig.LogCleanerMinCompactionLagMsProp) val logCleanerBackoffMs = getLong(KafkaConfig.LogCleanerBackoffMsProp) val logCleanerMinCleanRatio = getDouble(KafkaConfig.LogCleanerMinCleanRatioProp) val logCleanerEnable = getBoolean(KafkaConfig.LogCleanerEnableProp) val logIndexSizeMaxBytes = getInt(KafkaConfig.LogIndexSizeMaxBytesProp) val logIndexIntervalBytes = getInt(KafkaConfig.LogIndexIntervalBytesProp) val logDeleteDelayMs = getLong(KafkaConfig.LogDeleteDelayMsProp) val logRollTimeMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeHoursProp)) val logRollTimeJitterMillis: java.lang.Long = Option(getLong(KafkaConfig.LogRollTimeJitterMillisProp)).getOrElse(60 * 60 * 1000L * getInt(KafkaConfig.LogRollTimeJitterHoursProp)) val logFlushIntervalMs: java.lang.Long = Option(getLong(KafkaConfig.LogFlushIntervalMsProp)).getOrElse(getLong(KafkaConfig.LogFlushSchedulerIntervalMsProp)) val logRetentionTimeMillis = getLogRetentionTimeMillis val minInSyncReplicas = getInt(KafkaConfig.MinInSyncReplicasProp) val logPreAllocateEnable: java.lang.Boolean = getBoolean(KafkaConfig.LogPreAllocateProp) // We keep the user-provided String as `ApiVersion.apply` can choose a slightly different version (eg if `0.10.0` // is passed, `0.10.0-IV0` may be picked) val logMessageFormatVersionString = getString(KafkaConfig.LogMessageFormatVersionProp) val logMessageFormatVersion = ApiVersion(logMessageFormatVersionString) val logMessageTimestampType = TimestampType.forName(getString(KafkaConfig.LogMessageTimestampTypeProp)) val logMessageTimestampDifferenceMaxMs = getLong(KafkaConfig.LogMessageTimestampDifferenceMaxMsProp) /** ********* Replication configuration ***********/ val controllerSocketTimeoutMs: Int = getInt(KafkaConfig.ControllerSocketTimeoutMsProp) val defaultReplicationFactor: Int = getInt(KafkaConfig.DefaultReplicationFactorProp) val 
replicaLagTimeMaxMs = getLong(KafkaConfig.ReplicaLagTimeMaxMsProp) val replicaSocketTimeoutMs = getInt(KafkaConfig.ReplicaSocketTimeoutMsProp) val replicaSocketReceiveBufferBytes = getInt(KafkaConfig.ReplicaSocketReceiveBufferBytesProp) val replicaFetchMaxBytes = getInt(KafkaConfig.ReplicaFetchMaxBytesProp) val replicaFetchWaitMaxMs = getInt(KafkaConfig.ReplicaFetchWaitMaxMsProp) val replicaFetchMinBytes = getInt(KafkaConfig.ReplicaFetchMinBytesProp) val replicaFetchResponseMaxBytes = getInt(KafkaConfig.ReplicaFetchResponseMaxBytesProp) val replicaFetchBackoffMs = getInt(KafkaConfig.ReplicaFetchBackoffMsProp) val numReplicaFetchers = getInt(KafkaConfig.NumReplicaFetchersProp) val replicaHighWatermarkCheckpointIntervalMs = getLong(KafkaConfig.ReplicaHighWatermarkCheckpointIntervalMsProp) val fetchPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.FetchPurgatoryPurgeIntervalRequestsProp) val producerPurgatoryPurgeIntervalRequests = getInt(KafkaConfig.ProducerPurgatoryPurgeIntervalRequestsProp) val autoLeaderRebalanceEnable = getBoolean(KafkaConfig.AutoLeaderRebalanceEnableProp) val leaderImbalancePerBrokerPercentage = getInt(KafkaConfig.LeaderImbalancePerBrokerPercentageProp) val leaderImbalanceCheckIntervalSeconds = getLong(KafkaConfig.LeaderImbalanceCheckIntervalSecondsProp) val uncleanLeaderElectionEnable: java.lang.Boolean = getBoolean(KafkaConfig.UncleanLeaderElectionEnableProp) val interBrokerSecurityProtocol = SecurityProtocol.forName(getString(KafkaConfig.InterBrokerSecurityProtocolProp)) // We keep the user-provided String as `ApiVersion.apply` can choose a slightly different version (eg if `0.10.0` // is passed, `0.10.0-IV0` may be picked) val interBrokerProtocolVersionString = getString(KafkaConfig.InterBrokerProtocolVersionProp) val interBrokerProtocolVersion = ApiVersion(interBrokerProtocolVersionString) /** ********* Controlled shutdown configuration ***********/ val controlledShutdownMaxRetries = getInt(KafkaConfig.ControlledShutdownMaxRetriesProp) val controlledShutdownRetryBackoffMs = getLong(KafkaConfig.ControlledShutdownRetryBackoffMsProp) val controlledShutdownEnable = getBoolean(KafkaConfig.ControlledShutdownEnableProp) /** ********* Group coordinator configuration ***********/ val groupMinSessionTimeoutMs = getInt(KafkaConfig.GroupMinSessionTimeoutMsProp) val groupMaxSessionTimeoutMs = getInt(KafkaConfig.GroupMaxSessionTimeoutMsProp) /** ********* Offset management configuration ***********/ val offsetMetadataMaxSize = getInt(KafkaConfig.OffsetMetadataMaxSizeProp) val offsetsLoadBufferSize = getInt(KafkaConfig.OffsetsLoadBufferSizeProp) val offsetsTopicReplicationFactor = getShort(KafkaConfig.OffsetsTopicReplicationFactorProp) val offsetsTopicPartitions = getInt(KafkaConfig.OffsetsTopicPartitionsProp) val offsetCommitTimeoutMs = getInt(KafkaConfig.OffsetCommitTimeoutMsProp) val offsetCommitRequiredAcks = getShort(KafkaConfig.OffsetCommitRequiredAcksProp) val offsetsTopicSegmentBytes = getInt(KafkaConfig.OffsetsTopicSegmentBytesProp) val offsetsTopicCompressionCodec = Option(getInt(KafkaConfig.OffsetsTopicCompressionCodecProp)).map(value => CompressionCodec.getCompressionCodec(value)).orNull /** ********* Metric Configuration **************/ val metricNumSamples = getInt(KafkaConfig.MetricNumSamplesProp) val metricSampleWindowMs = getLong(KafkaConfig.MetricSampleWindowMsProp) val metricReporterClasses: java.util.List[MetricsReporter] = getConfiguredInstances(KafkaConfig.MetricReporterClassesProp, classOf[MetricsReporter]) /** ********* SSL Configuration **************/ 
val principalBuilderClass = getClass(KafkaConfig.PrincipalBuilderClassProp) val sslProtocol = getString(KafkaConfig.SslProtocolProp) val sslProvider = getString(KafkaConfig.SslProviderProp) val sslEnabledProtocols = getList(KafkaConfig.SslEnabledProtocolsProp) val sslKeystoreType = getString(KafkaConfig.SslKeystoreTypeProp) val sslKeystoreLocation = getString(KafkaConfig.SslKeystoreLocationProp) val sslKeystorePassword = getPassword(KafkaConfig.SslKeystorePasswordProp) val sslKeyPassword = getPassword(KafkaConfig.SslKeyPasswordProp) val sslTruststoreType = getString(KafkaConfig.SslTruststoreTypeProp) val sslTruststoreLocation = getString(KafkaConfig.SslTruststoreLocationProp) val sslTruststorePassword = getPassword(KafkaConfig.SslTruststorePasswordProp) val sslKeyManagerAlgorithm = getString(KafkaConfig.SslKeyManagerAlgorithmProp) val sslTrustManagerAlgorithm = getString(KafkaConfig.SslTrustManagerAlgorithmProp) val sslClientAuth = getString(KafkaConfig.SslClientAuthProp) val sslCipher = getList(KafkaConfig.SslCipherSuitesProp) /** ********* Sasl Configuration **************/ val saslMechanismInterBrokerProtocol = getString(KafkaConfig.SaslMechanismInterBrokerProtocolProp) val saslEnabledMechanisms = getList(KafkaConfig.SaslEnabledMechanismsProp) val saslKerberosServiceName = getString(KafkaConfig.SaslKerberosServiceNameProp) val saslKerberosKinitCmd = getString(KafkaConfig.SaslKerberosKinitCmdProp) val saslKerberosTicketRenewWindowFactor = getDouble(KafkaConfig.SaslKerberosTicketRenewWindowFactorProp) val saslKerberosTicketRenewJitter = getDouble(KafkaConfig.SaslKerberosTicketRenewJitterProp) val saslKerberosMinTimeBeforeRelogin = getLong(KafkaConfig.SaslKerberosMinTimeBeforeReloginProp) val saslKerberosPrincipalToLocalRules = getList(KafkaConfig.SaslKerberosPrincipalToLocalRulesProp) val saslInterBrokerHandshakeRequestEnable = interBrokerProtocolVersion >= KAFKA_0_10_0_IV1 /** ********* Quota Configuration **************/ val producerQuotaBytesPerSecondDefault = getLong(KafkaConfig.ProducerQuotaBytesPerSecondDefaultProp) val consumerQuotaBytesPerSecondDefault = getLong(KafkaConfig.ConsumerQuotaBytesPerSecondDefaultProp) val numQuotaSamples = getInt(KafkaConfig.NumQuotaSamplesProp) val quotaWindowSizeSeconds = getInt(KafkaConfig.QuotaWindowSizeSecondsProp) val numReplicationQuotaSamples = getInt(KafkaConfig.NumReplicationQuotaSamplesProp) val replicationQuotaWindowSizeSeconds = getInt(KafkaConfig.ReplicationQuotaWindowSizeSecondsProp) val deleteTopicEnable = getBoolean(KafkaConfig.DeleteTopicEnableProp) val compressionType = getString(KafkaConfig.CompressionTypeProp) val listeners = getListeners val advertisedListeners = getAdvertisedListeners private def getLogRetentionTimeMillis: Long = { val millisInMinute = 60L * 1000L val millisInHour = 60L * millisInMinute val millis: java.lang.Long = Option(getLong(KafkaConfig.LogRetentionTimeMillisProp)).getOrElse( Option(getInt(KafkaConfig.LogRetentionTimeMinutesProp)) match { case Some(mins) => millisInMinute * mins case None => getInt(KafkaConfig.LogRetentionTimeHoursProp) * millisInHour }) if (millis < 0) return -1 millis } private def getMap(propName: String, propValue: String): Map[String, String] = { try { CoreUtils.parseCsvMap(propValue) } catch { case e: Exception => throw new IllegalArgumentException("Error parsing configuration property '%s': %s".format(propName, e.getMessage)) } } private def validateUniquePortAndProtocol(listeners: String) { val endpoints = try { val listenerList = CoreUtils.parseCsvList(listeners) 
listenerList.map(listener => EndPoint.createEndPoint(listener)) } catch { case e: Exception => throw new IllegalArgumentException("Error creating broker listeners from '%s': %s".format(listeners, e.getMessage)) } // filter port 0 for unit tests val endpointsWithoutZeroPort = endpoints.map(ep => ep.port).filter(_ != 0) val distinctPorts = endpointsWithoutZeroPort.distinct val distinctProtocols = endpoints.map(ep => ep.protocolType).distinct require(distinctPorts.size == endpointsWithoutZeroPort.size, "Each listener must have a different port") require(distinctProtocols.size == endpoints.size, "Each listener must have a different protocol") } // If the user did not define listeners but did define host or port, let's use them in backward compatible way // If none of those are defined, we default to PLAINTEXT://:9092 private def getListeners(): immutable.Map[SecurityProtocol, EndPoint] = { if (getString(KafkaConfig.ListenersProp) != null) { validateUniquePortAndProtocol(getString(KafkaConfig.ListenersProp)) CoreUtils.listenerListToEndPoints(getString(KafkaConfig.ListenersProp)) } else { CoreUtils.listenerListToEndPoints("PLAINTEXT://" + hostName + ":" + port) } } // If the user defined advertised listeners, we use those // If he didn't but did define advertised host or port, we'll use those and fill in the missing value from regular host / port or defaults // If none of these are defined, we'll use the listeners private def getAdvertisedListeners(): immutable.Map[SecurityProtocol, EndPoint] = { if (getString(KafkaConfig.AdvertisedListenersProp) != null) { validateUniquePortAndProtocol(getString(KafkaConfig.AdvertisedListenersProp)) CoreUtils.listenerListToEndPoints(getString(KafkaConfig.AdvertisedListenersProp)) } else if (getString(KafkaConfig.AdvertisedHostNameProp) != null || getInt(KafkaConfig.AdvertisedPortProp) != null) { CoreUtils.listenerListToEndPoints("PLAINTEXT://" + advertisedHostName + ":" + advertisedPort) } else { getListeners() } } validateValues() private def validateValues() { if(brokerIdGenerationEnable) { require(brokerId >= -1 && brokerId <= maxReservedBrokerId, "broker.id must be equal or greater than -1 and not greater than reserved.broker.max.id") } else { require(brokerId >= 0, "broker.id must be equal or greater than 0") } require(logRollTimeMillis >= 1, "log.roll.ms must be equal or greater than 1") require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be equal or greater than 0") require(logRetentionTimeMillis >= 1 || logRetentionTimeMillis == -1, "log.retention.ms must be unlimited (-1) or, equal or greater than 1") require(logDirs.nonEmpty) require(logCleanerDedupeBufferSize / logCleanerThreads > 1024 * 1024, "log.cleaner.dedupe.buffer.size must be at least 1MB per cleaner thread.") require(replicaFetchWaitMaxMs <= replicaSocketTimeoutMs, "replica.socket.timeout.ms should always be at least replica.fetch.wait.max.ms" + " to prevent unnecessary socket timeouts") require(replicaFetchWaitMaxMs <= replicaLagTimeMaxMs, "replica.fetch.wait.max.ms should always be at least replica.lag.time.max.ms" + " to prevent frequent changes in ISR") require(offsetCommitRequiredAcks >= -1 && offsetCommitRequiredAcks <= offsetsTopicReplicationFactor, "offsets.commit.required.acks must be greater or equal -1 and less or equal to offsets.topic.replication.factor") require(BrokerCompressionCodec.isValid(compressionType), "compression.type : " + compressionType + " is not valid." 
+ " Valid options are " + BrokerCompressionCodec.brokerCompressionOptions.mkString(",")) require(advertisedListeners.keySet.contains(interBrokerSecurityProtocol), s"${KafkaConfig.InterBrokerSecurityProtocolProp} must be a protocol in the configured set of ${KafkaConfig.AdvertisedListenersProp}. " + s"The valid options based on currently configured protocols are ${advertisedListeners.keySet}") require(advertisedListeners.keySet.subsetOf(listeners.keySet), s"${KafkaConfig.AdvertisedListenersProp} protocols must be equal to or a subset of ${KafkaConfig.ListenersProp} protocols. " + s"Found ${advertisedListeners.keySet}. The valid options based on currently configured protocols are ${listeners.keySet}" ) require(interBrokerProtocolVersion >= logMessageFormatVersion, s"log.message.format.version $logMessageFormatVersionString cannot be used when inter.broker.protocol.version is set to $interBrokerProtocolVersionString") val interBrokerUsesSasl = interBrokerSecurityProtocol == SecurityProtocol.SASL_PLAINTEXT || interBrokerSecurityProtocol == SecurityProtocol.SASL_SSL require(!interBrokerUsesSasl || saslInterBrokerHandshakeRequestEnable || saslMechanismInterBrokerProtocol == SaslConfigs.GSSAPI_MECHANISM, s"Only GSSAPI mechanism is supported for inter-broker communication with SASL when inter.broker.protocol.version is set to $interBrokerProtocolVersionString") require(!interBrokerUsesSasl || saslEnabledMechanisms.contains(saslMechanismInterBrokerProtocol), s"${KafkaConfig.SaslMechanismInterBrokerProtocolProp} must be included in ${KafkaConfig.SaslEnabledMechanismsProp} when SASL is used for inter-broker communication") } }
eribeiro/kafka
core/src/main/scala/kafka/server/KafkaConfig.scala
Scala
apache-2.0
76,964
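A minimal usage sketch for the KafkaConfig companion shown above, assuming the rest of kafka.server and its dependencies are on the classpath; the string keys are Kafka's standard broker setting names, which correspond to the Prop constants defined earlier in the file (zookeeper.connect has no default and must be supplied).

import java.util.Properties
import kafka.server.KafkaConfig

object KafkaConfigExample {
  def main(args: Array[String]): Unit = {
    val props = new Properties()
    props.put("zookeeper.connect", "localhost:2181")
    props.put("broker.id", "0")
    props.put("log.dirs", "/tmp/kafka-logs")

    // fromProps runs the ConfigDef definitions and the validateValues() checks shown above.
    val config = KafkaConfig.fromProps(props)
    println(config.brokerId) // 0
    println(config.logDirs)  // the parsed list of log directories
  }
}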
/* Copyright 2009-2015 - Big Data Technologies S.R.L. All Rights Reserved. */
package org.widok.moment

import scala.scalajs.js

trait Setters[T] extends js.Object {
  def add(time: Double, unit: String): T = js.native
  def add(millis: Int): T = js.native
  def add(duration: Duration): T = js.native

  def subtract(time: Double, unit: String): T = js.native
  def subtract(millis: Int): T = js.native
  def subtract(duration: Duration): T = js.native
}
aparo/scalajs-supler
supler/js/src/main/org/widok/moment/Setters.scala
Scala
apache-2.0
457
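A hedged sketch of consuming the facade generically rather than through a concrete binding type; Duration is the companion facade already referenced by the trait, and the code only makes sense under Scala.js since every body is js.native.

import org.widok.moment.{Duration, Setters}

object SettersUsage {
  // Works against any facade that mixes in Setters, e.g. the bindings' moment wrapper type.
  def shiftByThreeDays[T](m: Setters[T]): T =
    m.add(3, "days") // resolves to add(time: Double, unit: String); the Int literal widens to Double

  def backBy[T](m: Setters[T], d: Duration): T =
    m.subtract(d)    // resolves to subtract(duration: Duration)
}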
package com.cloudera.sa.examples.tablestats.model

import scala.collection.mutable

/**
 * Created by ted.malaska on 6/29/15.
 */
class FirstPassStatsModel extends Serializable {
  var columnStatsMap = new mutable.HashMap[Integer, ColumnStats]

  def +=(colIndex: Int, colValue: Any, colCount: Long): Unit = {
    columnStatsMap.getOrElseUpdate(colIndex, new ColumnStats) += (colValue, colCount)
  }

  def +=(firstPassStatsModel: FirstPassStatsModel): Unit = {
    firstPassStatsModel.columnStatsMap.foreach { e =>
      val columnStats = columnStatsMap.getOrElse(e._1, null)
      if (columnStats != null) {
        columnStats += (e._2)
      } else {
        columnStatsMap += ((e._1, e._2))
      }
    }
  }

  override def toString = s"FirstPassStatsModel(columnStatsMap=$columnStatsMap)"
}
tmalaska/Spark.TableStatsExample
src/main/scala/com/cloudera/sa/examples/tablestats/model/FirstPassStatsModel.scala
Scala
apache-2.0
797
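A small usage sketch, under the assumption that ColumnStats (defined elsewhere in the same package) accumulates (value, count) pairs per column; it mirrors how per-partition stats would be built and then merged in a Spark aggregation.

import com.cloudera.sa.examples.tablestats.model.FirstPassStatsModel

object FirstPassStatsExample {
  def main(args: Array[String]): Unit = {
    val partitionA = new FirstPassStatsModel
    partitionA += (0, "alice", 3L) // column 0 saw "alice" three times
    partitionA += (1, 42, 1L)

    val partitionB = new FirstPassStatsModel
    partitionB += (0, "bob", 2L)

    partitionA += partitionB       // merge the two partial results, as a reduce step would
    println(partitionA)
  }
}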
package doodle
package js

import doodle.core._
import doodle.core.transform.Transform

/** Utilities for working with SVG */
object Svg {
  def toHSLA(color: Color): String = {
    val (h, s, l, a) = (color.hue, color.saturation, color.lightness, color.alpha)
    s"hsla(${h.toDegrees}, ${s.toPercentage}, ${l.toPercentage}, ${a.get})"
  }

  def toStyle(dc: DrawingContext): String = {
    import scala.collection.mutable.StringBuilder
    val builder = new StringBuilder(64)

    dc.stroke.fold(builder ++= "stroke: none; ") {
      case Stroke(width, color, cap, join) =>
        val linecap = cap match {
          case Line.Cap.Butt   => "butt"
          case Line.Cap.Round  => "round"
          case Line.Cap.Square => "square"
        }
        val linejoin = join match {
          case Line.Join.Bevel => "bevel"
          case Line.Join.Round => "round"
          case Line.Join.Miter => "miter"
        }
        builder ++= s"stroke-width: ${width}px; "
        builder ++= s"stroke: ${toHSLA(color)};"
        builder ++= s"stroke-linecap: ${linecap}; "
        builder ++= s"stroke-linejoin: ${linejoin}; "
    }

    dc.fill.fold(builder ++= "fill: none; ") {
      case Fill(color) =>
        builder ++= s"fill: ${toHSLA(color)}; "
    }

    builder.toString
  }

  def toSvgPath(elts: List[PathElement]): String = {
    import PathElement._
    import scala.collection.mutable.StringBuilder

    val builder = new StringBuilder(64)
    elts.foreach {
      case MoveTo(end) =>
        builder ++= s"M ${end.x} ${end.y} "
      case LineTo(end) =>
        builder ++= s"L ${end.x} ${end.y} "
      case BezierCurveTo(cp1, cp2, end) =>
        builder ++= s"C ${cp1.x} ${cp1.y}, ${cp2.x} ${cp2.y}, ${end.x} ${end.y} "
    }
    builder.toString
  }

  def toSvgTransform(tx: Transform): String = {
    val elt = tx.elements
    val a = elt(0)
    val b = elt(3)
    val c = elt(1)
    val d = elt(4)
    val e = elt(2)
    val f = elt(5)
    s"matrix($a,$b,$c,$d,$e,$f)"
  }
}
Angeldude/doodle
js/src/main/scala/doodle/js/Svg.scala
Scala
apache-2.0
1,994
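A short sketch of the path helper above; Point.cartesian is assumed to be doodle's constructor for cartesian points, while the PathElement cases are exactly those matched in toSvgPath.

import doodle.core.{PathElement, Point}
import doodle.js.Svg

object SvgPathExample {
  def main(args: Array[String]): Unit = {
    val elements = List(
      PathElement.MoveTo(Point.cartesian(0, 0)),
      PathElement.LineTo(Point.cartesian(100, 0)),
      PathElement.LineTo(Point.cartesian(100, 100))
    )
    // Expected (assuming Double coordinates): "M 0.0 0.0 L 100.0 0.0 L 100.0 100.0 "
    println(Svg.toSvgPath(elements))
  }
}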
// scalac: -Xfatal-warnings
//
object Test {
  val f1: 1 = f1 // warning: recursive
}
lrytz/scala
test/files/neg/sip23-uninitialized-1.scala
Scala
apache-2.0
88
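For contrast, a version of the same literal-type declaration that should compile cleanly on a compiler with SIP-23 literal types (Scala 2.13); the negative test above exists because initializing f1 from itself triggers the recursion warning noted in its comment, which -Xfatal-warnings escalates to an error.

// scalac: -Xfatal-warnings
//
object TestOk {
  val f1: 1 = 1 // annotated with the literal singleton type and initialized explicitly; no warning
}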
package teststate.data

final case class BeforeAfter[+A](before: A, after: A) {
  def map[B](f: A => B): BeforeAfter[B] =
    BeforeAfter(f(before), f(after))

  def emap[E, B](f: A => E Or B): E Or BeforeAfter[B] =
    for {
      b <- f(before)
      a <- f(after)
    } yield BeforeAfter(b, a)
}

object BeforeAfter {
  def same[A](a: A): BeforeAfter[A] =
    BeforeAfter(a, a)
}
japgolly/test-state
core/shared/src/main/scala/teststate/data/BeforeAfter.scala
Scala
apache-2.0
382
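A compact usage sketch of the constructor, map, and same; emap is omitted because it needs test-state's Or type, which is defined elsewhere in the library.

import teststate.data.BeforeAfter

object BeforeAfterExample {
  def main(args: Array[String]): Unit = {
    val count   = BeforeAfter(before = 3, after = 5)
    val doubled = count.map(_ * 2)          // BeforeAfter(6, 10)
    val fixed   = BeforeAfter.same("state") // BeforeAfter("state", "state")
    println((doubled, fixed))
  }
}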
package sri.web.router

import sri.core._

trait PathUtils {

  def createStaticPath(path: String) = s"/${path.removeForwardSlashes}"

  def createDynamicPath(path: String) = s"/${path.removeForwardSlashes}/"

  def createStaticModulePath(basename: String, path: String) =
    if (path.nonEmpty && path != FORWARD_SLASH) s"/${basename.removeForwardSlashes}/${path.removeForwardSlashes}"
    else s"/${basename.removeForwardSlashes}"

  def createDynamicModulePath(basename: String, path: String) =
    if (path.removeForwardSlashes.nonEmpty) s"/${basename.removeForwardSlashes}/${path.removeForwardSlashes}/"
    else s"/${basename.removeForwardSlashes}/"
}
chandu0101/sri
web/src/main/scala/sri/web/router/PathUtils.scala
Scala
apache-2.0
641
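A hedged usage sketch; removeForwardSlashes and FORWARD_SLASH are assumed to be supplied by the surrounding sri.web.router and sri.core packages (the trait itself relies on them), and the commented results assume removeForwardSlashes strips leading and trailing slashes.

import sri.web.router.PathUtils

object RoutePaths extends PathUtils {
  val home     = createStaticPath("home/")                    // "/home" (assumed)
  val users    = createDynamicPath("/users")                  // "/users/" (assumed)
  val settings = createStaticModulePath("admin", "settings")  // "/admin/settings" (assumed)
}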