code | repo_name | path | language | license | size
stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M
---|---|---|---|---|---|
trait Foo[T]
trait FooSub[T] extends Foo[T] {
type Super = Foo[T]
}
object FooSub {
implicit def fooSub[T: Bar]: FooSub[T] = new FooSub[T] {}
}
trait Bar[T]
class Quux
object Quux {
implicit val barQuux: Bar[Quux] = new Bar[Quux] {}
val fooSubQuux = implicitly[FooSub[Quux]]
implicit val fooQuux: fooSubQuux.Super = fooSubQuux
}
object Test extends App {
implicitly[Foo[Quux]]
}
| lrytz/scala | test/files/run/implicit-caching.scala | Scala | apache-2.0 | 398 |
package aims.core
import akka.http.model.{ HttpMethod, HttpMethods, MediaType, MediaTypes }
import scala.annotation.StaticAnnotation
/**
* Component:
* Description:
* Date: 2014/12/23
* @author Andy Ai
*/
object Annotations {
final case class pattern(pattern: String, method: HttpMethod = HttpMethods.GET) extends StaticAnnotation
final case class mediaTypes(
consumers: List[MediaType] = List(MediaTypes.`application/json`),
producers: List[MediaType] = List(MediaTypes.`application/json`)) extends StaticAnnotation
}
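// Illustrative sketch only (not part of aims-core): how a resource method might be
// tagged with the annotations defined above. The handler name, route and return value
// below are assumptions made for the example.
//
//   import aims.core.Annotations._
//   import akka.http.model.{ HttpMethods, MediaTypes }
//
//   @pattern("/items/{id}", HttpMethods.GET)
//   @mediaTypes(producers = List(MediaTypes.`application/json`))
//   def findItem(id: String): String = s"""{"id": "$id"}"""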
| aiyanbo/aims | aims-core/src/main/scala/aims/core/Annotations.scala | Scala | mit | 542 |
package com.tpalanga.test.config
import com.tpalanga.testlib.test.config.RestServiceConfig
import com.typesafe.config.Config
object TestConfig {
def apply(config: Config): TestConfig =
new TestConfig(
restServiceConfig = RestServiceConfig(config.getConfig("remote.service"))
)
}
case class TestConfig(restServiceConfig: RestServiceConfig)
| tpalanga/akka-http-microservice | newsletterServiceTest/src/test/scala/com/tpalanga/test/config/TestConfig.scala | Scala | unlicense | 358 |
final class PulseOx extends ICE_MDS {
// Metadata...
override val vmds : Map[String, ICE_VMD] = Map(
"pulseox" -> new ICE_PulseOx_VMD {
override val status : ICE_VMD_Status = new ICE_VMD_Status {
override val exchange : ICE_Get_Exchange = new GetExchange {}
}
override val channels : Map[String, ICE_Channel] = Map(
"spo2" -> new ICE_SpO2_Channel {
override val metrics : Map[String, ICE_Metric] = Map(
"spo2_num" -> new ICE_SpO2_Numeric {
override val exchanges : Map[String, ICE_Data_Exchange] = Map(
"periodic" -> new PeriodicExchange {}
)
}
)
override val alerts : Map[String, ICE_Alert] = Map()
override val settings : Map[String, ICE_Setting] = Map()
override val statuses : Map[String, ICE_Status] = Map()
}
)
}
)
}
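// Illustrative traversal of the nested device model built above (a sketch; it assumes
// the ICE_* traits declare the `vmds`, `channels` and `metrics` members overridden here,
// which is implied by the `override` keywords but not shown in this file):
//
//   val pulseOx = new PulseOx
//   val spo2Channel = pulseOx.vmds("pulseox").channels("spo2")
//   val spo2Numeric = spo2Channel.metrics("spo2_num")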
| scbarrett/aadl-translator | mdcf-architect-documentation/src/site/sphinx/dml-build/language/PulseOx.scala | Scala | epl-1.0 | 907 |
package me.yingrui.segment.filter
import me.yingrui.segment.conf.SegmentConfiguration
import me.yingrui.segment.core.SegmentResult
import me.yingrui.segment.dict.POSUtil
import me.yingrui.segment.filter.ner.HmmNameFilter
import org.junit.{Assert, Test}
class HmmNameFilterTest {
var filter = HmmNameFilter(SegmentConfiguration(Map("separate.xingming" -> "true")))
@Test
def should_recognize_xing_with_prefix {
val segmentResult = new SegmentResult(6)
segmentResult.setWords(List[String]("老", "张", "来", "到", "了", "社区").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("老张", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
}
@Test
def should_recognize_xing_with_suffix {
val segmentResult = new SegmentResult(6)
segmentResult.setWords(List[String]("邱", "老", "来", "到", "了", "社区").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("邱老", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
}
@Test
def should_recognize_xing_with_double_name {
val segmentResult = new SegmentResult(6)
segmentResult.setWords(List[String]("张", "三", "丰", "到", "了", "社区").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("张", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
Assert.assertEquals("三丰", segmentResult.getWord(1))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
}
@Test
def should_recognize_xing_with_double_character_word_as_name {
val segmentResult = new SegmentResult(6)
segmentResult.setWords(List[String]("张", "朝阳", "来", "到", "了", "社区").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("张", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
Assert.assertEquals("朝阳", segmentResult.getWord(1))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
}
@Test
def should_recognize_xing_with_single_name {
val segmentResult = new SegmentResult(6)
segmentResult.setWords(List[String]("李", "鹏", "总理", "到", "了", "社区").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("李", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
Assert.assertEquals("鹏", segmentResult.getWord(1))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
}
@Test
def should_recognize_word_contains_xing_and_name {
val segmentResult = new SegmentResult(6)
segmentResult.setWords(List[String]("王国", "维", "来", "到", "了", "社区").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("王", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
Assert.assertEquals("国维", segmentResult.getWord(1))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
}
@Test
def should_recognize_normal_word_composite_of_xing_and_name {
val segmentResult = new SegmentResult(5)
segmentResult.setWords(List[String]("主席", "汪洋", "发表", "了", "社区").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("汪", segmentResult.getWord(1))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
Assert.assertEquals("洋", segmentResult.getWord(2))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(2))
}
@Test
def should_recognize_last_word_overlap_with_xing {
val segmentResult = new SegmentResult(5)
segmentResult.setWords(List[String]("昨天", "同江", "泽民", "主席", "进行").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("同", segmentResult.getWord(1))
Assert.assertEquals("江", segmentResult.getWord(2))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(2))
}
@Test
def should_recognize_last_word_overlap_with_xing_and_have_double_names {
val segmentResult = new SegmentResult(5)
segmentResult.setWords(List[String]("对白", "晓", "燕", "绑架", "案").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("白", segmentResult.getWord(1))
Assert.assertEquals("晓燕", segmentResult.getWord(2))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(2))
}
@Test
def should_recognize_overlap_with_next_word {
val segmentResult = new SegmentResult(5)
segmentResult.setWords(List[String]("石", "宝", "良家", "的", "炕上").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("石", segmentResult.getWord(0))
Assert.assertEquals("宝良", segmentResult.getWord(1))
Assert.assertEquals("家", segmentResult.getWord(2))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
}
@Test
def should_recognize_overlap_with_next_word_when_there_is_single_name {
val segmentResult = new SegmentResult(5)
segmentResult.setWords(List[String]("石", "良家", "的", "炕上", "。").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("石", segmentResult.getWord(0))
Assert.assertEquals("良", segmentResult.getWord(1))
Assert.assertEquals("家", segmentResult.getWord(2))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(1))
}
@Test
def should_recognize_foreign_name_fu_luo_si {
val segmentResult = new SegmentResult(5)
segmentResult.setWords(List[String]("福", "诺", "斯", "在", "该所").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_M, POSUtil.POS_N, POSUtil.POS_U, POSUtil.POS_N).toArray)
segmentResult.setDomainTypes(List[Int](0, 0, 0, 0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A", "N/A", "N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("福诺斯", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
}
@Test
def should_recognize_foreign_name_sai_fu {
val segmentResult = new SegmentResult(2)
segmentResult.setWords(List[String]("塞", "夫").toArray)
segmentResult.setPOSArray(List[Int](POSUtil.POS_N, POSUtil.POS_M).toArray)
segmentResult.setDomainTypes(List[Int](0, 0).toArray)
segmentResult.setConcepts(List[String]("N/A", "N/A").toArray)
filter.setSegmentResult(segmentResult)
filter.filtering()
Assert.assertEquals("塞夫", segmentResult.getWord(0))
Assert.assertEquals(POSUtil.POS_NR, segmentResult.getPOS(0))
}
}
| yingrui/mahjong | lib-segment/src/test/scala/me/yingrui/segment/filter/HmmNameFilterTest.scala | Scala | gpl-3.0 | 10,339 |
package com.twitter.finagle.ssl
import org.scalatest.funsuite.AnyFunSuite
class ApplicationProtocolsTest extends AnyFunSuite {
test("Supported with alpn or npn values succeeds") {
val appProtos = ApplicationProtocols.Supported(Seq("h2", "http/1.1"))
assert(appProtos == ApplicationProtocols.Supported(Seq("h2", "http/1.1")))
}
test("Supported with non alpn or npn values fails") {
intercept[IllegalArgumentException] {
val appProtos = ApplicationProtocols.Supported(Seq("h2", "test", "http/1.1"))
}
}
test("fromString results in unspecified when application protocols are empty") {
val appProtos = ApplicationProtocols.fromString("")
assert(appProtos == ApplicationProtocols.Unspecified)
}
test("fromString drops empty application protocols") {
val appProtos1 = ApplicationProtocols.fromString(",")
assert(appProtos1 == ApplicationProtocols.Unspecified, "appProtos1")
val appProtos2 = ApplicationProtocols.fromString("h2,")
assert(appProtos2 == ApplicationProtocols.Supported(Seq("h2")), "appProtos2")
val appProtos3 = ApplicationProtocols.fromString(",spdy/3.1")
assert(appProtos3 == ApplicationProtocols.Supported(Seq("spdy/3.1")), "appProtos3")
val appProtos4 = ApplicationProtocols.fromString("h2,spdy/3.1")
assert(appProtos4 == ApplicationProtocols.Supported(Seq("h2", "spdy/3.1")), "appProtos4")
}
test("fromString handles multiple application protocols") {
val appProtos = ApplicationProtocols.fromString("h2,spdy/3.1,h2c,http/1.1")
val items: Seq[String] = appProtos match {
case ApplicationProtocols.Supported(list) => list
case _ => Seq.empty
}
assert(items == Seq("h2", "spdy/3.1", "h2c", "http/1.1"))
}
test("fromString handles multiple application protocols with spaces") {
val appProtos = ApplicationProtocols.fromString("h2, spdy/3.1 , http/1.1")
val items: Seq[String] = appProtos match {
case ApplicationProtocols.Supported(list) => list
case _ => Seq.empty
}
assert(items == Seq("h2", "spdy/3.1", "http/1.1"))
}
test("fromString with non alpn or npn values fails") {
intercept[IllegalArgumentException] {
val appProtos = ApplicationProtocols.fromString("h2, test, spdy/3.1, what")
}
}
test("combine results in Unspecified for two Unspecified items") {
val appProtos1 = ApplicationProtocols.Unspecified
val appProtos2 = ApplicationProtocols.Unspecified
val combined = ApplicationProtocols.combine(appProtos1, appProtos2)
assert(combined == ApplicationProtocols.Unspecified)
}
test("combine uses the second when the first is Unspecified") {
val appProtos1 = ApplicationProtocols.Unspecified
val appProtos2 = ApplicationProtocols.Supported(Seq("h2", "spdy/3.1", "http/1.1"))
val combined = ApplicationProtocols.combine(appProtos1, appProtos2)
assert(combined == appProtos2)
}
test("combine uses the first when the second is Unspecified") {
val appProtos1 = ApplicationProtocols.Supported(Seq("h2", "spdy/3.1", "http/1.1"))
val appProtos2 = ApplicationProtocols.Unspecified
val combined = ApplicationProtocols.combine(appProtos1, appProtos2)
assert(combined == appProtos1)
}
test("combine uniquely combines Supported lists") {
val appProtos1 = ApplicationProtocols.Supported(Seq("h2", "http/1.1"))
val appProtos2 = ApplicationProtocols.Supported(Seq("spdy/3.1", "http/1.1"))
val combined = ApplicationProtocols.combine(appProtos1, appProtos2)
assert(combined == ApplicationProtocols.Supported(Seq("h2", "http/1.1", "spdy/3.1")))
}
}
| twitter/finagle | finagle-core/src/test/scala/com/twitter/finagle/ssl/ApplicationProtocolsTest.scala | Scala | apache-2.0 | 3,608 |
// Project: smath
// Module: commons / ode
// Description: Adapter between FirstOrderSystem and math-commons FirstOrderDifferentialEquations
//
// Copyright (c) 2015 Johannes Kastner <[email protected]>
// Distributed under the MIT License (see included file LICENSE)
package biz.enef.smath.ode.commons
import biz.enef.smath.ode.FirstOrderSystem
import biz.enef.smath.ode.FirstOrderSystemD
import org.apache.commons.math3.ode.FirstOrderDifferentialEquations
case class FirstOrderSystemWrapper(wrapped: FirstOrderSystem[Double]) extends FirstOrderDifferentialEquations {
@inline
override def getDimension: Int = wrapped.dimension
@inline
override def computeDerivatives(t: Double, y: Array[Double], ydot: Array[Double]): Unit =
wrapped.computeDerivative(t,y,ydot)
}
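// Minimal usage sketch. Assumptions: FirstOrderSystem[Double] exposes exactly the
// `dimension` and `computeDerivative(t, y, ydot)` members used by the wrapper above,
// and the decay system plus the chosen commons-math integrator are purely illustrative.
//
//   import org.apache.commons.math3.ode.nonstiff.DormandPrince853Integrator
//
//   val decay = new FirstOrderSystem[Double] {
//     val dimension = 1
//     def computeDerivative(t: Double, y: Array[Double], ydot: Array[Double]): Unit =
//       ydot(0) = -y(0)                      // y' = -y, solution y(t) = y0 * exp(-t)
//   }
//   val y = Array(1.0)
//   new DormandPrince853Integrator(1e-8, 100.0, 1e-10, 1e-10)
//     .integrate(FirstOrderSystemWrapper(decay), 0.0, y, 1.0, y)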
| jokade/smath | commons/src/main/scala/biz/enef/smath/ode/commons/FirstOrderSystemWrapper.scala | Scala | mit | 804 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.immutable.HashSet
import scala.collection.mutable.{ArrayBuffer, Stack}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.expressions.{BinaryExpression, MultiLikeBase, _}
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.expressions.objects.AssertNotNull
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.catalyst.trees.AlwaysProcess
import org.apache.spark.sql.catalyst.trees.TreePattern._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/*
* Optimization rules defined in this file should not affect the structure of the logical plan.
*/
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values.
*/
object ConstantFolding extends Rule[LogicalPlan] {
private def hasNoSideEffect(e: Expression): Boolean = e match {
case _: Attribute => true
case _: Literal => true
case _: NoThrow if e.deterministic => e.children.forall(hasNoSideEffect)
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(AlwaysProcess.fn, ruleId) {
case q: LogicalPlan => q.transformExpressionsDownWithPruning(
AlwaysProcess.fn, ruleId) {
// Skip redundant folding of literals. This rule is technically not necessary. Placing this
// here avoids running the next rule for Literal values, which would create a new Literal
// object and running eval unnecessarily.
case l: Literal => l
case Size(c: CreateArray, _) if c.children.forall(hasNoSideEffect) =>
Literal(c.children.length)
case Size(c: CreateMap, _) if c.children.forall(hasNoSideEffect) =>
Literal(c.children.length / 2)
// Fold expressions that are foldable.
case e if e.foldable => Literal.create(e.eval(EmptyRow), e.dataType)
}
}
}
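// Worked example for the rule above (an illustrative sketch, not part of Spark's
// sources; `a` is a hypothetical non-foldable integer attribute):
//
//   input expression:    ((1 + 2) + a) AS col
//   after this rule:     (3 + a) AS col
//
// The foldable subtree `1 + 2` is evaluated once at optimization time and replaced by
// Literal(3); the outer Add survives because `a` is only known at runtime.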
/**
* Substitutes [[Attribute Attributes]] which can be statically evaluated with their corresponding
* value in conjunctive [[Expression Expressions]]
* e.g.
* {{{
* SELECT * FROM table WHERE i = 5 AND j = i + 3
* ==> SELECT * FROM table WHERE i = 5 AND j = 8
* }}}
*
* Approach used:
* - Populate a mapping of attribute => constant value by looking at all the equals predicates
* - Using this mapping, replace occurrences of the attributes with the corresponding constant values
* in the AND node.
*/
object ConstantPropagation extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan.transformUpWithPruning(
_.containsAllPatterns(LITERAL, FILTER), ruleId) {
case f: Filter =>
val (newCondition, _) = traverse(f.condition, replaceChildren = true, nullIsFalse = true)
if (newCondition.isDefined) {
f.copy(condition = newCondition.get)
} else {
f
}
}
type EqualityPredicates = Seq[((AttributeReference, Literal), BinaryComparison)]
/**
* Traverse a condition as a tree and replace attributes with constant values.
* - On matching [[And]], recursively traverse each children and get propagated mappings.
* If the current node is not child of another [[And]], replace all occurrences of the
* attributes with the corresponding constant values.
* - If a child of [[And]] is [[EqualTo]] or [[EqualNullSafe]], propagate the mapping
* of attribute => constant.
* - On matching [[Or]] or [[Not]], recursively traverse each children, propagate empty mapping.
* - Otherwise, stop traversal and propagate empty mapping.
* @param condition condition to be traversed
* @param replaceChildren whether to replace attributes with constant values in children
* @param nullIsFalse whether a null boolean expression result can be considered false, e.g. in the
* case of `WHERE e`, a null result of expression `e` means the same as if it
* evaluated to false
* @return A tuple including:
* 1. Option[Expression]: optional changed condition after traversal
* 2. EqualityPredicates: propagated mapping of attribute => constant
*/
private def traverse(condition: Expression, replaceChildren: Boolean, nullIsFalse: Boolean)
: (Option[Expression], EqualityPredicates) =
condition match {
case e @ EqualTo(left: AttributeReference, right: Literal)
if safeToReplace(left, nullIsFalse) =>
(None, Seq(((left, right), e)))
case e @ EqualTo(left: Literal, right: AttributeReference)
if safeToReplace(right, nullIsFalse) =>
(None, Seq(((right, left), e)))
case e @ EqualNullSafe(left: AttributeReference, right: Literal)
if safeToReplace(left, nullIsFalse) =>
(None, Seq(((left, right), e)))
case e @ EqualNullSafe(left: Literal, right: AttributeReference)
if safeToReplace(right, nullIsFalse) =>
(None, Seq(((right, left), e)))
case a: And =>
val (newLeft, equalityPredicatesLeft) =
traverse(a.left, replaceChildren = false, nullIsFalse)
val (newRight, equalityPredicatesRight) =
traverse(a.right, replaceChildren = false, nullIsFalse)
val equalityPredicates = equalityPredicatesLeft ++ equalityPredicatesRight
val newSelf = if (equalityPredicates.nonEmpty && replaceChildren) {
Some(And(replaceConstants(newLeft.getOrElse(a.left), equalityPredicates),
replaceConstants(newRight.getOrElse(a.right), equalityPredicates)))
} else {
if (newLeft.isDefined || newRight.isDefined) {
Some(And(newLeft.getOrElse(a.left), newRight.getOrElse(a.right)))
} else {
None
}
}
(newSelf, equalityPredicates)
case o: Or =>
// Ignore the EqualityPredicates from children since they are only propagated through And.
val (newLeft, _) = traverse(o.left, replaceChildren = true, nullIsFalse)
val (newRight, _) = traverse(o.right, replaceChildren = true, nullIsFalse)
val newSelf = if (newLeft.isDefined || newRight.isDefined) {
Some(Or(left = newLeft.getOrElse(o.left), right = newRight.getOrElse((o.right))))
} else {
None
}
(newSelf, Seq.empty)
case n: Not =>
// Ignore the EqualityPredicates from children since they are only propagated through And.
val (newChild, _) = traverse(n.child, replaceChildren = true, nullIsFalse = false)
(newChild.map(Not), Seq.empty)
case _ => (None, Seq.empty)
}
// We need to take into account if an attribute is nullable and the context of the conjunctive
// expression. E.g. `SELECT * FROM t WHERE NOT(c = 1 AND c + 1 = 1)` where attribute `c` can be
// substituted into `1 + 1 = 1` if 'c' isn't nullable. If 'c' is nullable then the enclosing
// NOT prevents us to do the substitution as NOT flips the context (`nullIsFalse`) of what a
// null result of the enclosed expression means.
private def safeToReplace(ar: AttributeReference, nullIsFalse: Boolean) =
!ar.nullable || nullIsFalse
private def replaceConstants(condition: Expression, equalityPredicates: EqualityPredicates)
: Expression = {
val constantsMap = AttributeMap(equalityPredicates.map(_._1))
val predicates = equalityPredicates.map(_._2).toSet
def replaceConstants0(expression: Expression) = expression transform {
case a: AttributeReference => constantsMap.getOrElse(a, a)
}
condition transform {
case e @ EqualTo(_, _) if !predicates.contains(e) => replaceConstants0(e)
case e @ EqualNullSafe(_, _) if !predicates.contains(e) => replaceConstants0(e)
}
}
}
/**
* Reorder associative integral-type operators and fold all constants into one.
*/
object ReorderAssociativeOperator extends Rule[LogicalPlan] {
private def flattenAdd(
expression: Expression,
groupSet: ExpressionSet): Seq[Expression] = expression match {
case expr @ Add(l, r, _) if !groupSet.contains(expr) =>
flattenAdd(l, groupSet) ++ flattenAdd(r, groupSet)
case other => other :: Nil
}
private def flattenMultiply(
expression: Expression,
groupSet: ExpressionSet): Seq[Expression] = expression match {
case expr @ Multiply(l, r, _) if !groupSet.contains(expr) =>
flattenMultiply(l, groupSet) ++ flattenMultiply(r, groupSet)
case other => other :: Nil
}
private def collectGroupingExpressions(plan: LogicalPlan): ExpressionSet = plan match {
case Aggregate(groupingExpressions, aggregateExpressions, child) =>
ExpressionSet.apply(groupingExpressions)
case _ => ExpressionSet(Seq.empty)
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsPattern(BINARY_ARITHMETIC), ruleId) {
case q: LogicalPlan =>
// We have to respect aggregate expressions which exists in grouping expressions when plan
// is an Aggregate operator, otherwise the optimized expression could not be derived from
// grouping expressions.
// TODO: do not reorder consecutive `Add`s or `Multiply`s with different `failOnError` flags
val groupingExpressionSet = collectGroupingExpressions(q)
q.transformExpressionsDownWithPruning(_.containsPattern(BINARY_ARITHMETIC)) {
case a @ Add(_, _, f) if a.deterministic && a.dataType.isInstanceOf[IntegralType] =>
val (foldables, others) = flattenAdd(a, groupingExpressionSet).partition(_.foldable)
if (foldables.size > 1) {
val foldableExpr = foldables.reduce((x, y) => Add(x, y, f))
val c = Literal.create(foldableExpr.eval(EmptyRow), a.dataType)
if (others.isEmpty) c else Add(others.reduce((x, y) => Add(x, y, f)), c, f)
} else {
a
}
case m @ Multiply(_, _, f) if m.deterministic && m.dataType.isInstanceOf[IntegralType] =>
val (foldables, others) = flattenMultiply(m, groupingExpressionSet).partition(_.foldable)
if (foldables.size > 1) {
val foldableExpr = foldables.reduce((x, y) => Multiply(x, y, f))
val c = Literal.create(foldableExpr.eval(EmptyRow), m.dataType)
if (others.isEmpty) c else Multiply(others.reduce((x, y) => Multiply(x, y, f)), c, f)
} else {
m
}
}
}
}
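// Worked example for the rule above (illustrative sketch; `a` and `b` are hypothetical
// integer attributes):
//
//   input expression:    ((a + 1) + (b + 2)) + 3
//   after this rule:     (a + b) + 6
//
// The nested Adds are flattened, the three foldable operands are folded into one
// literal, and the remaining non-foldable operands are re-combined.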
/**
* Optimize IN predicates:
* 1. Converts the predicate to false when the list is empty and
* the value is not nullable.
* 2. Removes literal repetitions.
* 3. Replaces [[In (value, seq[Literal])]] with optimized version
* [[InSet (value, HashSet[Literal])]] which is much faster.
*/
object OptimizeIn extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsPattern(IN), ruleId) {
case q: LogicalPlan => q.transformExpressionsDownWithPruning(_.containsPattern(IN), ruleId) {
case In(v, list) if list.isEmpty =>
// When v is not nullable, the following expression will be optimized
// to FalseLiteral which is tested in OptimizeInSuite.scala
If(IsNotNull(v), FalseLiteral, Literal(null, BooleanType))
case expr @ In(v, list) if expr.inSetConvertible =>
val newList = ExpressionSet(list).toSeq
if (newList.length == 1
// TODO: `EqualTo` for structural types are not working. Until SPARK-24443 is addressed,
// TODO: we exclude them in this rule.
&& !v.isInstanceOf[CreateNamedStruct]
&& !newList.head.isInstanceOf[CreateNamedStruct]) {
EqualTo(v, newList.head)
} else if (newList.length > conf.optimizerInSetConversionThreshold) {
val hSet = newList.map(e => e.eval(EmptyRow))
InSet(v, HashSet() ++ hSet)
} else if (newList.length < list.length) {
expr.copy(list = newList)
} else { // newList.length == list.length && newList.length > 1
expr
}
}
}
}
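// Worked examples for the rule above (illustrative sketch; `a` is a hypothetical
// integer attribute and spark.sql.optimizer.inSetConversionThreshold has its default
// value of 10):
//
//   a IN (<empty list>)    =>  IF(a IS NOT NULL, FALSE, NULL)
//   a IN (1, 1, 2)         =>  a IN (1, 2)               -- duplicate literals removed
//   a IN (1)               =>  a = 1
//   a IN (1, 2, ..., 11)   =>  a INSET (1, 2, ..., 11)   -- list longer than the threshold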
/**
* Simplifies boolean expressions:
* 1. Simplifies expressions whose answer can be determined without evaluating both sides.
* 2. Eliminates / extracts common factors.
* 3. Merge same expressions
* 4. Removes `Not` operator.
*/
object BooleanSimplification extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsAnyPattern(AND_OR, NOT), ruleId) {
case q: LogicalPlan => q.transformExpressionsUpWithPruning(
_.containsAnyPattern(AND_OR, NOT), ruleId) {
case TrueLiteral And e => e
case e And TrueLiteral => e
case FalseLiteral Or e => e
case e Or FalseLiteral => e
case FalseLiteral And _ => FalseLiteral
case _ And FalseLiteral => FalseLiteral
case TrueLiteral Or _ => TrueLiteral
case _ Or TrueLiteral => TrueLiteral
case a And b if Not(a).semanticEquals(b) =>
If(IsNull(a), Literal.create(null, a.dataType), FalseLiteral)
case a And b if a.semanticEquals(Not(b)) =>
If(IsNull(b), Literal.create(null, b.dataType), FalseLiteral)
case a Or b if Not(a).semanticEquals(b) =>
If(IsNull(a), Literal.create(null, a.dataType), TrueLiteral)
case a Or b if a.semanticEquals(Not(b)) =>
If(IsNull(b), Literal.create(null, b.dataType), TrueLiteral)
case a And b if a.semanticEquals(b) => a
case a Or b if a.semanticEquals(b) => a
// The following optimizations are applicable only when the operands are not nullable,
// since the three-value logic of AND and OR are different in NULL handling.
// See the chart:
// +---------+---------+---------+---------+
// | operand | operand | OR | AND |
// +---------+---------+---------+---------+
// | TRUE | TRUE | TRUE | TRUE |
// | TRUE | FALSE | TRUE | FALSE |
// | FALSE | FALSE | FALSE | FALSE |
// | UNKNOWN | TRUE | TRUE | UNKNOWN |
// | UNKNOWN | FALSE | UNKNOWN | FALSE |
// | UNKNOWN | UNKNOWN | UNKNOWN | UNKNOWN |
// +---------+---------+---------+---------+
// (NULL And (NULL Or FALSE)) = NULL, but (NULL And FALSE) = FALSE. Thus, a can't be nullable.
case a And (b Or c) if !a.nullable && Not(a).semanticEquals(b) => And(a, c)
// (NULL And (FALSE Or NULL)) = NULL, but (NULL And FALSE) = FALSE. Thus, a can't be nullable.
case a And (b Or c) if !a.nullable && Not(a).semanticEquals(c) => And(a, b)
// ((NULL Or FALSE) And NULL) = NULL, but (FALSE And NULL) = FALSE. Thus, c can't be nullable.
case (a Or b) And c if !c.nullable && a.semanticEquals(Not(c)) => And(b, c)
// ((FALSE Or NULL) And NULL) = NULL, but (FALSE And NULL) = FALSE. Thus, c can't be nullable.
case (a Or b) And c if !c.nullable && b.semanticEquals(Not(c)) => And(a, c)
// (NULL Or (NULL And TRUE)) = NULL, but (NULL Or TRUE) = TRUE. Thus, a can't be nullable.
case a Or (b And c) if !a.nullable && Not(a).semanticEquals(b) => Or(a, c)
// (NULL Or (TRUE And NULL)) = NULL, but (NULL Or TRUE) = TRUE. Thus, a can't be nullable.
case a Or (b And c) if !a.nullable && Not(a).semanticEquals(c) => Or(a, b)
// ((NULL And TRUE) Or NULL) = NULL, but (TRUE Or NULL) = TRUE. Thus, c can't be nullable.
case (a And b) Or c if !c.nullable && a.semanticEquals(Not(c)) => Or(b, c)
// ((TRUE And NULL) Or NULL) = NULL, but (TRUE Or NULL) = TRUE. Thus, c can't be nullable.
case (a And b) Or c if !c.nullable && b.semanticEquals(Not(c)) => Or(a, c)
// Common factor elimination for conjunction
case and @ (left And right) =>
// 1. Split left and right to get the disjunctive predicates,
// i.e. lhs = (a || b), rhs = (a || c)
// 2. Find the common predicates between lhsSet and rhsSet, i.e. common = (a)
// 3. Remove the common predicates from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
// 4. If common is non-empty, apply the formula to get the optimized predicate:
// common || (ldiff && rdiff)
// 5. Else if common is empty, split left and right to get the conjunctive predicates.
// for example lhs = (a && b), rhs = (a && c) => all = (a, b, a, c), distinct = (a, b, c)
// optimized predicate: (a && b && c)
val lhs = splitDisjunctivePredicates(left)
val rhs = splitDisjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals))
if (common.nonEmpty) {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a || b || c || ...) && (a || b) => (a || b)
common.reduce(Or)
} else {
// (a || b || c || ...) && (a || b || d || ...) =>
// a || b || ((c || ...) && (d || ...))
(common :+ And(ldiff.reduce(Or), rdiff.reduce(Or))).reduce(Or)
}
} else {
// No common factors from disjunctive predicates, reduce common factor from conjunction
val all = splitConjunctivePredicates(left) ++ splitConjunctivePredicates(right)
val distinct = ExpressionSet(all)
if (all.size == distinct.size) {
// No common factors, return the original predicate
and
} else {
// (a && b) && a && (a && c) => a && b && c
buildBalancedPredicate(distinct.toSeq, And)
}
}
// Common factor elimination for disjunction
case or @ (left Or right) =>
// 1. Split left and right to get the conjunctive predicates,
// i.e. lhs = (a && b), rhs = (a && c)
// 2. Find the common predicates between lhsSet and rhsSet, i.e. common = (a)
// 3. Remove the common predicates from lhsSet and rhsSet, i.e. ldiff = (b), rdiff = (c)
// 4. If common is non-empty, apply the formula to get the optimized predicate:
// common && (ldiff || rdiff)
// 5. Else if common is empty, split left and right to get the disjunctive predicates.
// for example lhs = (a || b), rhs = (a || c) => all = (a, b, a, c), distinct = (a, b, c)
// optimized predicate: (a || b || c)
val lhs = splitConjunctivePredicates(left)
val rhs = splitConjunctivePredicates(right)
val common = lhs.filter(e => rhs.exists(e.semanticEquals))
if (common.nonEmpty) {
val ldiff = lhs.filterNot(e => common.exists(e.semanticEquals))
val rdiff = rhs.filterNot(e => common.exists(e.semanticEquals))
if (ldiff.isEmpty || rdiff.isEmpty) {
// (a && b) || (a && b && c && ...) => a && b
common.reduce(And)
} else {
// (a && b && c && ...) || (a && b && d && ...) =>
// a && b && ((c && ...) || (d && ...))
(common :+ Or(ldiff.reduce(And), rdiff.reduce(And))).reduce(And)
}
} else {
// No common factors in conjunctive predicates, reduce common factor from disjunction
val all = splitDisjunctivePredicates(left) ++ splitDisjunctivePredicates(right)
val distinct = ExpressionSet(all)
if (all.size == distinct.size) {
// No common factors, return the original predicate
or
} else {
// (a || b) || a || (a || c) => a || b || c
buildBalancedPredicate(distinct.toSeq, Or)
}
}
case Not(TrueLiteral) => FalseLiteral
case Not(FalseLiteral) => TrueLiteral
case Not(a GreaterThan b) => LessThanOrEqual(a, b)
case Not(a GreaterThanOrEqual b) => LessThan(a, b)
case Not(a LessThan b) => GreaterThanOrEqual(a, b)
case Not(a LessThanOrEqual b) => GreaterThan(a, b)
case Not(a Or b) => And(Not(a), Not(b))
case Not(a And b) => Or(Not(a), Not(b))
case Not(Not(e)) => e
case Not(IsNull(e)) => IsNotNull(e)
case Not(IsNotNull(e)) => IsNull(e)
}
}
}
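// Worked examples for the rule above (illustrative sketch; a, b and c are hypothetical
// attributes, non-nullable where the rewrite requires it):
//
//   true AND a                     =>  a
//   a OR true                      =>  true
//   NOT(a > b)                     =>  a <= b
//   (a OR b) AND (a OR c)          =>  a OR (b AND c)     -- common factor extraction
//   (a AND b) AND a AND (a AND c)  =>  a AND b AND c      -- duplicate conjunct elimination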
/**
* Simplifies binary comparisons with semantically-equal expressions:
* 1) Replace '<=>' with 'true' literal.
* 2) Replace '=', '<=', and '>=' with 'true' literal if both operands are non-nullable.
* 3) Replace '<' and '>' with 'false' literal if both operands are non-nullable.
*/
object SimplifyBinaryComparison
extends Rule[LogicalPlan] with PredicateHelper with ConstraintHelper {
private def canSimplifyComparison(
left: Expression,
right: Expression,
notNullExpressions: => ExpressionSet): Boolean = {
if (left.semanticEquals(right)) {
(!left.nullable && !right.nullable) || notNullExpressions.contains(left)
} else {
false
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsPattern(BINARY_COMPARISON), ruleId) {
case l: LogicalPlan =>
lazy val notNullExpressions = ExpressionSet(l match {
case Filter(fc, _) =>
splitConjunctivePredicates(fc).collect {
case i: IsNotNull => i.child
}
case _ => Seq.empty
})
l.transformExpressionsUpWithPruning(_.containsPattern(BINARY_COMPARISON)) {
// True with equality
case a EqualNullSafe b if a.semanticEquals(b) => TrueLiteral
case a EqualTo b if canSimplifyComparison(a, b, notNullExpressions) => TrueLiteral
case a GreaterThanOrEqual b if canSimplifyComparison(a, b, notNullExpressions) =>
TrueLiteral
case a LessThanOrEqual b if canSimplifyComparison(a, b, notNullExpressions) => TrueLiteral
// False with inequality
case a GreaterThan b if canSimplifyComparison(a, b, notNullExpressions) => FalseLiteral
case a LessThan b if canSimplifyComparison(a, b, notNullExpressions) => FalseLiteral
}
}
}
/**
* Simplifies conditional expressions (if / case).
*/
object SimplifyConditionals extends Rule[LogicalPlan] with PredicateHelper {
private def falseOrNullLiteral(e: Expression): Boolean = e match {
case FalseLiteral => true
case Literal(null, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsAnyPattern(IF, CASE_WHEN), ruleId) {
case q: LogicalPlan => q transformExpressionsUp {
case If(TrueLiteral, trueValue, _) => trueValue
case If(FalseLiteral, _, falseValue) => falseValue
case If(Literal(null, _), _, falseValue) => falseValue
case If(cond, TrueLiteral, FalseLiteral) =>
if (cond.nullable) EqualNullSafe(cond, TrueLiteral) else cond
case If(cond, FalseLiteral, TrueLiteral) =>
if (cond.nullable) Not(EqualNullSafe(cond, TrueLiteral)) else Not(cond)
case If(cond, trueValue, falseValue)
if cond.deterministic && trueValue.semanticEquals(falseValue) => trueValue
case If(cond, l @ Literal(null, _), FalseLiteral) if !cond.nullable => And(cond, l)
case If(cond, l @ Literal(null, _), TrueLiteral) if !cond.nullable => Or(Not(cond), l)
case If(cond, FalseLiteral, l @ Literal(null, _)) if !cond.nullable => And(Not(cond), l)
case If(cond, TrueLiteral, l @ Literal(null, _)) if !cond.nullable => Or(cond, l)
case CaseWhen(Seq((cond, TrueLiteral)), Some(FalseLiteral)) =>
if (cond.nullable) EqualNullSafe(cond, TrueLiteral) else cond
case CaseWhen(Seq((cond, FalseLiteral)), Some(TrueLiteral)) =>
if (cond.nullable) Not(EqualNullSafe(cond, TrueLiteral)) else Not(cond)
case e @ CaseWhen(branches, elseValue) if branches.exists(x => falseOrNullLiteral(x._1)) =>
// If there are branches that are always false, remove them.
// If there are no more branches left, just use the else value.
// Note that these two are handled together here in a single case statement because
// otherwise we cannot determine the data type for the elseValue if it is None (i.e. null).
val newBranches = branches.filter(x => !falseOrNullLiteral(x._1))
if (newBranches.isEmpty) {
elseValue.getOrElse(Literal.create(null, e.dataType))
} else {
e.copy(branches = newBranches)
}
case CaseWhen(branches, _) if branches.headOption.map(_._1).contains(TrueLiteral) =>
// If the first branch is a true literal, remove the entire CaseWhen and use the value
// from that. Note that CaseWhen.branches should never be empty, and as a result the
// headOption (rather than head) added above is just an extra (and unnecessary) safeguard.
branches.head._2
case CaseWhen(branches, _) if branches.exists(_._1 == TrueLiteral) =>
// a branch with a true condition eliminates all following branches,
// these branches can be pruned away
val (h, t) = branches.span(_._1 != TrueLiteral)
CaseWhen( h :+ t.head, None)
case e @ CaseWhen(branches, elseOpt)
if branches.forall(_._2.semanticEquals(elseOpt.getOrElse(Literal(null, e.dataType)))) =>
val elseValue = elseOpt.getOrElse(Literal(null, e.dataType))
// For non-deterministic conditions with side effects, we cannot remove them or change
// their ordering. As a result, we try to remove the deterministic conditions from the tail.
var hitNonDeterministicCond = false
var i = branches.length
while (i > 0 && !hitNonDeterministicCond) {
hitNonDeterministicCond = !branches(i - 1)._1.deterministic
if (!hitNonDeterministicCond) {
i -= 1
}
}
if (i == 0) {
elseValue
} else {
e.copy(branches = branches.take(i).map(branch => (branch._1, elseValue)))
}
}
}
}
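// Worked examples for the rule above (illustrative sketch; `cond` is a hypothetical
// deterministic boolean attribute and a, b, x, y are arbitrary expressions):
//
//   IF(true, x, y)                                  =>  x
//   IF(null, x, y)                                  =>  y
//   CASE WHEN false THEN a WHEN cond THEN b END     =>  CASE WHEN cond THEN b END
//   CASE WHEN true THEN a WHEN cond THEN b END      =>  a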
/**
* Push the foldable expression into (if / case) branches.
*/
object PushFoldableIntoBranches extends Rule[LogicalPlan] with PredicateHelper {
// To be conservative here: it's only a guaranteed win if at most one branch
// ends up not being foldable.
private def atMostOneUnfoldable(exprs: Seq[Expression]): Boolean = {
val (foldables, others) = exprs.partition(_.foldable)
foldables.nonEmpty && others.length < 2
}
// Not all UnaryExpression can be pushed into (if / case) branches, e.g. Alias.
private def supportedUnaryExpression(e: UnaryExpression): Boolean = e match {
case _: IsNull | _: IsNotNull => true
case _: UnaryMathExpression | _: Abs | _: Bin | _: Factorial | _: Hex => true
case _: String2StringExpression | _: Ascii | _: Base64 | _: BitLength | _: Chr | _: Length =>
true
case _: CastBase => true
case _: GetDateField | _: LastDay => true
case _: ExtractIntervalPart[_] => true
case _: ArraySetLike => true
case _: ExtractValue => true
case _ => false
}
// Not all BinaryExpression can be pushed into (if / case) branches.
private def supportedBinaryExpression(e: BinaryExpression): Boolean = e match {
case _: BinaryComparison | _: StringPredicate | _: StringRegexExpression => true
case _: BinaryArithmetic => true
case _: BinaryMathExpression => true
case _: AddMonths | _: DateAdd | _: DateAddInterval | _: DateDiff | _: DateSub |
_: DateAddYMInterval | _: TimestampAddYMInterval | _: TimeAdd => true
case _: FindInSet | _: RoundBase => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsAnyPattern(CASE_WHEN, IF), ruleId) {
case q: LogicalPlan => q.transformExpressionsUpWithPruning(
_.containsAnyPattern(CASE_WHEN, IF), ruleId) {
case u @ UnaryExpression(i @ If(_, trueValue, falseValue))
if supportedUnaryExpression(u) && atMostOneUnfoldable(Seq(trueValue, falseValue)) =>
i.copy(
trueValue = u.withNewChildren(Array(trueValue)),
falseValue = u.withNewChildren(Array(falseValue)))
case u @ UnaryExpression(c @ CaseWhen(branches, elseValue))
if supportedUnaryExpression(u) && atMostOneUnfoldable(branches.map(_._2) ++ elseValue) =>
c.copy(
branches.map(e => e.copy(_2 = u.withNewChildren(Array(e._2)))),
elseValue.map(e => u.withNewChildren(Array(e))))
case b @ BinaryExpression(i @ If(_, trueValue, falseValue), right)
if supportedBinaryExpression(b) && right.foldable &&
atMostOneUnfoldable(Seq(trueValue, falseValue)) =>
i.copy(
trueValue = b.withNewChildren(Array(trueValue, right)),
falseValue = b.withNewChildren(Array(falseValue, right)))
case b @ BinaryExpression(left, i @ If(_, trueValue, falseValue))
if supportedBinaryExpression(b) && left.foldable &&
atMostOneUnfoldable(Seq(trueValue, falseValue)) =>
i.copy(
trueValue = b.withNewChildren(Array(left, trueValue)),
falseValue = b.withNewChildren(Array(left, falseValue)))
case b @ BinaryExpression(c @ CaseWhen(branches, elseValue), right)
if supportedBinaryExpression(b) && right.foldable &&
atMostOneUnfoldable(branches.map(_._2) ++ elseValue) =>
c.copy(
branches.map(e => e.copy(_2 = b.withNewChildren(Array(e._2, right)))),
elseValue.map(e => b.withNewChildren(Array(e, right))))
case b @ BinaryExpression(left, c @ CaseWhen(branches, elseValue))
if supportedBinaryExpression(b) && left.foldable &&
atMostOneUnfoldable(branches.map(_._2) ++ elseValue) =>
c.copy(
branches.map(e => e.copy(_2 = b.withNewChildren(Array(left, e._2)))),
elseValue.map(e => b.withNewChildren(Array(left, e))))
}
}
}
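// Worked example for the rule above (illustrative sketch; `a` is a hypothetical
// integer attribute, so every CaseWhen branch value stays foldable after the push):
//
//   input expression:    (CASE WHEN a > 0 THEN 1 ELSE 2 END) + 3
//   after this rule:     CASE WHEN a > 0 THEN 1 + 3 ELSE 2 + 3 END
//
// A later run of ConstantFolding then collapses the branch values to 4 and 5.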
/**
* Simplifies LIKE expressions that do not need full regular expressions to evaluate the condition.
* For example, when the expression is just checking to see if a string starts with a given
* pattern.
*/
object LikeSimplification extends Rule[LogicalPlan] {
// if guards below protect from escapes on trailing %.
// Cases like "something\%" are not optimized, but this does not affect correctness.
private val startsWith = "([^_%]+)%".r
private val endsWith = "%([^_%]+)".r
private val startsAndEndsWith = "([^_%]+)%([^_%]+)".r
private val contains = "%([^_%]+)%".r
private val equalTo = "([^_%]*)".r
private def simplifyLike(
input: Expression, pattern: String, escapeChar: Char = '\\'): Option[Expression] = {
if (pattern.contains(escapeChar)) {
// There are three different situations when the pattern contains escapeChar:
// 1. the pattern contains an invalid escape sequence, e.g. 'm\aca'
// 2. the pattern contains an escaped wildcard character, e.g. 'ma\%ca'
// 3. the pattern contains an escaped escape character, e.g. 'ma\\ca'
// Although some patterns could be optimized if we handled the escape first, we simply
// skip this rule if the pattern contains any escapeChar, for simplicity.
None
} else {
pattern match {
case startsWith(prefix) =>
Some(StartsWith(input, Literal(prefix)))
case endsWith(postfix) =>
Some(EndsWith(input, Literal(postfix)))
// The 'a%a' pattern is basically the same as 'a%' && '%a'.
// However, the additional `Length` condition is required to prevent 'a' from matching 'a%a'.
case startsAndEndsWith(prefix, postfix) =>
Some(And(GreaterThanOrEqual(Length(input), Literal(prefix.length + postfix.length)),
And(StartsWith(input, Literal(prefix)), EndsWith(input, Literal(postfix)))))
case contains(infix) =>
Some(Contains(input, Literal(infix)))
case equalTo(str) =>
Some(EqualTo(input, Literal(str)))
case _ => None
}
}
}
private def simplifyMultiLike(
child: Expression, patterns: Seq[UTF8String], multi: MultiLikeBase): Expression = {
val (remainPatternMap, replacementMap) =
patterns.map { p =>
p -> Option(p).flatMap(p => simplifyLike(child, p.toString))
}.partition(_._2.isEmpty)
val remainPatterns = remainPatternMap.map(_._1)
val replacements = replacementMap.map(_._2.get)
if (replacements.isEmpty) {
multi
} else {
multi match {
case l: LikeAll => And(replacements.reduceLeft(And), l.copy(patterns = remainPatterns))
case l: NotLikeAll =>
And(replacements.map(Not(_)).reduceLeft(And), l.copy(patterns = remainPatterns))
case l: LikeAny => Or(replacements.reduceLeft(Or), l.copy(patterns = remainPatterns))
case l: NotLikeAny =>
Or(replacements.map(Not(_)).reduceLeft(Or), l.copy(patterns = remainPatterns))
}
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
_.containsPattern(LIKE_FAMLIY), ruleId) {
case l @ Like(input, Literal(pattern, StringType), escapeChar) =>
if (pattern == null) {
// If pattern is null, return null value directly, since "col like null" == null.
Literal(null, BooleanType)
} else {
simplifyLike(input, pattern.toString, escapeChar).getOrElse(l)
}
case l @ LikeAll(child, patterns) => simplifyMultiLike(child, patterns, l)
case l @ NotLikeAll(child, patterns) => simplifyMultiLike(child, patterns, l)
case l @ LikeAny(child, patterns) => simplifyMultiLike(child, patterns, l)
case l @ NotLikeAny(child, patterns) => simplifyMultiLike(child, patterns, l)
}
}
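// Worked examples for the rule above (illustrative sketch; `col` is a hypothetical
// string attribute and the patterns contain no escape character):
//
//   col LIKE 'abc%'    =>  StartsWith(col, 'abc')
//   col LIKE '%abc'    =>  EndsWith(col, 'abc')
//   col LIKE '%abc%'   =>  Contains(col, 'abc')
//   col LIKE 'a%b'     =>  Length(col) >= 2 AND StartsWith(col, 'a') AND EndsWith(col, 'b')
//   col LIKE 'abc'     =>  col = 'abc'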
/**
* Replaces [[Expression Expressions]] that can be statically evaluated with
* equivalent [[Literal]] values. This rule is more specific with
* Null value propagation from bottom to top of the expression tree.
*/
object NullPropagation extends Rule[LogicalPlan] {
private def isNullLiteral(e: Expression): Boolean = e match {
case Literal(null, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
t => t.containsAnyPattern(NULL_CHECK, NULL_LITERAL, COUNT)
|| t.containsAllPatterns(WINDOW_EXPRESSION, CAST, LITERAL), ruleId) {
case q: LogicalPlan => q.transformExpressionsUpWithPruning(
t => t.containsAnyPattern(NULL_CHECK, NULL_LITERAL, COUNT)
|| t.containsAllPatterns(WINDOW_EXPRESSION, CAST, LITERAL), ruleId) {
case e @ WindowExpression(Cast(Literal(0L, _), _, _), _) =>
Cast(Literal(0L), e.dataType, Option(conf.sessionLocalTimeZone))
case e @ AggregateExpression(Count(exprs), _, _, _, _) if exprs.forall(isNullLiteral) =>
Cast(Literal(0L), e.dataType, Option(conf.sessionLocalTimeZone))
case ae @ AggregateExpression(Count(exprs), _, false, _, _) if !exprs.exists(_.nullable) =>
// This rule should only be triggered when the isDistinct field is false.
ae.copy(aggregateFunction = Count(Literal(1)))
case IsNull(c) if !c.nullable => Literal.create(false, BooleanType)
case IsNotNull(c) if !c.nullable => Literal.create(true, BooleanType)
case EqualNullSafe(Literal(null, _), r) => IsNull(r)
case EqualNullSafe(l, Literal(null, _)) => IsNull(l)
case AssertNotNull(c, _) if !c.nullable => c
// For Coalesce, remove null literals.
case e @ Coalesce(children) =>
val newChildren = children.filterNot(isNullLiteral)
if (newChildren.isEmpty) {
Literal.create(null, e.dataType)
} else if (newChildren.length == 1) {
newChildren.head
} else {
Coalesce(newChildren)
}
// If the value expression is NULL then transform the In expression to null literal.
case In(Literal(null, _), _) => Literal.create(null, BooleanType)
case InSubquery(Seq(Literal(null, _)), _) => Literal.create(null, BooleanType)
// Non-leaf NullIntolerant expressions will return null, if at least one of its children is
// a null literal.
case e: NullIntolerant if e.children.exists(isNullLiteral) =>
Literal.create(null, e.dataType)
}
}
}
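// Worked examples for the rule above (illustrative sketch; `a` is a hypothetical
// non-nullable attribute and `b` is a nullable one):
//
//   a IS NULL                =>  false
//   a IS NOT NULL            =>  true
//   b <=> NULL               =>  b IS NULL
//   count(NULL, NULL)        =>  CAST(0 AS BIGINT)
//   coalesce(NULL, b, NULL)  =>  b
//   abs(NULL)                =>  NULL        -- Abs is NullIntolerant with a null child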
/**
* Replace attributes with aliases of the original foldable expressions if possible.
* Other optimizations will take advantage of the propagated foldable expressions. For example,
* this rule can optimize
* {{{
* SELECT 1.0 x, 'abc' y, Now() z ORDER BY x, y, 3
* }}}
* to
* {{{
* SELECT 1.0 x, 'abc' y, Now() z ORDER BY 1.0, 'abc', Now()
* }}}
* and other rules can further optimize it and remove the ORDER BY operator.
*/
object FoldablePropagation extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = {
CleanupAliases(propagateFoldables(plan)._1)
}
private def propagateFoldables(plan: LogicalPlan): (LogicalPlan, AttributeMap[Alias]) = {
plan match {
case p: Project =>
val (newChild, foldableMap) = propagateFoldables(p.child)
val newProject =
replaceFoldable(p.withNewChildren(Seq(newChild)).asInstanceOf[Project], foldableMap)
val newFoldableMap = collectFoldables(newProject.projectList)
(newProject, newFoldableMap)
case a: Aggregate =>
val (newChild, foldableMap) = propagateFoldables(a.child)
val newAggregate =
replaceFoldable(a.withNewChildren(Seq(newChild)).asInstanceOf[Aggregate], foldableMap)
val newFoldableMap = collectFoldables(newAggregate.aggregateExpressions)
(newAggregate, newFoldableMap)
// We can not replace the attributes in `Expand.output`. If there are other non-leaf
// operators that have the `output` field, we should put them here too.
case e: Expand =>
val (newChild, foldableMap) = propagateFoldables(e.child)
val expandWithNewChildren = e.withNewChildren(Seq(newChild)).asInstanceOf[Expand]
val newExpand = if (foldableMap.isEmpty) {
expandWithNewChildren
} else {
val newProjections = expandWithNewChildren.projections.map(_.map(_.transform {
case a: AttributeReference if foldableMap.contains(a) => foldableMap(a)
}))
if (newProjections == expandWithNewChildren.projections) {
expandWithNewChildren
} else {
expandWithNewChildren.copy(projections = newProjections)
}
}
(newExpand, foldableMap)
case u: UnaryNode if canPropagateFoldables(u) =>
val (newChild, foldableMap) = propagateFoldables(u.child)
val newU = replaceFoldable(u.withNewChildren(Seq(newChild)), foldableMap)
(newU, foldableMap)
// Join derives the output attributes from its child while they are actually not the
// same attributes. For example, the output of outer join is not always picked from its
// children, but can also be null. We should exclude these mis-derived attributes when
// propagating the foldable expressions.
// TODO(cloud-fan): It seems more reasonable to use new attributes as the output attributes
// of outer join.
case j: Join =>
val (newChildren, foldableMaps) = j.children.map(propagateFoldables).unzip
val foldableMap = AttributeMap(
foldableMaps.foldLeft(Iterable.empty[(Attribute, Alias)])(_ ++ _.baseMap.values).toSeq)
val newJoin =
replaceFoldable(j.withNewChildren(newChildren).asInstanceOf[Join], foldableMap)
val missDerivedAttrsSet: AttributeSet = AttributeSet(newJoin.joinType match {
case _: InnerLike | LeftExistence(_) => Nil
case LeftOuter => newJoin.right.output
case RightOuter => newJoin.left.output
case FullOuter => newJoin.left.output ++ newJoin.right.output
case _ => Nil
})
val newFoldableMap = AttributeMap(foldableMap.baseMap.values.filterNot {
case (attr, _) => missDerivedAttrsSet.contains(attr)
}.toSeq)
(newJoin, newFoldableMap)
// For other plans, they are not safe to apply foldable propagation, and they should not
// propagate foldable expressions from children.
case o =>
val newOther = o.mapChildren(propagateFoldables(_)._1)
(newOther, AttributeMap.empty)
}
}
private def replaceFoldable(plan: LogicalPlan, foldableMap: AttributeMap[Alias]): plan.type = {
if (foldableMap.isEmpty) {
plan
} else {
plan transformExpressions {
case a: AttributeReference if foldableMap.contains(a) => foldableMap(a)
}
}
}
private def collectFoldables(expressions: Seq[NamedExpression]) = {
AttributeMap(expressions.collect {
case a: Alias if a.child.foldable => (a.toAttribute, a)
})
}
/**
* List of all [[UnaryNode]]s which allow foldable propagation.
*/
private def canPropagateFoldables(u: UnaryNode): Boolean = u match {
// Handling `Project` is moved to `propagateFoldables`.
case _: Filter => true
case _: SubqueryAlias => true
// Handling `Aggregate` is moved to `propagateFoldables`.
case _: Window => true
case _: Sample => true
case _: GlobalLimit => true
case _: LocalLimit => true
case _: Generate => true
case _: Distinct => true
case _: AppendColumns => true
case _: AppendColumnsWithObject => true
case _: RepartitionByExpression => true
case _: Repartition => true
case _: Sort => true
case _: TypedFilter => true
case _ => false
}
}
/**
* Removes [[Cast Casts]] that are unnecessary because the input is already the correct type.
*/
object SimplifyCasts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
_.containsPattern(CAST), ruleId) {
case Cast(e, dataType, _) if e.dataType == dataType => e
case c @ Cast(e, dataType, _) => (e.dataType, dataType) match {
case (ArrayType(from, false), ArrayType(to, true)) if from == to => e
case (MapType(fromKey, fromValue, false), MapType(toKey, toValue, true))
if fromKey == toKey && fromValue == toValue => e
case _ => c
}
}
}
/**
* Removes nodes that are not necessary.
*/
object RemoveDispensableExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
_.containsPattern(UNARY_POSITIVE), ruleId) {
case UnaryPositive(child) => child
}
}
/**
* Removes the inner case conversion expressions that are unnecessary because
* the inner conversion is overwritten by the outer one.
*/
object SimplifyCaseConversionExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsPattern(UPPER_OR_LOWER), ruleId) {
case q: LogicalPlan => q.transformExpressionsUpWithPruning(
_.containsPattern(UPPER_OR_LOWER), ruleId) {
case Upper(Upper(child)) => Upper(child)
case Upper(Lower(child)) => Upper(child)
case Lower(Upper(child)) => Lower(child)
case Lower(Lower(child)) => Lower(child)
}
}
}
/**
* Combine nested [[Concat]] expressions.
*/
object CombineConcats extends Rule[LogicalPlan] {
private def flattenConcats(concat: Concat): Concat = {
val stack = Stack[Expression](concat)
val flattened = ArrayBuffer.empty[Expression]
while (stack.nonEmpty) {
stack.pop() match {
case Concat(children) =>
stack.pushAll(children.reverse)
// If `spark.sql.function.concatBinaryAsString` is false, nested `Concat` exprs possibly
// have `Concat`s with binary output. Since `TypeCoercion` casts them into strings,
// we need to handle the case to combine all nested `Concat`s.
case c @ Cast(Concat(children), StringType, _) =>
val newChildren = children.map { e => c.copy(child = e) }
stack.pushAll(newChildren.reverse)
case child =>
flattened += child
}
}
Concat(flattened.toSeq)
}
private def hasNestedConcats(concat: Concat): Boolean = concat.children.exists {
case c: Concat => true
case c @ Cast(Concat(children), StringType, _) => true
case _ => false
}
def apply(plan: LogicalPlan): LogicalPlan = plan.transformAllExpressionsWithPruning(
_.containsPattern(CONCAT), ruleId) {
case concat: Concat if hasNestedConcats(concat) =>
flattenConcats(concat)
}
}
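// Worked example for the rule above (illustrative sketch; a, b and c are hypothetical
// string attributes):
//
//   concat(concat(a, b), c)                  =>  concat(a, b, c)
//   concat(cast(concat(a, b) as string), c)  =>  concat(cast(a as string), cast(b as string), c)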
| maropu/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/expressions.scala | Scala | apache-2.0 | 45,171 |
package edu.depauw.csc.scala.graphics3d;
import javax.media.j3d.Appearance
import javax.media.j3d.ColoringAttributes
import javax.media.j3d.TransformGroup
import javax.media.j3d.Transform3D
import javax.media.j3d.BranchGroup
import javax.media.j3d.Material
import javax.vecmath.Color3f
import javax.vecmath.Vector3d
import com.sun.j3d.utils.geometry.Primitive
import edu.depauw.csc.scala.graphics.Color
/**
Companion object for the cone class which handles constructors.
@author Cory Boatright, [email protected]
@version July 1, 2007
*/
object Cone {
def apply(rad: Double, height: Double) = new Cone(rad, height)
def apply(rad: Double, height: Double, c: Color) = new Cone(rad, height, c)
def apply(rad: Double, height: Double, c: Color, x: Double, y: Double, z: Double) = new Cone(rad, height, c, x, y, z)
}
/**
A cone primitive for the 3D graphics package.
@author Cory Boatright, [email protected]
@version June 26, 2007
*/
class Cone(protected val rad: Double, protected val height: Double) extends BranchGroup with Shape3D {
//Selfishly decided to keep the Cone name, so needed to use full path of the j3d cone class
protected var app: Appearance = new Appearance()
app.setCapability(Appearance.ALLOW_MATERIAL_WRITE)
app.setMaterial(new Material())
private var shape: com.sun.j3d.utils.geometry.Cone = new com.sun.j3d.utils.geometry.Cone(rad.asInstanceOf[Float],
height.asInstanceOf[Float], Primitive.GENERATE_NORMALS, 60, 4, app)
  //The transform groups behave in a manner that is not necessarily intuitive; altering them will require recreating the tree
protected var rot: Transform3D = new Transform3D()
protected var tran: Transform3D = new Transform3D()
protected var scal: Transform3D = new Transform3D()
protected var txrot: TransformGroup = new TransformGroup(rot)
txrot.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE)
protected var txtran: TransformGroup = new TransformGroup(tran)
txtran.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE)
protected var txscale: TransformGroup = new TransformGroup(scal)
txscale.setCapability(TransformGroup.ALLOW_TRANSFORM_WRITE)
shape.setAppearance(app)
txrot.addChild(shape)
txscale.addChild(txrot)
txtran.addChild(txscale)
addChild(txtran)
/**
Secondary constructor which takes an initial color
@param rad The radius of the base of the cone
@param height The height of the cone
  @param c The Color the cone should be set to
*/
def this(rad: Double, height: Double, c: Color) {
this(rad, height)
val newColor: Color3f = new Color3f(c.dumpColor)
var mat: Material = new Material()
mat.setDiffuseColor(newColor)
mat.setShininess(85)
app.setMaterial(mat)
}
/**
Secondary constructor which takes a color but also takes a non-origin point of placement.
@param rad The radius of the base of the cone
@param height The height of the cone
  @param c The Color the cone should be set to
@param x The x-coordinate in three-dimensional space
@param y The y-coordinate in three-dimensional space
@param z The z-coordinate in three-dimensional space
*/
def this(rad: Double, height: Double, c: Color, x: Double, y: Double, z: Double) {
this(rad, height, c)
val pos: Vector3d = new Vector3d(x, y, z)
tran.set(pos)
txtran.setTransform(tran)
}
/**
Rotates the cone along the x-, y-, and z-directions
@param xm The radians of rotation in the x-direction in a counter-clockwise direction
@param ym The radians of rotation in the y-direction in a counter-clockwise direction
@param zm The radians of rotation in the z-direction in a counter-clockwise direction
*/
def rotate(xm: Double, ym: Double, zm: Double): Unit = {
var compZ: Transform3D = new Transform3D()
var compY: Transform3D = new Transform3D()
var compX: Transform3D = new Transform3D()
compZ.rotZ(zm)
compY.rotY(ym)
compX.rotX(xm)
rot = new Transform3D(compX)
rot.mul(compY)
rot.mul(compZ)
txrot.setTransform(rot)
}
/**
Scales the cone by the given value
@param ratio The amount by which to scale the shape, acquired by dividing the new size by the old size
*/
def scale(ratio: Double): Unit = {
scal.setScale(ratio)
    // Update the existing, attached transform group in place; a freshly created
    // TransformGroup would never be added to the scene graph, so the scale change would be lost.
    txscale.setTransform(scal)
}
/**
Translates the cone in three-dimensional space
@param xm The x-amount to translate the cone
@param ym The y-amount to translate the cone
@param zm The z-amount to translate the cone
*/
def translate(xm: Double, ym: Double, zm: Double): Unit = {
var vec2: Vector3d = new Vector3d(xm, ym, zm)
var vec1: Vector3d = new Vector3d()
tran.get(vec1)
vec1.add(vec2)
tran.set(vec1)
txtran.setTransform(tran)
}
/**
Changes the color of the cone
@param c The new color to use for the cone
*/
def changeColor(c: Color): Unit = {
val newColor: Color3f = new Color3f(c.dumpColor)
var mat: Material = new Material()
mat.setDiffuseColor(newColor)
mat.setShininess(85)
app.setMaterial(mat)
}
}
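// Minimal usage sketch (added for illustration; `someColor` stands for any
// edu.depauw.csc.scala.graphics.Color instance):
//
// val cone = Cone(1.0, 2.0, someColor, 0.0, 1.0, 0.0)
// cone.rotate(0.0, math.Pi / 2, 0.0)
// cone.translate(1.0, 0.0, 0.0)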
| bhoward/EscalatorOld | ScalaGraphics/src/edu/depauw/csc/scala/graphics3d/Cone.scala | Scala | apache-2.0 | 5,166 |
/*
* Copyright 2014 Adam Rosenberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nalloc.bitb.kcits.sandbox.foreach
import org.nalloc.bitb.kcits.sandbox.Inspectable
class BlockInlineLambda extends Inspectable {
b.foreach(x => {
println(x)
println(x + 1)
})
s.foreach(x => {
println(x)
println(x + 1)
})
i.foreach(x => {
println(x)
println(x + 1)
})
l.foreach(x => {
println(x)
println(x + 1)
})
f.foreach(x => {
println(x)
println(x + 1)
})
d.foreach(x => {
println(x)
println(x + 1)
})
st.foreach(x => {
println(x)
println(x + 1)
})
}
| arosenberger/nalloc_2.10 | sandbox/src/main/scala/org/nalloc/bitb/kcits/sandbox/foreach/BlockInlineLambda.scala | Scala | apache-2.0 | 1,149 |
// scalac: -Xasync
import scala.concurrent._
import ExecutionContext.Implicits.global
import scala.tools.testkit.async.Async._
import scala.concurrent.duration.Duration
object Test extends App { test
def func1() = async { "hello" }
def func(a: Option[Boolean]) = async {a match {
case null | None => await(func1()) + " world"
case _ => "okay"
}}
def test: Any = Await.result(func(None), Duration.Inf)
}
| scala/scala | test/async/jvm/concurrent_patternAlternativeBothAnnotations.scala | Scala | apache-2.0 | 432 |
package bad.robot.temperature
import org.specs2.matcher.DisjunctionMatchers._
import org.specs2.mutable.Specification
class TemperatureTest extends Specification {
"Celsius" >> {
Temperature(23.125).celsius must_== 23.125
}
"Fahrenheit" >> {
Temperature(23.125).fahrenheit must_== 73.625
}
"Display as celsius" >> {
Temperature(24.5).asCelsius must_== "24.5 °C"
}
"Display as fahrenheit" >> {
Temperature(72.24).asFahrenheit must_== "162.0 °F"
}
"Encode json" >> {
val expected =
"""{
| "celsius" : 66.99
|}""".stripMargin
encode(Temperature(66.99)).spaces2ps must_== expected
}
"Decode json" >> {
val temperature = """{ "celsius" : 99.1 }"""
val result = decodeAsDisjunction[Temperature](temperature)
result must be_\\/-(Temperature(99.1))
}
}
| tobyweston/temperature-machine | src/test/scala/bad/robot/temperature/TemperatureTest.scala | Scala | apache-2.0 | 840 |
package com.scalableQuality.quick.core.fileComponentDescriptions
import com.scalableQuality.quick.core.Reporting.ComparisonBetweenTwoColumns
import com.scalableQuality.quick.core.checks.{Check, CheckColumnValue}
import com.scalableQuality.quick.core.fileComponentDescriptions.errorMessages.DelimitedColumnDescriptionErrorMessages
import com.scalableQuality.quick.core.phases.{ColumnUsageStages, ShouldUseDuring, ValidationStage}
import com.scalableQuality.quick.core.valueMapping.ValueMapper
import com.scalableQuality.quick.mantle.constructFromXml._
import com.scalableQuality.quick.mantle.error.{BunchOfErrors, UnrecoverableError}
import scala.xml.MetaData
class DelimitedColumnDescription(
metaData: ColumnDescriptionMetaData,
position: DelimitedPosition,
comparisonMapper: ValueMapper,
columnValueChecks: CheckColumnValue
) {
def shouldUseDuring(stages: ColumnUsageStages*): Boolean =
metaData.shouldUseDuring(stages: _*)
def columnValue(row: Vector[String]): Option[String] =
position.extractColumnValue(row)
def comparisonValue(row: Vector[String]): Option[String] =
comparisonMapper(columnValue(row))
def compareTwoColumns(
leftRow: Option[Vector[String]],
rightRow: Option[Vector[String]]): ComparisonBetweenTwoColumns =
ComparisonBetweenTwoColumns(
this.metaData,
leftRow.flatMap(this.columnValue),
rightRow.flatMap(this.columnValue),
compare(leftRow, rightRow),
checkColumnValue(leftRow),
checkColumnValue(rightRow)
)
def checkColumnValue(row: Vector[String]): Boolean = {
val value = columnValue(row)
columnValueChecks(value)
}
def usableDuringValidation: Boolean = shouldUseDuring(ValidationStage) || columnValueChecks.checksAreDefined
private def checkColumnValue(maybeStrings: Option[Vector[String]]): Boolean =
maybeStrings
.map(checkColumnValue(_))
.getOrElse(Check.noChecksWereExecutedDefaultResult)
private def compare(leftRow: Option[Vector[String]],
rightRow: Option[Vector[String]]): Boolean = {
val leftColumn = leftRow.flatMap(this.comparisonValue)
val rightColumn = rightRow.flatMap(this.comparisonValue)
leftColumn == rightColumn
}
}
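// Usage sketch (added for illustration; `columnDescription` is assumed to be an
// already-constructed DelimitedColumnDescription covering the second column):
//
// val comparison = columnDescription.compareTwoColumns(
//   Some(Vector("John", "42")),
//   Some(Vector("John", "43")))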
object DelimitedColumnDescription {
def apply(
metaData: ColumnDescriptionMetaData,
position: DelimitedPosition,
comparisonMapper: ValueMapper,
columnValueChecks: CheckColumnValue
): DelimitedColumnDescription =
new DelimitedColumnDescription(metaData,
position,
comparisonMapper,
columnValueChecks)
def apply(metaData: MetaData)
: Either[UnrecoverableError, DelimitedColumnDescription] = {
val unknownAttributeList = XMLHelperFunctions.collectUnknownAttributes(
listOfAttributesKeys,
metaData)
unknownAttributeList match {
case Nil =>
val attributesValues = AttributesValuesExtractor(metaData, labelKey)
val labelAttributeValue = attributesValues.get(labelKey)
val delimitedPositionEither = DelimitedPosition(metaData)
val shouldUseDuringEither = ShouldUseDuring(metaData)
val comparisonMapperEither = ValueMapper(metaData)
val checkColumnValueEither = CheckColumnValue(metaData)
validateAttributeValues(labelAttributeValue,
delimitedPositionEither,
shouldUseDuringEither,
comparisonMapperEither,
checkColumnValueEither) match {
case Right(
(label,
delimitedPosition,
shouldUseDuring,
comparisonMapper,
checkColumnValue)) =>
val metaData = ColumnDescriptionMetaData(delimitedPosition.toString,
label,
shouldUseDuring)
Right(
DelimitedColumnDescription(metaData,
delimitedPosition,
comparisonMapper,
checkColumnValue))
case Left(errorMessage) =>
Left(errorMessage)
}
case _ =>
val bunchOfErrors = BunchOfErrors(unknownAttributeList)
DelimitedColumnDescriptionErrorMessages.invalidAttributes(bunchOfErrors)
}
}
private def validateAttributeValues(
labelAttributeEither: Either[UnrecoverableError, String],
positionEither: Either[UnrecoverableError, DelimitedPosition],
shouldUseDuringEither: Either[UnrecoverableError, ShouldUseDuring],
valueMapperEither: Either[UnrecoverableError, ValueMapper],
checkColumnValueEither: Either[UnrecoverableError, CheckColumnValue]
): Either[UnrecoverableError,
(String,
DelimitedPosition,
ShouldUseDuring,
ValueMapper,
CheckColumnValue)] =
labelAttributeEither match {
case Right(labelAttributeValue) =>
(positionEither,
shouldUseDuringEither,
valueMapperEither,
checkColumnValueEither) match {
case (Right(position),
Right(shouldUseDuring),
Right(valueMapper),
Right(checkColumnValue)) =>
val classParameters =
(labelAttributeValue,
position,
shouldUseDuring,
valueMapper,
checkColumnValue)
Right(classParameters)
case _ =>
val errorMessages = UnrecoverableError.collectAllErrorsToList(
positionEither,
shouldUseDuringEither,
valueMapperEither,
checkColumnValueEither)
DelimitedColumnDescriptionErrorMessages.invalidAttributes(
labelAttributeValue,
errorMessages)
}
case Left(labelAttributeErrorMessage) =>
val otherErrors = UnrecoverableError.collectAllErrorsToList(
positionEither,
shouldUseDuringEither,
valueMapperEither,
checkColumnValueEither)
DelimitedColumnDescriptionErrorMessages.invalidAttributes(
labelAttributeErrorMessage :: otherErrors)
}
private val labelKey =
AttributeValueExtractor("label", AttributeValueConversion.extractValue)
val listOfAttributesKeys: List[AttributeValueExtractor[_]] =
labelKey ::
ShouldUseDuring.listOfAttributesKeys :::
ValueMapper.listOfAttributesKeys :::
DelimitedPosition.listOfAttributesKeys :::
CheckColumnValue.listOfAttributesKeys
}
| MouslihAbdelhakim/Quick | src/main/scala/com/scalableQuality/quick/core/fileComponentDescriptions/DelimitedColumnDescription.scala | Scala | apache-2.0 | 6,754 |
package reactivemongo.api.commands.bson
import reactivemongo.bson.{
BSONBooleanLike,
BSONDocument,
BSONDocumentReader,
BSONDocumentWriter
}
import reactivemongo.api.ReadConcern
import reactivemongo.api.commands.{ Command, CommandError, UnitBox }
@deprecated("Internal: will be made private", "0.16.0")
object CommonImplicits { // See CommandCodecs
implicit object UnitBoxReader
extends DealingWithGenericCommandErrorsReader[UnitBox.type] {
def readResult(doc: BSONDocument): UnitBox.type = UnitBox
}
implicit object ReadConcernWriter extends BSONDocumentWriter[ReadConcern] {
def write(concern: ReadConcern) = BSONDocument("level" -> concern.level)
}
}
@deprecated("Internal: will be made private", "0.16.0")
trait BSONCommandError extends CommandError {
def originalDocument: BSONDocument
}
// See CommandError.apply
@deprecated("Internal: will be made private", "0.16.0")
case class DefaultBSONCommandError(
code: Option[Int],
errmsg: Option[String],
originalDocument: BSONDocument) extends BSONCommandError {
override def getMessage = s"CommandError[code=${code.getOrElse("<unknown>")}, errmsg=${errmsg.getOrElse("<unknown>")}, doc: ${BSONDocument.pretty(originalDocument)}]"
}
/** Helper to read a command result, with error handling. */
@deprecated("Internal: will be made private", "0.16.0")
trait DealingWithGenericCommandErrorsReader[A] extends BSONDocumentReader[A] {
  /** Reads the successful result (only if `ok` is true). */
def readResult(doc: BSONDocument): A
final def read(doc: BSONDocument): A = {
if (!doc.getAs[BSONBooleanLike]("ok").forall(_.toBoolean)) {
throw new DefaultBSONCommandError(
code = doc.getAs[Int]("code"),
errmsg = doc.getAs[String]("errmsg"),
originalDocument = doc)
} else {
doc.getAs[String]("note").foreach { note =>
Command.logger.info(s"${note}: ${BSONDocument pretty doc}")
}
readResult(doc)
}
}
}
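// Illustrative sketch (added): a concrete reader only implements `readResult`,
// while the `ok`/error handling above is inherited.
//
// implicit object CountReader extends DealingWithGenericCommandErrorsReader[Int] {
//   def readResult(doc: BSONDocument): Int = doc.getAs[Int]("n").getOrElse(0)
// }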
| ornicar/ReactiveMongo | driver/src/main/scala/api/commands/bson/bsoncommands.scala | Scala | apache-2.0 | 1,965 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.calls
import org.apache.flink.table.api.JsonOnNull
import org.apache.flink.table.planner.codegen.CodeGenUtils._
import org.apache.flink.table.planner.codegen.JsonGenerateUtils.{createNodeTerm, getOnNullBehavior}
import org.apache.flink.table.planner.codegen.{CodeGeneratorContext, GeneratedExpression}
import org.apache.flink.table.runtime.functions.SqlJsonUtils
import org.apache.flink.table.types.logical.LogicalType
import org.apache.flink.shaded.jackson2.com.fasterxml.jackson.databind.node.{NullNode, ObjectNode}
import org.apache.calcite.rex.RexCall
/**
* [[CallGenerator]] for `JSON_OBJECT`.
*
* `JSON_OBJECT` returns a character string. However, this creates an issue when nesting calls to
* this function with the intention of creating a nested JSON structure. Instead of a nested JSON
* object, a JSON string would be inserted, i.e.
* `JSON_OBJECT(KEY 'K' VALUE JSON_OBJECT(KEY 'A' VALUE 'B'))` would result in
* `{"K":"{\\"A\\":\\"B\\"}"}` instead of the intended `{"K":{"A":"B"}}`. We remedy this by treating
* nested calls to this function differently and inserting the value as a raw node instead of as a
* string node.
*/
class JsonObjectCallGen(call: RexCall) extends CallGenerator {
private def jsonUtils = className[SqlJsonUtils]
override def generate(
ctx: CodeGeneratorContext,
operands: Seq[GeneratedExpression],
returnType: LogicalType): GeneratedExpression = {
val nodeTerm = newName("node")
ctx.addReusableMember(s"${className[ObjectNode]} $nodeTerm = $jsonUtils.createObjectNode();")
val nullNodeTerm = newName("nullNode")
ctx.addReusableMember(s"${className[NullNode]} $nullNodeTerm = $nodeTerm.nullNode();")
val onNull = getOnNullBehavior(operands.head)
val populateNodeCode = operands.zipWithIndex.drop(1).grouped(2).map {
case Seq((keyExpr, _), (valueExpr, valueIdx)) =>
val valueTerm = createNodeTerm(ctx, valueExpr, call.operands.get(valueIdx))
onNull match {
case JsonOnNull.NULL =>
s"""
|if (${valueExpr.nullTerm}) {
| $nodeTerm.set(${keyExpr.resultTerm}.toString(), $nullNodeTerm);
|} else {
| $nodeTerm.set(${keyExpr.resultTerm}.toString(), $valueTerm);
|}
|""".stripMargin
case JsonOnNull.ABSENT =>
s"""
|if (!${valueExpr.nullTerm}) {
| $nodeTerm.set(${keyExpr.resultTerm}.toString(), $valueTerm);
|}
|""".stripMargin
}
}.mkString
val resultTerm = newName("result")
val resultTermType = primitiveTypeTermForType(returnType)
val resultCode = s"""
|${operands.map(_.code).mkString}
|
|$nodeTerm.removeAll();
|$populateNodeCode
|
|$resultTermType $resultTerm =
| $BINARY_STRING.fromString($jsonUtils.serializeJson($nodeTerm));
|""".stripMargin
GeneratedExpression(resultTerm, "false", resultCode, returnType)
}
}
| apache/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/calls/JsonObjectCallGen.scala | Scala | apache-2.0 | 3,877 |
package controllers.stack
import scalaz._
import Scalaz._
import scalaz.Validation._
import scala.concurrent.Future
import net.liftweb.json._
import net.liftweb.json.JsonParser._
import controllers.Constants._
import io.megam.auth.funnel._
import io.megam.auth.funnel.FunnelErrors._
import io.megam.auth.stack.AccountResult
import play.api.mvc._
import play.api.libs.iteratee.Enumerator
import models.base.Accounts
/**
* @author rajthilak
*
*/
object Results {
protected val JSONClazKey = models.Constants.JSON_CLAZ
protected val ResultsKey = "results"
def resultset(jsonclaz: String, result: String): String = {
val res = JsonParser.parse(result)
prettyRender(JObject(JField(JSONClazKey, JString(jsonclaz)) :: JField(ResultsKey, res) :: Nil))
}
}
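// Illustrative example (added; assumes JSON_CLAZ resolves to a field name such
// as "json_claz"): resultset("Megam.AccountResult", """{"id": "1"}""") pretty-prints
// roughly to {"json_claz": "Megam.AccountResult", "results": {"id": "1"}}.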
| megamsys/verticegateway | app/controllers/stack/Results.scala | Scala | mit | 775 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package pagespecs.pages
import langswitch.{Language, Languages}
import langswitch.Languages.{English, Welsh}
import org.openqa.selenium.WebDriver
import org.scalatestplus.selenium.WebBrowser.pageTitle
import testsupport.RichMatchers._
class NotOnIaPage(baseUrl: BaseUrl)(implicit webDriver: WebDriver) extends BasePage(baseUrl) {
override def path: String = "/pay-what-you-owe-in-instalments/eligibility/ia/call-us"
override def assertPageIsDisplayed(implicit lang: Language): Unit = probing {
readPath() shouldBe path
readGlobalHeaderText().stripSpaces shouldBe Expected.GlobalHeaderText().stripSpaces
pageTitle shouldBe expectedTitle(expectedHeadingContent(lang), lang)
val expectedLines = Expected.MainText().stripSpaces().split("\\n")
assertContentMatchesExpectedLines(expectedLines)
}
def expectedHeadingContent(language: Language): String = language match {
case Languages.English => "Please call us"
case Languages.Welsh => "Ffoniwch ni"
}
object Expected {
object GlobalHeaderText {
def apply()(implicit language: Language): String = language match {
case English => globalHeaderTextEnglish
case Welsh => globalHeaderTextWelsh
}
private val globalHeaderTextEnglish = """Set up a Self Assessment payment plan"""
private val globalHeaderTextWelsh = """Trefnu cynllun talu"""
}
object MainText {
def apply()(implicit language: Language): String = language match {
case English => mainTextEnglish
case Welsh => mainTextWelsh
}
private val mainTextEnglish =
"""Please call us
|You are not eligible to set up a payment plan online.
|
|For further support you can contact the Business Support Service and speak to an adviser on 0300 200 3835.
|
|Before you call, make sure you have:
|information on any savings or investments you have
|your bank details
|We're likely to ask:
|what you've done to try to pay the bill
|if you can pay some of the bill now
|Our opening times are:
|Monday to Friday: 8am to 4pm"""
.stripMargin
private val mainTextWelsh =
"""Ffoniwch ni
|Nid ydych yn gymwys i drefnu cynllun talu ar-lein.
|
|Am gymorth pellach, gallwch gysylltu â’r Gwasanaeth Cymorth Busnes a siarad ag ymgynghorydd ar 0300 200 1900.
|
|Cyn i chi ffonio, sicrhewch fod gennych y canlynol:
|gwybodaeth am unrhyw gynilion neu fuddsoddiadau sydd gennych
|eich manylion banc
|Rydym yn debygol o ofyn:
|beth rydych wedi’i wneud i geisio talu’r bil
|a allwch dalu rhywfaint o’r bil nawr
|Ein horiau agor yw:
|Dydd Llun i ddydd Gwener: 08:30 – 16:00
""".stripMargin
}
}
}
| hmrc/self-service-time-to-pay-frontend | test/pagespecs/pages/NotOnIaPage.scala | Scala | apache-2.0 | 3,513 |
package edu.cmu.cs.oak.lib.array
import edu.cmu.cs.oak.core.OakInterpreter
import edu.cmu.cs.oak.value.OakValue
import edu.cmu.cs.oak.lib.InterpreterPlugin
import edu.cmu.cs.oak.lib.InterpreterPluginProvider
import edu.cmu.cs.oak.env.Environment
import java.nio.file.Path
import com.caucho.quercus.expr.Expr
import edu.cmu.cs.oak.value.ArrayValue
import edu.cmu.cs.oak.core.SymbolFlag
import edu.cmu.cs.oak.value.SymbolValue
import edu.cmu.cs.oak.env.OakHeap
import edu.cmu.cs.oak.value.IntValue
import com.caucho.quercus.Location
class ArraySlice extends InterpreterPlugin {
override def getName(): String = "array_slice"
override def visit(provider: InterpreterPluginProvider, args: List[OakValue], loc: Location, env: Environment): OakValue = {
val interpreter = provider.asInstanceOf[OakInterpreter]
    /* Assert that the function has at least two arguments */
assert(args.size > 1)
val array = args.head
array match {
case av: ArrayValue => {
try {
val offset = args(1).asInstanceOf[IntValue].value
val length = if (args.size > 2) args(2).asInstanceOf[IntValue].value else av.array.size
val start = if (offset >= 0) offset else av.array.size + offset
val end = if (length >= 0) math.min(start + length, av.array.size) else math.max(av.array.size + length, 0)
val arraySliced = new ArrayValue()
av.array.slice(start.toInt, end.toInt).foreach {
case (k, ref) =>{
arraySliced.setRef(k, ref)
}
}
return arraySliced
} catch {
case e: Exception => SymbolValue("array_slice("+e+")", OakHeap.getIndex, SymbolFlag.DUMMY)
}
}
case _ => SymbolValue("array_slice()b", OakHeap.getIndex, SymbolFlag.DUMMY)
}
}
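  // Worked example (added for clarity, mirroring PHP's array_slice semantics):
  // for an array of size 5 with offset = -2 and no explicit length argument,
  // length defaults to 5, start = 5 + (-2) = 3 and end = min(3 + 5, 5) = 5,
  // so only the elements at positions 3 and 4 are kept.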
} | smba/oak | edu.cmu.cs.oak/src/main/scala/edu/cmu/cs/oak/lib/array/ArraySlice.scala | Scala | lgpl-3.0 | 1,883 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.system.kafka
import org.I0Itec.zkclient.ZkClient
import org.apache.samza.Partition
import org.apache.samza.SamzaException
import org.apache.samza.system.SystemAdmin
import org.apache.samza.system.SystemStreamMetadata
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.util.{ ClientUtilTopicMetadataStore, ExponentialSleepStrategy, Logging }
import kafka.api._
import kafka.consumer.SimpleConsumer
import kafka.common.{ TopicExistsException, TopicAndPartition }
import java.util.{ Properties, UUID }
import scala.collection.JavaConversions._
import org.apache.samza.system.SystemStreamMetadata.SystemStreamPartitionMetadata
import kafka.consumer.ConsumerConfig
import kafka.admin.AdminUtils
import org.apache.samza.util.KafkaUtil
object KafkaSystemAdmin extends Logging {
/**
* A helper method that takes oldest, newest, and upcoming offsets for each
* system stream partition, and creates a single map from stream name to
* SystemStreamMetadata.
*/
def assembleMetadata(oldestOffsets: Map[SystemStreamPartition, String], newestOffsets: Map[SystemStreamPartition, String], upcomingOffsets: Map[SystemStreamPartition, String]): Map[String, SystemStreamMetadata] = {
val allMetadata = (oldestOffsets.keySet ++ newestOffsets.keySet ++ upcomingOffsets.keySet)
.groupBy(_.getStream)
.map {
case (streamName, systemStreamPartitions) =>
val streamPartitionMetadata = systemStreamPartitions
.map(systemStreamPartition => {
val partitionMetadata = new SystemStreamPartitionMetadata(
// If the topic/partition is empty then oldest and newest will
// be stripped of their offsets, so default to null.
oldestOffsets.getOrElse(systemStreamPartition, null),
newestOffsets.getOrElse(systemStreamPartition, null),
upcomingOffsets(systemStreamPartition))
(systemStreamPartition.getPartition, partitionMetadata)
})
.toMap
val streamMetadata = new SystemStreamMetadata(streamName, streamPartitionMetadata)
(streamName, streamMetadata)
}
.toMap
info("Got metadata: %s" format allMetadata)
allMetadata
}
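  // Illustrative example (added): for a topic "t" with one partition whose
  // oldest/newest/upcoming offsets are "0"/"41"/"42", the result is
  // Map("t" -> SystemStreamMetadata("t",
  //   Map(new Partition(0) -> new SystemStreamPartitionMetadata("0", "41", "42")))).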
}
/**
* A helper class that is used to construct the changelog stream specific information
* @param replicationFactor The number of replicas for the changelog stream
* @param kafkaProps The kafka specific properties that need to be used for changelog stream creation
*/
case class ChangelogInfo(var replicationFactor: Int, var kafkaProps: Properties)
/**
* A Kafka-based implementation of SystemAdmin.
*/
class KafkaSystemAdmin(
/**
* The system name to use when creating SystemStreamPartitions to return in
   * the getSystemStreamMetadata response.
*/
systemName: String,
// TODO whenever Kafka decides to make the Set[Broker] class public, let's switch to Set[Broker] here.
/**
* List of brokers that are part of the Kafka system that we wish to
* interact with. The format is host1:port1,host2:port2.
*/
brokerListString: String,
/**
* A method that returns a ZkClient for the Kafka system. This is invoked
* when the system admin is attempting to create a coordinator stream.
*/
connectZk: () => ZkClient,
/**
* Custom properties to use when the system admin tries to create a new
* coordinator stream.
*/
coordinatorStreamProperties: Properties = new Properties,
/**
* The replication factor to use when the system admin creates a new
* coordinator stream.
*/
coordinatorStreamReplicationFactor: Int = 1,
/**
* The timeout to use for the simple consumer when fetching metadata from
* Kafka. Equivalent to Kafka's socket.timeout.ms configuration.
*/
timeout: Int = Int.MaxValue,
/**
* The buffer size to use for the simple consumer when fetching metadata
* from Kafka. Equivalent to Kafka's socket.receive.buffer.bytes
* configuration.
*/
bufferSize: Int = ConsumerConfig.SocketBufferSize,
/**
* The client ID to use for the simple consumer when fetching metadata from
* Kafka. Equivalent to Kafka's client.id configuration.
*/
clientId: String = UUID.randomUUID.toString,
/**
* Replication factor for the Changelog topic in kafka
* Kafka properties to be used during the Changelog topic creation
*/
topicMetaInformation: Map[String, ChangelogInfo] = Map[String, ChangelogInfo]()) extends SystemAdmin with Logging {
import KafkaSystemAdmin._
/**
* Returns the offset for the message after the specified offset for each
* SystemStreamPartition that was passed in.
*/
def getOffsetsAfter(offsets: java.util.Map[SystemStreamPartition, String]) = {
// This is safe to do with Kafka, even if a topic is key-deduped. If the
// offset doesn't exist on a compacted topic, Kafka will return the first
// message AFTER the offset that was specified in the fetch request.
offsets.mapValues(offset => (offset.toLong + 1).toString)
}
def getSystemStreamMetadata(streams: java.util.Set[String]) =
getSystemStreamMetadata(streams, new ExponentialSleepStrategy(initialDelayMs = 500))
/**
* Given a set of stream names (topics), fetch metadata from Kafka for each
* stream, and return a map from stream name to SystemStreamMetadata for
* each stream. This method will return null for oldest and newest offsets
* if a given SystemStreamPartition is empty. This method will block and
* retry indefinitely until it gets a successful response from Kafka.
*/
def getSystemStreamMetadata(streams: java.util.Set[String], retryBackoff: ExponentialSleepStrategy) = {
debug("Fetching system stream metadata for: %s" format streams)
retryBackoff.run(
loop => {
val metadata = TopicMetadataCache.getTopicMetadata(
streams.toSet,
systemName,
getTopicMetadata)
debug("Got metadata for streams: %s" format metadata)
val brokersToTopicPartitions = getTopicsAndPartitionsByBroker(metadata)
var partitions = Map[String, Set[Partition]]()
var oldestOffsets = Map[SystemStreamPartition, String]()
var newestOffsets = Map[SystemStreamPartition, String]()
var upcomingOffsets = Map[SystemStreamPartition, String]()
// Get oldest, newest, and upcoming offsets for each topic and partition.
for ((broker, topicsAndPartitions) <- brokersToTopicPartitions) {
debug("Fetching offsets for %s:%s: %s" format (broker.host, broker.port, topicsAndPartitions))
val consumer = new SimpleConsumer(broker.host, broker.port, timeout, bufferSize, clientId)
try {
oldestOffsets ++= getOffsets(consumer, topicsAndPartitions, OffsetRequest.EarliestTime)
upcomingOffsets ++= getOffsets(consumer, topicsAndPartitions, OffsetRequest.LatestTime)
// Kafka's "latest" offset is always last message in stream's offset +
// 1, so get newest message in stream by subtracting one. this is safe
// even for key-deduplicated streams, since the last message will
// never be deduplicated.
newestOffsets = upcomingOffsets.mapValues(offset => (offset.toLong - 1).toString)
// Keep only oldest/newest offsets where there is a message. Should
// return null offsets for empty streams.
upcomingOffsets.foreach {
case (topicAndPartition, offset) =>
if (offset.toLong <= 0) {
debug("Stripping oldest/newest offsets for %s because the topic appears empty." format topicAndPartition)
oldestOffsets -= topicAndPartition
newestOffsets -= topicAndPartition
}
}
} finally {
consumer.close
}
}
val result = assembleMetadata(oldestOffsets, newestOffsets, upcomingOffsets)
loop.done
result
},
(exception, loop) => {
warn("Unable to fetch last offsets for streams %s due to %s. Retrying." format (streams, exception))
debug("Exception detail:", exception)
}).getOrElse(throw new SamzaException("Failed to get system stream metadata"))
}
def createCoordinatorStream(streamName: String) {
info("Attempting to create coordinator stream %s." format streamName)
new ExponentialSleepStrategy(initialDelayMs = 500).run(
loop => {
val zkClient = connectZk()
try {
AdminUtils.createTopic(
zkClient,
streamName,
1, // Always one partition for coordinator stream.
coordinatorStreamReplicationFactor,
coordinatorStreamProperties)
} finally {
zkClient.close
}
info("Created coordinator stream %s." format streamName)
loop.done
},
(exception, loop) => {
exception match {
case e: TopicExistsException =>
info("Coordinator stream %s already exists." format streamName)
loop.done
case e: Exception =>
warn("Failed to create topic %s: %s. Retrying." format (streamName, e))
debug("Exception detail:", e)
}
})
}
/**
* Helper method to use topic metadata cache when fetching metadata, so we
* don't hammer Kafka more than we need to.
*/
protected def getTopicMetadata(topics: Set[String]) = {
new ClientUtilTopicMetadataStore(brokerListString, clientId, timeout)
.getTopicInfo(topics)
}
/**
* Break topic metadata topic/partitions into per-broker map so that we can
* execute only one offset request per broker.
*/
private def getTopicsAndPartitionsByBroker(metadata: Map[String, TopicMetadata]) = {
val brokersToTopicPartitions = metadata
.values
// Convert the topic metadata to a Seq[(Broker, TopicAndPartition)]
.flatMap(topicMetadata => {
KafkaUtil.maybeThrowException(topicMetadata.errorCode)
topicMetadata
.partitionsMetadata
// Convert Seq[PartitionMetadata] to Seq[(Broker, TopicAndPartition)]
.map(partitionMetadata => {
val topicAndPartition = new TopicAndPartition(topicMetadata.topic, partitionMetadata.partitionId)
val leader = partitionMetadata
.leader
.getOrElse(throw new SamzaException("Need leaders for all partitions when fetching offsets. No leader available for TopicAndPartition: %s" format topicAndPartition))
(leader, topicAndPartition)
})
})
// Convert to a Map[Broker, Seq[(Broker, TopicAndPartition)]]
.groupBy(_._1)
// Convert to a Map[Broker, Set[TopicAndPartition]]
.mapValues(_.map(_._2).toSet)
debug("Got topic partition data for brokers: %s" format brokersToTopicPartitions)
brokersToTopicPartitions
}
/**
* Use a SimpleConsumer to fetch either the earliest or latest offset from
* Kafka for each topic/partition in the topicsAndPartitions set. It is
* assumed that all topics/partitions supplied reside on the broker that the
* consumer is connected to.
*/
private def getOffsets(consumer: SimpleConsumer, topicsAndPartitions: Set[TopicAndPartition], earliestOrLatest: Long) = {
debug("Getting offsets for %s using earliest/latest value of %s." format (topicsAndPartitions, earliestOrLatest))
var offsets = Map[SystemStreamPartition, String]()
val partitionOffsetInfo = topicsAndPartitions
.map(topicAndPartition => (topicAndPartition, PartitionOffsetRequestInfo(earliestOrLatest, 1)))
.toMap
val brokerOffsets = consumer
.getOffsetsBefore(new OffsetRequest(partitionOffsetInfo))
.partitionErrorAndOffsets
.mapValues(partitionErrorAndOffset => {
KafkaUtil.maybeThrowException(partitionErrorAndOffset.error)
partitionErrorAndOffset.offsets.head
})
for ((topicAndPartition, offset) <- brokerOffsets) {
offsets += new SystemStreamPartition(systemName, topicAndPartition.topic, new Partition(topicAndPartition.partition)) -> offset.toString
}
debug("Got offsets for %s using earliest/latest value of %s: %s" format (topicsAndPartitions, earliestOrLatest, offsets))
offsets
}
private def createTopicInKafka(topicName: String, numKafkaChangelogPartitions: Int) {
val retryBackoff: ExponentialSleepStrategy = new ExponentialSleepStrategy
info("Attempting to create change log topic %s." format topicName)
info("Using partition count " + numKafkaChangelogPartitions + " for creating change log topic")
val topicMetaInfo = topicMetaInformation.getOrElse(topicName, throw new KafkaChangelogException("Unable to find topic information for topic " + topicName))
retryBackoff.run(
loop => {
val zkClient = connectZk()
try {
AdminUtils.createTopic(
zkClient,
topicName,
numKafkaChangelogPartitions,
topicMetaInfo.replicationFactor,
topicMetaInfo.kafkaProps)
} finally {
zkClient.close
}
info("Created changelog topic %s." format topicName)
loop.done
},
(exception, loop) => {
exception match {
case e: TopicExistsException =>
info("Changelog topic %s already exists." format topicName)
loop.done
case e: Exception =>
warn("Failed to create topic %s: %s. Retrying." format (topicName, e))
debug("Exception detail:", e)
}
})
}
private def validateTopicInKafka(topicName: String, numKafkaChangelogPartitions: Int) {
val retryBackoff: ExponentialSleepStrategy = new ExponentialSleepStrategy
info("Validating changelog topic %s." format topicName)
retryBackoff.run(
loop => {
val metadataStore = new ClientUtilTopicMetadataStore(brokerListString, clientId, timeout)
val topicMetadataMap = TopicMetadataCache.getTopicMetadata(Set(topicName), systemName, metadataStore.getTopicInfo)
val topicMetadata = topicMetadataMap(topicName)
KafkaUtil.maybeThrowException(topicMetadata.errorCode)
val partitionCount = topicMetadata.partitionsMetadata.length
if (partitionCount < numKafkaChangelogPartitions) {
throw new KafkaChangelogException("Changelog topic validation failed for topic %s because partition count %s did not match expected partition count of %d" format (topicName, topicMetadata.partitionsMetadata.length, numKafkaChangelogPartitions))
}
info("Successfully validated changelog topic %s." format topicName)
loop.done
},
(exception, loop) => {
exception match {
case e: KafkaChangelogException => throw e
case e: Exception =>
warn("While trying to validate topic %s: %s. Retrying." format (topicName, e))
debug("Exception detail:", e)
}
})
}
/**
* Exception to be thrown when the change log stream creation or validation has failed
*/
class KafkaChangelogException(s: String, t: Throwable) extends SamzaException(s, t) {
def this(s: String) = this(s, null)
}
override def createChangelogStream(topicName: String, numKafkaChangelogPartitions: Int) = {
createTopicInKafka(topicName, numKafkaChangelogPartitions)
validateChangelogStream(topicName, numKafkaChangelogPartitions)
}
/**
* Validates change log stream in Kafka. Should not be called before createChangelogStream(),
* since ClientUtils.fetchTopicMetadata(), used by different Kafka clients, is not read-only and
* will auto-create a new topic.
*/
override def validateChangelogStream(topicName: String, numKafkaChangelogPartitions: Int) = {
validateTopicInKafka(topicName, numKafkaChangelogPartitions)
}
/**
* Compare the two offsets. Returns x where x < 0 if offset1 < offset2;
* x == 0 if offset1 == offset2; x > 0 if offset1 > offset2.
*
* Currently it's used in the context of the broadcast streams to detect
* the mismatch between two streams when consuming the broadcast streams.
*/
override def offsetComparator(offset1: String, offset2: String) = {
offset1.toLong compare offset2.toLong
}
}
| savaki/samza | samza-kafka/src/main/scala/org/apache/samza/system/kafka/KafkaSystemAdmin.scala | Scala | apache-2.0 | 17,168 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jnsaf.submitter
import java.io.File
import org.argus.amandroid.core.ApkGlobal
import org.argus.amandroid.plugin.TaintAnalysisApproach
import org.argus.jawa.core.io.{MsgLevel, PrintReporter}
import org.argus.jawa.core.util._
import org.argus.jawa.flow.taint_result.TaintResult
import org.argus.jnsaf.client.JNSafClient
object ApkSubmitter {
def apply(sourcePath: String, address: String, port: Int, approach: TaintAnalysisApproach.Value): IMap[String, Option[TaintResult]] = {
val analysisResult: MMap[String, Option[TaintResult]] = mmapEmpty
val fileOrDir = new File(sourcePath)
fileOrDir match {
case dir if dir.isDirectory =>
submitApkInDir(FileUtil.toUri(dir), address, port, approach, analysisResult)
case file =>
if(ApkGlobal.isValidApk(FileUtil.toUri(file)))
submitApk(FileUtil.toUri(file), address, port, approach, analysisResult)
else println(file + " is not decompilable.")
}
analysisResult.toMap
}
def submitApkInDir(dirUri: FileResourceUri, address: String, port: Int, approach: TaintAnalysisApproach.Value, result: MMap[String, Option[TaintResult]]): Unit = {
FileUtil.listFiles(dirUri, ".apk", recursive = true) foreach { apkUri =>
submitApk(apkUri, address, port, approach, result)
}
}
def submitApk(apkUri: FileResourceUri, address: String, port: Int, approach: TaintAnalysisApproach.Value, result: MMap[String, Option[TaintResult]]): Unit = {
val reporter = new PrintReporter(MsgLevel.INFO)
val client = new JNSafClient(address, port, reporter)
val apk = FileUtil.toFile(apkUri)
result(apk.getName) = client.taintAnalysis(apkUri, approach)
}
}
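// Usage sketch (added for illustration; the port and the concrete
// TaintAnalysisApproach value are assumptions, not taken from this file):
//
// val results = ApkSubmitter("/path/to/apks", "localhost", 55001, TaintAnalysisApproach.COMPONENT_BASED)
// results.foreach { case (apkName, taintResult) => println(s"$apkName analyzed: ${taintResult.isDefined}") }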
| arguslab/Argus-SAF | jnsaf/src/main/scala/org/argus/jnsaf/submitter/ApkSubmitter.scala | Scala | apache-2.0 | 2,045 |
package org.jetbrains.plugins.scala
package lang
package completion
import java.io.File
import com.intellij.codeInsight.completion.{CodeCompletionHandlerBase, CompletionType}
import com.intellij.codeInsight.lookup.impl.LookupImpl
import com.intellij.codeInsight.lookup.{LookupElement, LookupElementPresentation, LookupManager}
import com.intellij.openapi.fileEditor.{FileEditorManager, OpenFileDescriptor}
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.junit.Assert._
import scala.annotation.nowarn
/**
* User: Alexander Podkhalyuzin
* Date: 23.09.2009
*/
@nowarn("msg=ScalaLightPlatformCodeInsightTestCaseAdapter")
abstract class CompletionTestBase extends base.ScalaLightPlatformCodeInsightTestCaseAdapter {
  // Must be lazy so it can be overridden without early initializers
protected lazy val caretMarker = "/*caret*/"
protected lazy val extension: String = "scala"
def folderPath: String = baseRootPath + "completion/"
/**
* Fetches last PSI element, checks if it is comment or not
* If it is some kind of comment, treat it like an expected result string
* If it's not, fail and return empty string
*
* @return Expected result string
*/
protected final def getExpectedResult: String = {
import lang.lexer.ScalaTokenTypes._
val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
val lastPsi = scalaFile.findElementAt(scalaFile.getText.length - 1)
val trimRight = lastPsi.getNode.getElementType match {
case `tLINE_COMMENT` => 0
case `tBLOCK_COMMENT` |
`tDOC_COMMENT` => 2
case _ =>
        throw new AssertionError("Test result must be in the last comment statement.")
}
val text = lastPsi.getText
text.substring(2, text.length - trimRight).trim
}
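  // Illustrative layout of a test data file (added; not from the original source):
  // completion is invoked at the caret marker and the expected lookup strings are
  // read from the file's trailing comment, e.g.
  //
  //   class Foo { def fooBar = 1 }
  //   new Foo().foo/*caret*/
  //   /*
  //   fooBar
  //   */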
protected def checkResult(variants: Array[String], expected: String): Unit = {
val actual = variants.sortWith(_ < _)
.mkString("\\n").trim
assertEquals(expected, actual)
}
protected def doTest(): Unit = {
val fileName = getTestName(false) + s".$extension"
val filePath = s"$folderPath$fileName".replace(File.separatorChar, '/')
val file = LocalFileSystem.getInstance.findFileByPath(filePath)
assertNotNull(s"file '$filePath' not found", file)
val fileText = StringUtil.convertLineSeparators(
FileUtil.loadFile(
new File(file.getCanonicalPath),
CharsetToolkit.UTF8
)
)
configureFromFileTextAdapter(fileName, fileText)
val offset = fileText.indexOf(caretMarker) match {
case -1 => throw new AssertionError(s"Not specified end marker in test case. Use $caretMarker in scala file for this.")
case index => index
}
val project = getProjectAdapter
val editor = FileEditorManager.getInstance(project)
.openTextEditor(new OpenFileDescriptor(project, getVFileAdapter, offset), false)
val completionType = if (fileName.startsWith("Smart")) CompletionType.SMART else CompletionType.BASIC
new CodeCompletionHandlerBase(
completionType,
false,
false,
true
).invokeCompletion(project, editor)
val items = LookupManager.getActiveLookup(editor) match {
case lookup: LookupImpl =>
val items = lookup.getItems.toArray(LookupElement.EMPTY_ARRAY)
// TODO: test completion items presentations instead of just getLookupString
// something like:
// val presentations: Array[LookupElementPresentation] = items.map { item =>
// val presentation = new LookupElementPresentation
// item.renderElement(presentation)
// presentation
// }
// val itemTexts = presentations.map(_.getItemText)
val lookups = items.map(_.getLookupString)
lookups
case _ => Array.empty[String]
}
val expected = getExpectedResult
checkResult(items, expected)
}
} | JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/completion/CompletionTestBase.scala | Scala | apache-2.0 | 4,023 |
package net.snowflake.spark.snowflake
import java.sql.Connection
import net.snowflake.spark.snowflake.Parameters.MergedParameters
import net.snowflake.spark.snowflake.io.{CloudStorage, SupportedFormat}
import net.snowflake.spark.snowflake.io.SupportedFormat.SupportedFormat
import net.snowflake.spark.snowflake.DefaultJDBCWrapper.DataBaseOperations
import org.apache.spark.sql.types.StructType
import org.slf4j.LoggerFactory
import scala.collection.mutable
import scala.concurrent.{Await, Future}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
package object streaming {
private val LOGGER = LoggerFactory.getLogger(this.getClass.getName)
private val SLEEP_TIME = 5000 // 5 seconds
private val TIME_OUT = 5 // 5 minutes
private val pipeList: mutable.HashMap[String, SnowflakeIngestService] =
new mutable.HashMap()
private[streaming] def openIngestionService(
param: MergedParameters,
pipeName: String,
format: SupportedFormat,
schema: StructType,
storage: CloudStorage,
conn: Connection
): SnowflakeIngestService = {
LOGGER.debug(s"create new ingestion service, pipe name: $pipeName")
var pipeDropped = false
val checkPrevious: Future[Boolean] = Future {
while (pipeList.contains(pipeName)) {
LOGGER.debug(s"waiting previous pipe dropped")
Thread.sleep(SLEEP_TIME)
}
LOGGER.debug(s"previous pipe dropped")
pipeDropped = true
pipeDropped
}
Await.result(checkPrevious, TIME_OUT minutes)
if (pipeDropped) {
conn.createTable(param.table.get.name, schema, param, overwrite = false, temporary = false)
val copy = ConstantString(copySql(param, conn, format, schema)) !
if (verifyPipe(conn, pipeName, copy.toString)) {
LOGGER.info(s"reuse pipe: $pipeName")
} else conn.createPipe(pipeName, copy, overwrite = true)
val ingestion = new SnowflakeIngestService(param, pipeName, storage, conn)
pipeList.put(pipeName, ingestion)
ingestion
} else {
LOGGER.error(s"waiting pipe dropped time out")
throw new IllegalStateException(
s"Waiting pipe dropped time out, pipe name: $pipeName"
)
}
}
private[streaming] def closeIngestionService(pipeName: String): Unit = {
LOGGER.debug(s"closing ingestion service, pipe name: $pipeName")
if (pipeList.contains(pipeName)) {
pipeList(pipeName).close()
pipeList.remove(pipeName)
LOGGER.debug(s"ingestion service closed, pipe name: $pipeName")
} else {
LOGGER.error(s"ingestion service not found, pipe name: $pipeName")
}
}
private[streaming] def closeAllIngestionService(): Unit = {
LOGGER.debug(s"closing ingestion service")
pipeList.par.foreach(_._2.close())
LOGGER.debug(s"all ingestion service closed")
}
/**
* Generate the COPY SQL command for creating pipe only
*/
private def copySql(param: MergedParameters,
conn: Connection,
format: SupportedFormat,
schema: StructType
): String = {
val tableName = param.table.get
val stageName = param.streamingStage.get
val tableSchema =
DefaultJDBCWrapper.resolveTable(conn, tableName.toString, param)
def getMappingToString(list: Option[List[(Int, String)]]): String =
format match {
case SupportedFormat.JSON =>
val schema =
DefaultJDBCWrapper.resolveTable(conn, tableName.name, param)
if (list.isEmpty || list.get.isEmpty) {
s"(${schema.fields.map(x => Utils.quotedNameIgnoreCase(x.name)).mkString(",")})"
} else {
s"(${list.get.map(x => Utils.quotedNameIgnoreCase(tableSchema(x._1).name)).mkString(", ")})"
}
case SupportedFormat.CSV =>
if (list.isEmpty || list.get.isEmpty) {
""
} else {
s"(${list.get.map(x => Utils.quotedNameIgnoreCase(x._2)).mkString(", ")})"
}
}
def getMappingFromString(list: Option[List[(Int, String)]],
from: String): String =
format match {
case SupportedFormat.JSON =>
if (list.isEmpty || list.get.isEmpty) {
val names =
tableSchema.fields
.map(
x =>
"parse_json($1):".concat(Utils.quotedNameIgnoreCase(x.name))
)
.mkString(",")
s"from (select $names $from tmp)"
} else {
s"from (select ${list.get.map(x => "parse_json($1):".concat(
Utils.quotedNameIgnoreCase(x._2))).mkString(", ")} $from tmp)"
}
case SupportedFormat.CSV =>
if (list.isEmpty || list.get.isEmpty) {
from
} else {
s"from (select ${list.get
.map(x => "tmp.$".concat(Utils.quotedNameIgnoreCase((x._1 + 1).toString)))
.mkString(", ")} $from tmp)"
}
}
val fromString = s"FROM @$stageName"
if (param.columnMap.isEmpty && param.columnMapping == "name") {
param.setColumnMap(Option(schema), Option(tableSchema))
}
val mappingList: Option[List[(Int, String)]] = param.columnMap match {
case Some(map) =>
Some(map.toList.map {
case (key, value) =>
try {
(tableSchema.fieldIndex(value), key)
} catch {
case e: Exception =>
LOGGER.error("Error occurred while column mapping: " + e)
throw e
}
})
case None => None
}
val mappingToString = getMappingToString(mappingList)
val mappingFromString = getMappingFromString(mappingList, fromString)
val formatString =
format match {
case SupportedFormat.CSV =>
s"""
|FILE_FORMAT = (
| TYPE=CSV
| FIELD_DELIMITER='|'
| NULL_IF=()
| FIELD_OPTIONALLY_ENCLOSED_BY='"'
| TIMESTAMP_FORMAT='TZHTZM YYYY-MM-DD HH24:MI:SS.FF3'
| )
""".stripMargin
case SupportedFormat.JSON =>
s"""
|FILE_FORMAT = (
| TYPE = JSON
|)
""".stripMargin
}
s"""
|COPY INTO $tableName $mappingToString
|$mappingFromString
|$formatString
""".stripMargin.trim
}
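  // Illustrative shape of a generated statement (added; the exact text depends on
  // the table, stage, column mapping and format). For CSV with no explicit column
  // mapping it is roughly:
  //   COPY INTO <database.schema.table>
  //   FROM @<streaming_stage>
  //   FILE_FORMAT = ( TYPE=CSV FIELD_DELIMITER='|' ... )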
private[streaming] def verifyPipe(conn: Connection,
pipeName: String,
copyStatement: String): Boolean =
conn.pipeDefinition(pipeName) match {
case Some(str) => str.trim.equals(copyStatement.trim)
case _ => false
}
}
| snowflakedb/spark-snowflakedb | src/main/scala/net/snowflake/spark/snowflake/streaming/package.scala | Scala | apache-2.0 | 6,787 |
package chrome.sockets.tcp
package object bindings {
type SocketId = Int
}
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/sockets/tcp/bindings/package.scala | Scala | mit | 80 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc.connection
class DB2ConnectionProviderSuite extends ConnectionProviderSuiteBase {
test("setAuthenticationConfigIfNeeded must set authentication if not set") {
val driver = registerDriver(DB2ConnectionProvider.driverClass)
val provider = new DB2ConnectionProvider(driver, options("jdbc:db2://localhost/db2"))
testSecureConnectionProvider(provider)
}
}
| dbtsai/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/jdbc/connection/DB2ConnectionProviderSuite.scala | Scala | apache-2.0 | 1,227 |
/*
* Copyright (c) 2015 Andreas Wolf
*
 * See the LICENSE file in the project root for further copyright information.
*/
package info.andreaswolf.roadhopper.road
object Turn {
def splitSegmentBeforeTurn(turn: Turn): Seq[RoadSegment] = {
splitSegmentBeforeTurn(turn.from, turn.to)
}
def splitSegmentBeforeTurn(from: RoadSegment, to: RoadSegment): Seq[RoadSegment] = {
val beforeTurnLength = Math.min(from.length, 5.0)
// TODO move the speed limit to the velocity estimator, just include a turn-degrees indicator in the turn segment
// instead. The estimator should then find an appropriate target velocity based on the driver preferences
// The target velocity should also not be fixed, but instead depend on the allowed velocity on the segment before;
// otherwise the slowdown might get too harsh e.g. on highway exits in Germany
val speedLimit = Math.abs(from.calculateNecessaryTurn(to)).toDegrees match {
case x if x > 110 => 7 / 3.6
case x if x > 80 => 10 / 3.6
case x if x > 60 => 15 / 3.6
case x => from.speedLimit
}
if (from.length - beforeTurnLength < 1.0) {
// if we would get a very short segment (< 1m) and the pre-turn segment, we skip the first segment
return Seq(new PreTurnSegment(from.start, from.end, speedLimit))
}
// get the segment until 5 meters before the turn point
val beforeTurn = RoadSegment.fromExisting(from, beforeTurnLength)
// get the segment right before the turn point
val turnBase = new PreTurnSegment(beforeTurn.end, from.end, speedLimit)
// filter out a zero-length segment that might occur if the from-segment was shorter than the slowdown distance
// of 5 meters
Seq(beforeTurn, turnBase).filter(_.length > 0.0)
}
}
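// Worked example (added): calculateNecessaryTurn yields the turn angle, so a
// 90-degree turn falls into the `x > 80` branch above and the PreTurnSegment is
// limited to 10 / 3.6 ≈ 2.78 m/s (10 km/h); turns of 60 degrees or less keep the
// original segment's speed limit.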
class Turn(val from: RoadSegment, val to: RoadSegment) {
}
| andreaswolf/roadhopper | src/main/scala/info/andreaswolf/roadhopper/road/Turn.scala | Scala | mit | 1,780 |
/*
Copyright (c) 2009, 2010 Hanno Braun <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hannobraun.sd.collision
import com.hannobraun.sd.core.StepPhase
import com.hannobraun.sd.collision.phase.BroadPhase
import com.hannobraun.sd.collision.phase.NarrowPhase
import com.hannobraun.sd.collision.shape.Contact
import com.hannobraun.sd.collision.shape.Shape
class CollisionDetector( broadPhase: BroadPhase, narrowPhase: NarrowPhase ) extends StepPhase[ Shape, Contact ] {
def execute( dt: Double, shapes: Iterable[ Shape ], constraints: Iterable[ Contact ] ) = {
// Broad phase. Checks all shapes in a performant way and returns a list of possible collisions.
val possiblyCollidingPairs = broadPhase( shapes )
// Narrow Phase. Performs a detailed and possibly performance-heavy collision check for a pair of
// shapes.
val possibleContacts = for ( possibleContact <- possiblyCollidingPairs ) yield {
narrowPhase( possibleContact._1, possibleContact._2 )
}
// Compile a list of all actual contacts that we can return.
val updatedConstraints = for( possibleContact <- possibleContacts; contact <- possibleContact ) yield {
contact
}
( shapes, updatedConstraints )
}
}
| hannobraun/ScalableDynamics | src/main/scala/com/hannobraun/sd/collision/CollisionDetector.scala | Scala | apache-2.0 | 1,723 |
package org.riedelcastro.frontlets.programs
import org.scalatest.FunSpec
import org.scalatest.matchers.MustMatchers
import org.riedelcastro.frontlets.Frontlet
/**
* @author Sebastian Riedel
*/
class CompilerSpec extends FunSpec with MustMatchers{
import TermImplicits._
describe("A compiler") {
it ("should support integer addition") {
val i = SimpleVar("i",0)
val x = SimpleVar("x",0)
val exe = Compiler.compile(Program(Seq(x := i + i)))
exe.execute(State(Map(i -> 1)))(x) must be (2)
}
it ("should support frontlet slot getter for simple slots") {
class Person extends Frontlet {
val age = IntSlot("age")
}
val x = SimpleVar("x",0)
val exe = Compiler.compile(Program(Seq(x := Const(new Person().age(20))(_.age))))
exe.execute(State.empty)(x) must be (20)
}
it ("should support frontlet slot setter for simple slots") {
class Person extends Frontlet {
val age = IntSlot("age")
}
val x = FrontletVar("x",() => new Person)
val exe = Compiler.compile(Program(Seq(x := Const(new Person())(_.age,20))))
exe.execute(State.empty)(x) must be (new Person().age(20))
}
}
}
| riedelcastro/frontlets | src/test/scala/org/riedelcastro/frontlets/programs/CompilerSpec.scala | Scala | apache-2.0 | 1,205 |
package chrome.app.window.bindings
import scala.scalajs.js
object CreateWindowOptions {
def apply(id: js.UndefOr[String] = js.undefined,
innerBounds: js.UndefOr[BoundsSpecification] = js.undefined,
outerBounds: js.UndefOr[BoundsSpecification] = js.undefined,
alphaEnabled: js.UndefOr[Boolean] = js.undefined,
state: js.UndefOr[String] = js.undefined,
hidden: js.UndefOr[Boolean] = js.undefined,
resizable: js.UndefOr[Boolean] = js.undefined,
alwaysOnTop: js.UndefOr[Boolean] = js.undefined,
focused: js.UndefOr[Boolean] = js.undefined,
visibleOnAllWorkspaces: js.UndefOr[Boolean] = js.undefined
): CreateWindowOptions = {
js.Dynamic.literal(
      id = id,
      innerBounds = innerBounds,
      outerBounds = outerBounds,
alphaEnabled = alphaEnabled,
state = state,
hidden = hidden,
resizable = resizable,
alwaysOnTop = alwaysOnTop,
focused = focused,
visibleOnAllWorkspaces = visibleOnAllWorkspaces
).asInstanceOf[CreateWindowOptions]
}
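  // Hedged usage sketch (illustrative only; the id and flag values below are assumptions):
  //   val opts = CreateWindowOptions(id = "main", alwaysOnTop = true)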
}
class CreateWindowOptions extends js.Object {
val alphaEnabled: js.UndefOr[Boolean] = js.native
val state: js.UndefOr[String] = js.native
val hidden: js.UndefOr[Boolean] = js.native
val resizable: js.UndefOr[Boolean] = js.native
val alwaysOnTop: js.UndefOr[Boolean] = js.native
val focused: js.UndefOr[Boolean] = js.native
val visibleOnAllWorkspaces: js.UndefOr[Boolean] = js.native
var id: js.UndefOr[String] = js.native
var innerBounds: js.UndefOr[BoundsSpecification] = js.native
var outerBounds: js.UndefOr[BoundsSpecification] = js.native
}
| amsayk/scala-js-chrome | bindings/src/main/scala/chrome/app/window/bindings/CreateWindowOptions.scala | Scala | mit | 1,624 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.visor.commands.deploy
import org.apache.ignite.visor.visor
import org.scalatest._
import org.apache.ignite.visor.commands.deploy.VisorDeployCommand._
/**
* Unit test for 'deploy' command.
*/
class VisorDeployCommandSpec extends FlatSpec with Matchers {
behavior of "A 'deploy' visor command"
it should "copy folder" in {
visor.deploy("-h=uname:passwd@localhost -s=/home/uname/test -d=dir")
}
}
| abhishek-ch/incubator-ignite | modules/visor-console/src/test/scala/org/apache/ignite/visor/commands/deploy/VisorDeployCommandSpec.scala | Scala | apache-2.0 | 1,250 |
package com.baeldung.scala
/**
* Some utility methods.
*
* @author Chandra Prakash
*
*/
object Utils {
def average(x : Double, y : Double): Double = (x + y) / 2
def randomLessThan(d : Double): Double = {
var random = 0d
do {
random = Math.random()
} while (random >= d)
random
}
def power(x : Int, y : Int) : Int = {
def powNested(i : Int, accumulator : Int) : Int = {
if (i <= 0) accumulator
else powNested(i - 1, x * accumulator)
}
powNested(y, 1)
}
def fibonacci(n : Int) : Int = n match {
case 0 | 1 => 1
case x if x > 1 =>
fibonacci(x - 1) + fibonacci(x - 2)
}
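  // Hedged examples (illustrative only):
  //   average(1.0, 2.0) == 1.5
  //   power(2, 10) == 1024
  //   fibonacci(5) == 8 (since fibonacci(0) == fibonacci(1) == 1)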
} | Niky4000/UsefulUtils | projects/tutorials-master/tutorials-master/core-scala/src/main/scala/com/baeldung/scala/Utils.scala | Scala | gpl-3.0 | 650 |
package com.mehmetakiftutuncu.muezzinapi.data
import java.io.FileInputStream
import com.google.auth.oauth2.GoogleCredentials
import com.google.firebase.database.{DataSnapshot, DatabaseReference, FirebaseDatabase}
import com.google.firebase.{FirebaseApp, FirebaseOptions}
import com.google.inject.{ImplementedBy, Singleton}
import com.mehmetakiftutuncu.muezzinapi.utilities.{AbstractConf, Log, Logging}
import javax.inject.Inject
import play.api.inject.ApplicationLifecycle
import scala.concurrent.Future
@ImplementedBy(classOf[FirebaseRealtimeDatabase])
trait AbstractFirebaseRealtimeDatabase {
val root: DatabaseReference
}
@Singleton
class FirebaseRealtimeDatabase @Inject()(ApplicationLifecycle: ApplicationLifecycle,
Conf: AbstractConf) extends AbstractFirebaseRealtimeDatabase with Logging {
private val credentialsFile: String = Conf.getString("muezzinApi.firebaseRealtimeDatabase.credentialsFile", "")
private val databaseUrl: String = Conf.getString("muezzinApi.firebaseRealtimeDatabase.databaseUrl", "")
private val firebaseOptions: FirebaseOptions = new FirebaseOptions.Builder()
.setCredentials(GoogleCredentials.fromStream(new FileInputStream(credentialsFile)))
.setDatabaseUrl(databaseUrl)
.build()
Log.warn(s"""Connecting to Firebase Realtime Database at "$databaseUrl" with credentials file "$credentialsFile"...""")
private val firebaseApp: FirebaseApp = FirebaseApp.initializeApp(firebaseOptions)
private val firebaseDatabase: FirebaseDatabase = FirebaseDatabase.getInstance(firebaseApp)
override val root: DatabaseReference = firebaseDatabase.getReference
ApplicationLifecycle.addStopHook {
() =>
Log.warn("Shutting down Firebase Realtime Database connection...")
Future.successful(firebaseDatabase.goOffline())
}
}
object FirebaseRealtimeDatabase {
implicit class DatabaseReferenceExtensions(databaseReference: DatabaseReference) {
def / (path: Int): DatabaseReference = databaseReference.child(path.toString)
def / (path: String): DatabaseReference = databaseReference.child(path)
def cacheKey: String = databaseReference.getPath.toString
}
implicit class DataSnapshotExtensions(dataSnapshot: DataSnapshot) {
def / (path: String): DataSnapshot = dataSnapshot.child(path)
}
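  // Hedged usage sketch (illustrative only; assumes a DatabaseReference named `root` is in
  // scope and the path segments below are made up):
  //   import FirebaseRealtimeDatabase._
  //   val ref = root / "countries" / 2 / "cities" // child reference at /countries/2/cities
  //   val key = ref.cacheKey                      // the reference path, usable as a cache key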
}
| mehmetakiftutuncu/MuezzinAPI | app/com/mehmetakiftutuncu/muezzinapi/data/FirebaseRealtimeDatabase.scala | Scala | mit | 2,339 |
package com.twitter.finagle.stats
/**
* Provides a `StatsReceiver` that prints nice summaries. Handy for
* short-lived programs where you want summaries.
*/
import com.google.common.util.concurrent.AtomicLongMap
import com.google.common.cache.{CacheBuilder, CacheLoader}
import scala.collection.JavaConverters._
import scala.collection.mutable.{ArrayBuffer, SynchronizedBuffer}
class SummarizingStatsReceiver extends StatsReceiverWithCumulativeGauges {
val repr = this
private[this] val counters = AtomicLongMap.create[Seq[String]]()
// Just keep all the samples.
private[this] val stats = CacheBuilder.newBuilder()
.build(new CacheLoader[Seq[String], ArrayBuffer[Float]] {
def load(k: Seq[String]) = new ArrayBuffer[Float]
})
// synchronized on `this`
private[this] var _gauges = Map[Seq[String], () => Float]()
def gauges: Map[Seq[String], () => Float] = synchronized { _gauges }
def counter(name: String*) = new Counter {
def incr(delta: Int) { counters.addAndGet(name, delta) }
}
def stat(name: String*) = new Stat {
def add(value: Float) = SummarizingStatsReceiver.this.synchronized {
stats.get(name) += value
}
}
// Ignoring gauges for now, but we may consider sampling them.
protected[this] def registerGauge(name: Seq[String], f: => Float) = synchronized {
_gauges += (name -> (() => f))
}
protected[this] def deregisterGauge(name: Seq[String]) = synchronized {
_gauges -= name
}
/* Summary */
/* ======= */
private[this] def variableName(name: Seq[String]) = name mkString "/"
def summary(): String = summary(false)
def summary(includeTails: Boolean): String = synchronized {
val counterValues = counters.asMap.asScala
val gaugeValues = gauges.toSeq map {
case (names, gauge) => variableName(names) -> gauge().toString
}
val statValues = stats.asMap.asScala collect {
case (k, buf) if buf.nonEmpty =>
val n = buf.size
val values = new Array[Float](n)
buf.copyToArray(values, 0, n)
val xs = values.sorted
(k, xs)
}
val counterLines = (counterValues map { case (k, v) => (variableName(k), v.toString) }).toSeq
val statLines = (statValues map { case (k, xs) =>
val n = xs.size
def idx(ptile: Double) = math.floor(ptile*n).toInt
(variableName(k), "n=%d min=%.1f med=%.1f p90=%.1f p95=%.1f p99=%.1f p999=%.1f p9999=%.1f max=%.1f".format(
n, xs(0), xs(n/2), xs(idx(.9D)), xs(idx(.95D)), xs(idx(.99D)), xs(idx(.999D)), xs(idx(.9999D)), xs(n-1)))
}).toSeq
lazy val tailValues = (statValues map { case (k, xs) =>
val n = xs.size
def slice(ptile: Double) = {
val end = math.floor(ptile*n).toInt
val start = math.ceil(end-((1.0-ptile)*n)).toInt
for (i <- start to end) yield xs(i)
}
(variableName(k), "p999=%s, p9999=%s".format(slice(.999D), slice(.9999D)))
}).toSeq
val sortedCounters = counterLines.sortBy { case (k, _) => k }
val sortedGauges = gaugeValues.sortBy { case (k, _) => k }
val sortedStats = statLines.sortBy { case (k, _) => k }
lazy val sortedTails = tailValues.sortBy { case (k, _) => k }
val fmt = Function.tupled { (k: String, v: String) => "%-30s %s".format(k, v) }
val fmtCounters = sortedCounters.map(fmt)
val fmtGauges = gaugeValues.map(fmt)
val fmtStats = sortedStats.map(fmt)
lazy val fmtTails = sortedTails.map(fmt)
"# counters\n" + fmtCounters.mkString("\n") +
"\n# gauges\n" + fmtGauges.sorted.mkString("\n") +
"\n# stats\n" + fmtStats.mkString("\n") +
(if (includeTails) "\n# stats-tails\n" + (fmtTails mkString "\n") else "")
}
def print() = println(summary(false))
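  // Hedged usage sketch (illustrative only; the metric names below are assumptions):
  //   val stats = new SummarizingStatsReceiver
  //   stats.counter("requests").incr(1)
  //   stats.stat("latency_ms").add(12f)
  //   stats.print() // prints the collected counters, gauges and stat percentiles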
}
| kristofa/finagle | finagle-core/src/main/scala/com/twitter/finagle/stats/SummarizingStatsReceiver.scala | Scala | apache-2.0 | 3,753 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rpc
/**
* A callback that [[RpcEndpoint]] can use to send back a message or failure. It's thread-safe
* and can be called in any thread.
*/
private[spark] trait RpcCallContext {
/**
* Reply a message to the sender. If the sender is [[RpcEndpoint]], its [[RpcEndpoint.receive]]
* will be called.
*/
def reply(response: Any): Unit
/**
* Report a failure to the sender.
*/
def sendFailure(e: Throwable): Unit
/**
* The sender of this message.
*/
def senderAddress: RpcAddress
}
| wangyixiaohuihui/spark2-annotation | core/src/main/scala/org/apache/spark/rpc/RpcCallContext.scala | Scala | apache-2.0 | 1,383 |
package com.typesafe.slick.testkit.tests
import org.junit.Assert._
import com.typesafe.slick.testkit.util.{TestkitTest, TestDB}
class ZipTest(val tdb: TestDB) extends TestkitTest {
import tdb.profile.simple._
object Categories extends Table[(Int, String)]("categories") {
def id = column[Int]("id")
def name = column[String]("name")
def * = id ~ name
}
object Posts extends Table[(Int, String, Int)]("posts") {
def id = column[Int]("id", O.PrimaryKey, O.AutoInc)
def title = column[String]("title")
def category = column[Int]("category")
def * = id ~ title ~ category
}
def testZip = ifCap(bcap.zip) {
(Categories.ddl ++ Posts.ddl).create
Categories insertAll (
(1, "Scala"),
(3, "Windows"),
(2, "ScalaQuery"),
(4, "Software")
)
Posts.title ~ Posts.category insertAll (
("Test Post", -1),
("Formal Language Processing in Scala, Part 5", 1),
("Efficient Parameterized Queries in ScalaQuery", 2),
("Removing Libraries and HomeGroup icons from the Windows 7 desktop", 3),
("A ScalaQuery Update", 2)
)
val q1 = for {
(c, i) <- Categories.sortBy(_.id).zipWithIndex
} yield (c.id, i)
println("ZipWithIndex: "+q1.selectStatement)
q1.foreach(x => println(" "+x))
assertEquals(List((1,0), (2,1), (3,2), (4,3)), q1.list)
val q2 = for {
(c, p) <- Categories.sortBy(_.id) zip Posts.sortBy(_.category)
} yield (c.id, p.category)
println("Zip: "+q2.selectStatement)
q2.foreach(x => println(" "+x))
assertEquals(List((1,-1), (2,1), (3,2), (4,2)), q2.list)
}
}
| zefonseca/slick-1.0.0-scala.2.11.1 | slick-testkit/src/main/scala/com/typesafe/slick/testkit/tests/ZipTest.scala | Scala | bsd-2-clause | 1,627 |
package com.twitter.finagle.http.filter
import com.twitter.finagle.Service
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finagle.stats.MetricBuilder.{CounterType, HistogramType}
import com.twitter.finagle.stats.{InMemoryStatsReceiver, MetricBuilder}
import com.twitter.util.{Await, Duration, Future, Stopwatch, Time}
import org.mockito.Mockito.{spy, verify}
import org.scalatest.funsuite.AnyFunSuite
class StatsFilterTest extends AnyFunSuite {
val service = new Service[Request, Response] {
def apply(request: Request): Future[Response] = {
val response = Response(request)
response.statusCode = 404
response.write("hello")
Future.value(response)
}
}
test("increment stats") {
val receiver = spy(new InMemoryStatsReceiver)
val filter = new StatsFilter(receiver, Stopwatch.timeMillis) andThen service
Time.withCurrentTimeFrozen { _ => Await.result(filter(Request()), Duration.fromSeconds(5)) }
assert(receiver.counters(Seq("status", "404")) == 1)
assert(receiver.counters(Seq("status", "4XX")) == 1)
assert(receiver.stats(Seq("time", "404")) == Seq(0.0))
assert(receiver.stats(Seq("time", "4XX")) == Seq(0.0))
}
test("status and time counters and stats are memoized") {
val receiver = spy(new InMemoryStatsReceiver)
val filter = new StatsFilter(receiver, Stopwatch.timeMillis) andThen service
Time.withCurrentTimeFrozen { _ =>
Await.result(filter(Request()), Duration.fromSeconds(5))
Await.result(filter(Request()), Duration.fromSeconds(5))
}
// Verify that the counters and stats were only created once
verify(receiver).counter(
MetricBuilder(
name = Seq("status", "404"),
metricType = CounterType,
statsReceiver = receiver))
verify(receiver).counter(
MetricBuilder(
name = Seq("status", "4XX"),
metricType = CounterType,
statsReceiver = receiver))
verify(receiver).stat(
MetricBuilder(
name = Seq("time", "404"),
metricType = HistogramType,
statsReceiver = receiver))
verify(receiver).stat(
MetricBuilder(
name = Seq("time", "4XX"),
metricType = HistogramType,
statsReceiver = receiver))
}
}
| twitter/finagle | finagle-http/src/test/scala/com/twitter/finagle/http/filter/StatsFilterTest.scala | Scala | apache-2.0 | 2,269 |
package knot.net.http
import knot.data.ByteNode
import knot.net.http.models.ParserStates._
import knot.net.http.models._
import knot.net.http.models.headers.{`Content-Length`, `Transfer-Encoding`}
import scala.annotation.tailrec
import scala.collection.mutable.ListBuffer
class HttpRequestParser(headerParser: HttpHeaderParser) {
import knot.core.stream.dsl.Implicits._
def this() = this(HttpHeaderParser())
private[this] val _result = new ListBuffer[RequestSignal]
private[this] var headers: List[HttpHeader] = _
final def result: ListBuffer[RequestSignal] = _result
final def parse(input: ByteNode): (ParserState, Int) = {
parse(Initial, input)
}
@tailrec
final def parse(initialState: ParserState, input: ByteNode, cursor: Int = 0): (ParserState, Int) = {
initialState match {
case s@(Initial | ReadHeader | MoreHeaderData) =>
val (state, idx) = headerParser.parse(s, input, cursor)
if (state == SuccessHeader) {
headers = headerParser.headers.toList
parse(ReadBody, input, idx)
} else {
(state, idx)
}
case ReadBody =>
parseBody(ReadBody, input, cursor, headers)
case MoreBodyData(_, l) =>
parseFixed(l, input, cursor)
case SuccessBody => (initialState, cursor)
}
}
def parseBody(state: ParserState, input: ByteNode, cursor: Int, headers: List[HttpHeader]): (ParserState, Int) = {
HttpHeader.get[`Transfer-Encoding`](headers) match {
case Some(teh) =>
if (teh.isChunked) {
_result += requestStart(headers) {
StreamedEntityCreator { s =>
val data = s.collect {
case EntityChunk(chunk) => chunk
}
Chunked(ContentTypes.`application/octet-stream`, data)
}
}
parseChunked(state, input, cursor)
} else {
          throw ParsingException("unknown transfer-encoding; only chunked is supported.")
}
case None =>
val length = HttpHeader.get[`Content-Length`](headers) match {
case Some(cl) => cl.length
case None => 0
}
if (length == 0) {
_result += requestStart(headers) {
FixedEntityCreator {
Entity.emptyRequest
}
}
(SuccessBody, cursor)
} else if (length <= input.length - cursor) {
_result += requestStart(headers) {
FixedEntityCreator {
Fixed(ContentTypes.`application/octet-stream`, input.slice(cursor, cursor + length).shrink)
}
}
(SuccessBody, cursor + length)
} else {
_result +=
requestStart(headers) {
StreamedEntityCreator { s =>
val data = s.collect {
case EntityPart(d) => d
}
Default(ContentTypes.`application/octet-stream`, length, data)
}
}
parseFixed(length, input, cursor)
}
}
}
private[this] def parseFixed(remainingContentLength: Int, input: ByteNode, cursor: Int): (ParserState, Int) = {
val remainingInput = input.length - cursor
if (remainingInput > 0) {
if (remainingInput < remainingContentLength) {
_result += EntityPart(input.drop(cursor).shrink)
(MoreBodyData(remainingContentLength, remainingContentLength - remainingInput), cursor + remainingInput)
} else {
val offset = cursor + remainingInput
_result += EntityPart(input.slice(cursor, offset).shrink)
_result += EndOfEntity
        // If the Connection header has "close", the connection should be shut down here.
(SuccessBody, offset)
}
} else {
(MoreBodyData(remainingContentLength, remainingContentLength), cursor)
}
}
private[this] def parseChunked(state: ParserState, input: ByteNode, cursor: Int): (ParserState, Int) = {
throw new NotImplementedError()
}
private[this] def requestStart(headers: List[HttpHeader])(entity: EntityCreator[RequestSignal, Entity]): RequestSignal = {
RequestStart(
headerParser.method,
headerParser.uri,
headers,
entity,
headerParser.protocol
)
}
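  // Hedged usage sketch (illustrative only; `bytes` is an assumed ByteNode holding a full request):
  //   val parser = new HttpRequestParser()
  //   val (state, consumed) = parser.parse(bytes)
  //   // on SuccessBody, parser.result holds the RequestStart / EntityPart / EndOfEntity signals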
}
| defvar/knot | knot-net/src/main/scala/knot/net/http/HttpRequestParser.scala | Scala | mit | 4,207 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.noop
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.execution.streaming.MemoryStream
import org.apache.spark.sql.streaming.{StreamingQuery, StreamTest, Trigger}
class NoopStreamSuite extends StreamTest {
import testImplicits._
test("microbatch") {
val input = MemoryStream[Int]
val query = input.toDF().writeStream.format("noop").start()
testMicroBatchQuery(query, input)
}
test("microbatch restart with checkpoint") {
val input = MemoryStream[Int]
withTempDir { checkpointDir =>
def testWithCheckpoint(): Unit = {
val query = input.toDF().writeStream
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.format("noop")
.start()
testMicroBatchQuery(query, input)
}
testWithCheckpoint()
testWithCheckpoint()
}
}
private def testMicroBatchQuery(
query: StreamingQuery,
input: MemoryStream[Int],
data: Int*): Unit = {
assert(query.isActive)
try {
input.addData(1, 2, 3)
eventually(timeout(streamingTimeout)) {
assert(query.recentProgress.map(_.numInputRows).sum == 3)
}
} finally {
query.stop()
}
}
test("continuous") {
val input = getRateDataFrame()
val query = input.writeStream.format("noop").trigger(Trigger.Continuous(200)).start()
assert(query.isActive)
query.stop()
}
test("continuous restart with checkpoint") {
withTempDir { checkpointDir =>
def testWithCheckpoint(): Unit = {
val input = getRateDataFrame()
val query = input.writeStream
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.format("noop")
.trigger(Trigger.Continuous(200))
.start()
assert(query.isActive)
query.stop()
}
testWithCheckpoint()
testWithCheckpoint()
}
}
private def getRateDataFrame(): DataFrame = {
spark.readStream
.format("rate")
.option("numPartitions", "1")
.option("rowsPerSecond", "5")
.load()
.select('value)
}
}
| ConeyLiu/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/noop/NoopStreamSuite.scala | Scala | apache-2.0 | 2,945 |
package com.blstream.sbtsearchmavenplugin
import org.specs2.mutable.Specification
class ArtifactsPrinterTest extends Specification
with ArtifactsPrinter {
"printArtifacts" should {
"properly format artifacts output" >> {
val artifacts = List(
Artifact("org.scalaz", "z", "1"),
Artifact("foo", "bar", "baz"),
Artifact("some", "other", "2")
)
val text = printArtifacts("foo")(artifacts)
val expectedResult =
s"""Results for foo:
|"org.scalaz" % "z" % "1"
|"foo" % "bar" % "baz"
|"some" % "other" % "2"""".stripMargin
text must beEqualTo(expectedResult)
}
}
}
| blstream/sbt-search-maven-plugin | src/test/scala/com/blstream/sbtsearchmavenplugin/ArtifactsPrinterTest.scala | Scala | mit | 692 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.physical.stream
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.plan.nodes.FlinkRelNode
import org.apache.flink.table.planner.plan.nodes.logical.FlinkLogicalJoin
import org.apache.flink.table.planner.plan.nodes.physical.stream.StreamExecIntervalJoin
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelTraitSet}
import org.apache.calcite.rel.RelNode
import scala.collection.JavaConversions._
/**
* Rule that converts non-SEMI/ANTI [[FlinkLogicalJoin]] with window bounds in join condition
* to [[StreamExecIntervalJoin]].
*/
class StreamExecIntervalJoinRule
extends StreamExecJoinRuleBase("StreamExecIntervalJoinRule") {
override def matches(call: RelOptRuleCall): Boolean = {
val join: FlinkLogicalJoin = call.rel(0)
val joinRowType = join.getRowType
// TODO support SEMI/ANTI join
if (!join.getJoinType.projectsRight) {
return false
}
val (windowBounds, _) = extractWindowBounds(join)
if (windowBounds.isDefined) {
if (windowBounds.get.isEventTime) {
true
} else {
// Check that no event-time attributes are in the input because the processing time window
// join does not correctly hold back watermarks.
// We rely on projection pushdown to remove unused attributes before the join.
!joinRowType.getFieldList.exists(f => FlinkTypeFactory.isRowtimeIndicatorType(f.getType))
}
} else {
// the given join does not have valid window bounds. We cannot translate it.
false
}
}
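  // Hedged illustration (not from this file): a join this rule is meant to match carries
  // time-window bounds in its condition, roughly like
  //   SELECT ... FROM A JOIN B ON A.id = B.id AND
  //     A.rowtime BETWEEN B.rowtime - INTERVAL '4' SECOND AND B.rowtime + INTERVAL '4' SECOND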
override protected def transform(
join: FlinkLogicalJoin,
leftInput: FlinkRelNode,
leftConversion: RelNode => RelNode,
rightInput: FlinkRelNode,
rightConversion: RelNode => RelNode,
providedTraitSet: RelTraitSet): FlinkRelNode = {
val (windowBounds, remainCondition) = extractWindowBounds(join)
new StreamExecIntervalJoin(
join.getCluster,
providedTraitSet,
leftConversion(leftInput),
rightConversion(rightInput),
join.getCondition,
join.getJoinType,
join.getRowType,
windowBounds.get.isEventTime,
windowBounds.get.leftLowerBound,
windowBounds.get.leftUpperBound,
windowBounds.get.leftTimeIdx,
windowBounds.get.rightTimeIdx,
remainCondition)
}
}
object StreamExecIntervalJoinRule {
val INSTANCE: RelOptRule = new StreamExecIntervalJoinRule
}
| aljoscha/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/stream/StreamExecIntervalJoinRule.scala | Scala | apache-2.0 | 3,298 |
/**
* *****************************************************************************
* Copyright (c) 2013 Guillaume DUBUISSON DUPLESSIS <[email protected]>.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the GNU Public License v3.0
* which accompanies this distribution, and is available at
* http://www.gnu.org/licenses/gpl.html
*
* Contributors:
* Guillaume DUBUISSON DUPLESSIS <[email protected]> - initial API and implementation
* ****************************************************************************
*/
package list.P23
import util._
import list.P20.sol01._
import scala.annotation.tailrec
class sol02 extends P23 {
def randomSelect[T](nb: Int, l: List[T]): List[T] =
sol02.randomSelect(nb, l)
}
object sol02 {
def randomSelect[T](nb: Int, l: List[T]): List[T] =
{
      require(nb <= l.size, s"$nb is greater than the list size (${l.size})")
@tailrec
def randomSelectHelper(n: Int, ls: List[T], acc: List[T]): List[T] =
if (n <= 0) {
acc
} else {
          // Randomly select an index between 0 (inclusive) and the list size (exclusive)
          val randomIndex = randomInt(ls.size)
          // Remove the element at the selected index
          val (newList, elt) = removeAt(randomIndex, ls)
          // Randomly select the rest from the remaining elements
randomSelectHelper(n - 1, newList, elt :: acc)
}
randomSelectHelper(nb, l, List())
}
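  // Hedged example (illustrative only): randomSelect(3, List('a', 'b', 'c', 'd', 'e'))
  // picks 3 elements without replacement, in random order, e.g. List('e', 'b', 'd').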
}
| GuillaumeDD/scala99problems | src/main/scala/list/P23/sol02.scala | Scala | gpl-3.0 | 1,535 |
/*
* Copyright 2014 Nick Edwards
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.njeuk.dbmapper.macros
/**
* Provides details about an Entity object used to guide the SQL generation
* @param tableName the name of the table the Entity is stored in
* @param identityColumn the column which uniquely identifies the entity, i.e. a primary key
*
* if you get the error:
* `not found: type CodeToSql
* () => DbCodeGenerator.codeToSql[T]()`
*
* then you haven't added:
* `import com.github.njeuk.dbmapper.macros.CodeToSql`
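 *
 * A hedged usage sketch (the entity class and the generator call below are assumptions,
 * not part of this file):
 * {{{
 * import com.github.njeuk.dbmapper.macros.CodeToSql
 *
 * case class Book(bookId: Int, title: String)
 *
 * val mapping: CodeToSql = DbCodeGenerator.codeToSql[Book]()
 * // mapping.tableName and mapping.identityColumn then guide the SQL generation;
 * // the exact values depend on the generator's naming conventions.
 * }}}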
*/
case class CodeToSql(tableName: String, identityColumn: String) | njeuk/dbmapper-macros | src/main/scala/com/github/njeuk/dbmapper/macros/CodeToSql.scala | Scala | apache-2.0 | 1,137 |
package korolev.server
import java.util.concurrent.ConcurrentLinkedQueue
import bridge.JSAccess
import korolev.Async
import korolev.Async.Promise
import scala.annotation.switch
import scala.collection.concurrent.TrieMap
import scala.collection.mutable
import scala.language.higherKinds
/**
* @author Aleksey Fomkin <[email protected]>
*/
case class JsonQueuedJsAccess[F[+_]: Async](sendJson: String => Unit) extends JSAccess[F] {
protected val promises = TrieMap.empty[Int, Promise[F, Any]]
protected val callbacks = TrieMap.empty[String, (Any) => Unit]
val queue = new ConcurrentLinkedQueue[String]()
def escape(sb: StringBuilder, s: String, unicode: Boolean): Unit = {
sb.append('"')
var i = 0
val len = s.length
while (i < len) {
(s.charAt(i): @switch) match {
case '"' => sb.append("\\\"")
case '\\' => sb.append("\\\\")
case '\b' => sb.append("\\b")
case '\f' => sb.append("\\f")
case '\n' => sb.append("\\n")
case '\r' => sb.append("\\r")
case '\t' => sb.append("\\t")
case c =>
if (c < ' ' || (c > '~' && unicode)) sb.append("\\u%04x" format c.toInt)
else sb.append(c)
}
i += 1
}
sb.append('"')
}
def seqToJSON(xs: Seq[Any]): String = {
val xs2 =
xs map {
case s: String if !s.startsWith("[") ⇒
val sb = new StringBuilder
escape(sb, s, unicode = true)
sb.mkString
case any ⇒ any
}
"[" + xs2.reduce(_ + ", " + _) + "]"
}
override def platformDependentPack(value: Any): Any = value match {
case xs: Seq[Any] ⇒ seqToJSON(xs)
case x ⇒ super.platformDependentPack(x)
}
/**
   * Sends a message to the remote page; the message is queued until `flush()` is called.
*/
def send(args: Seq[Any]): Unit = {
val message = seqToJSON(args)
queue.add(message)
}
override def flush(): Unit = {
val buffer = mutable.Buffer.empty[String]
while (!queue.isEmpty) {
buffer += queue.poll()
}
if (buffer.size == 1) {
sendJson(buffer.head)
}
else if (buffer.nonEmpty) {
val requests = buffer.mkString(",")
sendJson(s"""["batch",$requests]""")
}
}
def receive(message: String): Unit = {
def prepareString(value: String) = {
value match {
case s: String if s.startsWith("\"") ⇒
s.substring(1, s.length - 1).trim
case s ⇒ s.trim
}
}
val args =
message.stripPrefix("[").stripSuffix("]").split(",").map(prepareString)
val reqId = args(0).toInt
if (reqId == -1) {
val callbackId = args(1)
val arg = args(2)
fireCallback(callbackId, arg)
} else {
val isSuccess = args(1).toBoolean
val res = args(2)
resolvePromise(reqId, isSuccess, res)
}
}
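  // Hedged illustration of the message format handled above (values are assumptions):
  //   receive("""[1,true,"ok"]""")         resolves request 1 successfully with "ok"
  //   receive("""[-1,"callback0","42"]""") fires the callback registered as "callback0" with "42"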
}
| PhilAndrew/JumpMicro | JMSangriaGraphql/src/main/scala/korolev/server/JsonQueuedJsAccess.scala | Scala | mit | 2,818 |
package org.jetbrains.plugins.scala
package lang
package completion
package filters.toplevel
import psi.api.base.patterns.{ScCaseClause, ScStableReferenceElementPattern, ScReferencePattern}
import psi.api.base.ScStableCodeReferenceElement
import com.intellij.lang.ASTNode
import psi.api.ScalaFile;
import com.intellij.psi.PsiElement;
import com.intellij.psi.PsiErrorElement;
import com.intellij.psi.filters.ElementFilter;
import org.jetbrains.annotations.NonNls;
import org.jetbrains.plugins.scala.lang.psi._
import com.intellij.psi._
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.completion.ScalaCompletionUtil._
import org.jetbrains.plugins.scala.lang.lexer._
import org.jetbrains.plugins.scala.lang.parser._
/**
* @author Alexander Podkhalyuzin
* Date: 22.05.2008
*/
class TemplateFilter extends ElementFilter {
def isAcceptable(element: Object, context: PsiElement): Boolean = {
if (context.isInstanceOf[PsiComment]) return false
val (leaf, _) = processPsiLeafForFilter(getLeafByOffset(context.getTextRange.getStartOffset, context))
if (leaf != null) {
val parent = leaf.getParent
val tuple = ScalaCompletionUtil.getForAll(parent, leaf)
if (tuple._1) return tuple._2
parent match {
case _: ScReferenceExpression => {
parent.getParent match {
case y: ScStableReferenceElementPattern => {
y.getParent match {
case x: ScCaseClause => {
x.getParent.getParent match {
case _: ScMatchStmt if (x.getParent.getFirstChild == x) => return false
case _: ScMatchStmt => return true
case _ => return true
}
}
case _ =>
}
}
case _ =>
}
}
case _ =>
}
}
false
}
def isClassAcceptable(hintClass: java.lang.Class[_]): Boolean = true
@NonNls
override def toString = "template definitions keyword filter"
} | consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/completion/filters/toplevel/TemplateFilter.scala | Scala | apache-2.0 | 2,270 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.sampler
import com.twitter.conversions.time._
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.util.{MockTimer, Time, Var}
import org.scalatest.FunSuite
class StoreRateCheckTest extends FunSuite {
test("fails when the request rate is non-positive") {
val rate = Var(1)
val check = new StoreRateCheck[Unit](rate)
assert(check(Some(())).isDefined)
rate.update(0)
assert(check(Some(())).isEmpty)
}
test("fails when sufficient data is not present") {
val check = new SufficientDataCheck[Unit](2)
assert(check(Some(Seq.empty[Unit])).isEmpty)
assert(check(Some(Seq((), ()))).isDefined)
assert(check(Some(Seq((), (), ()))).isDefined)
}
test("fails when data fails validation") {
val check = new ValidDataCheck[Int](_ > 1)
assert(check(Some(Seq(0, 1, 2))).isEmpty)
assert(check(Some(Seq(2, 3, 4))).isDefined)
}
test("allows only once per period") {
Time.withCurrentTimeFrozen { tc =>
val timer = new MockTimer
val check = new CooldownCheck[Unit](1.minute, NullStatsReceiver, timer = timer)
assert(check(Some(())).isDefined)
assert(check(Some(())).isEmpty)
tc.advance(61.seconds)
timer.tick()
assert(check(Some(())).isDefined)
assert(check(Some(())).isEmpty)
}
}
test("fail unless enough outliers are encountered") {
val rate = Var(10)
val check = new OutlierCheck(rate, 2, 0.1)
assert(check(Some(Seq())).isEmpty)
assert(check(Some(Seq(10, 10, 10))).isEmpty)
assert(check(Some(Seq(1, 1, 10, 10))).isEmpty)
assert(check(Some(Seq(1, 1))).isDefined)
assert(check(Some(Seq(10, 10, 1, 1))).isDefined)
// these fall within the 0.1 threshold, thus shouldn't be counted
assert(check(Some(Seq(9, 9))).isEmpty)
}
test("calculates the discounted average of a series of numbers") {
assert(DiscountedAverage.calculate(Seq(10, 5, 0), 1.0) === 5.0)
val n = DiscountedAverage.calculate(Seq(10, 5, 0), 0.5)
assert(DiscountedAverage.truncate(n) === 7.142)
}
test("calculates a discounted average based on the current req rate and sample rate") {
val tgtStoreRate = Var(100)
val sampleRate = Var(1.0)
val calc = new CalculateSampleRate(tgtStoreRate, sampleRate, DiscountedAverage, 0.05, 1.0)
assert(calc(Some(Seq(1000, 1000, 1000))) === Some(0.1))
}
}
| prat0318/zipkin | zipkin-sampler/src/test/scala/com/twitter/zipkin/sampler/StoreRateCheckTest.scala | Scala | apache-2.0 | 2,981 |
/*
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.flaminem.flamy.parsing.hive
import com.flaminem.flamy.parsing.hive.HiveParserUtils.getName
import com.flaminem.flamy.parsing.model.ColumnDependency
import org.apache.hadoop.hive.ql.parse.{ASTNode, HiveParser}
import scala.util.matching.Regex
/**
* Created by fpin on 1/30/17.
*/
object HiveToken {
case object EQUAL {
def unapply(tree: ASTNode): Option[(ASTNode, ASTNode)] = {
if (tree.getType == HiveParser.EQUAL){
val left: ASTNode = tree.getChild(0).asInstanceOf[ASTNode]
val right: ASTNode = tree.getChild(1).asInstanceOf[ASTNode]
Some(left -> right)
}
else {
None
}
}
}
case object TOK_PARTVAL {
def unapplySeq(tree: ASTNode): Option[(ASTNode, Seq[ASTNode])] = {
if (tree.getType == HiveParser.TOK_PARTVAL){
if(tree.getChildCount>1){
val left: ASTNode = tree.getChild(0).asInstanceOf[ASTNode]
val right: ASTNode = tree.getChild(1).asInstanceOf[ASTNode]
Some(left, right::Nil)
}
else {
Some(tree.getChild(0).asInstanceOf[ASTNode], Nil)
}
}
else {
None
}
}
}
case object DOT {
def unapply(tree: ASTNode): Option[(ASTNode, ASTNode)] = {
if (tree.getType == HiveParser.DOT){
val left: ASTNode = tree.getChild(0).asInstanceOf[ASTNode]
val right: ASTNode = tree.getChild(1).asInstanceOf[ASTNode]
Some(left -> right)
}
else {
None
}
}
}
case object Identifier {
def unapply(tree: ASTNode): Option[String] = {
if (tree.getType == HiveParser.Identifier){
Some(tree.getText)
}
else {
None
}
}
}
case object TOK_TABLE_OR_COL {
def unapply(tree: ASTNode): Option[String] = {
if (tree.getType == HiveParser.TOK_TABLE_OR_COL){
Some(tree.getChild(0).getText)
}
else {
None
}
}
}
case object PartitionVar {
    private val partitionVariableRE: Regex = """\A[$][{]partition:(.+)[}]\z""".r
def unapply(tree: ASTNode): Option[String] = {
if (tree.getType == HiveParser.StringLiteral) {
getName(tree) match {
case partitionVariableRE(name) => Some(name)
case _ => None
}
}
else {
None
}
}
}
case object Column {
def unapply(tree: ASTNode): Option[ColumnDependency] = {
tree match {
case DOT(TOK_TABLE_OR_COL(table), Identifier(column)) =>
Some(new ColumnDependency(column, table))
case TOK_TABLE_OR_COL(column) =>
Some(new ColumnDependency(column))
case _ => None
}
}
}
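  // Hedged usage sketch (the `tree` value below is an assumption): these extractors are meant
  // for pattern matching over Hive AST nodes, roughly like
  //   tree match {
  //     case EQUAL(Column(col), PartitionVar(name)) =>
  //       // equality between a column and a ${partition:name} variable
  //     case _ =>
  //   }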
}
| flaminem/flamy | src/main/scala/com/flaminem/flamy/parsing/hive/HiveToken.scala | Scala | apache-2.0 | 3,278 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, AggregateFunction, Complete}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Expand, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.types.IntegerType
/**
* This rule rewrites an aggregate query with distinct aggregations into an expanded double
* aggregation in which the regular aggregation expressions and every distinct clause is aggregated
* in a separate group. The results are then combined in a second aggregate.
*
* For example (in scala):
* {{{
* val data = Seq(
* ("a", "ca1", "cb1", 10),
* ("a", "ca1", "cb2", 5),
* ("b", "ca1", "cb1", 13))
* .toDF("key", "cat1", "cat2", "value")
* data.createOrReplaceTempView("data")
*
* val agg = data.groupBy($"key")
* .agg(
* countDistinct($"cat1").as("cat1_cnt"),
* countDistinct($"cat2").as("cat2_cnt"),
* sum($"value").as("total"))
* }}}
*
* This translates to the following (pseudo) logical plan:
* {{{
* Aggregate(
* key = ['key]
* functions = [COUNT(DISTINCT 'cat1),
* COUNT(DISTINCT 'cat2),
* sum('value)]
* output = ['key, 'cat1_cnt, 'cat2_cnt, 'total])
* LocalTableScan [...]
* }}}
*
* This rule rewrites this logical plan to the following (pseudo) logical plan:
* {{{
* Aggregate(
* key = ['key]
* functions = [count(if (('gid = 1)) 'cat1 else null),
* count(if (('gid = 2)) 'cat2 else null),
* first(if (('gid = 0)) 'total else null) ignore nulls]
* output = ['key, 'cat1_cnt, 'cat2_cnt, 'total])
* Aggregate(
* key = ['key, 'cat1, 'cat2, 'gid]
* functions = [sum('value)]
* output = ['key, 'cat1, 'cat2, 'gid, 'total])
* Expand(
* projections = [('key, null, null, 0, cast('value as bigint)),
* ('key, 'cat1, null, 1, null),
* ('key, null, 'cat2, 2, null)]
* output = ['key, 'cat1, 'cat2, 'gid, 'value])
* LocalTableScan [...]
* }}}
*
* The rule does the following things here:
* 1. Expand the data. There are three aggregation groups in this query:
* i. the non-distinct group;
* ii. the distinct 'cat1 group;
* iii. the distinct 'cat2 group.
* An expand operator is inserted to expand the child data for each group. The expand will null
* out all unused columns for the given group; this must be done in order to ensure correctness
 *    later on. Groups can be identified by a group id (gid) column added by the expand operator.
 * 2. De-duplicate the distinct paths and aggregate the non-distinct path. The group by clause of
 *    this aggregate consists of the original group by clause, all the requested distinct columns
 *    and the group id. Both the de-duplication of the distinct columns and the aggregation of the
 *    non-distinct group take advantage of the fact that we group by the group id (gid) and that we
 *    have nulled out all non-relevant columns for the given group.
* 3. Aggregating the distinct groups and combining this with the results of the non-distinct
* aggregation. In this step we use the group id to filter the inputs for the aggregate
 *    functions. The results of the non-distinct group are 'aggregated' using the first operator;
 *    it might be more elegant to use the native UDAF merge mechanism for this in the future.
*
 * This rule duplicates the input data two or more times (# distinct groups + an optional
 * non-distinct group). This will put quite a bit of memory pressure on the aggregate and exchange
 * operators used. Keeping the number of distinct groups as low as possible should be a priority;
* we could improve this in the current rule by applying more advanced expression canonicalization
* techniques.
*/
object RewriteDistinctAggregates extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case a: Aggregate => rewrite(a)
}
def rewrite(a: Aggregate): Aggregate = {
// Collect all aggregate expressions.
val aggExpressions = a.aggregateExpressions.flatMap { e =>
e.collect {
case ae: AggregateExpression => ae
}
}
// Extract distinct aggregate expressions.
val distinctAggGroups = aggExpressions.filter(_.isDistinct).groupBy { e =>
val unfoldableChildren = e.aggregateFunction.children.filter(!_.foldable).toSet
if (unfoldableChildren.nonEmpty) {
// Only expand the unfoldable children
unfoldableChildren
} else {
// If aggregateFunction's children are all foldable
// we must expand at least one of the children (here we take the first child),
// or If we don't, we will get the wrong result, for example:
// count(distinct 1) will be explained to count(1) after the rewrite function.
// Generally, the distinct aggregateFunction should not run
// foldable TypeCheck for the first child.
e.aggregateFunction.children.take(1).toSet
}
}
// Aggregation strategy can handle queries with a single distinct group.
if (distinctAggGroups.size > 1) {
// Create the attributes for the grouping id and the group by clause.
val gid = AttributeReference("gid", IntegerType, nullable = false)()
val groupByMap = a.groupingExpressions.collect {
case ne: NamedExpression => ne -> ne.toAttribute
case e => e -> AttributeReference(e.sql, e.dataType, e.nullable)()
}
val groupByAttrs = groupByMap.map(_._2)
// Functions used to modify aggregate functions and their inputs.
def evalWithinGroup(id: Literal, e: Expression) = If(EqualTo(gid, id), e, nullify(e))
def patchAggregateFunctionChildren(
af: AggregateFunction)(
attrs: Expression => Option[Expression]): AggregateFunction = {
val newChildren = af.children.map(c => attrs(c).getOrElse(c))
af.withNewChildren(newChildren).asInstanceOf[AggregateFunction]
}
// Setup unique distinct aggregate children.
val distinctAggChildren = distinctAggGroups.keySet.flatten.toSeq.distinct
val distinctAggChildAttrMap = distinctAggChildren.map(expressionAttributePair)
val distinctAggChildAttrs = distinctAggChildAttrMap.map(_._2)
// Setup expand & aggregate operators for distinct aggregate expressions.
val distinctAggChildAttrLookup = distinctAggChildAttrMap.toMap
val distinctAggOperatorMap = distinctAggGroups.toSeq.zipWithIndex.map {
case ((group, expressions), i) =>
val id = Literal(i + 1)
// Expand projection
val projection = distinctAggChildren.map {
case e if group.contains(e) => e
case e => nullify(e)
} :+ id
// Final aggregate
val operators = expressions.map { e =>
val af = e.aggregateFunction
val naf = patchAggregateFunctionChildren(af) { x =>
distinctAggChildAttrLookup.get(x).map(evalWithinGroup(id, _))
}
(e, e.copy(aggregateFunction = naf, isDistinct = false))
}
(projection, operators)
}
// Setup expand for the 'regular' aggregate expressions.
// only expand unfoldable children
val regularAggExprs = aggExpressions
.filter(e => !e.isDistinct && e.children.exists(!_.foldable))
val regularAggChildren = regularAggExprs
.flatMap(_.aggregateFunction.children.filter(!_.foldable))
.distinct
val regularAggChildAttrMap = regularAggChildren.map(expressionAttributePair)
// Setup aggregates for 'regular' aggregate expressions.
val regularGroupId = Literal(0)
val regularAggChildAttrLookup = regularAggChildAttrMap.toMap
val regularAggOperatorMap = regularAggExprs.map { e =>
// Perform the actual aggregation in the initial aggregate.
val af = patchAggregateFunctionChildren(e.aggregateFunction)(regularAggChildAttrLookup.get)
val operator = Alias(e.copy(aggregateFunction = af), e.sql)()
// Select the result of the first aggregate in the last aggregate.
val result = AggregateExpression(
aggregate.First(evalWithinGroup(regularGroupId, operator.toAttribute), Literal(true)),
mode = Complete,
isDistinct = false)
// Some aggregate functions (COUNT) have the special property that they can return a
// non-null result without any input. We need to make sure we return a result in this case.
val resultWithDefault = af.defaultResult match {
case Some(lit) => Coalesce(Seq(result, lit))
case None => result
}
// Return a Tuple3 containing:
// i. The original aggregate expression (used for look ups).
// ii. The actual aggregation operator (used in the first aggregate).
// iii. The operator that selects and returns the result (used in the second aggregate).
(e, operator, resultWithDefault)
}
// Construct the regular aggregate input projection only if we need one.
val regularAggProjection = if (regularAggExprs.nonEmpty) {
Seq(a.groupingExpressions ++
distinctAggChildren.map(nullify) ++
Seq(regularGroupId) ++
regularAggChildren)
} else {
Seq.empty[Seq[Expression]]
}
// Construct the distinct aggregate input projections.
val regularAggNulls = regularAggChildren.map(nullify)
val distinctAggProjections = distinctAggOperatorMap.map {
case (projection, _) =>
a.groupingExpressions ++
projection ++
regularAggNulls
}
// Construct the expand operator.
val expand = Expand(
regularAggProjection ++ distinctAggProjections,
groupByAttrs ++ distinctAggChildAttrs ++ Seq(gid) ++ regularAggChildAttrMap.map(_._2),
a.child)
// Construct the first aggregate operator. This de-duplicates all the children of
// distinct operators, and applies the regular aggregate operators.
val firstAggregateGroupBy = groupByAttrs ++ distinctAggChildAttrs :+ gid
val firstAggregate = Aggregate(
firstAggregateGroupBy,
firstAggregateGroupBy ++ regularAggOperatorMap.map(_._2),
expand)
// Construct the second aggregate
val transformations: Map[Expression, Expression] =
(distinctAggOperatorMap.flatMap(_._2) ++
regularAggOperatorMap.map(e => (e._1, e._3))).toMap
val patchedAggExpressions = a.aggregateExpressions.map { e =>
e.transformDown {
case e: Expression =>
// The same GROUP BY clauses can have different forms (different names for instance) in
// the groupBy and aggregate expressions of an aggregate. This makes a map lookup
// tricky. So we do a linear search for a semantically equal group by expression.
groupByMap
.find(ge => e.semanticEquals(ge._1))
.map(_._2)
.getOrElse(transformations.getOrElse(e, e))
}.asInstanceOf[NamedExpression]
}
Aggregate(groupByAttrs, patchedAggExpressions, firstAggregate)
} else {
a
}
}
private def nullify(e: Expression) = Literal.create(null, e.dataType)
private def expressionAttributePair(e: Expression) =
// We are creating a new reference here instead of reusing the attribute in case of a
// NamedExpression. This is done to prevent collisions between distinct and regular aggregate
// children, in this case attribute reuse causes the input of the regular aggregate to bound to
// the (nulled out) input of the distinct aggregate.
e -> AttributeReference(e.sql, e.dataType, nullable = true)()
}
| pgandhi999/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/RewriteDistinctAggregates.scala | Scala | apache-2.0 | 12,766 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.box
trait Linked[S <: CtValue[_], T <: CtBoxIdentifier] {
def apply(source: S): T
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/box/Linked.scala | Scala | apache-2.0 | 715 |
package controllers
import play.api.mvc._
object Application extends Controller {
def index = Action {
Ok(views.html.miruo())
}
} | kamekoopa/redis-miruo | app/controllers/Application.scala | Scala | apache-2.0 | 142 |
import sbt._
import Keys._
import play.twirl.sbt.Import.TwirlKeys
object ApplicationBuild extends Build {
val appName = "play2-auth"
val playVersion = play.core.PlayVersion.current
lazy val baseSettings = Seq(
version := "0.15.0",
scalaVersion := "2.11.8",
crossScalaVersions := Seq("2.11.8"),
organization := "com.github.tototoshi",
resolvers ++=
Resolver.sonatypeRepo("releases") ::
Nil,
scalacOptions ++= Seq("-language:_", "-deprecation")
)
lazy val appPublishMavenStyle = true
lazy val appPublishArtifactInTest = false
lazy val appPomIncludeRepository = { _: MavenRepository => false }
lazy val appPublishTo = { (v: String) =>
val nexus = "https://oss.sonatype.org/"
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
lazy val appPomExtra = {
<url>https://github.com/t2v/play2-auth</url>
<licenses>
<license>
<name>Apache License, Version 2.0</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.html</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>[email protected]:t2v/play2-auth.git</url>
<connection>scm:git:[email protected]:t2v/play2-auth.git</connection>
</scm>
<developers>
<developer>
<id>gakuzzzz</id>
<name>gakuzzzz</name>
<url>https://github.com/gakuzzzz</url>
</developer>
<developer>
<id>tototoshi</id>
<name>Toshiyuki Takahashi</name>
<url>https://tototoshi.github.io</url>
</developer>
</developers>
}
lazy val core = Project("core", base = file("module"))
.settings(
baseSettings,
libraryDependencies += "com.typesafe.play" %% "play" % playVersion % "provided",
libraryDependencies += "com.typesafe.play" %% "play-cache" % playVersion % "provided",
libraryDependencies += "jp.t2v" %% "stackable-controller" % "0.5.1",
name := appName,
publishMavenStyle := appPublishMavenStyle,
publishArtifact in Test := appPublishArtifactInTest,
pomIncludeRepository := appPomIncludeRepository,
publishTo <<=(version)(appPublishTo),
pomExtra := appPomExtra
)
lazy val test = Project("test", base = file("test"))
.settings(
baseSettings,
libraryDependencies += "com.typesafe.play" %% "play-test" % playVersion,
name := appName + "-test",
publishMavenStyle := appPublishMavenStyle,
publishArtifact in Test := appPublishArtifactInTest,
pomIncludeRepository := appPomIncludeRepository,
publishTo <<=(version)(appPublishTo),
pomExtra := appPomExtra
).dependsOn(core)
lazy val sample = Project("sample", file("sample"))
.enablePlugins(play.sbt.PlayScala)
.settings(
baseSettings,
resolvers += "scalaz-bintray" at "https://dl.bintray.com/scalaz/releases",
libraryDependencies += play.sbt.Play.autoImport.cache,
libraryDependencies += play.sbt.Play.autoImport.specs2 % Test,
libraryDependencies += play.sbt.Play.autoImport.jdbc,
libraryDependencies += "org.mindrot" % "jbcrypt" % "0.3m",
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc" % "2.2.7",
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc-config" % "2.2.7",
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc-syntax-support-macro" % "2.2.7",
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc-test" % "2.2.7" % "test",
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc-play-initializer" % "2.4.0",
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc-play-dbapi-adapter" % "2.4.0",
libraryDependencies += "org.scalikejdbc" %% "scalikejdbc-play-fixture" % "2.4.0",
libraryDependencies += "org.flywaydb" %% "flyway-play" % "2.0.1",
TwirlKeys.templateImports in Compile ++= Seq(
"com.github.tototoshi.play2.auth.sample._",
"play.api.data.Form",
"play.api.mvc.Flash",
"views._",
"views.html.helper",
"controllers._"
),
publish := { },
publishArtifact := false,
packagedArtifacts := Map.empty,
publishTo <<=(version)(appPublishTo),
pomExtra := appPomExtra
)
.dependsOn(core, test % "test")
lazy val social = Project (id = "social", base = file ("social"))
.settings(
baseSettings,
name := appName + "-social",
libraryDependencies += "com.typesafe.play" %% "play" % playVersion % "provided",
libraryDependencies += "com.typesafe.play" %% "play-ws" % playVersion % "provided",
libraryDependencies += "com.typesafe.play" %% "play-cache" % playVersion % "provided",
publishMavenStyle := appPublishMavenStyle,
publishArtifact in Test := appPublishArtifactInTest,
pomIncludeRepository := appPomIncludeRepository,
publishTo <<=(version)(appPublishTo),
pomExtra := appPomExtra
).dependsOn(core)
lazy val socialSample = Project("social-sample", file("social-sample"))
.enablePlugins(play.sbt.PlayScala)
.settings(
baseSettings,
name := appName + "-social-sample",
resourceDirectories in Test += baseDirectory.value / "conf",
resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots",
libraryDependencies ++= Seq(
"com.typesafe.play" %% "play-ws" % playVersion,
"com.typesafe.play" %% "play-cache" % playVersion,
"org.flywaydb" %% "flyway-play" % "2.0.1",
"org.scalikejdbc" %% "scalikejdbc" % "2.2.7",
"org.scalikejdbc" %% "scalikejdbc-config" % "2.2.7",
"org.scalikejdbc" %% "scalikejdbc-syntax-support-macro" % "2.2.7",
"org.scalikejdbc" %% "scalikejdbc-test" % "2.2.7" % "test",
"org.scalikejdbc" %% "scalikejdbc-play-initializer" % "2.4.0",
"org.scalikejdbc" %% "scalikejdbc-play-dbapi-adapter" % "2.4.0",
"org.scalikejdbc" %% "scalikejdbc-play-fixture" % "2.4.0"
),
publish := { },
publishArtifact := false,
packagedArtifacts := Map.empty,
publishTo <<=(version)(appPublishTo),
pomExtra := appPomExtra
)
.dependsOn(core, social)
lazy val root = Project("root", base = file("."))
.settings(baseSettings)
.settings(
publish := { },
publishArtifact := false,
packagedArtifacts := Map.empty,
publishTo <<=(version)(appPublishTo),
pomExtra := appPomExtra
).aggregate(core, test, sample, social, socialSample)
}
| tototoshi/play2-auth | project/Build.scala | Scala | apache-2.0 | 7,616 |
class ann(i: Int) extends scala.annotation.Annotation
class cfann(x: String) extends annotation.ConstantAnnotation
// annotations on abstract types
abstract class C2[@deprecated
@ann(1) T <: Number,
V]
abstract class C3 {
@ann(2) type X <: Number
}
object Test {
// bug #1028
val x = 1
@ann(x) val a = ()
@ann({val yy = 2; yy}) val b = ()
val bb: Int @ann({val yy = 2; yy}) = 10
def c: Int @ann(x) = 1
def d: String @ann({val z = 0; z - 1}) = "2"
def e[@deprecated T, U](x: T) = x
//bug #1214
val y = new (Integer @ann(0))(2)
import scala.beans.BeanProperty
// bug #637
trait S { def getField(): Int }
class O extends S { @BeanProperty val field = 0 }
// bug #1070
trait T { @BeanProperty var field = 1 }
// annotation on annotation constructor
@(ann @ann(100))(200) def foo() = 300
// #2984
private final val NAMESPACE = "/info"
@cfann(x = NAMESPACE + "/index") def index = "success"
}
// test forward references to getters / setters
class BeanPropertyTests {
@scala.beans.BeanProperty lazy val lv1 = 0
def foo(): Unit = {
val bp1 = new BeanPropertyTests1
println(lv1)
println(getLv1())
println(bp1.getLv2())
println(getV1())
setV1(10)
bp1.setV2(100)
}
@scala.beans.BeanProperty var v1 = 0
}
class BeanPropertyTests1 {
@scala.beans.BeanProperty lazy val lv2 = "0"
@scala.beans.BeanProperty var v2 = 0
}
// test mixin of getters / setters, and implementing abstract
// methods using @BeanProperty
class C extends T with BeanF {
def foo(): Unit = {
setF("doch!")
setG(true)
this.getF()
}
}
trait T {
@scala.beans.BeanProperty var f = "nei"
@scala.beans.BooleanBeanProperty var g = false
}
trait BeanF {
def getF(): String
def setF(n: String): Unit
def isG(): Boolean
def setG(nb: Boolean): Unit
}
class Ann3(arr: Array[String]) extends annotation.ConstantAnnotation
class Ann4(i: Int) extends annotation.ConstantAnnotation
class Ann5(value: Class[_]) extends annotation.ConstantAnnotation
object Test3 {
final val i = 1083
final val cls = classOf[String]
}
class Test4 {
@Ann3(arr = Array("dlkfj", "DSF"))
@Ann4(i = 2908)
@Ann5(value = classOf[Int])
def foo: Unit = {}
@Ann4(i = Test3.i)
@Ann5(Test3.cls)
def bar: Unit = {}
}
| lrytz/scala | test/files/pos/annotations.scala | Scala | apache-2.0 | 2,317 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.util.{Locale, Map => JMap, TimeZone}
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.parquet.hadoop.api.{InitContext, ReadSupport}
import org.apache.parquet.hadoop.api.ReadSupport.ReadContext
import org.apache.parquet.io.api.RecordMaterializer
import org.apache.parquet.schema._
import org.apache.parquet.schema.Type.Repetition
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* A Parquet [[ReadSupport]] implementation for reading Parquet records as Catalyst
* [[UnsafeRow]]s.
*
 * The API of [[ReadSupport]] is a little over-complicated for historical reasons. In older
 * versions of parquet-mr (say 1.6.0rc3 and prior), [[ReadSupport]] needed to be instantiated and
 * initialized twice, once on the driver side and once on the executor side. The [[init()]] method
 * was for driver-side initialization, while [[prepareForRead()]] was for the executor side.
 * However, starting from parquet-mr 1.6.0, this is no longer the case: [[ReadSupport]] is only
 * instantiated and initialized on the executor side. So, theoretically, it is now fine to combine
 * these two methods into a single initialization method. The only reason to still have both here
 * is backwards-compatibility with the parquet-mr API.
 *
 * For this reason, we no longer rely on [[ReadContext]] to pass the requested schema from
 * [[init()]] to [[prepareForRead()]], but use a private `var` for simplicity.
*/
class ParquetReadSupport(val convertTz: Option[TimeZone],
enableVectorizedReader: Boolean)
extends ReadSupport[UnsafeRow] with Logging {
private var catalystRequestedSchema: StructType = _
def this() {
// We need a zero-arg constructor for SpecificParquetRecordReaderBase. But that is only
// used in the vectorized reader, where we get the convertTz value directly, and the value here
// is ignored.
this(None, enableVectorizedReader = true)
}
/**
* Called on executor side before [[prepareForRead()]] and instantiating actual Parquet record
* readers. Responsible for figuring out Parquet requested schema used for column pruning.
*/
override def init(context: InitContext): ReadContext = {
val conf = context.getConfiguration
catalystRequestedSchema = {
val schemaString = conf.get(ParquetReadSupport.SPARK_ROW_REQUESTED_SCHEMA)
assert(schemaString != null, "Parquet requested schema not set.")
StructType.fromString(schemaString)
}
val caseSensitive = conf.getBoolean(SQLConf.CASE_SENSITIVE.key,
SQLConf.CASE_SENSITIVE.defaultValue.get)
val schemaPruningEnabled = conf.getBoolean(SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.key,
SQLConf.NESTED_SCHEMA_PRUNING_ENABLED.defaultValue.get)
val parquetFileSchema = context.getFileSchema
val parquetClippedSchema = ParquetReadSupport.clipParquetSchema(parquetFileSchema,
catalystRequestedSchema, caseSensitive)
    // We pass two schemas to ParquetRecordMaterializer:
// - parquetRequestedSchema: the schema of the file data we want to read
// - catalystRequestedSchema: the schema of the rows we want to return
// The reader is responsible for reconciling the differences between the two.
val parquetRequestedSchema = if (schemaPruningEnabled && !enableVectorizedReader) {
// Parquet-MR reader requires that parquetRequestedSchema include only those fields present
// in the underlying parquetFileSchema. Therefore, we intersect the parquetClippedSchema
// with the parquetFileSchema
ParquetReadSupport.intersectParquetGroups(parquetClippedSchema, parquetFileSchema)
.map(groupType => new MessageType(groupType.getName, groupType.getFields))
.getOrElse(ParquetSchemaConverter.EMPTY_MESSAGE)
} else {
      // Spark's vectorized reader currently only supports atomic types. It also skips fields in
      // parquetRequestedSchema which are not present in the file.
parquetClippedSchema
}
logDebug(
s"""Going to read the following fields from the Parquet file with the following schema:
|Parquet file schema:
|$parquetFileSchema
|Parquet clipped schema:
|$parquetClippedSchema
|Parquet requested schema:
|$parquetRequestedSchema
|Catalyst requested schema:
|${catalystRequestedSchema.treeString}
""".stripMargin)
new ReadContext(parquetRequestedSchema, Map.empty[String, String].asJava)
}
/**
* Called on executor side after [[init()]], before instantiating actual Parquet record readers.
* Responsible for instantiating [[RecordMaterializer]], which is used for converting Parquet
* records to Catalyst [[UnsafeRow]]s.
*/
override def prepareForRead(
conf: Configuration,
keyValueMetaData: JMap[String, String],
fileSchema: MessageType,
readContext: ReadContext): RecordMaterializer[UnsafeRow] = {
val parquetRequestedSchema = readContext.getRequestedSchema
new ParquetRecordMaterializer(
parquetRequestedSchema,
ParquetReadSupport.expandUDT(catalystRequestedSchema),
new ParquetToSparkSchemaConverter(conf),
convertTz)
}
}
object ParquetReadSupport {
val SPARK_ROW_REQUESTED_SCHEMA = "org.apache.spark.sql.parquet.row.requested_schema"
val SPARK_METADATA_KEY = "org.apache.spark.sql.parquet.row.metadata"
/**
   * Tailors `parquetSchema` according to `catalystSchema` by removing column paths that don't
   * exist in `catalystSchema`, and adding those that exist only in `catalystSchema`.
*/
def clipParquetSchema(
parquetSchema: MessageType,
catalystSchema: StructType,
caseSensitive: Boolean = true): MessageType = {
val clippedParquetFields = clipParquetGroupFields(
parquetSchema.asGroupType(), catalystSchema, caseSensitive)
if (clippedParquetFields.isEmpty) {
ParquetSchemaConverter.EMPTY_MESSAGE
} else {
Types
.buildMessage()
.addFields(clippedParquetFields: _*)
.named(ParquetSchemaConverter.SPARK_PARQUET_SCHEMA_NAME)
}
}
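
  // Illustrative sketch (hypothetical schemas, not taken from this codebase): given a Parquet file
  // schema `message m { required group a { required int32 b; required int32 c } }` and a Catalyst
  // schema that requests only `a.b` plus a new column `d`, the clipped schema keeps `a` with the
  // single field `b` and appends `d` converted from its Catalyst definition.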
private def clipParquetType(
parquetType: Type, catalystType: DataType, caseSensitive: Boolean): Type = {
catalystType match {
case t: ArrayType if !isPrimitiveCatalystType(t.elementType) =>
// Only clips array types with nested type as element type.
clipParquetListType(parquetType.asGroupType(), t.elementType, caseSensitive)
case t: MapType
if !isPrimitiveCatalystType(t.keyType) ||
!isPrimitiveCatalystType(t.valueType) =>
// Only clips map types with nested key type or value type
clipParquetMapType(parquetType.asGroupType(), t.keyType, t.valueType, caseSensitive)
case t: StructType =>
clipParquetGroup(parquetType.asGroupType(), t, caseSensitive)
case _ =>
        // UDTs and primitive types are not clipped. For UDTs, a clipped version might not be able
        // to be mapped to the desired user-space types, so UDTs shouldn't participate in schema merging.
parquetType
}
}
/**
* Whether a Catalyst [[DataType]] is primitive. Primitive [[DataType]] is not equivalent to
* [[AtomicType]]. For example, [[CalendarIntervalType]] is primitive, but it's not an
* [[AtomicType]].
*/
private def isPrimitiveCatalystType(dataType: DataType): Boolean = {
dataType match {
case _: ArrayType | _: MapType | _: StructType => false
case _ => true
}
}
/**
* Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[ArrayType]]. The element type
* of the [[ArrayType]] should also be a nested type, namely an [[ArrayType]], a [[MapType]], or a
* [[StructType]].
*/
private def clipParquetListType(
parquetList: GroupType, elementType: DataType, caseSensitive: Boolean): Type = {
    // Precondition: this method should only be called for lists with nested element types.
assert(!isPrimitiveCatalystType(elementType))
    // An unannotated repeated group should be interpreted as a required list of required elements,
    // so the list element type is just the group itself. Clip it.
if (parquetList.getOriginalType == null && parquetList.isRepetition(Repetition.REPEATED)) {
clipParquetType(parquetList, elementType, caseSensitive)
} else {
assert(
parquetList.getOriginalType == OriginalType.LIST,
"Invalid Parquet schema. " +
"Original type of annotated Parquet lists must be LIST: " +
parquetList.toString)
assert(
parquetList.getFieldCount == 1 && parquetList.getType(0).isRepetition(Repetition.REPEATED),
"Invalid Parquet schema. " +
"LIST-annotated group should only have exactly one repeated field: " +
parquetList)
      // Precondition: this method should only be called for lists with nested element types.
assert(!parquetList.getType(0).isPrimitive)
val repeatedGroup = parquetList.getType(0).asGroupType()
      // If the repeated field is a group with multiple fields, or if it is a group with a single
      // field and is named either "array" or with the LIST-annotated group's name plus a "_tuple"
      // suffix, then the repeated type itself is the element type and the elements are required.
      // In that case, build a new LIST-annotated group with the clipped `repeatedGroup` as its
      // element type and only field.
if (
repeatedGroup.getFieldCount > 1 ||
repeatedGroup.getName == "array" ||
repeatedGroup.getName == parquetList.getName + "_tuple"
) {
Types
.buildGroup(parquetList.getRepetition)
.as(OriginalType.LIST)
.addField(clipParquetType(repeatedGroup, elementType, caseSensitive))
.named(parquetList.getName)
} else {
// Otherwise, the repeated field's type is the element type with the repeated field's
// repetition.
Types
.buildGroup(parquetList.getRepetition)
.as(OriginalType.LIST)
.addField(
Types
.repeatedGroup()
.addField(clipParquetType(repeatedGroup.getType(0), elementType, caseSensitive))
.named(repeatedGroup.getName))
.named(parquetList.getName)
}
}
}
/**
* Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[MapType]]. Either key type or
* value type of the [[MapType]] must be a nested type, namely an [[ArrayType]], a [[MapType]], or
* a [[StructType]].
*/
private def clipParquetMapType(
parquetMap: GroupType,
keyType: DataType,
valueType: DataType,
caseSensitive: Boolean): GroupType = {
    // Precondition: this method only handles maps with nested key types or value types.
assert(!isPrimitiveCatalystType(keyType) || !isPrimitiveCatalystType(valueType))
val repeatedGroup = parquetMap.getType(0).asGroupType()
val parquetKeyType = repeatedGroup.getType(0)
val parquetValueType = repeatedGroup.getType(1)
val clippedRepeatedGroup =
Types
.repeatedGroup()
.as(repeatedGroup.getOriginalType)
.addField(clipParquetType(parquetKeyType, keyType, caseSensitive))
.addField(clipParquetType(parquetValueType, valueType, caseSensitive))
.named(repeatedGroup.getName)
Types
.buildGroup(parquetMap.getRepetition)
.as(parquetMap.getOriginalType)
.addField(clippedRepeatedGroup)
.named(parquetMap.getName)
}
/**
* Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[StructType]].
*
* @return A clipped [[GroupType]], which has at least one field.
   * @note Parquet doesn't allow creating empty [[GroupType]] instances except for the empty
   *       [[MessageType]], even though it's legal to construct an empty requested schema for
   *       column pruning.
*/
private def clipParquetGroup(
parquetRecord: GroupType, structType: StructType, caseSensitive: Boolean): GroupType = {
val clippedParquetFields = clipParquetGroupFields(parquetRecord, structType, caseSensitive)
Types
.buildGroup(parquetRecord.getRepetition)
.as(parquetRecord.getOriginalType)
.addFields(clippedParquetFields: _*)
.named(parquetRecord.getName)
}
/**
* Clips a Parquet [[GroupType]] which corresponds to a Catalyst [[StructType]].
*
* @return A list of clipped [[GroupType]] fields, which can be empty.
*/
private def clipParquetGroupFields(
parquetRecord: GroupType, structType: StructType, caseSensitive: Boolean): Seq[Type] = {
val toParquet = new SparkToParquetSchemaConverter(writeLegacyParquetFormat = false)
if (caseSensitive) {
val caseSensitiveParquetFieldMap =
parquetRecord.getFields.asScala.map(f => f.getName -> f).toMap
structType.map { f =>
caseSensitiveParquetFieldMap
.get(f.name)
.map(clipParquetType(_, f.dataType, caseSensitive))
.getOrElse(toParquet.convertField(f))
}
} else {
// Do case-insensitive resolution only if in case-insensitive mode
val caseInsensitiveParquetFieldMap =
parquetRecord.getFields.asScala.groupBy(_.getName.toLowerCase(Locale.ROOT))
structType.map { f =>
caseInsensitiveParquetFieldMap
.get(f.name.toLowerCase(Locale.ROOT))
.map { parquetTypes =>
if (parquetTypes.size > 1) {
// Need to fail if there is ambiguity, i.e. more than one field is matched
val parquetTypesString = parquetTypes.map(_.getName).mkString("[", ", ", "]")
throw new RuntimeException(s"""Found duplicate field(s) "${f.name}": """ +
s"$parquetTypesString in case-insensitive mode")
} else {
clipParquetType(parquetTypes.head, f.dataType, caseSensitive)
}
}.getOrElse(toParquet.convertField(f))
}
}
}
/**
* Computes the structural intersection between two Parquet group types.
* This is used to create a requestedSchema for ReadContext of Parquet-MR reader.
   * The Parquet-MR reader does not support nested field access to a non-existent field,
   * whereas the parquet library supports reading a non-existent field via regular field access.
*/
private def intersectParquetGroups(
groupType1: GroupType, groupType2: GroupType): Option[GroupType] = {
val fields =
groupType1.getFields.asScala
.filter(field => groupType2.containsField(field.getName))
.flatMap {
case field1: GroupType =>
val field2 = groupType2.getType(field1.getName)
if (field2.isPrimitive) {
None
} else {
intersectParquetGroups(field1, field2.asGroupType)
}
case field1 => Some(field1)
}
if (fields.nonEmpty) {
Some(groupType1.withNewFields(fields.asJava))
} else {
None
}
}
def expandUDT(schema: StructType): StructType = {
def expand(dataType: DataType): DataType = {
dataType match {
case t: ArrayType =>
t.copy(elementType = expand(t.elementType))
case t: MapType =>
t.copy(
keyType = expand(t.keyType),
valueType = expand(t.valueType))
case t: StructType =>
val expandedFields = t.fields.map(f => f.copy(dataType = expand(f.dataType)))
t.copy(fields = expandedFields)
case t: UserDefinedType[_] =>
t.sqlType
case t =>
t
}
}
expand(schema).asInstanceOf[StructType]
}
}
| aosagie/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetReadSupport.scala | Scala | apache-2.0 | 16,598 |
package ch.epfl.bluebrain.nexus.iam.index
import java.util.concurrent.ConcurrentHashMap
import java.util.function.BiFunction
import cats.Applicative
import ch.epfl.bluebrain.nexus.iam.acls._
import ch.epfl.bluebrain.nexus.iam.config.AppConfig.{HttpConfig, PermissionsConfig}
import ch.epfl.bluebrain.nexus.iam.index.InMemoryAclsTree._
import ch.epfl.bluebrain.nexus.iam.syntax._
import ch.epfl.bluebrain.nexus.iam.types.Identity
import ch.epfl.bluebrain.nexus.rdf.Iri.Path
import ch.epfl.bluebrain.nexus.rdf.Iri.Path.Segment
import scala.annotation.tailrec
/**
 * An in-memory implementation of [[AclsIndex]]. It uses a tree structure, stored in the ''tree'' map.
 * Every key in the map is a [[Path]] and its value is the set of children [[Path]]s. In this way one can
 * navigate down the tree.
*
* @param tree the data structure used to build the tree with the parent paths and the children paths
* @param acls a data structure used to store the ACLs for a path
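 *
 * For illustration (hypothetical paths, not part of this class): after replacing the ACL stored
 * under the path ''/org/project'', the ''tree'' map contains ''/ -> Set(/org)'' and
 * ''/org -> Set(/org/project)'', while the ''acls'' map holds the resource under ''/org/project''.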
*/
class InMemoryAclsTree[F[_]] private (tree: ConcurrentHashMap[Path, Set[Path]], acls: ConcurrentHashMap[Path, Resource])(
implicit F: Applicative[F],
pc: PermissionsConfig,
http: HttpConfig)
extends AclsIndex[F] {
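  // A '*' segment in a requested path acts as a wildcard matching any single child segment.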
private val any = "*"
override def replace(path: Path, aclResource: Resource): F[Boolean] = {
@tailrec
def inner(p: Path, children: Set[Path]): Unit = {
tree.merge(p, children, (current, _) => current ++ children)
if (!(p.isEmpty || p == Path./))
inner(p.parent, Set(p))
}
val rev = aclResource.rev
val f: BiFunction[Resource, Resource, Resource] = (curr, _) =>
curr match {
case c if rev > c.rev => aclResource
case other => other
}
val updated = acls.merge(path, aclResource, f)
val update = updated == aclResource
if (update) inner(path, Set.empty)
F.pure(update)
}
override def get(path: Path, ancestors: Boolean, self: Boolean)(
implicit identities: Set[Identity]): F[AccessControlLists] = {
def removeNotOwn(currentAcls: AccessControlLists): AccessControlLists = {
def containsAclsRead(acl: AccessControlList): Boolean =
acl.value.exists { case (ident, perms) => identities.contains(ident) && perms.contains(read) }
val (_, result) = currentAcls.sorted.value
.foldLeft(Set.empty[Path] -> AccessControlLists.empty) {
case ((ownPaths, acc), entry @ (p, _)) if ownPaths.exists(p.startsWith) => ownPaths -> (acc + entry)
case ((ownPaths, acc), entry @ (p, acl)) if containsAclsRead(acl.value) => ownPaths + p -> (acc + entry)
case ((ownPaths, acc), (p, acl)) => ownPaths -> (acc + (p -> acl.map(_.filter(identities))))
}
result
}
F.pure {
if (self) {
val result = if (ancestors) getWithAncestors(path) else get(path)
result.filter(identities).removeEmpty
} else {
val result = removeNotOwn(getWithAncestors(path))
if (ancestors)
result.removeEmpty
else
AccessControlLists(result.value.filterKeys(_.size == path.size)).removeEmpty
}
}
}
private def getWithAncestors(path: Path): AccessControlLists = {
val currentAcls = get(path)
if (path.isEmpty || path == Path./) currentAcls
else currentAcls ++ getWithAncestors(path.parent)
}
private def pathOf(segments: Vector[String]): Path =
if (segments.isEmpty) Path./ else segments.foldLeft[Path](Path.Empty)(_ / _)
private def get(path: Path): AccessControlLists = {
val segments = path.segments.toVector
def inner(toConsume: Vector[String]): AccessControlLists = {
if (toConsume.contains(any)) {
val consumed = toConsume.takeWhile(_ != any)
val path = pathOf(consumed)
tree.getSafe(path) match {
case Some(children) if consumed.size + 1 == segments.size =>
AccessControlLists(children.foldLeft(Map.empty[Path, Resource]) { (acc, p) =>
acls.getSafe(p).map(r => acc + (p -> r)).getOrElse(acc)
})
case Some(children) =>
children.foldLeft(AccessControlLists.empty) {
case (acc, Segment(head, _)) =>
val toConsumeNew = (consumed :+ head) ++ segments.takeRight(segments.size - 1 - consumed.size)
acc ++ inner(toConsumeNew)
case (acc, _) => acc
}
case None => initialAcls(path)
}
} else {
val path = pathOf(toConsume)
acls.getSafe(path).map(r => AccessControlLists(path -> r)).getOrElse(initialAcls(path))
}
}
inner(segments)
}
private def initialAcls(path: Path): AccessControlLists =
if (path == Path./) AccessControlLists(Path./ -> defaultResourceOnSlash) else AccessControlLists.empty
}
object InMemoryAclsTree {
private[index] implicit class ConcurrentHashMapSyntax[K, V](private val map: ConcurrentHashMap[K, V]) extends AnyVal {
def getSafe(key: K): Option[V] = Option(map.get(key))
}
/**
* Constructs an in memory implementation of [[AclsIndex]]
*
*/
final def apply[F[_]: Applicative](implicit pc: PermissionsConfig, http: HttpConfig): InMemoryAclsTree[F] =
new InMemoryAclsTree(new ConcurrentHashMap[Path, Set[Path]](), new ConcurrentHashMap[Path, Resource]())
}
| hygt/nexus-iam | src/main/scala/ch/epfl/bluebrain/nexus/iam/index/InMemoryAclsTree.scala | Scala | apache-2.0 | 5,363 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers.oauth2
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.exceptions.{ ProfileRetrievalException, UnexpectedResponseException }
import com.mohiva.play.silhouette.impl.providers.OAuth2Provider._
import com.mohiva.play.silhouette.impl.providers.SocialProfileBuilder._
import com.mohiva.play.silhouette.impl.providers._
import com.mohiva.play.silhouette.impl.providers.oauth2.FoursquareProvider._
import play.api.libs.json.Json
import play.api.libs.ws.{ WSRequest, WSResponse }
import play.api.test.{ FakeRequest, WithApplication }
import test.Helper
import scala.concurrent.Future
/**
* Test case for the [[FoursquareProvider]] class.
*/
class FoursquareProviderSpec extends OAuth2ProviderSpec {
"The `withSettings` method" should {
"create a new instance with customized settings" in new WithApplication with Context {
val s = provider.withSettings { s =>
s.copy(accessTokenURL = "new-access-token-url")
}
s.settings.accessTokenURL must be equalTo "new-access-token-url"
}
}
"The `authenticate` method" should {
"fail with UnexpectedResponseException if OAuth2Info can be build because of an unexpected response" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
response.json returns Json.obj()
requestHolder.withHeaders(any) returns requestHolder
requestHolder.post[Map[String, Seq[String]]](any)(any) returns Future.successful(response)
httpLayer.url(oAuthSettings.accessTokenURL) returns requestHolder
stateProvider.validate(any, any) returns Future.successful(state)
failed[UnexpectedResponseException](provider.authenticate()) {
case e => e.getMessage must startWith(InvalidInfoFormat.format(provider.id, ""))
}
}
"return the auth info" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
response.json returns oAuthInfo
requestHolder.withHeaders(any) returns requestHolder
requestHolder.post[Map[String, Seq[String]]](any)(any) returns Future.successful(response)
httpLayer.url(oAuthSettings.accessTokenURL) returns requestHolder
stateProvider.validate(any, any) returns Future.successful(state)
authInfo(provider.authenticate()) {
case authInfo => authInfo must be equalTo oAuthInfo.as[OAuth2Info]
}
}
}
"The `retrieveProfile` method" should {
"fail with ProfileRetrievalException if API returns error" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.error.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case e => e.getMessage must equalTo(SpecifiedProfileError.format(
provider.id,
400,
Some("param_error"),
Some("Must provide a valid user ID or 'self.'")))
}
}
"fail with ProfileRetrievalException if an unexpected error occurred" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json throws new RuntimeException("")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
failed[ProfileRetrievalException](provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case e => e.getMessage must equalTo(UnspecifiedProfileError.format(provider.id))
}
}
"return the social profile" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.success.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
profile(provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("[email protected]"),
avatarURL = Some("https://irs0.4sqi.net/img/user/100x100/blank_girl.png")
)
}
}
"return the social profile if API is deprecated" in new WithApplication with Context {
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.deprecated.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
profile(provider.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("[email protected]"),
avatarURL = Some("https://irs0.4sqi.net/img/user/100x100/blank_girl.png")
)
}
}
"handle the custom API version property" in new WithApplication with Context {
val customProperties = Map(APIVersion -> "20120101")
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.success.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", "20120101")) returns requestHolder
profile(provider.withSettings(_.copy(customProperties = customProperties))
.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("[email protected]"),
avatarURL = Some("https://irs0.4sqi.net/img/user/100x100/blank_girl.png")
)
}
}
"handle the custom avatar resolution property" in new WithApplication with Context {
val customProperties = Map(AvatarResolution -> "150x150")
val requestHolder = mock[WSRequest]
val response = mock[WSResponse]
response.json returns Helper.loadJson("providers/oauth2/foursquare.success.json")
requestHolder.get() returns Future.successful(response)
httpLayer.url(API.format("my.access.token", DefaultAPIVersion)) returns requestHolder
profile(provider.withSettings(_.copy(customProperties = customProperties))
.retrieveProfile(oAuthInfo.as[OAuth2Info])) {
case p =>
p must be equalTo new CommonSocialProfile(
loginInfo = LoginInfo(provider.id, "13221052"),
firstName = Some("Apollonia"),
lastName = Some("Vanova"),
email = Some("[email protected]"),
avatarURL = Some("https://irs0.4sqi.net/img/user/150x150/blank_girl.png")
)
}
}
}
/**
* Defines the context for the abstract OAuth2 provider spec.
*
* @return The Context to use for the abstract OAuth2 provider spec.
*/
override protected def context: OAuth2ProviderSpecContext = new Context {}
/**
* The context.
*/
trait Context extends OAuth2ProviderSpecContext {
/**
* The OAuth2 settings.
*/
lazy val oAuthSettings = spy(OAuth2Settings(
authorizationURL = Some("https://foursquare.com/oauth2/authenticate"),
accessTokenURL = "https://foursquare.com/oauth2/access_token",
redirectURL = "https://www.mohiva.com",
clientID = "my.client.id",
clientSecret = "my.client.secret"))
/**
* The OAuth2 info returned by Foursquare.
*
* @see https://developer.foursquare.com/overview/auth
*/
override lazy val oAuthInfo = Helper.loadJson("providers/oauth2/foursquare.access.token.json")
/**
* The provider to test.
*/
lazy val provider = new FoursquareProvider(httpLayer, stateProvider, oAuthSettings)
}
}
| cemcatik/play-silhouette | silhouette/test/com/mohiva/play/silhouette/impl/providers/oauth2/FoursquareProviderSpec.scala | Scala | apache-2.0 | 9,378 |
package dtable.dblayer
import shared.model.{Account, User}
object AccountDB {
import slick.jdbc.SQLiteProfile.api._
val userDb = UserDB
class Accounts(tag: Tag) extends Table[Account](tag, "accounts") {
import slick.jdbc.SQLiteProfile.api._
def * = (user, token, active) <> (Account.tupled, Account.unapply)
def user = column[Int]("user")
def token = column[String]("token")
def active = column[Boolean]("active")
}
val accounts = TableQuery[Accounts]
def all() = {
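    // Joins every user with the account rows whose `user` column equals that user's id.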
val res = for {
user <- userDb.users
account <- accounts if user.id === account.user
} yield (user, account)
call(res)
}
}
| SergiiPolokhalo/DTable | server/src/main/scala/dtable/dblayer/AccountDB.scala | Scala | apache-2.0 | 654 |
package domino
import org.osgi.framework.{ ServiceReference, BundleContext }
/**
* Wrapper for a service reference which adds methods to resolve the corresponding service.
*
* @constructor Wraps the given service reference.
* @param ref Wrapped service reference
* @param bundleContext Bundle context for resolving the service
*/
class RichServiceReference[S <: AnyRef](val ref: ServiceReference[S], bundleContext: BundleContext) {
/**
* Returns the service for this reference if available.
*/
def service: Option[S] = Option(bundleContext.getService(ref))
/**
* Executes the given function with a service obtained from this reference or with `None`.
*
* When the function returns, the service is released using [[org.osgi.framework.BundleContext#ungetService]].
*
* @param f function that uses the service
* @return function result
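   *
   * Illustrative usage (hypothetical variable and service type, not part of this class):
   * {{{
   * richRef.withService {
   *   case Some(service) => service.toString
   *   case None          => "service is gone"
   * }
   * }}}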
*/
def withService[R](f: Option[S] => R): R = {
val service = Option(bundleContext.getService(ref))
try f(service) finally bundleContext.ungetService(ref)
}
}
| helgoboss/domino | src/main/scala/domino/RichServiceReference.scala | Scala | mit | 1,047 |
package wrappers.scala {
import scalan._
import special.wrappers.WrappersModule
trait WOptions extends Base { self: WrappersModule =>
@External("Option") @ContainerType @FunctorType @Liftable @WithMethodCallRecognizers trait WOption[A] extends Def[WOption[A]] {
implicit def eA: Elem[A];
@External def fold[B](ifEmpty: Ref[Thunk[B]], f: Ref[scala.Function1[A, B]]): Ref[B];
@External def isEmpty: Ref[Boolean];
@External def isDefined: Ref[Boolean];
@External def filter(p: Ref[scala.Function1[A, Boolean]]): Ref[WOption[A]];
@External def flatMap[B](f: Ref[scala.Function1[A, WOption[B]]]): Ref[WOption[B]];
@External def map[B](f: Ref[scala.Function1[A, B]]): Ref[WOption[B]];
@External def getOrElse[B](default: Ref[Thunk[B]]): Ref[B];
@External def get: Ref[A]
};
trait WOptionCompanion
}
} | ScorexFoundation/sigmastate-interpreter | library/src/main/scala/wrappers/scala/WOptions.scala | Scala | mit | 870 |
package quisp.enums
import spray.json.{JsString, JsValue, JsonFormat}
/**
 * Enum classes are represented by implementations of this trait.
 * A companion object will contain case objects, one for each valid value.
 * The simplified class name of each implementing object is returned by toString().
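 *
 * For illustration (hypothetical enum, not defined in this file):
 * {{{
 * sealed trait Align extends EnumTrait
 * object Align {
 *   case object Left extends Align
 *   case object Right extends Align
 *   implicit val fmt: JsonFormat[Align] = EnumTrait.jsFormat[Align]
 * }
 * }}}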
* @author rodneykinney
*/
trait EnumTrait {
override def toString = this.getClass.getSimpleName.stripSuffix("$")
}
/**
* Use toString() to represent enum class instances
*/
object EnumTrait {
def jsFormat[T <: EnumTrait]: JsonFormat[T] = new JsonFormat[T] {
def write(data: T) = JsString(data.toString)
def read(json: JsValue): T = ???
}
}
| rodneykinney/quisp | src/main/scala/quisp/enums/EnumTrait.scala | Scala | apache-2.0 | 688 |
package blended.container.context.impl.internal
import java.io.File
import java.util.Properties
import scala.collection.JavaConverters._
import blended.container.context.api.ContainerContext
import blended.updater.config.{ LocalOverlays, OverlayRef, RuntimeConfig }
import blended.util.logging.Logger
import com.typesafe.config.{ Config, ConfigFactory, ConfigParseOptions }
object ContainerContextImpl {
private val PROP_BLENDED_HOME = "blended.home"
private val CONFIG_DIR = "etc"
}
class ContainerContextImpl() extends ContainerContext {
import ContainerContextImpl._
private[this] val log = Logger[ContainerContextImpl]
override def getContainerDirectory() = new File(System.getProperty("blended.home")).getAbsolutePath
override def getContainerHostname(): String = {
try {
val localMachine = java.net.InetAddress.getLocalHost()
localMachine.getCanonicalHostName()
} catch {
case uhe: java.net.UnknownHostException => "UNKNOWN"
}
}
override def getContainerLogDirectory(): String = containerLogDir
override def getProfileDirectory(): String = profileDir
val brandingProperties: Map[String, String] = {
val props = (try {
import blended.launcher.runtime.Branding
      // it is possible that this optional class is not available at runtime,
      // e.g. when started with another launcher
      log.debug("About to read launcher branding properties")
Option(Branding.getProperties())
} catch {
case e: NoClassDefFoundError => None
}) getOrElse {
log.warn("Could not read launcher branding properies")
new Properties()
}
props.entrySet().asScala.map(e => e.getKey().toString() -> e.getValue().toString()).toMap
}
private[this] lazy val profileDir: String = {
val profileHome =
brandingProperties.get(RuntimeConfig.Properties.PROFILE_DIR) orElse {
log.warn("Could not read the profile directory from read launcher branding properties")
None
}
val dir = profileHome getOrElse {
Option(System.getProperty(PROP_BLENDED_HOME)) getOrElse {
Option(System.getProperty("user.dir")) getOrElse {
"."
}
}
}
val configDir = new File(dir)
if (!configDir.exists()) {
log.error(s"Container directory [${dir}] does not exist.")
} else if (!configDir.isDirectory() || !configDir.canRead()) {
log.error(s"Container directory [${dir}] is not readable.")
}
val absDir = configDir.getAbsolutePath
System.setProperty("blended.container.home", absDir)
absDir
}
private[this] lazy val containerLogDir: String = {
val f = new File(getContainerDirectory() + "/log")
f.getAbsolutePath()
}
override def getContainerConfigDirectory() = new File(getContainerDirectory(), CONFIG_DIR).getAbsolutePath
override def getProfileConfigDirectory(): String = new File(getProfileDirectory(), CONFIG_DIR).getAbsolutePath
private[this] lazy val ctConfig : Config = {
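    // Effective resolution order (highest precedence first): materialized overlay overrides,
    // then <profile>/etc/application.conf, then system properties, then environment variables.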
val sysProps = ConfigFactory.systemProperties()
val envProps = ConfigFactory.systemEnvironment()
val branding = brandingProperties
val overlayConfig = branding.get(RuntimeConfig.Properties.PROFILE_DIR) match {
case Some(profileDir) =>
branding.get(RuntimeConfig.Properties.OVERLAYS) match {
case Some(overlays) =>
val overlayRefs = overlays.split("[,]").toList.map(_.split("[:]", 2)).flatMap {
case Array(n, v) => Some(OverlayRef(n, v))
case x =>
log.debug("Unsupported overlay: " + x.mkString(":"))
None
}.toSet
if (overlayRefs.isEmpty) None
else {
val dir = LocalOverlays.materializedDir(overlayRefs, new File(profileDir))
val confFile = new File(dir, s"$CONFIG_DIR/application_overlay.conf")
if (confFile.exists()) {
log.debug(s"About to read extra application overlay override file: ${confFile}")
Some(confFile)
} else None
}
case _ => None
}
case _ => None
}
log.debug(s"Overlay config: ${overlayConfig}")
val config = overlayConfig match {
case Some(oc) => ConfigFactory.parseFile(oc, ConfigParseOptions.defaults().setAllowMissing(false))
case _ => ConfigFactory.empty()
}
config.withFallback(ConfigFactory.parseFile(
new File(getProfileConfigDirectory(), "application.conf"), ConfigParseOptions.defaults().setAllowMissing(false)
))
.withFallback(sysProps)
.withFallback(envProps)
.resolve()
}
override def getContainerConfig() = ctConfig
}
| lefou/blended | blended.container.context.impl/src/main/scala/blended/container/context/impl/internal/ContainerContextImpl.scala | Scala | apache-2.0 | 4,671 |
package com.seanshubin.web.sync.domain
import java.nio.file.Path
trait Logger {
def summary(logPath: Path, downloadResults: Seq[DownloadResult])
}
| SeanShubin/web-sync | domain/src/main/scala/com/seanshubin/web/sync/domain/Logger.scala | Scala | unlicense | 151 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
package imports
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
/**
* @author Alexander Podkhalyuzin
* Date: 20.02.2008
*/
trait ScImportSelector extends ScalaPsiElement {
def importedName: Option[String]
def reference: Option[ScStableCodeReferenceElement]
def deleteSelector(): Unit
def isAliasedImport: Boolean
} | loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/imports/ScImportSelector.scala | Scala | apache-2.0 | 465 |
import boxity.BlackboxParserMacroSuite
import suites.IncorrectParserMacros
class BlackboxMetaIncorrectMacros extends BlackboxParserMacroSuite with IncorrectParserMacros
| Duhemm/parsermacros | tests/src/test/scala/BlackboxMetaIncorrectMacros.scala | Scala | bsd-3-clause | 170 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator.group
import java.util.Optional
import kafka.common.OffsetAndMetadata
import kafka.server.{DelayedOperationPurgatory, HostedPartition, KafkaConfig, ReplicaManager, RequestLocal}
import kafka.utils._
import kafka.utils.timer.MockTimer
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.record.{MemoryRecords, RecordBatch}
import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse
import org.apache.kafka.common.requests.{JoinGroupRequest, OffsetCommitRequest, OffsetFetchResponse, TransactionResult}
import java.util.concurrent.TimeUnit
import java.util.concurrent.locks.ReentrantLock
import kafka.cluster.Partition
import kafka.log.AppendOrigin
import kafka.zk.KafkaZkClient
import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.internals.Topic
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.{AfterEach, BeforeEach, Test}
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.ValueSource
import org.mockito.{ArgumentCaptor, ArgumentMatchers}
import org.mockito.ArgumentMatchers.{any, anyLong, anyShort}
import org.mockito.Mockito.{mock, when}
import scala.jdk.CollectionConverters._
import scala.collection.{Seq, mutable}
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, Future, Promise, TimeoutException}
class GroupCoordinatorTest {
import GroupCoordinatorTest._
type JoinGroupCallback = JoinGroupResult => Unit
type SyncGroupCallback = SyncGroupResult => Unit
type HeartbeatCallbackParams = Errors
type HeartbeatCallback = Errors => Unit
type CommitOffsetCallbackParams = Map[TopicPartition, Errors]
type CommitOffsetCallback = Map[TopicPartition, Errors] => Unit
type LeaveGroupCallback = LeaveGroupResult => Unit
val ClientId = "consumer-test"
val ClientHost = "localhost"
val GroupMinSessionTimeout = 10
val GroupMaxSessionTimeout = 10 * 60 * 1000
val GroupMaxSize = 4
val DefaultRebalanceTimeout = 500
val DefaultSessionTimeout = 500
val GroupInitialRebalanceDelay = 50
var timer: MockTimer = null
var groupCoordinator: GroupCoordinator = null
var replicaManager: ReplicaManager = null
var scheduler: KafkaScheduler = null
var zkClient: KafkaZkClient = null
private val groupId = "groupId"
private val protocolType = "consumer"
private val protocolName = "range"
private val memberId = "memberId"
private val groupInstanceId = "groupInstanceId"
private val leaderInstanceId = "leader"
private val followerInstanceId = "follower"
private val invalidMemberId = "invalidMember"
private val metadata = Array[Byte]()
private val protocols = List((protocolName, metadata))
private val protocolSuperset = List((protocolName, metadata), ("roundrobin", metadata))
private val requireStable = true
private var groupPartitionId: Int = -1
// we use this string value since its hashcode % #.partitions is different
private val otherGroupId = "otherGroup"
@BeforeEach
def setUp(): Unit = {
val props = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "")
props.setProperty(KafkaConfig.GroupMinSessionTimeoutMsProp, GroupMinSessionTimeout.toString)
props.setProperty(KafkaConfig.GroupMaxSessionTimeoutMsProp, GroupMaxSessionTimeout.toString)
props.setProperty(KafkaConfig.GroupMaxSizeProp, GroupMaxSize.toString)
props.setProperty(KafkaConfig.GroupInitialRebalanceDelayMsProp, GroupInitialRebalanceDelay.toString)
// make two partitions of the group topic to make sure some partitions are not owned by the coordinator
val ret = mutable.Map[String, Map[Int, Seq[Int]]]()
ret += (Topic.GROUP_METADATA_TOPIC_NAME -> Map(0 -> Seq(1), 1 -> Seq(1)))
replicaManager = mock(classOf[ReplicaManager])
zkClient = mock(classOf[KafkaZkClient])
// make two partitions of the group topic to make sure some partitions are not owned by the coordinator
when(zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME)).thenReturn(Some(2))
timer = new MockTimer
val config = KafkaConfig.fromProps(props)
val heartbeatPurgatory = new DelayedOperationPurgatory[DelayedHeartbeat]("Heartbeat", timer, config.brokerId, reaperEnabled = false)
val rebalancePurgatory = new DelayedOperationPurgatory[DelayedRebalance]("Rebalance", timer, config.brokerId, reaperEnabled = false)
groupCoordinator = GroupCoordinator(config, replicaManager, heartbeatPurgatory, rebalancePurgatory, timer.time, new Metrics())
groupCoordinator.startup(() => zkClient.getTopicPartitionCount(Topic.GROUP_METADATA_TOPIC_NAME).getOrElse(config.offsetsTopicPartitions),
enableMetadataExpiration = false)
// add the partition into the owned partition list
groupPartitionId = groupCoordinator.partitionFor(groupId)
groupCoordinator.groupManager.addPartitionOwnership(groupPartitionId)
}
@AfterEach
def tearDown(): Unit = {
if (groupCoordinator != null)
groupCoordinator.shutdown()
}
@Test
def testRequestHandlingWhileLoadingInProgress(): Unit = {
val otherGroupPartitionId = groupCoordinator.groupManager.partitionFor(otherGroupId)
assertTrue(otherGroupPartitionId != groupPartitionId)
groupCoordinator.groupManager.addLoadingPartition(otherGroupPartitionId)
assertTrue(groupCoordinator.groupManager.isGroupLoading(otherGroupId))
// Dynamic Member JoinGroup
var joinGroupResponse: Option[JoinGroupResult] = None
groupCoordinator.handleJoinGroup(otherGroupId, memberId, None, true, true, "clientId", "clientHost", 60000, 10000, "consumer",
List("range" -> new Array[Byte](0)), result => { joinGroupResponse = Some(result)})
assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), joinGroupResponse.map(_.error))
// Static Member JoinGroup
groupCoordinator.handleJoinGroup(otherGroupId, memberId, Some("groupInstanceId"), false, true, "clientId", "clientHost", 60000, 10000, "consumer",
List("range" -> new Array[Byte](0)), result => { joinGroupResponse = Some(result)})
assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), joinGroupResponse.map(_.error))
// SyncGroup
var syncGroupResponse: Option[Errors] = None
groupCoordinator.handleSyncGroup(otherGroupId, 1, memberId, Some("consumer"), Some("range"), None, Map.empty[String, Array[Byte]],
syncGroupResult => syncGroupResponse = Some(syncGroupResult.error))
assertEquals(Some(Errors.REBALANCE_IN_PROGRESS), syncGroupResponse)
// OffsetCommit
val topicPartition = new TopicPartition("foo", 0)
var offsetCommitErrors = Map.empty[TopicPartition, Errors]
groupCoordinator.handleCommitOffsets(otherGroupId, memberId, None, 1,
Map(topicPartition -> offsetAndMetadata(15L)), result => { offsetCommitErrors = result })
assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), offsetCommitErrors.get(topicPartition))
// Heartbeat
var heartbeatError: Option[Errors] = None
groupCoordinator.handleHeartbeat(otherGroupId, memberId, None, 1, error => { heartbeatError = Some(error) })
assertEquals(Some(Errors.NONE), heartbeatError)
// DescribeGroups
val (describeGroupError, _) = groupCoordinator.handleDescribeGroup(otherGroupId)
assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, describeGroupError)
// ListGroups
val (listGroupsError, _) = groupCoordinator.handleListGroups(Set())
assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, listGroupsError)
// DeleteGroups
val deleteGroupsErrors = groupCoordinator.handleDeleteGroups(Set(otherGroupId))
assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), deleteGroupsErrors.get(otherGroupId))
// Check that non-loading groups are still accessible
assertEquals(Errors.NONE, groupCoordinator.handleDescribeGroup(groupId)._1)
// After loading, we should be able to access the group
val otherGroupMetadataTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, otherGroupPartitionId)
when(replicaManager.getLog(otherGroupMetadataTopicPartition)).thenReturn(None)
// Call removeGroupsAndOffsets so that partition removed from loadingPartitions
groupCoordinator.groupManager.removeGroupsAndOffsets(otherGroupMetadataTopicPartition, Some(1), group => {})
groupCoordinator.groupManager.loadGroupsAndOffsets(otherGroupMetadataTopicPartition, 1, group => {}, 0L)
assertEquals(Errors.NONE, groupCoordinator.handleDescribeGroup(otherGroupId)._1)
}
@Test
def testOffsetsRetentionMsIntegerOverflow(): Unit = {
val props = TestUtils.createBrokerConfig(nodeId = 0, zkConnect = "")
props.setProperty(KafkaConfig.OffsetsRetentionMinutesProp, Integer.MAX_VALUE.toString)
val config = KafkaConfig.fromProps(props)
val offsetConfig = GroupCoordinator.offsetConfig(config)
assertEquals(offsetConfig.offsetsRetentionMs, Integer.MAX_VALUE * 60L * 1000L)
}
@Test
def testJoinGroupWrongCoordinator(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
var joinGroupResult = dynamicJoinGroup(otherGroupId, memberId, protocolType, protocols)
assertEquals(Errors.NOT_COORDINATOR, joinGroupResult.error)
joinGroupResult = staticJoinGroup(otherGroupId, memberId, groupInstanceId, protocolType, protocols)
assertEquals(Errors.NOT_COORDINATOR, joinGroupResult.error)
}
@Test
def testJoinGroupShouldReceiveErrorIfGroupOverMaxSize(): Unit = {
val futures = ArrayBuffer[Future[JoinGroupResult]]()
val rebalanceTimeout = GroupInitialRebalanceDelay * 2
for (i <- 1.to(GroupMaxSize)) {
futures += sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout)
if (i != 1)
timer.advanceClock(GroupInitialRebalanceDelay)
}
// advance clock beyond rebalanceTimeout
timer.advanceClock(GroupInitialRebalanceDelay + 1)
for (future <- futures) {
assertEquals(Errors.NONE, await(future, 1).error)
}
// Should receive an error since the group is full
val errorFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout)
assertEquals(Errors.GROUP_MAX_SIZE_REACHED, await(errorFuture, 1).error)
}
@Test
def testDynamicMembersJoinGroupWithMaxSizeAndRequiredKnownMember(): Unit = {
val requiredKnownMemberId = true
val nbMembers = GroupMaxSize + 1
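    // With requiredKnownMemberId, the coordinator rejects the first join of an unknown member
    // with MEMBER_ID_REQUIRED and assigns it a member id; the member must rejoin with that id.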
// First JoinRequests
var futures = 1.to(nbMembers).map { _ =>
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// Get back the assigned member ids
val memberIds = futures.map(await(_, 1).memberId)
// Second JoinRequests
futures = memberIds.map { memberId =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// Awaiting results
val errors = futures.map(await(_, DefaultRebalanceTimeout + 1).error)
assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE))
assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED))
// Members which were accepted can rejoin, others are rejected, while
// completing rebalance
futures = memberIds.map { memberId =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// Awaiting results
val rejoinErrors = futures.map(await(_, 1).error)
assertEquals(errors, rejoinErrors)
}
@Test
def testDynamicMembersJoinGroupWithMaxSize(): Unit = {
val requiredKnownMemberId = false
val nbMembers = GroupMaxSize + 1
// JoinRequests
var futures = 1.to(nbMembers).map { _ =>
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// Awaiting results
val joinGroupResults = futures.map(await(_, DefaultRebalanceTimeout + 1))
val errors = joinGroupResults.map(_.error)
assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE))
assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED))
// Members which were accepted can rejoin, others are rejected, while
// completing rebalance
val memberIds = joinGroupResults.map(_.memberId)
futures = memberIds.map { memberId =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// Awaiting results
val rejoinErrors = futures.map(await(_, 1).error)
assertEquals(errors, rejoinErrors)
}
@Test
def testStaticMembersJoinGroupWithMaxSize(): Unit = {
val nbMembers = GroupMaxSize + 1
val instanceIds = 1.to(nbMembers).map(i => Some(s"instance-id-$i"))
// JoinRequests
var futures = instanceIds.map { instanceId =>
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols,
instanceId, DefaultSessionTimeout, DefaultRebalanceTimeout)
}
// advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// Awaiting results
val joinGroupResults = futures.map(await(_, DefaultRebalanceTimeout + 1))
val errors = joinGroupResults.map(_.error)
assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE))
assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED))
// Members which were accepted can rejoin, others are rejected, while
// completing rebalance
val memberIds = joinGroupResults.map(_.memberId)
futures = instanceIds.zip(memberIds).map { case (instanceId, memberId) =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
instanceId, DefaultSessionTimeout, DefaultRebalanceTimeout)
}
// Awaiting results
val rejoinErrors = futures.map(await(_, 1).error)
assertEquals(errors, rejoinErrors)
}
@Test
def testDynamicMembersCanReJoinGroupWithMaxSizeWhileRebalancing(): Unit = {
val requiredKnownMemberId = true
val nbMembers = GroupMaxSize + 1
// First JoinRequests
var futures = 1.to(nbMembers).map { _ =>
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// Get back the assigned member ids
val memberIds = futures.map(await(_, 1).memberId)
// Second JoinRequests
memberIds.map { memberId =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// Members can rejoin while rebalancing
futures = memberIds.map { memberId =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// Awaiting results
val errors = futures.map(await(_, DefaultRebalanceTimeout + 1).error)
assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE))
assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED))
}
@Test
def testLastJoiningMembersAreKickedOutWhenReJoiningGroupWithMaxSize(): Unit = {
val nbMembers = GroupMaxSize + 2
val group = new GroupMetadata(groupId, Stable, new MockTime())
val memberIds = 1.to(nbMembers).map(_ => group.generateMemberId(ClientId, None))
memberIds.foreach { memberId =>
group.add(new MemberMetadata(memberId, None, ClientId, ClientHost,
DefaultRebalanceTimeout, GroupMaxSessionTimeout, protocolType, protocols))
}
groupCoordinator.groupManager.addGroup(group)
groupCoordinator.prepareRebalance(group, "")
val futures = memberIds.map { memberId =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
None, GroupMaxSessionTimeout, DefaultRebalanceTimeout)
}
// advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin
timer.advanceClock(DefaultRebalanceTimeout + 1)
// Awaiting results
val errors = futures.map(await(_, DefaultRebalanceTimeout + 1).error)
assertEquals(Set(Errors.NONE), errors.take(GroupMaxSize).toSet)
assertEquals(Set(Errors.GROUP_MAX_SIZE_REACHED), errors.drop(GroupMaxSize).toSet)
memberIds.drop(GroupMaxSize).foreach { memberId =>
assertFalse(group.has(memberId))
}
}
@Test
def testJoinGroupSessionTimeoutTooSmall(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols, sessionTimeout = GroupMinSessionTimeout - 1)
assertEquals(Errors.INVALID_SESSION_TIMEOUT, joinGroupResult.error)
}
@Test
def testJoinGroupSessionTimeoutTooLarge(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols, sessionTimeout = GroupMaxSessionTimeout + 1)
assertEquals(Errors.INVALID_SESSION_TIMEOUT, joinGroupResult.error)
}
@Test
def testJoinGroupUnknownConsumerNewGroup(): Unit = {
var joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error)
joinGroupResult = staticJoinGroup(groupId, memberId, groupInstanceId, protocolType, protocols)
assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error)
}
@Test
def testInvalidGroupId(): Unit = {
val groupId = ""
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
assertEquals(Errors.INVALID_GROUP_ID, joinGroupResult.error)
}
@Test
def testValidJoinGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
}
@Test
def testJoinGroupInconsistentProtocolType(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val otherMemberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
val otherJoinGroupResult = await(sendJoinGroup(groupId, otherMemberId, "connect", protocols), 1)
assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, otherJoinGroupResult.error)
}
@Test
def testJoinGroupWithEmptyProtocolType(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
var joinGroupResult = dynamicJoinGroup(groupId, memberId, "", protocols)
assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error)
joinGroupResult = staticJoinGroup(groupId, memberId, groupInstanceId, "", protocols)
assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error)
}
@Test
def testJoinGroupWithEmptyGroupProtocol(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, List())
assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error)
}
@Test
def testNewMemberTimeoutCompletion(): Unit = {
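    // A new member that finishes JoinGroup and SyncGroup in time should survive the new-member join timeout
    // and only be removed later through normal session expiration.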
val sessionTimeout = GroupCoordinator.NewMemberJoinTimeoutMs + 5000
val responseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, None, sessionTimeout, DefaultRebalanceTimeout, false)
timer.advanceClock(GroupInitialRebalanceDelay + 1)
val joinResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 100, TimeUnit.MILLISECONDS))
val group = groupCoordinator.groupManager.getGroup(groupId).get
val memberId = joinResult.memberId
assertEquals(Errors.NONE, joinResult.error)
assertEquals(0, group.allMemberMetadata.count(_.isNew))
val syncGroupResult = syncGroupLeader(groupId, joinResult.generationId, memberId, Map(memberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
assertEquals(1, group.size)
timer.advanceClock(GroupCoordinator.NewMemberJoinTimeoutMs + 100)
// Make sure the NewMemberTimeout is not still in effect, and the member is not kicked
assertEquals(1, group.size)
timer.advanceClock(sessionTimeout + 100)
assertEquals(0, group.size)
}
@Test
def testNewMemberJoinExpiration(): Unit = {
// This tests new member expiration during a protracted rebalance. We first create a
// group with one member which uses a large value for session timeout and rebalance timeout.
// We then join with one new member and let the rebalance hang while we await the first member.
// The new member join timeout expires and its JoinGroup request is failed.
val sessionTimeout = GroupCoordinator.NewMemberJoinTimeoutMs + 5000
val rebalanceTimeout = GroupCoordinator.NewMemberJoinTimeoutMs * 2
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols,
sessionTimeout, rebalanceTimeout)
val firstMemberId = firstJoinResult.memberId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val groupOpt = groupCoordinator.groupManager.getGroup(groupId)
assertTrue(groupOpt.isDefined)
val group = groupOpt.get
assertEquals(0, group.allMemberMetadata.count(_.isNew))
val responseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, None, sessionTimeout, rebalanceTimeout)
assertFalse(responseFuture.isCompleted)
assertEquals(2, group.allMembers.size)
assertEquals(1, group.allMemberMetadata.count(_.isNew))
val newMember = group.allMemberMetadata.find(_.isNew).get
assertNotEquals(firstMemberId, newMember.memberId)
timer.advanceClock(GroupCoordinator.NewMemberJoinTimeoutMs + 1)
assertTrue(responseFuture.isCompleted)
val response = Await.result(responseFuture, Duration(0, TimeUnit.MILLISECONDS))
assertEquals(Errors.UNKNOWN_MEMBER_ID, response.error)
assertEquals(1, group.allMembers.size)
assertEquals(0, group.allMemberMetadata.count(_.isNew))
assertEquals(firstMemberId, group.allMembers.head)
}
@Test
def testNewMemberFailureAfterJoinGroupCompletion(): Unit = {
// For old versions of the JoinGroup protocol, new members were subject
// to expiration if the rebalance took long enough. This test case ensures
// that following completion of the JoinGroup phase, new members follow
// normal heartbeat expiration logic.
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId,
Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols,
requireKnownMemberId = false)
val joinResult = await(joinFuture, DefaultSessionTimeout+100)
val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, joinResult.error)
assertEquals(Errors.NONE, otherJoinResult.error)
verifySessionExpiration(groupId)
}
@Test
def testNewMemberFailureAfterSyncGroupCompletion(): Unit = {
// For old versions of the JoinGroup protocol, new members were subject
// to expiration if the rebalance took long enough. This test case ensures
// that following completion of the SyncGroup phase, new members follow
// normal heartbeat expiration logic.
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId,
Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols,
requireKnownMemberId = false)
val joinResult = await(joinFuture, DefaultSessionTimeout+100)
val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, joinResult.error)
assertEquals(Errors.NONE, otherJoinResult.error)
val secondGenerationId = joinResult.generationId
val secondMemberId = otherJoinResult.memberId
sendSyncGroupFollower(groupId, secondGenerationId, secondMemberId)
val syncGroupResult = syncGroupLeader(groupId, secondGenerationId, firstMemberId,
Map(firstMemberId -> Array.emptyByteArray, secondMemberId -> Array.emptyByteArray))
assertEquals(Errors.NONE, syncGroupResult.error)
verifySessionExpiration(groupId)
}
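/**
 * Advances the mock clock past the default session timeout and verifies that heartbeat expiration has emptied
 * the group, i.e. the group is in the Empty state with no members left.
 */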
private def verifySessionExpiration(groupId: String): Unit = {
when(replicaManager.getMagic(any[TopicPartition]))
.thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
timer.advanceClock(DefaultSessionTimeout + 1)
val groupMetadata = group(groupId)
assertEquals(Empty, groupMetadata.currentState)
assertTrue(groupMetadata.allMembers.isEmpty)
}
@Test
def testJoinGroupInconsistentGroupProtocol(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val otherMemberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupFuture = sendJoinGroup(groupId, memberId, protocolType, List(("range", metadata)))
val otherJoinGroupResult = dynamicJoinGroup(groupId, otherMemberId, protocolType, List(("roundrobin", metadata)))
timer.advanceClock(GroupInitialRebalanceDelay + 1)
val joinGroupResult = await(joinGroupFuture, 1)
assertEquals(Errors.NONE, joinGroupResult.error)
assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, otherJoinGroupResult.error)
}
@Test
def testJoinGroupUnknownConsumerExistingGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val otherMemberId = "memberId"
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
val otherJoinGroupResult = await(sendJoinGroup(groupId, otherMemberId, protocolType, protocols), 1)
assertEquals(Errors.UNKNOWN_MEMBER_ID, otherJoinGroupResult.error)
}
@Test
def testJoinGroupUnknownConsumerNewDeadGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val deadGroupId = "deadGroupId"
groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime()))
val joinGroupResult = dynamicJoinGroup(deadGroupId, memberId, protocolType, protocols)
assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, joinGroupResult.error)
}
@Test
def testSyncDeadGroup(): Unit = {
val memberId = "memberId"
val deadGroupId = "deadGroupId"
groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime()))
val syncGroupResult = syncGroupFollower(deadGroupId, 1, memberId)
assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, syncGroupResult.error)
}
@Test
def testJoinGroupSecondJoinInconsistentProtocol(): Unit = {
var responseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, requireKnownMemberId = true)
var joinGroupResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 1, TimeUnit.MILLISECONDS))
assertEquals(Errors.MEMBER_ID_REQUIRED, joinGroupResult.error)
val memberId = joinGroupResult.memberId
// Sending an inconsistent protocol shall be refused
responseFuture = sendJoinGroup(groupId, memberId, protocolType, List(), requireKnownMemberId = true)
joinGroupResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 1, TimeUnit.MILLISECONDS))
assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error)
// Sending consistent protocol shall be accepted
responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, requireKnownMemberId = true)
timer.advanceClock(GroupInitialRebalanceDelay + 1)
joinGroupResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 1, TimeUnit.MILLISECONDS))
assertEquals(Errors.NONE, joinGroupResult.error)
}
@Test
def staticMemberJoinAsFirstMember(): Unit = {
val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
}
@Test
def staticMemberReJoinWithExplicitUnknownMemberId(): Unit = {
var joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
val unknownMemberId = "unknown_member"
joinGroupResult = staticJoinGroup(groupId, unknownMemberId, groupInstanceId, protocolType, protocols)
assertEquals(Errors.FENCED_INSTANCE_ID, joinGroupResult.error)
}
@Test
def staticMemberFenceDuplicateRejoinedFollower(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
    // A third member joining will trigger a rebalance.
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
timer.advanceClock(1)
assertTrue(getGroup(groupId).is(PreparingRebalance))
timer.advanceClock(1)
    // The old follower rejoining the group will match the current member.id.
val oldFollowerJoinGroupFuture =
sendJoinGroup(groupId, rebalanceResult.followerId, protocolType, protocols, groupInstanceId = Some(followerInstanceId))
timer.advanceClock(1)
    // A duplicate follower joining the group with an unknown member id will trigger member.id replacement.
val duplicateFollowerJoinFuture =
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, groupInstanceId = Some(followerInstanceId))
timer.advanceClock(1)
    // The old member shall be fenced immediately once the duplicate follower joins.
val oldFollowerJoinGroupResult = Await.result(oldFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
checkJoinGroupResult(oldFollowerJoinGroupResult,
Errors.FENCED_INSTANCE_ID,
-1,
Set.empty,
PreparingRebalance,
None)
verifyDelayedTaskNotCompleted(duplicateFollowerJoinFuture)
}
@Test
def staticMemberFenceDuplicateSyncingFollowerAfterMemberIdChanged(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
    // A known leader rejoining will trigger a rebalance.
val leaderJoinGroupFuture =
sendJoinGroup(groupId, rebalanceResult.leaderId, protocolType, protocols, groupInstanceId = Some(leaderInstanceId))
timer.advanceClock(1)
assertTrue(getGroup(groupId).is(PreparingRebalance))
timer.advanceClock(1)
    // The old follower rejoining the group will match the current member.id.
val oldFollowerJoinGroupFuture =
sendJoinGroup(groupId, rebalanceResult.followerId, protocolType, protocols, groupInstanceId = Some(followerInstanceId))
timer.advanceClock(1)
val leaderJoinGroupResult = Await.result(leaderJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
checkJoinGroupResult(leaderJoinGroupResult,
Errors.NONE,
rebalanceResult.generation + 1,
Set(leaderInstanceId, followerInstanceId),
CompletingRebalance,
Some(protocolType))
assertEquals(rebalanceResult.leaderId, leaderJoinGroupResult.memberId)
assertEquals(rebalanceResult.leaderId, leaderJoinGroupResult.leaderId)
    // The old follower shall get a successful join group response.
val oldFollowerJoinGroupResult = Await.result(oldFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
checkJoinGroupResult(oldFollowerJoinGroupResult,
Errors.NONE,
rebalanceResult.generation + 1,
Set.empty,
CompletingRebalance,
Some(protocolType),
expectedLeaderId = leaderJoinGroupResult.memberId)
assertEquals(rebalanceResult.followerId, oldFollowerJoinGroupResult.memberId)
assertEquals(rebalanceResult.leaderId, oldFollowerJoinGroupResult.leaderId)
assertTrue(getGroup(groupId).is(CompletingRebalance))
    // A duplicate follower joining the group with an unknown member id will trigger member.id replacement,
    // and will also trigger a rebalance under the CompletingRebalance state; the old follower's sync callback
    // will return a fenced exception while the broker replaces the member identity with the duplicate follower's.
val oldFollowerSyncGroupFuture = sendSyncGroupFollower(groupId, oldFollowerJoinGroupResult.generationId,
oldFollowerJoinGroupResult.memberId, Some(protocolType), Some(protocolName), Some(followerInstanceId))
val duplicateFollowerJoinFuture =
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, groupInstanceId = Some(followerInstanceId))
timer.advanceClock(1)
val oldFollowerSyncGroupResult = Await.result(oldFollowerSyncGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
assertEquals(Errors.FENCED_INSTANCE_ID, oldFollowerSyncGroupResult.error)
assertTrue(getGroup(groupId).is(PreparingRebalance))
timer.advanceClock(GroupInitialRebalanceDelay + 1)
timer.advanceClock(DefaultRebalanceTimeout + 1)
val duplicateFollowerJoinGroupResult = Await.result(duplicateFollowerJoinFuture, Duration(1, TimeUnit.MILLISECONDS))
checkJoinGroupResult(duplicateFollowerJoinGroupResult,
Errors.NONE,
rebalanceResult.generation + 2,
Set(followerInstanceId), // this follower will become the new leader, and hence it would have the member list
CompletingRebalance,
Some(protocolType),
expectedLeaderId = duplicateFollowerJoinGroupResult.memberId)
assertTrue(getGroup(groupId).is(CompletingRebalance))
}
@Test
def staticMemberFenceDuplicateRejoiningFollowerAfterMemberIdChanged(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
    // A known leader rejoining will trigger a rebalance.
val leaderJoinGroupFuture =
sendJoinGroup(groupId, rebalanceResult.leaderId, protocolType, protocols, groupInstanceId = Some(leaderInstanceId))
timer.advanceClock(1)
assertTrue(getGroup(groupId).is(PreparingRebalance))
    // A duplicate follower joining the group will trigger member.id replacement.
val duplicateFollowerJoinGroupFuture =
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, groupInstanceId = Some(followerInstanceId))
timer.advanceClock(1)
    // The old follower rejoining the group will fail because its member.id has already been updated.
val oldFollowerJoinGroupFuture =
sendJoinGroup(groupId, rebalanceResult.followerId, protocolType, protocols, groupInstanceId = Some(followerInstanceId))
val leaderRejoinGroupResult = Await.result(leaderJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
checkJoinGroupResult(leaderRejoinGroupResult,
Errors.NONE,
rebalanceResult.generation + 1,
Set(leaderInstanceId, followerInstanceId),
CompletingRebalance,
Some(protocolType))
val duplicateFollowerJoinGroupResult = Await.result(duplicateFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
checkJoinGroupResult(duplicateFollowerJoinGroupResult,
Errors.NONE,
rebalanceResult.generation + 1,
Set.empty,
CompletingRebalance,
Some(protocolType))
assertNotEquals(rebalanceResult.followerId, duplicateFollowerJoinGroupResult.memberId)
val oldFollowerJoinGroupResult = Await.result(oldFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
checkJoinGroupResult(oldFollowerJoinGroupResult,
Errors.FENCED_INSTANCE_ID,
-1,
Set.empty,
CompletingRebalance,
None)
}
@Test
def staticMemberRejoinWithKnownMemberId(): Unit = {
var joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
val assignedMemberId = joinGroupResult.memberId
// The second join group should return immediately since we are using the same metadata during CompletingRebalance.
val rejoinResponseFuture = sendJoinGroup(groupId, assignedMemberId, protocolType, protocols, Some(groupInstanceId))
timer.advanceClock(1)
joinGroupResult = Await.result(rejoinResponseFuture, Duration(1, TimeUnit.MILLISECONDS))
assertEquals(Errors.NONE, joinGroupResult.error)
assertTrue(getGroup(groupId).is(CompletingRebalance))
val syncGroupFuture = sendSyncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId,
Some(protocolType), Some(protocolName), Some(groupInstanceId), Map(assignedMemberId -> Array[Byte]()))
timer.advanceClock(1)
val syncGroupResult = Await.result(syncGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
assertEquals(Errors.NONE, syncGroupResult.error)
assertTrue(getGroup(groupId).is(Stable))
}
@ParameterizedTest
@ValueSource(booleans = Array(true, false))
def staticMemberRejoinWithLeaderIdAndUnknownMemberId(supportSkippingAssignment: Boolean): Unit = {
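    // With supportSkippingAssignment the rejoining static leader is told it is the leader (full member list,
    // skipAssignment = true); without it, the response points at the previous leader's member id and carries no members.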
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
// A static leader rejoin with unknown id will not trigger rebalance, and no assignment will be returned.
val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID,
leaderInstanceId, protocolType, protocolSuperset, clockAdvance = 1, supportSkippingAssignment = supportSkippingAssignment)
checkJoinGroupResult(joinGroupResult,
Errors.NONE,
rebalanceResult.generation, // The group should be at the same generation
if (supportSkippingAssignment) Set(leaderInstanceId, followerInstanceId) else Set.empty,
Stable,
Some(protocolType),
if (supportSkippingAssignment) joinGroupResult.memberId else rebalanceResult.leaderId,
expectedSkipAssignment = supportSkippingAssignment
)
val oldLeaderJoinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId, protocolType, protocolSuperset, clockAdvance = 1)
assertEquals(Errors.FENCED_INSTANCE_ID, oldLeaderJoinGroupResult.error)
// Old leader will get fenced.
val oldLeaderSyncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId,
Map.empty, None, None, Some(leaderInstanceId))
assertEquals(Errors.FENCED_INSTANCE_ID, oldLeaderSyncGroupResult.error)
    // Calling sync with the old leader.id will fail because that leader.id is no longer valid and has been replaced.
val newLeaderSyncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId, Map.empty)
assertEquals(Errors.UNKNOWN_MEMBER_ID, newLeaderSyncGroupResult.error)
}
@Test
def staticMemberRejoinWithLeaderIdAndKnownMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId,
sessionTimeout = DefaultRebalanceTimeout / 2)
    // A static leader rejoining with a known id will trigger a rebalance.
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId,
protocolType, protocolSuperset, clockAdvance = DefaultRebalanceTimeout + 1)
// Timeout follower in the meantime.
assertFalse(getGroup(groupId).hasStaticMember(followerInstanceId))
checkJoinGroupResult(joinGroupResult,
Errors.NONE,
      rebalanceResult.generation + 1, // The group has advanced to the new generation.
Set(leaderInstanceId),
CompletingRebalance,
Some(protocolType),
rebalanceResult.leaderId,
rebalanceResult.leaderId)
}
@Test
def staticMemberRejoinWithLeaderIdAndUnexpectedDeadGroup(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
getGroup(groupId).transitionTo(Dead)
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId, protocolType, protocols, clockAdvance = 1)
assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, joinGroupResult.error)
}
@Test
def staticMemberRejoinWithLeaderIdAndUnexpectedEmptyGroup(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
getGroup(groupId).transitionTo(PreparingRebalance)
getGroup(groupId).transitionTo(Empty)
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId, protocolType, protocols, clockAdvance = 1)
assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error)
}
@Test
def staticMemberRejoinWithFollowerIdAndChangeOfProtocol(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = DefaultSessionTimeout * 2)
// A static follower rejoin with changed protocol will trigger rebalance.
val newProtocols = List(("roundrobin", metadata))
// Old leader hasn't joined in the meantime, triggering a re-election.
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, newProtocols, clockAdvance = DefaultSessionTimeout + 1)
assertEquals(rebalanceResult.followerId, joinGroupResult.memberId)
assertTrue(getGroup(groupId).hasStaticMember(leaderInstanceId))
assertTrue(getGroup(groupId).isLeader(rebalanceResult.followerId))
checkJoinGroupResult(joinGroupResult,
Errors.NONE,
      rebalanceResult.generation + 1, // The group has advanced to the new generation, and the leader has changed because the old one timed out.
Set(leaderInstanceId, followerInstanceId),
CompletingRebalance,
Some(protocolType),
rebalanceResult.followerId,
rebalanceResult.followerId)
}
@Test
def staticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWithSelectedProtocolChanged(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
    // A static follower rejoining with a changed protocol that also changes the updated group's selectedProtocol
    // should trigger a rebalance.
val selectedProtocols = getGroup(groupId).selectProtocol
val newProtocols = List(("roundrobin", metadata))
assert(!newProtocols.map(_._1).contains(selectedProtocols))
// Old leader hasn't joined in the meantime, triggering a re-election.
val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, newProtocols, clockAdvance = DefaultSessionTimeout + 1)
checkJoinGroupResult(joinGroupResult,
Errors.NONE,
rebalanceResult.generation + 1,
Set(leaderInstanceId, followerInstanceId),
CompletingRebalance,
Some(protocolType))
assertTrue(getGroup(groupId).isLeader(joinGroupResult.memberId))
assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId)
    assertEquals(Some("roundrobin"), joinGroupResult.protocolName)
}
@Test
def staticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWhileSelectProtocolUnchangedPersistenceFailure(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val selectedProtocol = getGroup(groupId).selectProtocol
val newProtocols = List((selectedProtocol, metadata))
// Timeout old leader in the meantime.
val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID,
followerInstanceId, protocolType, newProtocols, clockAdvance = 1, appendRecordError = Errors.MESSAGE_TOO_LARGE)
checkJoinGroupResult(joinGroupResult,
Errors.UNKNOWN_SERVER_ERROR,
rebalanceResult.generation,
Set.empty,
Stable,
Some(protocolType))
    // A join with the old member id will not fail because the member id was not updated due to the persistence failure
assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId)
val oldFollowerJoinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, newProtocols, clockAdvance = 1)
assertEquals(Errors.NONE, oldFollowerJoinGroupResult.error)
    // A sync with the old member id will also not fail because the member id was not updated due to the persistence failure
val syncGroupWithOldMemberIdResult = syncGroupFollower(groupId, rebalanceResult.generation,
rebalanceResult.followerId, None, None, Some(followerInstanceId))
assertEquals(Errors.NONE, syncGroupWithOldMemberIdResult.error)
}
@Test
def staticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWhileSelectProtocolUnchanged(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
    // A static follower rejoining with its protocol changing to a subset of the leader's protocol won't trigger
    // a rebalance if the updated group's selectProtocol remains unchanged.
val selectedProtocol = getGroup(groupId).selectProtocol
val newProtocols = List((selectedProtocol, metadata))
// Timeout old leader in the meantime.
val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID,
followerInstanceId, protocolType, newProtocols, clockAdvance = 1)
checkJoinGroupResult(joinGroupResult,
Errors.NONE,
rebalanceResult.generation,
Set.empty,
Stable,
Some(protocolType))
    // A join with the old member id will fail because the member id has been updated
assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId)
val oldFollowerJoinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, newProtocols, clockAdvance = 1)
assertEquals(Errors.FENCED_INSTANCE_ID, oldFollowerJoinGroupResult.error)
    // A sync with the old member id will fail because the member id has been updated
val syncGroupWithOldMemberIdResult = syncGroupFollower(groupId, rebalanceResult.generation,
rebalanceResult.followerId, None, None, Some(followerInstanceId))
assertEquals(Errors.FENCED_INSTANCE_ID, syncGroupWithOldMemberIdResult.error)
val syncGroupWithNewMemberIdResult = syncGroupFollower(groupId, rebalanceResult.generation,
joinGroupResult.memberId, None, None, Some(followerInstanceId))
assertEquals(Errors.NONE, syncGroupWithNewMemberIdResult.error)
assertEquals(rebalanceResult.followerAssignment, syncGroupWithNewMemberIdResult.memberAssignment)
}
@Test
def staticMemberRejoinWithKnownLeaderIdToTriggerRebalanceAndFollowerWithChangeofProtocol(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
// A static leader rejoin with known member id will trigger rebalance.
val leaderRejoinGroupFuture = sendJoinGroup(groupId, rebalanceResult.leaderId, protocolType,
protocolSuperset, Some(leaderInstanceId))
    // The rebalance completes immediately after the follower rejoins.
val followerRejoinWithFuture = sendJoinGroup(groupId, rebalanceResult.followerId, protocolType,
protocolSuperset, Some(followerInstanceId))
timer.advanceClock(1)
// Leader should get the same assignment as last round.
checkJoinGroupResult(await(leaderRejoinGroupFuture, 1),
Errors.NONE,
      rebalanceResult.generation + 1, // The group has advanced to the new generation.
Set(leaderInstanceId, followerInstanceId),
CompletingRebalance,
Some(protocolType),
rebalanceResult.leaderId,
rebalanceResult.leaderId)
checkJoinGroupResult(await(followerRejoinWithFuture, 1),
Errors.NONE,
      rebalanceResult.generation + 1, // The group has advanced to the new generation.
Set.empty,
CompletingRebalance,
Some(protocolType),
rebalanceResult.leaderId,
rebalanceResult.followerId)
// The follower protocol changed from protocolSuperset to general protocols.
val followerRejoinWithProtocolChangeFuture = sendJoinGroup(groupId, rebalanceResult.followerId,
protocolType, protocols, Some(followerInstanceId))
    // The group will transition to PreparingRebalance due to the protocol change from the follower.
assertTrue(getGroup(groupId).is(PreparingRebalance))
timer.advanceClock(DefaultRebalanceTimeout + 1)
checkJoinGroupResult(await(followerRejoinWithProtocolChangeFuture, 1),
Errors.NONE,
      rebalanceResult.generation + 2, // The group has advanced to the new generation.
Set(followerInstanceId),
CompletingRebalance,
Some(protocolType),
rebalanceResult.followerId,
rebalanceResult.followerId)
}
@Test
def staticMemberRejoinAsFollowerWithUnknownMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
// A static follower rejoin with no protocol change will not trigger rebalance.
val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1)
// Old leader shouldn't be timed out.
assertTrue(getGroup(groupId).hasStaticMember(leaderInstanceId))
checkJoinGroupResult(joinGroupResult,
Errors.NONE,
rebalanceResult.generation, // The group has no change.
Set.empty,
Stable,
Some(protocolType))
assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId)
val syncGroupResult = syncGroupFollower(groupId, rebalanceResult.generation, joinGroupResult.memberId)
assertEquals(Errors.NONE, syncGroupResult.error)
assertEquals(rebalanceResult.followerAssignment, syncGroupResult.memberAssignment)
}
@Test
def staticMemberRejoinAsFollowerWithKnownMemberIdAndNoProtocolChange(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
// A static follower rejoin with no protocol change will not trigger rebalance.
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1)
// Old leader shouldn't be timed out.
assertTrue(getGroup(groupId).hasStaticMember(leaderInstanceId))
checkJoinGroupResult(joinGroupResult,
Errors.NONE,
rebalanceResult.generation, // The group has no change.
Set.empty,
Stable,
Some(protocolType),
rebalanceResult.leaderId,
rebalanceResult.followerId)
}
@Test
def staticMemberRejoinAsFollowerWithMismatchedMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, leaderInstanceId, protocolType, protocolSuperset, clockAdvance = 1)
assertEquals(Errors.FENCED_INSTANCE_ID, joinGroupResult.error)
}
@Test
def staticMemberRejoinAsLeaderWithMismatchedMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1)
assertEquals(Errors.FENCED_INSTANCE_ID, joinGroupResult.error)
}
@Test
def staticMemberSyncAsLeaderWithInvalidMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val syncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, "invalid",
Map.empty, None, None, Some(leaderInstanceId))
assertEquals(Errors.FENCED_INSTANCE_ID, syncGroupResult.error)
}
@Test
def staticMemberHeartbeatLeaderWithInvalidMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val syncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId, Map.empty)
assertEquals(Errors.NONE, syncGroupResult.error)
val validHeartbeatResult = heartbeat(groupId, rebalanceResult.leaderId, rebalanceResult.generation)
assertEquals(Errors.NONE, validHeartbeatResult)
val invalidHeartbeatResult = heartbeat(groupId, invalidMemberId, rebalanceResult.generation, Some(leaderInstanceId))
assertEquals(Errors.FENCED_INSTANCE_ID, invalidHeartbeatResult)
}
@Test
def shouldGetDifferentStaticMemberIdAfterEachRejoin(): Unit = {
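    // Each static rejoin with an unknown member id should produce a freshly generated member id prefixed with the
    // group.instance.id, so no two consecutive rejoins observe the same member id.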
val initialResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val timeAdvance = 1
var lastMemberId = initialResult.leaderId
for (_ <- 1 to 5) {
val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID,
leaderInstanceId, protocolType, protocols, clockAdvance = timeAdvance)
assertTrue(joinGroupResult.memberId.startsWith(leaderInstanceId))
assertNotEquals(lastMemberId, joinGroupResult.memberId)
lastMemberId = joinGroupResult.memberId
}
}
@Test
def testOffsetCommitDeadGroup(): Unit = {
val memberId = "memberId"
val deadGroupId = "deadGroupId"
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime()))
val offsetCommitResult = commitOffsets(deadGroupId, memberId, 1, Map(tp -> offset))
assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, offsetCommitResult(tp))
}
@Test
def staticMemberCommitOffsetWithInvalidMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val syncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId, Map.empty)
assertEquals(Errors.NONE, syncGroupResult.error)
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val validOffsetCommitResult = commitOffsets(groupId, rebalanceResult.leaderId, rebalanceResult.generation, Map(tp -> offset))
assertEquals(Errors.NONE, validOffsetCommitResult(tp))
val invalidOffsetCommitResult = commitOffsets(groupId, invalidMemberId, rebalanceResult.generation,
Map(tp -> offset), Some(leaderInstanceId))
assertEquals(Errors.FENCED_INSTANCE_ID, invalidOffsetCommitResult(tp))
}
@Test
def staticMemberJoinWithUnknownInstanceIdAndKnownMemberId(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, "unknown_instance",
protocolType, protocolSuperset, clockAdvance = 1)
assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error)
}
@Test
def staticMemberReJoinWithIllegalStateAsUnknownMember(): Unit = {
staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val group = groupCoordinator.groupManager.getGroup(groupId).get
group.transitionTo(PreparingRebalance)
group.transitionTo(Empty)
    // An IllegalStateException shall be triggered since the follower id resides in the pending member bucket.
val expectedException = assertThrows(classOf[IllegalStateException],
() => staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1))
val message = expectedException.getMessage
assertTrue(message.contains(group.groupId))
assertTrue(message.contains(followerInstanceId))
}
@Test
def testLeaderFailToRejoinBeforeFinalRebalanceTimeoutWithLongSessionTimeout(): Unit = {
groupStuckInRebalanceTimeoutDueToNonjoinedStaticMember()
timer.advanceClock(DefaultRebalanceTimeout + 1)
    // The static leader should have already hit its session timeout, moving the group towards Empty
assertEquals(Set.empty, getGroup(groupId).allMembers)
assertNull(getGroup(groupId).leaderOrNull)
assertEquals(3, getGroup(groupId).generationId)
assertGroupState(groupState = Empty)
}
@Test
def testLeaderRejoinBeforeFinalRebalanceTimeoutWithLongSessionTimeout(): Unit = {
groupStuckInRebalanceTimeoutDueToNonjoinedStaticMember()
// The static leader should be back now, moving group towards CompletingRebalance
val leaderRejoinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocols)
checkJoinGroupResult(leaderRejoinGroupResult,
Errors.NONE,
3,
Set(leaderInstanceId),
CompletingRebalance,
Some(protocolType)
)
assertEquals(Set(leaderRejoinGroupResult.memberId), getGroup(groupId).allMembers)
assertNotNull(getGroup(groupId).leaderOrNull)
assertEquals(3, getGroup(groupId).generationId)
}
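/**
 * Sets up two static members plus one dynamic member (reaching generation 2), then has the static follower leave
 * while the static leader stays silent, leaving the group stuck in PreparingRebalance until the caller either
 * rejoins the leader or lets it time out.
 */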
def groupStuckInRebalanceTimeoutDueToNonjoinedStaticMember(): Unit = {
val longSessionTimeout = DefaultSessionTimeout * 2
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = longSessionTimeout)
val dynamicJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocolSuperset, sessionTimeout = longSessionTimeout)
timer.advanceClock(DefaultRebalanceTimeout + 1)
val dynamicJoinResult = await(dynamicJoinFuture, 100)
// The new dynamic member has been elected as leader
assertEquals(dynamicJoinResult.leaderId, dynamicJoinResult.memberId)
assertEquals(Errors.NONE, dynamicJoinResult.error)
assertEquals(3, dynamicJoinResult.members.size)
assertEquals(2, dynamicJoinResult.generationId)
assertGroupState(groupState = CompletingRebalance)
assertEquals(Set(rebalanceResult.leaderId, rebalanceResult.followerId,
dynamicJoinResult.memberId), getGroup(groupId).allMembers)
assertEquals(Set(leaderInstanceId, followerInstanceId),
getGroup(groupId).allStaticMembers)
assertEquals(Set(dynamicJoinResult.memberId), getGroup(groupId).allDynamicMembers)
// Send a special leave group request from static follower, moving group towards PreparingRebalance
val followerLeaveGroupResults = singleLeaveGroup(groupId, rebalanceResult.followerId)
verifyLeaveGroupResult(followerLeaveGroupResults)
assertGroupState(groupState = PreparingRebalance)
timer.advanceClock(DefaultRebalanceTimeout + 1)
// Only static leader is maintained, and group is stuck at PreparingRebalance stage
assertTrue(getGroup(groupId).allDynamicMembers.isEmpty)
assertEquals(Set(rebalanceResult.leaderId), getGroup(groupId).allMembers)
assertTrue(getGroup(groupId).allDynamicMembers.isEmpty)
assertEquals(2, getGroup(groupId).generationId)
assertGroupState(groupState = PreparingRebalance)
}
@Test
def testStaticMemberFollowerFailToRejoinBeforeRebalanceTimeout(): Unit = {
// Increase session timeout so that the follower won't be evicted when rebalance timeout is reached.
val initialRebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = DefaultRebalanceTimeout * 2)
val newMemberInstanceId = "newMember"
val leaderId = initialRebalanceResult.leaderId
val newMemberJoinGroupFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType,
protocolSuperset, Some(newMemberInstanceId))
assertGroupState(groupState = PreparingRebalance)
val leaderRejoinGroupResult = staticJoinGroup(groupId, leaderId, leaderInstanceId, protocolType, protocolSuperset, clockAdvance = DefaultRebalanceTimeout + 1)
checkJoinGroupResult(leaderRejoinGroupResult,
Errors.NONE,
initialRebalanceResult.generation + 1,
Set(leaderInstanceId, followerInstanceId, newMemberInstanceId),
CompletingRebalance,
Some(protocolType),
expectedLeaderId = leaderId,
expectedMemberId = leaderId)
val newMemberJoinGroupResult = Await.result(newMemberJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
assertEquals(Errors.NONE, newMemberJoinGroupResult.error)
checkJoinGroupResult(newMemberJoinGroupResult,
Errors.NONE,
initialRebalanceResult.generation + 1,
Set.empty,
CompletingRebalance,
Some(protocolType),
expectedLeaderId = leaderId)
}
@Test
def testStaticMemberLeaderFailToRejoinBeforeRebalanceTimeout(): Unit = {
// Increase session timeout so that the leader won't be evicted when rebalance timeout is reached.
val initialRebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = DefaultRebalanceTimeout * 2)
val newMemberInstanceId = "newMember"
val newMemberJoinGroupFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType,
protocolSuperset, Some(newMemberInstanceId))
timer.advanceClock(1)
assertGroupState(groupState = PreparingRebalance)
val oldFollowerRejoinGroupResult = staticJoinGroup(groupId, initialRebalanceResult.followerId, followerInstanceId, protocolType, protocolSuperset, clockAdvance = DefaultRebalanceTimeout + 1)
val newMemberJoinGroupResult = Await.result(newMemberJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS))
val (newLeaderResult, newFollowerResult) = if (oldFollowerRejoinGroupResult.leaderId == oldFollowerRejoinGroupResult.memberId)
(oldFollowerRejoinGroupResult, newMemberJoinGroupResult)
else
(newMemberJoinGroupResult, oldFollowerRejoinGroupResult)
checkJoinGroupResult(newLeaderResult,
Errors.NONE,
initialRebalanceResult.generation + 1,
Set(leaderInstanceId, followerInstanceId, newMemberInstanceId),
CompletingRebalance,
Some(protocolType))
checkJoinGroupResult(newFollowerResult,
Errors.NONE,
initialRebalanceResult.generation + 1,
Set.empty,
CompletingRebalance,
Some(protocolType),
expectedLeaderId = newLeaderResult.memberId)
}
@Test
def testJoinGroupProtocolTypeIsNotProvidedWhenAnErrorOccurs(): Unit = {
// JoinGroup(leader)
val leaderResponseFuture = sendJoinGroup(groupId, "fake-id", protocolType,
protocolSuperset, Some(leaderInstanceId), DefaultSessionTimeout)
// The Protocol Type is None when there is an error
val leaderJoinGroupResult = await(leaderResponseFuture, 1)
assertEquals(Errors.UNKNOWN_MEMBER_ID, leaderJoinGroupResult.error)
assertEquals(None, leaderJoinGroupResult.protocolType)
}
@Test
def testJoinGroupReturnsTheProtocolType(): Unit = {
// JoinGroup(leader)
val leaderResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType,
protocolSuperset, Some(leaderInstanceId), DefaultSessionTimeout)
// JoinGroup(follower)
val followerResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType,
protocolSuperset, Some(followerInstanceId), DefaultSessionTimeout)
timer.advanceClock(GroupInitialRebalanceDelay + 1)
timer.advanceClock(DefaultRebalanceTimeout + 1)
    // The Protocol Type is defined when there is no error
val leaderJoinGroupResult = await(leaderResponseFuture, 1)
assertEquals(Errors.NONE, leaderJoinGroupResult.error)
assertEquals(protocolType, leaderJoinGroupResult.protocolType.orNull)
    // The Protocol Type is defined when there is no error
val followerJoinGroupResult = await(followerResponseFuture, 1)
assertEquals(Errors.NONE, followerJoinGroupResult.error)
assertEquals(protocolType, followerJoinGroupResult.protocolType.orNull)
}
@Test
def testSyncGroupReturnsAnErrorWhenProtocolTypeIsInconsistent(): Unit = {
testSyncGroupProtocolTypeAndNameWith(Some("whatever"), None, Errors.INCONSISTENT_GROUP_PROTOCOL,
None, None)
}
@Test
def testSyncGroupReturnsAnErrorWhenProtocolNameIsInconsistent(): Unit = {
testSyncGroupProtocolTypeAndNameWith(None, Some("whatever"), Errors.INCONSISTENT_GROUP_PROTOCOL,
None, None)
}
@Test
def testSyncGroupSucceedWhenProtocolTypeAndNameAreNotProvided(): Unit = {
testSyncGroupProtocolTypeAndNameWith(None, None, Errors.NONE,
Some(protocolType), Some(protocolName))
}
@Test
def testSyncGroupSucceedWhenProtocolTypeAndNameAreConsistent(): Unit = {
testSyncGroupProtocolTypeAndNameWith(Some(protocolType), Some(protocolName),
Errors.NONE, Some(protocolType), Some(protocolName))
}
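/**
 * Joins a static leader and follower with the group's actual protocol type, then sends SyncGroup for both with
 * the provided protocol type/name and checks the expected error as well as the protocol type/name returned.
 */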
private def testSyncGroupProtocolTypeAndNameWith(protocolType: Option[String],
protocolName: Option[String],
expectedError: Errors,
expectedProtocolType: Option[String],
expectedProtocolName: Option[String]): Unit = {
// JoinGroup(leader) with the Protocol Type of the group
val leaderResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, this.protocolType,
protocolSuperset, Some(leaderInstanceId), DefaultSessionTimeout)
// JoinGroup(follower) with the Protocol Type of the group
val followerResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, this.protocolType,
protocolSuperset, Some(followerInstanceId), DefaultSessionTimeout)
timer.advanceClock(GroupInitialRebalanceDelay + 1)
timer.advanceClock(DefaultRebalanceTimeout + 1)
val leaderJoinGroupResult = await(leaderResponseFuture, 1)
val leaderId = leaderJoinGroupResult.memberId
val generationId = leaderJoinGroupResult.generationId
val followerJoinGroupResult = await(followerResponseFuture, 1)
val followerId = followerJoinGroupResult.memberId
// SyncGroup with the provided Protocol Type and Name
val leaderSyncGroupResult = syncGroupLeader(groupId, generationId, leaderId,
Map(leaderId -> Array.empty), protocolType, protocolName)
assertEquals(expectedError, leaderSyncGroupResult.error)
assertEquals(expectedProtocolType, leaderSyncGroupResult.protocolType)
assertEquals(expectedProtocolName, leaderSyncGroupResult.protocolName)
// SyncGroup with the provided Protocol Type and Name
val followerSyncGroupResult = syncGroupFollower(groupId, generationId, followerId,
protocolType, protocolName)
assertEquals(expectedError, followerSyncGroupResult.error)
assertEquals(expectedProtocolType, followerSyncGroupResult.protocolType)
assertEquals(expectedProtocolName, followerSyncGroupResult.protocolName)
}
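/** Captures the outcome of the initial static-member rebalance: the generation plus each member's id and assignment. */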
private class RebalanceResult(val generation: Int,
val leaderId: String,
val leaderAssignment: Array[Byte],
val followerId: String,
val followerAssignment: Array[Byte])
/**
* Generate static member rebalance results, including:
* - generation
* - leader id
* - leader assignment
* - follower id
* - follower assignment
*/
private def staticMembersJoinAndRebalance(leaderInstanceId: String,
followerInstanceId: String,
sessionTimeout: Int = DefaultSessionTimeout): RebalanceResult = {
val leaderResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType,
protocolSuperset, Some(leaderInstanceId), sessionTimeout)
val followerResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType,
protocolSuperset, Some(followerInstanceId), sessionTimeout)
    // The goal of the two timer advances is to let the first group initial join complete and set the newMemberAdded
    // flag to false. The next advance triggers the rebalance as needed for the follower's delayed join. One large
    // time advance won't help because we can only populate one delayed join from the purgatory, and the new delayed
    // op created at that time would never be triggered.
timer.advanceClock(GroupInitialRebalanceDelay + 1)
timer.advanceClock(DefaultRebalanceTimeout + 1)
val newGeneration = 1
val leaderJoinGroupResult = await(leaderResponseFuture, 1)
assertEquals(Errors.NONE, leaderJoinGroupResult.error)
assertEquals(newGeneration, leaderJoinGroupResult.generationId)
val followerJoinGroupResult = await(followerResponseFuture, 1)
assertEquals(Errors.NONE, followerJoinGroupResult.error)
assertEquals(newGeneration, followerJoinGroupResult.generationId)
val leaderId = leaderJoinGroupResult.memberId
val leaderSyncGroupResult = syncGroupLeader(groupId, leaderJoinGroupResult.generationId, leaderId, Map(leaderId -> Array[Byte]()))
assertEquals(Errors.NONE, leaderSyncGroupResult.error)
assertTrue(getGroup(groupId).is(Stable))
val followerId = followerJoinGroupResult.memberId
val followerSyncGroupResult = syncGroupFollower(groupId, leaderJoinGroupResult.generationId, followerId)
assertEquals(Errors.NONE, followerSyncGroupResult.error)
assertTrue(getGroup(groupId).is(Stable))
new RebalanceResult(newGeneration,
leaderId,
leaderSyncGroupResult.memberAssignment,
followerId,
followerSyncGroupResult.memberAssignment)
}
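/**
 * Asserts the full shape of a JoinGroupResult: error, generation, returned members (by group.instance.id),
 * current group state, protocol type, skipAssignment flag and, when provided, the expected leader/member ids.
 */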
private def checkJoinGroupResult(joinGroupResult: JoinGroupResult,
expectedError: Errors,
expectedGeneration: Int,
expectedGroupInstanceIds: Set[String],
expectedGroupState: GroupState,
expectedProtocolType: Option[String],
expectedLeaderId: String = JoinGroupRequest.UNKNOWN_MEMBER_ID,
expectedMemberId: String = JoinGroupRequest.UNKNOWN_MEMBER_ID,
expectedSkipAssignment: Boolean = false): Unit = {
assertEquals(expectedError, joinGroupResult.error)
assertEquals(expectedGeneration, joinGroupResult.generationId)
assertEquals(expectedGroupInstanceIds.size, joinGroupResult.members.size)
val resultedGroupInstanceIds = joinGroupResult.members.map(member => member.groupInstanceId).toSet
assertEquals(expectedGroupInstanceIds, resultedGroupInstanceIds)
assertGroupState(groupState = expectedGroupState)
assertEquals(expectedProtocolType, joinGroupResult.protocolType)
assertEquals(expectedSkipAssignment, joinGroupResult.skipAssignment)
if (!expectedLeaderId.equals(JoinGroupRequest.UNKNOWN_MEMBER_ID)) {
assertEquals(expectedLeaderId, joinGroupResult.leaderId)
}
if (!expectedMemberId.equals(JoinGroupRequest.UNKNOWN_MEMBER_ID)) {
assertEquals(expectedMemberId, joinGroupResult.memberId)
}
}
@Test
def testHeartbeatWrongCoordinator(): Unit = {
val heartbeatResult = heartbeat(otherGroupId, memberId, -1)
assertEquals(Errors.NOT_COORDINATOR, heartbeatResult)
}
@Test
def testHeartbeatUnknownGroup(): Unit = {
val heartbeatResult = heartbeat(groupId, memberId, -1)
assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult)
}
@Test
def testHeartbeatDeadGroup(): Unit = {
val memberId = "memberId"
val deadGroupId = "deadGroupId"
groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime()))
val heartbeatResult = heartbeat(deadGroupId, memberId, 1)
assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, heartbeatResult)
}
@Test
def testHeartbeatEmptyGroup(): Unit = {
val memberId = "memberId"
val group = new GroupMetadata(groupId, Empty, new MockTime())
val member = new MemberMetadata(memberId, Some(groupInstanceId),
ClientId, ClientHost, DefaultRebalanceTimeout, DefaultSessionTimeout,
protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte])))
group.add(member)
groupCoordinator.groupManager.addGroup(group)
val heartbeatResult = heartbeat(groupId, memberId, 0)
assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult)
}
@Test
def testHeartbeatUnknownConsumerExistingGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val otherMemberId = "memberId"
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val heartbeatResult = heartbeat(groupId, otherMemberId, 1)
assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult)
}
@Test
def testHeartbeatRebalanceInProgress(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val heartbeatResult = heartbeat(groupId, assignedMemberId, 1)
assertEquals(Errors.NONE, heartbeatResult)
}
@Test
def testHeartbeatIllegalGeneration(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val heartbeatResult = heartbeat(groupId, assignedMemberId, 2)
assertEquals(Errors.ILLEGAL_GENERATION, heartbeatResult)
}
@Test
def testValidHeartbeat(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedConsumerId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1)
assertEquals(Errors.NONE, heartbeatResult)
}
@Test
def testSessionTimeout(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedConsumerId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
when(replicaManager.getPartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)))
.thenReturn(HostedPartition.None)
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
timer.advanceClock(DefaultSessionTimeout + 100)
val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1)
assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult)
}
@Test
def testHeartbeatMaintainsSession(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val sessionTimeout = 1000
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols,
rebalanceTimeout = sessionTimeout, sessionTimeout = sessionTimeout)
val assignedConsumerId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
timer.advanceClock(sessionTimeout / 2)
var heartbeatResult = heartbeat(groupId, assignedConsumerId, 1)
assertEquals(Errors.NONE, heartbeatResult)
timer.advanceClock(sessionTimeout / 2 + 100)
heartbeatResult = heartbeat(groupId, assignedConsumerId, 1)
assertEquals(Errors.NONE, heartbeatResult)
}
@Test
def testCommitMaintainsSession(): Unit = {
val sessionTimeout = 1000
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols,
rebalanceTimeout = sessionTimeout, sessionTimeout = sessionTimeout)
val assignedMemberId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
timer.advanceClock(sessionTimeout / 2)
val commitOffsetResult = commitOffsets(groupId, assignedMemberId, generationId, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
timer.advanceClock(sessionTimeout / 2 + 100)
val heartbeatResult = heartbeat(groupId, assignedMemberId, 1)
assertEquals(Errors.NONE, heartbeatResult)
}
@Test
def testSessionTimeoutDuringRebalance(): Unit = {
// create a group with a single member
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols,
rebalanceTimeout = 2000, sessionTimeout = 1000)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
// now have a new member join to trigger a rebalance
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
timer.advanceClock(500)
var heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId)
assertEquals(Errors.REBALANCE_IN_PROGRESS, heartbeatResult)
// letting the session expire should make the member fall out of the group
timer.advanceClock(1100)
heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId)
assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult)
// and the rebalance should complete with only the new member
val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, otherJoinResult.error)
}
@Test
def testRebalanceCompletesBeforeMemberJoins(): Unit = {
// create a group with a single member
val firstJoinResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocols,
rebalanceTimeout = 1200, sessionTimeout = 1000)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
// now have a new member join to trigger a rebalance
val otherMemberSessionTimeout = DefaultSessionTimeout
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
    // send a couple of heartbeats to keep the member alive while the rebalance finishes
var expectedResultList = List(Errors.REBALANCE_IN_PROGRESS, Errors.REBALANCE_IN_PROGRESS)
for (expectedResult <- expectedResultList) {
timer.advanceClock(otherMemberSessionTimeout)
val heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId)
assertEquals(expectedResult, heartbeatResult)
}
// now timeout the rebalance
timer.advanceClock(otherMemberSessionTimeout)
val otherJoinResult = await(otherJoinFuture, otherMemberSessionTimeout+100)
val otherMemberId = otherJoinResult.memberId
val otherGenerationId = otherJoinResult.generationId
val syncResult = syncGroupLeader(groupId, otherGenerationId, otherMemberId, Map(otherMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncResult.error)
    // the unjoined static member should remain in the group until its session times out.
assertEquals(Errors.NONE, otherJoinResult.error)
var heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId)
assertEquals(Errors.ILLEGAL_GENERATION, heartbeatResult)
expectedResultList = List(Errors.NONE, Errors.NONE, Errors.REBALANCE_IN_PROGRESS)
    // now let the unjoined member's session time out, while still keeping the new member.
for (expectedResult <- expectedResultList) {
timer.advanceClock(otherMemberSessionTimeout)
heartbeatResult = heartbeat(groupId, otherMemberId, otherGenerationId)
assertEquals(expectedResult, heartbeatResult)
}
val otherRejoinGroupFuture = sendJoinGroup(groupId, otherMemberId, protocolType, protocols)
val otherReJoinResult = await(otherRejoinGroupFuture, otherMemberSessionTimeout+100)
assertEquals(Errors.NONE, otherReJoinResult.error)
val otherRejoinGenerationId = otherReJoinResult.generationId
val reSyncResult = syncGroupLeader(groupId, otherRejoinGenerationId, otherMemberId, Map(otherMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, reSyncResult.error)
    // the joined member should get a heartbeat response with no error. Let the new member keep heartbeating for a while
// to verify that no new rebalance is triggered unexpectedly
for ( _ <- 1 to 20) {
timer.advanceClock(500)
heartbeatResult = heartbeat(groupId, otherMemberId, otherRejoinGenerationId)
assertEquals(Errors.NONE, heartbeatResult)
}
}
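  // A leader SyncGroup carrying an empty assignment map should still succeed, returning an empty
  // assignment and leaving the member able to heartbeat.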
@Test
def testSyncGroupEmptyAssignment(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedConsumerId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map())
assertEquals(Errors.NONE, syncGroupResult.error)
assertTrue(syncGroupResult.memberAssignment.isEmpty)
val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1)
assertEquals(Errors.NONE, heartbeatResult)
}
@Test
def testSyncGroupNotCoordinator(): Unit = {
val generation = 1
val syncGroupResult = syncGroupFollower(otherGroupId, generation, memberId)
assertEquals(Errors.NOT_COORDINATOR, syncGroupResult.error)
}
@Test
def testSyncGroupFromUnknownGroup(): Unit = {
val syncGroupResult = syncGroupFollower(groupId, 1, memberId)
assertEquals(Errors.UNKNOWN_MEMBER_ID, syncGroupResult.error)
}
@Test
def testSyncGroupFromUnknownMember(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedConsumerId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
assertEquals(Errors.NONE, joinGroupResult.error)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]()))
val syncGroupError = syncGroupResult.error
assertEquals(Errors.NONE, syncGroupError)
val unknownMemberId = "blah"
val unknownMemberSyncResult = syncGroupFollower(groupId, generationId, unknownMemberId)
assertEquals(Errors.UNKNOWN_MEMBER_ID, unknownMemberSyncResult.error)
}
@Test
def testSyncGroupFromIllegalGeneration(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedConsumerId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
assertEquals(Errors.NONE, joinGroupResult.error)
// send the sync group with an invalid generation
val syncGroupResult = syncGroupLeader(groupId, generationId+1, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]()))
assertEquals(Errors.ILLEGAL_GENERATION, syncGroupResult.error)
}
@Test
def testJoinGroupFromUnchangedFollowerDoesNotRebalance(): Unit = {
// to get a group of two members:
// 1. join and sync with a single member (because we can't immediately join with two members)
// 2. join and sync with the first member and a new member
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols)
val joinResult = await(joinFuture, DefaultSessionTimeout+100)
val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, joinResult.error)
assertEquals(Errors.NONE, otherJoinResult.error)
assertTrue(joinResult.generationId == otherJoinResult.generationId)
assertEquals(firstMemberId, joinResult.leaderId)
assertEquals(firstMemberId, otherJoinResult.leaderId)
val nextGenerationId = joinResult.generationId
// this shouldn't cause a rebalance since protocol information hasn't changed
val followerJoinResult = await(sendJoinGroup(groupId, otherJoinResult.memberId, protocolType, protocols), 1)
assertEquals(Errors.NONE, followerJoinResult.error)
assertEquals(nextGenerationId, followerJoinResult.generationId)
}
@Test
def testJoinGroupFromUnchangedLeaderShouldRebalance(): Unit = {
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
    // a JoinGroup from the leader should force the group to rebalance, which allows the
    // leader to push new assignments when local metadata changes
val secondJoinResult = await(sendJoinGroup(groupId, firstMemberId, protocolType, protocols), 1)
assertEquals(Errors.NONE, secondJoinResult.error)
assertNotEquals(firstGenerationId, secondJoinResult.generationId)
}
  /**
   * Tests that the following scenario completes a rebalance correctly: a new member starts a JoinGroup request with
   * an UNKNOWN_MEMBER_ID, attempting to join a stable group, but never initiates the second JoinGroup request with
   * the provided member id and times out. The test checks that the original member remains the sole member in this
   * group, which should remain stable throughout the test.
   */
@Test
def testSecondMemberPartiallyJoinAndTimeout(): Unit = {
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
    // complete the initial rebalance by syncing as the group leader
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
timer.advanceClock(100)
assertEquals(Set(firstMemberId), groupCoordinator.groupManager.getGroup(groupId).get.allMembers)
assertEquals(groupCoordinator.groupManager.getGroup(groupId).get.allMembers,
groupCoordinator.groupManager.getGroup(groupId).get.allDynamicMembers)
assertEquals(0, groupCoordinator.groupManager.getGroup(groupId).get.numPending)
val group = groupCoordinator.groupManager.getGroup(groupId).get
    // ensure the group is stable before a new member initiates a join request
assertEquals(Stable, group.currentState)
// new member initiates join group
val secondJoinResult = joinGroupPartial(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
assertEquals(Errors.MEMBER_ID_REQUIRED, secondJoinResult.error)
assertEquals(1, group.numPending)
assertEquals(Stable, group.currentState)
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
// advance clock to timeout the pending member
assertEquals(Set(firstMemberId), group.allMembers)
assertEquals(1, group.numPending)
timer.advanceClock(300)
// original (firstMember) member sends heartbeats to prevent session timeouts.
val heartbeatResult = heartbeat(groupId, firstMemberId, 1)
assertEquals(Errors.NONE, heartbeatResult)
// timeout the pending member
timer.advanceClock(300)
// at this point the second member should have been removed from pending list (session timeout),
// and the group should be in Stable state with only the first member in it.
assertEquals(Set(firstMemberId), group.allMembers)
assertEquals(0, group.numPending)
assertEquals(Stable, group.currentState)
assertTrue(group.has(firstMemberId))
}
  /**
   * Create a group with two members in Stable state. Create a third pending member by completing its first JoinGroup
   * request without a member id.
   */
private def setupGroupWithPendingMember(): JoinGroupResult = {
// add the first member
val joinResult1 = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
assertGroupState(groupState = CompletingRebalance)
    // complete the rebalance so the group becomes stable, with the one member that joined above
val firstSyncResult = syncGroupLeader(groupId, joinResult1.generationId, joinResult1.memberId, Map(joinResult1.memberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
assertGroupState(groupState = Stable)
// start the join for the second member
val secondJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
    // rejoin the first member into the group
val firstJoinFuture = sendJoinGroup(groupId, joinResult1.memberId, protocolType, protocols)
val firstMemberJoinResult = await(firstJoinFuture, DefaultSessionTimeout+100)
val secondMemberJoinResult = await(secondJoinFuture, DefaultSessionTimeout+100)
assertGroupState(groupState = CompletingRebalance)
// stabilize the group
val secondSyncResult = syncGroupLeader(groupId, firstMemberJoinResult.generationId, joinResult1.memberId, Map(joinResult1.memberId -> Array[Byte]()))
assertEquals(Errors.NONE, secondSyncResult.error)
assertGroupState(groupState = Stable)
// re-join an existing member, to transition the group to PreparingRebalance state.
sendJoinGroup(groupId, firstMemberJoinResult.memberId, protocolType, protocols)
assertGroupState(groupState = PreparingRebalance)
// create a pending member in the group
val pendingMember = joinGroupPartial(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, sessionTimeout=100)
assertEquals(1, groupCoordinator.groupManager.getGroup(groupId).get.numPending)
// re-join the second existing member
sendJoinGroup(groupId, secondMemberJoinResult.memberId, protocolType, protocols)
assertGroupState(groupState = PreparingRebalance)
assertEquals(1, groupCoordinator.groupManager.getGroup(groupId).get.numPending)
pendingMember
}
  /**
   * Set up a group with a pending member. The test checks that the pending member joining completes the
   * rebalance operation.
   */
@Test
def testJoinGroupCompletionWhenPendingMemberJoins(): Unit = {
val pendingMember = setupGroupWithPendingMember()
    // complete the join group for the pending member
val pendingMemberJoinFuture = sendJoinGroup(groupId, pendingMember.memberId, protocolType, protocols)
await(pendingMemberJoinFuture, DefaultSessionTimeout+100)
assertGroupState(groupState = CompletingRebalance)
assertEquals(3, group().allMembers.size)
assertEquals(0, group().numPending)
}
  /**
   * Set up a group with a pending member. The test checks that the timeout of the pending member will
   * cause the group to return to a CompletingRebalance state.
   */
@Test
def testJoinGroupCompletionWhenPendingMemberTimesOut(): Unit = {
setupGroupWithPendingMember()
    // Advancing the clock by > 100 (the pending member's session timeout)
    // and < 500 (the session timeout of the first and second members). This will force the coordinator to attempt
    // join completion on heartbeat expiration (since we are in the PreparingRebalance stage).
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
timer.advanceClock(120)
assertGroupState(groupState = CompletingRebalance)
assertEquals(2, group().allMembers.size)
assertEquals(0, group().numPending)
}
@Test
def testPendingMembersLeavesGroup(): Unit = {
val pending = setupGroupWithPendingMember()
val leaveGroupResults = singleLeaveGroup(groupId, pending.memberId)
verifyLeaveGroupResult(leaveGroupResults)
assertGroupState(groupState = CompletingRebalance)
assertEquals(2, group().allMembers.size)
assertEquals(2, group().allDynamicMembers.size)
assertEquals(0, group().numPending)
}
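  // Helper: heartbeat on behalf of the given join result's member and generation, asserting the expected error.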
private def verifyHeartbeat(
joinGroupResult: JoinGroupResult,
expectedError: Errors
): Unit = {
val heartbeatResult = heartbeat(
groupId,
joinGroupResult.memberId,
joinGroupResult.generationId
)
assertEquals(expectedError, heartbeatResult)
}
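  // Helper: run the two-step join (known member id required) for nbMembers dynamic members and return their
  // completed join results once the initial rebalance delay and the rebalance timeout have elapsed.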
private def joinWithNMembers(nbMembers: Int): Seq[JoinGroupResult] = {
val requiredKnownMemberId = true
// First JoinRequests
var futures = 1.to(nbMembers).map { _ =>
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
// Get back the assigned member ids
val memberIds = futures.map(await(_, 1).memberId)
// Second JoinRequests
futures = memberIds.map { memberId =>
sendJoinGroup(groupId, memberId, protocolType, protocols,
None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId)
}
timer.advanceClock(GroupInitialRebalanceDelay + 1)
timer.advanceClock(DefaultRebalanceTimeout + 1)
futures.map(await(_, 1))
}
@Test
def testRebalanceTimesOutWhenSyncRequestIsNotReceived(): Unit = {
    // This test case ensures that the DelayedSync does kick out all members
    // if they don't send a sync request before the rebalance timeout. The
    // group is in the CompletingRebalance state in this case.
val results = joinWithNMembers(nbMembers = 3)
assertEquals(Set(Errors.NONE), results.map(_.error).toSet)
// Advance time
timer.advanceClock(DefaultRebalanceTimeout / 2)
// Heartbeats to ensure that heartbeating does not interfere with the
// delayed sync operation.
results.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.NONE)
}
    // Advance past the rebalance timeout to trigger the delayed operation.
when(replicaManager.getMagic(any[TopicPartition]))
.thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
timer.advanceClock(DefaultRebalanceTimeout / 2 + 1)
// Heartbeats fail because none of the members have sent the sync request
results.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.UNKNOWN_MEMBER_ID)
}
}
@Test
def testRebalanceTimesOutWhenSyncRequestIsNotReceivedFromFollowers(): Unit = {
    // This test case ensures that the DelayedSync does kick out the followers
    // if they don't send a sync request before the rebalance timeout. The
    // group is in the Stable state in this case.
val results = joinWithNMembers(nbMembers = 3)
assertEquals(Set(Errors.NONE), results.map(_.error).toSet)
// Advance time
timer.advanceClock(DefaultRebalanceTimeout / 2)
// Heartbeats to ensure that heartbeating does not interfere with the
// delayed sync operation.
results.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.NONE)
}
// Leader sends Sync
val assignments = results.map(result => result.memberId -> Array.empty[Byte]).toMap
val leaderResult = sendSyncGroupLeader(groupId, results.head.generationId, results.head.memberId,
Some(protocolType), Some(protocolName), None, assignments)
assertEquals(Errors.NONE, await(leaderResult, 1).error)
    // Leader should be able to heartbeat
verifyHeartbeat(results.head, Errors.NONE)
    // Advance past the rebalance timeout to trigger the delayed operation.
timer.advanceClock(DefaultRebalanceTimeout / 2 + 1)
    // Leader should be able to heartbeat
verifyHeartbeat(results.head, Errors.REBALANCE_IN_PROGRESS)
// Followers should have been removed.
results.tail.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.UNKNOWN_MEMBER_ID)
}
}
@Test
def testRebalanceTimesOutWhenSyncRequestIsNotReceivedFromLeaders(): Unit = {
    // This test case ensures that the DelayedSync does kick out the leader
    // if it does not send a sync request before the rebalance timeout. The
    // group is in the CompletingRebalance state in this case.
val results = joinWithNMembers(nbMembers = 3)
assertEquals(Set(Errors.NONE), results.map(_.error).toSet)
// Advance time
timer.advanceClock(DefaultRebalanceTimeout / 2)
// Heartbeats to ensure that heartbeating does not interfere with the
// delayed sync operation.
results.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.NONE)
}
// Followers send Sync
val followerResults = results.tail.map { joinGroupResult =>
sendSyncGroupFollower(groupId, joinGroupResult.generationId, joinGroupResult.memberId,
Some(protocolType), Some(protocolName), None)
}
    // Advance past the rebalance timeout to trigger the delayed operation.
timer.advanceClock(DefaultRebalanceTimeout / 2 + 1)
val followerErrors = followerResults.map(await(_, 1).error)
assertEquals(Set(Errors.REBALANCE_IN_PROGRESS), followerErrors.toSet)
// Leader should have been removed.
verifyHeartbeat(results.head, Errors.UNKNOWN_MEMBER_ID)
// Followers should be able to heartbeat.
results.tail.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.REBALANCE_IN_PROGRESS)
}
}
@Test
def testRebalanceDoesNotTimeOutWhenAllSyncAreReceived(): Unit = {
    // This test case ensures that the DelayedSync does not kick any
    // members out when they have all sent their sync requests.
val results = joinWithNMembers(nbMembers = 3)
assertEquals(Set(Errors.NONE), results.map(_.error).toSet)
// Advance time
timer.advanceClock(DefaultRebalanceTimeout / 2)
// Heartbeats to ensure that heartbeating does not interfere with the
// delayed sync operation.
results.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.NONE)
}
val assignments = results.map(result => result.memberId -> Array.empty[Byte]).toMap
val leaderResult = sendSyncGroupLeader(groupId, results.head.generationId, results.head.memberId,
Some(protocolType), Some(protocolName), None, assignments)
assertEquals(Errors.NONE, await(leaderResult, 1).error)
// Followers send Sync
val followerResults = results.tail.map { joinGroupResult =>
sendSyncGroupFollower(groupId, joinGroupResult.generationId, joinGroupResult.memberId,
Some(protocolType), Some(protocolName), None)
}
val followerErrors = followerResults.map(await(_, 1).error)
assertEquals(Set(Errors.NONE), followerErrors.toSet)
    // Advance past the rebalance timeout to expire the Sync timeout. All
    // members should remain and the group should not rebalance.
timer.advanceClock(DefaultRebalanceTimeout / 2 + 1)
// Followers should be able to heartbeat.
results.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.NONE)
}
// Advance a bit more.
timer.advanceClock(DefaultRebalanceTimeout / 2)
// Followers should be able to heartbeat.
results.foreach { joinGroupResult =>
verifyHeartbeat(joinGroupResult, Errors.NONE)
}
}
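  // Helper: look up the group in the group manager, returning null if it does not exist.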
private def group(groupId: String = groupId) = {
groupCoordinator.groupManager.getGroup(groupId) match {
case Some(g) => g
case None => null
}
}
private def assertGroupState(groupId: String = groupId,
groupState: GroupState): Unit = {
groupCoordinator.groupManager.getGroup(groupId) match {
case Some(group) => assertEquals(groupState, group.currentState)
case None => fail(s"Group $groupId not found in coordinator")
}
}
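  // Helper: send a single JoinGroup with requireKnownMemberId enabled and await the response; with an unknown
  // member id this returns MEMBER_ID_REQUIRED and leaves the member in the pending state.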
private def joinGroupPartial(groupId: String,
memberId: String,
protocolType: String,
protocols: List[(String, Array[Byte])],
sessionTimeout: Int = DefaultSessionTimeout,
rebalanceTimeout: Int = DefaultRebalanceTimeout): JoinGroupResult = {
val requireKnownMemberId = true
val responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, None, sessionTimeout, rebalanceTimeout, requireKnownMemberId)
Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS))
}
@Test
def testLeaderFailureInSyncGroup(): Unit = {
// to get a group of two members:
// 1. join and sync with a single member (because we can't immediately join with two members)
// 2. join and sync with the first member and a new member
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols)
val joinResult = await(joinFuture, DefaultSessionTimeout+100)
val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, joinResult.error)
assertEquals(Errors.NONE, otherJoinResult.error)
assertTrue(joinResult.generationId == otherJoinResult.generationId)
assertEquals(firstMemberId, joinResult.leaderId)
assertEquals(firstMemberId, otherJoinResult.leaderId)
val nextGenerationId = joinResult.generationId
// with no leader SyncGroup, the follower's request should fail with an error indicating
// that it should rejoin
val followerSyncFuture = sendSyncGroupFollower(groupId, nextGenerationId, otherJoinResult.memberId, None, None, None)
timer.advanceClock(DefaultSessionTimeout + 100)
val followerSyncResult = await(followerSyncFuture, DefaultSessionTimeout+100)
assertEquals(Errors.REBALANCE_IN_PROGRESS, followerSyncResult.error)
}
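  // A follower SyncGroup sent after the leader has already provided assignments should return the
  // follower's assignment immediately.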
@Test
def testSyncGroupFollowerAfterLeader(): Unit = {
// to get a group of two members:
// 1. join and sync with a single member (because we can't immediately join with two members)
// 2. join and sync with the first member and a new member
val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = firstJoinResult.memberId
val firstGenerationId = firstJoinResult.generationId
assertEquals(firstMemberId, firstJoinResult.leaderId)
assertEquals(Errors.NONE, firstJoinResult.error)
val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, firstSyncResult.error)
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols)
val joinResult = await(joinFuture, DefaultSessionTimeout+100)
val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, joinResult.error)
assertEquals(Errors.NONE, otherJoinResult.error)
assertTrue(joinResult.generationId == otherJoinResult.generationId)
assertEquals(firstMemberId, joinResult.leaderId)
assertEquals(firstMemberId, otherJoinResult.leaderId)
val nextGenerationId = joinResult.generationId
val leaderId = firstMemberId
val leaderAssignment = Array[Byte](0)
val followerId = otherJoinResult.memberId
val followerAssignment = Array[Byte](1)
val leaderSyncResult = syncGroupLeader(groupId, nextGenerationId, leaderId,
Map(leaderId -> leaderAssignment, followerId -> followerAssignment))
assertEquals(Errors.NONE, leaderSyncResult.error)
assertEquals(leaderAssignment, leaderSyncResult.memberAssignment)
val followerSyncResult = syncGroupFollower(groupId, nextGenerationId, otherJoinResult.memberId)
assertEquals(Errors.NONE, followerSyncResult.error)
assertEquals(followerAssignment, followerSyncResult.memberAssignment)
}
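  // A follower SyncGroup sent before the leader's should be parked and then completed with the
  // follower's assignment once the leader provides the assignments.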
@Test
def testSyncGroupLeaderAfterFollower(): Unit = {
// to get a group of two members:
// 1. join and sync with a single member (because we can't immediately join with two members)
// 2. join and sync with the first member and a new member
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val firstMemberId = joinGroupResult.memberId
val firstGenerationId = joinGroupResult.generationId
assertEquals(firstMemberId, joinGroupResult.leaderId)
assertEquals(Errors.NONE, joinGroupResult.error)
val syncGroupResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols)
val joinResult = await(joinFuture, DefaultSessionTimeout+100)
val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, joinResult.error)
assertEquals(Errors.NONE, otherJoinResult.error)
assertTrue(joinResult.generationId == otherJoinResult.generationId)
val nextGenerationId = joinResult.generationId
val leaderId = joinResult.leaderId
val leaderAssignment = Array[Byte](0)
val followerId = otherJoinResult.memberId
val followerAssignment = Array[Byte](1)
assertEquals(firstMemberId, joinResult.leaderId)
assertEquals(firstMemberId, otherJoinResult.leaderId)
val followerSyncFuture = sendSyncGroupFollower(groupId, nextGenerationId, followerId, None, None, None)
val leaderSyncResult = syncGroupLeader(groupId, nextGenerationId, leaderId,
Map(leaderId -> leaderAssignment, followerId -> followerAssignment))
assertEquals(Errors.NONE, leaderSyncResult.error)
assertEquals(leaderAssignment, leaderSyncResult.memberAssignment)
val followerSyncResult = await(followerSyncFuture, DefaultSessionTimeout+100)
assertEquals(Errors.NONE, followerSyncResult.error)
assertEquals(followerAssignment, followerSyncResult.memberAssignment)
}
@Test
def testCommitOffsetFromUnknownGroup(): Unit = {
val generationId = 1
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val commitOffsetResult = commitOffsets(groupId, memberId, generationId, Map(tp -> offset))
assertEquals(Errors.ILLEGAL_GENERATION, commitOffsetResult(tp))
}
@Test
def testCommitOffsetWithDefaultGeneration(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID,
OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
}
@Test
def testCommitOffsetsAfterGroupIsEmpty(): Unit = {
// Tests the scenario where the reset offset tool modifies the offsets
// of a group after it becomes empty
// A group member joins
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
// and leaves.
val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId)
verifyLeaveGroupResult(leaveGroupResults)
    // The simple offset commit should still succeed for the now-empty group
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID,
OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, error)
assertEquals(Some(0), partitionData.get(tp).map(_.offset))
}
@Test
def testFetchOffsets(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = 97L
val metadata = "some metadata"
val leaderEpoch = Optional.of[Integer](15)
val offsetAndMetadata = OffsetAndMetadata(offset, leaderEpoch, metadata, timer.time.milliseconds(), None)
val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID,
OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tp -> offsetAndMetadata))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, error)
val maybePartitionData = partitionData.get(tp)
assertTrue(maybePartitionData.isDefined)
assertEquals(offset, maybePartitionData.get.offset)
assertEquals(metadata, maybePartitionData.get.metadata)
assertEquals(leaderEpoch, maybePartitionData.get.leaderEpoch)
}
@Test
def testCommitAndFetchOffsetsWithEmptyGroup(): Unit = {
// For backwards compatibility, the coordinator supports committing/fetching offsets with an empty groupId.
// To allow inspection and removal of the empty group, we must also support DescribeGroups and DeleteGroups
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val groupId = ""
val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID,
OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val (fetchError, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, fetchError)
assertEquals(Some(0), partitionData.get(tp).map(_.offset))
val (describeError, summary) = groupCoordinator.handleDescribeGroup(groupId)
assertEquals(Errors.NONE, describeError)
assertEquals(Empty.toString, summary.state)
val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
val partition: Partition = mock(classOf[Partition])
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition))
when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition))
val deleteErrors = groupCoordinator.handleDeleteGroups(Set(groupId))
assertEquals(Errors.NONE, deleteErrors(groupId))
val (err, data) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, err)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), data.get(tp).map(_.offset))
}
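  // Transactional offset commits should not be visible to offset fetches until the transaction's
  // commit marker has been received for the corresponding __consumer_offsets partition.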
@Test
def testBasicFetchTxnOffsets(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
    // Validate that the offset isn't materialized yet.
assertEquals(Errors.NONE, error)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tp).map(_.offset))
val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
// Send commit marker.
handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.COMMIT)
// Validate that committed offset is materialized.
val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, secondReqError)
assertEquals(Some(0), secondReqPartitionData.get(tp).map(_.offset))
}
@Test
def testFetchTxnOffsetsWithAbort(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, error)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tp).map(_.offset))
val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
// Validate that the pending commit is discarded.
handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.ABORT)
val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, secondReqError)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), secondReqPartitionData.get(tp).map(_.offset))
}
@Test
def testFetchPendingTxnOffsetsWithAbort(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val nonExistTp = new TopicPartition("non-exist-topic", 0)
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp, nonExistTp)))
assertEquals(Errors.NONE, error)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tp).map(_.offset))
assertEquals(Some(Errors.UNSTABLE_OFFSET_COMMIT), partitionData.get(tp).map(_.error))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(nonExistTp).map(_.offset))
assertEquals(Some(Errors.NONE), partitionData.get(nonExistTp).map(_.error))
val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
// Validate that the pending commit is discarded.
handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.ABORT)
val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, secondReqError)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), secondReqPartitionData.get(tp).map(_.offset))
assertEquals(Some(Errors.NONE), secondReqPartitionData.get(tp).map(_.error))
}
@Test
def testFetchPendingTxnOffsetsWithCommit(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(25)
val producerId = 1000L
val producerEpoch : Short = 2
val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, error)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tp).map(_.offset))
assertEquals(Some(Errors.UNSTABLE_OFFSET_COMMIT), partitionData.get(tp).map(_.error))
val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
// Validate that the pending commit is committed
handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.COMMIT)
val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, secondReqError)
assertEquals(Some(25), secondReqPartitionData.get(tp).map(_.offset))
assertEquals(Some(Errors.NONE), secondReqPartitionData.get(tp).map(_.error))
}
@Test
def testFetchTxnOffsetsIgnoreSpuriousCommit(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, error)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tp).map(_.offset))
val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.ABORT)
val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, secondReqError)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), secondReqPartitionData.get(tp).map(_.offset))
// Ignore spurious commit.
handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.COMMIT)
val (thirdReqError, thirdReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, thirdReqError)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), thirdReqPartitionData.get(tp).map(_.offset))
}
@Test
def testFetchTxnOffsetsOneProducerMultipleGroups(): Unit = {
    // One producer, two groups located on separate offsets topic partitions.
    // Both groups have pending offset commits.
    // A marker for only one partition is received; that commit should be materialized while the other should not.
val partitions = List(new TopicPartition("topic1", 0), new TopicPartition("topic2", 0))
val offsets = List(offsetAndMetadata(10), offsetAndMetadata(15))
val producerId = 1000L
val producerEpoch: Short = 3
val groupIds = List(groupId, otherGroupId)
val offsetTopicPartitions = List(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId)),
new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(otherGroupId)))
groupCoordinator.groupManager.addPartitionOwnership(offsetTopicPartitions(1).partition)
val errors = mutable.ArrayBuffer[Errors]()
val partitionData = mutable.ArrayBuffer[scala.collection.Map[TopicPartition, OffsetFetchResponse.PartitionData]]()
val commitOffsetResults = mutable.ArrayBuffer[CommitOffsetCallbackParams]()
// Ensure that the two groups map to different partitions.
assertNotEquals(offsetTopicPartitions(0), offsetTopicPartitions(1))
commitOffsetResults.append(commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(partitions(0) -> offsets(0))))
assertEquals(Errors.NONE, commitOffsetResults(0)(partitions(0)))
commitOffsetResults.append(commitTransactionalOffsets(otherGroupId, producerId, producerEpoch, Map(partitions(1) -> offsets(1))))
assertEquals(Errors.NONE, commitOffsetResults(1)(partitions(1)))
    // We got a commit for only one __consumer_offsets partition. We should only materialize its group's offsets.
handleTxnCompletion(producerId, List(offsetTopicPartitions(0)), TransactionResult.COMMIT)
groupCoordinator.handleFetchOffsets(groupIds(0), requireStable, Some(partitions)) match {
case (error, partData) =>
errors.append(error)
partitionData.append(partData)
case _ =>
}
groupCoordinator.handleFetchOffsets(groupIds(1), requireStable, Some(partitions)) match {
case (error, partData) =>
errors.append(error)
partitionData.append(partData)
case _ =>
}
assertEquals(2, errors.size)
assertEquals(Errors.NONE, errors(0))
assertEquals(Errors.NONE, errors(1))
// Exactly one offset commit should have been materialized.
assertEquals(Some(offsets(0).offset), partitionData(0).get(partitions(0)).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(0).get(partitions(1)).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(1).get(partitions(0)).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(1).get(partitions(1)).map(_.offset))
// Now we receive the other marker.
handleTxnCompletion(producerId, List(offsetTopicPartitions(1)), TransactionResult.COMMIT)
errors.clear()
partitionData.clear()
groupCoordinator.handleFetchOffsets(groupIds(0), requireStable, Some(partitions)) match {
case (error, partData) =>
errors.append(error)
partitionData.append(partData)
case _ =>
}
groupCoordinator.handleFetchOffsets(groupIds(1), requireStable, Some(partitions)) match {
case (error, partData) =>
errors.append(error)
partitionData.append(partData)
case _ =>
}
// Two offsets should have been materialized
assertEquals(Some(offsets(0).offset), partitionData(0).get(partitions(0)).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(0).get(partitions(1)).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(1).get(partitions(0)).map(_.offset))
assertEquals(Some(offsets(1).offset), partitionData(1).get(partitions(1)).map(_.offset))
}
@Test
def testFetchTxnOffsetsMultipleProducersOneGroup(): Unit = {
// One group, two producers
// Different producers will commit offsets for different partitions.
// Each partition's offsets should be materialized when the corresponding producer's marker is received.
val partitions = List(new TopicPartition("topic1", 0), new TopicPartition("topic2", 0))
val offsets = List(offsetAndMetadata(10), offsetAndMetadata(15))
val producerIds = List(1000L, 1005L)
val producerEpochs: Seq[Short] = List(3, 4)
val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId))
val errors = mutable.ArrayBuffer[Errors]()
val partitionData = mutable.ArrayBuffer[scala.collection.Map[TopicPartition, OffsetFetchResponse.PartitionData]]()
val commitOffsetResults = mutable.ArrayBuffer[CommitOffsetCallbackParams]()
// producer0 commits the offsets for partition0
commitOffsetResults.append(commitTransactionalOffsets(groupId, producerIds(0), producerEpochs(0), Map(partitions(0) -> offsets(0))))
assertEquals(Errors.NONE, commitOffsetResults(0)(partitions(0)))
// producer1 commits the offsets for partition1
commitOffsetResults.append(commitTransactionalOffsets(groupId, producerIds(1), producerEpochs(1), Map(partitions(1) -> offsets(1))))
assertEquals(Errors.NONE, commitOffsetResults(1)(partitions(1)))
// producer0 commits its transaction.
handleTxnCompletion(producerIds(0), List(offsetTopicPartition), TransactionResult.COMMIT)
groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(partitions)) match {
case (error, partData) =>
errors.append(error)
partitionData.append(partData)
case _ =>
}
assertEquals(Errors.NONE, errors(0))
// We should only see the offset commit for producer0
assertEquals(Some(offsets(0).offset), partitionData(0).get(partitions(0)).map(_.offset))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(0).get(partitions(1)).map(_.offset))
// producer1 now commits its transaction.
handleTxnCompletion(producerIds(1), List(offsetTopicPartition), TransactionResult.COMMIT)
groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(partitions)) match {
case (error, partData) =>
errors.append(error)
partitionData.append(partData)
case _ =>
}
assertEquals(Errors.NONE, errors(1))
// We should now see the offset commits for both producers.
assertEquals(Some(offsets(0).offset), partitionData(1).get(partitions(0)).map(_.offset))
assertEquals(Some(offsets(1).offset), partitionData(1).get(partitions(1)).map(_.offset))
}
@Test
def testFetchOffsetForUnknownPartition(): Unit = {
val tp = new TopicPartition("topic", 0)
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NONE, error)
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tp).map(_.offset))
}
@Test
def testFetchOffsetNotCoordinatorForGroup(): Unit = {
val tp = new TopicPartition("topic", 0)
val (error, partitionData) = groupCoordinator.handleFetchOffsets(otherGroupId, requireStable, Some(Seq(tp)))
assertEquals(Errors.NOT_COORDINATOR, error)
assertTrue(partitionData.isEmpty)
}
@Test
def testFetchAllOffsets(): Unit = {
val tp1 = new TopicPartition("topic", 0)
val tp2 = new TopicPartition("topic", 1)
val tp3 = new TopicPartition("other-topic", 0)
val offset1 = offsetAndMetadata(15)
val offset2 = offsetAndMetadata(16)
val offset3 = offsetAndMetadata(17)
assertEquals((Errors.NONE, Map.empty), groupCoordinator.handleFetchOffsets(groupId, requireStable))
val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID,
OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tp1 -> offset1, tp2 -> offset2, tp3 -> offset3))
assertEquals(Errors.NONE, commitOffsetResult(tp1))
assertEquals(Errors.NONE, commitOffsetResult(tp2))
assertEquals(Errors.NONE, commitOffsetResult(tp3))
val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable)
assertEquals(Errors.NONE, error)
assertEquals(3, partitionData.size)
assertTrue(partitionData.forall(_._2.error == Errors.NONE))
assertEquals(Some(offset1.offset), partitionData.get(tp1).map(_.offset))
assertEquals(Some(offset2.offset), partitionData.get(tp2).map(_.offset))
assertEquals(Some(offset3.offset), partitionData.get(tp3).map(_.offset))
}
@Test
def testCommitOffsetInCompletingRebalance(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val commitOffsetResult = commitOffsets(groupId, assignedMemberId, generationId, Map(tp -> offset))
assertEquals(Errors.REBALANCE_IN_PROGRESS, commitOffsetResult(tp))
}
@Test
def testCommitOffsetInCompletingRebalanceFromUnknownMemberId(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val commitOffsetResult = commitOffsets(groupId, memberId, generationId, Map(tp -> offset))
assertEquals(Errors.UNKNOWN_MEMBER_ID, commitOffsetResult(tp))
}
@Test
def testCommitOffsetInCompletingRebalanceFromIllegalGeneration(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val commitOffsetResult = commitOffsets(groupId, assignedMemberId, generationId + 1, Map(tp -> offset))
assertEquals(Errors.ILLEGAL_GENERATION, commitOffsetResult(tp))
}
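  // Transactional offset commits carrying a static group.instance.id must also carry the matching
  // member id; otherwise they are rejected with FENCED_INSTANCE_ID.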
@Test
def testTxnCommitOffsetWithFencedInstanceId(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val leaderNoMemberIdCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch,
Map(tp -> offset), memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId = Some(leaderInstanceId))
assertEquals(Errors.FENCED_INSTANCE_ID, leaderNoMemberIdCommitOffsetResult(tp))
val leaderInvalidMemberIdCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch,
Map(tp -> offset), memberId = "invalid-member", groupInstanceId = Some(leaderInstanceId))
assertEquals(Errors.FENCED_INSTANCE_ID, leaderInvalidMemberIdCommitOffsetResult (tp))
val leaderCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch,
Map(tp -> offset), rebalanceResult.leaderId, Some(leaderInstanceId), rebalanceResult.generation)
assertEquals(Errors.NONE, leaderCommitOffsetResult (tp))
}
@Test
def testTxnCommitOffsetWithInvalidMemberId(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val invalidIdCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch,
Map(tp -> offset), "invalid-member")
assertEquals(Errors.UNKNOWN_MEMBER_ID, invalidIdCommitOffsetResult (tp))
}
@Test
def testTxnCommitOffsetWithKnownMemberId(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val assignedConsumerId = joinGroupResult.memberId
val leaderCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch,
Map(tp -> offset), assignedConsumerId, generationId = joinGroupResult.generationId)
assertEquals(Errors.NONE, leaderCommitOffsetResult (tp))
}
@Test
def testTxnCommitOffsetWithIllegalGeneration(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val assignedConsumerId = joinGroupResult.memberId
val initialGenerationId = joinGroupResult.generationId
val illegalGenerationCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch,
Map(tp -> offset), memberId = assignedConsumerId, generationId = initialGenerationId + 5)
assertEquals(Errors.ILLEGAL_GENERATION, illegalGenerationCommitOffsetResult(tp))
}
@Test
def testTxnCommitOffsetWithLegalGeneration(): Unit = {
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val producerId = 1000L
val producerEpoch : Short = 2
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val assignedConsumerId = joinGroupResult.memberId
val initialGenerationId = joinGroupResult.generationId
val leaderCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch,
Map(tp -> offset), memberId = assignedConsumerId, generationId = initialGenerationId)
assertEquals(Errors.NONE, leaderCommitOffsetResult (tp))
}
@Test
def testHeartbeatDuringRebalanceCausesRebalanceInProgress(): Unit = {
// First start up a group (with a slightly larger timeout to give us time to heartbeat when the rebalance starts)
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val assignedConsumerId = joinGroupResult.memberId
val initialGenerationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
// Then join with a new consumer to trigger a rebalance
sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
// We should be in the middle of a rebalance, so the heartbeat should return rebalance in progress
val heartbeatResult = heartbeat(groupId, assignedConsumerId, initialGenerationId)
assertEquals(Errors.REBALANCE_IN_PROGRESS, heartbeatResult)
}
@Test
def testGenerationIdIncrementsOnRebalance(): Unit = {
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val initialGenerationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
val memberId = joinGroupResult.memberId
assertEquals(1, initialGenerationId)
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, initialGenerationId, memberId, Map(memberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val joinGroupFuture = sendJoinGroup(groupId, memberId, protocolType, protocols)
val otherJoinGroupResult = await(joinGroupFuture, 1)
val nextGenerationId = otherJoinGroupResult.generationId
val otherJoinGroupError = otherJoinGroupResult.error
assertEquals(2, nextGenerationId)
assertEquals(Errors.NONE, otherJoinGroupError)
}
@Test
def testLeaveGroupWrongCoordinator(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val leaveGroupResults = singleLeaveGroup(otherGroupId, memberId)
verifyLeaveGroupResult(leaveGroupResults, Errors.NOT_COORDINATOR)
}
@Test
def testLeaveGroupUnknownGroup(): Unit = {
val leaveGroupResults = singleLeaveGroup(groupId, memberId)
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID))
}
@Test
def testLeaveGroupUnknownConsumerExistingGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val otherMemberId = "consumerId"
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val leaveGroupResults = singleLeaveGroup(groupId, otherMemberId)
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID))
}
@Test
def testSingleLeaveDeadGroup(): Unit = {
val deadGroupId = "deadGroupId"
groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime()))
val leaveGroupResults = singleLeaveGroup(deadGroupId, memberId)
verifyLeaveGroupResult(leaveGroupResults, Errors.COORDINATOR_NOT_AVAILABLE)
}
@Test
def testBatchLeaveDeadGroup(): Unit = {
val deadGroupId = "deadGroupId"
groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime()))
val leaveGroupResults = batchLeaveGroup(deadGroupId,
List(new MemberIdentity().setMemberId(memberId), new MemberIdentity().setMemberId(memberId)))
verifyLeaveGroupResult(leaveGroupResults, Errors.COORDINATOR_NOT_AVAILABLE)
}
@Test
def testValidLeaveGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId)
verifyLeaveGroupResult(leaveGroupResults)
}
@Test
def testLeaveGroupWithFencedInstanceId(): Unit = {
val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocolSuperset)
assertEquals(Errors.NONE, joinGroupResult.error)
val leaveGroupResults = singleLeaveGroup(groupId, "some_member", Some(leaderInstanceId))
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.FENCED_INSTANCE_ID))
}
@Test
def testLeaveGroupStaticMemberWithUnknownMemberId(): Unit = {
val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocolSuperset)
assertEquals(Errors.NONE, joinGroupResult.error)
    // Having an unknown member id will not affect the request processing.
val leaveGroupResults = singleLeaveGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Some(leaderInstanceId))
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.NONE))
}
@Test
def testStaticMembersValidBatchLeaveGroup(): Unit = {
staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity()
.setGroupInstanceId(leaderInstanceId), new MemberIdentity().setGroupInstanceId(followerInstanceId)))
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.NONE, Errors.NONE))
}
@Test
def testStaticMembersWrongCoordinatorBatchLeaveGroup(): Unit = {
staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val leaveGroupResults = batchLeaveGroup("invalid-group", List(new MemberIdentity()
.setGroupInstanceId(leaderInstanceId), new MemberIdentity().setGroupInstanceId(followerInstanceId)))
verifyLeaveGroupResult(leaveGroupResults, Errors.NOT_COORDINATOR)
}
@Test
def testStaticMembersUnknownGroupBatchLeaveGroup(): Unit = {
val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity()
.setGroupInstanceId(leaderInstanceId), new MemberIdentity().setGroupInstanceId(followerInstanceId)))
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID, Errors.UNKNOWN_MEMBER_ID))
}
@Test
def testStaticMembersFencedInstanceBatchLeaveGroup(): Unit = {
staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity()
.setGroupInstanceId(leaderInstanceId), new MemberIdentity()
.setGroupInstanceId(followerInstanceId)
.setMemberId("invalid-member")))
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.NONE, Errors.FENCED_INSTANCE_ID))
}
@Test
def testStaticMembersUnknownInstanceBatchLeaveGroup(): Unit = {
staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity()
.setGroupInstanceId("unknown-instance"), new MemberIdentity()
.setGroupInstanceId(followerInstanceId)))
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID, Errors.NONE))
}
@Test
def testPendingMemberBatchLeaveGroup(): Unit = {
val pendingMember = setupGroupWithPendingMember()
val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity()
.setGroupInstanceId("unknown-instance"), new MemberIdentity()
.setMemberId(pendingMember.memberId)))
verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID, Errors.NONE))
}
@Test
def testListGroupsIncludesStableGroups(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
assertEquals(Errors.NONE, joinGroupResult.error)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val (error, groups) = groupCoordinator.handleListGroups(Set())
assertEquals(Errors.NONE, error)
assertEquals(1, groups.size)
assertEquals(GroupOverview("groupId", "consumer", Stable.toString), groups.head)
}
@Test
def testListGroupsIncludesRebalancingGroups(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
val (error, groups) = groupCoordinator.handleListGroups(Set())
assertEquals(Errors.NONE, error)
assertEquals(1, groups.size)
assertEquals(GroupOverview("groupId", "consumer", CompletingRebalance.toString), groups.head)
}
@Test
def testListGroupsWithStates(): Unit = {
val allStates = Set(PreparingRebalance, CompletingRebalance, Stable, Dead, Empty).map(s => s.toString)
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
// Member joins the group
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
assertEquals(Errors.NONE, joinGroupResult.error)
// The group should be in CompletingRebalance
val (error, groups) = groupCoordinator.handleListGroups(Set(CompletingRebalance.toString))
assertEquals(Errors.NONE, error)
assertEquals(1, groups.size)
val (error2, groups2) = groupCoordinator.handleListGroups(allStates.filterNot(s => s == CompletingRebalance.toString))
assertEquals(Errors.NONE, error2)
assertEquals(0, groups2.size)
// Member syncs
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
// The group is now stable
val (error3, groups3) = groupCoordinator.handleListGroups(Set(Stable.toString))
assertEquals(Errors.NONE, error3)
assertEquals(1, groups3.size)
val (error4, groups4) = groupCoordinator.handleListGroups(allStates.filterNot(s => s == Stable.toString))
assertEquals(Errors.NONE, error4)
assertEquals(0, groups4.size)
// Member leaves
val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId)
verifyLeaveGroupResult(leaveGroupResults)
// The group is now empty
val (error5, groups5) = groupCoordinator.handleListGroups(Set(Empty.toString))
assertEquals(Errors.NONE, error5)
assertEquals(1, groups5.size)
val (error6, groups6) = groupCoordinator.handleListGroups(allStates.filterNot(s => s == Empty.toString))
assertEquals(Errors.NONE, error6)
assertEquals(0, groups6.size)
}
@Test
def testDescribeGroupWrongCoordinator(): Unit = {
val (error, _) = groupCoordinator.handleDescribeGroup(otherGroupId)
assertEquals(Errors.NOT_COORDINATOR, error)
}
@Test
def testDescribeGroupInactiveGroup(): Unit = {
val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
assertEquals(Errors.NONE, error)
assertEquals(GroupCoordinator.DeadGroup, summary)
}
@Test
def testDescribeGroupStableForDynamicMember(): Unit = {
val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
assertEquals(Errors.NONE, error)
assertEquals(protocolType, summary.protocolType)
assertEquals("range", summary.protocol)
assertEquals(List(assignedMemberId), summary.members.map(_.memberId))
}
@Test
def testDescribeGroupStableForStaticMember(): Unit = {
val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val generationId = joinGroupResult.generationId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
assertEquals(Errors.NONE, error)
assertEquals(protocolType, summary.protocolType)
assertEquals("range", summary.protocol)
assertEquals(List(assignedMemberId), summary.members.map(_.memberId))
assertEquals(List(leaderInstanceId), summary.members.flatMap(_.groupInstanceId))
}
@Test
def testDescribeGroupRebalancing(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val (error, summary) = groupCoordinator.handleDescribeGroup(groupId)
assertEquals(Errors.NONE, error)
assertEquals(protocolType, summary.protocolType)
assertEquals(GroupCoordinator.NoProtocol, summary.protocol)
assertEquals(CompletingRebalance.toString, summary.state)
assertTrue(summary.members.map(_.memberId).contains(joinGroupResult.memberId))
assertTrue(summary.members.forall(_.metadata.isEmpty))
assertTrue(summary.members.forall(_.assignment.isEmpty))
}
@Test
def testDeleteNonEmptyGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val result = groupCoordinator.handleDeleteGroups(Set(groupId))
assert(result.size == 1 && result.contains(groupId) && result.get(groupId).contains(Errors.NON_EMPTY_GROUP))
}
@Test
def testDeleteGroupWithInvalidGroupId(): Unit = {
val invalidGroupId = null
val result = groupCoordinator.handleDeleteGroups(Set(invalidGroupId))
assert(result.size == 1 && result.contains(invalidGroupId) && result.get(invalidGroupId).contains(Errors.INVALID_GROUP_ID))
}
@Test
def testDeleteGroupWithWrongCoordinator(): Unit = {
val result = groupCoordinator.handleDeleteGroups(Set(otherGroupId))
assert(result.size == 1 && result.contains(otherGroupId) && result.get(otherGroupId).contains(Errors.NOT_COORDINATOR))
}
@Test
def testDeleteEmptyGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val leaveGroupResults = singleLeaveGroup(groupId, joinGroupResult.memberId)
verifyLeaveGroupResult(leaveGroupResults)
val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
val partition: Partition = mock(classOf[Partition])
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition))
when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition))
val result = groupCoordinator.handleDeleteGroups(Set(groupId))
assert(result.size == 1 && result.contains(groupId) && result.get(groupId).contains(Errors.NONE))
}
@Test
def testDeleteEmptyGroupWithStoredOffsets(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val assignedMemberId = joinGroupResult.memberId
val joinGroupError = joinGroupResult.error
assertEquals(Errors.NONE, joinGroupError)
val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]()))
assertEquals(Errors.NONE, syncGroupResult.error)
val tp = new TopicPartition("topic", 0)
val offset = offsetAndMetadata(0)
val commitOffsetResult = commitOffsets(groupId, assignedMemberId, joinGroupResult.generationId, Map(tp -> offset))
assertEquals(Errors.NONE, commitOffsetResult(tp))
val describeGroupResult = groupCoordinator.handleDescribeGroup(groupId)
assertEquals(Stable.toString, describeGroupResult._2.state)
assertEquals(assignedMemberId, describeGroupResult._2.members.head.memberId)
val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId)
verifyLeaveGroupResult(leaveGroupResults)
val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
val partition: Partition = mock(classOf[Partition])
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition))
when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition))
val result = groupCoordinator.handleDeleteGroups(Set(groupId))
assert(result.size == 1 && result.contains(groupId) && result.get(groupId).contains(Errors.NONE))
assertEquals(Dead.toString, groupCoordinator.handleDescribeGroup(groupId)._2.state)
}
@Test
def testDeleteOffsetOfNonExistingGroup(): Unit = {
val tp = new TopicPartition("foo", 0)
val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tp),
RequestLocal.NoCaching)
assertEquals(Errors.GROUP_ID_NOT_FOUND, groupError)
assertTrue(topics.isEmpty)
}
@Test
def testDeleteOffsetOfNonEmptyNonConsumerGroup(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
dynamicJoinGroup(groupId, memberId, "My Protocol", protocols)
val tp = new TopicPartition("foo", 0)
val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tp),
RequestLocal.NoCaching)
assertEquals(Errors.NON_EMPTY_GROUP, groupError)
assertTrue(topics.isEmpty)
}
@Test
def testDeleteOffsetOfEmptyNonConsumerGroup(): Unit = {
// join the group
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, "My Protocol", protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty)
assertEquals(Errors.NONE, syncGroupResult.error)
val t1p0 = new TopicPartition("foo", 0)
val t2p0 = new TopicPartition("bar", 0)
val offset = offsetAndMetadata(37)
val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId,
Map(t1p0 -> offset, t2p0 -> offset))
assertEquals(Errors.NONE, validOffsetCommitResult(t1p0))
assertEquals(Errors.NONE, validOffsetCommitResult(t2p0))
// and leaves.
val leaveGroupResults = singleLeaveGroup(groupId, joinGroupResult.memberId)
verifyLeaveGroupResult(leaveGroupResults)
assertTrue(groupCoordinator.groupManager.getGroup(groupId).exists(_.is(Empty)))
val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
val partition: Partition = mock(classOf[Partition])
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition))
when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition))
val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(t1p0),
RequestLocal.NoCaching)
assertEquals(Errors.NONE, groupError)
assertEquals(1, topics.size)
assertEquals(Some(Errors.NONE), topics.get(t1p0))
val cachedOffsets = groupCoordinator.groupManager.getOffsets(groupId, requireStable, Some(Seq(t1p0, t2p0)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(t1p0).map(_.offset))
assertEquals(Some(offset.offset), cachedOffsets.get(t2p0).map(_.offset))
}
@Test
def testDeleteOffsetOfConsumerGroupWithUnparsableProtocol(): Unit = {
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty)
assertEquals(Errors.NONE, syncGroupResult.error)
val tp = new TopicPartition("foo", 0)
val offset = offsetAndMetadata(37)
val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId,
Map(tp -> offset))
assertEquals(Errors.NONE, validOffsetCommitResult(tp))
val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tp),
RequestLocal.NoCaching)
assertEquals(Errors.NONE, groupError)
assertEquals(1, topics.size)
assertEquals(Some(Errors.GROUP_SUBSCRIBED_TO_TOPIC), topics.get(tp))
}
@Test
def testDeleteOffsetOfDeadConsumerGroup(): Unit = {
val group = new GroupMetadata(groupId, Dead, new MockTime())
group.protocolType = Some(protocolType)
groupCoordinator.groupManager.addGroup(group)
val tp = new TopicPartition("foo", 0)
val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tp),
RequestLocal.NoCaching)
assertEquals(Errors.GROUP_ID_NOT_FOUND, groupError)
assertTrue(topics.isEmpty)
}
@Test
def testDeleteOffsetOfEmptyConsumerGroup(): Unit = {
// join the group
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols)
assertEquals(Errors.NONE, joinGroupResult.error)
val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty)
assertEquals(Errors.NONE, syncGroupResult.error)
val t1p0 = new TopicPartition("foo", 0)
val t2p0 = new TopicPartition("bar", 0)
val offset = offsetAndMetadata(37)
val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId,
Map(t1p0 -> offset, t2p0 -> offset))
assertEquals(Errors.NONE, validOffsetCommitResult(t1p0))
assertEquals(Errors.NONE, validOffsetCommitResult(t2p0))
// and leaves.
val leaveGroupResults = singleLeaveGroup(groupId, joinGroupResult.memberId)
verifyLeaveGroupResult(leaveGroupResults)
assertTrue(groupCoordinator.groupManager.getGroup(groupId).exists(_.is(Empty)))
val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
val partition: Partition = mock(classOf[Partition])
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition))
when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition))
val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(t1p0),
RequestLocal.NoCaching)
assertEquals(Errors.NONE, groupError)
assertEquals(1, topics.size)
assertEquals(Some(Errors.NONE), topics.get(t1p0))
val cachedOffsets = groupCoordinator.groupManager.getOffsets(groupId, requireStable, Some(Seq(t1p0, t2p0)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(t1p0).map(_.offset))
assertEquals(Some(offset.offset), cachedOffsets.get(t2p0).map(_.offset))
}
@Test
def testDeleteOffsetOfStableConsumerGroup(): Unit = {
// join the group
val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID
val subscription = new Subscription(List("bar").asJava)
val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType,
List(("protocol", ConsumerProtocol.serializeSubscription(subscription).array())))
assertEquals(Errors.NONE, joinGroupResult.error)
val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty)
assertEquals(Errors.NONE, syncGroupResult.error)
val t1p0 = new TopicPartition("foo", 0)
val t2p0 = new TopicPartition("bar", 0)
val offset = offsetAndMetadata(37)
val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId,
Map(t1p0 -> offset, t2p0 -> offset))
assertEquals(Errors.NONE, validOffsetCommitResult(t1p0))
assertEquals(Errors.NONE, validOffsetCommitResult(t2p0))
assertTrue(groupCoordinator.groupManager.getGroup(groupId).exists(_.is(Stable)))
val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)
val partition: Partition = mock(classOf[Partition])
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.CURRENT_MAGIC_VALUE))
when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition))
when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition))
val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(t1p0, t2p0),
RequestLocal.NoCaching)
assertEquals(Errors.NONE, groupError)
assertEquals(2, topics.size)
assertEquals(Some(Errors.NONE), topics.get(t1p0))
assertEquals(Some(Errors.GROUP_SUBSCRIBED_TO_TOPIC), topics.get(t2p0))
val cachedOffsets = groupCoordinator.groupManager.getOffsets(groupId, requireStable, Some(Seq(t1p0, t2p0)))
assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(t1p0).map(_.offset))
assertEquals(Some(offset.offset), cachedOffsets.get(t2p0).map(_.offset))
}
@Test
def shouldDelayInitialRebalanceByGroupInitialRebalanceDelayOnEmptyGroup(): Unit = {
val firstJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols)
timer.advanceClock(GroupInitialRebalanceDelay / 2)
verifyDelayedTaskNotCompleted(firstJoinFuture)
timer.advanceClock((GroupInitialRebalanceDelay / 2) + 1)
val joinGroupResult = await(firstJoinFuture, 1)
assertEquals(Errors.NONE, joinGroupResult.error)
}
private def verifyDelayedTaskNotCompleted(firstJoinFuture: Future[JoinGroupResult]) = {
assertThrows(classOf[TimeoutException], () => await(firstJoinFuture, 1),
() => "should have timed out as rebalance delay not expired")
}
@Test
def shouldResetRebalanceDelayWhenNewMemberJoinsGroupInInitialRebalance(): Unit = {
val rebalanceTimeout = GroupInitialRebalanceDelay * 3
val firstMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout)
timer.advanceClock(GroupInitialRebalanceDelay - 1)
val secondMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout)
timer.advanceClock(2)
// advance past initial rebalance delay and make sure that tasks
// haven't been completed
timer.advanceClock(GroupInitialRebalanceDelay / 2 + 1)
verifyDelayedTaskNotCompleted(firstMemberJoinFuture)
verifyDelayedTaskNotCompleted(secondMemberJoinFuture)
// advance clock beyond updated delay and make sure the
// tasks have completed
timer.advanceClock(GroupInitialRebalanceDelay / 2)
val firstResult = await(firstMemberJoinFuture, 1)
val secondResult = await(secondMemberJoinFuture, 1)
assertEquals(Errors.NONE, firstResult.error)
assertEquals(Errors.NONE, secondResult.error)
}
@Test
def shouldDelayRebalanceUptoRebalanceTimeout(): Unit = {
val rebalanceTimeout = GroupInitialRebalanceDelay * 2
val firstMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout)
val secondMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout)
timer.advanceClock(GroupInitialRebalanceDelay + 1)
val thirdMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout)
timer.advanceClock(GroupInitialRebalanceDelay)
verifyDelayedTaskNotCompleted(firstMemberJoinFuture)
verifyDelayedTaskNotCompleted(secondMemberJoinFuture)
verifyDelayedTaskNotCompleted(thirdMemberJoinFuture)
// advance clock beyond rebalanceTimeout
timer.advanceClock(1)
val firstResult = await(firstMemberJoinFuture, 1)
val secondResult = await(secondMemberJoinFuture, 1)
val thirdResult = await(thirdMemberJoinFuture, 1)
assertEquals(Errors.NONE, firstResult.error)
assertEquals(Errors.NONE, secondResult.error)
assertEquals(Errors.NONE, thirdResult.error)
}
@Test
def testCompleteHeartbeatWithGroupDead(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
heartbeat(groupId, rebalanceResult.leaderId, rebalanceResult.generation)
val group = getGroup(groupId)
group.transitionTo(Dead)
val leaderMemberId = rebalanceResult.leaderId
assertTrue(groupCoordinator.tryCompleteHeartbeat(group, leaderMemberId, false, () => true))
groupCoordinator.onExpireHeartbeat(group, leaderMemberId, false)
assertTrue(group.has(leaderMemberId))
}
@Test
def testCompleteHeartbeatWithMemberAlreadyRemoved(): Unit = {
val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId)
heartbeat(groupId, rebalanceResult.leaderId, rebalanceResult.generation)
val group = getGroup(groupId)
val leaderMemberId = rebalanceResult.leaderId
group.remove(leaderMemberId)
assertTrue(groupCoordinator.tryCompleteHeartbeat(group, leaderMemberId, false, () => true))
}
private def getGroup(groupId: String): GroupMetadata = {
val groupOpt = groupCoordinator.groupManager.getGroup(groupId)
assertTrue(groupOpt.isDefined)
groupOpt.get
}
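  // Each of the setup*Callback helpers below pairs a Promise-backed Future with the callback that
  // the coordinator invokes asynchronously, so tests can await the response with a timeout.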
private def setupJoinGroupCallback: (Future[JoinGroupResult], JoinGroupCallback) = {
val responsePromise = Promise[JoinGroupResult]()
val responseFuture = responsePromise.future
val responseCallback: JoinGroupCallback = responsePromise.success
(responseFuture, responseCallback)
}
private def setupSyncGroupCallback: (Future[SyncGroupResult], SyncGroupCallback) = {
val responsePromise = Promise[SyncGroupResult]()
val responseFuture = responsePromise.future
val responseCallback: SyncGroupCallback = responsePromise.success
(responseFuture, responseCallback)
}
private def setupHeartbeatCallback: (Future[HeartbeatCallbackParams], HeartbeatCallback) = {
val responsePromise = Promise[HeartbeatCallbackParams]()
val responseFuture = responsePromise.future
val responseCallback: HeartbeatCallback = error => responsePromise.success(error)
(responseFuture, responseCallback)
}
private def setupCommitOffsetsCallback: (Future[CommitOffsetCallbackParams], CommitOffsetCallback) = {
val responsePromise = Promise[CommitOffsetCallbackParams]()
val responseFuture = responsePromise.future
val responseCallback: CommitOffsetCallback = offsets => responsePromise.success(offsets)
(responseFuture, responseCallback)
}
private def setupLeaveGroupCallback: (Future[LeaveGroupResult], LeaveGroupCallback) = {
val responsePromise = Promise[LeaveGroupResult]()
val responseFuture = responsePromise.future
val responseCallback: LeaveGroupCallback = result => responsePromise.success(result)
(responseFuture, responseCallback)
}
private def sendJoinGroup(groupId: String,
memberId: String,
protocolType: String,
protocols: List[(String, Array[Byte])],
groupInstanceId: Option[String] = None,
sessionTimeout: Int = DefaultSessionTimeout,
rebalanceTimeout: Int = DefaultRebalanceTimeout,
requireKnownMemberId: Boolean = false,
supportSkippingAssignment: Boolean = true): Future[JoinGroupResult] = {
val (responseFuture, responseCallback) = setupJoinGroupCallback
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
groupCoordinator.handleJoinGroup(groupId, memberId, groupInstanceId, requireKnownMemberId, supportSkippingAssignment,
"clientId", "clientHost", rebalanceTimeout, sessionTimeout, protocolType, protocols, responseCallback)
responseFuture
}
private def sendStaticJoinGroupWithPersistence(groupId: String,
memberId: String,
protocolType: String,
protocols: List[(String, Array[Byte])],
groupInstanceId: String,
sessionTimeout: Int,
rebalanceTimeout: Int,
appendRecordError: Errors,
requireKnownMemberId: Boolean = false,
supportSkippingAssignment: Boolean): Future[JoinGroupResult] = {
val (responseFuture, responseCallback) = setupJoinGroupCallback
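    // Stub ReplicaManager.appendRecords by capturing the response callback and invoking it
    // immediately with `appendRecordError`, simulating the write of group metadata to the
    // offsets topic partition owned by this coordinator.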
val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit])
when(replicaManager.appendRecords(anyLong,
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
capturedArgument.capture(),
any[Option[ReentrantLock]],
any(),
any()
)).thenAnswer(_ => {
capturedArgument.getValue.apply(
Map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) ->
new PartitionResponse(appendRecordError, 0L, RecordBatch.NO_TIMESTAMP, 0L)
)
)
})
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
groupCoordinator.handleJoinGroup(groupId, memberId, Some(groupInstanceId), requireKnownMemberId, supportSkippingAssignment,
"clientId", "clientHost", rebalanceTimeout, sessionTimeout, protocolType, protocols, responseCallback)
responseFuture
}
private def sendSyncGroupLeader(groupId: String,
generation: Int,
leaderId: String,
protocolType: Option[String],
protocolName: Option[String],
groupInstanceId: Option[String],
assignment: Map[String, Array[Byte]]): Future[SyncGroupResult] = {
val (responseFuture, responseCallback) = setupSyncGroupCallback
val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit])
when(replicaManager.appendRecords(anyLong,
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
capturedArgument.capture(),
any[Option[ReentrantLock]],
any(),
any())).thenAnswer(_ => {
capturedArgument.getValue.apply(
Map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) ->
new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L)
)
)
}
)
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
groupCoordinator.handleSyncGroup(groupId, generation, leaderId, protocolType, protocolName,
groupInstanceId, assignment, responseCallback)
responseFuture
}
private def sendSyncGroupFollower(groupId: String,
generation: Int,
memberId: String,
                                    protocolType: Option[String] = None,
                                    protocolName: Option[String] = None,
groupInstanceId: Option[String] = None): Future[SyncGroupResult] = {
val (responseFuture, responseCallback) = setupSyncGroupCallback
groupCoordinator.handleSyncGroup(groupId, generation, memberId,
      protocolType, protocolName, groupInstanceId, Map.empty[String, Array[Byte]], responseCallback)
responseFuture
}
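  // Joins without a group.instance.id. With requireKnownMemberId set, the first join is expected
  // to fail with MEMBER_ID_REQUIRED, after which the member id returned by the coordinator is
  // used for the actual join.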
private def dynamicJoinGroup(groupId: String,
memberId: String,
protocolType: String,
protocols: List[(String, Array[Byte])],
sessionTimeout: Int = DefaultSessionTimeout,
rebalanceTimeout: Int = DefaultRebalanceTimeout): JoinGroupResult = {
val requireKnownMemberId = true
var responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, None, sessionTimeout, rebalanceTimeout, requireKnownMemberId)
// Since member id is required, we need another bounce to get the successful join group result.
if (memberId == JoinGroupRequest.UNKNOWN_MEMBER_ID && requireKnownMemberId) {
val joinGroupResult = Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS))
// If some other error is triggered, return the error immediately for caller to handle.
if (joinGroupResult.error != Errors.MEMBER_ID_REQUIRED) {
return joinGroupResult
}
responseFuture = sendJoinGroup(groupId, joinGroupResult.memberId, protocolType, protocols, None, sessionTimeout, rebalanceTimeout, requireKnownMemberId)
}
timer.advanceClock(GroupInitialRebalanceDelay + 1)
// should only have to wait as long as session timeout, but allow some extra time in case of an unexpected delay
Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS))
}
private def staticJoinGroup(groupId: String,
memberId: String,
groupInstanceId: String,
protocolType: String,
protocols: List[(String, Array[Byte])],
clockAdvance: Int = GroupInitialRebalanceDelay + 1,
sessionTimeout: Int = DefaultSessionTimeout,
rebalanceTimeout: Int = DefaultRebalanceTimeout,
supportSkippingAssignment: Boolean = true): JoinGroupResult = {
val responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, Some(groupInstanceId), sessionTimeout, rebalanceTimeout,
supportSkippingAssignment = supportSkippingAssignment)
timer.advanceClock(clockAdvance)
// should only have to wait as long as session timeout, but allow some extra time in case of an unexpected delay
Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS))
}
private def staticJoinGroupWithPersistence(groupId: String,
memberId: String,
groupInstanceId: String,
protocolType: String,
protocols: List[(String, Array[Byte])],
clockAdvance: Int,
sessionTimeout: Int = DefaultSessionTimeout,
rebalanceTimeout: Int = DefaultRebalanceTimeout,
appendRecordError: Errors = Errors.NONE,
supportSkippingAssignment: Boolean = true): JoinGroupResult = {
val responseFuture = sendStaticJoinGroupWithPersistence(groupId, memberId, protocolType, protocols,
groupInstanceId, sessionTimeout, rebalanceTimeout, appendRecordError, supportSkippingAssignment = supportSkippingAssignment)
timer.advanceClock(clockAdvance)
// should only have to wait as long as session timeout, but allow some extra time in case of an unexpected delay
Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS))
}
private def syncGroupFollower(groupId: String,
generationId: Int,
memberId: String,
protocolType: Option[String] = None,
protocolName: Option[String] = None,
groupInstanceId: Option[String] = None,
sessionTimeout: Int = DefaultSessionTimeout): SyncGroupResult = {
val responseFuture = sendSyncGroupFollower(groupId, generationId, memberId, protocolType,
protocolName, groupInstanceId)
Await.result(responseFuture, Duration(sessionTimeout + 100, TimeUnit.MILLISECONDS))
}
private def syncGroupLeader(groupId: String,
generationId: Int,
memberId: String,
assignment: Map[String, Array[Byte]],
protocolType: Option[String] = None,
protocolName: Option[String] = None,
groupInstanceId: Option[String] = None,
sessionTimeout: Int = DefaultSessionTimeout): SyncGroupResult = {
val responseFuture = sendSyncGroupLeader(groupId, generationId, memberId, protocolType,
protocolName, groupInstanceId, assignment)
Await.result(responseFuture, Duration(sessionTimeout + 100, TimeUnit.MILLISECONDS))
}
private def heartbeat(groupId: String,
consumerId: String,
generationId: Int,
groupInstanceId: Option[String] = None): HeartbeatCallbackParams = {
val (responseFuture, responseCallback) = setupHeartbeatCallback
groupCoordinator.handleHeartbeat(groupId, consumerId, groupInstanceId, generationId, responseCallback)
Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS))
}
private def await[T](future: Future[T], millis: Long): T = {
Await.result(future, Duration(millis, TimeUnit.MILLISECONDS))
}
private def commitOffsets(groupId: String,
memberId: String,
generationId: Int,
offsets: Map[TopicPartition, OffsetAndMetadata],
groupInstanceId: Option[String] = None): CommitOffsetCallbackParams = {
val (responseFuture, responseCallback) = setupCommitOffsetsCallback
val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit])
when(replicaManager.appendRecords(anyLong,
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
capturedArgument.capture(),
any[Option[ReentrantLock]],
any(),
any())
).thenAnswer(_ => {
capturedArgument.getValue.apply(
Map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) ->
new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L)
)
)
})
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
groupCoordinator.handleCommitOffsets(groupId, memberId, groupInstanceId, generationId, offsets, responseCallback)
Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS))
}
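  // Commits offsets within a transaction for the given producer id and epoch, stubbing the append
  // to the offsets topic in the same way as commitOffsets above.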
private def commitTransactionalOffsets(groupId: String,
producerId: Long,
producerEpoch: Short,
offsets: Map[TopicPartition, OffsetAndMetadata],
memberId: String = JoinGroupRequest.UNKNOWN_MEMBER_ID,
groupInstanceId: Option[String] = Option.empty,
generationId: Int = JoinGroupRequest.UNKNOWN_GENERATION_ID) = {
val (responseFuture, responseCallback) = setupCommitOffsetsCallback
val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit])
when(replicaManager.appendRecords(anyLong,
anyShort(),
internalTopicsAllowed = ArgumentMatchers.eq(true),
origin = ArgumentMatchers.eq(AppendOrigin.Coordinator),
any[Map[TopicPartition, MemoryRecords]],
capturedArgument.capture(),
any[Option[ReentrantLock]],
any(),
any())
).thenAnswer(_ => {
capturedArgument.getValue.apply(
Map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId)) ->
new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L)
)
)
})
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V2))
groupCoordinator.handleTxnCommitOffsets(groupId, producerId, producerEpoch,
memberId, groupInstanceId, generationId, offsets, responseCallback)
val result = Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS))
result
}
private def singleLeaveGroup(groupId: String,
consumerId: String,
groupInstanceId: Option[String] = None): LeaveGroupResult = {
val singleMemberIdentity = List(
new MemberIdentity()
.setMemberId(consumerId)
.setGroupInstanceId(groupInstanceId.orNull))
batchLeaveGroup(groupId, singleMemberIdentity)
}
private def batchLeaveGroup(groupId: String,
memberIdentities: List[MemberIdentity]): LeaveGroupResult = {
val (responseFuture, responseCallback) = setupLeaveGroupCallback
when(replicaManager.getPartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)))
.thenReturn(HostedPartition.None)
when(replicaManager.getMagic(any[TopicPartition])).thenReturn(Some(RecordBatch.MAGIC_VALUE_V1))
groupCoordinator.handleLeaveGroup(groupId, memberIdentities, responseCallback)
Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS))
}
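  // Relays a transaction completion (commit or abort) for the given producer to the group
  // metadata manager, passing along the partition ids of the supplied offsets partitions.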
def handleTxnCompletion(producerId: Long,
offsetsPartitions: Iterable[TopicPartition],
transactionResult: TransactionResult): Unit = {
val isCommit = transactionResult == TransactionResult.COMMIT
groupCoordinator.groupManager.handleTxnCompletion(producerId, offsetsPartitions.map(_.partition).toSet, isCommit)
}
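  // Builds an OffsetAndMetadata with empty metadata, stamped with the mock timer's current time.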
private def offsetAndMetadata(offset: Long): OffsetAndMetadata = {
OffsetAndMetadata(offset, "", timer.time.milliseconds())
}
}
object GroupCoordinatorTest {
def verifyLeaveGroupResult(leaveGroupResult: LeaveGroupResult,
expectedTopLevelError: Errors = Errors.NONE,
expectedMemberLevelErrors: List[Errors] = List.empty): Unit = {
assertEquals(expectedTopLevelError, leaveGroupResult.topLevelError)
if (expectedMemberLevelErrors.nonEmpty) {
assertEquals(expectedMemberLevelErrors.size, leaveGroupResult.memberResponses.size)
for (i <- expectedMemberLevelErrors.indices) {
assertEquals(expectedMemberLevelErrors(i), leaveGroupResult.memberResponses(i).error)
}
}
}
}
| TiVo/kafka | core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala | Scala | apache-2.0 | 186,767 |
package com.signalcollect.dcop.evaluation.maxsum
import com.signalcollect.dcop.benchmark.BenchmarkConfiguration
import com.signalcollect.configuration.ExecutionMode
import com.signalcollect.dcop.benchmark.BenchmarkModes
import scala.collection.mutable.ListBuffer
class MaxSumAlgorithm(config: BenchmarkConfiguration) {
private val configuration = config
/*
* measurements
*/
private var conflictsOverSteps: List[Tuple2[Int, Int]] = List()
private var conflictsOverTime: List[Tuple2[Long, Int]] = List()
private var stepsToConvergence: Long = 0
private var timeToConvergence: Long = 0
//an executable instance of the algorithm
val algorithm: MaxSumExecutor = new MaxSumExecutor(configuration.file, configuration.executionConfiguration, configuration.isAdopt, configuration.aggregationOperation)
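  // Dispatches to the evaluation routine matching the configured benchmark mode and stores the
  // measurement in the corresponding field; getResult() then returns that field for the same mode.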
def runEvaluation() = {
configuration.mode match {
case BenchmarkModes.SyncConflictsOverSteps => evaluateSyncConflictsOverSteps()
case BenchmarkModes.AsyncConflictsOverTime => evaluateConflictsOverTime()
case BenchmarkModes.SyncConflictsOverTime => evaluateConflictsOverTime()
case BenchmarkModes.SyncStepsToConvergence => evaluateSyncStepsToConvergence()
case BenchmarkModes.AsyncTimeToConvergence => evaluateAsyncTimeToConvergence()
case BenchmarkModes.SyncTimeToConvergence => evaluateSyncTimeToConvergence()
      case _ => println("Unknown BenchmarkMode. Exiting..."); System.exit(-1)
}
}
def getResult() = {
configuration.mode match {
case BenchmarkModes.SyncConflictsOverSteps => conflictsOverSteps
case BenchmarkModes.AsyncConflictsOverTime => conflictsOverTime
case BenchmarkModes.SyncConflictsOverTime => conflictsOverTime
case BenchmarkModes.SyncStepsToConvergence => stepsToConvergence
case BenchmarkModes.AsyncTimeToConvergence => timeToConvergence
case BenchmarkModes.SyncTimeToConvergence => timeToConvergence
      case _ => println("Unknown BenchmarkMode. Exiting..."); System.exit(-1)
}
}
private def evaluateSyncConflictsOverSteps() = {
if (configuration.executionConfiguration.executionMode != ExecutionMode.Synchronous) {
println("ERROR: Can't evaluate ConflictsOverSteps on Asynchronous BenchmarkConfiguration.")
println("Exiting...")
System.exit(-1)
} else {
for (run <- 1 to configuration.stepsLimit) {
val partialResult = algorithm.executeWithAggregation()
if (partialResult == -1) {
println("ERROR: executeWithAggregation failed, AggregationOperation was null")
System.exit(-1)
} else {
conflictsOverSteps :+= (run, partialResult)
}
}
}
}
private def evaluateConflictsOverTime() = {
val partialResult = algorithm.executeForConflictsOverTime()
if (partialResult.isEmpty) {
println("ERROR: executeWithAggregation failed, AggregationOperation was null")
System.exit(-1)
} else {
conflictsOverTime = partialResult
}
}
private def evaluateSyncStepsToConvergence() = {
if (configuration.executionConfiguration.executionMode != ExecutionMode.Synchronous) {
println("ERROR: Can't evaluate StepsToConvergence on Asynchronous BenchmarkConfiguration.")
println("Exiting...")
System.exit(-1)
} else {
val partialResult = algorithm.executeForConvergenceSteps()
if (partialResult == -1) {
println("ERROR: SyncStepsToConvergence did not terminate because of Convergence.")
println("Exiting...")
System.exit(-1)
} else {
stepsToConvergence = partialResult
}
}
}
private def evaluateSyncTimeToConvergence() = {
if (configuration.executionConfiguration.executionMode != ExecutionMode.Synchronous) {
println("ERROR: Can't evaluate SyncTimeToConvergence on Asynchronous BenchmarkConfiguration.")
println("Exiting...")
System.exit(-1)
} else {
val partialResult = algorithm.executeForConvergenceTime()
if (partialResult == -1) {
println("ERROR: SyncTimeToConvergence did not terminate because of Convergence.")
println("Exiting...")
System.exit(-1)
} else {
timeToConvergence = partialResult
}
}
}
private def evaluateAsyncTimeToConvergence() = {
if (configuration.executionConfiguration.executionMode == ExecutionMode.Synchronous) {
println("ERROR: Can't evaluate AsyncTimeToConvergence on Synchronous BenchmarkConfiguration.")
println("Exiting...")
System.exit(-1)
} else {
val partialResult = algorithm.executeForConvergenceTime()
if (partialResult == -1) {
println("ERROR: SyncTimeToConvergence did not terminate because of Convergence.")
println("Exiting...")
System.exit(-1)
} else {
timeToConvergence = partialResult
}
}
}
} | gmazlami/dcop-maxsum | src/main/scala/com/signalcollect/dcop/evaluation/maxsum/MaxSumAlgorithm.scala | Scala | apache-2.0 | 4,890 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InterruptedIOException, IOException, UncheckedIOException}
import java.nio.channels.ClosedByInterruptException
import java.util.UUID
import java.util.concurrent.{CountDownLatch, ExecutionException, TimeoutException, TimeUnit}
import java.util.concurrent.atomic.AtomicReference
import java.util.concurrent.locks.ReentrantLock
import scala.collection.JavaConverters._
import scala.collection.mutable.{Map => MutableMap}
import scala.util.control.NonFatal
import com.google.common.util.concurrent.UncheckedExecutionException
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkContext, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.streaming.InternalOutputModes._
import org.apache.spark.sql.connector.catalog.{SupportsWrite, Table}
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2, SparkDataStream}
import org.apache.spark.sql.connector.write.{LogicalWriteInfoImpl, SupportsTruncate}
import org.apache.spark.sql.connector.write.streaming.StreamingWrite
import org.apache.spark.sql.execution.QueryExecution
import org.apache.spark.sql.execution.command.StreamingExplainCommand
import org.apache.spark.sql.execution.datasources.v2.StreamWriterCommitProgress
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.util.{Clock, UninterruptibleThread, Utils}
/** States for [[StreamExecution]]'s lifecycle. */
trait State
case object INITIALIZING extends State
case object ACTIVE extends State
case object TERMINATED extends State
case object RECONFIGURING extends State
/**
* Manages the execution of a streaming Spark SQL query that is occurring in a separate thread.
* Unlike a standard query, a streaming query executes repeatedly each time new data arrives at any
* [[Source]] present in the query plan. Whenever new data arrives, a [[QueryExecution]] is created
* and the results are committed transactionally to the given [[Sink]].
*
* @param deleteCheckpointOnStop whether to delete the checkpoint if the query is stopped without
* errors. Checkpoint deletion can be forced with the appropriate
* Spark configuration.
*/
abstract class StreamExecution(
override val sparkSession: SparkSession,
override val name: String,
private val checkpointRoot: String,
analyzedPlan: LogicalPlan,
val sink: Table,
val trigger: Trigger,
val triggerClock: Clock,
val outputMode: OutputMode,
deleteCheckpointOnStop: Boolean)
extends StreamingQuery with ProgressReporter with Logging {
import org.apache.spark.sql.streaming.StreamingQueryListener._
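  // For orientation, a user-facing query that ends up driven by a StreamExecution subclass is
  // typically created along these lines (illustrative sketch only; the source/sink formats and
  // checkpoint path below are examples, not part of this class):
  //
  //   val query = spark.readStream.format("rate").load()
  //     .writeStream.format("console").outputMode("append")
  //     .option("checkpointLocation", "/tmp/checkpoints/rate-console")
  //     .start()
  //   query.awaitTermination()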
protected val pollingDelayMs: Long = sparkSession.sessionState.conf.streamingPollingDelay
protected val minLogEntriesToMaintain: Int = sparkSession.sessionState.conf.minBatchesToRetain
require(minLogEntriesToMaintain > 0, "minBatchesToRetain has to be positive")
/**
* A lock used to wait/notify when batches complete. Use a fair lock to avoid thread starvation.
*/
protected val awaitProgressLock = new ReentrantLock(true)
protected val awaitProgressLockCondition = awaitProgressLock.newCondition()
private val initializationLatch = new CountDownLatch(1)
private val startLatch = new CountDownLatch(1)
private val terminationLatch = new CountDownLatch(1)
val resolvedCheckpointRoot = {
val checkpointPath = new Path(checkpointRoot)
val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
if (sparkSession.conf.get(SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED)
&& StreamExecution.containsSpecialCharsInPath(checkpointPath)) {
// In Spark 2.4 and earlier, the checkpoint path is escaped 3 times (3 `Path.toUri.toString`
// calls). If this legacy checkpoint path exists, we will throw an error to tell the user how
// to migrate.
val legacyCheckpointDir =
new Path(new Path(checkpointPath.toUri.toString).toUri.toString).toUri.toString
val legacyCheckpointDirExists =
try {
fs.exists(new Path(legacyCheckpointDir))
} catch {
case NonFatal(e) =>
// We may not have access to this directory. Don't fail the query if that happens.
logWarning(e.getMessage, e)
false
}
if (legacyCheckpointDirExists) {
throw new SparkException(
s"""Error: we detected a possible problem with the location of your checkpoint and you
|likely need to move it before restarting this query.
|
            |Earlier versions of Spark incorrectly escaped paths when writing out checkpoints for
|structured streaming. While this was corrected in Spark 3.0, it appears that your
|query was started using an earlier version that incorrectly handled the checkpoint
|path.
|
|Correct Checkpoint Directory: $checkpointPath
|Incorrect Checkpoint Directory: $legacyCheckpointDir
|
|Please move the data from the incorrect directory to the correct one, delete the
|incorrect directory, and then restart this query. If you believe you are receiving
|this message in error, you can disable it with the SQL conf
|${SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key}."""
.stripMargin)
}
}
val checkpointDir = checkpointPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
fs.mkdirs(checkpointDir)
checkpointDir.toString
}
logInfo(s"Checkpoint root $checkpointRoot resolved to $resolvedCheckpointRoot.")
def logicalPlan: LogicalPlan
/**
* Tracks how much data we have processed and committed to the sink or state store from each
* input source.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var committedOffsets = new StreamProgress
/**
* Tracks the offsets that are available to be processed, but have not yet be committed to the
* sink.
* Only the scheduler thread should modify this field, and only in atomic steps.
* Other threads should make a shallow copy if they are going to access this field more than
* once, since the field's value may change at any time.
*/
@volatile
var availableOffsets = new StreamProgress
@volatile
var sinkCommitProgress: Option[StreamWriterCommitProgress] = None
/** The current batchId or -1 if execution has not yet been initialized. */
protected var currentBatchId: Long = -1
/** Metadata associated with the whole query */
protected val streamMetadata: StreamMetadata = {
val metadataPath = new Path(checkpointFile("metadata"))
val hadoopConf = sparkSession.sessionState.newHadoopConf()
StreamMetadata.read(metadataPath, hadoopConf).getOrElse {
val newMetadata = new StreamMetadata(UUID.randomUUID.toString)
StreamMetadata.write(newMetadata, metadataPath, hadoopConf)
newMetadata
}
}
/** Metadata associated with the offset seq of a batch in the query. */
protected var offsetSeqMetadata = OffsetSeqMetadata(
batchWatermarkMs = 0, batchTimestampMs = 0, sparkSession.conf)
/**
* A map of current watermarks, keyed by the position of the watermark operator in the
* physical plan.
*
* This state is 'soft state', which does not affect the correctness and semantics of watermarks
* and is not persisted across query restarts.
* The fault-tolerant watermark state is in offsetSeqMetadata.
*/
protected val watermarkMsMap: MutableMap[Int, Long] = MutableMap()
override val id: UUID = UUID.fromString(streamMetadata.id)
override val runId: UUID = UUID.randomUUID
/**
   * A pretty identifier string for use in logs. The format is
   * "queryName [id = xyz, runId = abc]" if the name is set, else "[id = xyz, runId = abc]".
*/
protected val prettyIdString =
Option(name).map(_ + " ").getOrElse("") + s"[id = $id, runId = $runId]"
/**
   * A list of unique sources in the query plan. This will be set when generating the logical plan.
*/
@volatile protected var uniqueSources: Seq[SparkDataStream] = Seq.empty
/** Defines the internal state of execution */
protected val state = new AtomicReference[State](INITIALIZING)
@volatile
var lastExecution: IncrementalExecution = _
/** Holds the most recent input data for each source. */
protected var newData: Map[SparkDataStream, LogicalPlan] = _
@volatile
protected var streamDeathCause: StreamingQueryException = null
/* Get the call site in the caller thread; will pass this into the micro batch thread */
private val callSite = Utils.getCallSite()
/** Used to report metrics to coda-hale. This uses id for easier tracking across restarts. */
lazy val streamMetrics = new MetricsReporter(
this, s"spark.streaming.${Option(name).getOrElse(id)}")
/** Isolated spark session to run the batches with. */
private val sparkSessionForStream = sparkSession.cloneSession()
/**
* The thread that runs the micro-batches of this stream. Note that this thread must be
   * [[org.apache.spark.util.UninterruptibleThread]] to work around KAFKA-1894: interrupting a
   * running `KafkaConsumer` may cause an endless loop.
*/
val queryExecutionThread: QueryExecutionThread =
new QueryExecutionThread(s"stream execution thread for $prettyIdString") {
override def run(): Unit = {
// To fix call site like "run at <unknown>:0", we bridge the call site from the caller
// thread to this micro batch thread
sparkSession.sparkContext.setCallSite(callSite)
runStream()
}
}
/**
* A write-ahead-log that records the offsets that are present in each batch. In order to ensure
* that a given batch will always consist of the same data, we write to this log *before* any
* processing is done. Thus, the Nth record in this log indicated data that is currently being
* processed and the N-1th entry indicates which offsets have been durably committed to the sink.
*/
val offsetLog = new OffsetSeqLog(sparkSession, checkpointFile("offsets"))
/**
   * A log that records the batch ids that have completed. This is used to check whether a batch
   * was fully processed and its output committed to the sink, in which case there is no need to
   * process it again. It is consulted (for instance) during restart, to help identify which
   * batch to run next.
*/
val commitLog = new CommitLog(sparkSession, checkpointFile("commits"))
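  // Illustrative example of how the two logs interact: if the offsets log holds entries for
  // batches 0..5 while the commit log only holds 0..4, then on restart batch 5 is re-executed
  // using exactly the offsets recorded for it before its results are committed.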
/** Whether all fields of the query have been initialized */
private def isInitialized: Boolean = state.get != INITIALIZING
/** Whether the query is currently active or not */
override def isActive: Boolean = state.get != TERMINATED
/** Returns the [[StreamingQueryException]] if the query was terminated by an exception. */
override def exception: Option[StreamingQueryException] = Option(streamDeathCause)
/** Returns the path of a file with `name` in the checkpoint directory. */
protected def checkpointFile(name: String): String =
new Path(new Path(resolvedCheckpointRoot), name).toString
/**
* Starts the execution. This returns only after the thread has started and [[QueryStartedEvent]]
* has been posted to all the listeners.
*/
def start(): Unit = {
logInfo(s"Starting $prettyIdString. Use $resolvedCheckpointRoot to store the query checkpoint.")
queryExecutionThread.setDaemon(true)
queryExecutionThread.start()
startLatch.await() // Wait until thread started and QueryStart event has been posted
}
/**
* Run the activated stream until stopped.
*/
protected def runActivatedStream(sparkSessionForStream: SparkSession): Unit
/**
* Activate the stream and then wrap a callout to runActivatedStream, handling start and stop.
*
* Note that this method ensures that [[QueryStartedEvent]] and [[QueryTerminatedEvent]] are
* posted such that listeners are guaranteed to get a start event before a termination.
* Furthermore, this method also ensures that [[QueryStartedEvent]] event is posted before the
* `start()` method returns.
*/
private def runStream(): Unit = {
try {
sparkSession.sparkContext.setJobGroup(runId.toString, getBatchDescriptionString,
interruptOnCancel = true)
sparkSession.sparkContext.setLocalProperty(StreamExecution.QUERY_ID_KEY, id.toString)
if (sparkSession.sessionState.conf.streamingMetricsEnabled) {
sparkSession.sparkContext.env.metricsSystem.registerSource(streamMetrics)
}
// `postEvent` does not throw non fatal exception.
postEvent(new QueryStartedEvent(id, runId, name))
// Unblock starting thread
startLatch.countDown()
// While active, repeatedly attempt to run batches.
SparkSession.setActiveSession(sparkSession)
updateStatusMessage("Initializing sources")
// force initialization of the logical plan so that the sources can be created
logicalPlan
      // Adaptive execution can change the number of shuffle partitions, so disallow it here
sparkSessionForStream.conf.set(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key, "false")
// Disable cost-based join optimization as we do not want stateful operations to be rearranged
sparkSessionForStream.conf.set(SQLConf.CBO_ENABLED.key, "false")
offsetSeqMetadata = OffsetSeqMetadata(
batchWatermarkMs = 0, batchTimestampMs = 0, sparkSessionForStream.conf)
if (state.compareAndSet(INITIALIZING, ACTIVE)) {
// Unblock `awaitInitialization`
initializationLatch.countDown()
runActivatedStream(sparkSessionForStream)
updateStatusMessage("Stopped")
} else {
// `stop()` is already called. Let `finally` finish the cleanup.
}
} catch {
case e if isInterruptedByStop(e, sparkSession.sparkContext) =>
// interrupted by stop()
updateStatusMessage("Stopped")
case e: IOException if e.getMessage != null
&& e.getMessage.startsWith(classOf[InterruptedException].getName)
&& state.get == TERMINATED =>
// This is a workaround for HADOOP-12074: `Shell.runCommand` converts `InterruptedException`
// to `new IOException(ie.toString())` before Hadoop 2.8.
updateStatusMessage("Stopped")
case e: Throwable =>
streamDeathCause = new StreamingQueryException(
toDebugString(includeLogicalPlan = isInitialized),
s"Query $prettyIdString terminated with exception: ${e.getMessage}",
e,
committedOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString,
availableOffsets.toOffsetSeq(sources, offsetSeqMetadata).toString)
logError(s"Query $prettyIdString terminated with error", e)
updateStatusMessage(s"Terminated with exception: ${e.getMessage}")
// Rethrow the fatal errors to allow the user using `Thread.UncaughtExceptionHandler` to
// handle them
if (!NonFatal(e)) {
throw e
}
} finally queryExecutionThread.runUninterruptibly {
// The whole `finally` block must run inside `runUninterruptibly` to avoid being interrupted
// when a query is stopped by the user. We need to make sure the following codes finish
// otherwise it may throw `InterruptedException` to `UncaughtExceptionHandler` (SPARK-21248).
// Release latches to unblock the user codes since exception can happen in any place and we
// may not get a chance to release them
startLatch.countDown()
initializationLatch.countDown()
try {
stopSources()
state.set(TERMINATED)
currentStatus = status.copy(isTriggerActive = false, isDataAvailable = false)
// Update metrics and status
sparkSession.sparkContext.env.metricsSystem.removeSource(streamMetrics)
// Notify others
sparkSession.streams.notifyQueryTermination(StreamExecution.this)
postEvent(
new QueryTerminatedEvent(id, runId, exception.map(_.cause).map(Utils.exceptionString)))
// Delete the temp checkpoint when either force delete enabled or the query didn't fail
if (deleteCheckpointOnStop &&
(sparkSession.sessionState.conf
.getConf(SQLConf.FORCE_DELETE_TEMP_CHECKPOINT_LOCATION) || exception.isEmpty)) {
val checkpointPath = new Path(resolvedCheckpointRoot)
try {
logInfo(s"Deleting checkpoint $checkpointPath.")
val fs = checkpointPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
fs.delete(checkpointPath, true)
} catch {
case NonFatal(e) =>
// Deleting temp checkpoint folder is best effort, don't throw non fatal exceptions
// when we cannot delete them.
logWarning(s"Cannot delete $checkpointPath", e)
}
}
} finally {
awaitProgressLock.lock()
try {
// Wake up any threads that are waiting for the stream to progress.
awaitProgressLockCondition.signalAll()
} finally {
awaitProgressLock.unlock()
}
terminationLatch.countDown()
}
}
}
private def isInterruptedByStop(e: Throwable, sc: SparkContext): Boolean = {
if (state.get == TERMINATED) {
StreamExecution.isInterruptionException(e, sc)
} else {
false
}
}
override protected def postEvent(event: StreamingQueryListener.Event): Unit = {
sparkSession.streams.postListenerEvent(event)
}
/** Stops all streaming sources safely. */
protected def stopSources(): Unit = {
uniqueSources.foreach { source =>
try {
source.stop()
} catch {
case NonFatal(e) =>
logWarning(s"Failed to stop streaming source: $source. Resources may have leaked.", e)
}
}
}
/**
   * Interrupts the query execution thread and awaits its termination until it exceeds the
* timeout. The timeout can be set on "spark.sql.streaming.stopTimeout".
*
* @throws TimeoutException If the thread cannot be stopped within the timeout
*/
@throws[TimeoutException]
protected def interruptAndAwaitExecutionThreadTermination(): Unit = {
val timeout = math.max(
sparkSession.sessionState.conf.getConf(SQLConf.STREAMING_STOP_TIMEOUT), 0)
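    // Note: a timeout of 0 makes the `join` call below wait indefinitely, so the
    // TimeoutException branch can only be reached with a positive timeout value.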
queryExecutionThread.interrupt()
queryExecutionThread.join(timeout)
if (queryExecutionThread.isAlive) {
val stackTraceException = new SparkException("The stream thread was last executing:")
stackTraceException.setStackTrace(queryExecutionThread.getStackTrace)
val timeoutException = new TimeoutException(
s"Stream Execution thread failed to stop within $timeout milliseconds (specified by " +
s"${SQLConf.STREAMING_STOP_TIMEOUT.key}). See the cause on what was " +
"being executed in the streaming query thread.")
timeoutException.initCause(stackTraceException)
throw timeoutException
}
}
/**
* Blocks the current thread until processing for data from the given `source` has reached at
* least the given `Offset`. This method is intended for use primarily when writing tests.
*/
private[sql] def awaitOffset(sourceIndex: Int, newOffset: OffsetV2, timeoutMs: Long): Unit = {
assertAwaitThread()
def notDone = {
val localCommittedOffsets = committedOffsets
if (sources == null) {
// sources might not be initialized yet
false
} else {
val source = sources(sourceIndex)
!localCommittedOffsets.contains(source) || localCommittedOffsets(source) != newOffset
}
}
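    // Poll until the committed offset of the source reaches `newOffset`. Each round waits on
    // the progress condition for at most `timeoutMs`, and a stream failure is rethrown here.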
while (notDone) {
awaitProgressLock.lock()
try {
awaitProgressLockCondition.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
} finally {
awaitProgressLock.unlock()
}
}
logDebug(s"Unblocked at $newOffset for ${sources(sourceIndex)}")
}
/** A flag to indicate that a batch has completed with no new data available. */
@volatile protected var noNewData = false
/**
* Assert that the await APIs should not be called in the stream thread. Otherwise, it may cause
* dead-lock, e.g., calling any await APIs in `StreamingQueryListener.onQueryStarted` will block
* the stream thread forever.
*/
private def assertAwaitThread(): Unit = {
if (queryExecutionThread eq Thread.currentThread) {
throw new IllegalStateException(
"Cannot wait for a query state from the same thread that is running the query")
}
}
/**
* Await until all fields of the query have been initialized.
*/
def awaitInitialization(timeoutMs: Long): Unit = {
assertAwaitThread()
require(timeoutMs > 0, "Timeout has to be positive")
if (streamDeathCause != null) {
throw streamDeathCause
}
initializationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
}
override def processAllAvailable(): Unit = {
assertAwaitThread()
if (streamDeathCause != null) {
throw streamDeathCause
}
if (!isActive) return
awaitProgressLock.lock()
try {
noNewData = false
while (true) {
awaitProgressLockCondition.await(10000, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
}
if (noNewData || !isActive) {
return
}
}
} finally {
awaitProgressLock.unlock()
}
}
override def awaitTermination(): Unit = {
assertAwaitThread()
terminationLatch.await()
if (streamDeathCause != null) {
throw streamDeathCause
}
}
override def awaitTermination(timeoutMs: Long): Boolean = {
assertAwaitThread()
require(timeoutMs > 0, "Timeout has to be positive")
terminationLatch.await(timeoutMs, TimeUnit.MILLISECONDS)
if (streamDeathCause != null) {
throw streamDeathCause
} else {
!isActive
}
}
/** Expose for tests */
def explainInternal(extended: Boolean): String = {
if (lastExecution == null) {
"No physical plan. Waiting for data."
} else {
val explain = StreamingExplainCommand(lastExecution, extended = extended)
sparkSession.sessionState.executePlan(explain).executedPlan.executeCollect()
        .map(_.getString(0)).mkString("\n")
}
}
override def explain(extended: Boolean): Unit = {
// scalastyle:off println
println(explainInternal(extended))
// scalastyle:on println
}
override def explain(): Unit = explain(extended = false)
override def toString: String = {
s"Streaming Query $prettyIdString [state = $state]"
}
private def toDebugString(includeLogicalPlan: Boolean): String = {
val debugString =
s"""|=== Streaming Query ===
|Identifier: $prettyIdString
|Current Committed Offsets: $committedOffsets
|Current Available Offsets: $availableOffsets
|
|Current State: $state
|Thread State: ${queryExecutionThread.getState}""".stripMargin
if (includeLogicalPlan) {
      debugString + s"\n\nLogical Plan:\n$logicalPlan"
} else {
debugString
}
}
protected def getBatchDescriptionString: String = {
val batchDescription = if (currentBatchId < 0) "init" else currentBatchId.toString
s"""|${Option(name).getOrElse("")}
|id = $id
|runId = $runId
|batch = $batchDescription""".stripMargin
}
protected def createStreamingWrite(
table: SupportsWrite,
options: Map[String, String],
inputPlan: LogicalPlan): StreamingWrite = {
val info = LogicalWriteInfoImpl(
queryId = id.toString,
inputPlan.schema,
new CaseInsensitiveStringMap(options.asJava))
val writeBuilder = table.newWriteBuilder(info)
outputMode match {
case Append =>
writeBuilder.buildForStreaming()
case Complete =>
// TODO: we should do this check earlier when we have capability API.
require(writeBuilder.isInstanceOf[SupportsTruncate],
table.name + " does not support Complete mode.")
writeBuilder.asInstanceOf[SupportsTruncate].truncate().buildForStreaming()
case Update =>
        // Although no v2 sink really supports Update mode now, during tests we do want them
        // to pretend to support Update mode, and treat Update mode the same as Append mode.
if (Utils.isTesting) {
writeBuilder.buildForStreaming()
} else {
throw new IllegalArgumentException(
"Data source v2 streaming sinks does not support Update mode.")
}
}
}
protected def purge(threshold: Long): Unit = {
logDebug(s"Purging metadata at threshold=$threshold")
offsetLog.purge(threshold)
commitLog.purge(threshold)
}
}
object StreamExecution {
val QUERY_ID_KEY = "sql.streaming.queryId"
val IS_CONTINUOUS_PROCESSING = "__is_continuous_processing"
def isInterruptionException(e: Throwable, sc: SparkContext): Boolean = e match {
// InterruptedIOException - thrown when an I/O operation is interrupted
// ClosedByInterruptException - thrown when an I/O operation upon a channel is interrupted
case _: InterruptedException | _: InterruptedIOException | _: ClosedByInterruptException =>
true
// The cause of the following exceptions may be one of the above exceptions:
//
    // UncheckedIOException - thrown by code that cannot throw a checked IOException, such as
// BiFunction.apply
    // ExecutionException - thrown by code running in a thread pool when that code throws an
// exception
    // UncheckedExecutionException - thrown by code that cannot throw a checked
// ExecutionException, such as BiFunction.apply
case e2 @ (_: UncheckedIOException | _: ExecutionException | _: UncheckedExecutionException)
if e2.getCause != null =>
isInterruptionException(e2.getCause, sc)
case se: SparkException =>
val jobGroup = sc.getLocalProperty("spark.jobGroup.id")
if (jobGroup == null) return false
val errorMsg = se.getMessage
if (errorMsg.contains("cancelled") && errorMsg.contains(jobGroup) && se.getCause == null) {
true
} else if (se.getCause != null) {
isInterruptionException(se.getCause, sc)
} else {
false
}
case _ =>
false
}
/** Whether the path contains special chars that will be escaped when converting to a `URI`. */
def containsSpecialCharsInPath(path: Path): Boolean = {
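    // Illustrative example (not part of the original source): "/tmp/chk point" renders as
    // "/tmp/chk%20point" in URI form, so re-parsing that string yields a different path and
    // this method returns true.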
path.toUri.getPath != new Path(path.toUri.toString).toUri.getPath
}
}
/**
 * A special thread to run the stream query. Some code is required to run in the QueryExecutionThread
 * and will use `classOf[QueryExecutionThread]` to check.
*/
abstract class QueryExecutionThread(name: String) extends UninterruptibleThread(name)
| ptkool/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamExecution.scala | Scala | apache-2.0 | 28,255 |
package org.jetbrains.plugins.scala.project.gradle
import java.io.File
import java.util
import com.intellij.openapi.externalSystem.model.project.ProjectData
import com.intellij.openapi.externalSystem.model.{DataNode, Key}
import com.intellij.openapi.module.ModuleManager
import org.jetbrains.plugins.gradle.model.data.{ScalaCompileOptionsData, ScalaModelData}
import org.jetbrains.plugins.scala.project.DebuggingInfoLevel
import org.jetbrains.plugins.scala.project.settings.ScalaCompilerConfiguration
import org.jetbrains.sbt.UsefulTestCaseHelper
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.data._
import org.jetbrains.sbt.project.data.service.ExternalSystemDataDsl._
import org.jetbrains.sbt.project.data.service.{ExternalSystemDataDsl, ProjectDataServiceTestCase}
import scala.collection.JavaConverters._
/**
* @author Nikolay Obedin
* @since 6/4/15.
*/
class ScalaGradleDataServiceTest extends ProjectDataServiceTestCase with UsefulTestCaseHelper {
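  // Builds a one-module test project through the external-system DSL; the module carries a
  // ScalaModelData node with the given compiler classpath and options and a fixed target
  // compatibility of "1.5".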
private def generateProject(scalaVersion: Option[String], scalaCompilerClasspath: Set[File],
compilerOptions: Option[ScalaCompileOptionsData]): DataNode[ProjectData] =
new project {
name := getProject.getName
ideDirectoryPath := getProject.getBasePath
linkedProjectPath := getProject.getBasePath
val scalaLibrary = scalaVersion.map { version =>
new library { name := "org.scala-lang:scala-library:" + version }
}
scalaLibrary.foreach(libraries += _)
modules += new javaModule {
name := "Module 1"
moduleFileDirectoryPath := getProject.getBasePath + "/module1"
externalConfigPath := getProject.getBasePath + "/module1"
scalaLibrary.foreach(libraryDependencies += _)
arbitraryNodes += new Node[ScalaModelData] {
override protected val data: ScalaModelData = new ScalaModelData(SbtProjectSystem.Id)
override protected def key: Key[ScalaModelData] = ScalaModelData.KEY
def asSerializableJavaSet[T](scalaSet: Set[T]): util.Set[T] = {
val classpath = new util.HashSet[T]()
util.Collections.addAll(classpath, scalaSet.toSeq:_*)
classpath
}
data.setScalaClasspath(asSerializableJavaSet(scalaCompilerClasspath))
data.setScalaCompileOptions(compilerOptions.getOrElse(new ScalaCompileOptionsData))
data.setTargetCompatibility("1.5")
}
}
}.build.toDataNode
def testEmptyScalaCompilerClasspath(): Unit = {
importProjectData(generateProject(None, Set.empty, None))
// FIXME: can't check notification count for Gradle because tool window is uninitialized
// assertNotificationsCount(NotificationSource.PROJECT_SYNC, NotificationCategory.WARNING, GradleConstants.SYSTEM_ID, 1)
}
def testScalaCompilerClasspathWithoutScala(): Unit = {
importProjectData(generateProject(None, Set(new File("/tmp/test/not-a-scala-library.jar")), None))
// FIXME: can't check notification count for Gradle because tool window is uninitialized
// assertNotificationsCount(NotificationSource.PROJECT_SYNC, NotificationCategory.WARNING, GradleConstants.SYSTEM_ID, 1)
}
def testWithoutScalaLibrary(): Unit = {
importProjectData(generateProject(None, Set(new File("/tmp/test/scala-library-2.10.4.jar")), None))
// FIXME: can't check notification count for Gradle because tool window is uninitialized
// assertNotificationsCount(NotificationSource.PROJECT_SYNC, NotificationCategory.WARNING, GradleConstants.SYSTEM_ID, 1)
}
def testWithDifferentVersionOfScalaLibrary(): Unit = {
importProjectData(generateProject(Some("2.11.5"), Set(new File("/tmp/test/scala-library-2.10.4.jar")), None))
// FIXME: can't check notification count for Gradle because tool window is uninitialized
// assertNotificationsCount(NotificationSource.PROJECT_SYNC, NotificationCategory.WARNING, GradleConstants.SYSTEM_ID, 1)
}
def testWithTheSameVersionOfScalaLibrary(): Unit = {
importProjectData(generateProject(Some("2.10.4"), Set(new File("/tmp/test/scala-library-2.10.4.jar")), None))
import org.jetbrains.plugins.scala.project._
val isLibrarySetUp = getProject.libraries
.filter(_.getName.contains("scala-library"))
.exists(_.isScalaSdk)
assert(isLibrarySetUp, "Scala library is not set up")
}
def testCompilerOptionsSetup(): Unit = {
val additionalOptions = Seq(
"-Xplugin:test-plugin.jar",
"-Xexperimental",
"-P:continuations:enable",
"-language:dynamics",
"-language:existentials",
"-explaintypes",
"-feature",
"-language:higherKinds",
"-language:implicitConversions",
"-language:macros",
"-language:postfixOps",
"-language:reflectiveCalls",
"-no-specialization",
"-nowarn"
)
val options = new ScalaCompileOptionsData
options.setDebugLevel("source")
options.setEncoding("utf-8")
options.setDeprecation(true)
options.setOptimize(true)
options.setUnchecked(true)
options.setAdditionalParameters(additionalOptions.asJava)
importProjectData(generateProject(Some("2.10.4"), Set(new File("/tmp/test/scala-library-2.10.4.jar")), Some(options)))
val module = ModuleManager.getInstance(getProject).findModuleByName("Module 1")
val compilerConfiguration = ScalaCompilerConfiguration.instanceIn(getProject).getSettingsForModule(module)
assert(compilerConfiguration.debuggingInfoLevel == DebuggingInfoLevel.Source)
assert(compilerConfiguration.plugins == Seq("test-plugin.jar"))
assert(compilerConfiguration.additionalCompilerOptions == Seq("-encoding", "utf-8", "-target:jvm-1.5"))
assert(compilerConfiguration.experimental)
assert(compilerConfiguration.continuations)
assert(compilerConfiguration.deprecationWarnings)
assert(compilerConfiguration.dynamics)
assert(compilerConfiguration.existentials)
assert(compilerConfiguration.explainTypeErrors)
assert(compilerConfiguration.featureWarnings)
assert(compilerConfiguration.higherKinds)
assert(compilerConfiguration.implicitConversions)
assert(compilerConfiguration.macros)
assert(compilerConfiguration.optimiseBytecode)
assert(compilerConfiguration.postfixOps)
assert(compilerConfiguration.reflectiveCalls)
assert(!compilerConfiguration.specialization)
assert(compilerConfiguration.uncheckedWarnings)
assert(!compilerConfiguration.warnings)
}
def testModuleIsNull(): Unit = {
val testProject = new project {
name := getProject.getName
ideDirectoryPath := getProject.getBasePath
linkedProjectPath := getProject.getBasePath
arbitraryNodes += new Node[ScalaModelData] {
override protected val data: ScalaModelData = new ScalaModelData(SbtProjectSystem.Id)
override protected def key: Key[ScalaModelData] = ScalaModelData.KEY
}
}.build.toDataNode
importProjectData(testProject)
}
}
| ilinum/intellij-scala | test/org/jetbrains/plugins/scala/project/gradle/ScalaGradleDataServiceTest.scala | Scala | apache-2.0 | 6,974 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.responsiblepeople
import jto.validation.forms._
import jto.validation.{From, Rule, Write}
import play.api.libs.json.Json
case class BankAccountRegistered(registerAnotherBank: Boolean)
object BankAccountRegistered {
implicit val formats = Json.format[BankAccountRegistered]
import utils.MappingUtils.Implicits._
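  // Form binding: "registerAnotherBank" must bind to a Boolean, otherwise the error key
  // "error.required.bankdetails.register.another.bank" is reported.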
implicit val formRule: Rule[UrlFormEncoded, BankAccountRegistered] =
From[UrlFormEncoded] { __ =>
import jto.validation.forms.Rules._
(__ \\ "registerAnotherBank").read[Boolean].withMessage("error.required.bankdetails.register.another.bank") map BankAccountRegistered.apply
}
implicit val formWrites: Write[BankAccountRegistered, UrlFormEncoded] =
Write {
case BankAccountRegistered(b) =>
Map("registerAnotherBank" -> Seq(b.toString))
}
}
| hmrc/amls-frontend | app/models/bankdetails/BankAccountRegistered.scala | Scala | apache-2.0 | 1,426 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2013 Association du Paris Java User Group.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package library
import java.io.File
import org.apache.commons.io.IOUtils
import scala.collection.JavaConversions._
case class GitInfo(version: String, branch: String)
/**
* Extracts the current Git version and store it for bug reports.
*
* @author Nicolas Martignole, Innoteria
*/
object GitUtils {
def getGitVersion: GitInfo = {
try {
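      // The first line of `git log -1` is "commit <sha>"; `git branch` marks the current
      // branch with "*".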
val version = execCmd("git log -1").headOption.getOrElse("Unknown").replace("commit", "").trim
      val branch: String = execCmd("git branch").find(_.contains("*")).getOrElse("No current branch").replace("*", "").trim
GitInfo(version, branch)
} catch {
case _: Exception => GitInfo("Unknown", "Unknown")
}
}
  private def execCmd(cmd: java.lang.String): List[String] = {
    val process = Runtime.getRuntime.exec(cmd, null, new File("."))
process.waitFor()
IOUtils.readLines(process.getInputStream).toList
}
}
| dadoonet/cfp-devoxx | app/library/GitUtils.scala | Scala | mit | 2,119 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import org.apache.spark._
class AccumulatorV2Suite extends SparkFunSuite {
test("LongAccumulator add/avg/sum/count/isZero") {
val acc = new LongAccumulator
assert(acc.isZero)
assert(acc.count == 0)
assert(acc.sum == 0)
assert(acc.avg.isNaN)
acc.add(0)
assert(!acc.isZero)
assert(acc.count == 1)
assert(acc.sum == 0)
assert(acc.avg == 0.0)
acc.add(1)
assert(acc.count == 2)
assert(acc.sum == 1)
assert(acc.avg == 0.5)
// Also test add using non-specialized add function
acc.add(new java.lang.Long(2))
assert(acc.count == 3)
assert(acc.sum == 3)
assert(acc.avg == 1.0)
// Test merging
val acc2 = new LongAccumulator
acc2.add(2)
acc.merge(acc2)
assert(acc.count == 4)
assert(acc.sum == 5)
assert(acc.avg == 1.25)
}
test("DoubleAccumulator add/avg/sum/count/isZero") {
val acc = new DoubleAccumulator
assert(acc.isZero)
assert(acc.count == 0)
assert(acc.sum == 0.0)
assert(acc.avg.isNaN)
acc.add(0.0)
assert(!acc.isZero)
assert(acc.count == 1)
assert(acc.sum == 0.0)
assert(acc.avg == 0.0)
acc.add(1.0)
assert(acc.count == 2)
assert(acc.sum == 1.0)
assert(acc.avg == 0.5)
// Also test add using non-specialized add function
acc.add(new java.lang.Double(2.0))
assert(acc.count == 3)
assert(acc.sum == 3.0)
assert(acc.avg == 1.0)
// Test merging
val acc2 = new DoubleAccumulator
acc2.add(2.0)
acc.merge(acc2)
assert(acc.count == 4)
assert(acc.sum == 5.0)
assert(acc.avg == 1.25)
}
test("ListAccumulator") {
val acc = new CollectionAccumulator[Double]
assert(acc.value.isEmpty)
assert(acc.isZero)
acc.add(0.0)
assert(acc.value.contains(0.0))
assert(!acc.isZero)
acc.add(new java.lang.Double(1.0))
val acc2 = acc.copyAndReset()
assert(acc2.value.isEmpty)
assert(acc2.isZero)
assert(acc.value.contains(1.0))
assert(!acc.isZero)
assert(acc.value.size() === 2)
acc2.add(2.0)
assert(acc2.value.contains(2.0))
assert(!acc2.isZero)
assert(acc2.value.size() === 1)
// Test merging
acc.merge(acc2)
assert(acc.value.contains(2.0))
assert(!acc.isZero)
assert(acc.value.size() === 3)
val acc3 = acc.copy()
assert(acc3.value.contains(2.0))
assert(!acc3.isZero)
assert(acc3.value.size() === 3)
acc3.reset()
assert(acc3.isZero)
assert(acc3.value.isEmpty)
}
test("LegacyAccumulatorWrapper") {
val acc = new LegacyAccumulatorWrapper("default", AccumulatorParam.StringAccumulatorParam)
assert(acc.value === "default")
assert(!acc.isZero)
acc.add("foo")
assert(acc.value === "foo")
assert(!acc.isZero)
acc.add(new java.lang.String("bar"))
val acc2 = acc.copyAndReset()
assert(acc2.value === "")
assert(acc2.isZero)
assert(acc.value === "bar")
assert(!acc.isZero)
acc2.add("baz")
assert(acc2.value === "baz")
assert(!acc2.isZero)
// Test merging
acc.merge(acc2)
assert(acc.value === "baz")
assert(!acc.isZero)
val acc3 = acc.copy()
assert(acc3.value === "baz")
assert(!acc3.isZero)
acc3.reset()
assert(acc3.isZero)
assert(acc3.value === "")
}
}
| aokolnychyi/spark | core/src/test/scala/org/apache/spark/util/AccumulatorV2Suite.scala | Scala | apache-2.0 | 4,120 |
package com.chinthaka.imagesimilarity.db
import java.sql.{ResultSet, Statement, PreparedStatement, DriverManager, Connection}
import java.util.logging.Logger
import com.chinthaka.imagesimilarity.util.GlobalContext
import com.chinthaka.imagesimilarity.storage.ImageMetadataStorage
/**
* @author - Eran Withana ([email protected])
*/
object PostgresImageMetadataStorage extends ImageMetadataStorage {
val logger: Logger = Logger.getLogger(this.getClass.getName)
val DBHostName = GlobalContext.config.getString("app.db.hostname")
val DBPort = GlobalContext.config.getInt("app.db.port")
val DBName = GlobalContext.config.getString("app.db.schema")
val DBUsername = GlobalContext.config.getString("app.db.username")
val DBPassword = GlobalContext.config.getString("app.db.password")
var lastUpdateTime: Long = 0
def getConnection(jdbcURL: String = s"jdbc:postgresql://$DBHostName:$DBPort/$DBName",
dbUsername: String = DBUsername,
dbPassword: String = DBPassword,
timeout: Int = 1, // timeout in seconds
maxRetries: Int = 3): Option[Connection] = {
logger.info(s"DB Parameters $DBUsername@${DBHostName}:${DBPort}/$DBName")
try {
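      // Retry loop: keep opening connections until one passes `isValid(timeout)`, giving up
      // with an IllegalStateException after `maxRetries` attempts.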
val c = Iterator
.continually(DriverManager.getConnection(jdbcURL, dbUsername, dbPassword))
.zipWithIndex
.dropWhile({
case (conn, i) =>
if (i >= maxRetries)
throw new IllegalStateException(s"Failed after $i tries, giving up")
if (i > 0)
logger.info(s"Retry ${i + 1}/$maxRetries to get DB connection for ${jdbcURL} after $timeout seconds")
// Ref: http://docs.oracle.com/javase/7/docs/api/java/sql/Connection.html#isValid(int)
!conn.isValid(timeout)
})
.next
._1
Option(c)
} catch {
case e: Exception => {
val msg = s"Error in creating a connection for ${jdbcURL}"
        logger.severe(s"$msg: ${e.getMessage}")
throw new IllegalStateException(msg)
}
}
}
override def insertImageMetadata(imageMetadata: ImageMetadata) {
getConnection() match {
case Some(connection) => {
val preparedStatement: PreparedStatement = connection
.prepareStatement(s"INSERT into ${ImageMetadata.Schema}.${ImageMetadata.Table} (" +
s" ${ImageMetadata.UUID}," +
s" ${ImageMetadata.Timestamp}," +
s" ${ImageMetadata.LowResHist}," +
s" ${ImageMetadata.HighResHist}) values (?, ?, ?, ?)")
preparedStatement.setString(1, imageMetadata.uuid)
preparedStatement.setLong(2, System.currentTimeMillis)
preparedStatement.setString(3, imageMetadata.lowResHist)
preparedStatement.setString(4, imageMetadata.highResHist)
preparedStatement.execute()
preparedStatement.close()
connection.close()
}
case _ => {
logger.severe("Couldn't get a connection to the database")
}
}
}
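  // Incremental read: only rows newer than `lastUpdateTime` are fetched, and the watermark is
  // advanced while scanning the result set (note: the shared var is not synchronized).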
override def retrieveNewImageMetadata: List[ImageMetadata] = {
getConnection() match {
case Some(connection) => {
val statement: Statement = connection.createStatement()
val sql: String = s"select * from ${ImageMetadata.Schema}.${ImageMetadata.Table} WHERE ${ImageMetadata.Timestamp} > $lastUpdateTime"
val resultSet: ResultSet = statement.executeQuery(sql)
val imageMetaData = Iterator.continually(resultSet).takeWhile(_.next).map(rs => {
val lastUpdateTimeCurrent = rs.getLong(ImageMetadata.Timestamp)
lastUpdateTime = if (lastUpdateTimeCurrent > lastUpdateTime) lastUpdateTimeCurrent else lastUpdateTime
new ImageMetadata(rs.getString("uuid"),
rs.getString("low_res_hist"),
rs.getString("high_res_hist"))
}).toList
connection.close()
imageMetaData
}
case _ => {
logger.severe("Couldn't get a connection to the database")
List.empty
}
}
}
override def retrieveImagesWithProperty(propertyName: String, value: String, constraintName: Option[String] = None,
constraintValue: Option[String] = None): Option[List[ImageMetadata]] = {
getConnection() match {
case Some(connection) => {
val statement: Statement = connection.createStatement()
val optionalWhereClause = if (constraintName.nonEmpty) s" and ${constraintName.get} != \\'${constraintValue.get}\\'" else ""
val sql: String = s"SELECT * from ${ImageMetadata.Schema}.${ImageMetadata.Table} WHERE $propertyName = \\'$value\\' $optionalWhereClause"
logger.info(s"Executing sql $sql")
val resultSet: ResultSet = statement.executeQuery(sql)
val imageMetaData = Iterator.continually(resultSet).takeWhile(_.next).map(rs =>
new ImageMetadata(rs.getString("uuid"),
rs.getString("low_res_hist"),
rs.getString("high_res_hist"))
).toList
Option(imageMetaData)
}
case _ => {
logger.severe("Couldn't get a connection to the database")
None
}
}
}
}
| echinthaka/ImageSimilarity | src/main/scala/com/chinthaka/imagesimilarity/db/PostgresImageMetadataStorage.scala | Scala | apache-2.0 | 5,953 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.feature.PolynomialExpansion
import org.apache.spark.ml.linalg.Vectors
// $example off$
import org.apache.spark.sql.SparkSession
object PolynomialExpansionExample {
def main(args: Array[String]): Unit = {
val spark = SparkSession
.builder
.appName("PolynomialExpansionExample")
.getOrCreate()
// $example on$
val data = Array(
Vectors.dense(2.0, 1.0),
Vectors.dense(0.0, 0.0),
Vectors.dense(3.0, -1.0)
)
val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("features")
val polyExpansion = new PolynomialExpansion()
.setInputCol("features")
.setOutputCol("polyFeatures")
.setDegree(3)
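    // With degree 3, a 2-dimensional input (x, y) expands to the 9 monomials of degree 1 to 3:
    // x, x^2, x^3, y, x*y, x^2*y, y^2, x*y^2, y^3 (the constant term is excluded).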
val polyDF = polyExpansion.transform(df)
polyDF.show(false)
// $example off$
spark.stop()
}
}
// scalastyle:on println
| lhfei/spark-in-action | spark-2.x/src/main/scala/org/apache/spark/examples/ml/PolynomialExpansionExample.scala | Scala | apache-2.0 | 1,775 |
package net.takasing
import org.slf4j.LoggerFactory
/**
* @author takasing
*/
object HelloWorld extends App {
val logger = LoggerFactory.getLogger(this.getClass())
def logging(x : String): Unit = {
logger.info(x)
}
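  // Sums the integers from n to m inclusive (0 when n > m); equivalent to the closed form
  // (m - n + 1) * (n + m) / 2 for n <= m.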
def sumOfInt(n: Int, m: Int): BigInt = {
var a : BigInt = 0
for (i <- n to m) a += i
a
}
logging("hello")
logging("hella")
logging(sumOfInt(1, 10).toString())
}
| takasing/scala-playground | src/main/scala/net/takasing/HelloWorld.scala | Scala | mit | 412 |
/*
* Copyright 2011-2019 Asakusa Framework Team.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.asakusafw.spark.compiler
package operator
package user.join
import org.junit.runner.RunWith
import org.scalatest.{ Assertions, FlatSpec }
import org.scalatest.junit.JUnitRunner
import java.io.{ DataInput, DataOutput }
import java.util.{ List => JList }
import java.util.function.Consumer
import scala.collection.JavaConversions._
import org.apache.hadoop.io.Writable
import org.apache.spark.broadcast.{ Broadcast => Broadcasted }
import com.asakusafw.lang.compiler.model.description.ClassDescription
import com.asakusafw.lang.compiler.model.graph.{ Groups, MarkerOperator, Operator, OperatorInput }
import com.asakusafw.lang.compiler.model.testing.OperatorExtractor
import com.asakusafw.lang.compiler.planning.PlanMarker
import com.asakusafw.runtime.core.{ GroupView, View }
import com.asakusafw.runtime.model.DataModel
import com.asakusafw.runtime.value.IntOption
import com.asakusafw.spark.compiler.broadcast.MockBroadcast
import com.asakusafw.spark.compiler.spi.{ OperatorCompiler, OperatorType }
import com.asakusafw.spark.runtime.fragment.{ Fragment, GenericOutputFragment }
import com.asakusafw.spark.runtime.graph.BroadcastId
import com.asakusafw.spark.runtime.io.WritableSerDe
import com.asakusafw.spark.runtime.rdd.ShuffleKey
import com.asakusafw.spark.tools.asm._
import com.asakusafw.vocabulary.operator.{ MasterCheck => MasterCheckOp, MasterSelection }
@RunWith(classOf[JUnitRunner])
class BroadcastMasterCheckOperatorCompilerSpecTest extends BroadcastMasterCheckOperatorCompilerSpec
class BroadcastMasterCheckOperatorCompilerSpec extends FlatSpec with UsingCompilerContext {
import BroadcastMasterCheckOperatorCompilerSpec._
behavior of classOf[BroadcastMasterCheckOperatorCompiler].getSimpleName
it should "compile MasterCheck operator without master selection" in {
val foosMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.BROADCAST).build()
val operator = OperatorExtractor
.extract(classOf[MasterCheckOp], classOf[MasterCheckOperator], "check")
.input("foos", ClassDescription.of(classOf[Foo]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq("id")))
.upstream(foosMarker.getOutput)
}
})
.input("bars", ClassDescription.of(classOf[Bar]),
Groups.parse(Seq("fooId"), Seq("+id")))
.output("found", ClassDescription.of(classOf[Bar]))
.output("missed", ClassDescription.of(classOf[Bar]))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
context.addClass(context.broadcastIds)
val cls = context.loadClass[Fragment[Bar]](thisType.getClassName)
val broadcastIdsCls = context.loadClass(context.broadcastIds.thisType.getClassName)
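    // Resolves the BroadcastId constant that the compiler generated for a plan marker,
    // looked up by the marker's serial number.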
def getBroadcastId(marker: MarkerOperator): BroadcastId = {
val sn = marker.getSerialNumber
broadcastIdsCls.getField(context.broadcastIds.getField(sn)).get(null).asInstanceOf[BroadcastId]
}
val found = new GenericOutputFragment[Bar]()
val missed = new GenericOutputFragment[Bar]()
val ctor = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]])
{
val foo = new Foo()
foo.id.modify(1)
val foos = Seq(foo)
val shuffleKey = new ShuffleKey(
WritableSerDe.serialize(foo.id), Array.emptyByteArray)
val fragment = ctor.newInstance(
Map(getBroadcastId(foosMarker) -> new MockBroadcast(0, Map(shuffleKey -> foos))),
found,
missed)
fragment.reset()
val bar = new Bar()
bar.id.modify(10)
bar.fooId.modify(1)
fragment.add(bar)
val founds = found.iterator.toSeq
assert(founds.size === 1)
assert(founds.head.id.get === 10)
val misseds = missed.iterator.toSeq
assert(misseds.size === 0)
fragment.reset()
assert(found.iterator.size === 0)
assert(missed.iterator.size === 0)
}
{
val fragment = ctor.newInstance(
Map(getBroadcastId(foosMarker) -> new MockBroadcast(0, Map.empty)),
found,
missed)
fragment.reset()
val bar = new Bar()
bar.id.modify(10)
bar.fooId.modify(1)
fragment.add(bar)
val founds = found.iterator.toSeq
assert(founds.size === 0)
val misseds = missed.iterator.toSeq
assert(misseds.size === 1)
assert(misseds.head.id.get === 10)
fragment.reset()
assert(found.iterator.size === 0)
assert(missed.iterator.size === 0)
}
}
it should "compile MasterCheck operator with master selection" in {
val foosMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.BROADCAST).build()
val operator = OperatorExtractor
.extract(classOf[MasterCheckOp], classOf[MasterCheckOperator], "checkWithSelection")
.input("foos", ClassDescription.of(classOf[Foo]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq("id")))
.upstream(foosMarker.getOutput)
}
})
.input("bars", ClassDescription.of(classOf[Bar]),
Groups.parse(Seq("fooId"), Seq("+id")))
.output("found", ClassDescription.of(classOf[Bar]))
.output("missed", ClassDescription.of(classOf[Bar]))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
context.addClass(context.broadcastIds)
val cls = context.loadClass[Fragment[Bar]](thisType.getClassName)
val broadcastIdsCls = context.loadClass(context.broadcastIds.thisType.getClassName)
def getBroadcastId(marker: MarkerOperator): BroadcastId = {
val sn = marker.getSerialNumber
broadcastIdsCls.getField(context.broadcastIds.getField(sn)).get(null).asInstanceOf[BroadcastId]
}
val found = new GenericOutputFragment[Bar]()
val missed = new GenericOutputFragment[Bar]()
val ctor = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]])
{
val foo = new Foo()
foo.id.modify(0)
val foos = Seq(foo)
val shuffleKey = new ShuffleKey(
WritableSerDe.serialize(foo.id), Array.emptyByteArray)
val fragment = ctor.newInstance(
Map(getBroadcastId(foosMarker) -> new MockBroadcast(0, Map(shuffleKey -> foos))),
found,
missed)
fragment.reset()
val bars = (0 until 10).map { i =>
val bar = new Bar()
bar.id.modify(i)
bar.fooId.modify(0)
fragment.add(bar)
}
val founds = found.iterator.toSeq
assert(founds.size === 5)
assert(founds.map(_.id.get) === (0 until 10 by 2))
val misseds = missed.iterator.toSeq
assert(misseds.size === 5)
assert(misseds.map(_.id.get) === (1 until 10 by 2))
fragment.reset()
assert(found.iterator.size === 0)
assert(missed.iterator.size === 0)
}
{
val fragment = ctor.newInstance(
Map(getBroadcastId(foosMarker) -> new MockBroadcast(0, Map.empty)),
found,
missed)
fragment.reset()
val bar = new Bar()
bar.id.modify(10)
bar.fooId.modify(1)
fragment.add(bar)
val founds = found.iterator.toSeq
assert(founds.size === 0)
val misseds = missed.iterator.toSeq
assert(misseds.size === 1)
assert(misseds.head.id.get === 10)
fragment.reset()
assert(found.iterator.size === 0)
assert(missed.iterator.size === 0)
}
}
it should "compile MasterCheck operator with master from core.empty" in {
val operator = OperatorExtractor
.extract(classOf[MasterCheckOp], classOf[MasterCheckOperator], "check")
.input("foos", ClassDescription.of(classOf[Foo]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq("id")))
}
})
.input("bars", ClassDescription.of(classOf[Bar]),
Groups.parse(Seq("fooId"), Seq("+id")))
.output("found", ClassDescription.of(classOf[Bar]))
.output("missed", ClassDescription.of(classOf[Bar]))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
context.addClass(context.broadcastIds)
val cls = context.loadClass[Fragment[Bar]](thisType.getClassName)
val found = new GenericOutputFragment[Bar]()
val missed = new GenericOutputFragment[Bar]()
val ctor = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]])
{
val fragment = ctor.newInstance(Map.empty, found, missed)
fragment.reset()
val bar = new Bar()
bar.id.modify(10)
bar.fooId.modify(1)
fragment.add(bar)
val founds = found.iterator.toSeq
assert(founds.size === 0)
val misseds = missed.iterator.toSeq
assert(misseds.size === 1)
assert(misseds.head.id.get === 10)
fragment.reset()
assert(found.iterator.size === 0)
assert(missed.iterator.size === 0)
}
}
it should "compile MasterCheck operator with view" in {
val vMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.BROADCAST).build()
val gvMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.BROADCAST).build()
val foosMarker = MarkerOperator.builder(ClassDescription.of(classOf[Foo]))
.attribute(classOf[PlanMarker], PlanMarker.BROADCAST).build()
val operator = OperatorExtractor
.extract(classOf[MasterCheckOp], classOf[MasterCheckOperator], "checkWithView")
.input("foos", ClassDescription.of(classOf[Foo]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq("id")))
.upstream(foosMarker.getOutput)
}
})
.input("bars", ClassDescription.of(classOf[Bar]),
Groups.parse(Seq("fooId"), Seq("+id")))
.input("v", ClassDescription.of(classOf[Foo]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq.empty, Seq.empty))
.upstream(vMarker.getOutput)
}
})
.input("gv", ClassDescription.of(classOf[Foo]),
new Consumer[Operator.InputOptionBuilder] {
override def accept(builder: Operator.InputOptionBuilder): Unit = {
builder
.unit(OperatorInput.InputUnit.WHOLE)
.group(Groups.parse(Seq("id"), Seq.empty))
.upstream(gvMarker.getOutput)
}
})
.output("found", ClassDescription.of(classOf[Bar]))
.output("missed", ClassDescription.of(classOf[Bar]))
.build()
implicit val context = newOperatorCompilerContext("flowId")
val thisType = OperatorCompiler.compile(operator, OperatorType.ExtractType)
context.addClass(context.broadcastIds)
val cls = context.loadClass[Fragment[Bar]](thisType.getClassName)
val broadcastIdsCls = context.loadClass(context.broadcastIds.thisType.getClassName)
def getBroadcastId(marker: MarkerOperator): BroadcastId = {
val sn = marker.getSerialNumber
broadcastIdsCls.getField(context.broadcastIds.getField(sn)).get(null).asInstanceOf[BroadcastId]
}
val found = new GenericOutputFragment[Bar]()
val missed = new GenericOutputFragment[Bar]()
val ctor = cls.getConstructor(
classOf[Map[BroadcastId, Broadcasted[_]]],
classOf[Fragment[_]], classOf[Fragment[_]])
val view = new MockBroadcast(0, Map(ShuffleKey.empty -> Seq(new Foo())))
val groupview = new MockBroadcast(1,
(0 until 10).map { i =>
val foo = new Foo()
foo.id.modify(i)
new ShuffleKey(WritableSerDe.serialize(foo.id)) -> Seq(foo)
}.toMap)
{
val foo = new Foo()
foo.id.modify(0)
val foos = Seq(foo)
val shuffleKey = new ShuffleKey(
WritableSerDe.serialize(foo.id), Array.emptyByteArray)
val fragment = ctor.newInstance(
Map(
getBroadcastId(vMarker) -> view,
getBroadcastId(gvMarker) -> groupview,
getBroadcastId(foosMarker) -> new MockBroadcast(2, Map(shuffleKey -> foos))),
found,
missed)
fragment.reset()
val bars = (0 until 10).map { i =>
val bar = new Bar()
bar.id.modify(i)
bar.fooId.modify(0)
fragment.add(bar)
}
val founds = found.iterator.toSeq
assert(founds.size === 5)
assert(founds.map(_.id.get) === (0 until 10 by 2))
val misseds = missed.iterator.toSeq
assert(misseds.size === 5)
assert(misseds.map(_.id.get) === (1 until 10 by 2))
fragment.reset()
assert(found.iterator.size === 0)
assert(missed.iterator.size === 0)
}
{
val fragment = ctor.newInstance(
Map(
getBroadcastId(vMarker) -> view,
getBroadcastId(gvMarker) -> groupview,
getBroadcastId(foosMarker) -> new MockBroadcast(2, Map.empty)),
found,
missed)
fragment.reset()
val bar = new Bar()
bar.id.modify(10)
bar.fooId.modify(1)
fragment.add(bar)
val founds = found.iterator.toSeq
assert(founds.size === 0)
val misseds = missed.iterator.toSeq
assert(misseds.size === 1)
assert(misseds.head.id.get === 10)
fragment.reset()
assert(found.iterator.size === 0)
assert(missed.iterator.size === 0)
}
}
}
object BroadcastMasterCheckOperatorCompilerSpec {
class Foo extends DataModel[Foo] with Writable {
val id = new IntOption()
override def reset(): Unit = {
id.setNull()
}
override def copyFrom(other: Foo): Unit = {
id.copyFrom(other.id)
}
override def readFields(in: DataInput): Unit = {
id.readFields(in)
}
override def write(out: DataOutput): Unit = {
id.write(out)
}
def getIdOption: IntOption = id
}
class Bar extends DataModel[Bar] with Writable {
val id = new IntOption()
val fooId = new IntOption()
override def reset(): Unit = {
id.setNull()
fooId.setNull()
}
override def copyFrom(other: Bar): Unit = {
id.copyFrom(other.id)
fooId.copyFrom(other.fooId)
}
override def readFields(in: DataInput): Unit = {
id.readFields(in)
fooId.readFields(in)
}
override def write(out: DataOutput): Unit = {
id.write(out)
fooId.write(out)
}
def getIdOption: IntOption = id
def getFooIdOption: IntOption = fooId
}
class MasterCheckOperator extends Assertions {
@MasterCheckOp
def check(foo: Foo, bar: Bar): Boolean = ???
@MasterCheckOp(selection = "select")
def checkWithSelection(foo: Foo, bar: Bar): Boolean = ???
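    // Selection used by "checkWithSelection": only Bar records with an even id are joined to
    // the first Foo master; odd ids fall through to the "missed" output in the tests above.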
@MasterSelection
def select(foos: JList[Foo], bar: Bar): Foo = {
if (bar.id.get % 2 == 0) {
foos.headOption.orNull
} else {
null
}
}
@MasterCheckOp(selection = "selectWithView")
def checkWithView(foo: Foo, bar: Bar, v: View[Foo], gv: GroupView[Foo]): Boolean = ???
@MasterSelection
def selectWithView(foos: JList[Foo], bar: Bar, v: View[Foo], gv: GroupView[Foo]): Foo = {
val view = v.toSeq
assert(view.size === 1)
assert(view.head.id.isNull())
val group = gv.find(bar.id).toSeq
if (bar.id.get < 10) {
assert(group.size === 1)
assert(group.head.id.get === bar.id.get)
} else {
assert(group.size === 0)
}
if (bar.id.get % 2 == 0) {
foos.headOption.orNull
} else {
null
}
}
}
}
| ashigeru/asakusafw-spark | compiler/src/test/scala/com/asakusafw/spark/compiler/operator/user/join/BroadcastMasterCheckOperatorCompilerSpec.scala | Scala | apache-2.0 | 17,293 |
package scala.generator.mock
import com.bryzek.apidoc.generator.v0.models.{File, InvocationForm}
import generator.ServiceFileNames
import lib.generator.CodeGenerator
import lib.Text._
import scala.models.ApidocComments
import scala.generator._
object MockClientGenerator {
object Play24 extends CodeGenerator {
override def invoke(form: InvocationForm) = {
val ssd = new ScalaService(form.service)
MockClientGenerator(form, ScalaClientMethodConfigs.Play24(ssd.namespaces.base, None)).invoke()
}
}
object Play25 extends CodeGenerator {
override def invoke(form: InvocationForm) = {
val ssd = new ScalaService(form.service)
MockClientGenerator(form, ScalaClientMethodConfigs.Play25(ssd.namespaces.base, None)).invoke()
}
}
}
case class MockClientGenerator(
form: InvocationForm,
config: ScalaClientMethodConfig
) {
private[this] val ssd = new ScalaService(form.service)
private[this] val generator = ScalaClientMethodGenerator(config, ssd)
def invoke(): Either[Seq[String], Seq[File]] = {
val header = ApidocComments(form.service.version, form.userAgent).toJavaString() + "\n"
val code = generateCode(ssd)
Right(
Seq(
ServiceFileNames.toFile(
form.service.namespace,
form.service.organization.key,
form.service.application.key,
form.service.version,
"MockClient",
header ++ code,
Some("Scala")
)
)
)
}
private[this] def generateCode(ssd: ScalaService): String = {
Seq(
s"package ${ssd.namespaces.mock} {",
Seq(
ssd.resources match {
case Nil => None
case _ => Some(
Seq(
s"trait Client extends ${ssd.namespaces.interfaces}.Client {",
""" val baseUrl = "http://mock.localhost"""",
ssd.resources.map { resource =>
s"override def ${generator.methodName(resource)}: Mock${resource.plural} = Mock${resource.plural}Impl"
}.mkString("\n").indent(2),
"}",
ssd.resources.map { resource =>
generateMockResource(resource)
}.mkString("\n\n")
).mkString("\n\n")
)
},
Some(
Seq(
"object Factories {",
Seq(
"def randomString(): String = {",
""" "Test " + _root_.java.util.UUID.randomUUID.toString.replaceAll("-", " ")""",
"}"
).mkString("\n").indent(2),
Seq(
ssd.enums.map { makeEnum(_) },
ssd.models.map { makeModel(_) },
ssd.unions.map { makeUnion(_) }
).flatten.mkString("\n\n").indent(2),
"}"
).mkString("\n\n")
)
).flatten.mkString("\n\n").indent(2),
"}"
).mkString("\n\n")
}
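  // Mock enum factories use the first declared value, falling back to a synthetic
  // UNDEFINED("other") member when the enum declares no values.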
private[this] def makeEnum(enum: ScalaEnum): String = {
val name = enum.values.headOption match {
case None => {
"""UNDEFINED("other")"""
}
case Some(value) => {
value.name
}
}
s"def make${enum.name}() = ${enum.qualifiedName}.$name"
}
private[this] def makeModel(model: ScalaModel): String = {
Seq(
s"def make${model.name}() = ${model.qualifiedName}(",
model.fields.map { field =>
s"${field.name} = ${mockValue(field.datatype)}"
}.mkString(",\n").indent(2),
")"
).mkString("\n")
}
private[this] def makeUnion(union: ScalaUnion): String = {
val typ = union.types.headOption.getOrElse {
sys.error("Union type[${union.qualifiedName}] does not have any times")
}
s"def make${union.name}() = ${mockValue(typ.datatype)}"
}
private[this] def generateMockResource(resource: ScalaResource): String = {
Seq(
s"object Mock${resource.plural}Impl extends Mock${resource.plural}",
s"trait Mock${resource.plural} extends ${ssd.namespaces.base}.${resource.plural} {",
generator.methods(resource).map { m =>
Seq(
m.interface + " = scala.concurrent.Future {",
mockImplementation(m).indent(2),
"}"
).mkString("\n")
}.mkString("\n\n").indent(2),
"}"
).mkString("\n\n")
}
private[this] def mockImplementation(cm: ScalaClientMethod): String = {
cm.operation.responses.find(_.isSuccess) match {
case None => {
"// No-op as there is no successful response defined"
}
case Some(r) => {
mockValue(ssd.scalaDatatype(r.`type`))
}
}
}
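  // Maps each datatype to the literal Scala expression emitted into the generated mock client,
  // e.g. randomString() for strings, empty containers for lists/maps/options, and
  // Factories.makeX() calls for enums, models and unions.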
private[this] def mockValue(datatype: ScalaDatatype): String = {
datatype match {
case ScalaPrimitive.Boolean => "true"
case ScalaPrimitive.Double => "1.0"
case ScalaPrimitive.Integer => "1"
case ScalaPrimitive.Long => "1l"
case ScalaPrimitive.DateIso8601 => "new org.joda.time.LocalDate()"
case ScalaPrimitive.DateTimeIso8601 => "new org.joda.time.DateTime()"
case ScalaPrimitive.Decimal => """BigDecimal("1")"""
case ScalaPrimitive.Object => "play.api.libs.json.Json.obj()"
case ScalaPrimitive.String => "randomString()"
case ScalaPrimitive.Unit => "// unit type"
case ScalaPrimitive.Uuid => "java.util.UUID.randomUUID"
case ScalaDatatype.List(_) => "Nil"
case ScalaDatatype.Map(_) => "Map()"
case ScalaDatatype.Option(_) => "None"
case ScalaPrimitive.Enum(ns, name) => s"${ns.mock}.Factories.make$name()"
case ScalaPrimitive.Model(ns, name) => s"${ns.mock}.Factories.make$name()"
case ScalaPrimitive.Union(ns, name) => s"${ns.mock}.Factories.make$name()"
}
}
}
| Seanstoppable/apidoc-generator | scala-generator/src/main/scala/models/generator/mock/MockClientGenerator.scala | Scala | mit | 5,636 |
package scala.collection.parallel
import org.scalacheck._
import org.scalacheck.Gen
import org.scalacheck.Gen._
import org.scalacheck.Prop._
import org.scalacheck.Properties
import scala.collection._
import scala.collection.parallel._
abstract class ParallelSeqCheck[T](collName: String) extends ParallelIterableCheck[T](collName) with SeqOperators[T] {
type CollType <: collection.parallel.ParSeq[T]
def ofSize(vals: Seq[Gen[T]], sz: Int): Seq[T]
def fromSeq(s: Seq[T]): CollType
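  // The instance generator below produces a singleton collection, one sized by ScalaCheck's
  // `sized` parameter, and a large one of 1000-2000 elements (presumably to exercise parallel
  // splitting).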
override def instances(vals: Seq[Gen[T]]): Gen[Seq[T]] = oneOf(
Gen.const(ofSize(vals, 1)),
sized(
sz =>
ofSize(vals, sz)
),
for (sz <- choose(1000, 2000)) yield ofSize(vals, sz)
)
def fromTraversable(t: Traversable[T]) = fromSeq(traversable2Seq(t))
def traversable2Seq(t: Traversable[T]): Seq[T] = {
if (t.isInstanceOf[Iterable[_]]) t.asInstanceOf[Iterable[T]].iterator.toList else t.toList
}
override def collectionPairs: Gen[(Seq[T], CollType)] = for (inst <- instances(values)) yield (inst, fromSeq(inst))
override def collectionPairsWithLengths: Gen[(Seq[T], CollType, Int)] =
    for (inst <- instances(values); s <- choose(0, inst.size)) yield (inst, fromSeq(inst), s)
def collectionPairsWithModifiedWithLengths: Gen[(Seq[T], CollType, ParSeq[T], Int)] =
for (inst <- instances(values); s <- choose(0, inst.size);
updateStart <- choose(0, inst.size); howMany <- choose(0, inst.size)) yield {
val parcoll = fromSeq(inst)
val parcollmodif = fromSeq(modifySlightly(inst, updateStart, howMany))
(inst, parcoll, parcollmodif, s)
}
def collectionPairsWithModified: Gen[(Seq[T], CollType, ParSeq[T])] =
for (inst <- instances(values); updateStart <- choose(0, inst.size); howMany <- choose(0, inst.size)) yield {
val parcoll = fromSeq(inst)
val parcollmodif = fromSeq(modifySlightly(inst, updateStart, howMany))
(inst, parcoll, parcollmodif)
}
def collectionPairsWithSliced: Gen[(Seq[T], CollType, ParSeq[T])] =
for (inst <- instances(values); sliceStart <- choose(0, inst.size); howMany <- choose(0, inst.size)) yield {
val parcoll = fromSeq(inst)
val parcollsliced = fromSeq(inst.slice(sliceStart, sliceStart + howMany))
(inst, parcoll, parcollsliced)
}
def collectionTripletsWith2Indices: Gen[(Seq[T], CollType, Seq[T], Int, Int)] =
for (inst <- instances(values); f <- choose(0, inst.size); s <- choose(0, inst.size - f);
third <- instances(values); sliceStart <- choose(0, inst.size); howMany <- choose(0, inst.size)) yield {
(inst, fromSeq(inst), inst.slice(sliceStart, sliceStart + howMany), f, s)
}
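  // Produces a "slightly modified" variant by patching the collection into itself at
  // `updateStart`, replacing `howMany` elements.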
private def modifySlightly(coll: Seq[T], updateStart: Int, howMany: Int) = {
coll.patch(updateStart, coll, howMany)
}
property("segmentLengths must be equal") = forAllNoShrink(collectionPairsWithLengths) { case (s, coll, len) =>
(for ((pred, ind) <- segmentLengthPredicates.zipWithIndex) yield {
val slen = s.segmentLength(pred, if (len < 0) 0 else len)
val clen = coll.segmentLength(pred, len)
if (slen != clen) {
println("from: " + s)
println("and: " + coll)
println(slen)
println(clen)
}
("operator " + ind) |: slen == clen
}).reduceLeft(_ && _)
}
property("prefixLengths must be equal") = forAllNoShrink(collectionPairs) { case (s, coll) =>
(for ((pred, ind) <- segmentLengthPredicates.zipWithIndex) yield {
("operator " + ind) |: s.prefixLength(pred) == coll.prefixLength(pred)
}).reduceLeft(_ && _)
}
property("indexWheres must be equal") = forAllNoShrink(collectionPairsWithLengths) { case (s, coll, len) =>
(for ((pred, ind) <- indexWherePredicates.zipWithIndex) yield {
val sind = s.indexWhere(pred, len)
val cind = coll.indexWhere(pred, len)
if (sind != cind) {
println("from: " + s)
println("and: " + coll)
println("at: " + len)
println(sind)
println(cind)
}
("operator " + ind) |: sind == cind
}).reduceLeft(_ && _)
}
property("lastIndexWheres must be equal") = forAllNoShrink(collectionPairsWithLengths) { case (s, coll, len) =>
(for ((pred, ind) <- lastIndexWherePredicates.zipWithIndex) yield {
val end = if (len >= s.size) s.size - 1 else len
val sind = s.lastIndexWhere(pred, end)
val cind = coll.lastIndexWhere(pred, end)
("operator " + ind) |: sind == cind
}).reduceLeft(_ && _)
}
property("reverses must be equal") = forAllNoShrink(collectionPairs) { case (s, coll) =>
(s.length == 0 && s.getClass == classOf[collection.immutable.Range]) ||
{
val sr = s.reverse
val cr = coll.reverse
if (sr != cr) {
println("from: " + s)
println("and: " + coll)
println(sr)
println(cr)
}
sr == cr
}
}
property("reverseMaps must be equal") = forAllNoShrink(collectionPairs) { case (s, coll) =>
(for ((f, ind) <- reverseMapFunctions.zipWithIndex) yield {
("operator " + ind) |: s.reverseMap(f) == coll.reverseMap(f)
}).reduceLeft(_ && _)
}
property("sameElements must be equal") = forAllNoShrink(collectionPairsWithModifiedWithLengths) {
case (s, coll, collmodif, len) =>
val pos = if (len < 0) 0 else len
val scm = s.sameElements(collmodif)
val ccm = coll.sameElements(collmodif)
if (scm != ccm) {
println("Comparing: " + s)
println("and: " + coll)
println("with: " + collmodif)
println(scm)
println(ccm)
}
("Nil" |: s.sameElements(Nil) == coll.sameElements(Nil)) &&
("toList" |: s.sameElements(s.toList) == coll.sameElements(coll.toList)) &&
("identity" |: s.sameElements(s.map(e => e)) == coll.sameElements(coll.map(e => e))) &&
("vice-versa" |: s.sameElements(coll) == coll.sameElements(s)) &&
("equal" |: s.sameElements(coll)) &&
("modified" |: scm == ccm) &&
(for ((it, ind) <- sameElementsSeqs.zipWithIndex) yield {
val sres = s.sameElements(it)
val pres = coll.sameElements(it)
if (sres != pres) {
println("Comparing: " + s)
println("and: " + coll)
println("with: " + it)
println(sres)
println(pres)
}
("collection " + ind) |: sres == pres
}).reduceLeft(_ && _)
}
property("startsWiths must be equal") = forAllNoShrink(collectionPairsWithModifiedWithLengths) {
case (s, coll, collmodif, len) =>
val pos = if (len < 0) 0 else len
("start with self" |: s.startsWith(s) == coll.startsWith(coll)) &&
("tails correspond" |: (s.length == 0 || s.startsWith(s.tail, 1) == coll.startsWith(coll.tail, 1))) &&
("with each other" |: coll.startsWith(s)) &&
("modified" |: s.startsWith(collmodif) == coll.startsWith(collmodif)) &&
("modified2" |: s.startsWith(collmodif, pos) == coll.startsWith(collmodif, pos)) &&
(for (sq <- startEndSeqs) yield {
val ss = s.startsWith(sq, pos)
val cs = coll.startsWith(fromSeq(sq), pos)
if (ss != cs) {
println("from: " + s)
println("and: " + coll)
println("test seq: " + sq)
println("from pos: " + pos)
println(ss)
println(cs)
println(coll.iterator.psplit(pos, coll.length - pos)(1).toList)
}
("seq " + sq) |: ss == cs
}).reduceLeft(_ && _)
}
property("endsWiths must be equal") = forAllNoShrink(collectionPairsWithModified) {
case (s, coll, collmodif) =>
("ends with self" |: s.endsWith(s) == coll.endsWith(s)) &&
("ends with tail" |: (s.length == 0 || s.endsWith(s.tail) == coll.endsWith(coll.tail))) &&
("with each other" |: coll.endsWith(s)) &&
("modified" |: s.startsWith(collmodif) == coll.endsWith(collmodif)) &&
(for (sq <- startEndSeqs) yield {
val sew = s.endsWith(sq)
val cew = coll.endsWith(fromSeq(sq))
if (sew != cew) {
println("from: " + s)
println("and: " + coll)
println(sew)
println(cew)
}
("seq " + sq) |: sew == cew
}).reduceLeft(_ && _)
}
property("unions must be equal") = forAllNoShrink(collectionPairsWithModified) { case (s, coll, collmodif) =>
("modified" |: s.union(collmodif.seq) == coll.union(collmodif)) &&
("empty" |: s.union(Nil) == coll.union(fromSeq(Nil)))
}
// This is failing with my views patch: array index out of bounds in the array iterator.
// Couldn't see why this and only this was impacted, could use a second pair of eyes.
//
// This was failing because some corner cases weren't added to the patch method in ParSeqLike.
// Curiously, this wasn't detected before.
//
if (!isCheckingViews) property("patches must be equal") = forAll(collectionTripletsWith2Indices) {
case (s, coll, pat, from, repl) =>
("with seq" |: s.patch(from, pat, repl) == coll.patch(from, pat, repl)) &&
("with par" |: s.patch(from, pat, repl) == coll.patch(from, fromSeq(pat), repl)) &&
("with empty" |: s.patch(from, Nil, repl) == coll.patch(from, fromSeq(Nil), repl)) &&
("with one" |: (s.length == 0 || s.patch(from, List(s(0)), 1) == coll.patch(from, fromSeq(List(coll(0))), 1)))
}
if (!isCheckingViews) property("updates must be equal") = forAllNoShrink(collectionPairsWithLengths) { case (s, coll, len) =>
val pos = if (len >= s.length) s.length - 1 else len
if (s.length > 0) {
val supd = s.updated(pos, s(0))
val cupd = coll.updated(pos, coll(0))
if (supd != cupd) {
println("from: " + s)
println("and: " + coll)
println(supd)
println(cupd)
}
"from first" |: (supd == cupd)
} else "trivially" |: true
}
property("prepends must be equal") = forAllNoShrink(collectionPairs) { case (s, coll) =>
s.length == 0 || s(0) +: s == coll(0) +: coll
}
property("appends must be equal") = forAllNoShrink(collectionPairs) { case (s, coll) =>
s.length == 0 || s :+ s(0) == coll :+ coll(0)
}
property("padTos must be equal") = forAllNoShrink(collectionPairsWithLengths) { case (s, coll, len) =>
val someValue = sampleValue
val sdoub = s.padTo(len * 2, someValue)
val cdoub = coll.padTo(len * 2, someValue)
if (sdoub != cdoub) {
println("from: " + s)
println("and: " + coll)
println(sdoub)
println(cdoub)
}
("smaller" |: s.padTo(len / 2, someValue) == coll.padTo(len / 2, someValue)) &&
("bigger" |: sdoub == cdoub)
}
property("corresponds must be equal") = forAllNoShrink(collectionPairsWithModified) { case (s, coll, modified) =>
val modifcut = modified.toSeq.slice(0, modified.length)
("self" |: s.corresponds(s)(_ == _) == coll.corresponds(coll)(_ == _)) &&
("modified" |: s.corresponds(modified.seq)(_ == _) == coll.corresponds(modified)(_ == _)) &&
("modified2" |: s.corresponds(modifcut)(_ == _) == coll.corresponds(modifcut)(_ == _))
}
}
| shimib/scala | test/scalacheck/scala/collection/parallel/ParallelSeqCheck.scala | Scala | bsd-3-clause | 10,905 |
package net.sansa_stack.rdf.spark.qualityassessment.metrics.conciseness
import org.apache.jena.graph.Triple
import org.apache.spark.rdd.RDD
/**
* @author Gezim Sejdiu
*/
object ExtensionalConciseness {
/**
* The extensional conciseness
 * This metric checks for redundant resources in the assessed dataset,
* and thus measures the number of unique instances found in the dataset.
* @return No. of unique subjects / Total No. of subjects
*/
def assessExtensionalConciseness(dataset: RDD[Triple]): Double = {
val mapSubjects = dataset.map(_.getSubject)
val mapSubjectsWithPredicates = dataset.filter(triple => triple.getSubject.isURI() && triple.getPredicate.isURI())
.map(f => (f.getSubject, f.getPredicate))
.map((_, 1L))
.reduceByKey(_ + _)
.map { case ((k, v), cnt) => (k, (v, cnt)) }
.groupByKey()
val duplicateSubjects = dataset.filter(triple => triple.getSubject.isURI() && triple.getPredicate.isURI())
.map(f => (f.getSubject, f.getPredicate.getURI.toString() + " " + f.getObject.toString() + " "))
.map(f => (f._2, 1L))
.reduceByKey(_ + _)
.filter(_._2 > 1)
.values.sum()
// val duplicates = mapSubjectsWithPredicatesValue.map(x => (x._1, x._2.groupBy(_._1).map(y => (y._1, y._2.size))))
val totalSubjects = mapSubjects.count().toDouble
if (totalSubjects > 0) (totalSubjects - duplicateSubjects) / totalSubjects else 0
}
}
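// A minimal usage sketch (not part of the original file; how the RDD[Triple] is obtained
// is an assumption -- any Jena Triple source loaded into a Spark RDD works):
//
//   val triples: RDD[Triple] = loadTriples(sparkContext)   // hypothetical loader
//   val score = ExtensionalConciseness.assessExtensionalConciseness(triples)
//   // score approaches 1.0 when few predicate+object statements are repeated across
//   // subjects, and decreases as duplicated statements dominate the dataset.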
| SANSA-Stack/SANSA-RDF | sansa-rdf/sansa-rdf-spark/src/main/scala/net/sansa_stack/rdf/spark/qualityassessment/metrics/conciseness/ExtensionalConciseness.scala | Scala | apache-2.0 | 1,452 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.maestro.schema
package taste
import scala.collection._
import au.com.cba.omnia.maestro.schema.pretty._
/** A SampleMap is used to build a histogram of the number of times we've seen
* each string in a set of strings. The number of strings we're prepared to
* track is limited to some fixed amount. */
case class Sample(
maxSize: Int, // Maximum size of the histogram.
spilled: Array[Int], // Count of strings that wouldn't fit in this histogram.
histogram: mutable.Map[String, Int]) // Histogram of times we've seen each string.
{
/** Pretty print a column sample as JSON lines. */
def toJson: JsonDoc =
JsonMap(List(
("maxSize", JsonNum(maxSize)),
("spilled", JsonNum(spilled(0))),
("histogram", toJsonHistogram)))
/** Pretty print the sample histogram.
* We print it sorted, so the most frequently occurring strings are listed
* first */
def toJsonHistogram: JsonDoc =
JsonMap(
histogram
.toList
.sortBy { _._2 }
.reverse
.map { case (k, v) => (k, JsonNum(v)) },
false)
}
object Sample {
/** Create a new, empty SampleMap */
def empty(maxSize: Int): Sample =
Sample(maxSize, Array(0), mutable.Map())
/** Accumulate a new string into a SampleMap */
def accumulate(smap: Sample, str: String): Unit = {
// If there is already an entry in the map for this string then
// we can increment that.
if (smap.histogram.isDefinedAt(str)) {
// assigning to m suppresses a Scala warning: "discarded non-Unit value"
// the map is mutable, so we don't need the new reference to it
val m = smap.histogram += ((str, smap.histogram(str) + 1))
}
// If there is still space in the map then add a new entry.
else if (smap.histogram.size < smap.maxSize - 1) {
// assigning to m suppresses a Scala warning: "discarded non-Unit value"
// the map is mutable, so we don't need the new reference to it
val m = smap.histogram += ((str, 1))
}
// Otherwise remember that we had to spill this string.
else
smap.spilled(0) += 1
}
/** Combine the information in two SampleMaps, to produce a new one. */
def combine(sm1: Sample, sm2: Sample): Sample = {
// Accumulate the result into this empty SampleMap.
val sm3: Sample
= empty(sm1.maxSize)
// Combine the histograms.
for ((str, count1) <- sm1.histogram) {
if (sm3.histogram.isDefinedAt(str))
sm3.histogram += ((str, sm3.histogram(str) + count1))
else sm3.histogram += ((str, count1))
}
    for ((str, count2) <- sm2.histogram) {
if (sm3.histogram.isDefinedAt(str))
sm3.histogram += ((str, sm3.histogram(str) + count2))
else sm3.histogram += ((str, count2))
}
// Combine the spill counters.
sm3.spilled(0) = sm1.spilled(0) + sm2.spilled(0)
sm3
}
}
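// A minimal usage sketch (illustrative values only, not part of the original file):
//
//   val sample = Sample.empty(maxSize = 100)
//   Seq("a", "b", "a").foreach(Sample.accumulate(sample, _))
//   // sample.histogram is now Map("a" -> 2, "b" -> 1) and sample.spilled(0) == 0;
//   // strings seen after the histogram fills up are only counted in `spilled`.
//   val merged = Sample.combine(sample, Sample.empty(100))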
| CommBank/maestro | maestro-schema/src/main/scala/au/com/cba/omnia/maestro/schema/taste/Sample.scala | Scala | apache-2.0 | 3,558 |
/**
* This file is part of mycollab-web.
*
* mycollab-web is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* mycollab-web is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with mycollab-web. If not, see <http://www.gnu.org/licenses/>.
*/
package com.esofthead.mycollab.module.project.view.settings
import com.esofthead.mycollab.common.UrlTokenizer
import com.esofthead.mycollab.core.arguments.{StringSearchField, NumberSearchField}
import com.esofthead.mycollab.eventmanager.EventBusFactory
import com.esofthead.mycollab.module.project.ProjectMemberStatusConstants
import com.esofthead.mycollab.module.project.domain.criteria.ProjectMemberSearchCriteria
import com.esofthead.mycollab.module.project.events.ProjectEvent
import com.esofthead.mycollab.module.project.service.ProjectMemberService
import com.esofthead.mycollab.module.project.view.ProjectUrlResolver
import com.esofthead.mycollab.module.project.view.parameters.{ProjectMemberScreenData, ProjectScreenData}
import com.esofthead.mycollab.spring.ApplicationContextUtil
import com.esofthead.mycollab.vaadin.AppContext
import com.esofthead.mycollab.vaadin.mvp.PageActionChain
/**
* @author MyCollab Ltd
* @since 5.0.9
*/
class UserUrlResolver extends ProjectUrlResolver {
this.addSubResolver("list", new ListUrlResolver)
this.addSubResolver("preview", new PreviewUrlResolver)
this.addSubResolver("add", new AddUrlResolver)
this.addSubResolver("edit", new EditUrlResolver)
private class ListUrlResolver extends ProjectUrlResolver {
protected override def handlePage(params: String*) {
val projectId: Integer = new UrlTokenizer(params(0)).getInt
val memberSearchCriteria: ProjectMemberSearchCriteria = new ProjectMemberSearchCriteria
memberSearchCriteria.setProjectId(new NumberSearchField(projectId))
memberSearchCriteria.setStatus(new StringSearchField(ProjectMemberStatusConstants.ACTIVE))
val chain: PageActionChain = new PageActionChain(new ProjectScreenData.Goto(projectId), new ProjectMemberScreenData.Search(memberSearchCriteria))
EventBusFactory.getInstance.post(new ProjectEvent.GotoMyProject(this, chain))
}
}
private class PreviewUrlResolver extends ProjectUrlResolver {
protected override def handlePage(params: String*) {
val token: UrlTokenizer = new UrlTokenizer(params(0))
val projectId: Integer = token.getInt
val memberName: String = token.getString
val chain: PageActionChain = new PageActionChain(new ProjectScreenData.Goto(projectId),
new ProjectMemberScreenData.Read(memberName))
EventBusFactory.getInstance.post(new ProjectEvent.GotoMyProject(this, chain))
}
}
private class AddUrlResolver extends ProjectUrlResolver {
protected override def handlePage(params: String*) {
val token: UrlTokenizer = new UrlTokenizer(params(0))
val projectId: Integer = token.getInt
val chain: PageActionChain = new PageActionChain(new ProjectScreenData.Goto(projectId),
new ProjectMemberScreenData.InviteProjectMembers)
EventBusFactory.getInstance.post(new ProjectEvent.GotoMyProject(this, chain))
}
}
private class EditUrlResolver extends ProjectUrlResolver {
protected override def handlePage(params: String*) {
val token: UrlTokenizer = new UrlTokenizer(params(0))
val projectId: Integer = token.getInt
val memberId: Integer = token.getInt
val projectMemberService = ApplicationContextUtil.getSpringBean(classOf[ProjectMemberService])
val member = projectMemberService.findById(memberId, AppContext.getAccountId)
val chain: PageActionChain = new PageActionChain(new ProjectScreenData.Goto(projectId),
new ProjectMemberScreenData.Add(member))
EventBusFactory.getInstance.post(new ProjectEvent.GotoMyProject(this, chain))
}
}
}
| maduhu/mycollab | mycollab-web/src/main/scala/com/esofthead/mycollab/module/project/view/settings/UserUrlResolver.scala | Scala | agpl-3.0 | 4,480 |
package boilerplate
import jsactor.bridge.client.SocketManager
import jsactor.bridge.client.util.RemoteActorListener
import jsactor.logging.impl.JsPrintlnActorLoggerFactory
import jsactor.{JsActorRef, JsActorSystem}
import org.scalajs.dom
import scala.scalajs.js.Dynamic
/**
* Created by Milan Satala
* Date: 7.6.2015
* Time: 19:57
*/
class ProxyActor(remoteActorPath: String, val wsManager: JsActorRef) extends RemoteActorListener {
override def actorPath: String = remoteActorPath
override def onConnect(serverActor: JsActorRef): Unit = {}
override def whenConnected(serverActor: JsActorRef): Receive = {
case msg =>
if (sender() == context.parent) {
serverActor ! msg
} else {
context.parent ! msg
}
}
}
object WebsocketJsActors {
val actorSystem = JsActorSystem("WunderxClient", JsPrintlnActorLoggerFactory)
val wsManager = {
val webSocketUrl = dom.window.asInstanceOf[Dynamic].webSocketUrl.asInstanceOf[String]
implicit val protocol = WunderxProtocol
actorSystem.actorOf(SocketManager.props(SocketManager.Config(webSocketUrl)), "socketManager")
}
}
| msatala/wunderx | wunderx-client/src/main/scala/boilerplate/WebsocketJsActors.scala | Scala | mit | 1,131 |
package com.cave.metrics.data.kinesis
import java.util.{List => JList}
import com.amazonaws.services.kinesis.clientlibrary.interfaces.{IRecordProcessor, IRecordProcessorCheckpointer}
import com.amazonaws.services.kinesis.clientlibrary.types.ShutdownReason
import com.amazonaws.services.kinesis.model.Record
import com.cave.metrics.data._
import org.apache.commons.logging.LogFactory
import play.api.libs.json.Json
import scala.collection.JavaConverters._
import scala.util.{Success, Try}
class RecordProcessor(config: AwsConfig, sink: DataSink) extends IRecordProcessor with ExponentialBackOff {
private[this] var shardId: String = _
private var nextCheckpointTimeMillis: Long = _
private[this] val log = LogFactory.getLog(classOf[RecordProcessor])
// Back off and retry settings for checkpoint
override val MaxBackOffTimeInMillis = 10000L
override val ShouldLogErrors: Boolean = true
private val NumRetries = 10
private val CheckpointIntervalInMillis = 1000L
override def initialize(shardId: String): Unit = {
this.shardId = shardId
}
override def shutdown(check: IRecordProcessorCheckpointer, reason: ShutdownReason): Unit = {
if (reason == ShutdownReason.TERMINATE) {
checkpoint(check)
}
}
override def processRecords(records: JList[Record], check: IRecordProcessorCheckpointer): Unit = {
val metrics = (records.asScala map convert).filter(_.isSuccess)
if (metrics.size == records.size()) {
// all metrics successfully converted
log.info(s"Received $metrics")
sink.sendMetrics(for (Success(metric) <- metrics) yield metric)
} else {
log.error("Failed to parse records into Metric objects.")
}
if (System.currentTimeMillis() > nextCheckpointTimeMillis) {
checkpoint(check)
nextCheckpointTimeMillis = System.currentTimeMillis() + CheckpointIntervalInMillis
}
}
private[this] def convert(record: Record): Try[Metric] =
Try (Json.parse(new String(record.getData.array())).as[Metric])
private[this] def checkpoint(check: IRecordProcessorCheckpointer): Unit = {
Try {
retryUpTo(NumRetries) {
check.checkpoint()
}
} recover {
case e: Exception =>
log.warn(s"Failed to checkpoint shard $shardId: ${e.getMessage}")
}
}
} | gilt/cave | core/src/main/scala/com/cave/metrics/data/kinesis/RecordProcessor.scala | Scala | mit | 2,289 |
package hello
import org.springframework.context.annotation.{Configuration, ComponentScan}
import org.springframework.boot.autoconfigure.EnableAutoConfiguration
import org.springframework.boot;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.stereotype.Controller;
import org.springframework.web.bind.annotation.RequestMethod;
/**
 * This config class triggers Spring annotation scanning and auto-configures the Spring context.
*
* @author Jude
* @since 1.0
*/
@Controller
@Configuration
@EnableAutoConfiguration
@ComponentScan
class HelloConfig {
@RequestMapping(value=Array("/"),method=Array(RequestMethod.GET))
//@RequestMapping(Array("/"))
@ResponseBody
def home(): String = "Hello World!"
}
| JudeMartin/CMPE-273-Lab-1 | src/main/scala/hello/HelloConfig.scala | Scala | mit | 813 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.filterexpr
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.spark.sql.test.util.QueryTest
/**
 * Test cases for greater-than and less-than filter expressions on timestamp columns,
 * comparing CarbonData results against Hive.
*/
class TestGrtLessFilter extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
sql("drop table if exists carbonTable")
sql("drop table if exists hiveTable")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT
)
val csvFilePath = s"$resourcesPath/filter/datagrtlrt.csv"
sql(
"CREATE TABLE IF NOT EXISTS carbonTable(date Timestamp, country String, salary Int) STORED " +
"BY " +
"'carbondata'"
)
sql(
"create table if not exists hiveTable(date Timestamp, country String, salary Int)row format" +
" delimited fields " +
"terminated by ','"
)
sql(
"LOAD DATA LOCAL INPATH '" + csvFilePath + "' into table carbonTable OPTIONS " +
"('FILEHEADER'='date,country,salary')"
)
sql(
"LOAD DATA local inpath '" + csvFilePath + "' INTO table hiveTable"
)
}
test("select * from carbonTable where date > cast('2017-7-25 12:07:29' as timestamp)") {
checkAnswer(
sql("select * from carbonTable where date > cast('2017-7-25 12:07:29' as timestamp)"),
sql("select * from hiveTable where date > cast('2017-7-25 12:07:29' as timestamp)")
)
}
test("select * from carbonTable where date < cast('2017-7-25 12:07:29' as timestamp)") {
checkAnswer(
sql("select * from carbonTable where date < cast('2017-7-25 12:07:29' as timestamp)"),
sql("select * from hiveTable where date < cast('2017-7-25 12:07:29' as timestamp)")
)
}
test("select * from carbonTable where date > cast('2018-7-24 12:07:28' as timestamp)") {
checkAnswer(
sql("select * from carbonTable where date > cast('2018-7-24 12:07:28' as timestamp)"),
sql("select * from hiveTable where date > cast('2018-7-24 12:07:28' as timestamp)")
)
}
test("select * from carbonTable where date < cast('2018-7-24 12:07:28' as timestamp)") {
checkAnswer(
sql("select * from carbonTable where date < cast('2018-7-24 12:07:28' as timestamp)"),
sql("select * from hiveTable where date < cast('2018-7-24 12:07:28' as timestamp)")
)
}
override def afterAll {
sql("drop table if exists carbonTable")
sql("drop table if exists hiveTable")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "dd-MM-yyyy")
}
}
| sgururajshetty/carbondata | integration/spark-common-test/src/test/scala/org/apache/carbondata/spark/testsuite/filterexpr/TestGrtLessFilter.scala | Scala | apache-2.0 | 3,578 |
package common
import java.io._
import java.security.MessageDigest
import java.text.SimpleDateFormat
import java.util
import java.util.concurrent.Executors
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import java.util.{Date, TimeZone, UUID}
import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper, SerializationFeature}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
import common.JedisCacheKeys._
import scala.Some
import scala.collection.mutable.{HashMap, ListBuffer}
import scala.collection.parallel.{ForkJoinTaskSupport, ParIterable}
import scala.collection.{Parallelizable, mutable}
import scala.concurrent._
import scala.concurrent.forkjoin.ForkJoinPool
import scala.util.Random
/**
* Created by 林 on 14-4-3.
*/
object Tool {
private val settingCache = new mutable.HashMap[String, String]()
private val chars: Array[Char] = "0123456789ABCDEF".toCharArray
private val settingObjectCache = new util.Hashtable[String, AnyRef]()
private val AES_DEFAULT_KEY = "#$#$#^T#$45rw3d4g$%^"
private val map = new ObjectMapper() with ScalaObjectMapper
map.registerModule(DefaultScalaModule)
map.configure(SerializationFeature.FAIL_ON_EMPTY_BEANS, false)
map.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false)
map.setTimeZone(TimeZone.getTimeZone("GMT+8"))
map.setDateFormat(new SimpleDateFormat("yyyy-MM-dd"))
val pool = Executors.newFixedThreadPool(100)
implicit val ec: ExecutionContext = ExecutionContext.fromExecutor(pool)
/*
   * A very handy helper: turns ordinary collection operations into parallel (multi-threaded) execution
*/
implicit class ParToMutile[+A](parable: Parallelizable[A, ParIterable[A]]) {
def mutile(thread: Int = -1) = {
if (thread == -1) {
parable.par
} else {
        val result = parable.par
        result.tasksupport = new ForkJoinTaskSupport(new ForkJoinPool(thread))
        result
}
}
}
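  // Usage sketch for `mutile` (the `fetch` function is an assumption, not part of this file):
  //
  //   import Tool._
  //   val urls = List("u1", "u2", "u3")
  //   urls.mutile(8).map(fetch)   // runs on a dedicated ForkJoinPool with 8 threads
  //   urls.mutile().map(fetch)    // thread == -1 falls back to the default parallelism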
implicit class AnyRefAddMethod[A <: AnyRef](bean: A) {
def toJson(): String = {
map.writeValueAsString(bean)
}
def toBean(json: String): A = {
Tool.toBean(json, bean.getClass)
}
def checkEmpty(): A = {
bean match {
        case _: String => if (bean.asInstanceOf[String].trim.isEmpty) throw new EmptyFieldExcepiton()
        case _: List[_] => if (bean.asInstanceOf[List[AnyRef]].isEmpty) throw new EmptyFieldExcepiton()
        case _: Map[_, _] => if (bean.asInstanceOf[Map[AnyRef, AnyRef]].isEmpty) throw new EmptyFieldExcepiton()
        case _: Array[_] => if (bean.asInstanceOf[Array[AnyRef]].length == 0) throw new EmptyFieldExcepiton()
        case _ => if (bean == null) throw new EmptyFieldExcepiton()
}
bean
}
}
def toBean[T](json: String, clazz: Class[T]): T = {
map.readValue(json, clazz).asInstanceOf[T]
}
def isAESData(s: String) = {
s.length % 32 == 0 && s.matches("[0-9a-fA-F]+")
}
def hex2bytes(hex: String): Array[Byte] = {
hex.replaceAll("[^0-9A-Fa-f]", "").sliding(2, 2).toArray.map(Integer.parseInt(_, 16).toByte)
}
def bytes2hex(bytes: Array[Byte], sep: Option[String] = None): String = {
sep match {
case None => bytes.map("%02x".format(_)).mkString
case _ => bytes.map("%02x".format(_)).mkString(sep.get)
}
}
/**
   * Compute the MD5 hash of the given string (upper-case hex).
*
* @param str
* the str
* @return string
* @throws Exception
* the exception
*/
def md5(str: String): String = {
val md5: MessageDigest = MessageDigest.getInstance("MD5")
val sb: StringBuilder = new StringBuilder
for (b <- md5.digest(str.getBytes("utf-8"))) {
sb.append(str2HexStr(b))
}
return sb.toString
}
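  // Example (well-known MD5 test vector; note the upper-case hex produced by the
  // `chars` table above):
  //
  //   Tool.md5("")   // => "D41D8CD98F00B204E9800998ECF8427E"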
/**
* Str to hex str.
*
* @param b the b
* @return the string
* @author 黄林
*/
def str2HexStr(b: Byte): String = {
val r: Array[Char] = new Array[Char](2)
var bit: Int = (b & 0x0f0) >> 4
r(0) = chars(bit)
bit = b & 0x0f
r(1) = chars(bit)
val str: String = new String(r)
return str
}
  // Retry block: re-runs the body up to `count` times, sleeping 100ms * attempt between failures
def reTry(count: Int = 5)(f: => Unit) {
var retryCount = 0
while (retryCount <= count) {
try {
f
retryCount = count + 1
} catch {
case e: Throwable =>
retryCount += 1
Thread.sleep(100*retryCount)
if (retryCount > count) {
e.printStackTrace()
throw e
}
}
}
}
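  // Usage sketch for `reTry` (the `sendRequest` call is an assumption): the body is re-run
  // on failure with a growing sleep between attempts, and the last error is rethrown.
  //
  //   Tool.reTry(3) {
  //     sendRequest()
  //   }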
def safe[T](f: => T) = {
try {
f
} catch {
case e: Throwable =>
println("error")
e.printStackTrace()
null.asInstanceOf[T]
}
}
/**
   * Get the content of an expression.
   *
   * @param context
   *          the text to search
   * @param separate
   *          the expression delimiter (used as both the start and end marker)
* @return the expr
*/
def getExpr(context: String, separate: String): Option[String] = {
getExpr(context, separate, separate, null)
}
/**
   * Get the content of an expression.
   *
   * @param context
   *          the text to search
   * @param startseparate
   *          the start delimiter of the expression
   * @param endSeparate
   *          the end delimiter of the expression
   * @param includeKey
   *          a keyword the expression must contain (may be null)
* @return the expr
*/
def getExpr(context: String, startseparate: String,
endSeparate: String, includeKey: String): Option[String] = {
if (null == context || context.isEmpty || context.indexOf(startseparate) == -1) {
None
} else {
val start = context.indexOf(startseparate)
val end = context.indexOf(endSeparate, start + 1)
val result = context.substring(start, end + 1)
if (null != includeKey && result.indexOf(includeKey) == -1) {
getExpr(context.drop(end), startseparate, endSeparate, includeKey)
} else
Some(result)
}
}
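  // Worked examples for `getExpr` (illustrative strings only):
  //
  //   getExpr("price is $total$ today", "$")          // => Some("$total$")
  //   getExpr("a {x} b {key1} c", "{", "}", "key")    // => Some("{key1}") -- "{x}" is skipped because it lacks "key"
  //   getExpr("no markers here", "$")                 // => None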
// def getSettingMap(): Map[String, String] = {
  //    // Note: values read here may lag by up to one second due to caching
// Cache.getCache(SETTING_VALUE_CACHE_KEY).get.asInstanceOf[Map[String, String]]
// }
def getSettingCacheObject[T](keys: String*)(f: Array[String] => T): T = {
val setting = getSettingMap()
val key = keys.mkString
if (settingObjectCache.contains(key + "_values") && settingObjectCache.contains(key)) {
val values = settingObjectCache.get(key + "_values").asInstanceOf[Map[String, String]]
      val hasNoChange = keys map (k => setting.get(k).exists(_ == values(k))) reduceLeft (_ && _)
if (hasNoChange) {
return settingObjectCache.get(key).asInstanceOf[T]
}
}
val lists = keys map (setting.get(_).get)
settingObjectCache.put(key + "_values", (keys zip lists toMap))
f(lists toArray)
}
def cacheMethodString[T<:AnyRef](key: String, time: Int)(f: => T):T = {
val value=Cache.getCache(key)
if (value.isDefined) {
value.get.asInstanceOf[T]
} else {
val v = f
Cache.setCache(key, v, time)
v
}
}
def cacheOTS[T<:AnyRef](key: String)(f: => T):T = {
val value=OtsCache.getCache[T](key)
if (value.isDefined) {
value.get.asInstanceOf[T]
} else {
val v = f
OtsCache.setCache(key, v)
v
}
}
  // Run the body asynchronously in the background
def run[T](body: => T) = Future[T](body)
implicit class StringAddMethod[A <: String](bean: A) {
def encrypt(): String = {
AESCoder.encrypt(bean, AES_DEFAULT_KEY)
}
def decrypt(): String = {
AESCoder.decrypt(bean, AES_DEFAULT_KEY)
}
def md5(): String = Tool.md5(bean)
def isPhone: Boolean = """^1\\d{10}$""".r.pattern.matcher(bean).matches()
def isNumber: Boolean = """^\\d+$""".r.pattern.matcher(bean).matches()
def toBigDecimal = if (isEmpty(bean)) null else BigDecimal(bean)
def safeInt(v:Int= -1) = if (isEmpty(bean)) v else bean.toInt
def safeInt:Int=safeInt(-1)
def safeDouble = if (isEmpty(bean)) -1d else bean.toDouble
def safeLong = if (isEmpty(bean)) -1l else bean.toLong
def toIntList(split:String) = StrtoList[Int](bean, split, _.toInt)
def toIntList = StrtoList[Int](bean, ",", _.toInt)
def toLongList = StrtoList[Long](bean, ",", _.toLong)
def toDoubleList = StrtoList[Double](bean, ",", _.toDouble)
def toDate = if (null == bean || bean.isEmpty) null else TimeTool.parseStringToDate(bean)
def toDateTime = if (null == bean || bean.isEmpty) null else TimeTool.parseStringToDateTime(bean)
def jsonToMap=Tool.toBean(bean,Map.empty[Any,Any].getClass)
def jsonToHashMap=Tool.toBean(bean,HashMap[String, Any]().getClass)
def dateExp=if (null == bean || bean.isEmpty) new Date() else TimeTool.getAfterDate(bean)
}
implicit class DateAddMethod[A <: Date](bean: A) {
//yy-mm-dd
def sdate = if (bean == null) "" else TimeTool.getDateStringByDate(bean)
//yy-mm-dd
def sdatetime = if (bean == null) "" else TimeTool.getFormatStringByDate(bean)
def toSqlDate=if (null == bean) null else new java.sql.Date(bean.getTime)
}
implicit class NumberAddMethod[A <: BigDecimal](bean: A) {
def toMoney(): BigDecimal = {
bean.setScale(2, BigDecimal.RoundingMode.HALF_UP)
}
}
implicit class IntegerAddMethod[A <: Int](bean: A) {
def checkStr = if (isEmpty(bean)) "" else bean.toString
}
  // Implicitly convert Int to String (avoids scattering toString calls everywhere)
implicit def intToString(i: Int): String = i.toString
def isEmpty(str: String) = {
(null == str || str.isEmpty)
}
def isEmpty(bean: Any): Boolean = {
bean match {
case s: String => isEmpty(bean.asInstanceOf[String])
case i: Int => bean.asInstanceOf[Int] == -1
case d: Double => bean.asInstanceOf[Double] == -1
case b: BigDecimal => b == null || b.asInstanceOf[BigDecimal] == -1
case a: Traversable[_] => a == null || a.asInstanceOf[Traversable[AnyRef]].isEmpty
case _ => bean == null
}
}
def StrtoList[T](bean: String, split: String, fun: String => T): List[T] = {
if (isEmpty(bean)) Nil else bean.split(split).map(fun(_)).toList
}
def randomStr(len: Int) = {
val randomValue = randomChars + randomNums
0 to (len - 1) map (v => randomValue(Random.nextInt(randomValue.length))) mkString
}
private val randomChars = "abcdefghjkmnpqrstvwxyABCDEFGHJKLMNPQRSTVWXY2346789"
private val randomNums = "2346789"
def gzip(data:Array[Byte])={
val bos = new ByteArrayOutputStream()
val gzip = new GZIPOutputStream(bos)
gzip.write(data)
gzip.finish()
gzip.close()
val gdata = bos.toByteArray()
bos.close()
gdata
}
def ungzip(gdata:Array[Byte])={
val bis = new ByteArrayInputStream(gdata)
val gzip = new GZIPInputStream(bis)
val buf = new Array[Byte](1024)
var num = -1
val baos = new ByteArrayOutputStream()
num = gzip.read(buf, 0, buf.length)
while (num != -1) {
baos.write(buf, 0, num)
num = gzip.read(buf, 0, buf.length)
}
val data = baos.toByteArray()
baos.flush()
baos.close()
gzip.close()
bis.close()
data
}
implicit class DataAddMethod[A <: Array[Byte]](data: A) {
def gzip = Tool.gzip(data)
def ungzip=Tool.ungzip(data)
}
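  // Round-trip sketch for the gzip helpers (illustrative only):
  //
  //   import Tool._
  //   val payload = "hello world".getBytes("UTF-8")
  //   assert(payload.gzip.ungzip.sameElements(payload))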
implicit class ListAddMethod[A <: Any,B <:Any](list: Seq[Tuple2[A,B]]) {
def toHashMap={
val map=new mutable.HashMap[A,ListBuffer[B]]()
list.foreach{kv=>
val (k,v)=kv.asInstanceOf[Tuple2[A,B]]
if(!map.contains(k)){
map+=((k,new ListBuffer()))
}
map(k).append(v)
}
map
}
}
def uuid=UUID.randomUUID().toString.replace("-","")
def Stream2Byte(is: InputStream)={
val baos=new ByteArrayOutputStream
var b = is.read()
while (b != -1) {
baos.write(b)
b = is.read()
}
baos.toByteArray
}
def File2Byte(file: File):Array[Byte]={
Stream2Byte(new FileInputStream(file))
}
def getSettingMap(): Map[String, String] = {
settingCache.toMap[String, String]
}
def setSetting(data: Map[String, String]) = {
settingCache.clear()
data.foreach(v => settingCache.put(v._1, v._2))
}
}
| livehl/paipai | src/main/scala/common/Tool.scala | Scala | apache-2.0 | 11,915 |
package com.github.libsml.aggregation.evaluation
/**
* Created by huangyu on 15/9/9.
*/
object Evaluation {
}
| libsml/libsml | aggregation/src/main/scala/com/github/libsml/aggregation/evaluation/Evaluation.scala | Scala | apache-2.0 | 116 |
package co.blocke.scalajack
package json.mapkeys
import co.blocke.scala_reflection._
import TestUtil._
import munit._
import munit.internal.console
import co.blocke.scalajack.json.JSON
class ListCollKeys() extends FunSuite:
val sj = co.blocke.scalajack.ScalaJack()
test("List as key") {
describe(
"------------------------\\n: List Map Key Tests :\\n------------------------", Console.BLUE
)
val l1 = List(1, 2, 3)
val l2 = List(4, 5, 6)
val inst = Map(l1 -> l2)
val js = sj.render(inst)
assertEquals("""{"[1,2,3]":[4,5,6]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[Int], List[Int]]](js))
}
test("List of Lists as key") {
val l1 = List(List(1, 2, 3), List(9, 8, 7))
val l2 = List(List(4, 5, 6), List(1, 3, 5))
val inst = Map(l1 -> l2)
val js = sj.render(inst)
assertEquals("""{"[[1,2,3],[9,8,7]]":[[4,5,6],[1,3,5]]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[List[Int]], List[List[Int]]]](js))
}
test("List of Tuples as key") {
val l1: List[(String, String)] = List(("A", "a"), ("B", "b"), (null, "c"))
val l2: List[(String, String)] = List(("X", "x"), ("Y", "y"), (null, "z"))
val inst = Map(l1 -> l2)
val js = sj.render(inst)
assertEquals(
"""{"[[\\"A\\",\\"a\\"],[\\"B\\",\\"b\\"],[null,\\"c\\"]]":[["X","x"],["Y","y"],[null,"z"]]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[(String, String)], List[(String, String)]]](js))
}
test("List of Maps as key") {
val l1 = List(Map("wow" -> true), Map("ya" -> false))
val l2 = List(Map("zing" -> false), Map("bling" -> true))
val inst = Map(l1 -> l2)
val js = sj.render(inst)
assertEquals(
"""{"[{\\"wow\\":true},{\\"ya\\":false}]":[{"zing":false},{"bling":true}]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[Map[String, Boolean]], List[Map[String, Boolean]]]](js))
}
test("List of Case Class as key") {
val fish = FishPet("Flipper", Food.Meat, 68.9)
val inst = Map(List(fish, fish) -> List(fish, fish))
val js = sj.render(inst)
assertEquals(
"""{"[{\\"name\\":\\"Flipper\\",\\"food\\":\\"Meat\\",\\"waterTemp\\":68.9},{\\"name\\":\\"Flipper\\",\\"food\\":\\"Meat\\",\\"waterTemp\\":68.9}]":[{"name":"Flipper","food":"Meat","waterTemp":68.9},{"name":"Flipper","food":"Meat","waterTemp":68.9}]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[FishPet], List[FishPet]]](js))
}
test("List of Trait as key") {
val fish: Pet = FishPet("Flipper", Food.Meat, 68.9)
val inst = Map(List(fish, fish) -> List(fish, fish))
val js = sj.render(inst)
assertEquals(
"""{"[{\\"_hint\\":\\"co.blocke.scalajack.json.mapkeys.FishPet\\",\\"name\\":\\"Flipper\\",\\"food\\":\\"Meat\\",\\"waterTemp\\":68.9},{\\"_hint\\":\\"co.blocke.scalajack.json.mapkeys.FishPet\\",\\"name\\":\\"Flipper\\",\\"food\\":\\"Meat\\",\\"waterTemp\\":68.9}]":[{"_hint":"co.blocke.scalajack.json.mapkeys.FishPet","name":"Flipper","food":"Meat","waterTemp":68.9},{"_hint":"co.blocke.scalajack.json.mapkeys.FishPet","name":"Flipper","food":"Meat","waterTemp":68.9}]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[Pet], List[Pet]]](js))
}
test("List of Any as key") {
val inst: Map[List[Any], List[Any]] =
Map(List(23L, "wow", true) -> List(12.2, 0))
val js = sj.render(inst)
assertEquals("""{"[23,\\"wow\\",true]":[12.2,0.0]}""".asInstanceOf[JSON],js)
assertEquals(true, sj.read[Map[List[Any], List[Any]]](js).isInstanceOf[Map[List[Any], List[Any]]])
}
test("List of parameterized class as key") {
val inst = Map(
List(AThing(true, "True"), AThing(false, "False")) -> List(
AThing(true, "Yes"),
AThing(false, "No")
)
)
val js = sj.render(inst)
assertEquals(
"""{"[{\\"a\\":true,\\"b\\":\\"True\\"},{\\"a\\":false,\\"b\\":\\"False\\"}]":[{"a":true,"b":"Yes"},{"a":false,"b":"No"}]}""".asInstanceOf[JSON],js)
assertEquals(true, sj.read[Map[List[AThing[String, Boolean]], List[AThing[String, Boolean]]]](js)
.isInstanceOf[Map[List[AThing[String, Boolean]], List[AThing[String, Boolean]]]])
}
test("List of parameterized trait as key") {
val inst: Map[List[Thing[Boolean, String]], List[Thing[Boolean, String]]] =
Map(
List(AThing(true, "True"), AThing(false, "False")) -> List(
AThing(true, "Yes"),
AThing(false, "No")
)
)
val js = sj.render(inst)
assertEquals(
"""{"[{\\"_hint\\":\\"co.blocke.scalajack.json.mapkeys.AThing\\",\\"a\\":true,\\"b\\":\\"True\\"},{\\"_hint\\":\\"co.blocke.scalajack.json.mapkeys.AThing\\",\\"a\\":false,\\"b\\":\\"False\\"}]":[{"_hint":"co.blocke.scalajack.json.mapkeys.AThing","a":true,"b":"Yes"},{"_hint":"co.blocke.scalajack.json.mapkeys.AThing","a":false,"b":"No"}]}""".asInstanceOf[JSON],js)
assertEquals(true, sj.read[Map[List[Thing[Boolean, String]], List[Thing[Boolean, String]]]](js)
.isInstanceOf[Map[List[Thing[Boolean, String]], List[Thing[Boolean, String]]]])
}
test("List of Optional as key") {
val inst: Map[List[Option[String]], List[Option[String]]] =
Map(List(Some("hey"), Some("you")) -> List(Some("stop"), Some("go")))
val js = sj.render(inst)
assertEquals("""{"[\\"hey\\",\\"you\\"]":["stop","go"]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[Option[String]], List[Option[String]]]](js))
}
test("List of ValueClass as key") {
val inst =
Map(List(VCChar('A'), VCChar('a')) -> List(VCChar('B'), VCChar('b')))
val js = sj.render(inst)
assertEquals("""{"[\\"A\\",\\"a\\"]":["B","b"]}""".asInstanceOf[JSON],js)
assertEquals(inst, sj.read[Map[List[VCChar], List[VCChar]]](js))
}
| gzoller/ScalaJack | core/src/test/scala/co.blocke.scalajack/json/mapkeys/ListCollKeys.scala | Scala | mit | 5,686 |
package util
/**
* An object containing the Functor typeclass
*/
object Functors {
type Id[+T] = T
/**
* F has a map on it
*/
abstract class Functor[F[_]] {
def map[T, U](f: T => U): F[T] => F[U]
}
implicit object IdFunctor extends Functor[Id] {
def map[T, U](f: T => U) = (x: T) => f(x)
}
/**
   * Inspired by
* https://www.safaribooksonline.com/blog/2013/05/28/scala-type-classes-demystified/
*/
implicit class FOps[F[_]: Functor, T](t: F[T]) {
val witness = implicitly[Functor[F]]
def map[U](f: T => U): F[U] = witness.map[T, U](f)(t)
}
}
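// A minimal usage sketch (assumes only the definitions above):
//
//   import util.Functors._
//   IdFunctor.map((n: Int) => n + 1)(41)   // => 42
//   // FOps adds `.map` syntax to any F[_] with a Functor instance; a value typed as
//   // Id[Int] can be mapped directly (an explicit type ascription may be needed).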
| manojo/parsequery | macros/src/main/scala/util/Functor.scala | Scala | mit | 596 |
package com.karasiq.shadowcloud.metadata.tika
import java.io.File
import akka.stream.scaladsl.{FileIO, Keep}
import akka.stream.testkit.scaladsl.TestSink
import akka.util.ByteString
import com.karasiq.shadowcloud.metadata.Metadata
import com.karasiq.shadowcloud.test.utils.{ActorSpec, ActorSpecImplicits}
import org.apache.commons.io.FileUtils
import org.apache.tika.Tika
import org.scalatest.{FlatSpecLike, SequentialNestedSuiteExecution}
import scala.concurrent.duration._
class TikaMetadataProviderTest extends ActorSpec with ActorSpecImplicits with FlatSpecLike with SequentialNestedSuiteExecution {
val testPdfName = "TypeClasses.pdf"
val testPdfFile = new File(getClass.getClassLoader.getResource(testPdfName).toURI)
val testPdfBytes = ByteString.fromArrayUnsafe(FileUtils.readFileToByteArray(testPdfFile))
val tika = new Tika()
val detector = TikaMimeDetector(tika)
"Mime detector" should "detect PDF" in {
detector.getMimeType("TypeClasses.pdf", testPdfBytes) shouldBe Some("application/pdf")
}
val autoParserConfig = system.settings.config.getConfig("shadowcloud.metadata.tika.auto-parser")
val autoParser = TikaAutoParser(tika, autoParserConfig)
"Parser" should "extract text" in {
val stream = FileIO.fromPath(testPdfFile.toPath)
.via(autoParser.parseMetadata(testPdfName, "application/pdf"))
.toMat(TestSink.probe)(Keep.right)
.run()
val metaTable = stream.requestNext(1 minute)
metaTable.tag shouldBe Some(Metadata.Tag("tika", "auto", Metadata.Tag.Disposition.METADATA))
println(metaTable)
assert(metaTable.value.table.exists(_.values("dcterms:created").values == Seq("2010-07-26T09:01:12Z")))
val textPreview = stream.requestNext(1 minute)
textPreview.tag shouldBe Some(Metadata.Tag("tika", "auto", Metadata.Tag.Disposition.PREVIEW))
textPreview.value.text.exists(t ⇒ t.format == "text/plain" && t.data.contains("Type Classes as Objects and Implicits")) shouldBe true
val text = stream.requestNext(1 minute)
text.tag shouldBe Some(Metadata.Tag("tika", "auto", Metadata.Tag.Disposition.CONTENT))
text.value.text.exists(t ⇒ t.format == "text/plain" && t.data.contains("Type Classes as Objects and Implicits")) shouldBe true
val xml = stream.requestNext()
xml.tag shouldBe Some(Metadata.Tag("tika", "auto", Metadata.Tag.Disposition.CONTENT))
xml.value.text.exists(t ⇒ t.format == "text/html" && t.data.contains("<p>Adriaan Moors Martin Odersky\\nEPFL\\n</p>")) shouldBe true
stream.request(1)
stream.expectComplete()
}
}
| Karasiq/shadowcloud | metadata/tika/src/test/scala/com/karasiq/shadowcloud/metadata/tika/TikaMetadataProviderTest.scala | Scala | apache-2.0 | 2,557 |
package com.dukeforthought.lsystem
import scala.annotation.tailrec
case class LSystem(
constants : Set[Char],
axiom : Seq[Char],
rules : Map[Char, Seq[Char]],
iterations : Int) {
// Check that there are no rules associated with constants
rules.keys foreach { symbol =>
require(
!constants.contains(symbol),
s"Symbol $symbol is a constant! Rules are not allowed for constants!")
}
@tailrec
private def expand(acc: Seq[Char], i: Int): Seq[Char] = i match {
case 0 => acc
case _ => expand(
acc.map(symbol => rules.getOrElse(symbol, List(symbol))).flatten,
i-1)
}
lazy val expanded = expand(axiom, iterations)
}
object Constants {
def apply(symbols: String): Set[Char] = symbols.toSet
}
object Successor {
def apply(symbols: String): Seq[Char] = symbols.toList
}
object LSystemTester extends App {
private val axiomChar = 'F'
private val successor = Successor("F+F-F")
private val expectedResult = "F+F-F+F+F-F-F+F-F".trim
val constants = Constants("+-[]")
val axiom = List(axiomChar)
val rules = Map(
axiomChar -> successor)
val iterations = 2
val l = LSystem(constants, axiom, rules, iterations)
private val expandedStr = l.expanded.mkString.trim
println("Expanded string: " + expandedStr)
println("Expected result: " + expectedResult)
println("Test passed: " + (expandedStr == expectedResult))
}
| krnhotwings/L-system | src/com/dukeforthought/lsystem/LSystem.scala | Scala | mit | 1,418 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.observers.buffers
import monix.eval.Coeval
import monix.execution.Ack
import monix.execution.Ack.{Continue, Stop}
import monix.execution.internal.collection.{JSArrayQueue, _}
import scala.util.control.NonFatal
import monix.execution.exceptions.BufferOverflowException
import monix.reactive.observers.{BufferedSubscriber, Subscriber}
import scala.concurrent.Future
import scala.util.{Failure, Success}
/** A [[BufferedSubscriber]] implementation for the
* [[monix.reactive.OverflowStrategy.DropNew DropNew]] overflow strategy.
*/
private[observers] final class SyncBufferedSubscriber[-A] private (
out: Subscriber[A],
queue: EvictingQueue[A],
onOverflow: Long => Coeval[Option[A]] = null)
extends BufferedSubscriber[A] with Subscriber.Sync[A] {
implicit val scheduler = out.scheduler
// to be modified only in onError, before upstreamIsComplete
private[this] var errorThrown: Throwable = _
// to be modified only in onError / onComplete
private[this] var upstreamIsComplete = false
// to be modified only by consumer
private[this] var downstreamIsComplete = false
// represents an indicator that there's a loop in progress
private[this] var isLoopActive = false
// events being dropped
private[this] var droppedCount = 0L
// last acknowledgement received by consumer loop
private[this] var lastIterationAck: Future[Ack] = Continue
// Used on the consumer side to split big synchronous workloads in batches
private[this] val em = scheduler.executionModel
def onNext(elem: A): Ack = {
if (!upstreamIsComplete && !downstreamIsComplete) {
if (elem == null) {
onError(new NullPointerException("Null not supported in onNext"))
Stop
} else
try {
droppedCount += queue.offer(elem)
consume()
Continue
} catch {
case ex if NonFatal(ex) =>
onError(ex)
Stop
}
} else
Stop
}
def onError(ex: Throwable): Unit = {
if (!upstreamIsComplete && !downstreamIsComplete) {
errorThrown = ex
upstreamIsComplete = true
consume()
}
}
def onComplete(): Unit = {
if (!upstreamIsComplete && !downstreamIsComplete) {
upstreamIsComplete = true
consume()
}
}
private def consume(): Unit =
if (!isLoopActive) {
isLoopActive = true
scheduler.execute(consumerRunLoop)
}
private[this] val consumerRunLoop = new Runnable {
def run(): Unit = {
fastLoop(lastIterationAck, 0)
}
private final def signalNext(next: A): Future[Ack] =
try {
val ack = out.onNext(next)
// Tries flattening the Future[Ack] to a
// synchronous value
if (ack == Continue || ack == Stop)
ack
else
ack.value match {
case Some(Success(success)) =>
success
case Some(Failure(ex)) =>
downstreamSignalComplete(ex)
Stop
case None =>
ack
}
} catch {
case ex if NonFatal(ex) =>
downstreamSignalComplete(ex)
Stop
}
private def downstreamSignalComplete(ex: Throwable = null): Unit = {
downstreamIsComplete = true
try {
if (ex != null) out.onError(ex)
else out.onComplete()
} catch {
case err if NonFatal(err) =>
scheduler.reportFailure(err)
}
}
private def goAsync(next: A, ack: Future[Ack]): Unit =
ack.onComplete {
case Success(Continue) =>
val nextAck = signalNext(next)
val isSync = ack == Continue || ack == Stop
val nextFrame = if (isSync) em.nextFrameIndex(0) else 0
fastLoop(nextAck, nextFrame)
case Success(Stop) =>
// ending loop
downstreamIsComplete = true
isLoopActive = false
case Failure(ex) =>
// ending loop
isLoopActive = false
downstreamSignalComplete(ex)
}
private def fastLoop(prevAck: Future[Ack], startIndex: Int): Unit = {
var ack = if (prevAck == null) Continue else prevAck
var isFirstIteration = ack == Continue
var nextIndex = startIndex
while (isLoopActive && !downstreamIsComplete) {
var streamErrors = true
try {
val next = {
// Do we have an overflow message to send?
val overflowMessage =
if (onOverflow == null || droppedCount == 0)
null.asInstanceOf[A]
else {
val msg = onOverflow(droppedCount).value() match {
case Some(value) => value
case None => null.asInstanceOf[A]
}
droppedCount = 0
msg
}
if (overflowMessage != null) overflowMessage
else
queue.poll()
}
// Threshold after which we are no longer allowed to
// stream errors downstream if they happen
streamErrors = false
if (next != null) {
if (nextIndex > 0 || isFirstIteration) {
isFirstIteration = false
ack match {
case Continue =>
ack = signalNext(next)
if (ack == Stop) {
// ending loop
downstreamIsComplete = true
isLoopActive = false
return
} else {
val isSync = ack == Continue
nextIndex = if (isSync) em.nextFrameIndex(nextIndex) else 0
}
case Stop =>
// ending loop
downstreamIsComplete = true
isLoopActive = false
return
case _ =>
goAsync(next, ack)
return
}
} else {
goAsync(next, ack)
return
}
} else {
if (upstreamIsComplete) downstreamSignalComplete(errorThrown)
// ending loop
lastIterationAck = ack
isLoopActive = false
return
}
} catch {
case ex if NonFatal(ex) =>
if (streamErrors) {
// ending loop
downstreamSignalComplete(ex)
isLoopActive = false
return
} else {
scheduler.reportFailure(ex)
return
}
}
}
}
}
}
private[monix] object SyncBufferedSubscriber {
/**
* Returns an instance of a [[SyncBufferedSubscriber]]
* for the [[monix.reactive.OverflowStrategy.DropNew DropNew]]
* overflow strategy.
*/
def unbounded[A](underlying: Subscriber[A]): Subscriber.Sync[A] = {
val buffer = JSArrayQueue.unbounded[A]
new SyncBufferedSubscriber[A](underlying, buffer, null)
}
/**
* Returns an instance of a [[SyncBufferedSubscriber]]
* for the [[monix.reactive.OverflowStrategy.DropNew DropNew]]
* overflow strategy.
*/
def bounded[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
require(bufferSize > 1, "bufferSize must be strictly higher than 1")
val buffer = JSArrayQueue.bounded[A](bufferSize, _ => {
BufferOverflowException(
s"Downstream observer is too slow, buffer over capacity with a " +
s"specified buffer size of $bufferSize")
})
new SyncBufferedSubscriber[A](underlying, buffer, null)
}
/**
* Returns an instance of a [[SyncBufferedSubscriber]]
* for the [[monix.reactive.OverflowStrategy.DropNew DropNew]]
* overflow strategy.
*/
def dropNew[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
require(bufferSize > 1, "bufferSize must be strictly higher than 1")
val buffer = JSArrayQueue.bounded[A](bufferSize)
new SyncBufferedSubscriber[A](underlying, buffer, null)
}
/**
* Returns an instance of a [[SyncBufferedSubscriber]]
* for the [[monix.reactive.OverflowStrategy.DropNew DropNew]]
* overflow strategy.
*/
def dropNewAndSignal[A](
underlying: Subscriber[A],
bufferSize: Int,
onOverflow: Long => Coeval[Option[A]]): Subscriber.Sync[A] = {
require(bufferSize > 1, "bufferSize must be strictly higher than 1")
val buffer = JSArrayQueue.bounded[A](bufferSize)
new SyncBufferedSubscriber[A](underlying, buffer, onOverflow)
}
/**
* Returns an instance of a [[SyncBufferedSubscriber]]
* for the [[monix.reactive.OverflowStrategy.DropOld DropOld]]
* overflow strategy.
*/
def dropOld[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
require(bufferSize > 1, "bufferSize must be strictly higher than 1")
val buffer = DropHeadOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
new SyncBufferedSubscriber[A](underlying, buffer, null)
}
/**
* Returns an instance of a [[SyncBufferedSubscriber]]
* for the [[monix.reactive.OverflowStrategy.DropOld DropOld]]
* overflow strategy, with signaling of the number of events that
* were dropped.
*/
def dropOldAndSignal[A](
underlying: Subscriber[A],
bufferSize: Int,
onOverflow: Long => Coeval[Option[A]]): Subscriber.Sync[A] = {
require(bufferSize > 1, "bufferSize must be strictly higher than 1")
val buffer = DropHeadOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
new SyncBufferedSubscriber[A](underlying, buffer, onOverflow)
}
/**
* Returns an instance of a [[SyncBufferedSubscriber]] for the
* [[monix.reactive.OverflowStrategy.ClearBuffer ClearBuffer]]
* overflow strategy.
*/
def clearBuffer[A](underlying: Subscriber[A], bufferSize: Int): Subscriber.Sync[A] = {
require(bufferSize > 1, "bufferSize must be strictly higher than 1")
val buffer = DropAllOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
new SyncBufferedSubscriber[A](underlying, buffer, null)
}
/**
* Returns an instance of a [[SyncBufferedSubscriber]]
* for the [[monix.reactive.OverflowStrategy.ClearBuffer ClearBuffer]]
* overflow strategy, with signaling of the number of events that
* were dropped.
*/
def clearBufferAndSignal[A](
underlying: Subscriber[A],
bufferSize: Int,
onOverflow: Long => Coeval[Option[A]]): Subscriber.Sync[A] = {
require(bufferSize > 1, "bufferSize must be strictly higher than 1")
val buffer = DropAllOnOverflowQueue[AnyRef](bufferSize).asInstanceOf[EvictingQueue[A]]
new SyncBufferedSubscriber[A](underlying, buffer, onOverflow)
}
}
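// A minimal usage sketch (illustrative; `underlying` is an assumed Subscriber[Int]).
// Note the object is private[monix], so application code normally reaches these buffers
// through `BufferedSubscriber` with an `OverflowStrategy` rather than calling this directly:
//
//   val buffered = SyncBufferedSubscriber.dropNew(underlying, bufferSize = 1024)
//   buffered.onNext(1)   // never back-pressures the producer; on overflow, new events are dropped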
| alexandru/monifu | monix-reactive/js/src/main/scala/monix/reactive/observers/buffers/SyncBufferedSubscriber.scala | Scala | apache-2.0 | 11,404 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.businessdetails
import org.scalatestplus.mockito.MockitoSugar
import org.scalatestplus.play.PlaySpec
class ContactingYouSpec extends PlaySpec with MockitoSugar {
"Contacting You Form Details" must {
"write correct data" in {
val model = ContactingYou(Some("1234567890"), Some("[email protected]"))
ContactingYou.formWrites.writes(model) must
be(Map(
"phoneNumber" -> Seq("1234567890"),
"email" -> Seq("[email protected]")
))
}
}
} | hmrc/amls-frontend | test/models/businessdetails/ContactingYouSpec.scala | Scala | apache-2.0 | 1,106 |
package akka.rtcweb.protocol.ice.stun
import java.net.InetAddress
import akka.rtcweb.CodecSpec
import akka.rtcweb.protocol.ice.stun.`ERROR-CODE`.Code
import scodec.Attempt.Successful
import scodec.DecodeResult
import scodec.bits._
import shapeless.HNil
class StunMessageSpec extends org.specs2.mutable.Specification with CodecSpec {
"StunMessage" should {
"encoding roundtrip with a complex message" in {
roundtrip(
StunMessage(Class.request, Method.Binding, hex"0x000000000000000000000001", Vector(
`MAPPED-ADDRESS`(Family.IPv4, 42, InetAddress.getLoopbackAddress),
`ALTERNATE-SERVER`(Family.IPv4, 43, InetAddress.getLoopbackAddress),
`XOR-MAPPED-ADDRESS`(Family.IPv4, 42, InetAddress.getLoopbackAddress),
`ERROR-CODE`(Code.UNKNOWN(501), "bad is bad"),
`UNKNOWN-ATTRIBUTES`(Vector(StunAttributeType.NONCE, StunAttributeType.UNKNOWN(hex"0x4242".bits))),
SOFTWARE("MY AWESOME SOFTWARE 1.0"),
`USE-CANDIDATE`(),
PRIORITY(Integer.MAX_VALUE),
`ICE-CONTROLLING`(hex"0x1223344556677889"),
FINGERPRINT(123456789L)
)))
}
}
"StunMessageHeader" should {
"decode funky encoded stun message type" in {
StunMessage.stunMessageTypeBitCodec.decode(bin"11000100011001") shouldEqual
Successful(DecodeResult(bin"110000001001" :: bin"11" :: HNil, BitVector.empty))
}
"encode funky encoded stun message type" in {
StunMessage.stunMessageTypeBitCodec.encode(bin"000000000000" :: bin"11" :: HNil) shouldEqual
Successful(bin"00000100010000")
}
"decode funky encoded stun message type in types" in {
StunMessage.stunMessageTypeCodec.decode(bin"00000000010001") shouldEqual
Successful(DecodeResult(Class.indication :: Method.Binding :: HNil, BitVector.empty))
}
"encode funky stun message types" in {
StunMessage.stunMessageTypeCodec.encode(Class.errorResponse :: Method.Binding :: HNil) shouldEqual
Successful(bin"00000100010001")
}
}
}
| danielwegener/akka-rtcweb | src/test/scala/akka/rtcweb/protocol/ice/stun/StunMessageSpec.scala | Scala | apache-2.0 | 2,051 |
/**
* Copyright (C) 2014, George B. Norr, All Rights Reserved
* Date: 2/1/14
*/
package org.norr.xwp.service
import org.springframework.test.context.ContextConfiguration
import org.scalatest.junit.JUnitSuiteLike
import org.springframework.test.context.junit4.AbstractTransactionalJUnit4SpringContextTests
import org.springframework.beans.factory.annotation.Autowired
import org.norr.xwp.domain.Xwp
import org.junit.Test
import org.junit.Assert._
import java.util.Date
@ContextConfiguration(Array("classpath:/META-INF/spring/applicationContext.xml"))
class XwpServiceTest extends AbstractTransactionalJUnit4SpringContextTests with JUnitSuiteLike {
@Autowired
val xwpService: XwpService = null
@Test
def testSaveXwp() = {
val xwp = new Xwp
xwp.timestamp = new Date()
try {
xwpService.saveXwp(xwp)
assertNotNull(xwp)
}
catch {
case e: Exception => fail()
}
}
@Test
def testLoadXwps() = {
val xwps: java.util.List[Xwp] = xwpService.loadXwps()
assertNotNull(xwps)
}
}
| gnorr/xwp-spring-scala | src/test/scala/org/norr/xwp/service/XwpServiceTest.scala | Scala | apache-2.0 | 1,060 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package scalaguide.tests
package services
import models._
// #scalatest-userservice
class UserService(userRepository : UserRepository) {
def isAdmin(user:User) : Boolean = {
userRepository.roles(user).contains(Role("ADMIN"))
}
}
// #scalatest-userservice
| jyotikamboj/container | pf-documentation/manual/working/scalaGuide/main/tests/code/services/UserService.scala | Scala | mit | 341 |
import pl.iterators.kebs.json.KebsSpray
import spray.json.{DefaultJsonProtocol, JsArray, JsBoolean, JsNull, JsNumber, JsObject, JsString, JsonFormat, NullOptions, RootJsonFormat}
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class SprayJsonFormatSnakifyVariantTests extends AnyFunSuite with Matchers {
object KebsProtocol extends DefaultJsonProtocol with KebsSpray.Snakified
import KebsProtocol._
case class C(anInteger: Int)
case class D(intField: Int, stringField: String)
case object F
case class Compound(CField: C, DField: D)
test("Flat format remains unchanged") {
val jf = implicitly[JsonFormat[C]]
jf.write(C(10)) shouldBe JsNumber(10)
jf.read(JsNumber(10)) shouldBe C(10)
}
test("Root format 0 remains unchanged") {
val jf = implicitly[RootJsonFormat[F.type]]
jf.write(F) shouldBe JsObject()
jf.read(JsObject()) shouldBe F
}
test("Root format 1 snakified") {
val jf = implicitly[RootJsonFormat[C]]
jf.write(C(10)) shouldBe JsObject("an_integer" -> JsNumber(10))
jf.read(JsObject("an_integer" -> JsNumber(0))) shouldBe C(0)
}
test("Root format 2 snakified") {
val jf = implicitly[RootJsonFormat[D]]
jf.write(D(10, "abcd")) shouldBe JsObject("int_field" -> JsNumber(10), "string_field" -> JsString("abcd"))
jf.read(JsObject("int_field" -> JsNumber(5), "string_field" -> JsString("abcdef"))) shouldBe D(5, "abcdef")
}
test("Json format 2 snakified") {
val jf = implicitly[JsonFormat[D]]
jf.write(D(10, "abcd")) shouldBe JsObject("int_field" -> JsNumber(10), "string_field" -> JsString("abcd"))
jf.read(JsObject("int_field" -> JsNumber(5), "string_field" -> JsString("abcdef"))) shouldBe D(5, "abcdef")
}
test("Root format snakified - compound") {
val jf = implicitly[JsonFormat[Compound]]
jf.write(Compound(C(5), D(10, "abcd"))) shouldBe JsObject("c_field" -> JsNumber(5),
"d_field" -> JsObject("int_field" -> JsNumber(10),
"string_field" -> JsString("abcd")))
jf.read(JsObject("c_field" -> JsNumber(10), "d_field" -> JsObject("int_field" -> JsNumber(100), "string_field" -> JsString("abb")))) shouldBe Compound(
C(10),
D(100, "abb"))
}
test("Root format snakified - case class with > 22 fields (issue #7)") {
import model._
val jf = implicitly[JsonFormat[ClassWith23Fields]]
val obj = ClassWith23Fields.Example
val json = JsObject(
Map(
"f1" -> JsString("f1 value"),
"f2" -> JsNumber(2),
"f3" -> JsNumber(3),
"f5" -> JsString("f5 value"),
"field_number_six" -> JsString("six"),
"f7" -> JsArray(JsString("f7 value 1"), JsString("f7 value 2")),
"f8" -> JsString("f8 value"),
"f9" -> JsString("f9 value"),
"f10" -> JsString("f10 value"),
"f11" -> JsString("f11 value"),
"f12" -> JsString("f12 value"),
"f13" -> JsString("f13 value"),
"f14" -> JsString("f14 value"),
"f15" -> JsString("f15 value"),
"f16" -> JsString("f16 value"),
"f17" -> JsString("f17 value"),
"f18" -> JsString("f18 value"),
"f19" -> JsString("f19 value"),
"f20" -> JsString("f20 value"),
"f21" -> JsString("f21 value"),
"f22" -> JsString("f22 value"),
"f23" -> JsBoolean(true)
))
jf.write(obj) shouldBe json
jf.read(json) shouldBe obj
}
test("Root format snakified with NullOptions - case class with > 22 fields (issue #73)") {
object KebsProtocolNullOptions extends DefaultJsonProtocol with KebsSpray.Snakified with NullOptions
import model._
import KebsProtocolNullOptions._
val jf = implicitly[JsonFormat[ClassWith23Fields]]
val obj = ClassWith23Fields.Example
val json = JsObject(
Map(
"f1" -> JsString("f1 value"),
"f2" -> JsNumber(2),
"f3" -> JsNumber(3),
"f4" -> JsNull,
"f5" -> JsString("f5 value"),
"field_number_six" -> JsString("six"),
"f7" -> JsArray(JsString("f7 value 1"), JsString("f7 value 2")),
"f8" -> JsString("f8 value"),
"f9" -> JsString("f9 value"),
"f10" -> JsString("f10 value"),
"f11" -> JsString("f11 value"),
"f12" -> JsString("f12 value"),
"f13" -> JsString("f13 value"),
"f14" -> JsString("f14 value"),
"f15" -> JsString("f15 value"),
"f16" -> JsString("f16 value"),
"f17" -> JsString("f17 value"),
"f18" -> JsString("f18 value"),
"f19" -> JsString("f19 value"),
"f20" -> JsString("f20 value"),
"f21" -> JsString("f21 value"),
"f22" -> JsString("f22 value"),
"f23" -> JsBoolean(true)
))
jf.write(obj) shouldBe json
jf.read(json) shouldBe obj
}
}
| theiterators/kebs | spray-json/src/test/scala/SprayJsonFormatSnakifyVariantTests.scala | Scala | mit | 5,498 |
package nexus
import nexus.diff.Input
import nexus.diff.ops._
import nexus.diff.optimizers._
import nexus.diff.execution._
import nexus.diff.syntax._
import nexus.jvm._
import nexus.jvm.setFloat32AsDefault._
import nexus.diff.modules._
import nexus._
import nexus.syntax._
/**
* @author Tongfei Chen
*/
object LogisticRegressionTest extends App {
class Batch extends Dim; val Batch = new Batch
class In extends Dim; val In = new In
class Out extends Dim; val Out = new Out
val X = FloatTensor.fromNestedArray(Batch, In)(Array(
Array(3f, 4f),
Array(5f, 1f),
Array(-3f, -2f),
Array(1f, 3f),
Array(0f, -1f)
))
val Y = FloatTensor.fromNestedArray(Batch, Out)(Array(
1, 1, 0, 1, 0
).map(i => if (i == 0) Array(1f, 0f) else Array(0f, 1f)))
val xs = X unstackAlong Batch
val ys = Y unstackAlong Batch
val x = Input[FloatTensor[In]]
val y = Input[FloatTensor[Out]]
val Layer = Affine(In -> 2, Out -> 2)
val output = x |> Layer |> Softmax
val loss = CrossEntropy(y, output)
val sgd = new AdamOptimizer(0.01)
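  // 100 passes over the training set, computing the loss and gradients per example and updating after each one.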
for (i <- 0 until 100) {
for ((xv, yv) <- xs zip ys) {
implicit val forward = SymbolicForward.given(x := xv, y := yv)
val lossValue = loss.value
val grads = loss.gradients
println(s"Iteration $i: loss = $lossValue")
sgd.update(grads)
}
}
val bp = 0
}
| ctongfei/nexus | jvm-ref/backend/src/test/scala/nexus/LogisticRegressionTest.scala | Scala | mit | 1,367 |
package filodb.cassandra.columnstore
import java.net.InetSocketAddress
import java.nio.ByteBuffer
import java.util.concurrent.TimeUnit
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import com.datastax.driver.core.{ConsistencyLevel, Metadata, Session, TokenRange}
import com.googlecode.concurrentlinkedhashmap.ConcurrentLinkedHashMap
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import kamon.Kamon
import monix.eval.Task
import monix.execution.Scheduler
import monix.reactive.Observable
import filodb.cassandra.FiloCassandraConnector
import filodb.core._
import filodb.core.store._
import filodb.memory.BinaryRegionLarge
/**
* Implementation of a column store using Apache Cassandra tables.
* This class must be thread-safe as it is intended to be used concurrently.
*
* ==Configuration==
* {{{
* cassandra {
* session-provider-fqcn = filodb.cassandra.DefaultFiloSessionProvider
* hosts = ["1.2.3.4", "1.2.3.5"]
* port = 9042
* keyspace = "my_cass_keyspace"
* username = ""
* password = ""
* read-timeout = 12 s # default read timeout of 12 seconds
* connect-timeout = 5 s
* }
* columnstore {
* tablecache-size = 50 # Number of cache entries for C* for ChunkTable etc.
* }
* }}}
*
* ==Constructor Args==
* @param config see the Configuration section above for the needed config
* @param readEc A Scheduler for reads. This must be separate from writes to prevent deadlocks.
* @param sched A Scheduler for writes
*/
class CassandraColumnStore(val config: Config, val readEc: Scheduler,
val session: Session,
val downsampledData: Boolean = false)
(implicit val sched: Scheduler)
extends ColumnStore with CassandraChunkSource with StrictLogging {
import collection.JavaConverters._
import filodb.core.store._
import Perftools._
logger.info(s"Starting CassandraColumnStore with config ${cassandraConfig.withoutPath("password")}")
private val writeParallelism = cassandraConfig.getInt("write-parallelism")
private val pkByUTNumSplits = cassandraConfig.getInt("pk-by-updated-time-table-num-splits")
private val pkByUTTtlSeconds = cassandraConfig.getDuration("pk-by-updated-time-table-ttl", TimeUnit.SECONDS).toInt
private val createTablesEnabled = cassandraConfig.getBoolean("create-tables-enabled")
private val numTokenRangeSplitsForScans = cassandraConfig.getInt("num-token-range-splits-for-scans")
val sinkStats = new ChunkSinkStats
def initialize(dataset: DatasetRef, numShards: Int): Future[Response] = {
val chunkTable = getOrCreateChunkTable(dataset)
val partitionKeysByUpdateTimeTable = getOrCreatePartitionKeysByUpdateTimeTable(dataset)
if (createTablesEnabled) {
val partKeyTablesInit = Observable.fromIterable(0.until(numShards)).map { s =>
getOrCreatePartitionKeysTable(dataset, s)
}.mapAsync(t => Task.fromFuture(t.initialize())).toListL
clusterConnector.createKeyspace(chunkTable.keyspace)
val indexTable = getOrCreateIngestionTimeIndexTable(dataset)
// Important: make sure nodes are in agreement before any schema changes
clusterMeta.checkSchemaAgreement()
for {ctResp <- chunkTable.initialize() if ctResp == Success
ixResp <- indexTable.initialize() if ixResp == Success
pkutResp <- partitionKeysByUpdateTimeTable.initialize() if pkutResp == Success
partKeyTablesResp <- partKeyTablesInit.runAsync if partKeyTablesResp.forall(_ == Success)
} yield Success
} else {
// ensure the table handles are eagerly created
0.until(numShards).foreach(getOrCreatePartitionKeysTable(dataset, _))
Future.successful(Success)
}
}
def truncate(dataset: DatasetRef, numShards: Int): Future[Response] = {
logger.info(s"Clearing all data for dataset ${dataset}")
val chunkTable = getOrCreateChunkTable(dataset)
val partitionKeysByUpdateTimeTable = getOrCreatePartitionKeysByUpdateTimeTable(dataset)
val partKeyTablesTrunc = Observable.fromIterable(0.until(numShards)).map { s =>
getOrCreatePartitionKeysTable(dataset, s)
}.mapAsync(t => Task.fromFuture(t.clearAll())).toListL
val indexTable = getOrCreateIngestionTimeIndexTable(dataset)
clusterMeta.checkSchemaAgreement()
for { ctResp <- chunkTable.clearAll() if ctResp == Success
ixResp <- indexTable.clearAll() if ixResp == Success
pkutResp <- partitionKeysByUpdateTimeTable.clearAll() if pkutResp == Success
partKeyTablesResp <- partKeyTablesTrunc.runAsync if partKeyTablesResp.forall( _ == Success)
} yield Success
}
def dropDataset(dataset: DatasetRef, numShards: Int): Future[Response] = {
val chunkTable = getOrCreateChunkTable(dataset)
val partitionKeysByUpdateTimeTable = getOrCreatePartitionKeysByUpdateTimeTable(dataset)
val indexTable = getOrCreateIngestionTimeIndexTable(dataset)
val partKeyTablesDrop = Observable.fromIterable(0.until(numShards)).map { s =>
getOrCreatePartitionKeysTable(dataset, s)
}.mapAsync(t => Task.fromFuture(t.drop())).toListL
clusterMeta.checkSchemaAgreement()
for {ctResp <- chunkTable.drop() if ctResp == Success
ixResp <- indexTable.drop() if ixResp == Success
pkutResp <- partitionKeysByUpdateTimeTable.drop() if pkutResp == Success
partKeyTablesResp <- partKeyTablesDrop.runAsync if partKeyTablesResp.forall(_ == Success)
} yield {
chunkTableCache.remove(dataset)
indexTableCache.remove(dataset)
partitionKeysTableCache.remove(dataset)
Success
}
}
// Initial implementation: write each ChunkSet as its own transaction. Will result in lots of writes.
// Future optimization: group by token range and batch?
def write(ref: DatasetRef,
chunksets: Observable[ChunkSet],
diskTimeToLiveSeconds: Int = 259200): Future[Response] = {
chunksets.mapAsync(writeParallelism) { chunkset =>
val span = Kamon.spanBuilder("write-chunkset").asChildOf(Kamon.currentSpan()).start()
val partBytes = BinaryRegionLarge.asNewByteArray(chunkset.partition)
val future =
for { writeChunksResp <- writeChunks(ref, partBytes, chunkset, diskTimeToLiveSeconds)
if writeChunksResp == Success
writeIndicesResp <- writeIndices(ref, partBytes, chunkset, diskTimeToLiveSeconds)
if writeIndicesResp == Success
} yield {
span.finish()
sinkStats.chunksetWrite()
writeIndicesResp
}
Task.fromFuture(future)
}
.countL.runAsync
.map { chunksWritten =>
if (chunksWritten > 0) Success else NotApplied
}
}
private def writeChunks(ref: DatasetRef,
partition: Array[Byte],
chunkset: ChunkSet,
diskTimeToLiveSeconds: Int): Future[Response] = {
asyncSubtrace("write-chunks", "ingestion") {
val chunkTable = getOrCreateChunkTable(ref)
chunkTable.writeChunks(partition, chunkset.info, chunkset.chunks, sinkStats, diskTimeToLiveSeconds)
.collect {
case Success => chunkset.invokeFlushListener(); Success
}
}
}
private def writeIndices(ref: DatasetRef,
partition: Array[Byte],
chunkset: ChunkSet,
diskTimeToLiveSeconds: Int): Future[Response] = {
asyncSubtrace("write-index", "ingestion") {
val indexTable = getOrCreateIngestionTimeIndexTable(ref)
val info = chunkset.info
val infos = Seq((info.ingestionTime, info.startTime, ChunkSetInfo.toBytes(info)))
indexTable.writeIndices(partition, infos, sinkStats, diskTimeToLiveSeconds)
}
}
/**
* Reads chunks by querying partitions by ingestion time range and subsequently filtering by user time range.
*
* Important Details:
   * 1. User end time is exclusive. This is important since we should not downsample one sample in two job runs.
   * 2. Since we query based on maxChunkTime, which is usually configured to be slightly greater than the
   *    flush interval, results can include chunks that are before the requested range. Callers need to
* handle this case.
*/
// scalastyle:off parameter.number
def getChunksByIngestionTimeRangeNoAsync(datasetRef: DatasetRef,
splits: Iterator[ScanSplit],
ingestionTimeStart: Long,
ingestionTimeEnd: Long,
userTimeStart: Long,
endTimeExclusive: Long,
maxChunkTime: Long,
batchSize: Int,
cassFetchSize: Int): Iterator[Seq[RawPartData]] = {
val partKeys = splits.flatMap {
case split: CassandraTokenRangeSplit =>
val indexTable = getOrCreateIngestionTimeIndexTable(datasetRef)
logger.debug(s"Querying cassandra for partKeys for split=$split ingestionTimeStart=$ingestionTimeStart " +
s"ingestionTimeEnd=$ingestionTimeEnd")
indexTable.scanPartKeysByIngestionTimeNoAsync(split.tokens, ingestionTimeStart, ingestionTimeEnd, cassFetchSize)
case split => throw new UnsupportedOperationException(s"Unknown split type $split seen")
}
val chunksTable = getOrCreateChunkTable(datasetRef)
partKeys.sliding(batchSize, batchSize).map { parts =>
logger.debug(s"Querying cassandra for chunks from ${parts.size} partitions userTimeStart=$userTimeStart " +
s"endTimeExclusive=$endTimeExclusive maxChunkTime=$maxChunkTime")
      // This could be more parallel, but the decision was made to control parallelism in one place: in Spark (via its
      // parallelism configuration). Revisit if needed later.
val batchReadSpan = Kamon.spanBuilder("cassandra-per-batch-data-read-latency").start()
try {
chunksTable.readRawPartitionRangeBBNoAsync(parts, userTimeStart - maxChunkTime, endTimeExclusive)
} finally {
batchReadSpan.finish()
}
}
}
/**
* Copy a range of chunks to a target ColumnStore, for performing disaster recovery or
* backfills. This method can also be used to delete chunks, by specifying a ttl of zero.
* If the target is the same as the source, then this effectively deletes from the source.
*
* @param diskTimeToLiveSeconds pass zero to delete chunks
*/
// scalastyle:off null method.length
def copyOrDeleteChunksByIngestionTimeRange(datasetRef: DatasetRef,
splits: Iterator[ScanSplit],
ingestionTimeStart: Long,
ingestionTimeEnd: Long,
batchSize: Int,
target: CassandraColumnStore,
targetDatasetRef: DatasetRef,
diskTimeToLiveSeconds: Int): Unit =
{
val sourceIndexTable = getOrCreateIngestionTimeIndexTable(datasetRef)
val sourceChunksTable = getOrCreateChunkTable(datasetRef)
val targetIndexTable = target.getOrCreateIngestionTimeIndexTable(targetDatasetRef)
val targetChunksTable = target.getOrCreateChunkTable(targetDatasetRef)
val chunkInfos = new ArrayBuffer[ByteBuffer]()
val futures = new ArrayBuffer[Future[Response]]()
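    // Flushes the chunk infos accumulated for a single partition: with a TTL of zero the chunks are deleted
    // from the target table, otherwise they are read from the source table and copied to the target table.
    // All outstanding writes queued so far are then awaited before the next batch starts.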
def finishBatch(partition: ByteBuffer): Unit = {
if (diskTimeToLiveSeconds == 0) {
futures += targetChunksTable.deleteChunks(partition, chunkInfos)
} else {
for (row <- sourceChunksTable.readChunksNoAsync(partition, chunkInfos).iterator.asScala) {
futures += targetChunksTable.writeChunks(partition, row, sinkStats, diskTimeToLiveSeconds)
}
}
chunkInfos.clear()
for (f <- futures) {
try {
Await.result(f, Duration(10, SECONDS))
} catch {
case e: Exception => {
logger.error(s"Async cassandra chunk copy failed", e)
}
}
}
futures.clear()
}
var lastPartition: ByteBuffer = null
for (split <- splits) {
val tokens = split.asInstanceOf[CassandraTokenRangeSplit].tokens
val rows = sourceIndexTable.scanRowsByIngestionTimeNoAsync(tokens, ingestionTimeStart, ingestionTimeEnd)
for (row <- rows) {
val partition = row.getBytes(0) // partition
if (!partition.equals(lastPartition)) {
if (lastPartition != null) {
finishBatch(lastPartition);
}
lastPartition = partition;
}
chunkInfos += row.getBytes(3) // info
if (diskTimeToLiveSeconds == 0) {
futures += targetIndexTable.deleteIndex(row);
} else {
futures += targetIndexTable.writeIndex(row, sinkStats, diskTimeToLiveSeconds);
}
if (chunkInfos.size >= batchSize) {
finishBatch(partition)
}
}
}
if (lastPartition != null) {
finishBatch(lastPartition);
}
}
// scalastyle:on
def shutdown(): Unit = {
clusterConnector.shutdown()
}
private def clusterMeta: Metadata = session.getCluster.getMetadata
/**
* Splits scans of a dataset across multiple token ranges.
   * @param splitsPerNode - how many ways to divide the token ranges on each node (i.e. how much parallelism)
* @return each split will have token_start, token_end, replicas filled in
*/
def getScanSplits(dataset: DatasetRef, splitsPerNode: Int = numTokenRangeSplitsForScans): Seq[ScanSplit] = {
val keyspace = clusterConnector.keyspace
require(splitsPerNode >= 1, s"Must specify at least 1 splits_per_node, got $splitsPerNode")
val tokenRanges = unwrapTokenRanges(clusterMeta.getTokenRanges.asScala.toSeq)
logger.debug(s"unwrapTokenRanges: ${tokenRanges.toString()}")
val tokensByReplica = tokenRanges.groupBy { tokenRange =>
clusterMeta.getReplicas(keyspace, tokenRange)
}
val tokenRangeGroups: Seq[Seq[TokenRange]] = {
tokensByReplica.flatMap { case (replicaKey, rangesPerReplica) =>
// First, sort tokens in each replica group so that adjacent tokens are next to each other
val sortedRanges = rangesPerReplica.sorted
// If token ranges can be merged (adjacent), merge them and divide evenly into splitsPerNode
try {
          // There is no "empty" or "zero" TokenRange, so we have to treat a single range separately.
val singleRange =
if (sortedRanges.length > 1) { sortedRanges.reduceLeft(_.mergeWith(_)) }
else { sortedRanges.head }
// We end up with splitsPerNode sets of single token ranges
singleRange.splitEvenly(splitsPerNode).asScala.map(Seq(_))
// If they cannot be merged (DSE / vnodes), then try to group ranges into splitsPerNode groups
          // This is less efficient, but having fewer partitions is still much better. Having a huge
// number of partitions is very slow for Spark, and we want to honor splitsPerNode.
} catch {
case e: IllegalArgumentException =>
// First range goes to split 0, second goes to split 1, etc, capped by splits
sortedRanges.zipWithIndex.groupBy(_._2 % splitsPerNode).values.map(_.map(_._1)).toSeq
}
}.toSeq
}
tokenRangeGroups.map { tokenRanges =>
val replicas = clusterMeta.getReplicas(keyspace, tokenRanges.head).asScala
CassandraTokenRangeSplit(tokenRanges.map { range => (range.getStart.toString, range.getEnd.toString) },
replicas.map(_.getSocketAddress).toSet)
}
}
def unwrapTokenRanges(wrappedRanges : Seq[TokenRange]): Seq[TokenRange] =
wrappedRanges.flatMap(_.unwrap().asScala.toSeq)
def scanPartKeys(ref: DatasetRef, shard: Int): Observable[PartKeyRecord] = {
val table = getOrCreatePartitionKeysTable(ref, shard)
Observable.fromIterable(getScanSplits(ref)).flatMap { tokenRange =>
table.scanPartKeys(tokenRange.asInstanceOf[CassandraTokenRangeSplit].tokens, shard)
}
}
def writePartKeys(ref: DatasetRef,
shard: Int,
partKeys: Observable[PartKeyRecord],
diskTTLSeconds: Int, updateHour: Long,
writeToPkUTTable: Boolean = true): Future[Response] = {
val pkTable = getOrCreatePartitionKeysTable(ref, shard)
val pkByUTTable = getOrCreatePartitionKeysByUpdateTimeTable(ref)
val span = Kamon.spanBuilder("write-part-keys").asChildOf(Kamon.currentSpan()).start()
val ret = partKeys.mapAsync(writeParallelism) { pk =>
val ttl = if (pk.endTime == Long.MaxValue) -1 else diskTTLSeconds
// caller needs to supply hash for partKey - cannot be None
// Logical & MaxValue needed to make split positive by zeroing sign bit
val split = (pk.hash.get & Int.MaxValue) % pkByUTNumSplits
val writePkFut = pkTable.writePartKey(pk, ttl).flatMap {
case resp if resp == Success && writeToPkUTTable =>
pkByUTTable.writePartKey(shard, updateHour, split, pk, pkByUTTtlSeconds)
case resp =>
Future.successful(resp)
}
Task.fromFuture(writePkFut).map{ resp =>
sinkStats.partKeysWrite(1)
resp
}
}.findL(_.isInstanceOf[ErrorResponse]).map(_.getOrElse(Success)).runAsync
ret.onComplete(_ => span.finish())
ret
}
def deletePartKeys(ref: DatasetRef,
shard: Int,
pks: Observable[Array[Byte]]): Future[Long] = {
val pkTable = getOrCreatePartitionKeysTable(ref, shard)
pks.mapAsync(writeParallelism) { pk =>
Task.fromFuture(pkTable.deletePartKey(pk, shard))
}.countL.runAsync
}
def getPartKeysByUpdateHour(ref: DatasetRef,
shard: Int,
updateHour: Long): Observable[PartKeyRecord] = {
val pkByUTTable = getOrCreatePartitionKeysByUpdateTimeTable(ref)
Observable.fromIterable(0 until pkByUTNumSplits)
.flatMap { split => pkByUTTable.scanPartKeys(shard, updateHour, split) }
}
}
/**
* FIXME this works only for Murmur3Partitioner because it generates
* Long based tokens. If other partitioners are used, this can potentially break.
* Correct way is to pass Token objects so CQL stmts can bind tokens with stmt.bind().setPartitionKeyToken(token)
*/
case class CassandraTokenRangeSplit(tokens: Seq[(String, String)],
replicas: Set[InetSocketAddress]) extends ScanSplit {
// NOTE: You need both the host string and the IP address for Spark's locality to work
def hostnames: Set[String] = replicas.flatMap(r => Set(r.getHostString, r.getAddress.getHostAddress))
}
trait CassandraChunkSource extends RawChunkSource with StrictLogging {
def config: Config
def session: Session
def readEc: Scheduler
implicit val readSched = readEc
val stats = new ChunkSourceStats
def downsampledData: Boolean
val cassandraConfig = config.getConfig("cassandra")
val ingestionConsistencyLevel = ConsistencyLevel.valueOf(cassandraConfig.getString("ingestion-consistency-level"))
val tableCacheSize = config.getInt("columnstore.tablecache-size")
val chunkTableCache = concurrentCache[DatasetRef, TimeSeriesChunksTable](tableCacheSize)
val indexTableCache = concurrentCache[DatasetRef, IngestionTimeIndexTable](tableCacheSize)
val partKeysByUTTableCache = concurrentCache[DatasetRef, PartitionKeysByUpdateTimeTable](tableCacheSize)
val partitionKeysTableCache = concurrentCache[DatasetRef,
ConcurrentLinkedHashMap[Int, PartitionKeysTable]](tableCacheSize)
protected val clusterConnector = new FiloCassandraConnector {
def config: Config = cassandraConfig
def session: Session = CassandraChunkSource.this.session
def ec: ExecutionContext = readEc
val keyspace: String = if (!downsampledData) config.getString("keyspace")
else config.getString("downsample-keyspace")
}
val partParallelism = 4
/**
* Read chunks from persistent store. Note the following constraints under which query is optimized:
*
* 1. Within a cassandra partition, chunks are ordered by chunkId. ChunkIds have this property:
* `chunkID(t1) > chunkId(t2) if and only if t1 > t2`.
*
* 2. All chunks have samples with a range of userTime. During ingestion, we restrict the maximum
   *    range for the userTime. This restriction makes it possible to issue a single CQL query to fetch
   *    all relevant chunks from Cassandra. We do this by searching Cassandra for chunkIds
* between `chunkID(queryStartTime - maxChunkTime)` and `chunkID(queryEndTime)`. The reason we need to
* subtract maxChunkTime from queryStartTime is for the range to include the first chunk which may have
* relevant data but may have a startTime outside the query range.
*
* @param ref dataset ref
* @param maxChunkTime maximum userTime (in millis) allowed in a single chunk. This restriction makes it
   *                     possible to issue a single CQL query to fetch all relevant chunks from Cassandra
* @param partMethod selector for partitions
* @param chunkMethod selector for chunks
* @return Stored chunks and infos for each matching partition
*/
def readRawPartitions(ref: DatasetRef,
maxChunkTime: Long,
partMethod: PartitionScanMethod,
chunkMethod: ChunkScanMethod = AllChunkScan): Observable[RawPartData] = {
val chunkTable = getOrCreateChunkTable(ref)
partMethod match {
case FilteredPartitionScan(CassandraTokenRangeSplit(tokens, _), Nil) =>
chunkTable.scanPartitionsBySplit(tokens)
case _ =>
val partitions = partMethod match {
case MultiPartitionScan(p, _) => p
case SinglePartitionScan(p, _) => Seq(p)
case p => throw new UnsupportedOperationException(s"PartitionScan $p to be implemented later")
}
val (start, end) = if (chunkMethod == AllChunkScan) (minChunkUserTime, maxChunkUserTime)
else (chunkMethod.startTime - maxChunkTime, chunkMethod.endTime)
chunkTable.readRawPartitionRange(partitions, start, end)
}
}
def getOrCreateChunkTable(dataset: DatasetRef): TimeSeriesChunksTable = {
chunkTableCache.getOrElseUpdate(dataset, { dataset: DatasetRef =>
new TimeSeriesChunksTable(dataset, clusterConnector, ingestionConsistencyLevel)(readEc) })
}
def getOrCreateIngestionTimeIndexTable(dataset: DatasetRef): IngestionTimeIndexTable = {
indexTableCache.getOrElseUpdate(dataset,
{ dataset: DatasetRef =>
new IngestionTimeIndexTable(dataset, clusterConnector, ingestionConsistencyLevel)(readEc) })
}
def getOrCreatePartitionKeysByUpdateTimeTable(dataset: DatasetRef): PartitionKeysByUpdateTimeTable = {
partKeysByUTTableCache.getOrElseUpdate(dataset,
{ dataset: DatasetRef =>
new PartitionKeysByUpdateTimeTable(dataset, clusterConnector, ingestionConsistencyLevel)(readEc) })
}
def getOrCreatePartitionKeysTable(dataset: DatasetRef, shard: Int): PartitionKeysTable = {
val map = partitionKeysTableCache.getOrElseUpdate(dataset, { _ =>
concurrentCache[Int, PartitionKeysTable](tableCacheSize)
})
map.getOrElseUpdate(shard, { shard: Int =>
new PartitionKeysTable(dataset, shard, clusterConnector, ingestionConsistencyLevel)(readEc)
})
}
def reset(): Unit = {}
}
| tuplejump/FiloDB | cassandra/src/main/scala/filodb.cassandra/columnstore/CassandraColumnStore.scala | Scala | apache-2.0 | 24,150 |
/*
https://www.reddit.com/r/dailyprogrammer/comments/bazy5j/20190408_challenge_377_easy_axisaligned_crate/
*/
object Boxes {
/*
For instance, if the crate is size X = 25 by Y = 18, and the boxes are size x = 6 by y = 5, then the answer is 12.
You can fit 4 boxes along the x-axis (because 6*4 <= 25), and 3 boxes along the y-axis (because 5*3 <= 18),
so in total you can fit 4*3 = 12 boxes in a rectangle.
*/
def fit1(crateX: Int, crateY: Int, boxX: Int, boxY: Int): Int = (crateX / boxX) * (crateY / boxY)
/*
You now have the option of rotating all boxes by 90 degrees,
so that you can treat a set of 6-by-5 boxes as a set of 5-by-6 boxes.
You do not have the option of rotating some of the boxes but not others.
*/
def fit2(crateX: Int, crateY: Int, boxX: Int, boxY: Int): Int =
List(fit1(crateX, crateY, boxX, boxY), fit1(crateX, crateY, boxY, boxX)).max
/*
You're now given six parameters, X, Y, Z, x, y, and z.
That is, you're given the X, Y, and Z dimensions of the crate, and the x, y, and z dimensions of the boxes.
There are now six different possible orientations of the boxes.
Again, boxes cannot be rotated independently: they all have to have the same orientation.
*/
def fit3(crateX: Int, crateY: Int, createZ: Int, boxX: Int, boxY: Int, boxZ: Int): Int =
List(boxX, boxY, boxZ).permutations.map(m => (crateX / m(0)) * (crateY / m(1)) * (createZ / m(2))).max
/*
Now you take a list of N crate dimensions, and N box dimensions.
Assume that the boxes may be rotated in any of N! orientations,
so that each axis of the crate aligns with a different axis of the boxes.
Again, boxes cannot be rotated independently.
*/
def fitN(crate: List[Int], box: List[Int]): Int =
box.permutations.map(b => crate.zip(b).map(x => x._1 / x._2).product).max
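
  // Minimal usage sketch (not part of the original solution). The fit1/fit2 values follow the worked
  // example in the comments above; the fit3/fitN results are hand-computed and should be treated as
  // illustrative assumptions rather than challenge-provided answers.
  def main(args: Array[String]): Unit = {
    println(fit1(25, 18, 6, 5))                          // 12
    println(fit2(25, 18, 6, 5))                          // 15 (rotated 5-by-6 boxes pack better)
    println(fit3(10, 10, 10, 1, 1, 1))                   // 1000
    println(fitN(List(123, 456, 789), List(10, 11, 12))) // 32604
  }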
}
| frankivo/dailyprogrammer | 377/src/main/scala/Boxes.scala | Scala | gpl-3.0 | 1,829 |
package com.twitter.zk
import scala.collection.JavaConverters._
import scala.collection.{Seq, Set}
import org.apache.zookeeper.common.PathUtils
import org.apache.zookeeper.data.{ACL, Stat}
import org.apache.zookeeper.{CreateMode, KeeperException, WatchedEvent}
import com.twitter.concurrent.{Broker, Offer}
import com.twitter.util.{Future, Return, Throw, Try}
/**
* A handle to a ZNode attached to a ZkClient
*/
trait ZNode {
/** Absolute path of ZNode */
val path: String
protected[zk] val zkClient: ZkClient
protected[this] lazy val log = zkClient.log
override def hashCode = path.hashCode
override def toString = "ZNode(%s)".format(path)
/** ZNodes are equal if they share a path. */
override def equals(other: Any) = other match {
case z @ ZNode(_) => (z.hashCode == hashCode)
case _ => false
}
/*
* Helpers
*/
/** Return the ZkClient associated with this node. */
def client = zkClient
/** Get a child node. */
def apply(child: String): ZNode = ZNode(zkClient, childPath(child))
/** Build a ZNode with its metadata. */
def apply(stat: Stat): ZNode.Exists = ZNode.Exists(this, stat)
/** Build a ZNode with its metadata and children. */
def apply(stat: Stat, children: Seq[String]): ZNode.Children = ZNode.Children(this, stat, children)
/** Build a ZNode with its metadata and data. */
def apply(stat: Stat, bytes: Array[Byte]): ZNode.Data = ZNode.Data(this, stat, bytes)
/** The 'basename' of the ZNode path. */
lazy val name: String = path.lastIndexOf('/') match {
case i if (i == -1 || i == path.length - 1) => ""
case i => path.substring(i + 1)
}
/** The parent node. The root node is its own parent. */
lazy val parent: ZNode = ZNode(zkClient, parentPath)
lazy val parentPath: String = path.lastIndexOf('/') match {
case i if (i <= 0) => "/"
case i => path.substring(0, i)
}
/** The absolute path of a child */
def childPath(child: String): String = path match {
case path if (!path.endsWith("/")) => path + "/" + child
case path => path + child
}
/** Create a copy of this ZNode with an alternate ZkClient. */
def withZkClient(zk: ZkClient): ZNode = ZNode(zk, path)
/*
* Remote node operations
*/
/**
* Create this ZNode; or if a child name is specified create that child.
*/
def create(
data: Array[Byte] = Array.empty[Byte],
acls: Seq[ACL] = zkClient.acl,
mode: CreateMode = zkClient.mode,
child: Option[String] = None): Future[ZNode] = {
val creatingPath = child map { "%s/%s".format(path, _) } getOrElse path
zkClient.retrying { zk =>
val result = new StringCallbackPromise
zk.create(creatingPath, data, acls.asJava, mode, result, null)
result map { newPath => zkClient(newPath) }
}
}
/** Returns a Future that is satisfied with this ZNode */
def delete(version: Int = 0): Future[ZNode] = zkClient.retrying { zk =>
val result = new UnitCallbackPromise
zk.delete(path, version, result, null)
result map { _ => this }
}
/** Returns a Future that is satisfied with this ZNode with its metadata and data */
def setData(data: Array[Byte], version: Int): Future[ZNode.Data] = zkClient.retrying { zk =>
val result = new ExistsCallbackPromise(this)
zk.setData(path, data, version, result, null)
result map { _.apply(data) }
}
/** Returns a Future that is satisfied with a reference to this ZNode */
def sync(): Future[ZNode] = zkClient.retrying { zk =>
val result = new UnitCallbackPromise
zk.sync(path, result, null)
result map { _ => this }
}
/** Provides access to this node's children. */
val getChildren: ZOp[ZNode.Children] = new ZOp[ZNode.Children] {
import LiftableFuture._
/** Get this ZNode with its metadata and children */
def apply(): Future[ZNode.Children] = zkClient.retrying { zk =>
val result = new ChildrenCallbackPromise(ZNode.this)
zk.getChildren(path, false, result, null)
result
}
/**
* Get a ZNode with its metadata and children; and install a watch for changes.
*
* The returned ZNode.Watch encapsulates the return value from a ZNode operation and the
* watch that will fire when a ZNode operation completes. If the ZNode does not exist, the
* result will be a Throw containing a KeeperException.NoNodeExists, though the watch will
* fire when an event occurs. If any other errors occur when fetching the ZNode, the returned
* Future will error without returning a Watch.
*/
def watch() = zkClient.retrying { zk =>
val result = new ChildrenCallbackPromise(ZNode.this)
val update = new EventPromise
zk.getChildren(path, update, result, null)
result.liftNoNode map { ZNode.Watch(_, update) }
}
}
/** Provides access to this node's data. */
val getData: ZOp[ZNode.Data] = new ZOp[ZNode.Data] {
import LiftableFuture._
/** Get this node's data */
def apply(): Future[ZNode.Data] = zkClient.retrying { zk =>
val result = new DataCallbackPromise(ZNode.this)
zk.getData(path, false, result, null)
result
}
/**
* Get this node's metadata and data; and install a watch for changes.
*
* The returned ZNode.Watch encapsulates the return value from a ZNode operation and the
* watch that will fire when a ZNode operation completes. If the ZNode does not exist, the
* result will be a Throw containing a KeeperException.NoNodeExists, though the watch will
* fire when an event occurs. If any other errors occur when fetching the ZNode, the returned
* Future will error without returning a Watch.
*/
def watch() = zkClient.retrying { zk =>
val result = new DataCallbackPromise(ZNode.this)
val update = new EventPromise
zk.getData(path, update, result, null)
result.liftNoNode map { ZNode.Watch(_, update) }
}
}
/** Provides access to this node's metadata. */
val exists: ZOp[ZNode.Exists] = new ZOp[ZNode.Exists] {
import LiftableFuture._
/** Get this node's metadata. */
def apply() = zkClient.retrying { zk =>
val result = new ExistsCallbackPromise(ZNode.this)
zk.exists(path, false, result, null)
result
}
/** Get this node's metadata and watch for updates */
def watch() = zkClient.retrying { zk =>
val result = new ExistsCallbackPromise(ZNode.this)
val update = new EventPromise
zk.exists(path, update, result, null)
result.liftNoNode.map { ZNode.Watch(_, update) }
}
}
/**
* Continuously watch all nodes in this subtree for child updates.
*
* A ZNode.TreeUpdate is offered for each node in the tree.
*
* If this node is deleted and it had children, an offer is sent indicating that this
* node no longer has children. A watch is maintained on deleted nodes so that if the
* parent node is not monitored, the monitor continues to work when the node is restored.
*
* If an authorization failure or session expiration is encountered, the monitor will be lost
* silently. To detect these situations, receive events from ZkClient.monitorSession().
*/
def monitorTree(): Offer[ZNode.TreeUpdate] = {
val broker = new Broker[ZNode.TreeUpdate]
/** Pipe events from a subtree's monitor to this broker. */
def pipeSubTreeUpdates(next: Offer[ZNode.TreeUpdate]) {
next.sync() flatMap(broker!) onSuccess { _ => pipeSubTreeUpdates(next) }
}
/** Monitor a watch on this node. */
def monitorWatch(watch: Future[ZNode.Watch[ZNode.Children]], knownChildren: Set[ZNode]) {
log.debug("monitoring %s with %d known children", path, knownChildren.size)
watch onFailure { e =>
// An error occurred and there's not really anything we can do about it.
log.error(e, "%s: watch could not be established".format(path))
} onSuccess {
// When a node is fetched with a watch, send a ZNode.TreeUpdate on the broker, and start
// monitoring
case ZNode.Watch(Return(zparent), eventUpdate) => {
val children = zparent.children.toSet
val treeUpdate = ZNode.TreeUpdate(zparent,
added = children -- knownChildren,
removed = knownChildren -- children)
log.debug("updating %s with %d children", path, treeUpdate.added.size)
broker send(treeUpdate) sync() onSuccess { _ =>
log.debug("updated %s with %d children", path, treeUpdate.added.size)
treeUpdate.added foreach { z =>
pipeSubTreeUpdates(z.monitorTree())
}
eventUpdate onSuccess { event =>
log.debug("event received on %s: %s", path, event)
} onSuccess {
case MonitorableEvent() => monitorWatch(zparent.getChildren.watch(), children)
case event => log.debug("Unmonitorable event: %s: %s", path, event)
}
}
}
case ZNode.Watch(Throw(ZNode.Error(_path)), eventUpdate) => {
// Tell the broker about the children we lost; otherwise, if there were no children,
// this deletion should be reflected in a watch on the parent node, if one exists.
if (knownChildren.size > 0) {
broker send(ZNode.TreeUpdate(this, removed = knownChildren)) sync()
} else {
Future.Done
} onSuccess { _ =>
eventUpdate onSuccess {
case MonitorableEvent() => monitorWatch(parent.getChildren.watch(), Set.empty[ZNode])
case event => log.debug("Unmonitorable event: %s: %s", path, event)
}
}
}
}
}
// Initially, we don't know about any children for the node.
monitorWatch(getChildren.watch(), Set.empty[ZNode])
broker.recv
}
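
  // Hypothetical usage sketch (not part of the original API): repeatedly sync on the returned Offer to
  // drain tree updates.
  //   def logUpdates(updates: Offer[ZNode.TreeUpdate]): Unit =
  //     updates.sync() foreach { u =>
  //       u.added foreach { z => println("added " + z.path) }
  //       u.removed foreach { z => println("removed " + z.path) }
  //       logUpdates(updates)
  //     }
  //   logUpdates(node.monitorTree())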
/** AuthFailed and Expired are unmonitorable. Everything else can be resumed. */
protected[this] object MonitorableEvent {
def unapply(event: WatchedEvent) = event match {
case StateEvent.AuthFailed() => false
case StateEvent.Expired() => false
case _ => true
}
}
}
/**
* ZNode utilities and return types.
*/
object ZNode {
/** Build a ZNode */
def apply(zk: ZkClient, _path: String) = new ZNode {
PathUtils.validatePath(_path)
protected[zk] val zkClient = zk
val path = _path
}
/** matcher */
def unapply(znode: ZNode) = Some(znode.path)
/** A matcher for KeeperExceptions that have a non-null path. */
object Error {
def unapply(ke: KeeperException) = Option(ke.getPath)
}
/** A ZNode with its Stat metadata. */
trait Exists extends ZNode {
val stat: Stat
override def equals(other: Any) = other match {
case Exists(p, s) => (p == path && s == stat)
case o => super.equals(o)
}
def apply(children: Seq[String]): ZNode.Children = apply(stat, children)
def apply(bytes: Array[Byte]): ZNode.Data = apply(stat, bytes)
}
object Exists {
def apply(znode: ZNode, _stat: Stat) = new Exists {
val path = znode.path
protected[zk] val zkClient = znode.zkClient
val stat = _stat
}
def apply(znode: Exists): Exists = apply(znode, znode.stat)
def unapply(znode: Exists) = Some((znode.path, znode.stat))
}
/** A ZNode with its Stat metadata and children znodes. */
trait Children extends Exists {
val stat: Stat
val children: Seq[ZNode]
override def equals(other: Any) = other match {
case Children(p, s, c) => (p == path && s == stat && c == children)
case o => super.equals(o)
}
}
object Children {
def apply(znode: Exists, _children: Seq[ZNode]): Children = new Children {
val path = znode.path
protected[zk] val zkClient = znode.zkClient
val stat = znode.stat
val children = _children
}
def apply(znode: ZNode, stat: Stat, children: Seq[String]): Children = {
apply(Exists(znode, stat), children.map(znode.apply))
}
def unapply(z: Children) = Some((z.path, z.stat, z.children))
}
/** A ZNode with its Stat metadata and data. */
trait Data extends Exists {
val stat: Stat
val bytes: Array[Byte]
override def equals(other: Any) = other match {
case Data(p, s, b) => (p == path && s == stat && b == bytes)
case o => super.equals(o)
}
}
object Data {
def apply(znode: ZNode, _stat: Stat, _bytes: Array[Byte]) = new Data {
val path = znode.path
protected[zk] val zkClient = znode.zkClient
val stat = _stat
val bytes = _bytes
}
def apply(znode: Exists, bytes: Array[Byte]): Data = apply(znode, znode.stat, bytes)
def unapply(znode: Data) = Some((znode.path, znode.stat, znode.bytes))
}
case class Watch[T <: Exists](result: Try[T], update: Future[WatchedEvent]) {
/** Map this Watch to one of another type. */
def map[V <: Exists](toV: T => V): Watch[V] = new Watch(result.map(toV), update)
}
/** Describes an update to a node's children. */
case class TreeUpdate(
parent: ZNode,
added: Set[ZNode] = Set.empty[ZNode],
removed: Set[ZNode] = Set.empty[ZNode])
}
| travisbrown/util | util-zk/src/main/scala/com/twitter/zk/ZNode.scala | Scala | apache-2.0 | 13,127 |
package ml.combust.mleap.runtime.test
import java.io.File
import java.nio.file.{Files, Path}
import ml.combust.mleap.core.regression.DecisionTreeRegressionModel
import ml.combust.mleap.core.tree.{ContinuousSplit, InternalNode, LeafNode, Node}
/**
* Created by hollinwilkins on 9/28/16.
*/
object TestUtil {
val baseDir = {
val temp: Path = Files.createTempDirectory("mleap-runtime")
temp.toFile.deleteOnExit()
temp.toAbsolutePath
}
def delete(file: File): Array[(String, Boolean)] = {
Option(file.listFiles).map(_.flatMap(f => delete(f))).getOrElse(Array()) :+ (file.getPath -> file.delete)
}
def buildDecisionTreeRegression(prediction: Double,
featureIndex: Int,
goLeft: Boolean): DecisionTreeRegressionModel = {
DecisionTreeRegressionModel(buildTri(prediction, featureIndex, goLeft), 5)
}
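  // Builds a depth-1 tree that splits on `featureIndex` at threshold 0.5 and carries `prediction` on the
  // left or right leaf depending on `goLeft`; the remaining leaf predicts a fixed 42.34.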
def buildTri(prediction: Double, featureIndex: Int, goLeft: Boolean): Node = {
val node1 = LeafNode(prediction)
val node2 = LeafNode(42.34)
val split = ContinuousSplit(featureIndex, 0.5)
if(goLeft) {
InternalNode(node1, node2, split)
} else {
InternalNode(node2, node1, split)
}
}
}
| combust/mleap | mleap-runtime/src/test/scala/ml/combust/mleap/runtime/test/TestUtil.scala | Scala | apache-2.0 | 1,226 |
package com.github.gtache.lsp.client.languageserver.requestmanager
import java.util
import java.util.concurrent.CompletableFuture
import com.github.gtache.lsp.client.languageserver.ServerStatus
import com.github.gtache.lsp.client.languageserver.wrapper.LanguageServerWrapper
import com.intellij.openapi.diagnostic.Logger
import org.eclipse.lsp4j._
import org.eclipse.lsp4j.jsonrpc.messages
import org.eclipse.lsp4j.jsonrpc.messages.CancelParams
import org.eclipse.lsp4j.services.{LanguageClient, LanguageServer, TextDocumentService, WorkspaceService}
/**
* Basic implementation of a RequestManager which just passes requests from client to server and vice-versa
*/
class SimpleRequestManager(wrapper: LanguageServerWrapper, server: LanguageServer, client: LanguageClient, serverCapabilities: ServerCapabilities) extends RequestManager {
private val textDocumentOptions = if (serverCapabilities.getTextDocumentSync.isRight) serverCapabilities.getTextDocumentSync.getRight else null
private val workspaceService: WorkspaceService = server.getWorkspaceService
private val textDocumentService: TextDocumentService = server.getTextDocumentService
private val LOG: Logger = Logger.getInstance(classOf[SimpleRequestManager])
//Client
override def showMessage(messageParams: MessageParams): Unit = client.showMessage(messageParams)
override def showMessageRequest(showMessageRequestParams: ShowMessageRequestParams): CompletableFuture[MessageActionItem] = client.showMessageRequest(showMessageRequestParams)
override def logMessage(messageParams: MessageParams): Unit = client.logMessage(messageParams)
override def telemetryEvent(o: Any): Unit = client.telemetryEvent(o)
override def registerCapability(params: RegistrationParams): CompletableFuture[Void] = client.registerCapability(params)
override def unregisterCapability(params: UnregistrationParams): CompletableFuture[Void] = client.unregisterCapability(params)
override def applyEdit(params: ApplyWorkspaceEditParams): CompletableFuture[ApplyWorkspaceEditResponse] = client.applyEdit(params)
override def publishDiagnostics(publishDiagnosticsParams: PublishDiagnosticsParams): Unit = client.publishDiagnostics(publishDiagnosticsParams)
override def semanticHighlighting(params: SemanticHighlightingParams): Unit = client.semanticHighlighting(params)
//General
override def initialize(params: InitializeParams): CompletableFuture[InitializeResult] = {
if (checkStatus) try {
server.initialize(params)
} catch {
case e: Exception => crashed(e)
null
} else null
}
override def initialized(params: InitializedParams): Unit =
if (checkStatus) try {
server.initialized(params)
} catch {
case e: Exception => crashed(e)
}
override def shutdown: CompletableFuture[AnyRef] =
if (checkStatus) try {
server.shutdown()
} catch {
case e: Exception => crashed(e)
null
} else null
override def exit(): Unit =
if (checkStatus) try {
server.exit()
} catch {
case e: Exception => crashed(e)
}
override def cancelRequest(params: CancelParams): Unit = {
}
//Workspace
override def didChangeConfiguration(params: DidChangeConfigurationParams): Unit =
if (checkStatus) try {
workspaceService.didChangeConfiguration(params)
} catch {
case e: Exception => crashed(e)
}
override def didChangeWatchedFiles(params: DidChangeWatchedFilesParams): Unit =
if (checkStatus) try {
workspaceService.didChangeWatchedFiles(params)
} catch {
case e: Exception => crashed(e)
}
override def symbol(params: WorkspaceSymbolParams): CompletableFuture[java.util.List[_ <: SymbolInformation]] =
if (checkStatus) try {
if (serverCapabilities.getWorkspaceSymbolProvider) workspaceService.symbol(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def executeCommand(params: ExecuteCommandParams): CompletableFuture[AnyRef] =
if (checkStatus) try {
if (serverCapabilities.getExecuteCommandProvider != null) workspaceService.executeCommand(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
//TextDocument
override def didOpen(params: DidOpenTextDocumentParams): Unit =
if (checkStatus) try {
if (textDocumentOptions == null || textDocumentOptions.getOpenClose) textDocumentService.didOpen(params)
} catch {
case e: Exception => crashed(e)
}
override def didChange(params: DidChangeTextDocumentParams): Unit =
if (checkStatus) try {
if (textDocumentOptions == null || textDocumentOptions.getChange != null) textDocumentService.didChange(params)
} catch {
case e: Exception => crashed(e)
}
override def willSave(params: WillSaveTextDocumentParams): Unit =
if (checkStatus) try {
if (textDocumentOptions == null || textDocumentOptions.getWillSave) textDocumentService.willSave(params)
} catch {
case e: Exception => crashed(e)
}
override def willSaveWaitUntil(params: WillSaveTextDocumentParams): CompletableFuture[java.util.List[TextEdit]] =
if (checkStatus) try {
if (textDocumentOptions == null || textDocumentOptions.getWillSaveWaitUntil) textDocumentService.willSaveWaitUntil(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def didSave(params: DidSaveTextDocumentParams): Unit =
if (checkStatus) try {
if (textDocumentOptions == null || textDocumentOptions.getSave != null) textDocumentService.didSave(params)
} catch {
case e: Exception => crashed(e)
}
override def didClose(params: DidCloseTextDocumentParams): Unit =
if (checkStatus) try {
if (textDocumentOptions == null || textDocumentOptions.getOpenClose) textDocumentService.didClose(params)
} catch {
case e: Exception => crashed(e)
}
override def completion(params: CompletionParams): CompletableFuture[jsonrpc.messages.Either[java.util.List[CompletionItem], CompletionList]] =
if (checkStatus) try {
if (serverCapabilities.getCompletionProvider != null) textDocumentService.completion(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def resolveCompletionItem(unresolved: CompletionItem): CompletableFuture[CompletionItem] =
if (checkStatus) try {
if (serverCapabilities.getCompletionProvider != null && serverCapabilities.getCompletionProvider.getResolveProvider) textDocumentService.resolveCompletionItem(unresolved) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def hover(params: TextDocumentPositionParams): CompletableFuture[Hover] =
if (checkStatus) try {
if (serverCapabilities.getHoverProvider) textDocumentService.hover(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def signatureHelp(params: TextDocumentPositionParams): CompletableFuture[SignatureHelp] =
if (checkStatus) try {
if (serverCapabilities.getSignatureHelpProvider != null) textDocumentService.signatureHelp(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def references(params: ReferenceParams): CompletableFuture[java.util.List[_ <: Location]] =
if (checkStatus) try {
if (serverCapabilities.getReferencesProvider) textDocumentService.references(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def documentHighlight(params: TextDocumentPositionParams): CompletableFuture[java.util.List[_ <: DocumentHighlight]] =
if (checkStatus) try {
if (serverCapabilities.getDocumentHighlightProvider) textDocumentService.documentHighlight(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def documentSymbol(params: DocumentSymbolParams): CompletableFuture[java.util.List[jsonrpc.messages.Either[SymbolInformation, DocumentSymbol]]] =
if (checkStatus) try {
if (serverCapabilities.getDocumentSymbolProvider) textDocumentService.documentSymbol(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def formatting(params: DocumentFormattingParams): CompletableFuture[java.util.List[_ <: TextEdit]] =
if (checkStatus) try {
if (serverCapabilities.getDocumentFormattingProvider) textDocumentService.formatting(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def rangeFormatting(params: DocumentRangeFormattingParams): CompletableFuture[java.util.List[_ <: TextEdit]] =
if (checkStatus) try {
if (serverCapabilities.getDocumentRangeFormattingProvider) textDocumentService.rangeFormatting(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def onTypeFormatting(params: DocumentOnTypeFormattingParams): CompletableFuture[java.util.List[_ <: TextEdit]] =
if (checkStatus) try {
if (serverCapabilities.getDocumentOnTypeFormattingProvider != null) textDocumentService.onTypeFormatting(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def definition(params: TextDocumentPositionParams): CompletableFuture[jsonrpc.messages.Either[java.util.List[_ <: Location], java.util.List[_ <: LocationLink]]] =
if (checkStatus) try {
if (serverCapabilities.getDefinitionProvider) textDocumentService.definition(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def codeAction(params: CodeActionParams): CompletableFuture[java.util.List[jsonrpc.messages.Either[Command, CodeAction]]] =
if (checkStatus) try {
if (checkProvider(serverCapabilities.getCodeActionProvider.asInstanceOf[jsonrpc.messages.Either[Boolean, StaticRegistrationOptions]])) textDocumentService.codeAction(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
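  // A capability expressed as Either[Boolean, StaticRegistrationOptions] counts as enabled when it is
  // either Left(true) or a non-null Right value.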
private def checkProvider(provider: jsonrpc.messages.Either[Boolean, StaticRegistrationOptions]): Boolean = {
provider != null && ((provider.isLeft && provider.getLeft) || (provider.isRight && provider.getRight != null))
}
override def codeLens(params: CodeLensParams): CompletableFuture[java.util.List[_ <: CodeLens]] =
if (checkStatus) try {
if (serverCapabilities.getCodeLensProvider != null) textDocumentService.codeLens(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def resolveCodeLens(unresolved: CodeLens): CompletableFuture[CodeLens] =
if (checkStatus) try {
if (serverCapabilities.getCodeLensProvider != null && serverCapabilities.getCodeLensProvider.isResolveProvider) textDocumentService.resolveCodeLens(unresolved) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def documentLink(params: DocumentLinkParams): CompletableFuture[java.util.List[DocumentLink]] =
if (checkStatus) try {
if (serverCapabilities.getDocumentLinkProvider != null) textDocumentService.documentLink(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
private def checkStatus: Boolean = wrapper.getStatus == ServerStatus.STARTED
private def crashed(e: Exception): Unit = {
LOG.warn(e)
wrapper.crashed(e)
}
override def documentLinkResolve(unresolved: DocumentLink): CompletableFuture[DocumentLink] =
if (checkStatus) try {
if (serverCapabilities.getDocumentLinkProvider != null && serverCapabilities.getDocumentLinkProvider.getResolveProvider)
textDocumentService.documentLinkResolve(unresolved) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def rename(params: RenameParams): CompletableFuture[WorkspaceEdit] =
if (checkStatus) try {
if (checkProvider(serverCapabilities.getRenameProvider.asInstanceOf[jsonrpc.messages.Either[Boolean, StaticRegistrationOptions]])) textDocumentService.rename(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
override def prepareRename(params: TextDocumentPositionParams): CompletableFuture[messages.Either[Range, PrepareRenameResult]] = {
if (checkStatus) try {
if (checkProvider(serverCapabilities.getRenameProvider.asInstanceOf[jsonrpc.messages.Either[Boolean, StaticRegistrationOptions]])) textDocumentService.prepareRename(params) else null
} catch {
case e: Exception => crashed(e)
null
} else null
}
override def implementation(params: TextDocumentPositionParams): CompletableFuture[messages.Either[java.util.List[_ <: Location], java.util.List[_ <: LocationLink]]] = throw new NotImplementedError()
override def typeDefinition(params: TextDocumentPositionParams): CompletableFuture[messages.Either[java.util.List[_ <: Location], java.util.List[_ <: LocationLink]]] = throw new NotImplementedError()
override def documentColor(params: DocumentColorParams): CompletableFuture[util.List[ColorInformation]] = throw new NotImplementedError()
override def colorPresentation(params: ColorPresentationParams): CompletableFuture[util.List[ColorPresentation]] = throw new NotImplementedError()
override def foldingRange(params: FoldingRangeRequestParams): CompletableFuture[util.List[FoldingRange]] = throw new NotImplementedError()
}
| gtache/intellij-lsp | intellij-lsp/src/com/github/gtache/lsp/client/languageserver/requestmanager/SimpleRequestManager.scala | Scala | apache-2.0 | 13,710 |
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat.PluginRunner
import org.scalatest.{ FreeSpec, Matchers, OneInstancePerTest }
class ReverseTakeReverseTest
extends FreeSpec
with Matchers
with PluginRunner
with OneInstancePerTest {
override val inspections = Seq(new ReverseTakeReverse)
"ReverseTakeReverse" - {
"should report warning" in {
val code = """class Test {
List(1,2,3).reverse.take(2).reverse
Array(1,2,3).reverse.take(1).reverse
} """.stripMargin
compileCodeSnippet(code)
compiler.scapegoat.feedback.warnings.size shouldBe 2
}
}
}
| vdichev/scapegoat | src/test/scala/com/sksamuel/scapegoat/inspections/collections/ReverseTakeReverseTest.scala | Scala | apache-2.0 | 692 |