code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 5–1M)
---|---|---|---|---|---
package com.nekogata.backlogger.domain.setting
case class Setting(apiKey: String, spaceName: String) {
def setApiKey(k: String) = copy(apiKey = k)
def setSpaceName(s: String) = copy(spaceName = s)
}
| Shinpeim/BackLogger | scala/src/main/scala/com/nekogata/backlogger/domain/setting/Setting.scala | Scala | mit | 211 |
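Since Setting is an immutable case class, each "setter" above returns a fresh copy rather than mutating in place. A minimal usage sketch with hypothetical values (only the class defined above is assumed):

import com.nekogata.backlogger.domain.setting.Setting

// Hypothetical values; each call builds a new Setting via copy, leaving the original untouched.
val initial    = Setting(apiKey = "", spaceName = "")
val configured = initial.setApiKey("abc123").setSpaceName("my-space")
// initial is unchanged; configured == Setting("abc123", "my-space")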
package models
import play.api.Play
import play.api.Play.current
import play.api.Logger
import scala.collection.JavaConverters._
import com.typesafe.config.Config
object Environment {
type ConnectionName = String
val connectionNames: List[ConnectionName] = {
Play.configuration.getStringList("databases.connections").map(_.asScala.toList).getOrElse(List.empty)
}
def connectionDescription(connection: ConnectionName): Option[String] = {
Play.configuration.getString(s"databases.${connection}.description")
}
val databaseConnections: List[(ConnectionName,String)] = {
connectionNames.map( name => (name, connectionDescription(name).getOrElse("")))
}
def findPasswordForApplicationUser(username: String): Option[String] = {
for{
userConfigs <- Play.configuration.getConfig("application.users")
userConfig <- userConfigs.getConfig(username)
password <- userConfig.getString("password")
} yield password
}
}
import Environment.ConnectionName
case class FeatureToggle(name: String, enabled: Boolean)
case class FeatureToggleMap(toggles: Map[String,FeatureToggle]){
def isEnabled(featureName: String) = toggles.get(featureName).map(_.enabled).getOrElse(false)
}
object FeatureToggles {
private val featureNames = List("toggle","add","remove","edit")
private def isDatabaseFeatureEnabled(connection: ConnectionName, featureName: String): Boolean = {
Play.configuration.getBoolean(s"databases.${connection}.features.${featureName}").getOrElse(false)
}
def findFeatureToggles(connection: ConnectionName): FeatureToggleMap = {
val enabledFeatures = featureNames.filter( isDatabaseFeatureEnabled(connection,_))
val map = enabledFeatures.map( feature => (feature,FeatureToggle(feature,true)) ).toMap
FeatureToggleMap(map)
}
def isBackupEnabled(connection: ConnectionName): Boolean = isDatabaseFeatureEnabled(connection,"backup")
def isRelayEnabled(connection: ConnectionName): Boolean = isDatabaseFeatureEnabled(connection,"relay")
def isRelocationEnabled(connection: ConnectionName): Boolean = isDatabaseFeatureEnabled(connection,"relocation")
def isToggleEnabled(connection: ConnectionName): Boolean = isDatabaseFeatureEnabled(connection,"toggle")
def isAddEnabled(connection: ConnectionName): Boolean = isDatabaseFeatureEnabled(connection,"add")
def isRemoveEnabled(connection: ConnectionName): Boolean = isDatabaseFeatureEnabled(connection,"remove")
def isEditEnabled(connection: ConnectionName): Boolean = isDatabaseFeatureEnabled(connection,"edit")
}
case class ErrorMessage(message: String)
| flurdy/sortingoffice | app/models/environment.scala | Scala | mit | 2,574 |
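FeatureToggles above resolves per-connection flags from Play configuration keys of the form databases.<connection>.features.<feature>, treating missing keys as disabled. The sketch below mirrors that lookup with plain Typesafe Config (already imported by the file above) so it runs without a Play application; the connection name, description, and flag values are hypothetical.

import com.typesafe.config.ConfigFactory

// Hypothetical application.conf fragment using the same key layout as the code above.
val config = ConfigFactory.parseString(
  """databases.connections = ["maildb"]
    |databases.maildb.description = "Primary mail database"
    |databases.maildb.features.toggle = true
    |databases.maildb.features.add = false
    |""".stripMargin)

// Same pattern as isDatabaseFeatureEnabled, minus the Play wrapper: absent keys count as disabled.
def featureEnabled(connection: String, feature: String): Boolean = {
  val path = s"databases.$connection.features.$feature"
  config.hasPath(path) && config.getBoolean(path)
}

assert(featureEnabled("maildb", "toggle"))
assert(!featureEnabled("maildb", "add"))
assert(!featureEnabled("maildb", "remove")) // never configured, so disabled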
/***********************************************************************
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import org.locationtech.jts.geom.Geometry
import org.geotools.data.Query
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.accumulo.index.JoinIndex
import org.locationtech.geomesa.index.index.NamedIndex
import org.locationtech.geomesa.index.index.z3.Z3Index
import org.locationtech.geomesa.index.utils.{ExplainNull, Explainer}
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class AttributeIndexFilteringIteratorTest extends Specification with TestWithFeatureType {
import org.locationtech.geomesa.filter.ff
sequential
override val spec = s"name:String:index=join,age:Integer:index=join,dtg:Date,*geom:Point:srid=4326"
val features = List("a", "b", "c", "d").flatMap { name =>
List(1, 2, 3, 4).zip(List(45, 46, 47, 48)).map { case (i, lat) =>
val sf = SimpleFeatureBuilder.build(sft, List(), name + i.toString)
sf.setDefaultGeometry(WKTUtils.read(f"POINT($lat%d $lat%d)"))
sf.setAttribute("dtg", "2011-01-01T00:00:00Z")
sf.setAttribute("age", i)
sf.setAttribute("name", name)
sf
}
}
addFeatures(features)
def checkStrategies[T](query: Query, strategy: NamedIndex, explain: Explainer = ExplainNull): MatchResult[Any] = {
val plan = ds.getQueryPlan(query, explainer = explain)
plan must haveLength(1)
plan.head.filter.index.name mustEqual strategy.name
}
"AttributeIndexFilteringIterator" should {
"handle like queries and choose correct strategies" in {
// Try out wildcard queries using the % wildcard syntax.
// Test single wildcard, trailing, leading, and both trailing & leading wildcards
forall(List("a", "b", "c", "d")) { letter =>
// 4 features for this letter
val leftWildCard = new Query(sftName, ff.like(ff.property("name"),s"%$letter"))
checkStrategies(leftWildCard, Z3Index)
SelfClosingIterator(fs.getFeatures(leftWildCard)).toSeq must haveLength(4)
// Double wildcards should be full table scan
val doubleWildCard = new Query(sftName, ff.like(ff.property("name"),s"%$letter%"))
checkStrategies(doubleWildCard, Z3Index)
SelfClosingIterator(fs.getFeatures(doubleWildCard)).toSeq must haveLength(4)
// should return the 4 features for this letter
val rightWildcard = new Query(sftName, ff.like(ff.property("name"),s"$letter%"))
checkStrategies(rightWildcard, JoinIndex)
SelfClosingIterator(fs.getFeatures(rightWildcard)).toSeq must haveLength(4)
}
}
"actually handle transforms properly and chose correct strategies for attribute indexing" in {
// transform to only return the attribute geom - dropping dtg, age, and name
val query = new Query(sftName, ECQL.toFilter("name = 'b'"), Array("geom"))
checkStrategies(query, JoinIndex)
// full table scan
val leftWildCard = new Query(sftName, ff.like(ff.property("name"), "%b"), Array("geom"))
checkStrategies(leftWildCard, Z3Index)
// full table scan
val doubleWildCard = new Query(sftName, ff.like(ff.property("name"), "%b%"), Array("geom"))
checkStrategies(doubleWildCard, Z3Index)
val rightWildcard = new Query(sftName, ff.like(ff.property("name"), "b%"), Array("geom"))
checkStrategies(rightWildcard, JoinIndex)
forall(List(query, leftWildCard, doubleWildCard, rightWildcard)) { query =>
val features = SelfClosingIterator(fs.getFeatures(query)).toList
features must haveLength(4)
forall(features)(_.getAttribute(0) must beAnInstanceOf[Geometry])
forall(features)(_.getAttributeCount mustEqual 1)
}
}
"handle corner case with attr idx, bbox, and no temporal filter" in {
val filter = ff.and(ECQL.toFilter("name = 'b'"), ECQL.toFilter("BBOX(geom, 30, 30, 50, 50)"))
val query = new Query(sftName, filter, Array("geom"))
ds.getQueryPlan(query).head.filter.index.name mustEqual JoinIndex.name
val features = SelfClosingIterator(fs.getFeatures(query)).toList
features must haveLength(4)
forall(features)(_.getAttribute(0) must beAnInstanceOf[Geometry])
forall(features)(_.getAttributeCount mustEqual 1)
}
}
}
| ccri/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/iterators/AttributeIndexFilteringIteratorTest.scala | Scala | apache-2.0 | 5,110 |
package signal
import org.scalatest.FunSuite
import org.scalatest.Matchers
import breeze.math._
class FFTTest extends FunSuite with Matchers {
import Comparisons._
test("apply an FFT to a sequence of Doubles") {
val x = List[Double](3, 5, 2, 8, 7, 9, 3, 1)
val hExpected = List[Complex]( // from Octave
38.0 + 0.0 * i,
-11.7782 - 1.1213 * i,
5.0 - 5.0 * i,
3.7782 - 3.1213 * i,
-8.0 + 0.0 * i,
3.7782 + 3.1213 * i,
5.0 + 5.0 * i,
-11.7782 + 1.1213 * i
)
val h = FFT.fft(x)
eqc(h, hExpected, 1e-4)
}
test("apply an inverse FFT") {
val xOriginal = List[Double](3, 5, 2, 8, 7, 9, 3, 1)
val h = FFT.fft(xOriginal)
val x = FFT.ifft(h)
eqc(x, xOriginal.map(Complex(_, 0.0)))
}
test("apply FFT to an even non-power-of-two length") {
val x = List[Double](1, 2, 3, 4)
val hExpected = List[Complex](10 + 0 * i, -2 + 2 * i, -2 + 0 * i, -2 - 2 * i)
val h = FFT.fft(x)
eqc(h, hExpected)
}
test("apply FFT to an odd non-power-of-two length") {
val x = List[Double](1, 6, 7, 3, 4, 9, 6, -5, -1)
val h = FFT.fft(x)
val hExpected = List[Complex]( // from Octave
30 + 0 * i,
-11.53849 - 12.00903 * i,
5.44743 - 16.80991 * i,
8.66025 * i,
-4.40895 + 2.99335 * i,
-4.40895 - 2.99335 * i,
-8.66025 * i,
5.44743 + 16.80991 * i,
-11.53849 + 12.00903 * i
)
eqc(h, hExpected, 1e-5)
}
test("apply FFT to an ECG phantom") {
eqc(FFT.fft(ECG.noisyecg), ECG.fft)
}
test("nextPow2") {
FFT.nextPow2(1) should be (2)
FFT.nextPow2(2) should be (2)
FFT.nextPow2(13) should be (16)
FFT.nextPow2(16) should be (16)
}
}
| lancelet/scalasignal | src/test/scala/signal/FFTTest.scala | Scala | lgpl-2.1 | 1,728 |
package com.twitter.finagle.oauth2
case class ProtectedResourceRequest(headers: Map[String, Seq[String]], params: Map[String, Seq[String]])
extends RequestBase(headers, params) {
def oauthToken: Option[String] = param("oauth_token")
def accessToken: Option[String] = param("access_token")
def requireAccessToken: String = requireParam("access_token")
}
| yonglehou/finagle-oauth2 | src/main/scala/com/twitter/finagle/oauth2/ProtectedResourceRequest.scala | Scala | apache-2.0 | 365 |
/*
* This file was copied almost verbatim from [1] to support cross version support for
* sbt 0.11.x.
*
* Changes:
* - removed `private[...]` modifiers
*
* [1] https://raw.github.com/sbt/sbt/c0b1bb51e63841be63c810a961031529b7be0072/util/cross/src/main/input_sources/CrossVersionUtil.scala
*/
package net.virtualvoid.sbt.cross
object CrossVersionUtil
{
val trueString = "true"
val falseString = "false"
val fullString = "full"
val noneString = "none"
val disabledString = "disabled"
val binaryString = "binary"
val TransitionScalaVersion = "2.10"
val TransitionSbtVersion = "0.12"
def isFull(s: String): Boolean = (s == trueString) || (s == fullString)
def isDisabled(s: String): Boolean = (s == falseString) || (s == noneString) || (s == disabledString)
def isBinary(s: String): Boolean = (s == binaryString)
def isSbtApiCompatible(v: String): Boolean = sbtApiVersion(v).isDefined
/** Returns sbt binary interface x.y API compatible with the given version string v.
* RCs for x.y.0 are considered API compatible.
* Compatible versions include 0.12.0-1 and 0.12.0-RC1 for Some(0, 12).
*/
def sbtApiVersion(v: String): Option[(Int, Int)] =
{
val ReleaseV = """(\d+)\.(\d+)\.(\d+)(-\d+)?""".r
val CandidateV = """(\d+)\.(\d+)\.(\d+)(-RC\d+)""".r
val NonReleaseV = """(\d+)\.(\d+)\.(\d+)(-\w+)""".r
v match {
case ReleaseV(x, y, z, ht) => Some((x.toInt, y.toInt))
case CandidateV(x, y, z, ht) => Some((x.toInt, y.toInt))
case NonReleaseV(x, y, z, ht) if z.toInt > 0 => Some((x.toInt, y.toInt))
case _ => None
}
}
def isScalaApiCompatible(v: String): Boolean = scalaApiVersion(v).isDefined
/** Returns Scala binary interface x.y API compatible with the given version string v.
* Compatible versions include 2.10.0-1 and 2.10.1-M1 for Some(2, 10), but not 2.10.0-RC1.
*/
def scalaApiVersion(v: String): Option[(Int, Int)] =
{
val ReleaseV = """(\d+)\.(\d+)\.(\d+)(-\d+)?""".r
val NonReleaseV = """(\d+)\.(\d+)\.(\d+)(-\w+)""".r
v match {
case ReleaseV(x, y, z, ht) => Some((x.toInt, y.toInt))
case NonReleaseV(x, y, z, ht) if z.toInt > 0 => Some((x.toInt, y.toInt))
case _ => None
}
}
val PartialVersion = """(\d+)\.(\d+)(?:\..+)?""".r
def partialVersion(s: String): Option[(Int,Int)] =
s match {
case PartialVersion(major, minor) => Some((major.toInt, minor.toInt))
case _ => None
}
def binaryScalaVersion(full: String): String = binaryVersionWithApi(full, TransitionScalaVersion)(scalaApiVersion)
def binarySbtVersion(full: String): String = binaryVersionWithApi(full, TransitionSbtVersion)(sbtApiVersion)
def binaryVersion(full: String, cutoff: String): String = binaryVersionWithApi(full, cutoff)(scalaApiVersion)
private[this] def isNewer(major: Int, minor: Int, minMajor: Int, minMinor: Int): Boolean =
major > minMajor || (major == minMajor && minor >= minMinor)
private[this] def binaryVersionWithApi(full: String, cutoff: String)(apiVersion: String => Option[(Int,Int)]): String =
{
def sub(major: Int, minor: Int) = major + "." + minor
(apiVersion(full), partialVersion(cutoff)) match {
case (Some((major, minor)), None) => sub(major, minor)
case (Some((major, minor)), Some((minMajor, minMinor))) if isNewer(major, minor, minMajor, minMinor) => sub(major, minor)
case _ => full
}
}
}
| jrudolph/sbt-cross-building | src/main/scala-sbt-0.11/net/virtualvoid/sbt/cross/CrossVersionUtil.scala | Scala | bsd-2-clause | 3,321 |
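The doc comments in CrossVersionUtil pin down which version strings count as API compatible. The assertions below are a hedged sketch of expected results, derived from those comments and the regexes above rather than taken from the project's own test suite:

import net.virtualvoid.sbt.cross.CrossVersionUtil

// Expected behaviour, following the doc comments and regexes above.
assert(CrossVersionUtil.sbtApiVersion("0.12.0-RC1") == Some((0, 12)))  // sbt RCs of x.y.0 are API compatible
assert(CrossVersionUtil.scalaApiVersion("2.10.0-RC1") == None)         // Scala RCs of x.y.0 are not
assert(CrossVersionUtil.scalaApiVersion("2.10.1-M1") == Some((2, 10))) // prereleases of x.y.z with z > 0 are
assert(CrossVersionUtil.binaryScalaVersion("2.11.8") == "2.11")        // at or after the 2.10 cutoff: "x.y"
assert(CrossVersionUtil.binaryScalaVersion("2.9.2") == "2.9.2")        // before the cutoff: full version
assert(CrossVersionUtil.binarySbtVersion("0.13.7") == "0.13")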
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.features
import scala.collection.JavaConverters._
import io.fabric8.kubernetes.api.model._
import org.mockito.MockitoAnnotations
import org.scalatest.{BeforeAndAfter, BeforeAndAfterEach}
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.k8s.{KubernetesConf, KubernetesExecutorSpecificConf, SparkPod}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.rpc.RpcEndpointAddress
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
class BasicExecutorFeatureStepSuite
extends SparkFunSuite with BeforeAndAfter with BeforeAndAfterEach {
private val APP_ID = "app-id"
private val DRIVER_HOSTNAME = "localhost"
private val DRIVER_PORT = 7098
private val DRIVER_ADDRESS = RpcEndpointAddress(
DRIVER_HOSTNAME,
DRIVER_PORT.toInt,
CoarseGrainedSchedulerBackend.ENDPOINT_NAME)
private val DRIVER_POD_NAME = "driver-pod"
private val DRIVER_POD_UID = "driver-uid"
private val RESOURCE_NAME_PREFIX = "base"
private val EXECUTOR_IMAGE = "executor-image"
private val LABELS = Map("label1key" -> "label1value")
private val ANNOTATIONS = Map("annotation1key" -> "annotation1value")
private val TEST_IMAGE_PULL_SECRETS = Seq("my-1secret-1", "my-secret-2")
private val TEST_IMAGE_PULL_SECRET_OBJECTS =
TEST_IMAGE_PULL_SECRETS.map { secret =>
new LocalObjectReferenceBuilder().withName(secret).build()
}
private val DRIVER_POD = new PodBuilder()
.withNewMetadata()
.withName(DRIVER_POD_NAME)
.withUid(DRIVER_POD_UID)
.endMetadata()
.withNewSpec()
.withNodeName("some-node")
.endSpec()
.withNewStatus()
.withHostIP("192.168.99.100")
.endStatus()
.build()
private var baseConf: SparkConf = _
before {
MockitoAnnotations.initMocks(this)
baseConf = new SparkConf()
.set(KUBERNETES_DRIVER_POD_NAME, DRIVER_POD_NAME)
.set(KUBERNETES_EXECUTOR_POD_NAME_PREFIX, RESOURCE_NAME_PREFIX)
.set(CONTAINER_IMAGE, EXECUTOR_IMAGE)
.set(KUBERNETES_DRIVER_SUBMIT_CHECK, true)
.set("spark.driver.host", DRIVER_HOSTNAME)
.set("spark.driver.port", DRIVER_PORT.toString)
.set(IMAGE_PULL_SECRETS, TEST_IMAGE_PULL_SECRETS.mkString(","))
.set("spark.kubernetes.resource.type", "java")
}
test("basic executor pod has reasonable defaults") {
val step = new BasicExecutorFeatureStep(
KubernetesConf(
baseConf,
KubernetesExecutorSpecificConf("1", Some(DRIVER_POD)),
RESOURCE_NAME_PREFIX,
APP_ID,
LABELS,
ANNOTATIONS,
Map.empty,
Map.empty,
Map.empty,
Nil,
hadoopConfSpec = None))
val executor = step.configurePod(SparkPod.initialPod())
// The executor pod name and default labels.
assert(executor.pod.getMetadata.getName === s"$RESOURCE_NAME_PREFIX-exec-1")
assert(executor.pod.getMetadata.getLabels.asScala === LABELS)
assert(executor.pod.getSpec.getImagePullSecrets.asScala === TEST_IMAGE_PULL_SECRET_OBJECTS)
// There is exactly 1 container with no volume mounts and default memory limits.
// Default memory limit is 1024M + 384M (minimum overhead constant).
assert(executor.container.getImage === EXECUTOR_IMAGE)
assert(executor.container.getVolumeMounts.isEmpty)
assert(executor.container.getResources.getLimits.size() === 1)
assert(executor.container.getResources
.getLimits.get("memory").getAmount === "1408Mi")
// The pod has no node selector, volumes.
assert(executor.pod.getSpec.getNodeSelector.isEmpty)
assert(executor.pod.getSpec.getVolumes.isEmpty)
checkEnv(executor, Map())
checkOwnerReferences(executor.pod, DRIVER_POD_UID)
}
test("executor pod hostnames get truncated to 63 characters") {
val conf = baseConf.clone()
val longPodNamePrefix = "loremipsumdolorsitametvimatelitrefficiendisuscipianturvixlegeresple"
val step = new BasicExecutorFeatureStep(
KubernetesConf(
conf,
KubernetesExecutorSpecificConf("1", Some(DRIVER_POD)),
longPodNamePrefix,
APP_ID,
LABELS,
ANNOTATIONS,
Map.empty,
Map.empty,
Map.empty,
Nil,
hadoopConfSpec = None))
assert(step.configurePod(SparkPod.initialPod()).pod.getSpec.getHostname.length === 63)
}
test("classpath and extra java options get translated into environment variables") {
val conf = baseConf.clone()
conf.set(org.apache.spark.internal.config.EXECUTOR_JAVA_OPTIONS, "foo=bar")
conf.set(org.apache.spark.internal.config.EXECUTOR_CLASS_PATH, "bar=baz")
val step = new BasicExecutorFeatureStep(
KubernetesConf(
conf,
KubernetesExecutorSpecificConf("1", Some(DRIVER_POD)),
RESOURCE_NAME_PREFIX,
APP_ID,
LABELS,
ANNOTATIONS,
Map.empty,
Map.empty,
Map("qux" -> "quux"),
Nil,
hadoopConfSpec = None))
val executor = step.configurePod(SparkPod.initialPod())
checkEnv(executor,
Map("SPARK_JAVA_OPT_0" -> "foo=bar",
ENV_CLASSPATH -> "bar=baz",
"qux" -> "quux"))
checkOwnerReferences(executor.pod, DRIVER_POD_UID)
}
test("test executor pyspark memory") {
val conf = baseConf.clone()
conf.set("spark.kubernetes.resource.type", "python")
conf.set(org.apache.spark.internal.config.PYSPARK_EXECUTOR_MEMORY, 42L)
val step = new BasicExecutorFeatureStep(
KubernetesConf(
conf,
KubernetesExecutorSpecificConf("1", Some(DRIVER_POD)),
RESOURCE_NAME_PREFIX,
APP_ID,
LABELS,
ANNOTATIONS,
Map.empty,
Map.empty,
Map.empty,
Nil,
hadoopConfSpec = None))
val executor = step.configurePod(SparkPod.initialPod())
// This is checking that basic executor + executorMemory = 1408 + 42 = 1450
assert(executor.container.getResources.getRequests.get("memory").getAmount === "1450Mi")
}
// There is always exactly one controller reference, and it points to the driver pod.
private def checkOwnerReferences(executor: Pod, driverPodUid: String): Unit = {
assert(executor.getMetadata.getOwnerReferences.size() === 1)
assert(executor.getMetadata.getOwnerReferences.get(0).getUid === driverPodUid)
assert(executor.getMetadata.getOwnerReferences.get(0).getController === true)
}
// Check that the expected environment variables are present.
private def checkEnv(executorPod: SparkPod, additionalEnvVars: Map[String, String]): Unit = {
val defaultEnvs = Map(
ENV_EXECUTOR_ID -> "1",
ENV_DRIVER_URL -> DRIVER_ADDRESS.toString,
ENV_EXECUTOR_CORES -> "1",
ENV_EXECUTOR_MEMORY -> "1g",
ENV_APPLICATION_ID -> APP_ID,
ENV_SPARK_CONF_DIR -> SPARK_CONF_DIR_INTERNAL,
ENV_EXECUTOR_POD_IP -> null) ++ additionalEnvVars
assert(executorPod.container.getEnv.size() === defaultEnvs.size)
val mapEnvs = executorPod.container.getEnv.asScala.map {
x => (x.getName, x.getValue)
}.toMap
assert(defaultEnvs === mapEnvs)
}
}
| ahnqirage/spark | resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/BasicExecutorFeatureStepSuite.scala | Scala | apache-2.0 | 7,961 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.parquet.hadoop.utils
import com.google.common.collect.{Maps, Sets}
import org.apache.parquet.hadoop.utils.Collections3
import org.apache.spark.SparkFunSuite
class Collections3Suite extends SparkFunSuite {
test("toSetMultiMap") {
val map = Maps.newHashMap[Int, Int]()
map.put(1, 1)
map.put(2, 2)
map.put(3, 3)
val ret = Collections3.toSetMultiMap(map)
assert(ret.size() == 3)
assert(ret.get(1).equals(Sets.newHashSet(1)))
assert(ret.get(2).equals(Sets.newHashSet(2)))
assert(ret.get(3).equals(Sets.newHashSet(3)))
}
}
| Intel-bigdata/OAP | oap-cache/oap/src/test/scala/org/apache/spark/sql/parquet/hadoop/utils/Collections3Suite.scala | Scala | apache-2.0 | 1,392 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.bg.test
import java.nio.file.{Files, Paths}
import java.util.Properties
import akka.actor.{ActorRef, ActorSystem}
import cmwell.bg.{CMWellBGActor, ShutDown}
import cmwell.common._
import cmwell.domain._
import cmwell.driver.Dao
import cmwell.fts._
import cmwell.irw.IRWService
import cmwell.util.FullBox
import cmwell.util.concurrent.SimpleScheduler.{schedule, scheduleFuture}
import cmwell.zstore.ZStore
import com.datastax.driver.core.ConsistencyLevel
import com.typesafe.config.{Config, ConfigFactory}
import com.typesafe.scalalogging.LazyLogging
import org.apache.kafka.clients.producer.{Callback, KafkaProducer, ProducerRecord, RecordMetadata}
import org.elasticsearch.action.admin.indices.delete.DeleteIndexRequest
import org.elasticsearch.common.unit.TimeValue
import org.joda.time.DateTime
import org.scalatest.OptionValues._
import org.scalatest._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, _}
import scala.io.Source
import scala.util.Random
/**
* Created by israel on 15/02/2016.
*/
@DoNotDiscover
class CmwellBGSpec extends AsyncFunSpec with BeforeAndAfterAll with Matchers with Inspectors with LazyLogging {
var kafkaProducer: KafkaProducer[Array[Byte], Array[Byte]] = _
var cmwellBGActor: ActorRef = _
var dao: Dao = _
var irwService: IRWService = _
var zStore:ZStore = _
var offsetsService:OffsetsService = _
var ftsServiceES: FTSServiceNew = _
var bgConfig: Config = _
var actorSystem: ActorSystem = _
val okToStartPromise = Promise[Unit]()
def sendToKafkaProducer(pRecord: ProducerRecord[Array[Byte], Array[Byte]]): Future[RecordMetadata] = {
val p = Promise[RecordMetadata]()
kafkaProducer.send(pRecord, new Callback {
override def onCompletion(metadata: RecordMetadata, exception: Exception): Unit = {
if(metadata ne null) p.success(metadata)
else p.failure(exception)
}
})
p.future
}
def executeAfterCompletion[T](f: Future[_], timeout: FiniteDuration = 5.minutes)(body: =>Future[T])(implicit ec: ExecutionContext): Future[T] = {
val p = Promise[T]()
f.onComplete(_ => p.tryCompleteWith(body))(ec)
if(timeout != Duration.Zero) {
schedule(timeout)(p.tryFailure(new Exception("timeout")))(ec)
}
p.future
}
override def beforeAll = {
val producerProperties = new Properties
producerProperties.put("bootstrap.servers", "localhost:9092")
producerProperties.put("key.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
producerProperties.put("value.serializer", "org.apache.kafka.common.serialization.ByteArraySerializer")
kafkaProducer = new KafkaProducer[Array[Byte], Array[Byte]](producerProperties)
Files.deleteIfExists(Paths.get("./target", "persist_topic-0.offset"))
Files.deleteIfExists(Paths.get("./target", "index_topic-0.offset"))
dao = Dao("Test", "data2")
irwService = IRWService.newIRW(dao, 25, true, 120.seconds)
zStore = ZStore(dao)
offsetsService = new ZStoreOffsetsService(zStore)
ftsServiceES = FailingFTSServiceMockup("es.test.yml", 2)
// ftsServiceES = FTSServiceNew("es.test.yml")
// wait for green status
ftsServiceES.client.admin().cluster()
.prepareHealth()
.setWaitForGreenStatus()
.setTimeout(TimeValue.timeValueMinutes(5))
.execute()
.actionGet()
// delete all existing indices
ftsServiceES.client.admin().indices().delete(new DeleteIndexRequest("_all"))
// load indices template
val indicesTemplate = Source.fromURL(this.getClass.getResource("/indices_template_new.json")).getLines.reduceLeft(_ + _)
ftsServiceES.client.admin().indices().preparePutTemplate("indices_template").setSource(indicesTemplate).execute().actionGet()
bgConfig = ConfigFactory.load
actorSystem = ActorSystem("cmwell-bg-test-system")
cmwellBGActor = actorSystem.actorOf(CMWellBGActor.props(0, bgConfig, irwService, ftsServiceES, zStore, offsetsService))
// scalastyle:off
println("waiting 10 seconds for all components to load")
// scalastyle:on
schedule(10.seconds){
okToStartPromise.success(())
}
super.beforeAll
}
describe("CmwellBG should") {
val useNewlyCreatedAsBaseInfoton = okToStartPromise.future.flatMap { _ =>
val pRecords = Seq.tabulate(20) { n =>
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test/baseInfoton/info$n",
dc = "dc",
indexTime = None,
fields = Some(Map("a" -> Set(FieldValue("b"), FieldValue("c")))))
val writeCommand = WriteCommand(infoton)
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
} :+ {
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test/baseInfoton/info19",
dc = "dc",
indexTime = None,
fields = Some(Map("a1" -> Set(FieldValue("b1"), FieldValue("c1")))))
val writeCommand = WriteCommand(infoton)
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
// send them all
val sendEm = Future.traverse(pRecords)(sendToKafkaProducer)
sendEm.flatMap{ _ =>
cmwell.util.concurrent.spinCheck(250.millis,true,30.seconds){
ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test/baseInfoton", true)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams,
sortParams = SortParam.indexTimeAscending,
withHistory = false,
withDeleted = false
)
}(_.total == 20)
}.map { searchResults =>
withClue(searchResults) {
searchResults.length should be(20)
}
}
}
def afterFirst[T](body: =>Future[T])(implicit ec: ExecutionContext): Future[T] = executeAfterCompletion(useNewlyCreatedAsBaseInfoton)(body)(ec)
//Assertions
val writeCommandsProccessing = afterFirst{
// prepare sequence of writeCommands
val writeCommands = Seq.tabulate(10) { n =>
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test1/info$n",
dc = "dc",
indexTime = None,
fields = Some(Map("country" -> Set(FieldValue("Egypt"), FieldValue("Israel")))))
WriteCommand(infoton)
}
// make kafka records out of the commands
val pRecords = writeCommands.map { writeCommand =>
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
// send them all
val sendEm = Future.traverse(pRecords)(sendToKafkaProducer)
sendEm.flatMap { recordMetaDataSeq =>
// logger.info(s"waiting for 5 seconds for $recordMetaDataSeq")
scheduleFuture(5.seconds){
irwService.readPathAsync("/cmt/cm/bg-test1/info1", ConsistencyLevel.QUORUM).map { infopt =>
infopt should not be empty
}
}
}
}
// val commandRefProcessing = okToStartPromise.future.flatMap{ _ =>
// // prepare sequence of writeCommands
// val writeCommands = Seq.tabulate(10) { n =>
// val infoton = ObjectInfoton(
// path = s"/cmt/cm/bg-test-zstore/info$n",
// dc = "dc",
// indexTime = None,
// fields = Some(Map("country" -> Set(FieldValue("Egypt"), FieldValue("Israel")))))
// WriteCommand(infoton)
// }
//
// val sentCommandRefs = Future.sequence(
// writeCommands.map{ wc =>
// zStore.put(wc.infoton.path, CommandSerializer.encode(wc)).flatMap{ _ =>
// val commandRef = CommandRef(wc.infoton.path)
// val commandBytes = CommandSerializer.encode(commandRef)
// val pRecord = new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
// Future {
// blocking {
// kafkaProducer.send(pRecord).get
// }
// }
// }
// }
// )
//
// sentCommandRefs.map{ commandsRefs =>
// scheduleFuture(5.seconds){
// Future.sequence {
// writeCommands.map { writeCommand =>
// irwService.readPathAsync(writeCommand.path, ConsistencyLevel.QUORUM).map { i => i should not be empty }
// }
// }
// }
// }
// }
//
val processDeletePathCommands = executeAfterCompletion(writeCommandsProccessing){
val deletePathCommand = DeletePathCommand("/cmt/cm/bg-test1/info0")
val serializedCommand = CommandSerializer.encode(deletePathCommand)
val pRecord = new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", serializedCommand)
sendToKafkaProducer(pRecord).flatMap { recordMetaData =>
scheduleFuture(3.seconds) {
irwService.readPathAsync("/cmt/cm/bg-test1/info0", ConsistencyLevel.QUORUM).map {
case FullBox(i: DeletedInfoton) => succeed
case somethingElse => fail(s"expected a deleted infoton, but got [$somethingElse] from irw (recoredMetaData: $recordMetaData).")
}
}
}
}
val parentsCreation = executeAfterCompletion(writeCommandsProccessing){
Future.traverse(Seq("/cmt/cm/bg-test1", "/cmt/cm", "/cmt")) { path =>
irwService.readPathAsync(path, ConsistencyLevel.QUORUM)
}.map { infopts =>
forAll(infopts) { infopt =>
infopt should not be empty
}
}
}
val indexAllInfotons = executeAfterCompletion(processDeletePathCommands){
scheduleFuture(3.seconds){
ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test1", true)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams,
sortParams = SortParam.empty
)
}.map { x =>
withClue(s"$x") {
x.total should equal(9)
}
}
}
val groupedWriteCommands = afterFirst{
val infotonPath = "/cmt/cm/bg-test/groupedWrites/info1"
val currentTime = System.currentTimeMillis()
val writeCommands = Seq.tabulate(20) { i =>
val infoton = ObjectInfoton(
path = infotonPath,
dc = "dc",
indexTime = None,
lastModified = new org.joda.time.DateTime(currentTime + i),
fields = Some(Map(s"f$i" -> Set(FieldValue(s"v$i"))))
)
WriteCommand(infoton)
}
// make kafka records out of the commands
val pRecords = writeCommands.map { writeCommand =>
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
// send them all
val sendEm = Future.traverse(pRecords)(sendToKafkaProducer)
sendEm.flatMap { recordMetaDataSeq =>
val expectedFields = Seq.tabulate(20) { i =>
s"f$i" -> Set(FieldValue(s"v$i"))
}.toMap
logger.info(s"waiting for 5 seconds for $recordMetaDataSeq")
cmwell.util.concurrent.unsafeRetryUntil[cmwell.util.Box[Infoton]]({ bi =>
bi.isDefined && bi.get.fields.fold(false)(_.size == 20)
}, 30, 1.second) {
irwService.readPathAsync(infotonPath, ConsistencyLevel.QUORUM)
}.flatMap { infopt =>
irwService.historyAsync(infotonPath, 20).flatMap { v =>
irwService.readUUIDSAsync(v.map(_._2)).map { histories =>
withClue(histories) {
infopt should not be empty
infopt.get.fields.get should contain theSameElementsAs expectedFields
}
}
}
}
}
}
val indexTimeAddedToNonOverrideCmds = afterFirst{
val writeCommands = Seq.tabulate(10) { n =>
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test/indexTime/info$n",
dc = "dc",
indexTime = None,
fields = Some(Map("a" -> Set(FieldValue("b"), FieldValue("c")))))
WriteCommand(infoton)
}
// make kafka records out of the commands
val pRecords = writeCommands.map { writeCommand =>
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
// send them all
val sendEm = Future.traverse(pRecords)(sendToKafkaProducer)
sendEm.flatMap{ recordeMetadataSeq =>
scheduleFuture(5.seconds) {
ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test/indexTime", true)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams,
sortParams = SortParam.indexTimeAscending,
withHistory = false,
withDeleted = false
)
}
}.flatMap { searchResults =>
val ftsSortedPaths = searchResults.infotons.map( _.path)
Future.traverse(searchResults.infotons){ i =>
irwService.readUUIDAsync(i.uuid)
}.map { irwResults =>
withClue(ftsSortedPaths, irwResults) {
val irwSortedPaths = irwResults.sortBy(_.get.indexTime.get).map(_.get.path)
ftsSortedPaths should contain theSameElementsInOrderAs irwSortedPaths
}
}
}
}
val markInfotonAsHistory = executeAfterCompletion(indexAllInfotons){
val writeCommand =
WriteCommand(ObjectInfoton("/cmt/cm/bg-test1/info1", "dc", None, Map("i" -> Set(FieldValue("phone")))))
val pRecord = new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", CommandSerializer.encode(writeCommand))
sendToKafkaProducer(pRecord).flatMap { recordMetadata =>
scheduleFuture(5.seconds) {
val f1 = ftsServiceES.search(
pathFilter = None,
fieldsFilter = Some(MultiFieldFilter(Must, Seq(
FieldFilter(Must, Equals, "system.path", "/cmt/cm/bg-test1/info1"),
FieldFilter(Must, Equals, "system.current", "false")))),
datesFilter = None,
paginationParams = DefaultPaginationParams,
withHistory = true
)
val f2 = ftsServiceES.search(
pathFilter = None,
fieldsFilter = Some(FieldFilter(Must, Equals, "system.path", "/cmt/cm/bg-test1/info1")),
datesFilter = None,
paginationParams = DefaultPaginationParams
)
val f3 = irwService.historyAsync("/cmt/cm/bg-test1/info1", 1000)
for {
ftsSearchResponse1 <- f1
ftsSearchResponse2 <- f2
irwHistoryResponse <- f3
} yield withClue(ftsSearchResponse1,ftsSearchResponse2,irwHistoryResponse) {
ftsSearchResponse1.infotons should have size 1
ftsSearchResponse2.infotons should have size 1
irwHistoryResponse should have size 2
}
}
}
}
// Ignored test position: to halt the test from this point on,
// we might want to depend on a different future than `okToStartPromise.future`,
// something more like `afterStopAndStartPromise.future`.
val processOverrideCommands = afterFirst{
val currentTime = System.currentTimeMillis()
val numOfInfotons = 10 //starting from 0 up to 9
val overrideCommands = Seq.tabulate(numOfInfotons) { n =>
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test3/info$n",
dc = "dc",
indexTime = Some(currentTime + n + 1),
new org.joda.time.DateTime(currentTime + n),
fields = Some(Map("pearls" -> Set(FieldValue("Ubuntu"), FieldValue("shmubuntu")))))
OverwriteCommand(infoton)
}
// make kafka records out of the commands
val pRecords = overrideCommands.map { writeCommand =>
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
// send them all
val sendEm = Future.traverse(pRecords)(sendToKafkaProducer)
sendEm.flatMap { recordMetaDataSeq =>
scheduleFuture(3000.millis)(ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test3", true)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams)).map { response =>
withClue(response, recordMetaDataSeq) {
forAll(response.infotons) { infoton =>
withClue(infoton) {
val l = infoton.path.takeRight(1).toLong + 1L
infoton.indexTime.value should be(currentTime + l)
}
}
}
}
}
}
val reProcessNotIndexedOWCommands = afterFirst{
val currentTime = System.currentTimeMillis()
val infotons = Seq.tabulate(5) {n =>
ObjectInfoton(
path = s"/cmt/cm/bg-test/re_process_ow/info_override",
dc = "dc",
indexTime = Some(currentTime + n*3),
lastModified = new DateTime(currentTime +n),
indexName = "cm_well_p0_0",
fields = Some(Map(s"a$n" -> Set(FieldValue(s"b$n"), FieldValue(s"c${n % 2}"))))
)
}
val owCommands = infotons.map{ i => OverwriteCommand(i)}
// make kafka records out of the commands
val pRecords = owCommands.map { writeCommand =>
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
// send them all
val sendEm = Future.traverse(pRecords)(sendToKafkaProducer)
sendEm.flatMap { _ =>
scheduleFuture(5.seconds) {
ftsServiceES.deleteInfotons(infotons).flatMap { _ =>
scheduleFuture(5.seconds) {
val sendAgain = Future.traverse(pRecords)(sendToKafkaProducer)
scheduleFuture(5.seconds) {
sendAgain.flatMap { _ =>
ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test/re_process_ow", true)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams,
sortParams = FieldSortParams(List(("system.indexTime" -> Desc))),
withHistory = false,
debugInfo = true
).map { res =>
withClue(res, res.infotons.head.lastModified.getMillis, currentTime) {
res.infotons.size should equal(1)
res.infotons.head.lastModified.getMillis should equal(currentTime + 4)
}
}
}
}
}
}
}
}
}
val notGroupingOverrideCommands = afterFirst{
val numOfInfotons = 15
val overrideCommands = Seq.tabulate(numOfInfotons) { n =>
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test/override_not_grouped/info_override",
dc = "dc",
indexTime = Some(Random.nextLong()),
fields = Some(Map(s"Version${n % 3}" -> Set(FieldValue(s"a$n"), FieldValue(s"b${n % 2}")))))
OverwriteCommand(infoton)
}
// make kafka records out of the commands
val pRecords = overrideCommands.map { writeCommand =>
val commandBytes = CommandSerializer.encode(writeCommand)
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
// send them all
val sendEm = Future.traverse(pRecords)(sendToKafkaProducer)
sendEm.flatMap { recordMetaDataSeq =>
scheduleFuture(10.seconds) {
ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test/override_not_grouped", false)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams,
withHistory = true,
debugInfo = true).map { res =>
withClue(res) {
res.total should equal(numOfInfotons)
}
}
}
}
}
val persistAndIndexLargeInfoton = afterFirst{
val lotsOfFields = Seq.tabulate(8000){ n =>
s"field$n" -> Set[FieldValue](FString(s"value$n"))
}.toMap
val fatFoton = ObjectInfoton("/cmt/cm/bg-test-fat/fatfoton1", "dcc", None, lotsOfFields)
// make kafka record out of the infoton
val pRecord = {
val commandBytes = CommandSerializer.encode(OverwriteCommand(fatFoton))
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
val sendIt = sendToKafkaProducer(pRecord)
sendIt.flatMap { recordMetaData =>
scheduleFuture(10.seconds) {
val readReply = irwService.readPathAsync("/cmt/cm/bg-test-fat/fatfoton1")
val searchReply = ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test-fat", true)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams
)
for{
r0 <- readReply
r1 <- searchReply
} yield withClue(r0, r1) {
r0 should not be empty
val paths = r1.infotons.map(_.path)
paths should contain("/cmt/cm/bg-test-fat/fatfoton1")
}
}
}
}
val deeplyNestedOverrideCommands = afterFirst{
val currentTime = System.currentTimeMillis()
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test4/deeply/nested/overwrite/infobj",
dc = "dc",
indexTime = Some(currentTime + 42),
new org.joda.time.DateTime(currentTime),
fields = Some(Map("whereTo" -> Set(FieldValue("The"), FieldValue("ATM!")))))
// make kafka record out of the infoton
val pRecord = {
val commandBytes = CommandSerializer.encode(OverwriteCommand(infoton))
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
val sendIt = sendToKafkaProducer(pRecord)
sendIt.flatMap { recordMetaData=>
scheduleFuture(10.seconds) {
val f0 = ftsServiceES.search(
pathFilter = Some(PathFilter("/cmt/cm/bg-test4", descendants = true)),
fieldsFilter = None,
datesFilter = None,
paginationParams = DefaultPaginationParams)
val f1 = irwService.readPathAsync("/cmt/cm/bg-test4", ConsistencyLevel.QUORUM)
val f2 = irwService.readPathAsync("/cmt/cm/bg-test4/deeply", ConsistencyLevel.QUORUM)
val f3 = irwService.readPathAsync("/cmt/cm/bg-test4/deeply/nested", ConsistencyLevel.QUORUM)
val f4 = irwService.readPathAsync("/cmt/cm/bg-test4/deeply/nested/overwrite", ConsistencyLevel.QUORUM)
val f5 = irwService.readPathAsync("/cmt/cm/bg-test4/deeply/nested/overwrite/infobj", ConsistencyLevel.QUORUM)
for {
r0 <- f0
r1 <- f1
r2 <- f2
r3 <- f3
r4 <- f4
r5 <- f5
} yield withClue(r0,r1,r2,r3,r4,r5,recordMetaData) {
val paths = r0.infotons.map(_.path)
paths shouldNot contain("/cmt/cm/bg-test4/deeply")
paths shouldNot contain("/cmt/cm/bg-test4/deeply/nested")
paths shouldNot contain("/cmt/cm/bg-test4/deeply/nested/overwrite")
paths should contain("/cmt/cm/bg-test4/deeply/nested/overwrite/infobj")
paths should have size 1
r1 shouldBe empty
r2 shouldBe empty
r3 shouldBe empty
r4 shouldBe empty
r5 should not be empty
}
}
}
}
val currentTime = System.currentTimeMillis()
def sendIt(i: Infoton): Future[RecordMetadata] = {
val pRecord = {
val commandBytes = CommandSerializer.encode(OverwriteCommand(i))
new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
}
sendToKafkaProducer(pRecord)
}
def verifyBgTest5() = {
val f0 = ftsServiceES.search(
pathFilter = None,
fieldsFilter = Some(FieldFilter(Must, Equals, "system.path", "/cmt/cm/bg-test5/infobj")),
datesFilter = None,
paginationParams = DefaultPaginationParams,
withHistory = false
)
val f1 = ftsServiceES.search(
pathFilter = None,
fieldsFilter = Some(MultiFieldFilter(Must, Seq(
FieldFilter(Must, Equals, "system.path", "/cmt/cm/bg-test5/infobj"),
FieldFilter(Must, Equals, "system.current", "false")))),
datesFilter = None,
paginationParams = DefaultPaginationParams,
withHistory = true
)
val f2 = irwService.readPathAsync("/cmt/cm/bg-test5/infobj", ConsistencyLevel.QUORUM)
val f3= irwService.historyAsync("/cmt/cm/bg-test5/infobj", 10)
for {
r0 <- f0
r1 <- f1
r2 <- f2
r3 <- f3
} yield (r0,r1,r2,r3)
}
def waitForIt(numOfVersionsToExpect: Int)(implicit ec: ExecutionContext): Future[FTSSearchResponse] = {
val startTime = System.currentTimeMillis()
def waitForItInner(): Future[FTSSearchResponse] = {
ftsServiceES.search(
pathFilter = None,
fieldsFilter = Some(FieldFilter(Must, Equals, "system.path", "/cmt/cm/bg-test5/infobj")),
datesFilter = None,
paginationParams = DefaultPaginationParams,
withHistory = true
)(ec,logger).flatMap { res =>
if (res.total >= numOfVersionsToExpect) Future.successful(res)
else if(System.currentTimeMillis() - startTime > 30000L) Future.failed(new IllegalStateException(s"Waited for over 30s, last res: ${res.toString}"))
else scheduleFuture(1.second)(waitForItInner())
}(ec)
}
waitForItInner()
}
val version3IngestAndVerify = afterFirst {
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test5/infobj",
dc = "dc",
indexTime = Some(currentTime + 42),
new org.joda.time.DateTime(currentTime),
fields = Some(Map("GoTo" -> Set(FieldValue("draw"), FieldValue("money")))))
sendIt(infoton).flatMap { recordMetaData =>
waitForIt(1).flatMap { ftsRes =>
verifyBgTest5().map {
case t@(currFTSRes, histFTSRes, currPathIRWBox, historiesIRW) => withClue(t -> ftsRes) {
val currPathIRW = currPathIRWBox.toOption
currFTSRes.total should be(1)
currFTSRes.infotons.head.indexTime.value should be(currentTime + 42)
histFTSRes.total should be(0)
currPathIRW shouldBe defined
currPathIRW.value.indexTime.value should be(currentTime + 42)
currPathIRW.value.uuid shouldEqual currFTSRes.infotons.head.uuid
historiesIRW should have size 1
}
}
}
}
}
val version1IngestAndVerify = executeAfterCompletion(version3IngestAndVerify){
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test5/infobj",
dc = "dc",
indexTime = Some(currentTime),
new org.joda.time.DateTime(currentTime - 20000),
fields = Some(Map("whereTo" -> Set(FieldValue("Techno"), FieldValue("Doron")))))
sendIt(infoton).flatMap { recordMetaData =>
waitForIt(2).flatMap { ftsRes =>
verifyBgTest5().map {
case t@(currFTSRes, histFTSRes, currPathIRWBox, historiesIRW) => withClue(t -> ftsRes) {
val currPathIRW = currPathIRWBox.toOption
currFTSRes.total should be(1)
currFTSRes.infotons.head.indexTime.value should be(currentTime + 42)
histFTSRes.total should be(1)
currPathIRW shouldBe defined
currPathIRW.value.indexTime.value should be(currentTime + 42)
currPathIRW.value.uuid shouldEqual currFTSRes.infotons.head.uuid
historiesIRW should have size 2
}
}
}
}
}
val version2IngestAndVerify = executeAfterCompletion(version1IngestAndVerify){
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test5/infobj",
dc = "dc",
indexTime = Some(currentTime + 128),
new org.joda.time.DateTime(currentTime - 10000),
fields = Some(Map("OK" -> Set(FieldValue("TO"), FieldValue("ATM!")))))
sendIt(infoton).flatMap { recordMetaData =>
waitForIt(3).flatMap { ftsRes =>
verifyBgTest5().map {
case t@(currFTSRes,histFTSRes,currPathIRWBox,historiesIRW) => withClue(t -> ftsRes) {
val currPathIRW = currPathIRWBox.toOption
currFTSRes.total should be(1)
currFTSRes.infotons.head.indexTime.value should be(currentTime + 42)
histFTSRes.total should be(2)
currPathIRW shouldBe defined
currPathIRW.value.indexTime.value should be(currentTime + 42)
currPathIRW.value.uuid shouldEqual currFTSRes.infotons.head.uuid
historiesIRW should have size 3
}
}
}
}
}
val version4IngestAndVerify = executeAfterCompletion(version2IngestAndVerify){
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test5/infobj",
dc = "dc",
indexTime = Some(currentTime + 23),
new org.joda.time.DateTime(currentTime + 10000),
fields = Some(Map("No" -> Set(FieldValue("U"), FieldValue("Go")),
"But" -> Set(FieldValue("I don't need the ATM.")))))
sendIt(infoton).flatMap { recordMetaData =>
scheduleFuture(5.seconds) {
waitForIt(4).flatMap { ftsRes =>
verifyBgTest5().map {
case t@(currFTSRes, histFTSRes, currPathIRWBox, historiesIRW) => withClue(t -> ftsRes) {
val currPathIRW = currPathIRWBox.toOption
currFTSRes.total should be(1)
currFTSRes.infotons.head.indexTime.value should be(currentTime + 23)
histFTSRes.total should be(3)
currPathIRW shouldBe defined
currPathIRW.value.indexTime.value should be(currentTime + 23)
currPathIRW.value.uuid shouldEqual currFTSRes.infotons.head.uuid
historiesIRW should have size 4
}
}
}
}
}
}
val version5IngestAndVerify = executeAfterCompletion(version4IngestAndVerify){
val infoton = ObjectInfoton(
path = s"/cmt/cm/bg-test5/infobj",
dc = "dc",
indexTime = Some(currentTime + 1729),
new org.joda.time.DateTime(currentTime + 20000),
fields = Some(Map("So" -> Set(FieldValue("Why you asked?")),
"Ummm" -> Set(FieldValue("I didn't...")))))
sendIt(infoton).flatMap { recordMetaData =>
waitForIt(5).flatMap { ftsRes =>
verifyBgTest5().map {
case t@(currFTSRes,histFTSRes,currPathIRWBox,historiesIRW) => withClue(t -> ftsRes) {
val currPathIRW = currPathIRWBox.toOption
currFTSRes.total should be(1)
currFTSRes.infotons.head.indexTime.value should be(currentTime + 1729)
histFTSRes.total should be(4)
currPathIRW shouldBe defined
currPathIRW.value.indexTime.value should be(currentTime + 1729)
currPathIRW.value.uuid shouldEqual currFTSRes.infotons.head.uuid
historiesIRW should have size 5
}
}
}
}
}
it("use the cache of newly created infotons as baseInfoton before merge")(useNewlyCreatedAsBaseInfoton)
it("process WriteCommands")(writeCommandsProccessing)
it("process DeletPathCommands")(processDeletePathCommands)
it("create parents")(parentsCreation)
it("index all processed infotons")(indexAllInfotons)
it("add index time to non override commands and update indexTime in Cassandra")(indexTimeAddedToNonOverrideCmds)
it("mark infoton as history if newer version is sent for it")(markInfotonAsHistory)
it("process WriteCommands containing fat infoton")(persistAndIndexLargeInfoton)
// ignore("continue from where it has stopped after stopping and starting it again"){
//
// // stop BG
// logger debug "sending stop message to cmwellBGActor"
// val stopReply = Await.result(ask(cmwellBGActor, Stop)(30.seconds).mapTo[Stopped.type], 30.seconds)
//
// val numOfInfotons = 8
// val writeCommands = Seq.tabulate(numOfInfotons) { n =>
// val infoton = ObjectInfoton(
// path = s"/cmt/cm/bg-test2/info$n",
// dc = "dc",
// indexTime = None,
// fields = Some(Map("food" -> Set(FieldValue("Malabi"), FieldValue("Brisket")))))
// WriteCommand(infoton)
// }
//
// // make kafka records out of the commands
// val pRecords = writeCommands.map { writeCommand =>
// val commandBytes = CommandSerializer.encode(writeCommand)
// new ProducerRecord[Array[Byte], Array[Byte]]("persist_topic", commandBytes)
// }
//
// // send them all
// pRecords.foreach(kafkaProducer.send)
//
// // restart bg
// val startReply = Await.result(ask(cmwellBGActor, Start)(10.seconds).mapTo[Started.type], 10.seconds)
//
// Thread.sleep(5000)
//
// for (i <- 0 until numOfInfotons) {
// val readPath = Await.result(irwService.readPathAsync(s"/cmt/cm/bg-test2/info$i", ConsistencyLevel.QUORUM), 5.seconds)
// withClue(readPath) {
// readPath should not be empty
// }
// }
//
// Await.result(ftsServiceES.search(
// pathFilter = Some(PathFilter("/cmt/cm/bg-test2", true)),
// fieldsFilter = None,
// datesFilter = None,
// paginationParams = DefaultPaginationParams
// ), 5.seconds).total should equal(numOfInfotons)
//
// }
it("re process OW commands even if were not indexed at first")(reProcessNotIndexedOWCommands)
it("process OverrideCommands correctly by keeping its original indexTime and not generating a new one")(processOverrideCommands)
// scalastyle:off
it("process group of writecommands in short time while keeping all fields (in case of the data being splitted to several versions, last version must contain all data)")(groupedWriteCommands)
// scalastyle:on
it("process OverrideCommands correctly by not grouping same path commands together for merge")(notGroupingOverrideCommands)
it("process OverrideCommands correctly by creating parents if needed")(deeplyNestedOverrideCommands)
describe("process OverrideCommands correctly by keeping history in correct order") {
it("while ingesting version 3 first, and verifying version 3 is current")(version3IngestAndVerify)
it("and then ingesting version 1 and verifying version 1 is history, while version 3 stays current")(version1IngestAndVerify)
// scalastyle:off
it("and then ingesting version 2, which is history but with newer indexTime, and verifying version 2&1 are history, while version 3 stays current")(version2IngestAndVerify)
// scalastyle:on
it("and then ingesting version 4 with older indexTime and verifying version 1-3 are history, while version 4 became current")(version4IngestAndVerify)
it("and then ingesting version 5 and verifying version 1-4 are history, while version 5 became current")(version5IngestAndVerify)
}
// describe("not generate duplicates, no matter how many consecutive updates occur on same path") {
//
// }
}
override def afterAll() = {
cmwellBGActor ! ShutDown
Thread.sleep(10000)
ftsServiceES.shutdown()
irwService = null
}
}
| hochgi/CM-Well | server/cmwell-bg/src/test/scala/cmwell/bg/test/CmwellBGSpec.scala | Scala | apache-2.0 | 36,547 |
package org.hammerlab.bam.spark.compare
import org.hammerlab.bam.test.resources.TestBams
import org.hammerlab.cli.app.MainSuite
import org.hammerlab.test.matchers.lines.Line._
class TimeLoadTest
extends MainSuite(TimeLoad)
with TestBams {
test("1.bam 230k") {
checkFirstLines(
"-m", "230k",
bam1
)(
l"spark-bam first-read collection time: $d",
"",
"spark-bam collected 3 partitions' first-reads",
"hadoop-bam threw an exception:",
l"org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 0.0 failed 1 times, most recent failure: Lost task 1.0 in stage 0.0 (TID $d, localhost, executor driver): htsjdk.samtools.SAMFormatException: SAM validation error: ERROR: Record 1, Read name , MRNM should not be set for unpaired read."
)
}
test("1.bam 240k") {
checkAllLines(
"-m", "240k",
bam1
)(
l"spark-bam first-read collection time: $d",
l"hadoop-bam first-read collection time: $d",
"",
"All 3 partition-start reads matched",
""
)
}
}
|
ryan-williams/spark-bam
|
cli/src/test/scala/org/hammerlab/bam/spark/compare/TimeLoadTest.scala
|
Scala
|
apache-2.0
| 1,083 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.tail
import cats.effect.laws.discipline.{Parameters => EffectParameters}
import minitest.SimpleTestSuite
import minitest.api.IgnoredException
import minitest.laws.Checkers
import monix.eval.TestUtils
import monix.execution.internal.Platform
import monix.execution.schedulers.TestScheduler
import org.scalacheck.Prop
import org.scalacheck.Test.Parameters
import org.typelevel.discipline.Laws
import scala.concurrent.duration._
/** Just a marker for what we need to extend in the tests
* of `monix-tail`.
*/
trait BaseLawsSuite extends SimpleTestSuite
with Checkers with ArbitraryInstances with TestUtils {
override lazy val checkConfig: Parameters =
Parameters.default
.withMinSuccessfulTests(if (Platform.isJVM) 100 else 10)
.withMaxDiscardRatio(if (Platform.isJVM) 5.0f else 50.0f)
.withMaxSize(24)
lazy val slowConfig: Parameters =
Parameters.default
.withMinSuccessfulTests(10)
.withMaxDiscardRatio(50.0f)
.withMaxSize(6)
// Stack-safety tests are very taxing, so reducing burden
implicit val effectParams =
EffectParameters.default.copy(
stackSafeIterationsCount = {
if (Platform.isJS || System.getenv("TRAVIS") == "true" || System.getenv("CI") == "true")
100
else
1000
})
def checkAllAsync(name: String, config: Parameters = checkConfig)
(f: TestScheduler => Laws#RuleSet): Unit = {
val s = TestScheduler()
var catchErrors = true
try {
val ruleSet = f(s)
catchErrors = false
for ((id, prop: Prop) ← ruleSet.all.properties)
test(s"$name.$id") {
s.tick(1.day)
silenceSystemErr(check(prop, config))
}
} catch {
case e: IgnoredException if catchErrors =>
test(name) { throw e }
}
}
val emptyRuleSet: Laws#RuleSet =
new Laws { val ref = new DefaultRuleSet("dummy", None) }.ref
}
| Wogan/monix | monix-tail/shared/src/test/scala/monix/tail/BaseLawsSuite.scala | Scala | apache-2.0 | 2,592 |
package com.mycompany.scalcium.names
import java.io.File
import java.io.FileInputStream
import org.apache.commons.io.IOUtils
import opennlp.tools.namefind.NameFinderME
import opennlp.tools.namefind.TokenNameFinderModel
import scala.Array.canBuildFrom
import com.mycompany.scalcium.tokenizers.Tokenizer
class OpenNLPNameFinder extends NameFinder {
val ModelDir = "src/main/resources/opennlp/models"
val tokenizer = Tokenizer.getTokenizer("opennlp")
val personME = buildME("en_ner_person.bin")
val orgME = buildME("en_ner_organization.bin")
override def find(sentences: List[String]):
List[List[(String,Int,Int)]] = {
sentences.map(sentence =>
find(personME, "PERSON", sentence) ++
find(orgME, "ORGANIZATION", sentence))
}
def find(finder: NameFinderME, tag: String,
doc: List[String]):
List[List[(String,Int,Int)]] = {
try {
doc.map(sent => find(finder, tag, sent))
} finally {
clear(finder)
}
}
def find(finder: NameFinderME, tag: String, sent: String):
List[(String,Int,Int)] = {
val words = tokenizer.wordTokenize(sent)
.toArray
finder.find(words).map(span => {
val start = span.getStart()
val end = span.getEnd()
val coffsets = charOffset(start, end, words)
(tag, coffsets._1, coffsets._2)
}).toList
}
def clear(finder: NameFinderME): Unit = finder.clearAdaptiveData()
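// Converts a token-index span [wbegin, wend) into character offsets, assuming the
// sentence is reconstructed by joining the tokens with single spaces; note that
// indexOf returns the first occurrence, so a repeated phrase maps to its earliest match.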
def charOffset(wbegin: Int, wend: Int, words: Array[String]):
(Int,Int) = {
val nstring = words.slice(wbegin, wend)
.mkString(" ")
val sentence = words.mkString(" ")
val cbegin = sentence.indexOf(nstring)
val cend = cbegin + nstring.length()
(cbegin, cend)
}
def buildME(model: String): NameFinderME = {
var pfin: FileInputStream = null
try {
pfin = new FileInputStream(new File(ModelDir, model))
new NameFinderME(new TokenNameFinderModel(pfin))
} finally {
IOUtils.closeQuietly(pfin)
}
}
}
|
sujitpal/scalcium
|
src/main/scala/com/mycompany/scalcium/names/OpenNLPNameFinder.scala
|
Scala
|
apache-2.0
| 2,030 |
import sbt._
class MathMLParserFooProject(info: ProjectInfo) extends DefaultProject(info) {
val mavenLocal = "Local Maven Repository" at "file://"+Path.userHome+"/.m2/repository"
val bryanjswift = "Bryan J Swift Repository" at "http://repos.bryanjswift.com/maven2/"
val junitInterface = "com.novocode" % "junit-interface" % "0.8" % "test"
val usefullScalaStuff = "UsefullScalaStuff" % "UsefullScalaStuff" % "0.1" % "compile"
}
// vim: set ts=4 sw=4 et:
|
alexmsmartins/WikiModels
|
wm_math_parser/project/build/Maven.scala
|
Scala
|
mit
| 465 |
package slamdata.engine.physical.mongodb
import slamdata.engine.fp._
import scala.collection.immutable.ListMap
import scalaz._
import Scalaz._
import Liskov._
package object optimize {
object pipeline {
import ExprOp._
import PipelineOp._
def get0(leaves: List[BsonField.Leaf], rs: List[Reshape]): Option[ExprOp \/ Reshape] = {
(leaves, rs) match {
case (_, Nil) => Some(-\/ (BsonField(leaves).map(DocVar.ROOT(_)).getOrElse(DocVar.ROOT())))
case (Nil, r :: rs) => inlineProject(r, rs).map(\/- apply)
case (l :: ls, r :: rs) => r.get(l).flatMap {
case -\/(d @ DocVar(_, _)) => get0(d.path ++ ls, rs)
case -\/(e) =>
if (ls.isEmpty) fixExpr(rs, e).map(-\/ apply) else None
case \/- (r) => get0(ls, r :: rs)
}
}
}
private def fixExpr(rs: List[Reshape], e: ExprOp): Option[ExprOp] = {
type OptionTramp[X] = OptionT[Free.Trampoline, X]
def lift[A](o: Option[A]): OptionTramp[A] = OptionT(o.point[Free.Trampoline])
(e.mapUpM[OptionTramp] {
case ref @ DocVar(_, _) =>
lift {
get0(ref.path, rs).flatMap(_.fold(Some.apply, _ => None))
}
}).run.run
}
def inlineProject(r: Reshape, rs: List[Reshape]): Option[Reshape] = {
type MapField[X] = ListMap[BsonField, X]
val p = Project(r)
val map = Traverse[MapField].sequence(ListMap(p.getAll: _*).map { case (k, v) =>
k -> (v match {
case d @ DocVar(_, _) => get0(d.path, rs)
case e => fixExpr(rs, e).map(-\/ apply)
})
})
map.map(vs => p.empty.setAll(vs).shape)
}
def inlineGroupProjects(g: WorkflowOp.GroupOp): Option[WorkflowOp.GroupOp] = {
import ExprOp._
val (rs, src) = g.src.collectShapes
type MapField[X] = ListMap[BsonField.Leaf, X]
val grouped = Traverse[MapField].sequence(ListMap(g.getAll: _*).map { t =>
val (k, v) = t
k -> (v match {
case AddToSet(e) =>
fixExpr(rs, e) flatMap {
case d @ DocVar(_, _) => Some(AddToSet(d))
case _ => None
}
case Push(e) =>
fixExpr(rs, e) flatMap {
case d @ DocVar(_, _) => Some(Push(d))
case _ => None
}
case First(e) => fixExpr(rs, e).map(First(_))
case Last(e) => fixExpr(rs, e).map(Last(_))
case Max(e) => fixExpr(rs, e).map(Max(_))
case Min(e) => fixExpr(rs, e).map(Min(_))
case Avg(e) => fixExpr(rs, e).map(Avg(_))
case Sum(e) => fixExpr(rs, e).map(Sum(_))
})
})
val by = g.by.fold(e => fixExpr(rs, e).map(-\/ apply), r => inlineProject(r, rs).map(\/- apply))
(grouped |@| by)((grouped, by) => WorkflowOp.GroupOp(src, Grouped(grouped), by))
}
}
}
|
mossprescott/quasar
|
src/main/scala/slamdata/engine/physical/mongodb/optimize/optimize.scala
|
Scala
|
agpl-3.0
| 2,903 |
/*
* Potigol
* Copyright (C) 2005 Leonardo Lucena
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/**
* _____ _ _ _
* | __ \\ | | (_) | |
* | |__) |__ | |_ _ __ _ ___ | |
* | ___/ _ \\| __| |/ _` |/ _ \\| |
* | | | (_) | |_| | (_| | (_) | |
* |_| \\___/ \\__|_|\\__, |\\___/|_|
* __/ |
* |___/
*
* @author Leonardo Lucena ([email protected])
*/
package br.edu.ifrn.potigol
import com.twitter.util.Eval
import scala.util.{ Try, Success, Failure }
class Compilador(val debug: Boolean = false) {
def executar(code: String) = {
if (debug)
imprimirCodigo(code)
avaliar(code) match {
case Success(_) =>
print("\b\b\b\b\b\b\b\b\b\b \b\b\b\b\b\b\b\b\b\b"); (new Eval).apply[Unit](code)
case Failure(f) => println(codigoErro(code, f.getMessage))
case _ => println("erro")
}
}
def avaliar(code: String) = {
Try {
(new Eval).check(code)
}
}
def linhaErro(code: String) = {
avaliar(code) match {
case Success(_) => 0
case Failure(f) =>
val linhaScala = f.getMessage.split(": ")(1).split(" ")(1).toInt
code.split('\n').take(linhaScala).reverse.toList.dropWhile { x => !x.contains("/*Codigo") }.head.split(" ")(1).toInt;
}
}
def codigoErro(code: String, erro: String) = {
val partes = erro.split(": ")
val err = partes(2)
val linha = partes(1).split(" ")(1).toInt
val msg = err match {
case "not found" if debug => s"${code} - ${erro}"
case "not found" => "Valor não encontrado"
case a if debug => a
case _ => "Erro desconhecido"
}
msg + "\nlinha: " + linha
}
def imprimirCodigo(code: String) {
val linhas = code.split('\n')
println()
for (line <- linhas.zipWithIndex) {
println(s"${(line._2 + 1).formatted("%4d")} | ${line._1}")
}
}
}
object Comp extends App {
(new Compilador()).executar("println(1+2)\nval a = readInt\nprintln(b)")
}
|
loiane/Potigol
|
src/br/edu/ifrn/potigol/Compilador.scala
|
Scala
|
gpl-2.0
| 2,731 |
package leo.modules.agent.rules.control_rules
import leo.datastructures.AnnotatedClause
import leo.datastructures.blackboard.{DataStore, DataType, Delta, Result}
import leo.modules.SZSException
import leo.modules.output.SZS_Error
import scala.collection.mutable
case object Unify extends DataType[AnnotatedClause]{
override def convert(d: Any): AnnotatedClause = d match {
case c : AnnotatedClause => c
case _ => throw new SZSException(SZS_Error, s"Expected AnnotatedClause, but got $d")
}
}
/**
* Stores formulas that are candidates for unification during
* the algorithm execution in [[leo.modules.control.Control]].
*/
class UnificationSet extends DataStore{
private final val set : mutable.Set[AnnotatedClause] = mutable.HashSet[AnnotatedClause]()
/**
* Gets the set of clauses queued for unification.
* The returned set is immutable.
*
* @return Set of clauses queued for unification
*/
def get : Set[AnnotatedClause] = synchronized(set.toSet)
override def isEmpty: Boolean = synchronized(set.isEmpty)
/**
* This method returns all Types stored by this data structure.
*
* @return all stored types
*/
override val storedTypes: Seq[DataType[Any]] = Seq(Unify)
/**
*
* Inserts all results produced by an agent into the data structure.
*
* @param r - A result inserted into the data structure
*/
override def updateResult(r: Delta): Delta = synchronized {
val ins1 = r.inserts(Unify)
val del1 = r.removes(Unify)
val (del2, ins2) = r.updates(Unify).unzip
val ins = (ins1 ++ ins2).iterator
val del = (del1 ++ del2).iterator
val delta = Result()
while(del.hasNext){
val c = del.next()
if(set.remove(c)) delta.remove(Unify)(c)
}
while(ins.hasNext) {
val c = ins.next()
if(set.add(c)) delta.insert(Unify)(c)
}
delta
}
/**
* Removes everything from the data structure.
* After this call the ds should behave as if it was newly created.
*/
override def clear(): Unit = synchronized(set.clear())
/**
* Returns a list of all stored data.
*
* @param t
* @return
*/
override def get[T](t: DataType[T]): Set[T] = t match{
case Unify => synchronized(set.toSet.asInstanceOf[Set[T]])
case _ => Set()
}
override def toString: String = "UnificationSet"
}
|
lex-lex/Leo-III
|
oldsrc/main/scala/leo/modules/agent/rules/control_rules/UnificationSet.scala
|
Scala
|
bsd-3-clause
| 2,338 |
package de.sciss.mutagentx
case class FeatureExtractionFailed(cause: Throwable) extends Exception(cause)
|
Sciss/MutagenTx
|
src/main/scala/de/sciss/mutagentx/FeatureExtractionFailed.scala
|
Scala
|
gpl-3.0
| 105 |
import sbt._
object Dependencies {
val resolutionRepos = Seq(
"Sonatype Releases" at "https://oss.sonatype.org/content/repositories/releases",
"Sonatype Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
)
def compile(deps: ModuleID*): Seq[ModuleID] = deps map (_ % "compile")
def provided(deps: ModuleID*): Seq[ModuleID] = deps map (_ % "provided")
def test(deps: ModuleID*): Seq[ModuleID] = deps map (_ % "test")
def runtime(deps: ModuleID*): Seq[ModuleID] = deps map (_ % "runtime")
def container(deps: ModuleID*): Seq[ModuleID] = deps map (_ % "container")
object Ver {
val lift = "3.0-M6"
val lift_edition = "3.0"
// val jetty = "9.2.2.v20140723"
val jetty = "8.1.16.v20140903"
val akka = "2.4-M3"
}
// Lift
val liftWebkit = "net.liftweb" %% "lift-webkit" % Ver.lift
val liftMongodb = "net.liftweb" %% "lift-mongodb-record" % Ver.lift
// Jetty
val jettyWebapp = "org.eclipse.jetty" % "jetty-webapp" % Ver.jetty
val jettyPlus = "org.eclipse.jetty" % "jetty-plus" % Ver.jetty
val servlet = "javax.servlet" % "javax.servlet-api" % "3.0.1"
// Misc
val logback = "ch.qos.logback" % "logback-classic" % "1.1.2"
val scalatest = "org.scalatest" %% "scalatest" % "2.2.1"
// HTM
val htmjava = "org.numenta" % "htm.java" % "0.6.3-sSNAPSHOT"
val moclu = "htm-model-cluster" % "htm-model-cluster_2.11" % "0.1.31"
// Akka
val akkaCluster = "com.typesafe.akka" %% "akka-cluster" % Ver.akka
val akkaContrib = "com.typesafe.akka" %% "akka-contrib" % Ver.akka
val akkaMultiNodeTestkit = "com.typesafe.akka" %% "akka-multi-node-testkit" % Ver.akka
}
|
antidata/htm-moclu
|
moclu-http/project/Dependencies.scala
|
Scala
|
agpl-3.0
| 1,646 |
package com.gilt.thehand.rules.comparison
import com.gilt.thehand.rules.conversions.ConvertsTo
import com.gilt.thehand.rules.SingleValueRule
/**
* A rule that tests greater-than-or-equal.
*/
trait GreaterThanEqual extends SingleValueRule { self: ConvertsTo =>
/**
* Simple greater-than-or-equal test for this rule.
* @param v the value to test against this rule's `value`
* @return true if `v` is greater than or equal to `value`
*/
def matchInnerType(v: InnerType) = v.asInstanceOf[Ordered[InnerType]] >= value
}
|
gilt/the-hand
|
src/main/scala/com/gilt/thehand/rules/comparison/GreaterThanEqual.scala
|
Scala
|
apache-2.0
| 440 |
import ru.finagram._
import ru.finagram.api._
import com.twitter.util._
object LongPollingExample extends App {
val token = ""
val handler = (update: Update) => Future(println(update))
val server = new PollingServer(token, handler)
server.run()
.onSuccess {
case lastOffset =>
println(s"Last received offset is $lastOffset")
}
.onFailure {
case e: PollingException =>
println(s"Error occurred when receiving update from offset ${e.offset}: $e")
}
// let the server work for some time
Thread.sleep(13000)
// and then close the server
Await.result(server.close())
}
|
finagram/finagram
|
src/examples/LongPolingExample.scala
|
Scala
|
mit
| 594 |
package scutil.lang
object implicits extends implicits
trait implicits
extends extensions
with syntax
with instances
with literals
|
ritschwumm/scutil
|
modules/core/src/main/scala/scutil/lang/implicits.scala
|
Scala
|
bsd-2-clause
| 137 |
// Copyright (C) 2011 Dmitri Nikulin
//
// This file is part of Vijil.
//
// Vijil is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Vijil is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Vijil. If not, see <http://www.gnu.org/licenses/>.
//
// Repository: https://github.com/dnikulin/vijil
// Email: [email protected]
package com.dnikulin.vijil.text
import net.liftweb.json._
import net.liftweb.json.Serialization._
import org.junit.Test
import org.junit.Assert._
import com.dnikulin.vijil.tools.ArrSeq
class TestTextFile {
implicit val formats = DefaultFormats
val name = """Text "name"."""
val hash = """Text "hash"."""
val data = """Text "data"."""
@Test
def testNestedJSON() {
val tag1 = Tag("foo1", "bar1")
val tag2 = Tag("foo2", "bar2")
val tag3 = Tag("title", name)
val span1 = new TextSpan(data, hash, 3, 4, ArrSeq(tag2, tag1))
val span2 = new TextSpan(data, hash, 1, 6, ArrSeq(tag1), ArrSeq(span1))
val note1 = new TextNote(7, "lab3l", "b0dy")
val text1 = new TextFile(data, hash, ArrSeq(tag2, tag3, tag1), ArrSeq(span1, span2), ArrSeq(note1))
// Must correctly extract name from tags.
assertEquals(name, text1.name)
val json1 = text1.toJson
assertEquals(
"""{"data":"Text \"data\".","hash":"Text \"hash\".",""" +
""""tags":[["foo2","bar2"],["title","Text \"name\"."],["foo1","bar1"]]""" +
""","spans":[[3,4,[["foo2","bar2"],["foo1","bar1"]],[]],""" +
"""[1,6,[["foo1","bar1"]],[[3,4,[["foo2","bar2"],["foo1","bar1"]],[]]]]]""" +
""","notes":[[7,"lab3l","b0dy"]]}""",
json1
)
val text2 = TextFile.fromJson(json1).get
// Must form distinct objects.
assertNotSame(text1, text2 )
assertNotSame(text1.hash, text2.hash )
assertNotSame(text1.data, text2.data )
assertNotSame(text1.spans, text2.spans)
assertNotSame(text1.tags, text2.tags )
// Must preserve strings exactly.
assertEquals(text1.hash, text2.hash)
assertEquals(text1.data, text2.data)
// Must encode and decode without changes.
val json2 = text2.toJson
assertEquals(json1, json2)
}
}
|
dnikulin/vijil
|
src/test/scala/com/dnikulin/vijil/text/TestTextFile.scala
|
Scala
|
agpl-3.0
| 2,649 |
package com.warningrc.test.scalatest
/**
* Created by warning on 2016/1/27.
*/
object PartialAppliedFunction {
def main(args: Array[String]) {
val array = Array(1, 2, 3, 4, 5, 6)
array.foreach(println _)
def sum(x: Int, y: Int, z: Int): Int = x + y + z
println(sum(1, 2, 3))
val sum_a = sum _
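// Illustrative addition: the partially applied function can be invoked later
// by supplying all remaining arguments.
println(sum_a(4, 5, 6))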
}
}
|
warningrc/learn-java
|
scala-test/src/test/scala/com/warningrc/test/scalatest/PartialAppliedFunction.scala
|
Scala
|
apache-2.0
| 330 |
package org.jetbrains.plugins.scala.lang.psi.stubs.elements
import com.intellij.lang.ASTNode
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{StubElement, StubInputStream, StubOutputStream}
import com.intellij.util.io.StringRef.fromString
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.imports.ScImportStmtImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.ScImportStmtStub
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScImportStmtStubImpl
/**
* User: Alexander Podkhalyuzin
* Date: 18.06.2009
*/
class ScImportStmtElementType extends ScStubElementType[ScImportStmtStub, ScImportStmt]("import statement") {
override def serialize(stub: ScImportStmtStub, dataStream: StubOutputStream): Unit = {
dataStream.writeName(stub.importText)
}
override def deserialize(dataStream: StubInputStream, parentStub: StubElement[_ <: PsiElement]): ScImportStmtStub =
new ScImportStmtStubImpl(parentStub, this,
importTextRef = dataStream.readName)
override def createStubImpl(statement: ScImportStmt, parentStub: StubElement[_ <: PsiElement]): ScImportStmtStub =
new ScImportStmtStubImpl(parentStub, this,
importTextRef = fromString(statement.getText))
override def createElement(node: ASTNode): ScImportStmt = new ScImportStmtImpl(node)
override def createPsi(stub: ScImportStmtStub): ScImportStmt = new ScImportStmtImpl(stub)
}
|
gtache/intellij-lsp
|
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScImportStmtElementType.scala
|
Scala
|
apache-2.0
| 1,479 |
class TestArray (inputs: Array[Int]){
def printArray = for(input <- inputs) println(input)
}
|
matt-bentley/SubsetSumProcessor
|
SubsetSumProcessor/Scala/src/main/scala/Test.scala
|
Scala
|
gpl-3.0
| 102 |
package org.jetbrains.plugins.scala.lang.psi.api
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiElement
import org.jetbrains.plugins.scala.lang.psi.controlFlow.{ScControlFlowPolicy, Instruction}
import org.jetbrains.plugins.scala.lang.psi.controlFlow.impl.{ScalaControlFlowBuilder, AllVariablesControlFlowPolicy}
import scala.collection.mutable
/**
* Represents elements with control flow cached
* @author ilyas
*/
trait ScControlFlowOwner extends ScalaPsiElement {
private val myControlFlowCache = mutable.Map[ScControlFlowPolicy, Seq[Instruction]]()
private def buildControlFlow(scope: Option[ScalaPsiElement], policy: ScControlFlowPolicy = AllVariablesControlFlowPolicy) = {
val builder = new ScalaControlFlowBuilder(null, null, policy)
scope match {
case Some(elem) =>
val controlflow = builder.buildControlflow(elem)
myControlFlowCache += (policy -> controlflow)
controlflow
case None => Seq.empty
}
}
def getControlFlow(cached: Boolean, policy: ScControlFlowPolicy = AllVariablesControlFlowPolicy): Seq[Instruction] = {
if (!cached || !myControlFlowCache.contains(policy)) buildControlFlow(controlFlowScope, policy)
else myControlFlowCache(policy)
}
def controlFlowScope: Option[ScalaPsiElement]
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/psi/api/ScControlFlowOwner.scala
|
Scala
|
apache-2.0
| 1,286 |
/*
* Copyright (c) 2017 sadikovi
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.github.sadikovi.riff
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
import com.github.sadikovi.testutil.UnitTestSuite
class TypeSpecSuite extends UnitTestSuite {
test("initialize type spec 1") {
val field = StructField("col", IntegerType)
val (indexed, pos, origPos) = (false, 0, 1)
val spec = new TypeSpec(field, indexed, pos, origPos)
spec.field() should be (field)
spec.dataType() should be (field.dataType)
spec.isIndexed should be (indexed)
spec.position() should be (pos)
spec.origSQLPos() should be (origPos)
}
test("initialize type spec 2") {
val field = StructField("col", StringType)
val (indexed, pos, origPos) = (true, 10, 4)
val spec = new TypeSpec(field, indexed, pos, origPos)
spec.field() should be (field)
spec.dataType() should be (field.dataType)
spec.isIndexed should be (indexed)
spec.position() should be (pos)
spec.origSQLPos() should be (origPos)
}
test("check toString") {
val field = StructField("col", StringType)
val (indexed, pos, origPos) = (true, 10, 4)
val spec = new TypeSpec(field, indexed, pos, origPos)
val expected = s"TypeSpec(${field.name}: ${field.dataType.simpleString}, indexed=$indexed, " +
s"position=$pos, origPos=$origPos)"
spec.toString should be (expected)
}
test("equals") {
val field = StructField("col", StringType)
val (indexed, pos, origPos) = (true, 1, 3)
val spec = new TypeSpec(field, indexed, pos, origPos)
spec.equals(spec) should be (true)
val spec1 = new TypeSpec(field, indexed, pos, origPos);
spec.equals(spec1) should be (true)
val spec2 = new TypeSpec(field, false, pos, origPos);
spec.equals(spec2) should be (false)
val spec3 = new TypeSpec(field, true, pos + 1, origPos);
spec.equals(spec3) should be (false)
val spec4 = new TypeSpec(field, true, pos, origPos + 1);
spec.equals(spec4) should be (false)
}
test("hashCode") {
val field = StructField("col", StringType)
val (indexed, pos, origPos) = (true, 1, 3)
val spec = new TypeSpec(field, indexed, pos, origPos)
spec.hashCode() should be (818348067)
val spec1 = new TypeSpec(field, false, pos, origPos);
assert(spec1.hashCode != spec.hashCode)
}
test("write/read spec into external stream 1") {
val field = StructField("col", StringType)
val (indexed, pos, origPos) = (true, 1, 3)
val spec1 = new TypeSpec(field, indexed, pos, origPos)
val out = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(out)
oos.writeObject(spec1)
val in = new ByteArrayInputStream(out.toByteArray)
val ois = new ObjectInputStream(in)
val spec2 = ois.readObject().asInstanceOf[TypeSpec]
spec2 should be (spec1)
spec2.isIndexed() should be (true)
spec2.position() should be (1)
spec2.origSQLPos() should be (3)
}
test("write/read spec into external stream 2") {
val field = StructField("field",
StructType(StructField("col", IntegerType, true) :: Nil))
val (indexed, pos, origPos) = (false, 5, 2)
val spec1 = new TypeSpec(field, indexed, pos, origPos)
val out = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(out)
oos.writeObject(spec1)
val in = new ByteArrayInputStream(out.toByteArray)
val ois = new ObjectInputStream(in)
val spec2 = ois.readObject().asInstanceOf[TypeSpec]
spec2 should be (spec1)
spec2.isIndexed() should be (false)
spec2.position() should be (5)
spec2.origSQLPos() should be (2)
}
}
|
sadikovi/riff
|
format/src/test/scala/com/github/sadikovi/riff/TypeSpecSuite.scala
|
Scala
|
mit
| 4,807 |
/*
* Copyright 2010-2014 Benjamin Lings
* Author: Thomas Suckow
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.codingwell.scalaguice
import com.google.inject.multibindings.Multibinder
import com.google.inject.{Binder, Key, Module, TypeLiteral}
import java.lang.annotation.Annotation
import java.util.{Set => JSet}
import net.codingwell.scalaguice.ScalaModule.ScalaLinkedBindingBuilder
import scala.collection.{immutable => im}
import scala.reflect.ClassTag
import scala.reflect.runtime.universe.TypeTag
/**
* Analog to Guice's Multibinder
*
* Use ScalaMultibinder.newSetBinder to create a multibinder that is scala friendly.
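*
* A minimal usage sketch (the module and the bound values below are illustrative,
* not part of this API):
* {{{
*   class ExampleModule extends com.google.inject.AbstractModule with ScalaModule {
*     override def configure(): Unit = {
*       val strings = ScalaMultibinder.newSetBinder[String](binder)
*       strings.addBinding.toInstance("alpha")
*       strings.addBinding.toInstance("beta")
*       // The aggregated scala.collection.immutable.Set[String] can then be injected.
*     }
*   }
* }}}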
*/
sealed trait ScalaMultibinder[T] {
/**
* Configures the bound set to silently discard duplicate elements. When multiple equal values are
* bound, the one that gets included is arbitrary. When multiple modules contribute elements to
* the set, this configuration option impacts all of them.
*
* @return this multibinder
* @since 3.0
*/
def permitDuplicates(): ScalaMultibinder[T]
/**
* Returns a binding builder used to add a new element in the set. Each
* bound element must have a distinct value. Bound providers will be
* evaluated each time the set is injected.
*
* <p>It is an error to call this method without also calling one of the
* `to` methods on the returned binding builder.
*
* <p>Scoping elements independently is supported. Use the `in` method
* to specify a binding scope.
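*
* For example (sketch, with an illustrative instance):
* {{{
*   multibinder.addBinding.toInstance("element")
* }}}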
*/
def addBinding: ScalaLinkedBindingBuilder[T]
}
object ScalaMultibinder {
/** Preferred Scala Methods */
/**
* Returns a new multibinder that collects instances of type `T` in a [[scala.collection.immutable.Set]] that is
* itself bound with no binding annotation.
*/
def newSetBinder[T: TypeTag](binder: Binder): ScalaMultibinder[T] = {
newMultibinder(binder, typeLiteral[T])
}
/**
* Returns a new multibinder that collects instances of type `T` in a [[scala.collection.immutable.Set]] that is
* itself bound with a binding annotation `Ann`.
*/
def newSetBinder[T: TypeTag, Ann <: Annotation : ClassTag](binder: Binder): ScalaMultibinder[T] = {
newMultibinder[T, Ann](binder, typeLiteral[T], cls[Ann])
}
/**
* Returns a new multibinder that collects instances of type `T` in a [[scala.collection.immutable.Set]] that is
* itself bound with a binding annotation.
*/
def newSetBinder[T: TypeTag](binder: Binder, annotation: Annotation): ScalaMultibinder[T] = {
newMultibinder(binder, typeLiteral[T], annotation)
}
/** Methods Compatible w/Guice's API */
/**
* Returns a new multibinder that collects instances of `typ` in a [[scala.collection.immutable.Set]] that is
* itself bound with no binding annotation.
*/
def newSetBinder[T](binder: Binder, typ: TypeLiteral[T]): ScalaMultibinder[T] = {
newMultibinder(binder, typ)
}
/**
* Returns a new multibinder that collects instances of `typ` in a [[scala.collection.immutable.Set]] that is
* itself bound with no binding annotation. Note that `typ` is ignored in favor of using the `T` TypeTag to capture
* type arguments.
*/
def newSetBinder[T: TypeTag](binder: Binder, typ: Class[T]): ScalaMultibinder[T] = {
newMultibinder(binder, typeLiteral[T])
}
/**
* Returns a new multibinder that collects instances of `typ` in a [[scala.collection.immutable.Set]] that is
* itself bound with a binding annotation.
*/
def newSetBinder[T: TypeTag](binder: Binder, typ: TypeLiteral[T], annotation: Annotation): ScalaMultibinder[T] = {
newMultibinder(binder, typ, annotation)
}
/**
* Returns a new multibinder that collects instances of `typ` in a [[scala.collection.immutable.Set]] that is
* itself bound with a binding annotation. Note that `typ` is ignored in favor of using the TypeTag to capture
* type arguments.
*/
def newSetBinder[T: TypeTag](binder: Binder, typ: Class[T], annotation: Annotation): ScalaMultibinder[T] = {
newMultibinder(binder, typeLiteral[T], annotation)
}
/**
* Returns a new multibinder that collects instances of `typ` in a [[scala.collection.immutable.Set]] that is
* itself bound with a binding annotation.
*/
def newSetBinder[T](binder: Binder, typ: TypeLiteral[T], annotation: Class[_ <: Annotation]): ScalaMultibinder[T] = {
newMultibinder(binder, typ, annotation)
}
/**
* Returns a new multibinder that collects instances of `typ` in a [[scala.collection.immutable.Set]] that is
* itself bound with a binding annotation. Note that `typ` is ignored in favor of using the TypeTag to capture
* type arguments.
*/
def newSetBinder[T: TypeTag](binder: Binder, typ: Class[T], annotation: Class[_ <: Annotation]): ScalaMultibinder[T] = {
newMultibinder(binder, typeLiteral[T], annotation)
}
/** Implementation Details */
private def newMultibinder[T](parentBinder: Binder, typ: TypeLiteral[T]): ScalaMultibinder[T] = {
val binder = skipSources(parentBinder)
val jMultibinder = Multibinder.newSetBinder(binder, typ)
newMultibinder(binder, jMultibinder, Key.get(typ))
}
private def newMultibinder[T](parentBinder: Binder, typ: TypeLiteral[T], annotation: Annotation): ScalaMultibinder[T] = {
val binder = skipSources(parentBinder)
val jMultibinder = Multibinder.newSetBinder(binder, typ, annotation)
newMultibinder(binder, jMultibinder, Key.get(typ, annotation))
}
private def newMultibinder[T, Ann <: Annotation](parentBinder: Binder, typ: TypeLiteral[T], annotationType: Class[Ann]): ScalaMultibinder[T] = {
val binder = skipSources(parentBinder)
val jMultibinder = Multibinder.newSetBinder(binder, typ, annotationType)
newMultibinder(binder, jMultibinder, Key.get(typ, annotationType))
}
private def newMultibinder[T](binder: Binder, parent: Multibinder[T], key: Key[T]): ScalaMultibinder[T] = {
val result = new RealScalaMultibinder[T](parent, key)
binder.install(result)
result
}
private def skipSources(binder: Binder): Binder = {
binder.skipSources(
ScalaMultibinder.getClass,
classOf[ScalaMultibinder[_]],
classOf[RealScalaMultibinder[_]]
)
}
/**
* Analog to the Guice's [[com.google.inject.multibindings.Multibinder.RealMultibinder]]
*
* As a Module, the [[RealScalaMultibinder]] installs the binding to the set itself. As a module, this implements
* `equals()` and `hashCode()` in order to trick Guice into executing its `configure` method only once. That makes
* it so that multiple binders can be created for the same target collection, but only one is bound. The binding maps
* the [[java.util.Set]] to a [[im.Set]] for useful Scala injection.
*/
private class RealScalaMultibinder[T](parent: Multibinder[T], key: Key[T]) extends ScalaMultibinder[T] with Module {
private val setKey = key.ofType(wrap[im.Set].around(key.getTypeLiteral))
private[this] val setName = nameOf(setKey)
def addBinding: ScalaLinkedBindingBuilder[T] = new ScalaLinkedBindingBuilder[T] {
val self = parent.addBinding()
}
def permitDuplicates(): ScalaMultibinder[T] = {
parent.permitDuplicates
this
}
def getJavaMultibinder: Multibinder[T] = {
parent
}
def configure(binder: Binder): Unit = {
binder.bind(setKey).toProvider(new SetProvider(key.ofType(wrap[JSet].around(key.getTypeLiteral))))
}
/** Trick Guice into installing this Module once; be careful to not use the jSetKey. */
override def equals(o: Any): Boolean = o match {
case o: RealScalaMultibinder[_] => o.setKey == setKey
case _ => false
}
override def hashCode: Int = {
setKey.hashCode
}
override def toString: String = {
(if (setName.isEmpty) "" else setName + " ") + "ScalaMultibinder<" + key.getTypeLiteral + ">"
}
}
}
|
codingwell/scala-guice
|
src/main/scala/net/codingwell/scalaguice/ScalaMultibinder.scala
|
Scala
|
apache-2.0
| 8,404 |
package ecommerce.sales.app
import akka.kernel.Bootable
import com.typesafe.config.Config
import ecommerce.sales.{HttpService, SalesReadFrontConfiguration}
class SalesReadFrontApp extends Bootable {
override def systemName = "sales-read-front"
def startup() = {
new SalesReadFrontConfiguration {
override def config: Config = SalesReadFrontApp.this.config
import httpService._
system.actorOf(HttpService.props(interface, port, askTimeout), "http-service")
}
}
}
|
pawelkaczor/ddd-leaven-akka-v2
|
sales/read-front/src/main/scala/ecommerce/sales/app/SalesReadFrontApp.scala
|
Scala
|
mit
| 504 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.stream
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.config.ExecutionConfigOptions
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.flink.table.types.logical.{BigIntType, IntType, VarCharType}
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Test}
import java.sql.Timestamp
import java.time.Duration
@RunWith(classOf[Parameterized])
class ExplainTest(extended: Boolean) extends TableTestBase {
private val extraDetails = if (extended) {
Array(ExplainDetail.CHANGELOG_MODE, ExplainDetail.ESTIMATED_COST)
} else {
Array.empty[ExplainDetail]
}
private val util = streamTestUtil()
util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
util.addDataStream[(Int, Long, String)]("MyTable1", 'a, 'b, 'c)
util.addDataStream[(Int, Long, String)]("MyTable2", 'd, 'e, 'f)
val STRING = VarCharType.STRING_TYPE
val LONG = new BigIntType()
val INT = new IntType()
@Before
def before(): Unit = {
util.tableEnv.getConfig
.set(ExecutionConfigOptions.TABLE_EXEC_RESOURCE_DEFAULT_PARALLELISM, Int.box(4))
}
@Test
def testExplainTableSourceScan(): Unit = {
util.verifyExplain("SELECT * FROM MyTable", extraDetails:_*)
}
@Test
def testExplainDataStreamScan(): Unit = {
util.verifyExplain("SELECT * FROM MyTable1", extraDetails:_*)
}
@Test
def testExplainWithFilter(): Unit = {
util.verifyExplain("SELECT * FROM MyTable1 WHERE mod(a, 2) = 0", extraDetails:_*)
}
@Test
def testExplainWithAgg(): Unit = {
util.verifyExplain("SELECT COUNT(*) FROM MyTable1 GROUP BY a", extraDetails:_*)
}
@Test
def testExplainWithJoin(): Unit = {
util.verifyExplain("SELECT a, b, c, e, f FROM MyTable1, MyTable2 WHERE a = d", extraDetails:_*)
}
@Test
def testExplainWithUnion(): Unit = {
util.verifyExplain("SELECT * FROM MyTable1 UNION ALL SELECT * FROM MyTable2", extraDetails:_*)
}
@Test
def testExplainWithSort(): Unit = {
util.verifyExplain("SELECT * FROM MyTable1 ORDER BY a LIMIT 5", extraDetails:_*)
}
@Test
def testExplainWithSingleSink(): Unit = {
val table = util.tableEnv.sqlQuery("SELECT * FROM MyTable1 WHERE a > 10")
val appendSink = util.createAppendTableSink(Array("a", "b", "c"), Array(INT, LONG, STRING))
util.verifyExplainInsert(table, appendSink, "appendSink", extraDetails: _*)
}
@Test
def testExplainWithMultiSinks(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
val table = util.tableEnv.sqlQuery("SELECT a, COUNT(*) AS cnt FROM MyTable1 GROUP BY a")
util.tableEnv.registerTable("TempTable", table)
val table1 = util.tableEnv.sqlQuery("SELECT * FROM TempTable WHERE cnt > 10")
val upsertSink1 = util.createUpsertTableSink(Array(0), Array("a", "cnt"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"upsertSink1", upsertSink1)
stmtSet.addInsert("upsertSink1", table1)
val table2 = util.tableEnv.sqlQuery("SELECT * FROM TempTable WHERE cnt < 10")
val upsertSink2 = util.createUpsertTableSink(Array(0), Array("a", "cnt"), Array(INT, LONG))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"upsertSink2", upsertSink2)
stmtSet.addInsert("upsertSink2", table2)
util.verifyExplain(stmtSet, extraDetails: _*)
}
@Test
def testMiniBatchIntervalInfer(): Unit = {
val stmtSet = util.tableEnv.createStatementSet()
// Test that the emit latency propagates among RelNodeBlocks
util.addDataStream[(Int, String, Timestamp)]("T1", 'id1, 'text, 'rowtime.rowtime)
util.addDataStream[(Int, String, Int, String, Long, Timestamp)](
"T2", 'id2, 'cnt, 'name, 'goods, 'rowtime.rowtime)
util.addTableWithWatermark("T3", util.tableEnv.from("T1"), "rowtime", 0)
util.addTableWithWatermark("T4", util.tableEnv.from("T2"), "rowtime", 0)
util.tableEnv.getConfig
.set(ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ENABLED, Boolean.box(true))
util.tableEnv.getConfig
.set(
ExecutionConfigOptions.TABLE_EXEC_MINIBATCH_ALLOW_LATENCY,
Duration.ofSeconds(3))
val table = util.tableEnv.sqlQuery(
"""
|SELECT id1, T3.rowtime AS ts, text
| FROM T3, T4
|WHERE id1 = id2
| AND T3.rowtime > T4.rowtime - INTERVAL '5' MINUTE
| AND T3.rowtime < T4.rowtime + INTERVAL '3' MINUTE
""".stripMargin)
util.tableEnv.registerTable("TempTable", table)
val table1 = util.tableEnv.sqlQuery(
"""
|SELECT id1, LISTAGG(text, '#')
|FROM TempTable
|GROUP BY id1, TUMBLE(ts, INTERVAL '8' SECOND)
""".stripMargin)
val appendSink1 = util.createAppendTableSink(Array("a", "b"), Array(INT, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink1", appendSink1)
stmtSet.addInsert("appendSink1", table1)
val table2 = util.tableEnv.sqlQuery(
"""
|SELECT id1, LISTAGG(text, '*')
|FROM TempTable
|GROUP BY id1, HOP(ts, INTERVAL '12' SECOND, INTERVAL '6' SECOND)
""".stripMargin)
val appendSink2 = util.createAppendTableSink(Array("a", "b"), Array(INT, STRING))
util.tableEnv.asInstanceOf[TableEnvironmentInternal].registerTableSinkInternal(
"appendSink2", appendSink2)
stmtSet.addInsert("appendSink2", table2)
util.verifyExplain(stmtSet, extraDetails: _*)
}
}
object ExplainTest {
@Parameterized.Parameters(name = "extended={0}")
def parameters(): java.util.Collection[Boolean] = {
java.util.Arrays.asList(true, false)
}
}
|
xccui/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/stream/ExplainTest.scala
|
Scala
|
apache-2.0
| 6,612 |
package net.spals.appbuilder.message.kinesis.consumer
import net.spals.appbuilder.annotations.service.AutoBindFactory
import net.spals.appbuilder.config.message.MessageConsumerConfig
import net.spals.appbuilder.message.core.MessageConsumerCallback
import net.spals.appbuilder.model.core.ModelSerializer
/**
* @author tkral
*/
@AutoBindFactory
trait KinesisConsumerRecordProcessorFactory {
def createRecordProcessor(consumerCallbacks: Map[Class[_], MessageConsumerCallback[_]],
consumerConfig: MessageConsumerConfig,
modelSerializer: ModelSerializer): KinesisConsumerRecordProcessor
}
|
timkral/appbuilder
|
message-kinesis/src/main/scala/net/spals/appbuilder/message/kinesis/consumer/KinesisConsumerRecordProcessorFactory.scala
|
Scala
|
bsd-3-clause
| 649 |
package cromwell
import cromwell.core.{JobKey, WorkflowId}
package object jobstore {
implicit class EnhancedJobKey(val jobKey: JobKey) extends AnyVal {
def toJobStoreKey(workflowId: WorkflowId): JobStoreKey = JobStoreKey(workflowId, jobKey.node.fullyQualifiedName, jobKey.index, jobKey.attempt)
}
}
|
ohsu-comp-bio/cromwell
|
engine/src/main/scala/cromwell/jobstore/package.scala
|
Scala
|
bsd-3-clause
| 309 |
/*
*
* This file is part of BlueScale.
*
* BlueScale is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* BlueScale is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with BlueScale. If not, see <http://www.gnu.org/licenses/>.
*
* Copyright Vincent Marquez 2010
*
*
* Please contact us at www.BlueScale.org
*
*/
package org.bluescale.server
import javax.servlet.ServletException
import javax.servlet.http.HttpServlet
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpServletResponse
import java.io._
import org.mortbay.jetty.servlet.ServletHandler
import org.mortbay.jetty.servlet.ServletHandler
import org.mortbay.jetty.handler.ContextHandlerCollection
import org.mortbay.jetty.Server
import org.mortbay.jetty.servlet.Context
import org.mortbay.jetty.servlet.ServletHolder
import org.mortbay.jetty.bio.SocketConnector
import org.mortbay.jetty.handler.AbstractHandler
import org.bluescale.telco.api._
import org.bluescale.telco.jainsip.SipTelcoServer
import java.net._
import scala.xml._
import org.bluescale.blueml._
import scala.collection.JavaConversions._
class WebServer(apiPort:Int,
adminPort:Int,
telcoServer:TelcoServer,
callbackUrl:String) {
private val wserver = new Server()
val engine = new Engine(telcoServer, callbackUrl)
telcoServer.setIncomingCallback( conn => engine.handleIncomingCall(callbackUrl,conn) )
telcoServer.setUnjoinCallback( (unjoiner, conn) => engine.handleUnjoin(callbackUrl, unjoiner, conn) )
telcoServer.setRegisterCallback((registerInfo) => engine.handleRegisterRequest(callbackUrl,registerInfo))
// We don't care about the disconnect callbacks as much as the conversation callbacks.
//telcoServer.setDisconnectedCallback( conn => engine.handleDisconnect(callbackUrl, conn) )
initWebServer()
def initWebServer() {
val apiConnector = new SocketConnector()
apiConnector.setPort(apiPort)
wserver.setConnectors( List(apiConnector).toArray )
val context = new Context(wserver, "/", Context.SESSIONS)
context.addServlet( new ServletHolder( new CallServlet(telcoServer,engine) ), "/*" )
}
def start() =
wserver.start()
def stop() =
wserver.stop()
}
class CallServlet(telcoServer:TelcoServer,
engine:Engine) extends HttpServlet {
override def doGet(request:HttpServletRequest, response:HttpServletResponse) = {
val arr = request.getPathInfo().split("/")
println("Path = " + request.getPathInfo() )
printParams(request)
var status = HttpServletResponse.SC_OK
try {
if (arr(1).equals("Calls") && arr.length == 2) {
engine.newCall( request.getParameter("To"),
request.getParameter("From"),
request.getParameter("Url"))
} else if (arr(1).equals("Calls") && arr.length > 3) {
val callid = arr(2)
val action = arr(3) match {
case "Hangup" => new Hangup(request.getParameter("Url"))
case "Play" => new Play(0, request.getParameter("MediaUrl"), None, request.getParameter("Url"))
case "Hold" => new Hold()
case _ => status = HttpServletResponse.SC_BAD_REQUEST
null
}
println("action = "+ action)
engine.modifyCall(callid, action)
}
} catch {
case ex:Exception =>
ex.printStackTrace()
status = HttpServletResponse.SC_INTERNAL_SERVER_ERROR
}
response.setContentType("text/xml") //XML
response.setStatus(status)
response.getWriter().flush()
response.getWriter().close()
}
override def doPost(request:HttpServletRequest, response:HttpServletResponse) {
doGet(request, response)
}
protected def printParams(request:HttpServletRequest) =
request.getParameterNames.foreach( name => println(" " + name + " = " + request.getParameter(name.toString()) ) )
//{case (key, value) => println( " " + key + " = " + value.toString()) })
}
|
BlueScale/BlueScale
|
src/main/scala/org/bluescale/server/WebServer.scala
|
Scala
|
agpl-3.0
| 4,765 |
package cortex.db
import java.sql.{SQLException, DriverManager}
import cortex.util.log
import scalikejdbc._
/**
*/
object SqlDB {
// initialize JDBC driver & connection pool
Class.forName("org.h2.Driver")
implicit val session = AutoSession
var isInitialized = false
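// The tuple below is (host address, database name, username, password),
// matching how _1.._4 are used in the JDBC URL and credentials.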
def initialize(dbAddressUsernameAndPassword: (String, String, String, String)) = {
GlobalSettings.loggingSQLAndTime = LoggingSQLAndTimeSettings(
enabled = false,
singleLineMode = false,
printUnprocessedStackTrace = false,
stackTraceDepth = 15,
logLevel = 'error,
warningEnabled = false,
warningThresholdMillis = 3000L,
warningLogLevel = 'warn
)
ConnectionPool.singleton(
s"jdbc:mysql://${dbAddressUsernameAndPassword._1}/${dbAddressUsernameAndPassword._2}",
dbAddressUsernameAndPassword._3,
dbAddressUsernameAndPassword._4
)
try {
val connection = DriverManager.getConnection(
s"jdbc:mysql://${dbAddressUsernameAndPassword._1}",
dbAddressUsernameAndPassword._3,
dbAddressUsernameAndPassword._4
)
connection.prepareStatement(
s"CREATE DATABASE IF NOT EXISTS ${dbAddressUsernameAndPassword._2}"
).executeUpdate()
connection.prepareStatement(
s"use ${dbAddressUsernameAndPassword._2}"
).execute()
isInitialized = true
} catch {
case e: SQLException => log e s"Failed to initialize SqlDB: ${e.getMessage}"
}
}
}
|
jsflax/cortex
|
src/main/scala/cortex/db/SqlDB.scala
|
Scala
|
mit
| 1,481 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.data
/**
* Contains the validation API used by `Form`.
*
* For example, to define a custom constraint:
* {{{
* val negative = Constraint[Int] {
* case i if i < 0 => Valid
* case _ => Invalid("Must be a negative number.")
* }
* }}}
*/
package object validation
|
jyotikamboj/container
|
pf-framework/src/play-datacommons/src/main/scala/play/api/data/validation/package.scala
|
Scala
|
mit
| 373 |
package latis.dm
import latis.dm.implicits._
import org.junit._
import Assert._
import com.typesafe.scalalogging.LazyLogging
import latis.dm._
import latis.metadata.Metadata
import latis.time.Time
import latis.writer.AsciiWriter
import latis.writer.Writer
import latis.data.Data
import latis.data.SampledData
import latis.data.seq.DataSeq
class TestOrdering {
@Test
def integer = {
val list: List[Integer] = List(Integer(2), Integer(3), Integer(1))
//val lsit2 = list.sorted
/*
* diverging implicit expansion for type scala.math.Ordering[latis.dm.Integer]
* starting with method comparatorToOrdering in trait LowPriorityOrderingImplicits
*/
val list2 = list.sorted(Integer(0)) //note, we are passing the Integer as an explicit Ordering (LATIS-532)
val list3 = list2.map(i => i match {case Integer(n) => n})
assertEquals(1, list3(0))
assertEquals(2, list3(1))
assertEquals(3, list3(2))
}
@Test
def real = {
val list: List[Real] = List(Real(2.2), Real(3.3), Real(1.1))
val list2 = list.sorted(Real(0))
val list3 = list2.map(i => i match {case Real(n) => n})
assertEquals(1.1, list3(0), 0.0)
assertEquals(2.2, list3(1), 0.0)
assertEquals(3.3, list3(2), 0.0)
}
@Test
def text = {
val list: List[Text] = List(Text("b"), Text("c"), Text("a"))
val list2 = list.sorted(Text(""))
val list3 = list2.map(i => i match {case Text(n) => n})
assertEquals("a", list3(0))
assertEquals("b", list3(1))
assertEquals("c", list3(2))
}
@Test
def time_as_text = {
val list = List(Text("2015-02-01T15:00:00"), Text("2015-02-01T15:00:01"), Text("2015-01-01"))
val list2 = list.sorted(Text(""))
val list3 = list2.map(i => i match {case Text(n) => n})
assertEquals("2015-01-01", list3(0))
assertEquals("2015-02-01T15:00:00", list3(1))
assertEquals("2015-02-01T15:00:01", list3(2))
}
}
|
dlindhol/LaTiS
|
src/test/scala/latis/dm/TestOrdering.scala
|
Scala
|
epl-1.0
| 1,923 |
package api
import asobu.distributed.{CustomRequestExtractorDefinition, PredefinedDefs, RequestExtractorDefinition}
import asobu.dsl.RequestExtractor
import play.api.mvc.RequestHeader
import shapeless.HNil
import asobu.dsl.extractors.AuthInfoExtractorBuilder
import scala.concurrent.{ExecutionContext, Future}
object authentication {
case object Authenticated extends CustomRequestExtractorDefinition[String]
}
|
kailuowang/asobu
|
example/api/src/main/scala/api/authentication.scala
|
Scala
|
apache-2.0
| 418 |
/*
* Copyright (c) 2011, Owen Stephens
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of Owen Stephens nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL Owen Stephens BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package OwenDiff
import scala.collection.immutable.HashMap
object Diff {
type IndexPair = (Int, Int)
// Find the LCS of common, unique lines.
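// Returned pairs are (index in lines1, index in lines2). For illustration,
// uniqueLCS(Seq("a", "x", "b"), Seq("a", "y", "b")) would yield (0, 0) and (2, 2),
// since "a" and "b" are the unique lines common to both inputs.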
def uniqueLCS(lines1 : Seq[String], lines2 : Seq[String])
: Traversable[IndexPair] = {
type LineToIndex = HashMap[String,Int]
type LineAndIndex = (String, Int)
// Insert into, or mark as duplicate, the line in the map.
def updateLineMap(map : LineToIndex, lineAndIndex : LineAndIndex) = {
val (line, index) = lineAndIndex
map + ((line, if (map.contains(line)) -1 else index))
}
val lines1Indices =
(new LineToIndex() /: lines1.view.zipWithIndex)(updateLineMap)
// Remove any duplicated entries (marked by value of -1)
val uniques1 = lines1Indices.filter(kv => kv._2 >= 0)
// Represents the current state of the mapping fold.
// Tuple is (file1Uniques, line#In2ToLine#In1, file2Uniques)
type MappingState =
(LineToIndex,HashMap[Int,Int], LineToIndex)
def updateUniqueMaps(state : MappingState, lineAndIndex : LineAndIndex) = {
val (uniques1, line2ToLine1, uniqueIndices2) = state
val (line, index) = lineAndIndex
// Only pay attention to common lines.
if (uniques1.contains(line)) {
val newTuple = if (uniqueIndices2.contains(line)) {
(uniques1 - line,// Ensure we don't match this line again
line2ToLine1 - uniqueIndices2(line),// Not unique, so unset.
uniqueIndices2)
} else {
(uniques1,
line2ToLine1 + ((index,uniques1(line))),
uniqueIndices2 + ((line,index)))
}
newTuple
} else {
state
}
}
// Find indices of all unique line2s and create mapping between files.
val lineMaps = (uniques1, HashMap.empty[Int, Int], new LineToIndex())
val (_,line2ToLine1,_) =
(lineMaps /: lines2.view.zipWithIndex)(updateUniqueMaps)
// Order the pairs by the line order in file2.
val indices1OrderedBy2 = line2ToLine1.toList.sortBy(p => p._1)
// Create an Ordered[IndexPair], so that pairs are ordered small-big by
// their 2nd element (line # in file1).
implicit def IndexPairOrdered(thisVal : IndexPair) = new
Ordered[IndexPair] {
def compare(thatVal : IndexPair) = thisVal._2 compare thatVal._2
}
// Obtain the LCS of the line pairs by finding the LIS
// of the pairs.
val lcs = PatienceSort.LIS(indices1OrderedBy2)(IndexPairOrdered)
// Swap the returned tuples' order, so we return pairs: (line1#, line2#)
lcs.map(t => (t._2, t._1))
}
def recursiveMatch(lines1 : Seq[String], lines2 : Seq[String],
bounds1 : (Int, Int), bounds2 : (Int, Int)) : List[IndexPair] = {
// Catch base-case bounds.
if (bounds1._1 == bounds1._2 || bounds2._1 == bounds2._2) {
return Nil
}
// Obtain a list of line pairs that form the LCS
val equalLineIndices = uniqueLCS(lines1.slice(bounds1._1, bounds1._2),
lines2.slice(bounds2._1, bounds2._2))
def processIndexPair(lastPosAndMatches : ((Int, Int), List[IndexPair]),
pair : IndexPair) = {
val offsetPos1 = pair._1 + bounds1._1
val offsetPos2 = pair._2 + bounds2._1
val (lastPos1, lastPos2) = lastPosAndMatches._1
// We want to recurse between the last matched line pair and the
// next, but only when there are lines in between.
val isGap = lastPos1 + 1 < offsetPos1 && lastPos2 + 1 < offsetPos2
val localResults = if (isGap) {
recursiveMatch(lines1, lines2, (lastPos1 + 1, offsetPos1),
(lastPos2 + 1, offsetPos2))
} else {
List()
}
((offsetPos1, offsetPos2), lastPosAndMatches._2 ++
localResults :+ (offsetPos1, offsetPos2))
}
// Fold up the list of matched line equalLineIndices, recursing between
// groups of matching lines.
val initialTuple = ((bounds1._1 - 1, bounds2._1), List[IndexPair]())
val (lastPos, returnList) =
(initialTuple /: equalLineIndices)(processIndexPair)
val extraList = if (returnList.length > 0) {
// If we matched at all, look for matches between the last match
// and the end.
recursiveMatch(lines1, lines2, (lastPos._1 + 1, bounds1._2),
(lastPos._2 + 1, bounds2._2))
} else if (lines1(bounds1._1) == lines2(bounds2._1)) {
// Find matches at the "start". Catches non-unique, yet equal lines.
// Collect matches until we pass the bounds or lines don't match.
def findStartMatches(pos1 : Int, pos2 : Int,
acc : List[IndexPair]) : (Int, Int, List[IndexPair]) = {
if (pos1 >= bounds1._2 || pos2 >= bounds2._2 ||
lines1(pos1) != lines2(pos2)) {
return (pos1, pos2, acc)
}
return findStartMatches(pos1 + 1, pos2 + 1, acc :+ (pos1, pos2))
}
val (pos1, pos2, startList) =
findStartMatches(bounds1._1, bounds2._1, Nil)
// Recurse between the last match at the start and the end.
startList ++ recursiveMatch(lines1, lines2, (pos1, bounds1._2),
(pos2, bounds2._2))
} else if (lines1(bounds1._2 - 1) == lines2(bounds2._2 - 1)) {
// Find matches at the end of the lines. Catches non-unique, yet
// equal lines.
def findEndMatches(pos1 : Int, pos2 : Int, acc : List[IndexPair])
: (Int, Int, List[IndexPair]) = {
if (pos1 <= bounds1._1 || pos2 <= bounds2._1 ||
lines1(pos1 - 1) != lines2(pos2 - 1)) {
return (pos1, pos2, acc)
} else {
return findEndMatches(pos1 - 1, pos2 - 1,
acc :+ (pos1, pos2))
}
}
val (pos1, pos2, endList) = findEndMatches(bounds1._2 - 1,
bounds2._2 - 1, Nil)
// Find any matches between end matches and last position.
val endGapList = recursiveMatch(lines1, lines2, (lastPos._1 + 1,
pos1), (lastPos._2, pos2))
// Add any matches between end matched and last match first
// to retain correct ordering.
endGapList ++ endList
} else {
Nil
}
returnList ++ extraList
}
// Turn increasing sequences of matched lines into a single MatchResult
def coalesceResults(results : Seq[IndexPair]) : List[MatchResult] = {
def processMatchResult(acc : ((Int, Int, Int), List[MatchResult]),
res : IndexPair) = {
val (index1, index2) = res
val (offset1, offset2, length) = acc._1
val list = acc._2
// Don't match at the start.
val notFirst = offset1 != -1
if (notFirst && index1 == offset1 + length &&
index2 == offset2 + length) {
((offset1, offset2, length + 1), list)
} else {
val nextList = if (notFirst) {
new MatchResult(offset1, offset2, length) :: list
} else {
list
}
((index1, index2, 1), nextList)
}
}
// Fold up the list of matchingLines to join adjacent matches.
val ((offset1, offset2, length), list) =
(((-1, -1, 0), List[MatchResult]()) /: results)(processMatchResult)
// Create a match for anything at the end.
val finalList = if (length > 0) {
new MatchResult(offset1, offset2, length) :: list
} else {
list
}
finalList.reverse
}
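// Computes the difference between two line sequences as a list of DiffResult
// blocks (Equal, Insert, Delete, Modify) describing how to turn lines1 into lines2.
// A minimal usage sketch (variable names are illustrative):
//   val changes = Diff.diff(oldLines, newLines)
//   changes.foreach(println)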
def diff(lines1 : Seq[String], lines2 : Seq[String])
: Traversable[DiffResult] = {
val matchLines = recursiveMatch(lines1, lines2,
(0, lines1.length), (0, lines2.length))
val matchBlocks = coalesceResults(matchLines) :+
MatchResult(lines1.length, lines2.length, 0)
// Calculate the actual differences, using the equal line indices.
def processMatchBlock(acc : (IndexPair, List[DiffResult]),
block : MatchResult) = {
val index1 = block.file1Index
val index2 = block.file2Index
val blockLen = block.length
val (pos1, pos2) = acc._1
val list = acc._2
// Update the change list, by calculating which sort of change
// has happened, based on line positions.
val modificationList = if (pos1 < index1 && pos2 < index2) {
new Modify(pos1, pos2, (index1 - pos1, index2 - pos2),
lines1.slice(pos1,index1), lines2.slice(pos2,index2)) :: list
} else if (pos1 < index1) {
new Delete(pos1, pos2, (index1 - pos1, 0),
lines1.slice(pos1, index1)) :: list
} else if (pos2 < index2) {
new Insert(pos1, pos2, (0, index2 - pos2),
lines2.slice(pos2, index2)) :: list
} else {
list
}
val newPos@(newPos1, newPos2) =
(index1 + blockLen, index2 + blockLen)
val returnList = if (blockLen > 0) {
new Equal(newPos1, newPos2, (blockLen, blockLen),
lines1.slice(newPos1 - blockLen, newPos1)) :: modificationList
} else {
modificationList
}
(newPos, returnList)
}
// Fold up a current line tuple with a list of
// line changes that describe going from file1 to file2.
// Reverse, since we cons'd to create the list
var initialTuple = ((0, 0), List[DiffResult]())
(initialTuple /: matchBlocks)(processMatchBlock)._2.reverse
}
}
/*
* Used to represent a match result between two files.
* file1[file1Index:file1Index + length] ==
* file2[file2Index:file2Index + length]
*/
case class MatchResult(file1Index : Int, file2Index : Int, length : Int)
|
owst/Scala-Patience-Diff
|
OwenDiff/Diff.scala
|
Scala
|
bsd-3-clause
| 12,329 |
package com.softwaremill.codebrag.dao.heartbeat
import org.bson.types.ObjectId
import org.joda.time.DateTime
trait HeartbeatDAO {
def update(userId: ObjectId)
def get(userId: ObjectId): Option[DateTime]
def loadAll(): List[(ObjectId, DateTime)]
}
|
frodejohansen/codebrag
|
codebrag-dao/src/main/scala/com/softwaremill/codebrag/dao/heartbeat/HeartbeatDAO.scala
|
Scala
|
agpl-3.0
| 261 |
package ecdc.crypto
import java.security.Security
import org.bouncycastle.cms.CMSEnvelopedDataParser
import org.bouncycastle.cms.jcajce.JceKeyTransEnvelopedRecipient
import org.bouncycastle.jce.provider.BouncyCastleProvider
import org.bouncycastle.util.encoders.Base64
import scala.collection.JavaConverters._
class CmsDecryptor(keyProvider: SecretKeyProvider) {
Security.addProvider(new BouncyCastleProvider())
def decrypt(enc: String): String = {
val cedParser = new CMSEnvelopedDataParser(Base64.decode(enc))
val recipientInfo = cedParser.getRecipientInfos.getRecipients.asScala.head
val rec = new JceKeyTransEnvelopedRecipient(keyProvider.key)
new String(recipientInfo.getContent(rec))
}
}
|
benfrasersimpson/ecdc
|
src/crypto/src/main/scala/ecdc/crypto/CmsDecryptor.scala
|
Scala
|
isc
| 719 |
package dotty.tools
package dotc
package core
import Types._, Contexts._, Symbols._
import Decorators._
import config.Config
import config.Printers._
/** Methods for adding constraints and solving them.
*
 * What goes into a Constraint as opposed to a ConstraintHandler?
*
* Constraint code is purely functional: Operations get constraints and produce new ones.
* Constraint code does not have access to a type-comparer. Anything regarding lubs and glbs has to be done
* elsewhere.
*
* By comparison: Constraint handlers are parts of type comparers and can use their functionality.
* Constraint handlers update the current constraint as a side effect.
*/
trait ConstraintHandling {
implicit val ctx: Context
protected def isSubType(tp1: Type, tp2: Type): Boolean
val state: TyperState
import state.constraint
private var addConstraintInvocations = 0
/** If the constraint is frozen we cannot add new bounds to the constraint. */
protected var frozenConstraint = false
private def addOneBound(param: PolyParam, bound: Type, isUpper: Boolean): Boolean =
!constraint.contains(param) || {
val c1 = constraint.narrowBound(param, bound, isUpper)
(c1 eq constraint) || {
constraint = c1
val TypeBounds(lo, hi) = constraint.entry(param)
isSubType(lo, hi)
}
}
protected def addUpperBound(param: PolyParam, bound: Type): Boolean = {
def description = i"constraint $param <: $bound to\\n$constraint"
if (bound.isRef(defn.NothingClass) && ctx.typerState.isGlobalCommittable) {
def msg = s"!!! instantiated to Nothing: $param, constraint = ${constraint.show}"
if (Config.failOnInstantiationToNothing) assert(false, msg)
else ctx.log(msg)
}
constr.println(i"adding $description")
val lower = constraint.lower(param)
val res =
addOneBound(param, bound, isUpper = true) &&
lower.forall(addOneBound(_, bound, isUpper = true))
constr.println(i"added $description = $res")
res
}
protected def addLowerBound(param: PolyParam, bound: Type): Boolean = {
def description = i"constraint $param >: $bound to\\n$constraint"
constr.println(i"adding $description")
val upper = constraint.upper(param)
val res =
addOneBound(param, bound, isUpper = false) &&
upper.forall(addOneBound(_, bound, isUpper = false))
constr.println(i"added $description = $res")
res
}
protected def addLess(p1: PolyParam, p2: PolyParam): Boolean = {
def description = i"ordering $p1 <: $p2 to\\n$constraint"
val res =
if (constraint.isLess(p2, p1)) unify(p2, p1)
else {
val down1 = p1 :: constraint.exclusiveLower(p1, p2)
val up2 = p2 :: constraint.exclusiveUpper(p2, p1)
val lo1 = constraint.nonParamBounds(p1).lo
val hi2 = constraint.nonParamBounds(p2).hi
constr.println(i"adding $description down1 = $down1, up2 = $up2")
constraint = constraint.addLess(p1, p2)
down1.forall(addOneBound(_, hi2, isUpper = true)) &&
up2.forall(addOneBound(_, lo1, isUpper = false))
}
constr.println(i"added $description = $res")
res
}
/** Make p2 = p1, transfer all bounds of p2 to p1
* @pre less(p1)(p2)
*/
private def unify(p1: PolyParam, p2: PolyParam): Boolean = {
constr.println(s"unifying $p1 $p2")
assert(constraint.isLess(p1, p2))
val down = constraint.exclusiveLower(p2, p1)
val up = constraint.exclusiveUpper(p1, p2)
constraint = constraint.unify(p1, p2)
val bounds = constraint.nonParamBounds(p1)
val lo = bounds.lo
val hi = bounds.hi
isSubType(lo, hi) &&
down.forall(addOneBound(_, hi, isUpper = true)) &&
up.forall(addOneBound(_, lo, isUpper = false))
}
protected final def isSubTypeWhenFrozen(tp1: Type, tp2: Type): Boolean = {
val saved = frozenConstraint
frozenConstraint = true
try isSubType(tp1, tp2)
finally frozenConstraint = saved
}
/** Test whether the lower bounds of all parameters in this
* constraint are a solution to the constraint.
*/
protected final def isSatisfiable: Boolean =
constraint.forallParams { param =>
val TypeBounds(lo, hi) = constraint.entry(param)
isSubType(lo, hi) || {
ctx.log(i"sub fail $lo <:< $hi")
false
}
}
/** Solve constraint set for given type parameter `param`.
* If `fromBelow` is true the parameter is approximated by its lower bound,
* otherwise it is approximated by its upper bound. However, any occurrences
* of the parameter in a refinement somewhere in the bound are removed.
* (Such occurrences can arise for F-bounded types).
* The constraint is left unchanged.
* @return the instantiating type
* @pre `param` is in the constraint's domain.
*/
final def approximation(param: PolyParam, fromBelow: Boolean): Type = {
val avoidParam = new TypeMap {
override def stopAtStatic = true
def apply(tp: Type) = mapOver {
tp match {
case tp: RefinedType if param occursIn tp.refinedInfo => tp.parent
case _ => tp
}
}
}
val bound = if (fromBelow) constraint.fullLowerBound(param) else constraint.fullUpperBound(param)
val inst = avoidParam(bound)
typr.println(s"approx ${param.show}, from below = $fromBelow, bound = ${bound.show}, inst = ${inst.show}")
inst
}
/** Constraint `c1` subsumes constraint `c2`, if under `c2` as constraint we have
* for all poly params `p` defined in `c2` as `p >: L2 <: U2`:
*
* c1 defines p with bounds p >: L1 <: U1, and
* L2 <: L1, and
* U1 <: U2
*
* Both `c1` and `c2` are required to derive from constraint `pre`, possibly
* narrowing it with further bounds.
*/
protected final def subsumes(c1: Constraint, c2: Constraint, pre: Constraint): Boolean =
if (c2 eq pre) true
else if (c1 eq pre) false
else {
val saved = constraint
try
c2.forallParams(p =>
c1.contains(p) &&
c2.upper(p).forall(c1.isLess(p, _)) &&
isSubTypeWhenFrozen(c1.nonParamBounds(p), c2.nonParamBounds(p)))
finally constraint = saved
}
/** The current bounds of type parameter `param` */
final def bounds(param: PolyParam): TypeBounds = constraint.entry(param) match {
case bounds: TypeBounds => bounds
case _ => param.binder.paramBounds(param.paramNum)
}
/** Add polytype `pt`, possibly with type variables `tvars`, to current constraint
* and propagate all bounds.
* @param tvars See Constraint#add
*/
def addToConstraint(pt: PolyType, tvars: List[TypeVar]): Unit =
assert {
checkPropagated(i"initialized $pt") {
constraint = constraint.add(pt, tvars)
pt.paramNames.indices.forall { i =>
val param = PolyParam(pt, i)
val bounds = constraint.nonParamBounds(param)
val lower = constraint.lower(param)
val upper = constraint.upper(param)
if (lower.nonEmpty && !bounds.lo.isRef(defn.NothingClass) ||
upper.nonEmpty && !bounds.hi.isRef(defn.AnyClass)) constr.println(i"INIT*** $pt")
lower.forall(addOneBound(_, bounds.hi, isUpper = true)) &&
upper.forall(addOneBound(_, bounds.lo, isUpper = false))
}
}
}
/** Can `param` be constrained with new bounds? */
final def canConstrain(param: PolyParam): Boolean =
!frozenConstraint && (constraint contains param)
  /** Add constraint `param <: bound` if `fromBelow` is true, `param >: bound` otherwise.
* `bound` is assumed to be in normalized form, as specified in `firstTry` and
* `secondTry` of `TypeComparer`. In particular, it should not be an alias type,
* lazy ref, typevar, wildcard type, error type. In addition, upper bounds may
* not be AndTypes and lower bounds may not be OrTypes. This is assured by the
* way isSubType is organized.
*/
protected def addConstraint(param: PolyParam, bound: Type, fromBelow: Boolean): Boolean = {
def description = i"constr $param ${if (fromBelow) ">:" else "<:"} $bound:\\n$constraint"
//checkPropagated(s"adding $description")(true) // DEBUG in case following fails
checkPropagated(s"added $description") {
addConstraintInvocations += 1
try bound match {
case bound: PolyParam if constraint contains bound =>
if (fromBelow) addLess(bound, param) else addLess(param, bound)
case _ =>
if (fromBelow) addLowerBound(param, bound) else addUpperBound(param, bound)
}
finally addConstraintInvocations -= 1
}
}
/** Check that constraint is fully propagated. See comment in Config.checkConstraintsPropagated */
def checkPropagated(msg: => String)(result: Boolean): Boolean = {
if (Config.checkConstraintsPropagated && result && addConstraintInvocations == 0) {
val saved = frozenConstraint
frozenConstraint = true
for (p <- constraint.domainParams) {
def check(cond: => Boolean, q: PolyParam, ordering: String, explanation: String): Unit =
assert(cond, i"propagation failure for $p $ordering $q: $explanation\\n$msg")
for (u <- constraint.upper(p))
check(bounds(p).hi <:< bounds(u).hi, u, "<:", "upper bound not propagated")
for (l <- constraint.lower(p)) {
check(bounds(l).lo <:< bounds(p).hi, l, ">:", "lower bound not propagated")
check(constraint.isLess(l, p), l, ">:", "reverse ordering (<:) missing")
}
}
frozenConstraint = saved
}
result
}
}
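// Illustrative only (not part of the original source): a self-contained analogue
// of the `subsumes` condition documented above, using plain integer intervals in
// place of type bounds. c1 subsumes c2 when every parameter bound in c2 is also
// bound in c1 and c1's interval lies inside c2's (L2 <= L1 and U1 <= U2).
object SubsumptionSketch {
  type Bounds = Map[String, (Int, Int)] // param name -> (lower, upper)

  def subsumes(c1: Bounds, c2: Bounds): Boolean =
    c2.forall { case (p, (lo2, hi2)) =>
      c1.get(p).exists { case (lo1, hi1) => lo2 <= lo1 && hi1 <= hi2 }
    }

  def demo(): Unit = {
    val wide   = Map("p" -> (0, 20))
    val narrow = Map("p" -> (1, 10))
    assert(subsumes(narrow, wide))   // the narrower constraint subsumes the wider one
    assert(!subsumes(wide, narrow))
  }
}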
|
yusuke2255/dotty
|
src/dotty/tools/dotc/core/ConstraintHandling.scala
|
Scala
|
bsd-3-clause
| 9,568 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.experiments.yarn
object Constants {
val CONTAINER_USER = "gearpump.yarn.user"
val APPMASTER_NAME = "gearpump.yarn.applicationmaster.name"
val APPMASTER_COMMAND = "gearpump.yarn.applicationmaster.command"
val APPMASTER_MEMORY = "gearpump.yarn.applicationmaster.memory"
val APPMASTER_VCORES = "gearpump.yarn.applicationmaster.vcores"
val APPMASTER_QUEUE = "gearpump.yarn.applicationmaster.queue"
val PACKAGE_PATH = "gearpump.yarn.client.package-path"
val CONFIG_PATH = "gearpump.yarn.client.config-path"
val MASTER_COMMAND = "gearpump.yarn.master.command"
val MASTER_MEMORY = "gearpump.yarn.master.memory"
val MASTER_VCORES = "gearpump.yarn.master.vcores"
val WORKER_COMMAND = "gearpump.yarn.worker.command"
val WORKER_CONTAINERS = "gearpump.yarn.worker.containers"
val WORKER_MEMORY = "gearpump.yarn.worker.memory"
val WORKER_VCORES = "gearpump.yarn.worker.vcores"
val SERVICES_ENABLED = "gearpump.yarn.services.enabled"
val LOCAL_DIRS = org.apache.hadoop.yarn.api.ApplicationConstants.Environment.LOCAL_DIRS.$$()
val CONTAINER_ID = org.apache.hadoop.yarn.api.ApplicationConstants.Environment.CONTAINER_ID.$$()
val LOG_DIR_EXPANSION_VAR = org.apache.hadoop.yarn.api.ApplicationConstants.LOG_DIR_EXPANSION_VAR
val NODEMANAGER_HOST = org.apache.hadoop.yarn.api.ApplicationConstants.Environment.NM_HOST.$$()
}
|
manuzhang/incubator-gearpump
|
experiments/yarn/src/main/scala/org/apache/gearpump/experiments/yarn/Constants.scala
|
Scala
|
apache-2.0
| 2,187 |
package com.harborx.api.device
import com.harborx.api.modules.DeviceModule
import org.scalamock.scalatest.MockFactory
import org.scalatest.GivenWhenThen
import org.scalatestplus.play.PlaySpec
/**
* We shall use our custom Spec instead of PlaySpec in real life
* Test for service layer logic here, so we stub DeviceRepository
* TODO: Test for DeviceRepository
*/
class DeviceServiceSpec extends PlaySpec with GivenWhenThen with MockFactory with DeviceModule {
override lazy val deviceRepository = stub[DeviceRepository]
"DeviceService" must {
"forward what DeviceRepository give" in {
(deviceRepository.getDevice _).when(*).returns(Device(2))
deviceService.getDevice(10) mustBe Device(2)
}
}
}
|
harborx/play-di-example
|
play-macwire/test/com/harborx/api/device/DeviceServiceSpec.scala
|
Scala
|
mit
| 733 |
package typeclass.data
import typeclass.Prelude._
import typeclass.{MonadLaws, SemigroupLaws}
import scalaprops.{Param, Scalaprops}
object NonEmptyListTest extends Scalaprops {
val monad = MonadLaws[NonEmptyList].all.andThenParam(Param.maxSize(10))
val semigroup = SemigroupLaws[NonEmptyList[Int]].all
}
|
julien-truffaut/Typeclass
|
answer/src/test/scala/typeclass/data/NonEmptyListTest.scala
|
Scala
|
mit
| 312 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.javaapi.core.internal.errors
import java.{ lang => jl }
import io.gatling.core.session.el._
import io.gatling.javaapi.core.StructureBuilder
import io.gatling.javaapi.core.error.Errors
import io.gatling.javaapi.core.internal.Expressions._
import io.gatling.javaapi.core.internal.JavaExpression
object ScalaExitHereIf {
def apply[T <: StructureBuilder[T, W], W <: io.gatling.core.structure.StructureBuilder[W]](
context: Errors[T, W],
condition: String
): T =
context.make(_.exitHereIf(condition.el))
def apply[T <: StructureBuilder[T, W], W <: io.gatling.core.structure.StructureBuilder[W]](
context: Errors[T, W],
condition: JavaExpression[jl.Boolean]
): T =
context.make(_.exitHereIf(javaBooleanFunctionToExpression(condition)))
}
|
gatling/gatling
|
gatling-core-java/src/main/scala/io/gatling/javaapi/core/internal/errors/ScalaExitHereIf.scala
|
Scala
|
apache-2.0
| 1,415 |
package org.positronicnet.sample.contacts.test
import org.positronicnet.sample.contacts._
import org.positronicnet.content.PositronicContentResolver
import org.scalatest._
import org.positronicnet.test.RobolectricTests
import com.xtremelabs.robolectric.Robolectric
import org.scalatest.matchers.ShouldMatchers
import android.provider.ContactsContract
import android.provider.ContactsContract.{CommonDataKinds => CDK}
class ModelSpec
extends Spec
with ShouldMatchers
with BeforeAndAfterEach
with RobolectricTests
{
override def beforeEach =
PositronicContentResolver.openInContext( Robolectric.application )
override def afterEach =
PositronicContentResolver.close
def phone( number: String, category: CategoryLabel ) =
(new Phone).setProperty( "number", number )
.setProperty( "categoryLabel", category )
def email( address: String, category: CategoryLabel ) =
(new Email).setProperty( "address", address )
.setProperty( "categoryLabel", category )
def note( text: String ) = (new Note).setProperty( "note", text )
def homePhone = CategoryLabel( CDK.Phone.TYPE_HOME, null )
def workPhone = CategoryLabel( CDK.Phone.TYPE_WORK, null )
def customPhone = CategoryLabel( CDK.BaseTypes.TYPE_CUSTOM, "car" )
def homeEmail = CategoryLabel( CDK.Email.TYPE_HOME, null )
describe( "emptiness checks" ) {
it ("should mark null records as empty") {
assert( (new Phone).isEmpty )
assert( (new Email).isEmpty )
assert( (new Note).isEmpty )
}
it ("should treat blank strings as empty") {
assert( phone( "", homePhone ).isEmpty )
assert( email( "", homeEmail ).isEmpty )
assert( note( "" ).isEmpty )
}
it ("should not treat 'full' items as empty") {
assert( !phone( "911", homePhone ).isEmpty )
assert( !email( "[email protected]", homeEmail ).isEmpty )
assert( !note( "needs fish food" ).isEmpty )
}
}
describe( "data aggregation" ) {
// Set up some test fixtures, throw them up in the air, and
// assert that things come down in appropriate places and
// configurations.
val rawContactA = new RawContact
val rawContactB = new RawContact
def aggregate( adata: Seq[ContactData] = Seq.empty,
bdata: Seq[ContactData] = Seq.empty ) =
{
val state = new AggregateContactEditState( Seq(( rawContactA, adata ),
( rawContactB, bdata )))
val aInfo = state.rawContactEditStates(0).accountInfo
val bInfo = state.rawContactEditStates(1).accountInfo
(state.aggregatedData, aInfo, bInfo)
}
def assertPhone( data: Seq[ AggregatedDatum[ Phone ]],
number: String,
category: CategoryLabel,
acctInfo: AccountInfo ) =
assert( 1 == data.count( aDatum =>
(aDatum.acctInfo eq acctInfo) &&
aDatum.datum.number == number &&
aDatum.datum.categoryLabel == category ))
def assertEmail( data: Seq[ AggregatedDatum[ Email ]],
address: String,
category: CategoryLabel,
acctInfo: AccountInfo ) =
assert( 1 == data.count( aDatum =>
(aDatum.acctInfo eq acctInfo) &&
aDatum.datum.address == address &&
aDatum.datum.categoryLabel == category ))
def assertNote( data: Seq[ AggregatedDatum[ Note ]],
text: String,
acctInfo: AccountInfo ) =
assert( 1 == data.count( aDatum =>
(aDatum.acctInfo eq acctInfo) &&
aDatum.datum.note == text ))
it ("should aggregate unlike data" ) {
aggregate( Seq( phone( "617 555 1212", homePhone ),
phone( "201 111 1212", workPhone )),
Seq( phone( "333 333 3333", customPhone ))) match
{
case (data, aInfo, bInfo) => {
val aggPhones = data.dataOfType[ Phone ]
aggPhones.size should be (3)
assertPhone( aggPhones, "617 555 1212", homePhone, aInfo )
assertPhone( aggPhones, "201 111 1212", workPhone, aInfo )
assertPhone( aggPhones, "333 333 3333", customPhone, bInfo )
}
}
}
it ("should segregate data by type" ) {
aggregate( Seq( phone( "617 555 1212", homePhone ),
email( "[email protected]", homeEmail )),
Seq( phone( "333 333 3333", customPhone ))) match
{
case (data, aInfo, bInfo) => {
val aggPhones = data.dataOfType[ Phone ]
val aggEmails = data.dataOfType[ Email ]
aggPhones.size should be (2)
aggEmails.size should be (1)
assertPhone( aggPhones, "617 555 1212", homePhone, aInfo )
assertPhone( aggPhones, "333 333 3333", customPhone, bInfo )
assertEmail( aggEmails, "[email protected]", homeEmail, aInfo )
}
}
}
it ("should coalesce 'similar' data items") {
aggregate( Seq( phone( "617 555 1212", homePhone ),
phone( "201 111 1212", workPhone )),
Seq( phone( "617-555-1212", customPhone ))) match
{
case (data, aInfo, bInfo) => {
val aggPhones = data.dataOfType[ Phone ]
// Have two "similar" phone numbers. We're supposed to choose only
// one, and give the custom label preference, while leaving the
// "dissimilar" item alone.
aggPhones.size should be (2)
assertPhone( aggPhones, "201 111 1212", workPhone, aInfo )
assertPhone( aggPhones, "617-555-1212", customPhone, bInfo )
}
}
}
// Separate case in the code, so we need to test it explicitly
// to assure coverage.
it ("should aggregate uncategorized data" ) {
aggregate( Seq( note( "foo" ),
email( "[email protected]", homeEmail )),
Seq( note( "bar" ))) match
{
case (data, aInfo, bInfo) => {
val aggNotes = data.dataOfType[ Note ]
val aggEmails = data.dataOfType[ Email ]
aggNotes.size should be (2)
aggEmails.size should be (1)
assertNote( aggNotes, "foo", aInfo )
assertNote( aggNotes, "bar", bInfo )
assertEmail( aggEmails, "[email protected]", homeEmail, aInfo )
}
}
}
it ("should toss empty items") {
// Standard contacts app has a thing for creating empty notes
// and nicknames. We'd rather not show them...
aggregate ( Seq( note( "foo" )),
Seq( note( "" ))) match
{
case (data, aInfo, bInfo) => {
val aggNotes = data.dataOfType[ Note ]
aggNotes.size should be (1)
assertNote( aggNotes, "foo", aInfo )
}
}
}
}
// Here, I'm letting the tests know that DISPLAY_NAME is an alias for DATA1,
// and the other fields are DATAX for X <> 1. (I'm keeping that knowledge
// out of the app to try to keep it readable, but it's handy here...)
describe( "structured name mapper" ) {
it ("should send display name only if structured fields unset") {
val name = (new StructuredName)
.setProperty( "displayName", "Jim Smith" )
val pairs = ContactData.structuredNames.dataPairs( name )
val dataKeys = pairs.map{ _._1 }.filter{ _.startsWith("data") }
dataKeys.toSeq should equal (Seq( "data1" ))
// Make sure we have other fields.
pairs.map{ _._1 } should contain ("raw_contact_id")
}
it ("should send all structured name fields if any are set") {
val name = (new StructuredName)
.setProperty( "displayName", "Jim Smith" )
.setProperty( "givenName", "Jim" )
.setProperty( "familyName", "Smith" )
name.displayName should equal ("Jim Smith")
name.givenName should equal ("Jim")
val pairs = ContactData.structuredNames.dataPairs( name )
val dataKeys = pairs.map{ _._1 }.filter{ _.startsWith("data") }
dataKeys should contain ("data1")
dataKeys should contain ("data2") // first name; set
dataKeys should contain ("data7") // phonetic foo; unset
pairs.map{ _._1 } should contain ("raw_contact_id")
}
}
describe( "category label handling" ) {
val TYPE_HOME = CDK.Phone.TYPE_HOME
val TYPE_CUSTOM = CDK.BaseTypes.TYPE_CUSTOM
val homePhone = (new Phone).setProperty( "categoryTag", TYPE_HOME )
val customPhone =
(new Phone).setProperty( "categoryTag", TYPE_CUSTOM )
.setProperty( "label", "FOAF Mobile" )
def assertCategory( tf: CategoryLabel, tag: Int, label: String ) = {
tf.tag should equal (tag)
tf.label should equal (label)
}
it( "should render standard labels correctly" ) {
assertCategory( homePhone.categoryLabel, TYPE_HOME, null )
}
it( "should render custom labels correctly" ) {
assertCategory( customPhone.categoryLabel, TYPE_CUSTOM, "FOAF Mobile" )
}
it ( "should be able to set standard labels" ) {
val hackedPhone = (new Phone).categoryLabel_:=(
CategoryLabel( TYPE_HOME, null ))
assertCategory( hackedPhone.categoryLabel, TYPE_HOME, null )
}
it ( "should be able to set custom labels" ) {
val hackedPhone = (new Phone).categoryLabel_:=(
CategoryLabel( TYPE_CUSTOM, "car" ))
assertCategory( hackedPhone.categoryLabel, TYPE_CUSTOM, "car" )
}
}
}
|
rst/positronic_net
|
sample/contacts_app/src/test/scala/ModelSpecs.scala
|
Scala
|
bsd-3-clause
| 9,540 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.javalib.io
import org.junit.Test
class ThrowablesTest {
@Test def should_define_all_java_io_Errors_and_Exceptions(): Unit = {
import java.io._
new IOException("", new Exception())
new EOFException("")
new UTFDataFormatException("")
new UnsupportedEncodingException("")
new NotSerializableException("")
}
}
|
lrytz/scala-js
|
test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/io/ThrowablesTest.scala
|
Scala
|
bsd-3-clause
| 891 |
package pack
class R {
class attr // Will have the bytecode name `R$attr`, not to be confused with `R@tr`!
class `@`
}
class `@`
|
yusuke2255/dotty
|
tests/untried/pos/t7532b/A_1.scala
|
Scala
|
bsd-3-clause
| 138 |
package blended.container.context.impl.internal
import java.io.File
import java.util.Properties
import scala.beans.BeanProperty
import scala.jdk.CollectionConverters._
import scala.util.{Success, Try}
import blended.updater.config.Profile
import blended.util.RichTry._
import blended.util.logging.Logger
import com.typesafe.config.{Config, ConfigFactory, ConfigParseOptions}
class ContainerContextImpl() extends AbstractContainerContextImpl {
import AbstractContainerContextImpl._
private[this] lazy val log: Logger = Logger[ContainerContextImpl]
initialize()
private def normalizePath(f: File): String = f.getAbsolutePath().replaceAll("\\\\\\\\", "/")
@BeanProperty
override lazy val containerDirectory: String = normalizePath(new File(System.getProperty("blended.home")))
@BeanProperty
override lazy val containerHostname: String = {
try {
val localMachine = java.net.InetAddress.getLocalHost()
localMachine.getCanonicalHostName()
} catch {
case _: java.net.UnknownHostException => "UNKNOWN"
}
}
@BeanProperty
override lazy val containerLogDirectory: String = containerLogDir
@BeanProperty
override lazy val profileDirectory: String = profileDir
lazy val brandingProperties: Map[String, String] = {
val props: Properties = (try {
import blended.launcher.runtime.Branding
      // it is possible that this optional class is not available at runtime,
// e.g. when started with another launcher
log.debug("About to read launcher branding properties")
Option(Branding.getProperties())
} catch {
case e: NoClassDefFoundError => None
}) getOrElse {
log.warn("Could not read launcher branding properies")
new Properties()
}
val result: Map[String, String] =
props.entrySet().asScala.map(e => e.getKey().toString() -> e.getValue().toString()).toMap
log.debug(s"Resolved branding properties : [${result.mkString(",")}]")
result
}
private[this] lazy val profileDir: String = {
val profileHome =
brandingProperties.get(Profile.Properties.PROFILE_DIR) orElse {
log.warn("Could not read the profile directory from read launcher branding properties")
None
}
val dir = profileHome getOrElse {
Option(System.getProperty(PROP_BLENDED_HOME)) getOrElse {
Option(System.getProperty("user.dir")) getOrElse {
"."
}
}
}
val configDir = new File(dir)
if (!configDir.exists()) {
log.error(s"Container directory [${dir}] does not exist.")
} else if (!configDir.isDirectory() || !configDir.canRead()) {
log.error(s"Container directory [${dir}] is not readable.")
}
val absDir = configDir.getAbsolutePath
System.setProperty("blended.container.home", absDir)
absDir
}
private[this] lazy val containerLogDir: String = {
val f = new File(containerDirectory + "/log")
normalizePath(f)
}
@BeanProperty
override lazy val containerConfigDirectory: String =
normalizePath(new File(containerDirectory, CONFIG_DIR))
@BeanProperty
override lazy val profileConfigDirectory: String = normalizePath(new File(profileDirectory, CONFIG_DIR))
override lazy val containerConfig: Config = {
val sysProps = ConfigFactory.systemProperties()
val envProps = ConfigFactory.systemEnvironment()
val cfgFile: File = new File(profileConfigDirectory, "application.conf")
log.debug(s"Trying to resolve config from [${cfgFile.getAbsolutePath()}]")
val appCfg: Config =
ConfigFactory
.parseFile(cfgFile, ConfigParseOptions.defaults().setAllowMissing(false))
.withFallback(sysProps)
.withFallback(envProps)
.resolve()
// we need to make sure that all keys are available in the resulting config,
// even if they point to null values or empty configs
val allKeys: List[String] = ConfigLocator.fullKeyset("", appCfg)
val nullKeys = allKeys
.filter(s => appCfg.getIsNull(s))
.map(s => (s -> null))
.toMap
.asJava
val emptyKeys = allKeys
.filter { s =>
Try { appCfg.getConfig(s) } match {
case Success(c) => c.isEmpty()
case _ => false
}
}
.map(s => s -> ConfigFactory.empty().root())
.toMap
.asJava
val evaluated = ConfigLocator.evaluatedConfig(appCfg, this).unwrap
log.trace(s"After reading application.conf : $evaluated")
val resolvedCfg: Config = evaluated
.withFallback(sysProps)
.withFallback(envProps)
.withFallback(ConfigFactory.parseMap(nullKeys))
.withFallback(ConfigFactory.parseMap(emptyKeys))
.resolve()
log.debug(s"Resolved container config : $resolvedCfg")
resolvedCfg
}
}
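// Illustrative only (not part of the original source): a minimal sketch of the
// fallback chain built in `containerConfig` above, with hypothetical keys. It
// reuses the ConfigFactory import at the top of this file; values parsed first
// win, and missing keys are filled in from the fallback layers.
object ConfigFallbackSketch {
  def demo(): Unit = {
    val appCfg = ConfigFactory.parseString(""" greeting = "from application.conf" """)
    val sysCfg = ConfigFactory.parseString(
      """ greeting = "from the fallback"
        | onlyInFallback = 42
        |""".stripMargin)
    val merged = appCfg.withFallback(sysCfg).resolve()
    assert(merged.getString("greeting") == "from application.conf")
    assert(merged.getInt("onlyInFallback") == 42)
  }
}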
|
woq-blended/blended
|
blended.container.context.impl/src/main/scala/blended/container/context/impl/internal/ContainerContextImpl.scala
|
Scala
|
apache-2.0
| 4,766 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*package org.scalatest.wordspec
import org.scalatest._
import SharedHelpers.EventRecordingReporter
import scala.concurrent.{Promise, ExecutionContext, Future}
import org.scalatest.concurrent.SleepHelper
import scala.util.Success
class FixtureAsyncPropSpecSpec extends org.scalatest.FunSpec {
describe("AsyncPropSpec") {
it("can be used for tests that return Future under parallel async test execution") {
class ExampleSpec extends AsyncPropSpec with ParallelTestExecution {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
val a = 1
property("test 1") { fixture =>
Future {
assert(a == 1)
}
}
property("test 2") { fixture =>
Future {
assert(a == 2)
}
}
property("test 3") { fixture =>
Future {
pending
}
}
property("test 4") { fixture =>
Future {
cancel
}
}
ignore("test 5") { fixture =>
Future {
cancel
}
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "test 5")
}
it("can be used for tests that did not return Future under parallel async test execution") {
class ExampleSpec extends AsyncPropSpec with ParallelTestExecution {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
val a = 1
property("test 1") { fixture =>
assert(a == 1)
}
property("test 2") { fixture =>
assert(a == 2)
}
property("test 3") { fixture =>
pending
}
property("test 4") { fixture =>
cancel
}
ignore("test 5") { fixture =>
cancel
}
override def newInstance = new ExampleSpec
}
val rep = new EventRecordingReporter
val spec = new ExampleSpec
val status = spec.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "test 5")
}
it("should run tests that return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends AsyncPropSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
property("test 1") { fixture =>
Future {
SleepHelper.sleep(30)
assert(count == 0)
count = 1
succeed
}
}
property("test 2") { fixture =>
Future {
assert(count == 1)
SleepHelper.sleep(50)
count = 2
succeed
}
}
property("test 3") { fixture =>
Future {
assert(count == 2)
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived.length == 3)
}
it("should run tests that does not return Future in serial by default") {
@volatile var count = 0
class ExampleSpec extends AsyncPropSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
property("test 1") { fixture =>
SleepHelper.sleep(30)
assert(count == 0)
count = 1
succeed
}
property("test 2") { fixture =>
assert(count == 1)
SleepHelper.sleep(50)
count = 2
succeed
}
property("test 3") { fixture =>
assert(count == 2)
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived.length == 3)
}
// SKIP-SCALATESTJS,NATIVE-START
it("should run tests and its future in same main thread when use SerialExecutionContext") {
var mainThread = Thread.currentThread
var test1Thread: Option[Thread] = None
var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends AsyncPropSpec {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
property("test 1") { fixture =>
Future {
test1Thread = Some(Thread.currentThread)
succeed
}
}
property("test 2") { fixture =>
Future {
test2Thread = Some(Thread.currentThread)
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
status.waitUntilCompleted()
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
it("should run tests and its true async future in the same thread when use SerialExecutionContext") {
var mainThread = Thread.currentThread
@volatile var test1Thread: Option[Thread] = None
@volatile var test2Thread: Option[Thread] = None
var onCompleteThread: Option[Thread] = None
class ExampleSpec extends AsyncPropSpec {
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
property("test 1") { fixture =>
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
1000
)
promise.future.map { s =>
test1Thread = Some(Thread.currentThread)
s
}
}
property("test 2") { fixture =>
val promise = Promise[Assertion]
val timer = new java.util.Timer
timer.schedule(
new java.util.TimerTask {
def run(): Unit = {
promise.complete(Success(succeed))
}
},
500
)
promise.future.map { s =>
test2Thread = Some(Thread.currentThread)
s
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.whenCompleted { s =>
onCompleteThread = Some(Thread.currentThread)
}
status.waitUntilCompleted()
assert(test1Thread.isDefined)
assert(test1Thread.get == mainThread)
assert(test2Thread.isDefined)
assert(test2Thread.get == mainThread)
assert(onCompleteThread.isDefined)
assert(onCompleteThread.get == mainThread)
}
it("should not run out of stack space with nested futures when using SerialExecutionContext") {
class ExampleSpec extends AsyncPropSpec {
// Note we get a StackOverflowError with the following execution
// context.
// override implicit def executionContext: ExecutionContext = new ExecutionContext { def execute(runnable: Runnable) = runnable.run; def reportFailure(cause: Throwable) = () }
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
def sum(xs: List[Int]): Future[Int] =
xs match {
case Nil => Future.successful(0)
case x :: xs => Future(x).flatMap(xx => sum(xs).map(xxx => xx + xxx))
}
property("test 1") { fixture =>
val fut: Future[Int] = sum((1 to 50000).toList)
fut.map(total => assert(total == 1250025000))
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
status.waitUntilCompleted()
assert(!rep.testSucceededEventsReceived.isEmpty)
}
// SKIP-SCALATESTJS,NATIVE-END
it("should run tests that returns Future and report their result in serial") {
class ExampleSpec extends AsyncPropSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
property("test 1") { fixture =>
Future {
SleepHelper.sleep(60)
succeed
}
}
property("test 2") { fixture =>
Future {
SleepHelper.sleep(30)
succeed
}
}
property("test 3") { fixture =>
Future {
succeed
}
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
it("should run tests that does not return Future and report their result in serial") {
class ExampleSpec extends AsyncPropSpec {
//SCALATESTJS-ONLY implicit override def executionContext = org.scalatest.concurrent.TestExecutionContext.runNow
type FixtureParam = String
def withFixture(test: OneArgAsyncTest): FutureOutcome =
test("testing")
property("test 1") { fixture =>
SleepHelper.sleep(60)
succeed
}
property("test 2") { fixture =>
SleepHelper.sleep(30)
succeed
}
property("test 3") { fixture =>
succeed
}
}
val rep = new EventRecordingReporter
val suite = new ExampleSpec
val status = suite.run(None, Args(reporter = rep))
// SKIP-SCALATESTJS,NATIVE-START
status.waitUntilCompleted()
// SKIP-SCALATESTJS,NATIVE-END
assert(rep.testStartingEventsReceived.length == 3)
assert(rep.testStartingEventsReceived(0).testName == "test 1")
assert(rep.testStartingEventsReceived(1).testName == "test 2")
assert(rep.testStartingEventsReceived(2).testName == "test 3")
assert(rep.testSucceededEventsReceived.length == 3)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testSucceededEventsReceived(1).testName == "test 2")
assert(rep.testSucceededEventsReceived(2).testName == "test 3")
}
}
}*/
|
scalatest/scalatest
|
jvm/propspec-test/src/test/scala/org/scalatest/propspec/FixtureAsyncPropSpecSpec.scala
|
Scala
|
apache-2.0
| 14,568 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.torch
import com.intel.analytics.bigdl.nn.CMinTable
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.RandomGenerator._
import com.intel.analytics.bigdl.utils.Table
import scala.collection.mutable.HashMap
import scala.util.Random
@com.intel.analytics.bigdl.tags.Serial
class CMinTableSpec extends TorchSpec {
"A CMaxTable Module" should "generate correct output and grad" in {
torchCheck()
val seed = 100
RNG.setSeed(seed)
val module = new CMinTable[Double]()
val input1 = Tensor[Double](5).apply1(e => Random.nextDouble())
val input2 = Tensor[Double](5).apply1(e => Random.nextDouble())
val gradOutput = Tensor[Double](5).apply1(e => Random.nextDouble())
val input = new Table()
input(1.toDouble) = input1
input(2.toDouble) = input2
val start = System.nanoTime()
val output = module.forward(input)
val gradInput = module.backward(input, gradOutput)
val end = System.nanoTime()
val scalaTime = end - start
val code = "torch.manualSeed(" + seed + ")\\n" +
"module = nn.CMinTable()\\n" +
"output = module:forward(input)\\n" +
"gradInput = module:backward(input,gradOutput)\\n"
val (luaTime, torchResult) = TH.run(code, Map("input" -> input, "gradOutput" -> gradOutput),
Array("output", "gradInput"))
val luaOutput1 = torchResult("output").asInstanceOf[Tensor[Double]]
val luaOutput2 = torchResult("gradInput").asInstanceOf[Table]
luaOutput1 should be (output)
luaOutput2 should be (gradInput)
println("Test case : CMinTable, Torch : " + luaTime +
" s, Scala : " + scalaTime / 1e9 + " s")
}
}
|
yiheng/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/torch/CMinTableSpec.scala
|
Scala
|
apache-2.0
| 2,291 |
package extraction.parser
import ch.weisenburger.uima.FinancialDataPipelineFactory
import ch.weisenburger.uima.types.distantsupervision.skala.Timex
import org.dbpedia.extraction.wikiparser.Node
import org.dbpedia.extraction.dataparser.DataParser
import org.slf4j.LoggerFactory
case class UnexpectedNumberOfTimexesException(numberOfTimexes: Int, timexes: Seq[String]) extends Exception
class AdvancedTimexParser extends DataParser {
val logger = LoggerFactory.getLogger(classOf[AdvancedTimexParser])
val timexPipeline = FinancialDataPipelineFactory.createTimexExtractionScalaCaseClassPipeline
def parse(node: Node): Option[Any] = {
assert(1 == 0);
Some(0)
}
override def parse( node : Node, dependentParserResult: Option[Any], subjectUri: String): Option[Any] = {
val stringValue = TimexParserCustomStringParser.parse(node)
val valueParserResult = dependentParserResult
try {
val result = stringValue match {
case Some(str) => Some(parseDates(str, valueParserResult, subjectUri))
case None => None
}
//Logging
if (logger.isErrorEnabled) result match {
case None => logger.trace(s"Unable to parse node $node of $subjectUri to String");
// no timex is nothing special -> lots of non temporal properties -> trace only
case Some((None, None)) => logger.trace(s"No timex found in node $node, with value: ${stringValue.get}")
// a timex is also nothing special --> see above
case Some((fromTimex, None)) => logger.trace(s"Timex '$fromTimex' found in node $node, with value: ${stringValue.get}")
case Some((fromTimex, toTimex)) => logger.trace(s"Timex interval '$fromTimex' - '$toTimex' found in node $node, with value: ${stringValue.get}")
}
result
} catch {
case unexno: UnexpectedNumberOfTimexesException => {
logger.error(s"Found ${unexno.numberOfTimexes} timexes in $node. Unintended parsing result is likely. Timexes: ${unexno.timexes.mkString("; ")}; Node Value: ${stringValue.getOrElse("")}")
None
}
}
}
  /** Parse an infobox attribute string for timexes.
   *
   * It is assumed that a single timex denotes the start date / the date when something occurred,
   * whereas two timexes denote an interval.
   *
   * More than two timexes most likely indicate a parser/semantic error and therefore cause an exception.
   *
   * @param str infobox property value
   * @param subjectUri the article's name
   * @return a tuple of optional start and end timex values
   */
def parseDates(str: String, valueParserResult: Option[Any], subjectUri: String): (Option[String], Option[String]) = {
val dateTimexes = timexPipeline.process(str).filter(_.ttype == "DATE")
    // in the case of an infobox property, a timex which overlaps with the property value
    // is likely to be a false positive
val trueTimexes = valueParserResult match {
// value parser result has type (value as Double, unit, value as String, unit as String)
case Some((_, _, valueString: String, unitString)) =>
val noOverlapWithValueParserResult = buildNoOverlapCheckFunction(str, valueString)
dateTimexes.filter(noOverlapWithValueParserResult)
case Some((doubleValue, valueString: String)) =>
val noOverlapWithValueParserResult = buildNoOverlapCheckFunction(str, valueString)
dateTimexes.filter(noOverlapWithValueParserResult)
case _ => dateTimexes
}
trueTimexes.length match {
case 0 => (None, None)
case 1 => (getTimexValue(trueTimexes.head), None)
case 2 =>
// interval found -> store in temporal order
val first = getTimexValue(trueTimexes.head)
val second = getTimexValue(trueTimexes.last)
(first,second) match {
case (Some(first), Some(second)) if first <= second => (Some(first), Some(second))
case (Some(first), Some(second)) if first > second => (Some(second), Some(first))
case (None, Some(second)) => (Some(second), None)
case (Some(first), None) => (Some(first), None)
case (None, None) => (None, None)
}
case i if i > 2 =>
throw new UnexpectedNumberOfTimexesException(i, trueTimexes.map(t => t.toString))
}
}
/**
* Ensures only timexes which have a year component are used
* -> Currently suffices for company dataset
* @param timex
* @return
*/
def getTimexValue(timex: Timex): Option[String] = timex.value match {
case str if str.length < 4 =>
logger.trace("Timex with less than the year: " + str + " in : " + timex.toString)
None
case str if str.substring(0,4).contains("X") =>
logger.trace("Year component unknown: " + str + " in : " + timex.toString )
None
case str =>Some(str)
}
// /** Escapes the 5 predefined XML characters
// *
// * See http://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references
//    * " &quot;
//    * ' &apos;
//    * < &lt;
//    * > &gt;
//    * & &amp;
// *
// * @param str
// * @return escaped string
// */
// private def escapePredefinedXMLCharacters(str: String) = {
// // result will be an XML -> escape predefined XML symbols
// val sb = new StringBuilder()
// for(i <- 0 until str.length) {
// str.charAt(i) match {
//      case '"' => sb.append("&quot;")
//      case '\\'' => sb.append("&apos;")
//      case '<' => sb.append("&lt;")
//      case '>' => sb.append("&gt;")
//      case '&' => sb.append("&amp;")
// case c => sb.append(c)
// }
// }
// sb.toString
// }
private def sizeOf(o: Object, s: String) = {
// logger.error(s + ": " + SizeOf.humanReadable(SizeOf.deepSizeOf(o)))
}
def buildNoOverlapCheckFunction(str: String, valueString: String) = {
val valueBegin = str.indexOf(valueString)
val valueEnd = valueBegin + valueString.length
valueBegin match {
case -1 => (timex: Timex) => true // don't filter anything if value string can't be matched
case _ => (timex: Timex) => timex.end < valueBegin || timex.begin > valueEnd
}
}
}
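// Illustrative only (not part of the original source): the two-timex branch of
// `parseDates` above stores an interval in temporal order by comparing the
// (ISO-like) timex value strings. A standalone sketch of just that ordering rule:
object TimexIntervalOrderSketch {
  def orderInterval(a: Option[String], b: Option[String]): (Option[String], Option[String]) =
    (a, b) match {
      case (Some(x), Some(y)) if x <= y => (Some(x), Some(y))
      case (Some(x), Some(y))           => (Some(y), Some(x))
      case (None, Some(y))              => (Some(y), None)
      case other                        => other
    }

  def demo(): Unit = {
    assert(orderInterval(Some("2008-04"), Some("2001")) == (Some("2001") -> Some("2008-04")))
    assert(orderInterval(Some("1999"), None) == (Some("1999") -> None))
  }
}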
|
normalerweise/mte
|
app/extraction/parser/AdvancedTimexParser.scala
|
Scala
|
gpl-2.0
| 6,197 |
import scala.reflect.macros.whitebox._
import scala.language.experimental.macros
object Macros {
def impl(c: Context) = {
var messages = List[String]()
def println(msg: String) = messages :+= msg
import c.universe._
def test(sym: Symbol): Unit = {
println(s"uninitialized ${sym.name}: ${showDecl(sym)}")
sym.info // NOTE: not fullyInitializeSymbol, so some parts may still be LazyTypes
println(s"initialized ${sym.name}: ${showDecl(sym)}")
}
println("compile-time")
test(c.mirror.staticClass("D"))
test(c.mirror.staticClass("D").info.member(TermName("x")))
test(c.mirror.staticClass("D").info.member(TermName("y")))
test(c.mirror.staticClass("D").info.member(TermName("z")))
test(c.mirror.staticClass("D").info.member(TermName("t")))
test(c.mirror.staticClass("D").info.member(TypeName("W")))
test(c.mirror.staticClass("D").info.member(TypeName("C")))
test(c.mirror.staticClass("D").info.member(TermName("O")))
q"..${messages.map(msg => q"println($msg)")}"
}
def foo: Any = macro impl
}
|
felixmulder/scala
|
test/files/run/showdecl/Macros_1.scala
|
Scala
|
bsd-3-clause
| 1,075 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import org.junit._
import org.scalatest.junit.JUnitSuite
import junit.framework.Assert._
import java.nio.ByteBuffer
import kafka.message.{Message, ByteBufferMessageSet}
import kafka.cluster.Broker
import collection.mutable._
import kafka.common.{TopicAndPartition, ErrorMapping, OffsetMetadataAndError}
import kafka.controller.LeaderIsrAndControllerEpoch
object SerializationTestUtils{
private val topic1 = "test1"
private val topic2 = "test2"
private val leader1 = 0
private val isr1 = List(0, 1, 2)
private val leader2 = 0
private val isr2 = List(0, 2, 3)
private val partitionDataFetchResponse0 = new FetchResponsePartitionData(new ByteBufferMessageSet(new Message("first message".getBytes)))
private val partitionDataFetchResponse1 = new FetchResponsePartitionData(new ByteBufferMessageSet(new Message("second message".getBytes)))
private val partitionDataFetchResponse2 = new FetchResponsePartitionData(new ByteBufferMessageSet(new Message("third message".getBytes)))
private val partitionDataFetchResponse3 = new FetchResponsePartitionData(new ByteBufferMessageSet(new Message("fourth message".getBytes)))
private val partitionDataFetchResponseMap = Map((0, partitionDataFetchResponse0), (1, partitionDataFetchResponse1), (2, partitionDataFetchResponse2), (3, partitionDataFetchResponse3))
private val topicDataFetchResponse = {
val groupedData = Array(topic1, topic2).flatMap(topic =>
partitionDataFetchResponseMap.map(partitionAndData =>
(TopicAndPartition(topic, partitionAndData._1), partitionAndData._2)))
collection.immutable.Map(groupedData:_*)
}
private val partitionDataMessage0 = new ByteBufferMessageSet(new Message("first message".getBytes))
private val partitionDataMessage1 = new ByteBufferMessageSet(new Message("second message".getBytes))
private val partitionDataMessage2 = new ByteBufferMessageSet(new Message("third message".getBytes))
private val partitionDataMessage3 = new ByteBufferMessageSet(new Message("fourth message".getBytes))
private val partitionDataProducerRequestArray = Array(partitionDataMessage0, partitionDataMessage1, partitionDataMessage2, partitionDataMessage3)
private val topicDataProducerRequest = {
val groupedData = Array(topic1, topic2).flatMap(topic =>
partitionDataProducerRequestArray.zipWithIndex.map
{
case(partitionDataMessage, partition) =>
(TopicAndPartition(topic, partition), partitionDataMessage)
})
collection.mutable.Map(groupedData:_*)
}
private val requestInfos = collection.immutable.Map(
TopicAndPartition(topic1, 0) -> PartitionFetchInfo(1000, 100),
TopicAndPartition(topic1, 1) -> PartitionFetchInfo(2000, 100),
TopicAndPartition(topic1, 2) -> PartitionFetchInfo(3000, 100),
TopicAndPartition(topic1, 3) -> PartitionFetchInfo(4000, 100),
TopicAndPartition(topic2, 0) -> PartitionFetchInfo(1000, 100),
TopicAndPartition(topic2, 1) -> PartitionFetchInfo(2000, 100),
TopicAndPartition(topic2, 2) -> PartitionFetchInfo(3000, 100),
TopicAndPartition(topic2, 3) -> PartitionFetchInfo(4000, 100)
)
private val brokers = List(new Broker(0, "localhost", 1011), new Broker(1, "localhost", 1012), new Broker(2, "localhost", 1013))
private val partitionMetaData0 = new PartitionMetadata(0, Some(brokers.head), replicas = brokers, isr = brokers, errorCode = 0)
private val partitionMetaData1 = new PartitionMetadata(1, Some(brokers.head), replicas = brokers, isr = brokers.tail, errorCode = 1)
private val partitionMetaData2 = new PartitionMetadata(2, Some(brokers.head), replicas = brokers, isr = brokers, errorCode = 2)
private val partitionMetaData3 = new PartitionMetadata(3, Some(brokers.head), replicas = brokers, isr = brokers.tail.tail, errorCode = 3)
private val partitionMetaDataSeq = Seq(partitionMetaData0, partitionMetaData1, partitionMetaData2, partitionMetaData3)
private val topicmetaData1 = new TopicMetadata(topic1, partitionMetaDataSeq)
private val topicmetaData2 = new TopicMetadata(topic2, partitionMetaDataSeq)
def createTestLeaderAndIsrRequest() : LeaderAndIsrRequest = {
val leaderAndIsr1 = new LeaderIsrAndControllerEpoch(new LeaderAndIsr(leader1, 1, isr1, 1), 1)
val leaderAndIsr2 = new LeaderIsrAndControllerEpoch(new LeaderAndIsr(leader2, 1, isr2, 2), 1)
val map = Map(((topic1, 0), PartitionStateInfo(leaderAndIsr1, 3)),
((topic2, 0), PartitionStateInfo(leaderAndIsr2, 3)))
new LeaderAndIsrRequest(map.toMap, collection.immutable.Set[Broker](), 0, 1, 0)
}
def createTestLeaderAndIsrResponse() : LeaderAndIsrResponse = {
val responseMap = Map(((topic1, 0), ErrorMapping.NoError),
((topic2, 0), ErrorMapping.NoError))
new LeaderAndIsrResponse(1, responseMap)
}
def createTestStopReplicaRequest() : StopReplicaRequest = {
new StopReplicaRequest(controllerId = 0, controllerEpoch = 1, correlationId = 0, deletePartitions = true,
partitions = collection.immutable.Set((topic1, 0), (topic2, 0)))
}
def createTestStopReplicaResponse() : StopReplicaResponse = {
val responseMap = Map(((topic1, 0), ErrorMapping.NoError),
((topic2, 0), ErrorMapping.NoError))
new StopReplicaResponse(0, responseMap.toMap)
}
def createTestProducerRequest: ProducerRequest = {
new ProducerRequest(1, "client 1", 0, 1000, topicDataProducerRequest)
}
def createTestProducerResponse: ProducerResponse =
ProducerResponse(1, Map(
TopicAndPartition(topic1, 0) -> ProducerResponseStatus(0.toShort, 10001),
TopicAndPartition(topic2, 0) -> ProducerResponseStatus(0.toShort, 20001)
))
def createTestFetchRequest: FetchRequest = {
new FetchRequest(requestInfo = requestInfos)
}
def createTestFetchResponse: FetchResponse = {
FetchResponse(1, topicDataFetchResponse)
}
def createTestOffsetRequest = new OffsetRequest(
collection.immutable.Map(TopicAndPartition(topic1, 1) -> PartitionOffsetRequestInfo(1000, 200)),
replicaId = 0
)
def createTestOffsetResponse: OffsetResponse = {
new OffsetResponse(0, collection.immutable.Map(
TopicAndPartition(topic1, 1) -> PartitionOffsetsResponse(ErrorMapping.NoError, Seq(1000l, 2000l, 3000l, 4000l)))
)
}
def createTestTopicMetadataRequest: TopicMetadataRequest = {
new TopicMetadataRequest(1, 1, "client 1", Seq(topic1, topic2))
}
def createTestTopicMetadataResponse: TopicMetadataResponse = {
new TopicMetadataResponse(Seq(topicmetaData1, topicmetaData2), 1)
}
def createTestOffsetCommitRequest: OffsetCommitRequest = {
new OffsetCommitRequest("group 1", collection.immutable.Map(
TopicAndPartition(topic1, 0) -> OffsetMetadataAndError(offset=42L, metadata="some metadata"),
TopicAndPartition(topic1, 1) -> OffsetMetadataAndError(offset=100L, metadata=OffsetMetadataAndError.NoMetadata)
))
}
def createTestOffsetCommitResponse: OffsetCommitResponse = {
new OffsetCommitResponse(collection.immutable.Map(
TopicAndPartition(topic1, 0) -> ErrorMapping.NoError,
TopicAndPartition(topic1, 1) -> ErrorMapping.UnknownTopicOrPartitionCode
))
}
def createTestOffsetFetchRequest: OffsetFetchRequest = {
new OffsetFetchRequest("group 1", Seq(
TopicAndPartition(topic1, 0),
TopicAndPartition(topic1, 1)
))
}
def createTestOffsetFetchResponse: OffsetFetchResponse = {
new OffsetFetchResponse(collection.immutable.Map(
TopicAndPartition(topic1, 0) -> OffsetMetadataAndError(42L, "some metadata", ErrorMapping.NoError),
TopicAndPartition(topic1, 1) -> OffsetMetadataAndError(100L, OffsetMetadataAndError.NoMetadata,
ErrorMapping.UnknownTopicOrPartitionCode)
))
}
}
class RequestResponseSerializationTest extends JUnitSuite {
private val leaderAndIsrRequest = SerializationTestUtils.createTestLeaderAndIsrRequest
private val leaderAndIsrResponse = SerializationTestUtils.createTestLeaderAndIsrResponse
private val stopReplicaRequest = SerializationTestUtils.createTestStopReplicaRequest
private val stopReplicaResponse = SerializationTestUtils.createTestStopReplicaResponse
private val producerRequest = SerializationTestUtils.createTestProducerRequest
private val producerResponse = SerializationTestUtils.createTestProducerResponse
private val fetchRequest = SerializationTestUtils.createTestFetchRequest
private val offsetRequest = SerializationTestUtils.createTestOffsetRequest
private val offsetResponse = SerializationTestUtils.createTestOffsetResponse
private val topicMetadataRequest = SerializationTestUtils.createTestTopicMetadataRequest
private val topicMetadataResponse = SerializationTestUtils.createTestTopicMetadataResponse
private val offsetCommitRequest = SerializationTestUtils.createTestOffsetCommitRequest
private val offsetCommitResponse = SerializationTestUtils.createTestOffsetCommitResponse
private val offsetFetchRequest = SerializationTestUtils.createTestOffsetFetchRequest
private val offsetFetchResponse = SerializationTestUtils.createTestOffsetFetchResponse
@Test
def testSerializationAndDeserialization() {
var buffer: ByteBuffer = ByteBuffer.allocate(leaderAndIsrRequest.sizeInBytes())
leaderAndIsrRequest.writeTo(buffer)
buffer.rewind()
val deserializedLeaderAndIsrRequest = LeaderAndIsrRequest.readFrom(buffer)
    assertEquals("The original and deserialized leaderAndISRRequest should be the same", leaderAndIsrRequest,
deserializedLeaderAndIsrRequest)
buffer = ByteBuffer.allocate(leaderAndIsrResponse.sizeInBytes())
leaderAndIsrResponse.writeTo(buffer)
buffer.rewind()
val deserializedLeaderAndIsrResponse = LeaderAndIsrResponse.readFrom(buffer)
    assertEquals("The original and deserialized leaderAndISRResponse should be the same", leaderAndIsrResponse,
deserializedLeaderAndIsrResponse)
buffer = ByteBuffer.allocate(stopReplicaRequest.sizeInBytes())
stopReplicaRequest.writeTo(buffer)
buffer.rewind()
val deserializedStopReplicaRequest = StopReplicaRequest.readFrom(buffer)
    assertEquals("The original and deserialized stopReplicaRequest should be the same", stopReplicaRequest,
deserializedStopReplicaRequest)
buffer = ByteBuffer.allocate(stopReplicaResponse.sizeInBytes())
stopReplicaResponse.writeTo(buffer)
buffer.rewind()
val deserializedStopReplicaResponse = StopReplicaResponse.readFrom(buffer)
    assertEquals("The original and deserialized stopReplicaResponse should be the same", stopReplicaResponse,
deserializedStopReplicaResponse)
buffer = ByteBuffer.allocate(producerRequest.sizeInBytes)
producerRequest.writeTo(buffer)
buffer.rewind()
val deserializedProducerRequest = ProducerRequest.readFrom(buffer)
    assertEquals("The original and deserialized producerRequest should be the same", producerRequest,
deserializedProducerRequest)
buffer = ByteBuffer.allocate(producerResponse.sizeInBytes)
producerResponse.writeTo(buffer)
buffer.rewind()
val deserializedProducerResponse = ProducerResponse.readFrom(buffer)
    assertEquals("The original and deserialized producerResponse should be the same: [%s], [%s]".format(producerResponse, deserializedProducerResponse), producerResponse,
deserializedProducerResponse)
buffer = ByteBuffer.allocate(fetchRequest.sizeInBytes)
fetchRequest.writeTo(buffer)
buffer.rewind()
val deserializedFetchRequest = FetchRequest.readFrom(buffer)
    assertEquals("The original and deserialized fetchRequest should be the same", fetchRequest,
deserializedFetchRequest)
buffer = ByteBuffer.allocate(offsetRequest.sizeInBytes)
offsetRequest.writeTo(buffer)
buffer.rewind()
val deserializedOffsetRequest = OffsetRequest.readFrom(buffer)
    assertEquals("The original and deserialized offsetRequest should be the same", offsetRequest,
deserializedOffsetRequest)
buffer = ByteBuffer.allocate(offsetResponse.sizeInBytes)
offsetResponse.writeTo(buffer)
buffer.rewind()
val deserializedOffsetResponse = OffsetResponse.readFrom(buffer)
    assertEquals("The original and deserialized offsetResponse should be the same", offsetResponse,
deserializedOffsetResponse)
buffer = ByteBuffer.allocate(topicMetadataRequest.sizeInBytes())
topicMetadataRequest.writeTo(buffer)
buffer.rewind()
val deserializedTopicMetadataRequest = TopicMetadataRequest.readFrom(buffer)
    assertEquals("The original and deserialized topicMetadataRequest should be the same", topicMetadataRequest,
deserializedTopicMetadataRequest)
buffer = ByteBuffer.allocate(topicMetadataResponse.sizeInBytes)
topicMetadataResponse.writeTo(buffer)
buffer.rewind()
val deserializedTopicMetadataResponse = TopicMetadataResponse.readFrom(buffer)
    assertEquals("The original and deserialized topicMetadataResponse should be the same", topicMetadataResponse,
deserializedTopicMetadataResponse)
buffer = ByteBuffer.allocate(offsetCommitRequest.sizeInBytes)
offsetCommitRequest.writeTo(buffer)
buffer.rewind()
val deserializedOffsetCommitRequest = OffsetCommitRequest.readFrom(buffer)
    assertEquals("The original and deserialized offsetCommitRequest should be the same", offsetCommitRequest,
deserializedOffsetCommitRequest)
buffer = ByteBuffer.allocate(offsetCommitResponse.sizeInBytes)
offsetCommitResponse.writeTo(buffer)
buffer.rewind()
val deserializedOffsetCommitResponse = OffsetCommitResponse.readFrom(buffer)
    assertEquals("The original and deserialized offsetCommitResponse should be the same", offsetCommitResponse,
deserializedOffsetCommitResponse)
buffer = ByteBuffer.allocate(offsetFetchRequest.sizeInBytes)
offsetFetchRequest.writeTo(buffer)
buffer.rewind()
val deserializedOffsetFetchRequest = OffsetFetchRequest.readFrom(buffer)
    assertEquals("The original and deserialized offsetFetchRequest should be the same", offsetFetchRequest,
deserializedOffsetFetchRequest)
buffer = ByteBuffer.allocate(offsetFetchResponse.sizeInBytes)
offsetFetchResponse.writeTo(buffer)
buffer.rewind()
val deserializedOffsetFetchResponse = OffsetFetchResponse.readFrom(buffer)
    assertEquals("The original and deserialized offsetFetchResponse should be the same", offsetFetchResponse,
deserializedOffsetFetchResponse)
}
}
|
akosiaris/kafka
|
core/src/test/scala/unit/kafka/api/RequestResponseSerializationTest.scala
|
Scala
|
apache-2.0
| 15,381 |
package org.kirhgoff.lastobot
import java.util.Random
import com.typesafe.scalalogging.LazyLogging
/**
* Created by kirilllastovirya on 5/05/2016.
*/
trait BotLocale
case object Russian extends BotLocale {
override def toString = "Russian"
}
case object English extends BotLocale {
override def toString = "English"
}
object BotLocale extends LazyLogging {
def apply(value:String): BotLocale = value match {
case "Russian" => Russian
case "English" => English
case other => {
logger.error(s"Incorrect locale value:$other")
English
}
}
}
object Recognizer {
//TODO use regexps
def russian(text: String) = text.startsWith("Русский") || text.startsWith("русский")
def english(text: String) = text.startsWith("English") || text.startsWith("english")
def yes (text:String) = text.startsWith("да") || text.startsWith("yes")
def no(text:String) = text.startsWith("нет") || text.startsWith("no")
  def finished(text: String) = text.equalsIgnoreCase("готово") || text.equalsIgnoreCase("submit")
}
//TODO refactor locale match to partial function
object Phrase {
val random = new Random
def anyOf(text:String*) = text(random.nextInt(text.length))
def phraseCase(text:String, vars:Any*)
(caseLocale: BotLocale)
(implicit locale: BotLocale) :PartialFunction[BotLocale, String] = {
case locale if locale == caseLocale => text.format(vars.map(_.asInstanceOf[AnyRef]): _*)
}
def russian (text:String, vars:Any*)(implicit locale: BotLocale) = phraseCase(text, vars:_*)(Russian)
def english (text:String, vars:Any*)(implicit locale: BotLocale) = phraseCase(text, vars:_*)(English)
def russianArray(text:String*)(implicit locale: BotLocale)
:PartialFunction[BotLocale, Array[String]] =
{case locale if locale == Russian => text.toArray}
def englishArray(text:String*)(implicit locale: BotLocale)
:PartialFunction[BotLocale, Array[String]] =
//{case locale if locale == English => text.asInstanceOf[Array[String]]}
{case locale if locale == English => text.toArray}
//TODO make implicits
def compose(
partial1:PartialFunction[BotLocale, String],
partial2:PartialFunction[BotLocale, String]
)(implicit locale: BotLocale) =
partial1.orElse(partial2).apply(locale)
def composeArray(
partial1:PartialFunction[BotLocale, Array[String]],
partial2:PartialFunction[BotLocale, Array[String]]
)(implicit locale: BotLocale) =
partial1.orElse(partial2).apply(locale)
def intro(implicit locale: BotLocale) = compose(
    english(
      "This is a prototype of SmokeBot [v1.1]\n" +
        "The idea of the bot is that when you need to control something in your life " +
"you can choose a way to measure it and make the bot take care of measurements " +
"you just provide the numbers and bot will be able to give you statistics. " +
"As a first attempt we take a smoking habit. Every time you smoke (or when you " +
"notice a bunch of stubs in your ashtray) you let bot know the number with command " +
"/smoke (you could specify the amount). When you want to see how many cigarettes " +
"you smoke, you ask for /stats and bot gives you some stats. So... how may I serve " +
"you, Master?"),
    russian(
      "Это прототип бота SmokeBot [v1.1]\n" +
"Идея состоит в том, что если вам хочется контроллировать что-то в вашей " +
"жизни, один из способов - это выбрать способ мерять это, и с помощью бота наблюдать " +
"за этим измерением, будь то ваш вес или количество выкуренных вами сигарет. Вы можете " +
"сообщить боту сколько вы выкурили сигарет недавно /smoke, а он будет готов выдать вам " +
"статистику. Так что... чем я могу служить вам, Хозяин?")
)
def obey(implicit locale: BotLocale): String = compose (
english(anyOf("Yes, my master!", "I am listening, my master!")),
russian(anyOf("Да, хозяин!", "Да, мой господин!", "Слушаю и повинуюсь!"))
)
def whatFoodToServe(implicit locale: BotLocale): String = compose (
english(anyOf("What food may I serve you, my master?", "What would you like, master?")),
russian(anyOf("Чего изволити?", "Чтобы вы хотели?"))
)
def foodChoices(implicit locale: BotLocale): Array[String] = composeArray (
englishArray("bread", "butter", "beer"),
russianArray("хлеб", "масло", "пиво")
)
def sayYes(implicit locale: BotLocale): String = compose (
    english("Say \"yes\""),
    russian("Скажи \"да\"")
)
def yesNo(implicit locale: BotLocale): Array[String] = composeArray (
russianArray("да", "нет"),
englishArray("yes", "no")
)
def abuseReply(implicit locale: BotLocale): String = compose (
russian("Манда!"),
english("ABKHSS")
)
def what(implicit locale: BotLocale): String = compose (
english("You got me confused"),
russian("Ничего не понял")
)
def cancelled(implicit locale: BotLocale): String = compose (
english("OK, cancelled."),
russian("Отменяю")
)
def cigarettes(implicit locale: BotLocale): String = compose (
russian("Сигарет"),
english("Cigarettes")
)
def howManyCigarettes(implicit locale: BotLocale): String = compose (
russian(anyOf(
s"Хозяин, сколько сигарет вы выкурили?",
s"Готов записывать, хозяин, сколько сигарет?",
s"Сколько сигарет, хозяин?"
)),
english(anyOf(
s"How many cigarettes, master?",
s"Ready to save, how many, master?",
s"How many, master?"
))
)
def youSmoked(count: Int)(implicit locale: BotLocale): String = compose (
english(anyOf(
s"Done, you smoked $count cigarettes, master",
s"$count cigarettes, master, got it.",
s"Saving $count cigarettes"
)), //TODO add cmon, so much?!
    russian(anyOf(
      s"Хозяин, сигарет выкурено: $count",
      s"$count сигарет, пишу в базу",
      s"Записываю: $count сигарет"
))
)
def smokedOverall(smoked: Int)(implicit locale: BotLocale): String = compose (
english(s"Master, you smoke $smoked cigarettes overall"),
russian(s"Хозяин, вы выкурили всего $smoked сигарет")
)
def noDataYet (implicit locale: BotLocale): String = compose (
english(s"Master, seems you have no data available yet!"),
russian(s"Хозяин, данных пока нет.")
)
def weight(implicit locale: BotLocale): String = compose (
russian("Вес"),
english("Weight")
)
def weightMeasured(value: Double)(implicit locale: BotLocale): String = compose(
english(anyOf(
s"Saving your weight, master - $value kilos",
s"Got it, master, $value kilos",
s"$value kilos it is, master"
)),
russian(anyOf(
s"Сохраняю вес - $value кг.",
s"Текущий вес $value кг, хозяин",
s"Хорошо, хозяин, $value килограмм"
))
)
def typeYourWeight(implicit locale: BotLocale): String = compose(
english(anyOf(
s"I am ready, master, what is your weight?",
s"What is your current weight, master?"
)),
russian(anyOf(
s"Хозяин, сколько?",
s"Готов записать ваш текущий вес, хозяин. Сколько?",
s"Ваш текущий вес, хозяин?"
))
)
def whenFinishedTypeSubmit(implicit locale: BotLocale): String = compose(
    english(anyOf(
      s"Listening master, my engineers will be working day and night to implement this! Type 'submit' to finish it.",
      s"Master, my guys will do their best to do this; write 'submit' as a separate command when you are finished."
)),
russian(anyOf(
s"Хозяин, команда разработчиков будет работать над вашим предложением! Напишите 'готово', когда закончите",
s"Слушаю, хозяин, напишите 'готово', как закончите."
))
)
def confirmWeight(value: Double)(implicit locale: BotLocale): String = compose(
english(anyOf(
s"Saving $value kilos",
s"Got it, master, $value kilos",
s"$value kilos it is, master"
)),
russian(anyOf(
s"Сохраняю вес - $value кг.",
s"Текущий вес $value кг, хозяин",
s"Хорошо, хозяин, $value килограмм"
))
)
def englishRussian: Array[String] = Array("English", "Русский")
def changeLocale: String = "Choose locale / выберите язык"
}
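// Illustrative usage sketch, not part of the original file: phrases resolve their language from
// the implicit BotLocale in scope, so callers only supply the data (here, a count) once. The
// object and method names below are made up for the example.
object PhraseUsageExample {
  def demo(): Unit = {
    implicit val locale: BotLocale = Russian
    println(Phrase.obey)          // prints one of the Russian "obey" variants
    println(Phrase.youSmoked(3))  // prints a Russian sentence mentioning 3 cigarettes
  }
}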
|
kirhgoff/lastobot
|
src/main/scala/org/kirhgoff/lastobot/Phrase.scala
|
Scala
|
gpl-3.0
| 9,160 |
package com.twitter.finatra.http
/**
* Enumeration which determines which
* server a given test request should be
* sent to (ExternalServer or AdminServer).
*/
sealed trait RouteHint
object RouteHint {
/** No hint is provided. Determination should be based on the route path */
case object None extends RouteHint
/** Request should be sent to the external server */
case object ExternalServer extends RouteHint
/** Request should be sent to the admin server */
case object AdminServer extends RouteHint
}
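// Illustrative sketch, not part of the original file: one way a test helper might act on the
// hint. The helper, the ports and the fallback behaviour are hypothetical, not Finatra API.
object RouteHintUsageExample {
  def baseUri(hint: RouteHint, externalPort: Int, adminPort: Int): String = hint match {
    case RouteHint.AdminServer    => s"http://localhost:$adminPort"
    case RouteHint.ExternalServer => s"http://localhost:$externalPort"
    case RouteHint.None           => s"http://localhost:$externalPort" // real code would inspect the route path
  }
}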
|
twitter/finatra
|
http-server/src/test/scala/com/twitter/finatra/http/RouteHint.scala
|
Scala
|
apache-2.0
| 527 |
// Solution-5.scala
// Solution to Exercise 5 in "Pattern Matching"
def forecast(temp:Int):String = {
temp match {
case 100 => "Sunny"
case 80 => "Mostly Sunny"
case 50 => "Partly Sunny"
case 20 => "Mostly Cloudy"
case 0 => "Cloudy"
case _ => "Unknown"
}
}
def forecastLoop(data:Vector[Int]):Unit = {
for(s <-data)
println("forecast(" + s + ") is " + forecast(s))
}
val allData = forecastLoop(Vector(100, 80, 50, 20, 15, 80, 20))
/* OUTPUT_SHOULD_BE
forecast(100) is Sunny
forecast(80) is Mostly Sunny
forecast(50) is Partly Sunny
forecast(20) is Mostly Cloudy
forecast(15) is Unknown
forecast(80) is Mostly Sunny
forecast(20) is Mostly Cloudy
*/
|
P7h/ScalaPlayground
|
Atomic Scala/atomic-scala-solutions/21_PatternMatching/Solution-5.scala
|
Scala
|
apache-2.0
| 685 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate.support
import scala.util.parsing.input.{NoPosition, Position}
case class CompilerError(file: String, message: String, pos: Position = NoPosition, original: CompilerError = null) {
}
|
dnatic09/scalate
|
scalate-core/src/main/scala/org/fusesource/scalate/support/CompileError.scala
|
Scala
|
apache-2.0
| 956 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.catalyst.trees.TreePattern.LEFT_SEMI_OR_ANTI_JOIN
/**
* This rule is a variant of [[PushPredicateThroughNonJoin]] which can handle
* pushing down Left semi and Left Anti joins below the following operators.
* 1) Project
* 2) Window
* 3) Union
* 4) Aggregate
 * 5) Other permissible unary operators. Please see [[PushPredicateThroughNonJoin.canPushThrough]].
*/
object PushDownLeftSemiAntiJoin extends Rule[LogicalPlan]
with PredicateHelper
with JoinSelectionHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan.transformWithPruning(
_.containsPattern(LEFT_SEMI_OR_ANTI_JOIN), ruleId) {
// LeftSemi/LeftAnti over Project
case Join(p @ Project(pList, gChild), rightOp, LeftSemiOrAnti(joinType), joinCond, hint)
if pList.forall(_.deterministic) &&
!pList.exists(ScalarSubquery.hasCorrelatedScalarSubquery) &&
canPushThroughCondition(Seq(gChild), joinCond, rightOp) =>
if (joinCond.isEmpty) {
// No join condition, just push down the Join below Project
p.copy(child = Join(gChild, rightOp, joinType, joinCond, hint))
} else {
val aliasMap = getAliasMap(p)
val newJoinCond = if (aliasMap.nonEmpty) {
Option(replaceAlias(joinCond.get, aliasMap))
} else {
joinCond
}
p.copy(child = Join(gChild, rightOp, joinType, newJoinCond, hint))
}
// LeftSemi/LeftAnti over Aggregate, only push down if join can be planned as broadcast join.
case join @ Join(agg: Aggregate, rightOp, LeftSemiOrAnti(_), _, _)
if agg.aggregateExpressions.forall(_.deterministic) && agg.groupingExpressions.nonEmpty &&
!agg.aggregateExpressions.exists(ScalarSubquery.hasCorrelatedScalarSubquery) &&
canPlanAsBroadcastHashJoin(join, conf) =>
val aliasMap = getAliasMap(agg)
val canPushDownPredicate = (predicate: Expression) => {
val replaced = replaceAlias(predicate, aliasMap)
predicate.references.nonEmpty &&
replaced.references.subsetOf(agg.child.outputSet ++ rightOp.outputSet)
}
val makeJoinCondition = (predicates: Seq[Expression]) => {
replaceAlias(predicates.reduce(And), aliasMap)
}
pushDownJoin(join, canPushDownPredicate, makeJoinCondition)
// LeftSemi/LeftAnti over Window
case join @ Join(w: Window, rightOp, LeftSemiOrAnti(_), _, _)
if w.partitionSpec.forall(_.isInstanceOf[AttributeReference]) =>
val partitionAttrs = AttributeSet(w.partitionSpec.flatMap(_.references)) ++ rightOp.outputSet
pushDownJoin(join, _.references.subsetOf(partitionAttrs), _.reduce(And))
// LeftSemi/LeftAnti over Union
case Join(union: Union, rightOp, LeftSemiOrAnti(joinType), joinCond, hint)
if canPushThroughCondition(union.children, joinCond, rightOp) =>
if (joinCond.isEmpty) {
// Push down the Join below Union
val newGrandChildren = union.children.map { Join(_, rightOp, joinType, joinCond, hint) }
union.withNewChildren(newGrandChildren)
} else {
val output = union.output
val newGrandChildren = union.children.map { grandchild =>
val newCond = joinCond.get transform {
case e if output.exists(_.semanticEquals(e)) =>
grandchild.output(output.indexWhere(_.semanticEquals(e)))
}
assert(newCond.references.subsetOf(grandchild.outputSet ++ rightOp.outputSet))
Join(grandchild, rightOp, joinType, Option(newCond), hint)
}
union.withNewChildren(newGrandChildren)
}
// LeftSemi/LeftAnti over UnaryNode
case join @ Join(u: UnaryNode, rightOp, LeftSemiOrAnti(_), _, _)
if PushPredicateThroughNonJoin.canPushThrough(u) && u.expressions.forall(_.deterministic) =>
val validAttrs = u.child.outputSet ++ rightOp.outputSet
pushDownJoin(join, _.references.subsetOf(validAttrs), _.reduce(And))
}
/**
   * Check if we can safely push a join through a project or union by making sure that the
   * attributes referred to in the join condition are not also produced by the plan they are moved
   * into. This can happen when both sides of the join refer to the same source (self join). This
   * function makes sure that the join condition does not refer to ambiguous attributes (i.e.
   * attributes present in both legs of the join); otherwise the resulting plan would be invalid.
*/
private def canPushThroughCondition(
plans: Seq[LogicalPlan],
condition: Option[Expression],
rightOp: LogicalPlan): Boolean = {
val attributes = AttributeSet(plans.flatMap(_.output))
if (condition.isDefined) {
val matched = condition.get.references.intersect(rightOp.outputSet).intersect(attributes)
matched.isEmpty
} else {
true
}
}
private def pushDownJoin(
join: Join,
canPushDownPredicate: Expression => Boolean,
makeJoinCondition: Seq[Expression] => Expression): LogicalPlan = {
assert(join.left.children.length == 1)
if (join.condition.isEmpty) {
join.left.withNewChildren(Seq(join.copy(left = join.left.children.head)))
} else {
val (pushDown, stayUp) = splitConjunctivePredicates(join.condition.get)
.partition(canPushDownPredicate)
// Check if the remaining predicates do not contain columns from the right hand side of the
// join. Since the remaining predicates will be kept as a filter over the operator under join,
// this check is necessary after the left-semi/anti join is pushed down. The reason is, for
// this kind of join, we only output from the left leg of the join.
val referRightSideCols = AttributeSet(stayUp.toSet).intersect(join.right.outputSet).nonEmpty
if (pushDown.isEmpty || referRightSideCols) {
join
} else {
val newPlan = join.left.withNewChildren(Seq(join.copy(
left = join.left.children.head, condition = Some(makeJoinCondition(pushDown)))))
// If there is no more filter to stay up, return the new plan that has join pushed down.
if (stayUp.isEmpty) {
newPlan
} else {
join.joinType match {
            // In case of a left semi join, the part of the join condition that does not refer
            // to attributes of the grandchild is kept as a Filter above.
case LeftSemi => Filter(stayUp.reduce(And), newPlan)
// In case of left-anti join, the join is pushed down only when the entire join
// condition is eligible to be pushed down to preserve the semantics of left-anti join.
case _ => join
}
}
}
}
}
}
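// Illustrative sketch, not part of the original rule: it builds a small analyzed plan with the
// Catalyst test DSL and applies the rule directly, so the LeftSemi join being pushed below the
// Project can be inspected. The relations and column names are made up for the example.
object PushDownLeftSemiAntiJoinExample {
  import org.apache.spark.sql.catalyst.dsl.expressions._
  import org.apache.spark.sql.catalyst.dsl.plans._
  def demo(): Unit = {
    val left = LocalRelation('a.int, 'b.int)
    val right = LocalRelation('c.int)
    // A LeftSemi join sitting on top of a Project over `left`.
    val query = left.select('a).join(right, joinType = LeftSemi, condition = Some('a === 'c))
    // After the rewrite, the Join sits below the Project in the plan tree.
    println(PushDownLeftSemiAntiJoin(query.analyze).treeString)
  }
}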
/**
* This rule is a variant of [[PushPredicateThroughJoin]] which can handle
* pushing down Left semi and Left Anti joins below a join operator. The
* allowable join types are:
* 1) Inner
* 2) Cross
* 3) LeftOuter
* 4) RightOuter
*
* TODO:
* Currently this rule can push down the left semi or left anti joins to either
* left or right leg of the child join. This matches the behaviour of `PushPredicateThroughJoin`
* when the left semi or left anti join is in expression form. We need to explore the possibility
 * of pushing the left semi/anti joins to both legs of the join if the join condition refers to
 * both the left and right legs of the child join.
*/
object PushLeftSemiLeftAntiThroughJoin extends Rule[LogicalPlan] with PredicateHelper {
/**
* Define an enumeration to identify whether a LeftSemi/LeftAnti join can be pushed down to
* the left leg or the right leg of the join.
*/
object PushdownDirection extends Enumeration {
val TO_LEFT_BRANCH, TO_RIGHT_BRANCH, NONE = Value
}
object AllowedJoin {
def unapply(join: Join): Option[Join] = join.joinType match {
case Inner | Cross | LeftOuter | RightOuter => Some(join)
case _ => None
}
}
/**
* Determine which side of the join a LeftSemi/LeftAnti join can be pushed to.
*/
private def pushTo(leftChild: Join, rightChild: LogicalPlan, joinCond: Option[Expression]) = {
val left = leftChild.left
val right = leftChild.right
val joinType = leftChild.joinType
val rightOutput = rightChild.outputSet
if (joinCond.nonEmpty) {
val conditions = splitConjunctivePredicates(joinCond.get)
val (leftConditions, rest) =
conditions.partition(_.references.subsetOf(left.outputSet ++ rightOutput))
val (rightConditions, commonConditions) =
rest.partition(_.references.subsetOf(right.outputSet ++ rightOutput))
if (rest.isEmpty && leftConditions.nonEmpty) {
// When the join conditions can be computed based on the left leg of
// leftsemi/anti join then push the leftsemi/anti join to the left side.
PushdownDirection.TO_LEFT_BRANCH
} else if (leftConditions.isEmpty && rightConditions.nonEmpty && commonConditions.isEmpty) {
// When the join conditions can be computed based on the attributes from right leg of
// leftsemi/anti join then push the leftsemi/anti join to the right side.
PushdownDirection.TO_RIGHT_BRANCH
} else {
PushdownDirection.NONE
}
} else {
/**
       * When the join condition is empty:
       * 1) for a left outer join or an inner join, push the leftsemi/anti join down
       *    to the left leg of the join.
       * 2) for a right outer join, push it down to the right leg of the join.
*/
joinType match {
case _: InnerLike | LeftOuter =>
PushdownDirection.TO_LEFT_BRANCH
case RightOuter =>
PushdownDirection.TO_RIGHT_BRANCH
case _ =>
PushdownDirection.NONE
}
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// push LeftSemi/LeftAnti down into the join below
case j @ Join(AllowedJoin(left), right, LeftSemiOrAnti(joinType), joinCond, parentHint) =>
val (childJoinType, childLeft, childRight, childCondition, childHint) =
(left.joinType, left.left, left.right, left.condition, left.hint)
val action = pushTo(left, right, joinCond)
action match {
case PushdownDirection.TO_LEFT_BRANCH
if (childJoinType == LeftOuter || childJoinType.isInstanceOf[InnerLike]) =>
// push down leftsemi/anti join to the left table
val newLeft = Join(childLeft, right, joinType, joinCond, parentHint)
Join(newLeft, childRight, childJoinType, childCondition, childHint)
case PushdownDirection.TO_RIGHT_BRANCH
if (childJoinType == RightOuter || childJoinType.isInstanceOf[InnerLike]) =>
// push down leftsemi/anti join to the right table
val newRight = Join(childRight, right, joinType, joinCond, parentHint)
Join(childLeft, newRight, childJoinType, childCondition, childHint)
case _ =>
// Do nothing
j
}
}
}
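// Illustrative sketch, not part of the original rule: a LeftSemi join over an inner join whose
// condition only touches the left leg, so the rule pushes it into that leg. The relations and
// column names are made up for the example.
object PushLeftSemiLeftAntiThroughJoinExample {
  import org.apache.spark.sql.catalyst.dsl.expressions._
  import org.apache.spark.sql.catalyst.dsl.plans._
  def demo(): Unit = {
    val t1 = LocalRelation('a.int)
    val t2 = LocalRelation('b.int)
    val t3 = LocalRelation('c.int)
    val query = t1.join(t2, joinType = Inner, condition = Some('a === 'b))
      .join(t3, joinType = LeftSemi, condition = Some('a === 'c))
    // After the rewrite, the LeftSemi join sits directly above t1, below the Inner join.
    println(PushLeftSemiLeftAntiThroughJoin(query.analyze).treeString)
  }
}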
|
BryanCutler/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/PushDownLeftSemiAntiJoin.scala
|
Scala
|
apache-2.0
| 11,948 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.analysis
import org.orbeon.dom.io.XMLWriter
import org.orbeon.oxf.pipeline.api.TransformerXMLReceiver
import org.orbeon.oxf.xforms.analysis.XPathAnalysis.writeXPathAnalysis
import org.orbeon.oxf.xforms.analysis.controls._
import org.orbeon.oxf.xforms.analysis.model.{Model, StaticBind}
import org.orbeon.oxf.xml.XMLReceiverSupport._
import org.orbeon.oxf.xml.dom.LocationDocumentResult
import org.orbeon.oxf.xml.{TransformerUtils, XMLReceiver}
import org.orbeon.xforms.Constants
object PartAnalysisDebugSupport {
import Private._
def writePart(partAnalysis: PartAnalysisRuntimeOps)(implicit receiver: XMLReceiver): Unit =
withDocument {
partAnalysis.findControlAnalysis(partAnalysis.startScope.prefixedIdForStaticId(Constants.DocumentId)) foreach recurse
}
def printPartAsXml(partAnalysis: PartAnalysisRuntimeOps): Unit =
println(partAsXmlString(partAnalysis))
def printElementAnalysis(a: ElementAnalysis): Unit = {
implicit val identity: TransformerXMLReceiver = TransformerUtils.getIdentityTransformerHandler
val result = new LocationDocumentResult
identity.setResult(result)
withDocument {
withElement("root") {
writeElementAnalysis(a)
}
}
println(result.getDocument.getRootElement.serializeToString(XMLWriter.PrettyFormat))
}
def printXPathAnalysis(xpa: XPathAnalysis): Unit = {
implicit val identity: TransformerXMLReceiver = TransformerUtils.getIdentityTransformerHandler
val result = new LocationDocumentResult
identity.setResult(result)
withDocument {
writeXPathAnalysis(xpa)
}
println(result.getDocument.getRootElement.serializeToString(XMLWriter.PrettyFormat))
}
private object Private {
def partAsXmlString(partAnalysis: PartAnalysisRuntimeOps): String = {
implicit val identity: TransformerXMLReceiver = TransformerUtils.getIdentityTransformerHandler
val result = new LocationDocumentResult
identity.setResult(result)
writePart(partAnalysis)
result.getDocument.getRootElement.serializeToString(XMLWriter.PrettyFormat)
}
def writeElementAnalysis(a: ElementAnalysis)(implicit receiver: XMLReceiver): Unit = {
a.bindingAnalysis match {
case Some(bindingAnalysis) if a.hasBinding =>
// For now there can be a binding analysis even if there is no binding on the control
// (hack to simplify determining which controls to update)
withElement("binding") {
writeXPathAnalysis(bindingAnalysis)
}
case _ => // NOP
}
a.valueAnalysis foreach { valueAnalysis =>
withElement("value") {
writeXPathAnalysis(valueAnalysis)
}
}
}
def writeModel(a: Model)(implicit receiver: XMLReceiver): Unit = {
writeElementAnalysis(a)
a.children.iterator filterNot
(e => e.isInstanceOf[StaticBind] || e.isInstanceOf[VariableAnalysisTrait]) foreach
recurse
a.variablesSeq foreach recurse
if (a.topLevelBinds.nonEmpty)
withElement("binds") {
for (bind <- a.topLevelBinds)
recurse(bind)
}
def writeInstanceList(name: String, values: collection.Set[String]): Unit =
if (values.nonEmpty)
withElement(name) {
for (value <- values)
element("instance", text = value)
}
writeInstanceList("bind-instances", a.bindInstances)
writeInstanceList("computed-binds-instances", a.computedBindExpressionsInstances)
writeInstanceList("validation-binds-instances", a.validationBindInstances)
a.eventHandlers foreach recurse
}
def writeStaticBind(a: StaticBind)(implicit receiver: XMLReceiver): Unit = {
writeElementAnalysis(a)
// `@ref` analysis is handled by superclass
// MIP analysis
for {
(_, mips) <- a.allMIPNameToXPathMIP.toList.sortBy(_._1)
mip <- mips
} locally {
withElement("mip", atts = List("name" -> mip.name, "expression" -> mip.compiledExpression.string)) {
writeXPathAnalysis(mip.analysis)
}
}
// Children binds
a.children foreach recurse
}
def writeChildrenBuilder(a: WithChildrenTrait)(implicit receiver: XMLReceiver): Unit = {
writeElementAnalysis(a)
a.children foreach recurse
}
def writeSelectionControl(a: SelectionControlTrait)(implicit receiver: XMLReceiver): Unit = {
writeElementAnalysis(a)
a.itemsetAnalysis foreach { analysis =>
withElement("itemset") {
writeXPathAnalysis(analysis)
}
}
}
// Don't output nested value if any as everything is already contained in the enclosing variable
def writeVariableControl(a: VariableAnalysisTrait)(implicit receiver: XMLReceiver): Unit = {
writeElementAnalysis(a)
a.children filterNot (_.isInstanceOf[VariableValueTrait]) foreach recurse
}
def recurse(ea: ElementAnalysis)(implicit receiver: XMLReceiver): Unit = {
val atts =
ea match {
case m: Model =>
List(
"scope" -> m.scope.scopeId,
"prefixed-id" -> m.prefixedId,
"default-instance-prefixed-id" -> m.defaultInstancePrefixedId.orNull,
"analyzed-binds" -> m.figuredAllBindRefAnalysis.toString
)
case b: StaticBind =>
List(
"id" -> b.staticId,
"context" -> b.context.orNull,
"ref" -> b.ref.orNull
)
case e =>
List(
"scope" -> e.scope.scopeId,
"prefixed-id" -> e.prefixedId,
"model-prefixed-id" -> (e.model map (_.prefixedId) orNull),
"binding" -> e.hasBinding.toString,
"value" -> e.isInstanceOf[ValueTrait].toString,
"name" -> e.element.attributeValue("name")
)
}
withElement(ea.localName, atts = atts) {
ea match {
case a: Model => writeModel(a)
case a: StaticBind => writeStaticBind(a)
case a: SelectionControlTrait => writeSelectionControl(a)
case a: VariableAnalysisTrait => writeVariableControl(a)
case a: WithChildrenTrait => writeChildrenBuilder(a)
case a => writeElementAnalysis(a)
}
}
}
}
}
|
orbeon/orbeon-forms
|
xforms-compiler/jvm/src/main/scala/org/orbeon/oxf/xforms/analysis/PartAnalysisDebugSupport.scala
|
Scala
|
lgpl-2.1
| 7,220 |
package org.fs.utility.web.http
import java.nio.charset.Charset
import org.apache.http.entity.ContentType
/**
* @author FS
*/
case class SimpleHttpResponse(code: Int, headers: Seq[(String, String)], body: Array[Byte]) {
/** @return content charset if specified */
lazy val charsetOption: Option[Charset] =
contentTypeOption map ContentType.parse map (_.getCharset) match {
case Some(null) => None
case otherwise => otherwise
}
/** @return content charset if specified, or default ISO-8859-1 as per HTTP/1.1 standard */
lazy val charset: Charset =
charsetOption getOrElse ContentType.DEFAULT_TEXT.getCharset
/** @return body as a string using the content charset if any, or ISO-8859-1 as per HTTP/1.1 */
lazy val bodyString: String =
new String(body, charset)
/** @return body as a UTF-8 string */
lazy val bodyStringUTF8: String =
new String(body, "UTF-8")
/**
* Obtain the value of a header with a given name, if known.
* If several same-named headers are present, either may be returned.
*/
def findHeader(headerName: String): Option[String] =
headers find (_._1 == headerName) map (_._2)
/** @return Content-Type header, if known. */
lazy val contentTypeOption: Option[String] =
findHeader("Content-Type")
/** @return Content-Encoding header, if known. */
lazy val contentEncodingOption: Option[String] =
findHeader("Content-Encoding")
override def toString = {
s"SimpleHttpResponse($code, <${body.length} bytes body>, $headers)"
}
}
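// Illustrative usage sketch, not part of the original file: the charset is taken from the
// Content-Type header, so bodyString decodes the raw bytes correctly. The header value and body
// bytes below are made up for the example.
object SimpleHttpResponseUsageExample {
  def demo(): Unit = {
    val response = SimpleHttpResponse(
      code = 200,
      headers = Seq("Content-Type" -> "text/plain; charset=UTF-8"),
      body = "привет".getBytes("UTF-8")
    )
    println(response.charsetOption) // Some(UTF-8)
    println(response.bodyString)    // привет
  }
}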
|
frozenspider/fs-web-utils
|
src/main/scala/org/fs/utility/web/http/SimpleHttpResponse.scala
|
Scala
|
mit
| 1,538 |
package com.arcusys.learn.scorm.manifest.storage.impl.liferay
import com.arcusys.learn.persistence.liferay.service.LFResourceLocalService
import com.arcusys.learn.persistence.liferay.model.LFResource
import com.arcusys.learn.storage.impl.liferay.MockEntityContainer
import scala.collection.JavaConverters._
object ResourceEntityContainer extends MockEntityContainer[LFResourceLocalService, LFResource] {
lazy val mockServiceBeanName = classOf[LFResourceLocalService].getName
lazy val mockLocalService = mock[LFResourceLocalService]
// service related mocks
def createFunction = _.createLFResource
def addFunction = _.addLFResource(_)
def deleteFunction = _.deleteLFResource(_)
def updateFunction = _.updateLFResource(_)
def orNull = _.orNull
def getAllFunction = _.getLFResources(_, _)
def removeAllFunction = _.removeAll()
// entity related mocks
def createMockEntity() = mock[LFResource]
def mockEntityProperties(mockEntity: LFResource) {
mockIntegerProperty(mockEntity.setPackageID(_), _.getPackageID)
mockStringProperty(mockEntity.setScormType(_), _.getScormType)
mockStringProperty(mockEntity.setResourceID(_), _.getResourceID)
mockStringProperty(mockEntity.setHref(_), _.getHref)
mockStringProperty(mockEntity.setBase(_), _.getBase)
}
def getIdFunction = _.getId
mockLocalService.findByPackageID(anyInt) answers {
(paramsRaw, mockService) =>
{
val packageID: Int = paramsRaw match {
case Array(a) => a.asInstanceOf[Int]
}
internalStorage.values.filter(entity => {
entity.getPackageID == packageID
}).toList.asJava
}
}
mockLocalService.findByPackageIDAndResourceID(anyInt, anyString, anyInt, anyInt) answers {
(paramsRaw, mockService) =>
{
val paramsTuple: (Any, Any) = paramsRaw match {
case Array(a, b, c, d) => (a, b)
}
val packageID = paramsTuple._1.asInstanceOf[Int]
val resourceID = paramsTuple._2.asInstanceOf[String]
internalStorage.values.find(entity => {
entity.getPackageID == packageID && entity.getResourceID == resourceID
}).toList.asJava
}
}
}
|
ViLPy/Valamis
|
learn-persistence-liferay-wrapper/src/test/scala/com/arcusys/learn/scorm/manifest/storage/impl/liferay/ResourceEntityContainer.scala
|
Scala
|
lgpl-3.0
| 2,191 |
package consumer
import kafka.consumer.{Consumer => KafkaConsumer, ConsumerIterator, Whitelist}
/**
* Created by mike on 22/06/15.
*/
case class SingleTopicConsumer(topic: String) extends Consumer(List(topic)) {
private lazy val consumer = KafkaConsumer.create(config)
val threadNum = 1
private lazy val consumerMap = consumer.createMessageStreams(Map(topic -> threadNum))
private lazy val stream = consumerMap.getOrElse(topic, List()).head
override def read(): Stream[String] = Stream.cons(new String(stream.head.message()), read())
}
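// Illustrative usage sketch, not part of the original file: read() exposes messages as a lazy
// Stream, so a caller can consume a bounded prefix. The topic name is hypothetical.
object SingleTopicConsumerUsageExample {
  def demo(): Unit = {
    val consumer = SingleTopicConsumer("events")
    consumer.read().take(5).foreach(println) // blocks until five messages have been received
  }
}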
|
mikehancock/kafka-sample
|
app/consumer/SingleTopicConsumer.scala
|
Scala
|
mit
| 551 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.hive
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.metastore.MetaStorePreEventListener
import org.apache.hadoop.hive.metastore.api.{FieldSchema, MetaException}
import org.apache.hadoop.hive.metastore.events._
import org.apache.hadoop.hive.metastore.events.PreEventContext.PreEventType._
import org.apache.spark.sql.types.{DataType, StructField, StructType}
class CarbonHiveMetastoreListener(conf: Configuration) extends MetaStorePreEventListener(conf) {
override def onEvent(preEventContext: PreEventContext): Unit = {
preEventContext.getEventType match {
case CREATE_TABLE =>
val table = preEventContext.asInstanceOf[PreCreateTableEvent].getTable
val tableProps = table.getParameters
if (tableProps != null
&& (tableProps.get("spark.sql.sources.provider") == "org.apache.spark.sql.CarbonSource"
|| tableProps.get("spark.sql.sources.provider").equalsIgnoreCase("carbondata"))) {
val numSchemaParts = tableProps.get("spark.sql.sources.schema.numParts")
if (numSchemaParts != null && !numSchemaParts.isEmpty) {
val parts = (0 until numSchemaParts.toInt).map { index =>
val part = tableProps.get(s"spark.sql.sources.schema.part.${index}")
if (part == null) {
throw new MetaException(s"spark.sql.sources.schema.part.${index} is missing!")
}
part
}
// Stick all parts back to a single schema string.
val schema = DataType.fromJson(parts.mkString).asInstanceOf[StructType]
val hiveSchema = schema.map(toHiveColumn).asJava
table.getSd.setCols(hiveSchema)
table.getSd.setInputFormat("org.apache.carbondata.hive.MapredCarbonInputFormat")
table.getSd.setOutputFormat("org.apache.carbondata.hive.MapredCarbonOutputFormat")
table.getParameters
.put("storage_handler", "org.apache.carbondata.hive.CarbonStorageHandler")
val serdeInfo = table.getSd.getSerdeInfo
serdeInfo.setSerializationLib("org.apache.carbondata.hive.CarbonHiveSerDe")
val tablePath = serdeInfo.getParameters.get("tablePath")
if (tablePath != null) {
table.getSd.setLocation(tablePath)
}
}
}
case ALTER_TABLE =>
val table = preEventContext.asInstanceOf[PreAlterTableEvent].getNewTable
val tableProps = table.getParameters
if (tableProps != null
&& (tableProps.get("spark.sql.sources.provider") == "org.apache.spark.sql.CarbonSource"
|| tableProps.get("spark.sql.sources.provider").equalsIgnoreCase("carbondata"))) {
val numSchemaParts = tableProps.get("spark.sql.sources.schema.numParts")
if (numSchemaParts != null && !numSchemaParts.isEmpty) {
val schemaParts = (0 until numSchemaParts.toInt).map { index =>
val schemaPart = tableProps.get(s"spark.sql.sources.schema.part.$index")
if (schemaPart == null) {
throw new MetaException(s"spark.sql.sources.schema.part.$index is missing!")
}
schemaPart
}
// Stick all schemaParts back to a single schema string.
val schema = DataType.fromJson(schemaParts.mkString).asInstanceOf[StructType]
val hiveSchema = schema.map(toHiveColumn).asJava
table.getSd.setCols(hiveSchema)
}
}
case _ =>
// do nothing
}
}
private def toHiveColumn(c: StructField): FieldSchema = {
val typeString = if (c.metadata.contains("HIVE_TYPE_STRING")) {
c.metadata.getString("HIVE_TYPE_STRING")
} else {
c.dataType.catalogString
}
new FieldSchema(c.name, typeString, c.getComment().orNull)
}
}
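// Illustrative sketch, not part of the original listener: how the split "schema.part.N" strings
// are glued back into a StructType via DataType.fromJson. The two-part JSON below is a made-up
// example of the schema format Spark stores in the table properties.
object CarbonHiveMetastoreListenerSchemaExample {
  def demo(): Unit = {
    val parts = Seq(
      """{"type":"struct","fields":[""",
      """{"name":"id","type":"integer","nullable":true,"metadata":{}}]}""")
    val schema = DataType.fromJson(parts.mkString).asInstanceOf[StructType]
    println(schema.simpleString) // struct<id:int>
  }
}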
|
zzcclp/carbondata
|
integration/hive/src/main/scala/org/apache/carbondata/hive/CarbonHiveMetastoreListener.scala
|
Scala
|
apache-2.0
| 4,716 |
package redis.commands
import redis._
import scala.concurrent.Await
import akka.util.ByteString
import redis.actors.ReplyErrorException
import redis.protocol.{Bulk, Status, MultiBulk}
class TransactionsSpec extends RedisStandaloneServer {
"Transactions commands" should {
"basic" in {
val redisTransaction = redis.transaction()
redisTransaction.exec()
redisTransaction.watch("a")
val set = redisTransaction.set("a", "abc")
val decr = redisTransaction.decr("a")
val get = redisTransaction.get("a")
redisTransaction.exec()
val r = for {
s <- set
g <- get
} yield {
s mustEqual true
g mustEqual Some(ByteString("abc"))
}
Await.result(decr, timeOut) must throwA[ReplyErrorException]("ERR value is not an integer or out of range")
Await.result(r, timeOut)
}
"function api" in {
"empty" in {
val empty = redis.multi().exec()
Await.result(empty, timeOut) mustEqual MultiBulk(Some(Vector()))
}
val redisTransaction = redis.multi(redis => {
redis.set("a", "abc")
redis.get("a")
})
val exec = redisTransaction.exec()
"non empty" in {
Await.result(exec, timeOut) mustEqual MultiBulk(Some(Vector(Status(ByteString("OK")), Bulk(Some(ByteString("abc"))))))
}
"reused" in {
redisTransaction.get("transactionUndefinedKey")
val exec = redisTransaction.exec()
Await.result(exec, timeOut) mustEqual MultiBulk(Some(Vector(Status(ByteString("OK")), Bulk(Some(ByteString("abc"))), Bulk(None))))
}
"watch" in {
val transaction = redis.watch("transactionWatchKey")
transaction.watcher.result() mustEqual Set("transactionWatchKey")
transaction.unwatch()
transaction.watcher.result() must beEmpty
val set = transaction.set("transactionWatch", "value")
transaction.exec()
val r = for {
s <- set
} yield {
s must beTrue
}
Await.result(r, timeOut)
}
}
}
}
|
etaty/rediscala
|
src/test/scala/redis/commands/TransactionsSpec.scala
|
Scala
|
apache-2.0
| 2,086 |
package com.rocketfuel.sdbc.base.jdbc
import java.sql.{DriverPropertyInfo, Driver}
import java.util.Properties
import java.util.logging.Logger
class TestDriver extends Driver {
override def acceptsURL(url: String): Boolean = ???
override def jdbcCompliant(): Boolean = ???
override def getPropertyInfo(url: String, info: Properties): Array[DriverPropertyInfo] = ???
override def getMinorVersion: Int = ???
override def getParentLogger: Logger = ???
override def connect(url: String, info: Properties): java.sql.Connection = ???
override def getMajorVersion: Int = ???
}
|
rocketfuel/sdbc
|
jdbc/src/test/scala/com/rocketfuel/sdbc/base/jdbc/TestDriver.scala
|
Scala
|
bsd-3-clause
| 592 |
package com.arcusys.valamis.web.portlet
import javax.portlet._
import javax.servlet.http.HttpServletRequest
import com.arcusys.learn.liferay.util.{PortalUtilHelper, PortletName, PortletPreferencesFactoryUtilHelper}
import com.arcusys.valamis.social.service.ValamisActivitiesSettings.{COUNT_DEFAULT_VALUE, COUNT_PROPERTY_NAME}
import com.arcusys.valamis.social.service.{ActivityService, CommentService, LikeService, ValamisActivitiesSettings}
import com.arcusys.valamis.user.service.UserService
import com.arcusys.valamis.util.serialization.JsonHelper
import com.arcusys.valamis.web.portlet.base.{PermissionUtil => PortletPermissionUtil, _}
import com.arcusys.valamis.web.service.ActivityInterpreter
import com.arcusys.valamis.web.servlet.base.PermissionUtil
import com.arcusys.valamis.web.servlet.request.ServletRequestHelper
import ServletRequestHelper._
import com.arcusys.valamis.web.servlet.social.request.ActivityRequest
import com.arcusys.valamis.web.servlet.social.response.ActivityConverter
class ValamisActivitiesView extends OAuthPortlet with PortletBase with ActivityConverter {
implicit val serializationFormats = ActivityRequest.serializationFormats
protected lazy val socialActivityService = inject[ActivityService]
protected lazy val likeService = inject[LikeService]
protected lazy val userService = inject[UserService]
protected lazy val commentService = inject[CommentService]
protected lazy val activityInterpreter = inject[ActivityInterpreter]
override def doView(request: RenderRequest, response: RenderResponse) {
implicit val out =
response.getWriter
val securityScope = getSecurityData(request)
val preferences = PortletPreferencesFactoryUtilHelper.getPortletSetup(request)
sendTextFile("/templates/2.0/valamis_activities_templates.html")
sendTextFile("/templates/2.0/common_templates.html")
sendMustacheFile(
securityScope.data ++
Map("resourceURL" -> response.createResourceURL(),
COUNT_PROPERTY_NAME -> preferences.getValue(COUNT_PROPERTY_NAME, COUNT_DEFAULT_VALUE)
),
"valamis_activities.html")
}
private def loadSettings(request: RenderRequest): Map[String, String] = {
val preferences = PortletPreferencesFactoryUtilHelper.getPortletSetup(request)
val sets = ValamisActivitiesSettings.visibleSettings.map { case (propName, _) =>
(propName, preferences.getValue(propName, "true"))
}
sets ++ Map(COUNT_PROPERTY_NAME -> preferences.getValue(COUNT_PROPERTY_NAME, COUNT_DEFAULT_VALUE))
}
private def saveValue(origRequest: HttpServletRequest, prefs: PortletPreferences, propName: String): Unit = {
val propValue = origRequest.withDefault(propName, "")
if (!propValue.isEmpty) {
prefs.setValue(propName, propValue)
}
}
private def saveSettings(request: RenderRequest): Unit = {
val preferences = PortletPreferencesFactoryUtilHelper.getPortletSetup(request)
val origRequest = PortalUtilHelper.getOriginalServletRequest(PortalUtilHelper.getHttpServletRequest(request))
ValamisActivitiesSettings.visibleSettings.foreach { case (propName, _) =>
saveValue(origRequest, preferences, propName)
}
saveValue(origRequest, preferences, COUNT_PROPERTY_NAME)
preferences.store()
}
override def doEdit(request: RenderRequest, response: RenderResponse) {
val origRequest = PortalUtilHelper.getOriginalServletRequest(PortalUtilHelper.getHttpServletRequest(request))
val needToSave = !origRequest.withDefault("saveSettings", "").isEmpty
if (needToSave) {
saveSettings(request)
} else {
implicit val out = response.getWriter
val language = LiferayHelpers.getLanguage(request)
val settings = loadSettings(request)
val securityScope = getSecurityData(request)
val translations = getTranslation("dashboard", language)
val permission = new PortletPermissionUtil(request, this)
val data = settings ++
Map(
"actionURL" -> response.createResourceURL(),
"permissionToModify" -> permission.hasPermission(ModifyPermission.name)
) ++ translations ++ getSecurityData(request).data
sendMustacheFile(data, "valamis_activities_settings.html")
}
}
override def serveResource(request: ResourceRequest, response: ResourceResponse): Unit = {
val servletRequest = PortalUtilHelper.getHttpServletRequest(request)
implicit val origRequest = PortalUtilHelper.getOriginalServletRequest(servletRequest)
val servletRequestExt = ServletRequestExt(origRequest)
PermissionUtil.requirePermissionApi(ViewPermission, PortletName.ValamisActivities)
val companyId = PortalUtilHelper.getCompanyId(servletRequest)
val myActivities = servletRequestExt.booleanOption(ActivityRequest.GetMyActivities).getOrElse(false)
val userId = if (myActivities) Some(PermissionUtil.getUserId) else None
val showAll = PermissionUtil.hasPermissionApi(ShowAllActivities, PortletName.ValamisActivities)
val plId = servletRequestExt.longRequired(ActivityRequest.PlId)
val preferences = Some(PortletPreferencesFactoryUtilHelper.getPortletSetup(request))
val lActivitiesToBeShown = Some(ValamisActivitiesSettings.getVisibleLiferayActivities(preferences))
response.getWriter.println(JsonHelper.toJson(
socialActivityService.getBy(companyId, userId, servletRequestExt.skipTake, showAll, lActivitiesToBeShown)
.map(act => toResponse(act, Some(plId)))
))
}
}
|
igor-borisov/JSCORM
|
valamis-portlets/src/main/scala/com/arcusys/valamis/web/portlet/ValamisActivitiesView.scala
|
Scala
|
gpl-3.0
| 5,476 |
/* Copyright 2009-2021 EPFL, Lausanne */
object Nested4 {
def foo(a: BigInt, a2: BigInt): BigInt = {
require(a >= 0 && a <= 50)
val b = a + 2
val c = a + b
if(a2 > a) {
def rec1(d: BigInt): BigInt = {
require(d >= 0 && d <= 50)
val e = d + b + c + a2
e
} ensuring(_ > 0)
rec1(2)
} else {
BigInt(5)
}
} ensuring(x => x > 0)
}
|
epfl-lara/stainless
|
frontends/benchmarks/verification/valid/MicroTests/Nested4.scala
|
Scala
|
apache-2.0
| 404 |
object partialApplications {
type Histogram[X] = Map[X, Int]
type StringlyHistogram[X >: String] = Histogram[X]
val xs: Histogram[String] = Map[String, Int]()
val ys: StringlyHistogram[String] = xs
def e = xs
val zs: StringlyHistogram[_] = e
type IntMap[Y] = Map[Int, Y]
val is = Map[Int, Boolean]()
val js: IntMap[Boolean] = is
val ks: IntMap[_] = is
type RMap[X, Y] = Map[Y, X]
val rs = Map[Int, Float]()
val ss: RMap[Float, Int] = rs
}
|
densh/dotty
|
tests/pickling/partialApplications.scala
|
Scala
|
bsd-3-clause
| 479 |
import org.specs2.mutable.BeforeAfter
import play.api.Play
import play.api.test.{FakeApplication, PlaySpecification}
/**
* User: Björn Reimer
* Date: 14.01.15
* Time: 11:52
*/
trait SpecWithStartedApp extends PlaySpecification with BeforeAfter {
override def before = {
val app = FakeApplication()
// check if app is started. start it if not
Play.maybeApplication match {
case Some(a) =>
case None =>
Play.start(app)
}
}
override def after = {}
}
|
indarium/hbbTVPlugin
|
test/SpecWithStartedApp.scala
|
Scala
|
agpl-3.0
| 497 |
package model
import model.db.DeviceDb
import play.api.libs.json.{Json, OWrites}
case class Device(id: Long, name: String, position: Position, sensors: List[Sensor])
object Device {
def apply(deviceDb: DeviceDb, sensors: List[Sensor]): Device =
Device(deviceDb.id, deviceDb.name, deviceDb.position, sensors)
implicit val deviceWriter: OWrites[Device] = Json.writes[Device]
}
|
openada/adaweather
|
app/model/Device.scala
|
Scala
|
mit
| 387 |
/*
IrisPca.scala
*/
import breeze.linalg.*
import breeze.stats.covmat
import scalaglm.*
@main def irisPca() =
val url = "http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data"
val fileName = "iris.csv"
val imap = Map(
"Iris-setosa" -> 0,
"Iris-versicolor" -> 1,
"Iris-virginica" -> 2)
// download the file to disk if it hasn't been already
val file = new java.io.File(fileName)
if !file.exists then
val s = new java.io.PrintWriter(file)
val data = scala.io.Source.fromURL(url).getLines()
data.foreach(l => s.write(l.trim.split(',').
map(x => imap.getOrElse(x, x)).mkString("", ",", "\\n")))
s.close
// read the file from disk
val mat = csvread(file)
println("Mat Dim: " + mat.rows + " " + mat.cols)
val x = mat(::, 0 to 3)
println("X Dim: " + x.rows + " " + x.cols)
val clas = mat(::, 4).toDenseVector
println("PCA with built-in Breeze version (like R princomp):")
val pca = new PCA(x, covmat(x))
println("Loadings:")
println(pca.loadings)
println("Stdev:")
println(pca.sdev)
println(pca.scores(0 to 5, ::))
println("Now my version (like R prcomp):")
val myPca = Pca(x, List("S-L","S-W","P-L","P-W"))
println(myPca.loadings) // loadings transposed
println(myPca.sdev)
myPca.summary
println("Scores:")
println(myPca.scores(0 to 5, ::))
myPca.plots.saveas("IrisPcaDiag.png")
// scatter plot first 2 principal components
import breeze.plot.*
val fig = Figure("PCA")
val p = fig.subplot(0)
val ind0 = (0 until x.rows) filter (i => clas(i) == 0)
p += plot(myPca.scores(ind0, 0).toDenseVector,
myPca.scores(ind0, 1).toDenseVector, '.', colorcode = "blue")
val ind1 = (0 until x.rows) filter (i => clas(i) == 1)
p += plot(myPca.scores(ind1, 0).toDenseVector,
myPca.scores(ind1, 1).toDenseVector, '.', colorcode = "red")
val ind2 = (0 until x.rows) filter (i => clas(i) == 2)
p += plot(myPca.scores(ind2, 0).toDenseVector,
myPca.scores(ind2, 1).toDenseVector, '.', colorcode = "green")
fig.saveas("IrisPca.png")
// Test without variable names
Pca(x).summary
// eof
|
darrenjw/scala-glm
|
examples/src/main/scala/IrisPca.scala
|
Scala
|
apache-2.0
| 2,216 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{execution, DataFrame, Row}
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan, Range, Repartition, RepartitionOperation, Union}
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanHelper, DisableAdaptiveExecution}
import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, ObjectHashAggregateExec, SortAggregateExec}
import org.apache.spark.sql.execution.columnar.{InMemoryRelation, InMemoryTableScanExec}
import org.apache.spark.sql.execution.exchange.{EnsureRequirements, ReusedExchangeExec, ReuseExchange, ShuffleExchangeExec}
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
class PlannerSuite extends SharedSparkSession with AdaptiveSparkPlanHelper {
import testImplicits._
setupTestData()
private def testPartialAggregationPlan(query: LogicalPlan): Unit = {
val planner = spark.sessionState.planner
import planner._
val plannedOption = Aggregation(query).headOption
val planned =
plannedOption.getOrElse(
        fail(s"Could not plan aggregation query $query. Is it an aggregation query?"))
val aggregations = planned.collect { case n if n.nodeName contains "Aggregate" => n }
    // For the new aggregation code path, there will be four aggregate operators for
// distinct aggregations.
assert(
aggregations.size == 2 || aggregations.size == 4,
s"The plan of query $query does not have partial aggregations.")
}
test("count is partially aggregated") {
val query = testData.groupBy('value).agg(count('key)).queryExecution.analyzed
testPartialAggregationPlan(query)
}
test("count distinct is partially aggregated") {
val query = testData.groupBy('value).agg(count_distinct('key)).queryExecution.analyzed
testPartialAggregationPlan(query)
}
test("mixed aggregates are partially aggregated") {
val query =
testData.groupBy('value).agg(count('value), count_distinct('key)).queryExecution.analyzed
testPartialAggregationPlan(query)
}
test("mixed aggregates with same distinct columns") {
def assertNoExpand(plan: SparkPlan): Unit = {
assert(plan.collect { case e: ExpandExec => e }.isEmpty)
}
withTempView("v") {
Seq((1, 1.0, 1.0), (1, 2.0, 2.0)).toDF("i", "j", "k").createTempView("v")
// one distinct column
val query1 = sql("SELECT sum(DISTINCT j), max(DISTINCT j) FROM v GROUP BY i")
assertNoExpand(query1.queryExecution.executedPlan)
// 2 distinct columns
val query2 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT j, k) FROM v GROUP BY i")
assertNoExpand(query2.queryExecution.executedPlan)
// 2 distinct columns with different order
val query3 = sql("SELECT corr(DISTINCT j, k), count(DISTINCT k, j) FROM v GROUP BY i")
assertNoExpand(query3.queryExecution.executedPlan)
}
}
test("sizeInBytes estimation of limit operator for broadcast hash join optimization") {
def checkPlan(fieldTypes: Seq[DataType]): Unit = {
withTempView("testLimit") {
val fields = fieldTypes.zipWithIndex.map {
case (dataType, index) => StructField(s"c${index}", dataType, true)
} :+ StructField("key", IntegerType, true)
val schema = StructType(fields)
val row = Row.fromSeq(Seq.fill(fields.size)(null))
val rowRDD = sparkContext.parallelize(row :: Nil)
spark.createDataFrame(rowRDD, schema).createOrReplaceTempView("testLimit")
val planned = sql(
"""
|SELECT l.a, l.b
|FROM testData2 l JOIN (SELECT * FROM testLimit LIMIT 1) r ON (l.a = r.key)
""".stripMargin).queryExecution.sparkPlan
val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join }
val sortMergeJoins = planned.collect { case join: SortMergeJoinExec => join }
assert(broadcastHashJoins.size === 1, "Should use broadcast hash join")
assert(sortMergeJoins.isEmpty, "Should not use sort merge join")
}
}
val simpleTypes =
NullType ::
BooleanType ::
ByteType ::
ShortType ::
IntegerType ::
LongType ::
FloatType ::
DoubleType ::
DecimalType(10, 5) ::
DecimalType.SYSTEM_DEFAULT ::
DateType ::
TimestampType ::
StringType ::
BinaryType :: Nil
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "16434") {
checkPlan(simpleTypes)
}
val complexTypes =
ArrayType(DoubleType, true) ::
ArrayType(StringType, false) ::
MapType(IntegerType, StringType, true) ::
MapType(IntegerType, ArrayType(DoubleType), false) ::
StructType(Seq(
StructField("a", IntegerType, nullable = true),
StructField("b", ArrayType(DoubleType), nullable = false),
StructField("c", DoubleType, nullable = false))) :: Nil
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "901617") {
checkPlan(complexTypes)
}
}
test("InMemoryRelation statistics propagation") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "81920") {
withTempView("tiny") {
testData.limit(3).createOrReplaceTempView("tiny")
sql("CACHE TABLE tiny")
val a = testData.as("a")
val b = spark.table("tiny").as("b")
val planned = a.join(b, $"a.key" === $"b.key").queryExecution.sparkPlan
val broadcastHashJoins = planned.collect { case join: BroadcastHashJoinExec => join }
val sortMergeJoins = planned.collect { case join: SortMergeJoinExec => join }
assert(broadcastHashJoins.size === 1, "Should use broadcast hash join")
        assert(sortMergeJoins.isEmpty, "Should not use sort merge join")
spark.catalog.clearCache()
}
}
}
test("SPARK-11390 explain should print PushedFilters of PhysicalRDD") {
withSQLConf(SQLConf.USE_V1_SOURCE_LIST.key -> "parquet") {
withTempPath { file =>
val path = file.getCanonicalPath
testData.write.parquet(path)
val df = spark.read.parquet(path)
df.createOrReplaceTempView("testPushed")
withTempView("testPushed") {
val exp = sql("select * from testPushed where key = 15").queryExecution.sparkPlan
assert(exp.toString.contains("PushedFilters: [IsNotNull(key), EqualTo(key,15)]"))
}
}
}
}
test("efficient terminal limit -> sort should use TakeOrderedAndProject") {
val query = testData.select('key, 'value).sort('key).limit(2)
val planned = query.queryExecution.executedPlan
assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec])
assert(planned.output === testData.select('key, 'value).logicalPlan.output)
}
test("terminal limit -> project -> sort should use TakeOrderedAndProject") {
val query = testData.select('key, 'value).sort('key).select('value, 'key).limit(2)
val planned = query.queryExecution.executedPlan
assert(planned.isInstanceOf[execution.TakeOrderedAndProjectExec])
assert(planned.output === testData.select('value, 'key).logicalPlan.output)
}
test("terminal limits that are not handled by TakeOrderedAndProject should use CollectLimit") {
val query = testData.select('value).limit(2)
val planned = query.queryExecution.sparkPlan
assert(planned.isInstanceOf[CollectLimitExec])
assert(planned.output === testData.select('value).logicalPlan.output)
}
test("TakeOrderedAndProject can appear in the middle of plans") {
val query = testData.select('key, 'value).sort('key).limit(2).filter('key === 3)
val planned = query.queryExecution.executedPlan
assert(planned.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined)
}
test("CollectLimit can appear in the middle of a plan when caching is used") {
val query = testData.select('key, 'value).limit(2).cache()
val planned = query.queryExecution.optimizedPlan.asInstanceOf[InMemoryRelation]
assert(planned.cachedPlan.isInstanceOf[CollectLimitExec])
}
test("TakeOrderedAndProjectExec appears only when number of limit is below the threshold.") {
withSQLConf(SQLConf.TOP_K_SORT_FALLBACK_THRESHOLD.key -> "1000") {
val query0 = testData.select('value).orderBy('key).limit(100)
val planned0 = query0.queryExecution.executedPlan
assert(planned0.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isDefined)
val query1 = testData.select('value).orderBy('key).limit(2000)
val planned1 = query1.queryExecution.executedPlan
assert(planned1.find(_.isInstanceOf[TakeOrderedAndProjectExec]).isEmpty)
}
}
test("PartitioningCollection") {
withTempView("normal", "small", "tiny") {
testData.createOrReplaceTempView("normal")
testData.limit(10).createOrReplaceTempView("small")
testData.limit(3).createOrReplaceTempView("tiny")
// Disable broadcast join
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
{
val plan = sql(
"""
|SELECT *
|FROM
| normal JOIN small ON (normal.key = small.key)
| JOIN tiny ON (small.key = tiny.key)
""".stripMargin
).queryExecution.executedPlan
val numExchanges = collect(plan) {
case exchange: ShuffleExchangeExec => exchange
}.length
assert(numExchanges === 5)
}
{
val plan = sql(
"""
|SELECT *
|FROM
| normal JOIN small ON (normal.key = small.key)
| JOIN tiny ON (normal.key = tiny.key)
""".stripMargin
).queryExecution.executedPlan
// This second query joins on different keys:
val numExchanges = collect(plan) {
case exchange: ShuffleExchangeExec => exchange
}.length
assert(numExchanges === 5)
}
}
}
}
test("collapse adjacent repartitions") {
val doubleRepartitioned = testData.repartition(10).repartition(20).coalesce(5)
def countRepartitions(plan: LogicalPlan): Int = plan.collect { case r: Repartition => r }.length
assert(countRepartitions(doubleRepartitioned.queryExecution.analyzed) === 3)
assert(countRepartitions(doubleRepartitioned.queryExecution.optimizedPlan) === 2)
doubleRepartitioned.queryExecution.optimizedPlan match {
      case Repartition(numPartitions, shuffle, Repartition(_, shuffleChild, _)) =>
assert(numPartitions === 5)
assert(shuffle === false)
assert(shuffleChild)
}
}
///////////////////////////////////////////////////////////////////////////
// Unit tests of EnsureRequirements for Exchange
///////////////////////////////////////////////////////////////////////////
  // When it comes to testing whether EnsureRequirements properly ensures distribution requirements,
  // there are two dimensions that need to be considered: are the child partitionings compatible and
  // do they satisfy the distribution requirements? As a result, we need at least four test cases.
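  // Roughly, the cases exercised below map to:
  //   - child partitionings with different numbers of output partitions -> requirements re-established
  //   - compatible partitionings that do not satisfy the distribution   -> an exchange is added
  //   - compatible partitionings that satisfy the distribution          -> no exchange is added
  //   - distribution satisfied but ordering unsatisfied (SPARK-9703)    -> only sorts, no exchange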
private def assertDistributionRequirementsAreSatisfied(outputPlan: SparkPlan): Unit = {
if (outputPlan.children.length > 1) {
val childPartitionings = outputPlan.children.zip(outputPlan.requiredChildDistribution)
.filter {
case (_, UnspecifiedDistribution) => false
case (_, _: BroadcastDistribution) => false
case _ => true
}.map(_._1.outputPartitioning)
if (childPartitionings.map(_.numPartitions).toSet.size > 1) {
fail(s"Partitionings doesn't have same number of partitions: $childPartitionings")
}
}
outputPlan.children.zip(outputPlan.requiredChildDistribution).foreach {
case (child, requiredDist) =>
assert(child.outputPartitioning.satisfies(requiredDist),
s"$child output partitioning does not satisfy $requiredDist:\n$outputPlan")
}
}
test("EnsureRequirements with child partitionings with different numbers of output partitions") {
val clustering = Literal(1) :: Nil
val distribution = ClusteredDistribution(clustering)
val inputPlan = DummySparkPlan(
children = Seq(
DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 1)),
DummySparkPlan(outputPartitioning = HashPartitioning(clustering, 2))
),
requiredChildDistribution = Seq(distribution, distribution),
requiredChildOrdering = Seq(Seq.empty, Seq.empty)
)
val outputPlan = EnsureRequirements.apply(inputPlan)
assertDistributionRequirementsAreSatisfied(outputPlan)
}
test("EnsureRequirements with compatible child partitionings that do not satisfy distribution") {
val distribution = ClusteredDistribution(Literal(1) :: Nil)
// The left and right inputs have compatible partitionings but they do not satisfy the
// distribution because they are clustered on different columns. Thus, we need to shuffle.
val childPartitioning = HashPartitioning(Literal(2) :: Nil, 1)
assert(!childPartitioning.satisfies(distribution))
val inputPlan = DummySparkPlan(
children = Seq(
DummySparkPlan(outputPartitioning = childPartitioning),
DummySparkPlan(outputPartitioning = childPartitioning)
),
requiredChildDistribution = Seq(distribution, distribution),
requiredChildOrdering = Seq(Seq.empty, Seq.empty)
)
val outputPlan = EnsureRequirements.apply(inputPlan)
assertDistributionRequirementsAreSatisfied(outputPlan)
if (outputPlan.collect { case e: ShuffleExchangeExec => true }.isEmpty) {
fail(s"Exchange should have been added:\n$outputPlan")
}
}
test("EnsureRequirements with compatible child partitionings that satisfy distribution") {
// In this case, all requirements are satisfied and no exchange should be added.
val distribution = ClusteredDistribution(Literal(1) :: Nil)
val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5)
assert(childPartitioning.satisfies(distribution))
val inputPlan = DummySparkPlan(
children = Seq(
DummySparkPlan(outputPartitioning = childPartitioning),
DummySparkPlan(outputPartitioning = childPartitioning)
),
requiredChildDistribution = Seq(distribution, distribution),
requiredChildOrdering = Seq(Seq.empty, Seq.empty)
)
val outputPlan = EnsureRequirements.apply(inputPlan)
assertDistributionRequirementsAreSatisfied(outputPlan)
if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) {
fail(s"Exchange should not have been added:\n$outputPlan")
}
}
// This is a regression test for SPARK-9703
test("EnsureRequirements should not repartition if only ordering requirement is unsatisfied") {
// Consider an operator that imposes both output distribution and ordering requirements on its
// children, such as sort merge join. If the distribution requirements are satisfied but
// the output ordering requirements are unsatisfied, then the planner should only add sorts and
// should not need to add additional shuffles / exchanges.
val outputOrdering = Seq(SortOrder(Literal(1), Ascending))
val distribution = ClusteredDistribution(Literal(1) :: Nil)
val inputPlan = DummySparkPlan(
children = Seq(
DummySparkPlan(outputPartitioning = SinglePartition),
DummySparkPlan(outputPartitioning = SinglePartition)
),
requiredChildDistribution = Seq(distribution, distribution),
requiredChildOrdering = Seq(outputOrdering, outputOrdering)
)
val outputPlan = EnsureRequirements.apply(inputPlan)
assertDistributionRequirementsAreSatisfied(outputPlan)
if (outputPlan.collect { case e: ShuffleExchangeExec => true }.nonEmpty) {
fail(s"No Exchanges should have been added:\n$outputPlan")
}
}
test("EnsureRequirements eliminates Exchange if child has same partitioning") {
val distribution = ClusteredDistribution(Literal(1) :: Nil)
val partitioning = HashPartitioning(Literal(1) :: Nil, 5)
assert(partitioning.satisfies(distribution))
val inputPlan = ShuffleExchangeExec(
partitioning,
DummySparkPlan(outputPartitioning = partitioning))
val outputPlan = EnsureRequirements.apply(inputPlan)
assertDistributionRequirementsAreSatisfied(outputPlan)
if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 2) {
fail(s"Topmost Exchange should have been eliminated:\n$outputPlan")
}
}
test("EnsureRequirements does not eliminate Exchange with different partitioning") {
val distribution = ClusteredDistribution(Literal(1) :: Nil)
val partitioning = HashPartitioning(Literal(2) :: Nil, 5)
assert(!partitioning.satisfies(distribution))
val inputPlan = ShuffleExchangeExec(
partitioning,
DummySparkPlan(outputPartitioning = partitioning))
val outputPlan = EnsureRequirements.apply(inputPlan)
assertDistributionRequirementsAreSatisfied(outputPlan)
if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size == 1) {
fail(s"Topmost Exchange should not have been eliminated:\n$outputPlan")
}
}
test("EnsureRequirements should respect ClusteredDistribution's num partitioning") {
val distribution = ClusteredDistribution(Literal(1) :: Nil, Some(13))
    // The number of partitions differs
val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 13)
val childPartitioning = HashPartitioning(Literal(1) :: Nil, 5)
assert(!childPartitioning.satisfies(distribution))
val inputPlan = DummySparkPlan(
children = DummySparkPlan(outputPartitioning = childPartitioning) :: Nil,
requiredChildDistribution = Seq(distribution),
requiredChildOrdering = Seq(Seq.empty))
val outputPlan = EnsureRequirements.apply(inputPlan)
val shuffle = outputPlan.collect { case e: ShuffleExchangeExec => e }
assert(shuffle.size === 1)
assert(shuffle.head.outputPartitioning === finalPartitioning)
}
test("Reuse exchanges") {
val distribution = ClusteredDistribution(Literal(1) :: Nil)
val finalPartitioning = HashPartitioning(Literal(1) :: Nil, 5)
val childPartitioning = HashPartitioning(Literal(2) :: Nil, 5)
assert(!childPartitioning.satisfies(distribution))
val shuffle = ShuffleExchangeExec(finalPartitioning,
DummySparkPlan(
children = DummySparkPlan(outputPartitioning = childPartitioning) :: Nil,
requiredChildDistribution = Seq(distribution),
requiredChildOrdering = Seq(Seq.empty)))
val inputPlan = SortMergeJoinExec(
Literal(1) :: Nil,
Literal(1) :: Nil,
Inner,
None,
shuffle,
shuffle)
val outputPlan = ReuseExchange.apply(inputPlan)
if (outputPlan.collect { case e: ReusedExchangeExec => true }.size != 1) {
fail(s"Should re-use the shuffle:\n$outputPlan")
}
if (outputPlan.collect { case e: ShuffleExchangeExec => true }.size != 1) {
fail(s"Should have only one shuffle:\n$outputPlan")
}
// nested exchanges
val inputPlan2 = SortMergeJoinExec(
Literal(1) :: Nil,
Literal(1) :: Nil,
Inner,
None,
ShuffleExchangeExec(finalPartitioning, inputPlan),
ShuffleExchangeExec(finalPartitioning, inputPlan))
val outputPlan2 = ReuseExchange.apply(inputPlan2)
if (outputPlan2.collect { case e: ReusedExchangeExec => true }.size != 2) {
fail(s"Should re-use the two shuffles:\n$outputPlan2")
}
if (outputPlan2.collect { case e: ShuffleExchangeExec => true }.size != 2) {
fail(s"Should have only two shuffles:\n$outputPlan")
}
}
///////////////////////////////////////////////////////////////////////////
// Unit tests of EnsureRequirements for Sort
///////////////////////////////////////////////////////////////////////////
private val exprA = Literal(1)
private val exprB = Literal(2)
private val exprC = Literal(3)
private val orderingA = SortOrder(exprA, Ascending)
private val orderingB = SortOrder(exprB, Ascending)
private val orderingC = SortOrder(exprC, Ascending)
private val planA = DummySparkPlan(outputOrdering = Seq(orderingA),
outputPartitioning = HashPartitioning(exprA :: Nil, 5))
private val planB = DummySparkPlan(outputOrdering = Seq(orderingB),
outputPartitioning = HashPartitioning(exprB :: Nil, 5))
private val planC = DummySparkPlan(outputOrdering = Seq(orderingC),
outputPartitioning = HashPartitioning(exprC :: Nil, 5))
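  // Sanity check evaluated at suite construction: the three orderings must be distinct for the
  // sort-requirement tests below to be meaningful.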
assert(orderingA != orderingB && orderingA != orderingC && orderingB != orderingC)
private def assertSortRequirementsAreSatisfied(
childPlan: SparkPlan,
requiredOrdering: Seq[SortOrder],
shouldHaveSort: Boolean): Unit = {
val inputPlan = DummySparkPlan(
children = childPlan :: Nil,
requiredChildOrdering = Seq(requiredOrdering),
requiredChildDistribution = Seq(UnspecifiedDistribution)
)
val outputPlan = EnsureRequirements.apply(inputPlan)
assertDistributionRequirementsAreSatisfied(outputPlan)
if (shouldHaveSort) {
if (outputPlan.collect { case s: SortExec => true }.isEmpty) {
fail(s"Sort should have been added:\n$outputPlan")
}
} else {
if (outputPlan.collect { case s: SortExec => true }.nonEmpty) {
fail(s"No sorts should have been added:\n$outputPlan")
}
}
}
test("EnsureRequirements skips sort when either side of join keys is required after inner SMJ") {
Seq(Inner, Cross).foreach { joinType =>
val innerSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB)
// Both left and right keys should be sorted after the SMJ.
Seq(orderingA, orderingB).foreach { ordering =>
assertSortRequirementsAreSatisfied(
childPlan = innerSmj,
requiredOrdering = Seq(ordering),
shouldHaveSort = false)
}
}
}
test("EnsureRequirements skips sort when key order of a parent SMJ is propagated from its " +
"child SMJ") {
Seq(Inner, Cross).foreach { joinType =>
val childSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, joinType, None, planA, planB)
val parentSmj = SortMergeJoinExec(exprB :: Nil, exprC :: Nil, joinType, None, childSmj, planC)
// After the second SMJ, exprA, exprB and exprC should all be sorted.
Seq(orderingA, orderingB, orderingC).foreach { ordering =>
assertSortRequirementsAreSatisfied(
childPlan = parentSmj,
requiredOrdering = Seq(ordering),
shouldHaveSort = false)
}
}
}
test("EnsureRequirements for sort operator after left outer sort merge join") {
// Only left key is sorted after left outer SMJ (thus doesn't need a sort).
val leftSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, LeftOuter, None, planA, planB)
Seq((orderingA, false), (orderingB, true)).foreach { case (ordering, needSort) =>
assertSortRequirementsAreSatisfied(
childPlan = leftSmj,
requiredOrdering = Seq(ordering),
shouldHaveSort = needSort)
}
}
test("EnsureRequirements for sort operator after right outer sort merge join") {
// Only right key is sorted after right outer SMJ (thus doesn't need a sort).
val rightSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, RightOuter, None, planA, planB)
Seq((orderingA, true), (orderingB, false)).foreach { case (ordering, needSort) =>
assertSortRequirementsAreSatisfied(
childPlan = rightSmj,
requiredOrdering = Seq(ordering),
shouldHaveSort = needSort)
}
}
test("EnsureRequirements adds sort after full outer sort merge join") {
    // Neither key is sorted after a full outer SMJ, so both need sorts.
val fullSmj = SortMergeJoinExec(exprA :: Nil, exprB :: Nil, FullOuter, None, planA, planB)
Seq(orderingA, orderingB).foreach { ordering =>
assertSortRequirementsAreSatisfied(
childPlan = fullSmj,
requiredOrdering = Seq(ordering),
shouldHaveSort = true)
}
}
test("EnsureRequirements adds sort when there is no existing ordering") {
assertSortRequirementsAreSatisfied(
childPlan = DummySparkPlan(outputOrdering = Seq.empty),
requiredOrdering = Seq(orderingB),
shouldHaveSort = true)
}
test("EnsureRequirements skips sort when required ordering is prefix of existing ordering") {
assertSortRequirementsAreSatisfied(
childPlan = DummySparkPlan(outputOrdering = Seq(orderingA, orderingB)),
requiredOrdering = Seq(orderingA),
shouldHaveSort = false)
}
test("EnsureRequirements skips sort when required ordering is semantically equal to " +
"existing ordering") {
val exprId: ExprId = NamedExpression.newExprId
val attribute1 =
AttributeReference(
name = "col1",
dataType = LongType,
nullable = false
) (exprId = exprId,
qualifier = Seq("col1_qualifier")
)
val attribute2 =
AttributeReference(
name = "col1",
dataType = LongType,
nullable = false
) (exprId = exprId)
val orderingA1 = SortOrder(attribute1, Ascending)
val orderingA2 = SortOrder(attribute2, Ascending)
    assert(orderingA1 != orderingA2, s"$orderingA1 should NOT equal $orderingA2")
assert(orderingA1.semanticEquals(orderingA2),
s"$orderingA1 should be semantically equal to $orderingA2")
assertSortRequirementsAreSatisfied(
childPlan = DummySparkPlan(outputOrdering = Seq(orderingA1)),
requiredOrdering = Seq(orderingA2),
shouldHaveSort = false)
}
// This is a regression test for SPARK-11135
test("EnsureRequirements adds sort when required ordering isn't a prefix of existing ordering") {
assertSortRequirementsAreSatisfied(
childPlan = DummySparkPlan(outputOrdering = Seq(orderingA)),
requiredOrdering = Seq(orderingA, orderingB),
shouldHaveSort = true)
}
test("SPARK-24242: RangeExec should have correct output ordering and partitioning") {
val df = spark.range(10)
val rangeExec = df.queryExecution.executedPlan.collect {
case r: RangeExec => r
}
val range = df.queryExecution.optimizedPlan.collect {
case r: Range => r
}
assert(rangeExec.head.outputOrdering == range.head.outputOrdering)
assert(rangeExec.head.outputPartitioning ==
RangePartitioning(rangeExec.head.outputOrdering, df.rdd.getNumPartitions))
val rangeInOnePartition = spark.range(1, 10, 1, 1)
val rangeExecInOnePartition = rangeInOnePartition.queryExecution.executedPlan.collect {
case r: RangeExec => r
}
assert(rangeExecInOnePartition.head.outputPartitioning == SinglePartition)
val rangeInZeroPartition = spark.range(-10, -9, -20, 1)
val rangeExecInZeroPartition = rangeInZeroPartition.queryExecution.executedPlan.collect {
case r: RangeExec => r
}
assert(rangeExecInZeroPartition.head.outputPartitioning == UnknownPartitioning(0))
}
test("SPARK-24495: EnsureRequirements can return wrong plan when reusing the same key in join") {
val plan1 = DummySparkPlan(outputOrdering = Seq(orderingA),
outputPartitioning = HashPartitioning(exprA :: exprA :: Nil, 5))
val plan2 = DummySparkPlan(outputOrdering = Seq(orderingB),
outputPartitioning = HashPartitioning(exprB :: Nil, 5))
val smjExec = SortMergeJoinExec(
exprA :: exprA :: Nil, exprB :: exprC :: Nil, Inner, None, plan1, plan2)
val outputPlan = EnsureRequirements.apply(smjExec)
outputPlan match {
case SortMergeJoinExec(leftKeys, rightKeys, _, _, _, _, _) =>
assert(leftKeys == Seq(exprA, exprA))
assert(rightKeys == Seq(exprB, exprC))
case _ => fail()
}
}
test("SPARK-27485: EnsureRequirements.reorder should handle duplicate expressions") {
val plan1 = DummySparkPlan(
outputPartitioning = HashPartitioning(exprA :: exprB :: exprA :: Nil, 5))
val plan2 = DummySparkPlan()
val smjExec = SortMergeJoinExec(
leftKeys = exprA :: exprB :: exprB :: Nil,
rightKeys = exprA :: exprC :: exprC :: Nil,
joinType = Inner,
condition = None,
left = plan1,
right = plan2)
val outputPlan = EnsureRequirements.apply(smjExec)
outputPlan match {
case SortMergeJoinExec(leftKeys, rightKeys, _, _,
SortExec(_, _,
ShuffleExchangeExec(HashPartitioning(leftPartitioningExpressions, _), _, _), _),
SortExec(_, _,
ShuffleExchangeExec(HashPartitioning(rightPartitioningExpressions, _),
_, _), _), _) =>
assert(leftKeys === smjExec.leftKeys)
assert(rightKeys === smjExec.rightKeys)
assert(leftKeys === leftPartitioningExpressions)
assert(rightKeys === rightPartitioningExpressions)
case _ => fail(outputPlan.toString)
}
}
test("SPARK-24500: create union with stream of children") {
val df = Union(Stream(
Range(1, 1, 1, 1),
Range(1, 2, 1, 1)))
df.queryExecution.executedPlan.execute()
}
test("SPARK-25278: physical nodes should be different instances for same logical nodes") {
val range = Range(1, 1, 1, 1)
val df = Union(range, range)
val ranges = df.queryExecution.optimizedPlan.collect {
case r: Range => r
}
assert(ranges.length == 2)
val execRanges = df.queryExecution.sparkPlan.collect {
case r: RangeExec => r
}
assert(execRanges.length == 2)
// Ensure the two RangeExec instances are different instances
assert(!execRanges.head.eq(execRanges.last))
}
test("SPARK-24556: always rewrite output partitioning in ReusedExchangeExec " +
"and InMemoryTableScanExec",
DisableAdaptiveExecution("Reuse is dynamic in AQE")) {
def checkOutputPartitioningRewrite(
plans: Seq[SparkPlan],
expectedPartitioningClass: Class[_]): Unit = {
assert(plans.size == 1)
val plan = plans.head
val partitioning = plan.outputPartitioning
assert(partitioning.getClass == expectedPartitioningClass)
val partitionedAttrs = partitioning.asInstanceOf[Expression].references
assert(partitionedAttrs.subsetOf(plan.outputSet))
}
def checkReusedExchangeOutputPartitioningRewrite(
df: DataFrame,
expectedPartitioningClass: Class[_]): Unit = {
val reusedExchange = collect(df.queryExecution.executedPlan) {
case r: ReusedExchangeExec => r
}
checkOutputPartitioningRewrite(reusedExchange, expectedPartitioningClass)
}
def checkInMemoryTableScanOutputPartitioningRewrite(
df: DataFrame,
expectedPartitioningClass: Class[_]): Unit = {
val inMemoryScan = collect(df.queryExecution.executedPlan) {
case m: InMemoryTableScanExec => m
}
checkOutputPartitioningRewrite(inMemoryScan, expectedPartitioningClass)
}
    // When AQE is enabled, the ReusedExchange is inserted at execution time.
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
// ReusedExchange is HashPartitioning
val df1 = Seq(1 -> "a").toDF("i", "j").repartition($"i")
val df2 = Seq(1 -> "a").toDF("i", "j").repartition($"i")
checkReusedExchangeOutputPartitioningRewrite(df1.union(df2), classOf[HashPartitioning])
// ReusedExchange is RangePartitioning
val df3 = Seq(1 -> "a").toDF("i", "j").orderBy($"i")
val df4 = Seq(1 -> "a").toDF("i", "j").orderBy($"i")
checkReusedExchangeOutputPartitioningRewrite(df3.union(df4), classOf[RangePartitioning])
// InMemoryTableScan is HashPartitioning
Seq(1 -> "a").toDF("i", "j").repartition($"i").persist()
checkInMemoryTableScanOutputPartitioningRewrite(
Seq(1 -> "a").toDF("i", "j").repartition($"i"), classOf[HashPartitioning])
// InMemoryTableScan is RangePartitioning
spark.range(1, 100, 1, 10).toDF().persist()
checkInMemoryTableScanOutputPartitioningRewrite(
spark.range(1, 100, 1, 10).toDF(), classOf[RangePartitioning])
}
// InMemoryTableScan is PartitioningCollection
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(1 -> "a").toDF("i", "j").join(Seq(1 -> "a").toDF("m", "n"), $"i" === $"m").persist()
checkInMemoryTableScanOutputPartitioningRewrite(
Seq(1 -> "a").toDF("i", "j").join(Seq(1 -> "a").toDF("m", "n"), $"i" === $"m"),
classOf[PartitioningCollection])
}
}
test("SPARK-26812: wrong nullability for complex datatypes in union") {
def testUnionOutputType(input1: DataType, input2: DataType, output: DataType): Unit = {
val query = Union(
LocalRelation(StructField("a", input1)), LocalRelation(StructField("a", input2)))
assert(query.output.head.dataType == output)
}
// Map
testUnionOutputType(
MapType(StringType, StringType, valueContainsNull = false),
MapType(StringType, StringType, valueContainsNull = true),
MapType(StringType, StringType, valueContainsNull = true))
testUnionOutputType(
MapType(StringType, StringType, valueContainsNull = true),
MapType(StringType, StringType, valueContainsNull = false),
MapType(StringType, StringType, valueContainsNull = true))
testUnionOutputType(
MapType(StringType, StringType, valueContainsNull = false),
MapType(StringType, StringType, valueContainsNull = false),
MapType(StringType, StringType, valueContainsNull = false))
// Array
testUnionOutputType(
ArrayType(StringType, containsNull = false),
ArrayType(StringType, containsNull = true),
ArrayType(StringType, containsNull = true))
testUnionOutputType(
ArrayType(StringType, containsNull = true),
ArrayType(StringType, containsNull = false),
ArrayType(StringType, containsNull = true))
testUnionOutputType(
ArrayType(StringType, containsNull = false),
ArrayType(StringType, containsNull = false),
ArrayType(StringType, containsNull = false))
// Struct
testUnionOutputType(
StructType(Seq(
StructField("f1", StringType, nullable = false),
StructField("f2", StringType, nullable = true),
StructField("f3", StringType, nullable = false))),
StructType(Seq(
StructField("f1", StringType, nullable = true),
StructField("f2", StringType, nullable = false),
StructField("f3", StringType, nullable = false))),
StructType(Seq(
StructField("f1", StringType, nullable = true),
StructField("f2", StringType, nullable = true),
StructField("f3", StringType, nullable = false))))
}
test("Do not analyze subqueries twice") {
// Analyzing the subquery twice will result in stacked
// CheckOverflow & PromotePrecision expressions.
val df = sql(
"""
|SELECT id,
| (SELECT 1.3000000 * AVG(CAST(id AS DECIMAL(10, 3))) FROM range(13)) AS ref
|FROM range(5)
|""".stripMargin)
val Seq(subquery) = stripAQEPlan(df.queryExecution.executedPlan).subqueriesAll
subquery.foreach { node =>
node.expressions.foreach { expression =>
expression.foreach {
case PromotePrecision(_: PromotePrecision) =>
fail(s"$expression contains stacked PromotePrecision expressions.")
case CheckOverflow(_: CheckOverflow, _, _) =>
fail(s"$expression contains stacked CheckOverflow expressions.")
case _ => // Ok
}
}
}
}
test("aliases in the project should not introduce extra shuffle") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("df1", "df2") {
spark.range(10).selectExpr("id AS key", "0").repartition($"key").createTempView("df1")
spark.range(20).selectExpr("id AS key", "0").repartition($"key").createTempView("df2")
val planned = sql(
"""
|SELECT * FROM
| (SELECT key AS k from df1) t1
|INNER JOIN
| (SELECT key AS k from df2) t2
|ON t1.k = t2.k
""".stripMargin).queryExecution.executedPlan
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.size == 2)
}
}
}
test("SPARK-33399: aliases should be handled properly in PartitioningCollection output" +
" partitioning") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t1", "t2", "t3") {
spark.range(10).repartition($"id").createTempView("t1")
spark.range(20).repartition($"id").createTempView("t2")
spark.range(30).repartition($"id").createTempView("t3")
val planned = sql(
"""
|SELECT t3.id as t3id
|FROM (
| SELECT t1.id as t1id, t2.id as t2id
| FROM t1, t2
| WHERE t1.id = t2.id
|) t12, t3
|WHERE t1id = t3.id
""".stripMargin).queryExecution.executedPlan
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.size == 3)
val projects = collect(planned) { case p: ProjectExec => p }
assert(projects.exists(_.outputPartitioning match {
case HashPartitioning(Seq(k1: AttributeReference), _) if k1.name == "t1id" =>
true
case _ =>
false
}))
}
}
}
test("SPARK-33399: aliases should be handled properly in HashPartitioning") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t1", "t2", "t3") {
spark.range(10).repartition($"id").createTempView("t1")
spark.range(20).repartition($"id").createTempView("t2")
spark.range(30).repartition($"id").createTempView("t3")
val planned = sql(
"""
|SELECT t1id, t3.id as t3id
|FROM (
| SELECT t1.id as t1id
| FROM t1 LEFT SEMI JOIN t2
| ON t1.id = t2.id
|) t12 INNER JOIN t3
|WHERE t1id = t3.id
""".stripMargin).queryExecution.executedPlan
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.size == 3)
val projects = collect(planned) { case p: ProjectExec => p }
assert(projects.exists(_.outputPartitioning match {
case HashPartitioning(Seq(a: AttributeReference), _) => a.name == "t1id"
case _ => false
}))
}
}
}
test("SPARK-33399: alias handling should happen properly for RangePartitioning") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val df = spark.range(1, 100)
.select(col("id").as("id1")).groupBy("id1").count()
      // The plan for this will be Range -> Project(with alias) -> HashAggregate -> HashAggregate.
      // If the Project normalizes the alias in the RangePartitioning it inherits from Range,
      // then no Exchange should come in between the HashAggregates.
val planned = df.queryExecution.executedPlan
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.isEmpty)
val projects = collect(planned) { case p: ProjectExec => p }
assert(projects.exists(_.outputPartitioning match {
case RangePartitioning(Seq(SortOrder(ar: AttributeReference, _, _, _)), _) =>
ar.name == "id1"
case _ => false
}))
}
}
test("SPARK-33399: aliased should be handled properly " +
"for partitioning and sortorder involving complex expressions") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t1", "t2", "t3") {
spark.range(10).select(col("id").as("id1")).createTempView("t1")
spark.range(20).select(col("id").as("id2")).createTempView("t2")
spark.range(30).select(col("id").as("id3")).createTempView("t3")
val planned = sql(
"""
|SELECT t3.id3 as t3id
|FROM (
| SELECT t1.id1 as t1id, t2.id2 as t2id
| FROM t1, t2
| WHERE t1.id1 * 10 = t2.id2 * 10
|) t12, t3
|WHERE t1id * 10 = t3.id3 * 10
""".stripMargin).queryExecution.executedPlan
val sortNodes = collect(planned) { case s: SortExec => s }
assert(sortNodes.size == 3)
val exchangeNodes = collect(planned) { case e: ShuffleExchangeExec => e }
assert(exchangeNodes.size == 3)
val projects = collect(planned) { case p: ProjectExec => p }
assert(projects.exists(_.outputPartitioning match {
case HashPartitioning(Seq(Multiply(ar1: AttributeReference, _, _)), _) =>
ar1.name == "t1id"
case _ =>
false
}))
}
}
}
test("SPARK-33399: alias handling should happen properly for SinglePartition") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val df = spark.range(1, 100, 1, 1)
.select(col("id").as("id1")).groupBy("id1").count()
val planned = df.queryExecution.executedPlan
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.isEmpty)
val projects = collect(planned) { case p: ProjectExec => p }
assert(projects.exists(_.outputPartitioning match {
case SinglePartition => true
case _ => false
}))
}
}
test("SPARK-33399: No extra exchanges in case of" +
" [Inner Join -> Project with aliases -> HashAggregate]") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t1", "t2") {
spark.range(10).repartition($"id").createTempView("t1")
spark.range(20).repartition($"id").createTempView("t2")
val planned = sql(
"""
|SELECT t1id, t2id
|FROM (
| SELECT t1.id as t1id, t2.id as t2id
| FROM t1 INNER JOIN t2
| WHERE t1.id = t2.id
|) t12
|GROUP BY t1id, t2id
""".stripMargin).queryExecution.executedPlan
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.size == 2)
val projects = collect(planned) { case p: ProjectExec => p }
assert(projects.exists(_.outputPartitioning match {
case PartitioningCollection(Seq(HashPartitioning(Seq(k1: AttributeReference), _),
HashPartitioning(Seq(k2: AttributeReference), _))) =>
k1.name == "t1id" && k2.name == "t2id"
case _ => false
}))
}
}
}
test("SPARK-33400: Normalization of sortOrder should take care of sameOrderExprs") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t1", "t2", "t3") {
spark.range(10).repartition($"id").createTempView("t1")
spark.range(20).repartition($"id").createTempView("t2")
spark.range(30).repartition($"id").createTempView("t3")
val planned = sql(
"""
|SELECT t2id, t3.id as t3id
|FROM (
| SELECT t1.id as t1id, t2.id as t2id
| FROM t1, t2
| WHERE t1.id = t2.id
|) t12, t3
|WHERE t2id = t3.id
""".stripMargin).queryExecution.executedPlan
val sortNodes = collect(planned) { case s: SortExec => s }
assert(sortNodes.size == 3)
val projects = collect(planned) { case p: ProjectExec => p }
assert(projects.exists(_.outputOrdering match {
case Seq(SortOrder(_, Ascending, NullsFirst, sameOrderExprs)) =>
sameOrderExprs.size == 1 && sameOrderExprs.head.isInstanceOf[AttributeReference] &&
sameOrderExprs.head.asInstanceOf[AttributeReference].name == "t2id"
case _ => false
}))
}
}
}
test("sort order doesn't have repeated expressions") {
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
withTempView("t1", "t2") {
spark.range(10).repartition($"id").createTempView("t1")
spark.range(20).repartition($"id").createTempView("t2")
val planned = sql(
"""
| SELECT t12.id, t1.id
| FROM (SELECT t1.id FROM t1, t2 WHERE t1.id * 2 = t2.id) t12, t1
| where 2 * t12.id = t1.id
""".stripMargin).queryExecution.executedPlan
        // t12 is already sorted on `t1.id * 2`, and we need to sort it on `2 * t12.id`
        // for the 2nd join, so the sort on t12 can be avoided.
val sortNodes = planned.collect { case s: SortExec => s }
assert(sortNodes.size == 3)
val outputOrdering = planned.outputOrdering
assert(outputOrdering.size == 1)
        // The sort order should have 3 children, not 4, because t1.id * 2 and 2 * t1.id are the same.
assert(outputOrdering.head.children.size == 3)
assert(outputOrdering.head.children.count(_.isInstanceOf[AttributeReference]) == 2)
assert(outputOrdering.head.children.count(_.isInstanceOf[Multiply]) == 1)
}
}
}
test("aliases to expressions should not be replaced") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("df1", "df2") {
spark.range(10).selectExpr("id AS key", "0").repartition($"key").createTempView("df1")
spark.range(20).selectExpr("id AS key", "0").repartition($"key").createTempView("df2")
val planned = sql(
"""
|SELECT * FROM
| (SELECT key + 1 AS k1 from df1) t1
|INNER JOIN
| (SELECT key + 1 AS k2 from df2) t2
|ON t1.k1 = t2.k2
|""".stripMargin).queryExecution.executedPlan
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
// Make sure aliases to an expression (key + 1) are not replaced.
Seq("k1", "k2").foreach { alias =>
assert(exchanges.exists(_.outputPartitioning match {
case HashPartitioning(Seq(a: AttributeReference), _) => a.name == alias
case _ => false
}))
}
}
}
}
test("aliases in the aggregate expressions should not introduce extra shuffle") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val t1 = spark.range(10).selectExpr("floor(id/4) as k1")
val t2 = spark.range(20).selectExpr("floor(id/4) as k2")
val agg1 = t1.groupBy("k1").agg(count(lit("1")).as("cnt1"))
val agg2 = t2.groupBy("k2").agg(count(lit("1")).as("cnt2")).withColumnRenamed("k2", "k3")
val planned = agg1.join(agg2, $"k1" === $"k3").queryExecution.executedPlan
assert(collect(planned) { case h: HashAggregateExec => h }.nonEmpty)
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.size == 2)
}
}
test("aliases in the object hash/sort aggregate expressions should not introduce extra shuffle") {
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
Seq(true, false).foreach { useObjectHashAgg =>
withSQLConf(SQLConf.USE_OBJECT_HASH_AGG.key -> useObjectHashAgg.toString) {
val t1 = spark.range(10).selectExpr("floor(id/4) as k1")
val t2 = spark.range(20).selectExpr("floor(id/4) as k2")
val agg1 = t1.groupBy("k1").agg(collect_list("k1"))
val agg2 = t2.groupBy("k2").agg(collect_list("k2")).withColumnRenamed("k2", "k3")
val planned = agg1.join(agg2, $"k1" === $"k3").queryExecution.executedPlan
if (useObjectHashAgg) {
assert(collect(planned) { case o: ObjectHashAggregateExec => o }.nonEmpty)
} else {
assert(collect(planned) { case s: SortAggregateExec => s }.nonEmpty)
}
val exchanges = collect(planned) { case s: ShuffleExchangeExec => s }
assert(exchanges.size == 2)
}
}
}
}
test("aliases in the sort aggregate expressions should not introduce extra sort") {
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.USE_OBJECT_HASH_AGG.key -> "false") {
val t1 = spark.range(10).selectExpr("floor(id/4) as k1")
val t2 = spark.range(20).selectExpr("floor(id/4) as k2")
val agg1 = t1.groupBy("k1").agg(collect_list("k1")).withColumnRenamed("k1", "k3")
val agg2 = t2.groupBy("k2").agg(collect_list("k2"))
val planned = agg1.join(agg2, $"k3" === $"k2").queryExecution.executedPlan
assert(collect(planned) { case s: SortAggregateExec => s }.nonEmpty)
      // We expect two SortExec nodes on each side of the join.
val sorts = collect(planned) { case s: SortExec => s }
assert(sorts.size == 4)
}
}
testWithWholeStageCodegenOnAndOff("Change the number of partitions to zero " +
"when a range is empty") { _ =>
val range = spark.range(1, 1, 1, 1000)
val numPartitions = range.rdd.getNumPartitions
assert(numPartitions == 0)
}
test("SPARK-33758: Prune unnecessary output partitioning") {
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
withTempView("t1", "t2") {
spark.range(10).repartition($"id").createTempView("t1")
spark.range(20).repartition($"id").createTempView("t2")
val planned = sql(
"""
| SELECT t1.id as t1id, t2.id as t2id
| FROM t1, t2
| WHERE t1.id = t2.id
""".stripMargin).queryExecution.executedPlan
assert(planned.outputPartitioning match {
case PartitioningCollection(Seq(HashPartitioning(Seq(k1: AttributeReference), _),
HashPartitioning(Seq(k2: AttributeReference), _))) =>
k1.name == "t1id" && k2.name == "t2id"
})
val planned2 = sql(
"""
| SELECT t1.id as t1id
| FROM t1, t2
| WHERE t1.id = t2.id
""".stripMargin).queryExecution.executedPlan
assert(planned2.outputPartitioning match {
case HashPartitioning(Seq(k1: AttributeReference), _) if k1.name == "t1id" =>
true
})
}
}
}
test("SPARK-34919: Change partitioning to SinglePartition if partition number is 1") {
def checkSinglePartitioning(df: DataFrame): Unit = {
assert(
df.queryExecution.analyzed.collect {
case r: RepartitionOperation => r
}.size == 1)
assert(
collect(df.queryExecution.executedPlan) {
case s: ShuffleExchangeExec if s.outputPartitioning == SinglePartition => s
}.size == 1)
}
checkSinglePartitioning(sql("SELECT /*+ REPARTITION(1) */ * FROM VALUES(1),(2),(3) AS t(c)"))
checkSinglePartitioning(sql("SELECT /*+ REPARTITION(1, c) */ * FROM VALUES(1),(2),(3) AS t(c)"))
}
}
// Used for unit-testing EnsureRequirements
private case class DummySparkPlan(
override val children: Seq[SparkPlan] = Nil,
override val outputOrdering: Seq[SortOrder] = Nil,
override val outputPartitioning: Partitioning = UnknownPartitioning(0),
override val requiredChildDistribution: Seq[Distribution] = Nil,
override val requiredChildOrdering: Seq[Seq[SortOrder]] = Nil
) extends SparkPlan {
override protected def doExecute(): RDD[InternalRow] = throw new UnsupportedOperationException
override def output: Seq[Attribute] = Seq.empty
override protected def withNewChildrenInternal(newChildren: IndexedSeq[SparkPlan]): SparkPlan =
copy(children = newChildren)
}
|
BryanCutler/spark
|
sql/core/src/test/scala/org/apache/spark/sql/execution/PlannerSuite.scala
|
Scala
|
apache-2.0
| 52,806 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.ingest
import java.io.Flushable
import java.util.concurrent.{ConcurrentHashMap, Executors}
import java.util.concurrent.atomic.{AtomicInteger, AtomicLong}
import com.typesafe.config.Config
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.{DataStore, DataUtilities, FeatureWriter, Transaction}
import org.locationtech.geomesa.convert.EvaluationContext
import org.locationtech.geomesa.convert.EvaluationContext.DelegatingEvaluationContext
import org.locationtech.geomesa.convert2.SimpleFeatureConverter
import org.locationtech.geomesa.tools.Command
import org.locationtech.geomesa.tools.utils.StatusCallback
import org.locationtech.geomesa.utils.collection.CloseableIterator
import org.locationtech.geomesa.utils.geotools.FeatureUtils
import org.locationtech.geomesa.utils.io.fs.FileSystemDelegate.FileHandle
import org.locationtech.geomesa.utils.io.fs.LocalDelegate.StdInHandle
import org.locationtech.geomesa.utils.io.{CloseWithLogging, CloseablePool, PathUtils, WithClose}
import org.locationtech.geomesa.utils.text.TextTools
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import scala.util.control.NonFatal
/**
* Ingestion that uses geomesa converters to process input files
*
* @param sft simple feature type
* @param dsParams data store parameters
* @param converterConfig converter definition
* @param inputs paths to ingest
* @param numThreads how many threads to use
*/
class LocalConverterIngest(
dsParams: Map[String, String],
sft: SimpleFeatureType,
converterConfig: Config,
inputs: Seq[String],
numThreads: Int
) extends AbstractConverterIngest(dsParams, sft) with LazyLogging {
/**
* Hook to allow modification of the feature returned by the converter
*
* @param iter features
* @return
*/
protected def features(iter: CloseableIterator[SimpleFeature]): CloseableIterator[SimpleFeature] = iter
override protected def runIngest(ds: DataStore, sft: SimpleFeatureType, callback: StatusCallback): Unit = {
Command.user.info("Running ingestion in local mode")
val start = System.currentTimeMillis()
// if inputs is empty, we've already validated that stdin has data to read
val stdin = inputs.isEmpty
val files = if (stdin) { StdInHandle.available().toSeq } else { inputs.flatMap(PathUtils.interpretPath) }
val threads = if (numThreads <= files.length) { numThreads } else {
Command.user.warn("Can't use more threads than there are input files - reducing thread count")
files.length
}
val batch = IngestCommand.LocalBatchSize.toInt.getOrElse {
throw new IllegalArgumentException(
s"Invalid batch size for property ${IngestCommand.LocalBatchSize.property}: " +
IngestCommand.LocalBatchSize.get)
}
// global counts shared among threads
val written = new AtomicLong(0)
val failed = new AtomicLong(0)
val errors = new AtomicInteger(0)
val bytesRead = new AtomicLong(0L)
val converters = CloseablePool(SimpleFeatureConverter(sft, converterConfig), threads)
val writers = CloseablePool(ds.getFeatureWriterAppend(sft.getTypeName, Transaction.AUTO_COMMIT), threads)
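    // Tracks the number of features written per writer so that each writer can be flushed
    // every `batch` features (see the modulo check in LocalIngestWorker below)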
val batches = new ConcurrentHashMap[FeatureWriter[SimpleFeatureType, SimpleFeature], AtomicInteger](threads)
try {
// keep track of failure at a global level, keep line counts and success local
val globalFailures = new com.codahale.metrics.Counter {
override def inc(): Unit = failed.incrementAndGet()
override def inc(n: Long): Unit = failed.addAndGet(n)
override def dec(): Unit = failed.decrementAndGet()
override def dec(n: Long): Unit = failed.addAndGet(-1 * n)
override def getCount: Long = failed.get()
}
class LocalIngestWorker(file: FileHandle) extends Runnable {
override def run(): Unit = {
try {
converters.borrow { converter =>
val delegate = converter.createEvaluationContext(EvaluationContext.inputFileParam(file.path))
val ec = new DelegatingEvaluationContext(delegate)(failure = globalFailures)
WithClose(file.open) { streams =>
streams.foreach { case (name, is) =>
ec.setInputFilePath(name.getOrElse(file.path))
val features = LocalConverterIngest.this.features(converter.process(is, ec))
writers.borrow { writer =>
var count = batches.get(writer)
if (count == null) {
count = new AtomicInteger(0)
batches.put(writer, count)
}
features.foreach { sf =>
try {
FeatureUtils.write(writer, sf, useProvidedFid = true)
written.incrementAndGet()
count.incrementAndGet()
} catch {
case NonFatal(e) =>
logger.error(s"Failed to write '${DataUtilities.encodeFeature(sf)}'", e)
failed.incrementAndGet()
}
if (count.get % batch == 0) {
count.set(0)
writer match {
case f: Flushable => f.flush()
case _ => // no-op
}
}
}
}
}
}
}
} catch {
case e @ (_: ClassNotFoundException | _: NoClassDefFoundError) =>
// Rethrow exception so it can be caught by getting the future of this runnable in the main thread
// which will in turn cause the exception to be handled by org.locationtech.geomesa.tools.Runner
// Likely all threads will fail if a dependency is missing so it will terminate quickly
throw e
case NonFatal(e) =>
              // Don't kill the entire program because this thread failed - use the outer try/catch
val msg = s"Fatal error running local ingest worker on ${file.path}"
Command.user.error(msg)
logger.error(msg, e)
errors.incrementAndGet()
} finally {
bytesRead.addAndGet(file.length)
}
}
}
Command.user.info(s"Ingesting ${if (stdin) { "from stdin" } else { TextTools.getPlural(files.length, "file") }} " +
s"with ${TextTools.getPlural(threads, "thread")}")
val totalLength: () => Float = if (stdin) {
() => (bytesRead.get + files.map(_.length).sum).toFloat // re-evaluate each time as bytes are read from stdin
} else {
val length = files.map(_.length).sum.toFloat // only evaluate once
() => length
}
def progress(): Float = bytesRead.get() / totalLength()
val es = Executors.newFixedThreadPool(threads)
val futures = files.map(f => es.submit(new LocalIngestWorker(f))).toList
es.shutdown()
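    // shutdown() stops accepting new tasks but lets the submitted workers finish;
    // the loop below polls for termination while reporting progress via the callback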
def counters = Seq(("ingested", written.get()), ("failed", failed.get()))
while (!es.isTerminated) {
Thread.sleep(500)
callback("", progress(), counters, done = false)
}
callback("", progress(), counters, done = true)
// Get all futures so that we can propagate the logging up to the top level for handling
// in org.locationtech.geomesa.tools.Runner to catch missing dependencies
futures.foreach(_.get)
} finally {
CloseWithLogging(converters)
CloseWithLogging(writers).foreach(_ => errors.incrementAndGet())
}
Command.user.info(s"Local ingestion complete in ${TextTools.getTime(start)}")
if (files.lengthCompare(1) == 0) {
Command.user.info(IngestCommand.getStatInfo(written.get, failed.get, input = s" for file: ${files.head.path}"))
} else {
Command.user.info(IngestCommand.getStatInfo(written.get, failed.get))
}
if (errors.get > 0) {
Command.user.warn("Some files caused errors, ingest counts may not be accurate")
}
}
}
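// A minimal construction sketch (values are hypothetical; the ingest entry point, e.g. a run method,
// is assumed to be inherited from AbstractConverterIngest):
//   new LocalConverterIngest(dsParams, sft, converterConfig, inputs = Seq("/tmp/data.csv"), numThreads = 4)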
|
elahrvivaz/geomesa
|
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/ingest/LocalConverterIngest.scala
|
Scala
|
apache-2.0
| 8,628 |
package io.reactors.common.concurrent
import sun.misc.Unsafe
import scala.annotation.switch
import scala.annotation.tailrec
class CacheTrie[K <: AnyRef, V <: AnyRef](
val useCounters: Boolean = true,
val doCompression: Boolean = true
) {
import CacheTrie._
private val unsafe: Unsafe = Platform.unsafe
@volatile private var rawCache: Array[AnyRef] = null
private val rawRoot: Array[AnyRef] = createWideArray()
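  // Node layout: every array node carries one extra trailing slot holding an Integer count of its
  // non-null entries (wide nodes have 16 + 1 slots, narrow nodes 4 + 1); usedLength below excludes
  // that trailing count slot.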
private def createWideArray(): Array[AnyRef] = {
val node = new Array[AnyRef](16 + 1)
node(16) = Integer.valueOf(0)
node
}
private def createNarrowArray(): Array[AnyRef] = {
val node = new Array[AnyRef](4 + 1)
node(4) = Integer.valueOf(0)
node
}
private def createCacheArray(level: Int): Array[AnyRef] = {
new Array[AnyRef](1 + (1 << level))
}
private def usedLength(array: Array[AnyRef]) = array.length - 1
@tailrec
private def decrementCount(array: Array[AnyRef]): Unit = {
val countIndex = array.length - 1
val count = READ(array, countIndex).asInstanceOf[Integer]
val newCount = Integer.valueOf(count.intValue - 1)
if (!CAS(array, countIndex, count, newCount)) decrementCount(array)
}
@tailrec
private def incrementCount(array: Array[AnyRef]): Unit = {
val countIndex = array.length - 1
val count = READ(array, countIndex).asInstanceOf[Integer]
val newCount = Integer.valueOf(count.intValue + 1)
if (!CAS(array, countIndex, count, newCount)) incrementCount(array)
}
private def sequentialFixCount(array: Array[AnyRef]): Unit = {
var i = 0
var count = 0
while (i < usedLength(array)) {
val entry = array(i)
if (entry != null) count += 1
if (entry.isInstanceOf[Array[AnyRef]]) {
sequentialFixCount(entry.asInstanceOf[Array[AnyRef]])
}
i += 1
}
array(array.length - 1) = Integer.valueOf(count)
}
private def READ(array: Array[AnyRef], pos: Int): AnyRef = {
unsafe.getObjectVolatile(array, ArrayBase + (pos << ArrayShift))
}
private def CAS(array: Array[AnyRef], pos: Int, ov: AnyRef, nv: AnyRef): Boolean = {
unsafe.compareAndSwapObject(array, ArrayBase + (pos << ArrayShift), ov, nv)
}
private def WRITE(array: Array[AnyRef], pos: Int, nv: AnyRef): Unit = {
unsafe.putObjectVolatile(array, ArrayBase + (pos << ArrayShift), nv)
}
private def READ_CACHE: Array[AnyRef] = rawCache
private def CAS_CACHE(ov: Array[AnyRef], nv: Array[AnyRef]) = {
unsafe.compareAndSwapObject(this, CacheTrieRawCacheOffset, ov, nv)
}
private def READ_WIDE(enode: ENode): Array[AnyRef] = enode.wide
private def CAS_WIDE(enode: ENode, ov: Array[AnyRef], nv: Array[AnyRef]): Boolean = {
unsafe.compareAndSwapObject(enode, ENodeWideOffset, ov, nv)
}
private def READ_TXN(snode: SNode[_, _]): AnyRef = snode.txn
private def CAS_TXN(snode: SNode[_, _], nv: AnyRef): Boolean = {
unsafe.compareAndSwapObject(snode, SNodeFrozenOffset, NoTxn, nv)
}
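  // Mixes the high bits of the hash code into the low bits and clears the sign bit, so that the
  // low-order bits used for indexing are better distributed (the same idea as the hash spreaders
  // in the java.util hash tables).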
private def spread(h: Int): Int = {
(h ^ (h >>> 16)) & 0x7fffffff
}
private[concurrent] def fastLookup(key: K): V = {
val hash = spread(key.hashCode)
fastLookup(key, hash)
}
private[concurrent] final def fastLookup(key: K, hash: Int): V = {
val cache = READ_CACHE
if (cache == null) {
slowLookup(key, hash, 0, rawRoot, null)
} else {
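      // Cache layout, as used here and in fastInsert: slot 0 holds a CacheNode (with a link to the
      // parent cache), slots 1 until len hold cached entries indexed by the low bits of the hash,
      // and the cache level is log2(len - 1).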
val len = cache.length
val mask = len - 1 - 1
val pos = 1 + (hash & mask)
val cachee = READ(cache, pos)
val level = 31 - Integer.numberOfLeadingZeros(len - 1)
if (cachee eq null) {
// Nothing is cached at this location, do slow lookup.
slowLookup(key, hash, 0, rawRoot, cache)
} else if (cachee.isInstanceOf[SNode[_, _]]) {
// println(s"$key - found single node cachee, cache level $level")
val oldsn = cachee.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(oldsn)
if (txn eq NoTxn) {
val oldhash = oldsn.hash
val oldkey = oldsn.key
if ((oldhash == hash) && ((oldkey eq key) || (oldkey == key))) oldsn.value
else null.asInstanceOf[V]
} else {
// The single node is either frozen or scheduled for modification
slowLookup(key, hash, 0, rawRoot, cache)
}
} else if (cachee.isInstanceOf[Array[AnyRef]]) {
// println(s"$key - found array cachee, cache level $level")
val an = cachee.asInstanceOf[Array[AnyRef]]
val mask = usedLength(an) - 1
val pos = (hash >>> level) & mask
val old = READ(an, pos)
if (old eq null) {
// The key is not present in the cache trie.
null.asInstanceOf[V]
} else if (old.isInstanceOf[SNode[_, _]]) {
val oldsn = old.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(oldsn)
if (txn eq NoTxn) {
// The single node is up-to-date.
// Check if the key is contained in the single node.
val oldhash = oldsn.hash
val oldkey = oldsn.key
if ((oldhash == hash) && ((oldkey eq key) || (oldkey == key))) oldsn.value
else null.asInstanceOf[V]
} else {
// The single node is either frozen or scheduled for modification.
slowLookup(key, hash, 0, rawRoot, cache)
}
} else {
def resumeSlowLookup(): V = {
if (old.isInstanceOf[Array[AnyRef]]) {
// Continue the search from the specified level.
val oldan = old.asInstanceOf[Array[AnyRef]]
slowLookup(key, hash, level + 4, oldan, cache)
} else if (old.isInstanceOf[LNode[_, _]]) {
// Check if the key is contained in the list node.
var tail = old.asInstanceOf[LNode[K, V]]
while (tail != null) {
if ((tail.hash == hash) && ((tail.key eq key) || (tail.key == key))) {
return tail.value
}
tail = tail.next
}
null.asInstanceOf[V]
} else if ((old eq FVNode) || old.isInstanceOf[FNode]) {
// Array node contains a frozen node, so it is obsolete -- do slow lookup.
slowLookup(key, hash, 0, rawRoot, cache)
} else if (old.isInstanceOf[ENode]) {
// Help complete the transaction.
val en = old.asInstanceOf[ENode]
completeExpansion(cache, en)
fastLookup(key, hash)
} else if (old.isInstanceOf[XNode]) {
// Help complete the transaction.
val xn = old.asInstanceOf[XNode]
completeCompression(cache, xn)
fastLookup(key, hash)
} else {
sys.error(s"Unexpected case -- $old")
}
}
resumeSlowLookup()
}
} else {
sys.error(s"Unexpected case -- $cachee is not supposed to be cached.")
}
}
}
final def apply(key: K): V = {
val result = lookup(key)
if (result.asInstanceOf[AnyRef] eq null) throw new NoSuchElementException
else result
}
final def get(key: K): Option[V] = {
val result = lookup(key)
if (result.asInstanceOf[AnyRef] eq null) None
else Some(result)
}
final def lookup(key: K): V = {
val hash = spread(key.hashCode)
fastLookup(key, hash)
}
private[concurrent] def slowLookup(key: K): V = {
val hash = spread(key.hashCode)
slowLookup(key, hash)
}
private[concurrent] def slowLookup(key: K, hash: Int): V = {
val node = rawRoot
val cache = READ_CACHE
slowLookup(key, hash, 0, node, cache)
}
@tailrec
private[concurrent] final def slowLookup(
key: K, hash: Int, level: Int, node: Array[AnyRef], cache: Array[AnyRef]
): V = {
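    // If this node sits exactly at the level covered by the current cache array, record it in the
    // cache so that subsequent lookups can start from here (inhabitCache is defined elsewhere in
    // this class).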
if (cache != null && (1 << level) == (cache.length - 1)) {
inhabitCache(cache, node, hash, level)
}
val mask = usedLength(node) - 1
val pos = (hash >>> level) & mask
val old = READ(node, pos)
if (old eq null) {
null.asInstanceOf[V]
} else if (old.isInstanceOf[Array[AnyRef]]) {
val an = old.asInstanceOf[Array[AnyRef]]
slowLookup(key, hash, level + 4, an, cache)
} else if (old.isInstanceOf[SNode[_, _]]) {
val cacheLevel =
if (cache == null) 0
else 31 - Integer.numberOfLeadingZeros(cache.length - 1)
if (level < cacheLevel || level >= cacheLevel + 8) {
recordCacheMiss()
}
val oldsn = old.asInstanceOf[SNode[K, V]]
if (cache != null && (1 << (level + 4)) == (cache.length - 1)) {
// println(s"about to inhabit for single node -- ${level + 4} vs $cacheLevel")
inhabitCache(cache, oldsn, hash, level + 4)
}
if ((oldsn.hash == hash) && ((oldsn.key eq key) || (oldsn.key == key))) {
oldsn.value
} else {
null.asInstanceOf[V]
}
} else if (old.isInstanceOf[LNode[_, _]]) {
val cacheLevel =
if (cache == null) 0
else 31 - Integer.numberOfLeadingZeros(cache.length - 1)
if (level < cacheLevel || level >= cacheLevel + 8) {
// A potential cache miss -- we need to check the cache state.
recordCacheMiss()
}
val oldln = old.asInstanceOf[LNode[K, V]]
if (oldln.hash != hash) {
null.asInstanceOf[V]
} else {
var tail = oldln
while (tail != null) {
if ((tail.key eq key) || (tail.key == key)) {
return tail.value
}
tail = tail.next
}
null.asInstanceOf[V]
}
} else if (old.isInstanceOf[ENode]) {
val enode = old.asInstanceOf[ENode]
val narrow = enode.narrow
slowLookup(key, hash, level + 4, narrow, cache)
} else if (old.isInstanceOf[XNode]) {
val xnode = old.asInstanceOf[XNode]
val stale = xnode.stale
slowLookup(key, hash, level + 4, stale, cache)
} else if (old eq FVNode) {
null.asInstanceOf[V]
} else if (old.isInstanceOf[FNode]) {
val frozen = old.asInstanceOf[FNode].frozen
if (frozen.isInstanceOf[SNode[_, _]]) {
sys.error(s"Unexpected case (should never be frozen): $frozen")
} else if (frozen.isInstanceOf[LNode[_, _]]) {
val ln = frozen.asInstanceOf[LNode[K, V]]
if (ln.hash != hash) {
null.asInstanceOf[V]
} else {
var tail = ln
while (tail != null) {
if ((tail.key eq key) || (tail.key == key)) {
return tail.value
}
tail = tail.next
}
null.asInstanceOf[V]
}
} else if (frozen.isInstanceOf[Array[AnyRef]]) {
val an = frozen.asInstanceOf[Array[AnyRef]]
slowLookup(key, hash, level + 4, an, cache)
} else {
sys.error(s"Unexpected case: $old")
}
} else {
sys.error(s"Unexpected case: $old")
}
}
final def insert(key: K, value: V): Unit = {
val hash = spread(key.hashCode)
fastInsert(key, value, hash)
}
private def fastInsert(key: K, value: V, hash: Int): Unit = {
val cache = READ_CACHE
fastInsert(key, value, hash, cache, cache)
}
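  // Fast-path insert driven by the cache: writes through the cached array node
  // when possible, retries one cache level above when the cached entry is
  // inconclusive, and falls back to the slow path on frozen nodes or when a
  // concurrent transaction must be helped from the root.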
@tailrec
private def fastInsert(
key: K, value: V, hash: Int, cache: Array[AnyRef], prevCache: Array[AnyRef]
): Unit = {
if (cache == null) {
slowInsert(key, value, hash)
} else {
val len = cache.length
val mask = len - 1 - 1
val pos = 1 + (hash & mask)
val cachee = READ(cache, pos)
val level = 31 - Integer.numberOfLeadingZeros(len - 1)
if (cachee eq null) {
// Inconclusive -- retry one cache layer above.
val stats = READ(cache, 0)
val parentCache = stats.asInstanceOf[CacheNode].parent
fastInsert(key, value, hash, parentCache, cache)
} else if (cachee.isInstanceOf[Array[AnyRef]]) {
// Read from the array node.
val an = cachee.asInstanceOf[Array[AnyRef]]
val mask = usedLength(an) - 1
val pos = (hash >>> level) & mask
val old = READ(an, pos)
if (old eq null) {
// Try to write the single node directly.
val sn = new SNode(hash, key, value)
if (CAS(an, pos, old, sn)) {
incrementCount(an)
return
} else fastInsert(key, value, hash, cache, prevCache)
} else if (old.isInstanceOf[Array[AnyRef]]) {
// Continue search recursively.
val oldan = old.asInstanceOf[Array[AnyRef]]
val res = slowInsert(key, value, hash, level + 4, oldan, an, prevCache)
if (res eq Restart) fastInsert(key, value, hash, cache, prevCache)
} else if (old.isInstanceOf[SNode[_, _]]) {
val oldsn = old.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(oldsn)
if (txn eq NoTxn) {
// No other transaction in progress.
if ((oldsn.hash == hash) && ((oldsn.key eq key) || (oldsn.key == key))) {
// Replace this key in the parent.
val sn = new SNode(hash, key, value)
if (CAS_TXN(oldsn, sn)) {
CAS(an, pos, oldsn, sn)
// Note: must not increment the count here.
} else fastInsert(key, value, hash, cache, prevCache)
} else if (usedLength(an) == 4) {
// Must expand, but cannot do so without the parent.
// Retry one cache level above.
val stats = READ(cache, 0)
val parentCache = stats.asInstanceOf[CacheNode].parent
fastInsert(key, value, hash, parentCache, cache)
} else {
// Create an array node at the next level and replace the single node.
val nnode = newNarrowOrWideNode(
oldsn.hash, oldsn.key, oldsn.value, hash, key, value, level + 4)
if (CAS_TXN(oldsn, nnode)) {
CAS(an, pos, oldsn, nnode)
} else fastInsert(key, value, hash, cache, prevCache)
}
} else if (txn eq FSNode) {
// Must restart from the root, to find the transaction node, and help.
slowInsert(key, value, hash)
} else {
// Complete the current transaction, and retry.
CAS(an, pos, oldsn, txn)
fastInsert(key, value, hash, cache, prevCache)
}
} else {
// Must restart from the root, to find the transaction node, and help.
slowInsert(key, value, hash)
}
} else if (cachee.isInstanceOf[SNode[_, _]]) {
// Need a reference to the parent array node -- retry one cache level above.
val stats = READ(cache, 0)
val parentCache = stats.asInstanceOf[CacheNode].parent
fastInsert(key, value, hash, parentCache, cache)
} else {
sys.error(s"Unexpected case -- $cachee is not supposed to be cached.")
}
}
}
private[concurrent] final def slowInsert(key: K, value: V): Unit = {
val hash = spread(key.hashCode)
slowInsert(key, value, hash)
}
private def slowInsert(key: K, value: V, hash: Int): Unit = {
val node = rawRoot
val cache = READ_CACHE
var result = Restart
do {
result = slowInsert(key, value, hash, 0, node, null, cache)
} while (result == Restart)
}
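  // Slow-path insert: walks the trie from `current`, adding a fresh single node,
  // replacing an existing key, expanding a full narrow node via an ENode, or
  // helping any concurrent expansion/compression; returns Success or Restart.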
@tailrec
private[concurrent] final def slowInsert(
key: K, value: V, hash: Int, level: Int,
current: Array[AnyRef], parent: Array[AnyRef],
cache: Array[AnyRef]
): AnyRef = {
if (cache != null && (1 << level) == (cache.length - 1)) {
inhabitCache(cache, current, hash, level)
}
val mask = usedLength(current) - 1
val pos = (hash >>> level) & mask
val old = READ(current, pos)
if (old eq null) {
// Fast-path -- CAS the node into the empty position.
val cacheLevel =
if (cache == null) 0
else 31 - Integer.numberOfLeadingZeros(cache.length - 1)
if (level < cacheLevel || level >= cacheLevel + 8) {
recordCacheMiss()
}
val snode = new SNode(hash, key, value)
if (CAS(current, pos, old, snode)) {
incrementCount(current)
Success
} else slowInsert(key, value, hash, level, current, parent, cache)
} else if (old.isInstanceOf[Array[_]]) {
// Repeat the search on the next level.
val oldan = old.asInstanceOf[Array[AnyRef]]
slowInsert(key, value, hash, level + 4, oldan, current, cache)
} else if (old.isInstanceOf[SNode[_, _]]) {
val oldsn = old.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(oldsn)
if (txn eq NoTxn) {
// The node is not frozen or marked for freezing.
if ((oldsn.hash == hash) && ((oldsn.key eq key) || (oldsn.key == key))) {
val sn = new SNode(hash, key, value)
if (CAS_TXN(oldsn, sn)) {
CAS(current, pos, oldsn, sn)
// Note: must not increment count here.
Success
} else slowInsert(key, value, hash, level, current, parent, cache)
} else if (usedLength(current) == 4) {
// Expand the current node, aiming to avoid the collision.
// Root size always 16, so parent is non-null.
val parentmask = usedLength(parent) - 1
val parentlevel = level - 4
val parentpos = (hash >>> parentlevel) & parentmask
val enode = new ENode(parent, parentpos, current, hash, level)
if (CAS(parent, parentpos, current, enode)) {
completeExpansion(cache, enode)
val wide = READ_WIDE(enode)
slowInsert(key, value, hash, level, wide, parent, cache)
} else {
slowInsert(key, value, hash, level, current, parent, cache)
}
} else {
// Replace the single node with a narrow node.
val nnode = newNarrowOrWideNode(
oldsn.hash, oldsn.key, oldsn.value, hash, key, value, level + 4)
if (CAS_TXN(oldsn, nnode)) {
CAS(current, pos, oldsn, nnode)
Success
} else slowInsert(key, value, hash, level, current, parent, cache)
}
} else if (txn eq FSNode) {
// We landed into the middle of another transaction.
// We must restart from the top, find the transaction node and help.
Restart
} else {
// The single node had been scheduled for replacement by some thread.
// We need to help, then retry.
CAS(current, pos, oldsn, txn)
slowInsert(key, value, hash, level, current, parent, cache)
}
} else if (old.isInstanceOf[LNode[_, _]]) {
val oldln = old.asInstanceOf[LNode[K, V]]
val nn = newListNarrowOrWideNode(oldln, hash, key, value, level + 4)
if (CAS(current, pos, oldln, nn)) Success
else slowInsert(key, value, hash, level, current, parent, cache)
} else if (old.isInstanceOf[ENode]) {
// There is another transaction in progress, help complete it, then restart.
val enode = old.asInstanceOf[ENode]
completeExpansion(cache, enode)
Restart
} else if (old.isInstanceOf[XNode]) {
// There is another transaction in progress, help complete it, then restart.
val xnode = old.asInstanceOf[XNode]
completeCompression(cache, xnode)
Restart
} else if ((old eq FVNode) || old.isInstanceOf[FNode]) {
// We landed into the middle of some other thread's transaction.
// We need to restart from the top to find the transaction's descriptor,
// and if we find it, then help and restart once more.
Restart
} else {
sys.error("Unexpected case -- " + old)
}
}
def remove(key: K): V = {
val hash = spread(key.hashCode)
fastRemove(key, hash)
}
def fastRemove(key: K, hash: Int): V = {
val cache = READ_CACHE
val result = fastRemove(key, hash, cache, cache, 0)
result.asInstanceOf[V]
}
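  // Fast-path removal via the cache; `ascends` counts how many cache levels were
  // climbed, and a cache miss is recorded when more than one ascent was needed.
  // Falls back to the slow path when the cache is absent or a transaction is met.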
@tailrec
private def fastRemove(
key: K, hash: Int, cache: Array[AnyRef], prevCache: Array[AnyRef], ascends: Int
): AnyRef = {
if (cache == null) {
slowRemove(key, hash)
} else {
val len = cache.length
val mask = len - 1 - 1
val pos = 1 + (hash & mask)
val cachee = READ(cache, pos)
val level = 31 - Integer.numberOfLeadingZeros(len - 1)
// println("fast remove -- " + level)
if (cachee eq null) {
// Inconclusive -- must retry one cache level above.
val stats = READ(cache, 0)
val parentCache = stats.asInstanceOf[CacheNode].parent
fastRemove(key, hash, parentCache, cache, ascends + 1)
} else if (cachee.isInstanceOf[Array[AnyRef]]) {
// Read from an array node.
val an = cachee.asInstanceOf[Array[AnyRef]]
val mask = usedLength(an) - 1
val pos = (hash >>> level) & mask
val old = READ(an, pos)
if (old eq null) {
// The key does not exist.
if (ascends > 1) {
recordCacheMiss()
}
null
} else if (old.isInstanceOf[Array[AnyRef]]) {
// Continue searching recursively.
val oldan = old.asInstanceOf[Array[AnyRef]]
val res = slowRemove(key, hash, level + 4, oldan, an, prevCache)
if (res == Restart) fastRemove(key, hash, cache, prevCache, ascends)
else res
} else if (old.isInstanceOf[SNode[_, _]]) {
val oldsn = old.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(oldsn)
if (txn eq NoTxn) {
// No other transaction in progress.
if ((oldsn.hash == hash) && ((oldsn.key eq key) || (oldsn.key == key))) {
// Remove the key.
if (CAS_TXN(oldsn, null)) {
CAS(an, pos, oldsn, null)
decrementCount(an)
if (ascends > 1) {
recordCacheMiss()
}
if (isCompressible(an)) {
compressDescend(rawRoot, null, hash, 0)
}
oldsn.value
} else fastRemove(key, hash, cache, prevCache, ascends)
} else {
// The key does not exist.
if (ascends > 1) {
recordCacheMiss()
}
null
}
} else if (txn eq FSNode) {
// Must restart from the root, to find the transaction node, and help.
slowRemove(key, hash)
} else {
// Complete the current transaction, and retry.
CAS(an, pos, oldsn, txn)
fastRemove(key, hash, cache, prevCache, ascends)
}
} else {
// Must restart from the root, to find the transaction node, and help.
slowRemove(key, hash)
}
} else if (cachee.isInstanceOf[SNode[_, _]]) {
// Need parent array node -- retry one cache level above.
val stats = READ(cache, 0)
val parentCache = stats.asInstanceOf[CacheNode].parent
fastRemove(key, hash, parentCache, cache, ascends + 1)
} else {
sys.error(s"Unexpected case -- $cachee is not supposed to be cached.")
}
}
}
private[concurrent] def slowRemove(key: K): V = {
val hash = spread(key.hashCode)
slowRemove(key, hash)
}
private[concurrent] def slowRemove(key: K, hash: Int): V = {
val node = rawRoot
val cache = READ_CACHE
var result: AnyRef = null
do {
result = slowRemove(key, hash, 0, node, null, cache)
} while (result == Restart)
result.asInstanceOf[V]
}
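  // An array node is compressible only if it holds at most one child, and that
  // child is a single node (checked via the element counter when enabled).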
private def isCompressible(current: Array[AnyRef]): Boolean = {
if (!doCompression) return false
if (useCounters) {
val count = READ(current, current.length - 1).asInstanceOf[Integer].intValue
if (count > 1) {
return false
}
}
var found: AnyRef = null
var i = 0
while (i < usedLength(current)) {
val old = READ(current, i)
if (old != null) {
if (found == null && old.isInstanceOf[SNode[_, _]]) {
found = old
} else {
return false
}
}
i += 1
}
true
}
private def compressSingleLevel(
cache: Array[AnyRef], current: Array[AnyRef], parent: Array[AnyRef],
hash: Int, level: Int
): Boolean = {
if (parent == null) {
return false
}
if (!isCompressible(current)) {
return false
}
// It is likely that the node is compressible, so we freeze it and try to compress.
val parentmask = usedLength(parent) - 1
val parentpos = (hash >>> (level - 4)) & parentmask
val xn = new XNode(parent, parentpos, current, hash, level)
if (CAS(parent, parentpos, current, xn)) {
completeCompression(cache, xn)
} else {
false
}
}
private def compressAscend(
cache: Array[AnyRef], current: Array[AnyRef], parent: Array[AnyRef],
hash: Int, level: Int
): Unit = {
val mustContinue = compressSingleLevel(cache, current, parent, hash, level)
if (mustContinue) {
// Continue compressing if possible.
// TODO: Investigate if full ascend is feasible.
compressDescend(rawRoot, null, hash, 0)
}
}
private def compressDescend(
current: Array[AnyRef], parent: Array[AnyRef], hash: Int, level: Int
): Boolean = {
// Dive into the cache starting from the root for the given hash,
// and compress as much as possible.
val pos = (hash >>> level) & (usedLength(current) - 1)
val old = READ(current, pos)
if (old.isInstanceOf[Array[AnyRef]]) {
val an = old.asInstanceOf[Array[AnyRef]]
if (!compressDescend(an, current, hash, level + 4)) return false
}
// We do not care about maintaining the cache in the slow compression path,
// so we just use the top-level cache.
if (parent != null) {
val cache = READ_CACHE
return compressSingleLevel(cache, current, parent, hash, level)
}
return false
}
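  // Computes the replacement for a fully frozen array node: a copy of its only
  // single node if it has exactly one entry, otherwise a fresh wide or narrow
  // copy of its contents; returns null when the frozen node is empty.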
private def compressFrozen(frozen: Array[AnyRef], level: Int): AnyRef = {
var single: AnyRef = null
var i = 0
while (i < usedLength(frozen)) {
val old = READ(frozen, i)
if (old != FVNode) {
if (single == null && old.isInstanceOf[SNode[_, _]]) {
// It is possible that this is the only entry in the array node.
single = old
} else {
// There are at least 2 nodes that are not FVNode.
// Unfortunately, the node was modified before it was completely frozen.
if (usedLength(frozen) == 16) {
val wide = createWideArray()
sequentialTransfer(frozen, wide, level)
sequentialFixCount(wide)
return wide
} else {
// If the node is narrow, then it cannot have any children.
val narrow = createNarrowArray()
sequentialTransferNarrow(frozen, narrow, level)
sequentialFixCount(narrow)
return narrow
}
}
}
i += 1
}
if (single != null) {
val oldsn = single.asInstanceOf[SNode[K, V]]
single = new SNode(oldsn.hash, oldsn.key, oldsn.value)
}
return single
}
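  // Completes an XNode compression: freezes and compresses the stale child,
  // then CASes the replacement into the parent slot. Returns true if the CAS
  // succeeded and the slot now holds null or a single node, so that callers
  // may continue compressing the levels above.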
private def completeCompression(cache: Array[AnyRef], xn: XNode): Boolean = {
val parent = xn.parent
val parentpos = xn.parentpos
val level = xn.level
// First, freeze and compress the subtree below.
val stale = xn.stale
val compressed = freezeAndCompress(cache, stale, level)
// Then, replace with the compressed version in the parent.
if (CAS(parent, parentpos, xn, compressed)) {
if (compressed == null) {
decrementCount(parent)
}
return compressed == null || compressed.isInstanceOf[SNode[K, V]]
}
return false
}
private def completeCompressionAlt(cache: Array[AnyRef], xn: XNode): Boolean = {
val parent = xn.parent
val parentpos = xn.parentpos
val level = xn.level
// First, freeze the subtree below.
val stale = xn.stale
freeze(cache, stale)
// Then, create a compressed version, and replace it in the parent.
val compressed = compressFrozen(stale, level)
if (CAS(parent, parentpos, xn, compressed)) {
if (compressed == null) {
decrementCount(parent)
}
return compressed == null || compressed.isInstanceOf[SNode[K, V]]
}
return false
}
@tailrec
private def slowRemove(
key: K, hash: Int, level: Int, current: Array[AnyRef], parent: Array[AnyRef],
cache: Array[AnyRef]
): AnyRef = {
// println("slow remove -- " + level)
val mask = usedLength(current) - 1
val pos = (hash >>> level) & mask
val old = READ(current, pos)
if (old eq null) {
// The key does not exist.
null
} else if (old.isInstanceOf[Array[AnyRef]]) {
// Repeat search at the next level.
val oldan = old.asInstanceOf[Array[AnyRef]]
slowRemove(key, hash, level + 4, oldan, current, cache)
} else if (old.isInstanceOf[SNode[_, _]]) {
val cacheLevel =
if (cache == null) 0
else 31 - Integer.numberOfLeadingZeros(cache.length - 1)
if (level < cacheLevel || level >= cacheLevel + 8) {
recordCacheMiss()
}
val oldsn = old.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(oldsn)
if (txn eq NoTxn) {
// There is no other transaction in progress.
if ((oldsn.hash == hash) && ((oldsn.key eq key) || (oldsn.key == key))) {
// The same key, remove it.
if (CAS_TXN(oldsn, null)) {
CAS(current, pos, oldsn, null)
decrementCount(current)
compressAscend(cache, current, parent, hash, level)
oldsn.value.asInstanceOf[AnyRef]
} else slowRemove(key, hash, level, current, parent, cache)
} else {
// The target key does not exist.
null
}
} else if (txn eq FSNode) {
// We landed into a middle of another transaction.
// We must restart from the top, find the transaction node and help.
Restart
} else {
// The single node had been scheduled for replacement by some thread.
// We need to help and retry.
CAS(current, pos, oldsn, txn)
slowRemove(key, hash, level, current, parent, cache)
}
} else if (old.isInstanceOf[LNode[_, _]]) {
val oldln = old.asInstanceOf[LNode[K, V]]
val (result, nn) = newListNodeWithoutKey(oldln, hash, key)
if (CAS(current, pos, oldln, nn)) result
else slowRemove(key, hash, level, current, parent, cache)
} else if (old.isInstanceOf[ENode]) {
// There is another transaction in progress, help complete it, then restart.
val enode = old.asInstanceOf[ENode]
completeExpansion(cache, enode)
Restart
} else if (old.isInstanceOf[XNode]) {
// There is another transaction in progress, help complete it, then restart.
val xnode = old.asInstanceOf[XNode]
completeCompression(cache, xnode)
Restart
} else if ((old eq FVNode) || old.isInstanceOf[FNode]) {
// We landed into the middle of some other thread's transaction.
// We need to restart above, from the descriptor.
Restart
} else {
sys.error("Unexpected case -- " + old)
}
}
private def isFrozenS(n: AnyRef): Boolean = {
if (n.isInstanceOf[SNode[_, _]]) {
val f = READ_TXN(n.asInstanceOf[SNode[_, _]])
f eq FSNode
} else false
}
private def isFrozenA(n: AnyRef): Boolean = {
n.isInstanceOf[FNode] && n.asInstanceOf[FNode].frozen.isInstanceOf[Array[AnyRef]]
}
private def isFrozenL(n: AnyRef): Boolean = {
n.isInstanceOf[FNode] && n.asInstanceOf[FNode].frozen.isInstanceOf[LNode[_, _]]
}
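  // Freezes every slot of `current` (helping concurrent transactions along the
  // way) while tracking whether exactly one single node was frozen here; if so,
  // returns a fresh copy of it, otherwise falls back to compressFrozen, and
  // returns null when the node turns out to be empty.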
private def freezeAndCompress(
cache: Array[AnyRef], current: Array[AnyRef], level: Int
): AnyRef = {
var single: AnyRef = null
var i = 0
while (i < usedLength(current)) {
val node = READ(current, i)
if (node eq null) {
// Freeze null.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then reinspect the current slot.
if (!CAS(current, i, node, FVNode)) i -= 1
} else if (node.isInstanceOf[SNode[_, _]]) {
val sn = node.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(sn)
if (txn eq NoTxn) {
// Freeze single node.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then we must reinspect the current slot.
if (!CAS_TXN(node.asInstanceOf[SNode[K, V]], FSNode)) i -= 1
else {
if (single == null) single = sn
else single = current
}
} else if (txn eq FSNode) {
// We can skip, another thread previously froze this node.
single = current
} else {
// Another thread is trying to replace the single node.
// In this case, we help and retry.
single = current
CAS(current, i, node, txn)
i -= 1
}
} else if (node.isInstanceOf[LNode[_, _]]) {
// Freeze list node.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then we must reinspect the current slot.
single = current
val fnode = new FNode(node)
CAS(current, i, node, fnode)
i -= 1
} else if (node.isInstanceOf[Array[AnyRef]]) {
// Freeze the array node.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then reinspect the current slot.
single = current
val fnode = new FNode(node)
CAS(current, i, node, fnode)
i -= 1
} else if (isFrozenL(node)) {
// We can skip, another thread previously helped with freezing this node.
single = current
} else if (node.isInstanceOf[FNode]) {
// We still need to freeze the subtree recursively.
single = current
val subnode = node.asInstanceOf[FNode].frozen.asInstanceOf[Array[AnyRef]]
freeze(cache, subnode.asInstanceOf[Array[AnyRef]])
} else if (node eq FVNode) {
// We can continue, another thread already froze this slot.
single = current
} else if (node.isInstanceOf[ENode]) {
// If some other txn is in progress, help complete it,
// then restart from the current position.
single = current
val enode = node.asInstanceOf[ENode]
completeExpansion(cache, enode)
i -= 1
} else if (node.isInstanceOf[XNode]) {
        // If some other txn is in progress, help complete it,
// then restart from the current position.
single = current
val xnode = node.asInstanceOf[XNode]
completeCompression(cache, xnode)
i -= 1
} else {
sys.error("Unexpected case -- " + node)
}
i += 1
}
if (single.isInstanceOf[SNode[_, _]]) {
val oldsn = single.asInstanceOf[SNode[K, V]]
single = new SNode(oldsn.hash, oldsn.key, oldsn.value)
return single
} else if (single != null) {
return compressFrozen(current, level)
} else {
return single
}
}
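  // Freezes every slot of the array node: empty slots become FVNode, single
  // nodes have their txn field set to FSNode, and list or array children are
  // wrapped in FNode; concurrent ENode/XNode transactions are helped to
  // completion and the slot is then re-inspected.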
private def freeze(cache: Array[AnyRef], current: Array[AnyRef]): Unit = {
var i = 0
while (i < usedLength(current)) {
val node = READ(current, i)
if (node eq null) {
// Freeze null.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then reinspect the current slot.
if (!CAS(current, i, node, FVNode)) i -= 1
} else if (node.isInstanceOf[SNode[_, _]]) {
val sn = node.asInstanceOf[SNode[K, V]]
val txn = READ_TXN(sn)
if (txn eq NoTxn) {
// Freeze single node.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then we must reinspect the current slot.
if (!CAS_TXN(node.asInstanceOf[SNode[K, V]], FSNode)) i -= 1
} else if (txn eq FSNode) {
// We can skip, another thread previously froze this node.
} else {
// Another thread is trying to replace the single node.
// In this case, we help and retry.
CAS(current, i, node, txn)
i -= 1
}
} else if (node.isInstanceOf[LNode[_, _]]) {
// Freeze list node.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then we must reinspect the current slot.
val fnode = new FNode(node)
CAS(current, i, node, fnode)
i -= 1
} else if (node.isInstanceOf[Array[AnyRef]]) {
// Freeze the array node.
// If it fails, then either someone helped or another txn is in progress.
// If another txn is in progress, then reinspect the current slot.
val fnode = new FNode(node)
CAS(current, i, node, fnode)
i -= 1
} else if (isFrozenL(node)) {
// We can skip, another thread previously helped with freezing this node.
} else if (node.isInstanceOf[FNode]) {
// We still need to freeze the subtree recursively.
val subnode = node.asInstanceOf[FNode].frozen.asInstanceOf[Array[AnyRef]]
freeze(cache, subnode.asInstanceOf[Array[AnyRef]])
} else if (node eq FVNode) {
// We can continue, another thread already froze this slot.
} else if (node.isInstanceOf[ENode]) {
// If some other txn is in progress, help complete it,
// then restart from the current position.
val enode = node.asInstanceOf[ENode]
completeExpansion(cache, enode)
i -= 1
} else if (node.isInstanceOf[XNode]) {
        // If some other txn is in progress, help complete it,
// then restart from the current position.
val xnode = node.asInstanceOf[XNode]
completeCompression(cache, xnode)
i -= 1
} else {
sys.error("Unexpected case -- " + node)
}
i += 1
}
}
private def sequentialInsert(
sn: SNode[K, V], wide: Array[AnyRef], level: Int
): Unit = {
val mask = usedLength(wide) - 1
val pos = (sn.hash >>> level) & mask
if (wide(pos) == null) wide(pos) = sn
else sequentialInsert(sn, wide, level, pos)
}
@tailrec
private def sequentialInsert(
sn: SNode[K, V], wide: Array[AnyRef], level: Int, pos: Int
): Unit = {
val old = wide(pos)
if (old.isInstanceOf[SNode[_, _]]) {
val oldsn = old.asInstanceOf[SNode[K, V]]
val an = newNarrowOrWideNodeUsingFreshThatNeedsCountFix(oldsn, sn, level + 4)
wide(pos) = an
} else if (old.isInstanceOf[Array[AnyRef]]) {
val oldan = old.asInstanceOf[Array[AnyRef]]
val npos = (sn.hash >>> (level + 4)) & (usedLength(oldan) - 1)
if (oldan(npos) == null) {
oldan(npos) = sn
} else if (usedLength(oldan) == 4) {
val an = createWideArray()
sequentialTransfer(oldan, an, level + 4)
wide(pos) = an
sequentialInsert(sn, wide, level, pos)
} else {
sequentialInsert(sn, oldan, level + 4, npos)
}
} else if (old.isInstanceOf[LNode[_, _]]) {
val oldln = old.asInstanceOf[LNode[K, V]]
val nn = newListNarrowOrWideNode(oldln, sn.hash, sn.key, sn.value, level + 4)
wide(pos) = nn
} else {
sys.error("Unexpected case: " + old)
}
}
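  // Sequentially copies the entries of a frozen source node into the given wide
  // node, re-inserting each single node (and each element of frozen list nodes)
  // at this level; the caller must have exclusive access to `wide`.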
private def sequentialTransfer(
source: Array[AnyRef], wide: Array[AnyRef], level: Int
): Unit = {
val mask = usedLength(wide) - 1
var i = 0
while (i < usedLength(source)) {
val node = source(i)
if (node eq FVNode) {
// We can skip, the slot was empty.
} else if (isFrozenS(node)) {
// We can copy it over to the wide node.
val oldsn = node.asInstanceOf[SNode[K, V]]
val sn = new SNode(oldsn.hash, oldsn.key, oldsn.value)
val pos = (sn.hash >>> level) & mask
if (wide(pos) == null) wide(pos) = sn
else sequentialInsert(sn, wide, level, pos)
} else if (isFrozenL(node)) {
var tail = node.asInstanceOf[FNode].frozen.asInstanceOf[LNode[K, V]]
while (tail != null) {
val sn = new SNode(tail.hash, tail.key, tail.value)
val pos = (sn.hash >>> level) & mask
sequentialInsert(sn, wide, level, pos)
tail = tail.next
}
} else if (node.isInstanceOf[FNode]) {
val fn = node.asInstanceOf[FNode]
val an = fn.frozen.asInstanceOf[Array[AnyRef]]
sequentialTransfer(an, wide, level)
} else {
sys.error("Unexpected case -- source array node should have been frozen.")
}
i += 1
}
}
private def sequentialTransferNarrow(
source: Array[AnyRef], narrow: Array[AnyRef], level: Int
): Unit = {
var i = 0
while (i < 4) {
val node = source(i)
if (node eq FVNode) {
        // We can skip, this slot was empty.
} else if (isFrozenS(node)) {
val oldsn = node.asInstanceOf[SNode[K, V]]
val sn = new SNode(oldsn.hash, oldsn.key, oldsn.value)
narrow(i) = sn
} else if (isFrozenL(node)) {
val chain = node.asInstanceOf[FNode].frozen.asInstanceOf[LNode[K, V]]
narrow(i) = chain
} else {
sys.error(s"Unexpected case: $node")
}
i += 1
}
}
private def newNarrowOrWideNode(
h1: Int, k1: K, v1: V, h2: Int, k2: K, v2: V, level: Int
): AnyRef = {
newNarrowOrWideNodeUsingFresh(
new SNode(h1, k1, v1), new SNode(h2, k2, v2), level)
}
private def newNarrowOrWideNodeUsingFresh(
sn1: SNode[K, V], sn2: SNode[K, V], level: Int
): AnyRef = {
if (sn1.hash == sn2.hash) {
val ln1 = new LNode(sn1)
val ln2 = new LNode(sn2, ln1)
ln2
} else {
val pos1 = (sn1.hash >>> level) & (4 - 1)
val pos2 = (sn2.hash >>> level) & (4 - 1)
if (pos1 != pos2) {
val an = createNarrowArray()
val pos1 = (sn1.hash >>> level) & (usedLength(an) - 1)
an(pos1) = sn1
val pos2 = (sn2.hash >>> level) & (usedLength(an) - 1)
an(pos2) = sn2
an(an.length - 1) = Integer.valueOf(2)
an
} else {
val an = createWideArray()
sequentialInsert(sn1, an, level)
sequentialInsert(sn2, an, level)
sequentialFixCount(an)
an
}
}
}
private def newNarrowOrWideNodeUsingFreshThatNeedsCountFix(
sn1: SNode[K, V], sn2: SNode[K, V], level: Int
): AnyRef = {
if (sn1.hash == sn2.hash) {
val ln1 = new LNode(sn1)
val ln2 = new LNode(sn2, ln1)
ln2
} else {
val pos1 = (sn1.hash >>> level) & (4 - 1)
val pos2 = (sn2.hash >>> level) & (4 - 1)
if (pos1 != pos2) {
val an = createNarrowArray()
val pos1 = (sn1.hash >>> level) & (usedLength(an) - 1)
an(pos1) = sn1
val pos2 = (sn2.hash >>> level) & (usedLength(an) - 1)
an(pos2) = sn2
an
} else {
val an = createWideArray()
sequentialInsert(sn1, an, level)
sequentialInsert(sn2, an, level)
an
}
}
}
private def newNarrowOrWideNodeThatNeedsCountFix(
h1: Int, k1: K, v1: V, h2: Int, k2: K, v2: V, level: Int
): AnyRef = {
newNarrowOrWideNodeUsingFreshThatNeedsCountFix(
new SNode(h1, k1, v1), new SNode(h2, k2, v2), level)
}
private def newListNodeWithoutKey(
oldln: CacheTrie.LNode[K, V], hash: Int, k: K
): (V, CacheTrie.LNode[K, V]) = {
var tail = oldln
while (tail != null) {
if (tail.key == k) {
// Only reallocate list if the key must be removed.
val result = tail.value
var ln: LNode[K, V] = null
tail = oldln
while (tail != null) {
if (tail.key != k) {
ln = new LNode(tail.hash, tail.key, tail.value, ln)
}
tail = tail.next
}
return (result, ln)
}
tail = tail.next
}
return (null.asInstanceOf[V], oldln)
}
  // TODO: Fix a corner case -- must check whether the key is already in the list node.
private def newListNarrowOrWideNode(
oldln: CacheTrie.LNode[K, V], hash: Int, k: K, v: V, level: Int
): AnyRef = {
var tail = oldln
var ln: LNode[K, V] = null
while (tail != null) {
ln = new LNode(tail.hash, tail.key, tail.value, ln)
tail = tail.next
}
if (ln.hash == hash) {
new LNode(hash, k, v, ln)
} else {
val an = createWideArray()
val pos1 = (ln.hash >>> level) & (usedLength(an) - 1)
an(pos1) = ln
val sn = new SNode(hash, k, v)
sequentialInsert(sn, an, level)
sequentialFixCount(an)
an
}
}
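  // Completes an ENode expansion: freezes the narrow node, transfers its
  // contents into a freshly allocated wide node, agrees on that wide node via a
  // CAS on the ENode, installs it into the parent, and inhabits the cache when
  // this thread's CAS into the parent succeeds.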
private def completeExpansion(cache: Array[AnyRef], enode: ENode): Unit = {
val parent = enode.parent
val parentpos = enode.parentpos
val level = enode.level
// First, freeze the subtree beneath the narrow node.
val narrow = enode.narrow
freeze(cache, narrow)
// Second, populate the target array, and CAS it into the parent.
var wide = createWideArray()
sequentialTransfer(narrow, wide, level)
sequentialFixCount(wide)
// If this CAS fails, then somebody else already committed the wide array.
if (!CAS_WIDE(enode, null, wide)) {
wide = READ_WIDE(enode)
}
// We need to write the agreed value back into the parent.
// If we failed, it means that somebody else succeeded.
// If we succeeded, then we must update the cache.
// Note that not all nodes will get cached from this site,
// because some array nodes get created outside expansion
// (e.g. when creating a node to resolve collisions in sequentialTransfer).
if (CAS(parent, parentpos, enode, wide)) {
inhabitCache(cache, wide, enode.hash, level)
}
}
@tailrec
private def inhabitCache(
cache: Array[AnyRef], nv: AnyRef, hash: Int, cacheeLevel: Int
): Unit = {
if (cache eq null) {
// Only create the cache if the entry is at least level 12,
// since the expectation on the number of elements is ~80.
// This means that we can afford to create a cache with 256 entries.
if (cacheeLevel >= 12) {
val cn = createCacheArray(8)
cn(0) = new CacheNode(null, 8)
CAS_CACHE(null, cn)
val newCache = READ_CACHE
inhabitCache(newCache, nv, hash, cacheeLevel)
}
} else {
val len = cache.length
val cacheLevel = Integer.numberOfTrailingZeros(len - 1)
if (cacheeLevel == cacheLevel) {
val mask = len - 1 - 1
val pos = 1 + (hash & mask)
WRITE(cache, pos, nv)
} else {
// We have a cache level miss -- update statistics, and rebuild if necessary.
// TODO: Probably not necessary here.
}
}
}
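  // Estimates the per-level distribution of keys by sampling the trie (the
  // strategy is selected with sampleType), then moves the cache level towards
  // the pair of consecutive levels holding the most entries whenever the
  // currently cached level falls behind by more than the repair threshold.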
def sampleAndUpdateCache(cache: Array[AnyRef], stats: CacheNode): Unit = {
// Sample the hash trie to estimate the level distribution.
// Use an 8-byte histogram, total sample size must be less than 255.
var histogram = 0L
val sampleSize = 128
val sampleType = 2
var seed = Thread.currentThread.getId + System.identityHashCode(this)
val levelOffset = 4
(sampleType: @switch) match {
case 0 =>
var i = 0
while (i < sampleSize) {
seed = (seed * 0x5DEECE66DL + 0xBL) & ((1L << 48) - 1);
val hash = (seed >>> 16).toInt
@tailrec
def sampleHash(node: Array[AnyRef], level: Int, hash: Int): Int = {
val mask = usedLength(node) - 1
val pos = (hash >>> level) & mask
val child = READ(node, pos)
if (child.isInstanceOf[Array[AnyRef]]) {
sampleHash(child.asInstanceOf[Array[AnyRef]], level + 4, hash)
} else {
level + levelOffset
}
}
val level = sampleHash(rawRoot, 0, hash)
val shift = (level >>> 2) << 3
val addend = 1L << shift
histogram += addend
i += 1
}
case 1 =>
var i = 0
while (i < sampleSize) {
seed = (seed * 0x5DEECE66DL + 0xBL) & ((1L << 48) - 1);
val hash = (seed >>> 16).toInt
def sampleKey(node: Array[AnyRef], level: Int, hash: Int): Int = {
val mask = usedLength(node) - 1
val pos = (hash >>> level) & mask
var i = (pos + 1) % usedLength(node)
while (i != pos) {
val ch = READ(node, i)
if (ch.isInstanceOf[SNode[_, _]] || isFrozenS(ch) || isFrozenL(ch)) {
return level + levelOffset
} else if (ch.isInstanceOf[Array[AnyRef]]) {
val an = ch.asInstanceOf[Array[AnyRef]]
val result = sampleKey(an, level + 4, hash)
if (result != -1) return result
} else if (isFrozenA(ch)) {
val an = ch.asInstanceOf[FNode].frozen.asInstanceOf[Array[AnyRef]]
val result = sampleKey(an, level + 4, hash)
if (result != -1) return result
}
i = (i + 1) % usedLength(node)
}
-1
}
val level = sampleKey(rawRoot, 0, hash)
if (level == -1) i = sampleSize
else {
val shift = (level >>> 2) << 3
val addend = 1L << shift
histogram += addend
i += 1
}
}
case 2 =>
def count(histogram: Long): Int = {
(
((histogram >>> 0) & 0xff) +
((histogram >>> 8) & 0xff) +
((histogram >>> 16) & 0xff) +
((histogram >>> 24) & 0xff) +
((histogram >>> 32) & 0xff) +
((histogram >>> 40) & 0xff) +
((histogram >>> 48) & 0xff) +
((histogram >>> 56) & 0xff)
).toInt
}
def sampleUnbiased(
node: Array[AnyRef], level: Int, maxRepeats: Int, maxSamples: Int,
startHistogram: Long, startSeed: Long
): Long = {
var seed = startSeed
var histogram = startHistogram
val mask = usedLength(node) - 1
var i = 0
while (i < maxRepeats && count(histogram) < maxSamples) {
seed = (seed * 0x5DEECE66DL + 0xBL) & ((1L << 48) - 1);
val hash = (seed >>> 16).toInt
val pos = hash & mask
val ch = READ(node, pos)
if (ch.isInstanceOf[Array[AnyRef]]) {
val an = ch.asInstanceOf[Array[AnyRef]]
histogram += sampleUnbiased(
an, level + 4, math.min(maxSamples, maxRepeats * 4), maxSamples,
histogram, seed + 1
)
} else if (
ch.isInstanceOf[SNode[_, _]] || isFrozenS(ch) || isFrozenL(ch)
) {
val shift = ((level + levelOffset) >>> 2) << 3
histogram += 1L << shift
} else if (isFrozenA(ch)) {
val an = ch.asInstanceOf[FNode].frozen.asInstanceOf[Array[AnyRef]]
histogram += sampleUnbiased(
an, level + 4, maxRepeats * 4, maxSamples, histogram, seed + 1
)
}
i += 1
}
histogram
}
var i = 0
val trials = 32
while (i < trials) {
seed += 1
histogram += sampleUnbiased(rawRoot, 0, 1, sampleSize / trials, 0L, seed)
i += 1
}
}
// Find two consecutive levels with most elements.
// Additionally, record the number of elements at the current cache level.
val oldCacheLevel = stats.level
var cacheCount = 0
var bestLevel = 0
var bestCount = (histogram & 0xff) + ((histogram >>> 8) & 0xff)
var level = 8
while (level < 64) {
val count =
((histogram >>> level) & 0xff) + ((histogram >>> (level + 8)) & 0xff)
if (count > bestCount) {
bestCount = count
bestLevel = level >> 1
}
if ((level >> 1) == oldCacheLevel) {
cacheCount += count.toInt
}
level += 8
}
// Debug information.
    def printDebugInformation(): Unit = {
println(debugPerLevelDistribution)
println(s"best level: ${bestLevel} (count: $bestCount)")
println(s"cache level: ${stats.level} (count: $cacheCount)")
val histogramString =
(0 until 8).map(_ * 8).map(histogram >>> _).map(_ & 0xff).mkString(",")
println(histogramString)
println(debugCacheStats)
println()
}
// printDebugInformation()
// Decide whether to change the cache levels.
val repairThreshold = 1.40f
if (cacheCount * repairThreshold < bestCount) {
// printDebugInformation()
var currCache = cache
var currStats = stats
while (currStats.level > bestLevel) {
// Drop cache level.
val parentCache = currStats.parent
if (CAS_CACHE(currCache, parentCache)) {
if (parentCache == null) {
return
}
currCache = parentCache
currStats = READ(parentCache, 0).asInstanceOf[CacheNode]
} else {
// Bail out immediately -- cache will be repaired by someone else eventually.
return
}
}
while (currStats.level < bestLevel) {
// Add cache level.
val nextLevel = currStats.level + 4
val nextCache = createCacheArray(nextLevel)
nextCache(0) = new CacheNode(currCache, nextLevel)
if (CAS_CACHE(currCache, nextCache)) {
currCache = nextCache
currStats = READ(nextCache, 0).asInstanceOf[CacheNode]
} else {
          // Bail out immediately -- cache will be repaired by someone else eventually.
return
}
}
}
}
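  // Called on a suspected cache miss: bumps the miss counter and, once it
  // exceeds missCountMax, resets it and re-samples the trie to decide whether
  // the cache level should be adjusted.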
private def recordCacheMiss(): Unit = {
val missCountMax = 2048
val cache = READ_CACHE
if (cache ne null) {
val stats = READ(cache, 0).asInstanceOf[CacheNode]
if (stats.approximateMissCount > missCountMax) {
// We must again check if the cache level is obsolete.
// Reset the miss count.
stats.resetMissCount()
// Resample to find out if cache needs to be repaired.
sampleAndUpdateCache(cache, stats)
} else {
stats.bumpMissCount()
}
}
}
private[concurrent] def debugReadCache: Array[AnyRef] = READ_CACHE
private[concurrent] def debugReadRoot: Array[AnyRef] = rawRoot
private[concurrent] def debugCachePopulateTwoLevelSingle(
level: Int, key: K, value: V
): Unit = {
rawCache = createCacheArray(level)
rawCache(0) = new CacheNode(null, level)
var i = 1
while (i < rawCache.length) {
val an = createNarrowArray()
rawCache(i) = an
var j = 0
while (j < usedLength(an)) {
an(j) = new SNode(0, key, value)
j += 1
}
sequentialFixCount(an)
i += 1
}
}
private[concurrent] def debugCachePopulateTwoLevel(
level: Int, keys: Array[K], values: Array[V]
): Unit = {
rawCache = createCacheArray(level)
rawCache(0) = new CacheNode(null, level)
var i = 1
while (i < rawCache.length) {
val an = createNarrowArray()
rawCache(i) = an
var j = 0
while (j < usedLength(an)) {
an(j) = new SNode(0, keys(i * 4 + j), values(i * 4 + j))
j += 1
}
sequentialFixCount(an)
i += 1
}
}
private[concurrent] def debugCachePopulateOneLevel(
level: Int, keys: Array[K], values: Array[V], scarce: Boolean
): Unit = {
rawCache = createCacheArray(level)
rawCache(0) = new CacheNode(null, level)
var i = 1
while (i < rawCache.length) {
if (!scarce || i % 4 == 0) {
rawCache(i) = new SNode(0, keys(i), values(i))
}
i += 1
}
}
private[concurrent] def debugLoadFactor(): Double = {
var full = 0
var total = 0
def traverse(node: Array[AnyRef]): Unit = {
total += usedLength(node)
var i = 0
while (i < usedLength(node)) {
val old = READ(node, i)
if (old.isInstanceOf[SNode[_, _]]) {
full += 1
} else if (old.isInstanceOf[LNode[_, _]]) {
full += 1
} else if (old.isInstanceOf[Array[AnyRef]]) {
traverse(old.asInstanceOf[Array[AnyRef]])
}
i += 1
}
}
traverse(rawRoot)
return 1.0 * full / total
}
private[concurrent] def debugTree: String = {
val res = new StringBuilder
def traverse(indent: String, node: Array[AnyRef]): Unit = {
var i = 0
while (i < usedLength(node)) {
val old = READ(node, i)
if (old == null) {
res.append(s"${indent}<empty>")
res.append("\n")
} else if (old.isInstanceOf[SNode[_, _]]) {
val sn = old.asInstanceOf[SNode[K, V]]
res.append(s"${indent}")
val txn = READ_TXN(sn)
val marker = if (txn eq NoTxn) "_" else txn.toString
val id = System.identityHashCode(sn)
res.append(
s"SN[${Integer.toHexString(sn.hash)}:${sn.key}:${sn.value}:$marker]@$id")
res.append("\n")
} else if (old.isInstanceOf[LNode[_, _]]) {
var ln = old.asInstanceOf[LNode[K, V]]
while (ln != null) {
res.append(s"${indent}")
res.append(s"LN[${Integer.toHexString(ln.hash)}:${ln.key}:${ln.value}]")
res.append("->")
ln = ln.next
}
res.append("\n")
} else if (old.isInstanceOf[Array[AnyRef]]) {
val an = old.asInstanceOf[Array[AnyRef]]
res.append(s"${indent}${if (usedLength(an) == 4) "narrow" else "wide"}")
res.append("\n")
traverse(indent + " ", an)
} else {
res.append("unsupported case: " + old)
}
i += 1
}
}
traverse("", rawRoot)
res.toString
}
def debugCacheStats: String = {
def traverse(cache: Array[AnyRef]): String = {
if (cache == null) {
return "empty cache"
}
val stats = cache(0).asInstanceOf[CacheNode]
var count = 0
var acount = 0
var scount = 0
for (i <- 1 until cache.length) {
val c = cache(i)
if (c != null && c != FVNode && !c.isInstanceOf[FNode]) {
count += 1
}
if (c.isInstanceOf[Array[AnyRef]]) {
acount += 1
}
if (c.isInstanceOf[SNode[_, _]]) {
scount += 1
}
}
traverse(stats.parent) + "\n|\n" + s"""
|cache level: ${stats.level}, $count / ${cache.length - 1}
|a-nodes: $acount
|s-nodes: $scount
""".stripMargin.trim
}
val cache = READ_CACHE
"----\n" + traverse(cache) + "\n----"
}
def debugPerLevelDistribution: String = {
val histogram = new Array[Int](10)
var sz = 0
def traverse(node: Array[AnyRef], level: Int): Unit = {
var i = 0
while (i < usedLength(node)) {
val old = node(i)
if (old.isInstanceOf[SNode[_, _]]) {
histogram((level + 4) / 4) += 1
sz += 1
} else if (old.isInstanceOf[LNode[_, _]]) {
var ln = old.asInstanceOf[LNode[_, _]]
while (ln != null) {
histogram((level + 4) / 4) += 1
sz += 1
ln = ln.next
}
} else if (old.isInstanceOf[Array[AnyRef]]) {
val an = old.asInstanceOf[Array[AnyRef]]
traverse(an, level + 4)
} else if (old eq null) {
} else {
sys.error(s"Unexpected case: $old")
}
i += 1
}
}
traverse(this.debugReadRoot, 0)
val sb = new StringBuilder
sb.append(s":: size $sz ::\n")
for (i <- 0 until histogram.length) {
val num = histogram(i)
val percent = (100.0 * num / sz).toInt
sb.append(f"${i * 4}%3d: $num%8d ($percent%3d%%) ${"*" * (num * 40 / sz)}\n")
}
sb.toString
}
private[concurrent] def assertCorrectCounts(): Unit = {
def traverse(node: Array[AnyRef]): Unit = {
var i = 0
var count = 0
while (i < usedLength(node)) {
val old = node(i)
if (old != null) {
count += 1
}
if (old.isInstanceOf[Array[AnyRef]]) {
traverse(old.asInstanceOf[Array[AnyRef]])
}
i += 1
}
assert(count == node(node.length - 1).asInstanceOf[Integer],
s"Counted $count, node: ${ANode.toString(node)}")
}
traverse(rawRoot)
}
}
object CacheTrie {
private val ArrayBase = Platform.unsafe.arrayBaseOffset(classOf[Array[AnyRef]])
private val ArrayShift = {
val scale = Platform.unsafe.arrayIndexScale(classOf[Array[AnyRef]])
require((scale & (scale - 1)) == 0)
31 - Integer.numberOfLeadingZeros(scale)
}
private val ENodeWideOffset = {
val field = classOf[ENode].getDeclaredField("wide")
Platform.unsafe.objectFieldOffset(field)
}
private val SNodeFrozenOffset = {
val field = classOf[SNode[_, _]].getDeclaredField("txn")
Platform.unsafe.objectFieldOffset(field)
}
private val CacheTrieRawCacheOffset = {
val field = classOf[CacheTrie[_, _]].getDeclaredField("rawCache")
Platform.unsafe.objectFieldOffset(field)
}
private val availableProcessors = Runtime.getRuntime.availableProcessors()
/* result types */
val Success = new AnyRef
val Restart = new AnyRef
/* node types */
object NoTxn
class SNode[K <: AnyRef, V](
@volatile var txn: AnyRef,
val hash: Int,
val key: K,
val value: V
) {
def this(h: Int, k: K, v: V) = this(NoTxn, h, k, v)
override def toString = {
val id = System.identityHashCode(this)
s"SN[$hash, $key, $value, ${if (txn != NoTxn) txn else '_'}]@$id"
}
}
class LNode[K <: AnyRef, V](
val hash: Int,
val key: K,
val value: V,
val next: LNode[K, V]
) {
def this(sn: SNode[K, V], next: LNode[K, V]) = this(sn.hash, sn.key, sn.value, next)
def this(sn: SNode[K, V]) = this(sn, null)
override def toString = s"LN[$hash, $key, $value] -> $next"
}
class ENode(
val parent: Array[AnyRef],
val parentpos: Int,
val narrow: Array[AnyRef],
val hash: Int,
val level: Int
) {
@volatile var wide: Array[AnyRef] = null
override def toString = s"EN"
}
class XNode(
val parent: Array[AnyRef],
val parentpos: Int,
val stale: Array[AnyRef],
val hash: Int,
val level: Int
) {
override def toString = s"XN"
}
object ANode {
def toString(an: Array[AnyRef]) = an.mkString("AN[", ", ", "]")
}
val FVNode = new AnyRef
val FSNode = new AnyRef
class FNode(
val frozen: AnyRef
)
class CacheNode(val parent: Array[AnyRef], val level: Int) {
val missCounts = new Array[Int](availableProcessors * math.min(16, level))
private def pos: Int = {
val id = Thread.currentThread.getId
val pos = (id ^ (id >>> 16)).toInt & (missCounts.length - 1)
pos
}
final def approximateMissCount: Int = {
missCounts(0)
}
final def resetMissCount(): Unit = {
missCounts(0) = 0
}
final def bumpMissCount(): Unit = {
missCounts(0) += 1
}
}
}
|
storm-enroute/reactors
|
reactors-common/jvm/src/main/scala/io/reactors/common/concurrent/CacheTrie.scala
|
Scala
|
bsd-3-clause
| 63,188 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.executor
import org.apache.spark.SparkFunSuite
class TaskMetricsSuite extends SparkFunSuite {
test("[SPARK-5701] updateShuffleReadMetrics: ShuffleReadMetrics not added when no shuffle deps") {
val taskMetrics = new TaskMetrics()
taskMetrics.updateShuffleReadMetrics()
assert(taskMetrics.shuffleReadMetrics.isEmpty)
}
}
|
ArvinDevel/onlineAggregationOnSparkV2
|
core/src/test/scala/org/apache/spark/executor/TaskMetricsSuite.scala
|
Scala
|
apache-2.0
| 1,164 |
package no.nextgentel.oss.akkatools.example
import java.util.UUID
import java.util.concurrent.TimeUnit
import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
import akka.util.Timeout
import akka.pattern.ask
import no.nextgentel.oss.akkatools.example.booking._
import no.nextgentel.oss.akkatools.persistence.jdbcjournal.{JdbcJournalConfig, PersistenceIdParserImpl, PersistenceIdSingle, StorageRepoConfig}
import no.nextgentel.oss.akkatools.persistence.{DurableMessage, EventAndState, GetEventAndStateHistory}
import no.nextgentel.oss.akkatools.utils.DataSourceUtil
import scala.concurrent.Await
import scala.io.StdIn._
object ExampleApp extends App {
val dataSource = DataSourceUtil.createDataSource("JdbcReadJournalTest")
JdbcJournalConfig.setConfig(JdbcJournalConfig(dataSource, None, StorageRepoConfig()))
val infoColor = Console.YELLOW
val e = new ExampleSystem()
val bookingId = "movie-" + UUID.randomUUID().toString
Thread.sleep(3000)
doNextStep("Placing booking - which will fail") {
() =>
try {
// This will fail
e.placeBooking(bookingId)
} catch {
case e: Exception => {
waitForALittleWhile()
printInfo("Ignoring booking-error: " + e.getMessage)
}
}
}
doNextStep("Open the booking") {
() =>
e.openBooking(bookingId, 3)
}
val firstSeatId:String = doNextStep("Place booking") {
() =>
val seatId = e.placeBooking(bookingId)
printInfo(s"Booking completed - seatId: $seatId")
waitForALittleWhile(15)
seatId
}
doNextStep("Place another booking") {
() =>
val seatId = e.placeBooking(bookingId)
printInfo(s"Booking completed - seatId: $seatId")
waitForALittleWhile(15)
}
doNextStep("Cancel an invalid booking") {
() =>
try {
// This will fail
e.cancelBooking(bookingId, "seat-na")
} catch {
case e: Exception => {
waitForALittleWhile()
printInfo("Ignoring booking-error: " + e.getMessage)
}
}
}
doNextStep("Cancel the first booking") {
() =>
e.cancelBooking(bookingId, firstSeatId)
printInfo(s"Seat canceled")
waitForALittleWhile()
}
doNextStep("Close booking") {
() =>
e.closeBooking(bookingId)
}
doNextStep("Try a booking after it has been closed - will fail") {
() =>
try {
// This will fail
e.placeBooking(bookingId)
} catch {
case e: Exception => {
waitForALittleWhile()
printInfo("Ignoring booking-error: " + e.getMessage)
}
}
}
doNextStep("Printing history of the booking") {
() =>
printHistory(bookingId)
}
doNextStep("quit") {
() =>
Thread.sleep(500)
System.exit(10)
}
def printInfo(info:String, sleep:Int = 1): Unit ={
Thread.sleep(sleep*1000)
println(infoColor + info + Console.RESET)
}
def waitForALittleWhile(seconds:Int = 1): Unit ={
//Thread.sleep(seconds*1000)
}
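  // Announces the next step and runs it. The interactive "type 'run'" prompt is
  // currently disabled (wait is initialized to false), so steps run immediately.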
def doNextStep[T](description:String)(work:()=>T): T = {
//Thread.sleep(1000)
    println(Console.BLUE + "\n\nType 'run + [ENTER]' to execute next step: " + infoColor + description + Console.RESET)
var wait = false//true
while(wait) {
val line = readLine()
// println("line: " + line)
if (line != null && line.equalsIgnoreCase("run")) {
wait = false
}
}
work.apply()
}
def printHistory(bookingId:String): Unit = {
e.getBookingHistory(bookingId).foreach {
e => printInfo("Event: " + e.event.toString, 0)
}
}
}
class ExampleSystem(system: ActorSystem) {
def this() = this(ActorSystem("ExampleSystem"))
implicit val ec = system.dispatcher
implicit val timeout = Timeout(20, TimeUnit.SECONDS)
private val booking = new BookingStarter(system)
private val cinema = system.actorOf(Props(new Cinema), "cinema")
  private val ticketPrinter = system.actorOf(Props(new TicketPrinter), "ticketPrinter")
booking.config(ticketPrinter.path, cinema.path)
booking.start()
def placeBooking(bookingId: String): String = {
val msg = ReserveSeatCmd(bookingId)
Await.result(ask(booking.dispatcher, msg), timeout.duration).asInstanceOf[String]
}
def openBooking(bookingId:String, seats:Int): Unit = {
booking.dispatcher ! OpenBookingCmd(bookingId, seats)
}
def cancelBooking(bookingId:String, seatId:String): Unit = {
val msg = CancelSeatCmd(bookingId, seatId)
Await.result(ask(booking.dispatcher, msg), timeout.duration)
}
def closeBooking(bookingId:String): Unit = {
booking.dispatcher ! CloseBookingCmd(bookingId)
}
def getBookingHistory(bookingId:String):List[EventAndState] = {
val f = booking.askView(bookingId, GetEventAndStateHistory()).mapTo[List[EventAndState]]
Await.result(f, timeout.duration)
}
}
class Cinema extends Actor with ActorLogging {
def receive = {
case dm: DurableMessage =>
val m = dm.payload
log.info(Console.GREEN + s"Cinema: $m" + Console.RESET)
dm.confirm(context, self)
}
}
class TicketPrinter extends Actor with ActorLogging {
var counter = 0
def receive = {
case dm: DurableMessage =>
val m = dm.payload
counter = counter + 1
val willCrash = (counter % 3) != 0
if ( willCrash)
log.warning(Console.RED + s"Failing to print ticket $m" + Console.RESET)
else {
log.info(Console.GREEN + s"Printing ticket: $m" + Console.RESET)
dm.confirm(context, self)
}
}
}
|
NextGenTel/akka-tools
|
examples/aggregates/src/main/scala/no/nextgentel/oss/akkatools/example/Example.scala
|
Scala
|
mit
| 5,546 |
package com.shocktrade.models.profile
import akka.actor.{ActorRef, Props}
import akka.pattern.ask
import akka.routing.RoundRobinPool
import akka.util.Timeout
import com.shocktrade.actors.UserProfileActor
import com.shocktrade.actors.UserProfileActor._
import com.shocktrade.controllers.Application._
import com.shocktrade.util.BSONHelper._
import play.libs.Akka
import reactivemongo.api.collections.default.BSONCollection
import reactivemongo.bson.{BSONArray, BSONDocument => BS, BSONObjectID}
import reactivemongo.core.commands.LastError
import scala.collection.concurrent.TrieMap
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
/**
* User Profiles Proxy
* @author [email protected]
*/
object UserProfiles {
private val system = Akka.system
private implicit val ec = system.dispatcher
private val finderActor = system.actorOf(Props[UserProfileActor].withRouter(RoundRobinPool(nrOfInstances = 20)), name = "ProfileFinder")
private val profileActors = TrieMap[String, ActorRef]()
private implicit val timeout: Timeout = 5.second
private implicit val mc = db.collection[BSONCollection]("Players")
/**
* Creates the given user profile
* @param profile the given user profile
* @return a promise of the [[LastError outcome]]
*/
def createProfile(profile: UserProfile): Future[LastError] = {
(UserProfiles ? CreateProfile(profile)).mapTo[LastError]
}
/**
   * Deducts the given amount from the user's available funds
* @param userID the given user ID
* @param amountToDeduct the amount to deduct
* @return a promise of an option of a user profile
*/
def deductFunds(userID: BSONObjectID, amountToDeduct: BigDecimal): Future[Option[UserProfile]] = {
(UserProfiles ? DeductFunds(userID, amountToDeduct)).mapTo[Option[UserProfile]]
}
def findFacebookFriends(fbIds: Seq[String])(implicit ec: ExecutionContext) = {
(finderActor ? FindFacebookFriends(fbIds)) map {
case e: Exception => throw new IllegalStateException(e)
case response => response.asInstanceOf[Seq[BS]]
}
}
/**
* Retrieves a user profile by the user's name
* @param name the given user name (e.g. "ldaniels528")
* @return a promise of an option of a user profile
*/
def findProfileByName(name: String): Future[Option[UserProfile]] = {
(finderActor ? FindProfileByName(name)) map {
case e: Exception => throw new IllegalStateException(e)
case response => response.asInstanceOf[Option[UserProfile]]
}
}
/**
* Retrieves a user profile by the user's Facebook ID
* @param fbId the given user's Facebook ID
* @return a promise of an option of a user profile
*/
def findProfileByFacebookID(fbId: String): Future[Option[UserProfile]] = {
(finderActor ? FindProfileByFacebookID(fbId)) map {
case e: Exception => throw new IllegalStateException(e)
case response => response.asInstanceOf[Option[UserProfile]]
}
}
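  // Routing helpers: profile-agnostic actions go to the shared finder router,
  // while profile-specific actions go to the per-user actor (created on demand).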
def !(action: ProfileAgnosticAction) = finderActor ! action
def !(action: ProfileSpecificAction) = profileActor(action.userID) ! action
def ?(action: ProfileAgnosticAction)(implicit timeout: Timeout) = (finderActor ? action) map {
case e: Exception => throw new IllegalStateException(e.getMessage, e)
case response => response
}
def ?(action: ProfileSpecificAction)(implicit timeout: Timeout) = (profileActor(action.userID) ? action) map {
case e: Exception => throw new IllegalStateException(e.getMessage, e)
case response => response
}
/**
   * Ensures an actor instance per user profile
   * @param id the given [[BSONObjectID user ID]]
   * @return a reference to the actor that manages the user's profile
*/
private def profileActor(id: BSONObjectID): ActorRef = {
profileActors.getOrElseUpdate(id.stringify, system.actorOf(Props[UserProfileActor], name = s"ProfileActor-${id.stringify}"))
}
}
|
ldaniels528/shocktrade-server
|
app-server/app/com/shocktrade/models/profile/UserProfiles.scala
|
Scala
|
apache-2.0
| 3,908 |
// See the LICENCE.txt file distributed with this work for additional
// information regarding copyright ownership.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scray.core.service
import java.net.InetSocketAddress
import com.esotericsoftware.kryo.Kryo
import scala.collection.JavaConversions._
import com.twitter.finagle.ListeningServer
import com.twitter.finagle.Thrift
import com.twitter.util.Await
import com.twitter.util.Duration
import com.twitter.util.Time
import com.twitter.util.TimerTask
import com.twitter.util.JavaTimer
import java.util.concurrent.TimeUnit
import scray.querying.description._
import scray.querying.caching.serialization._
import scray.common.serialization.KryoPoolSerialization
import scray.common.serialization.numbers.KryoSerializerNumber
import scray.common.properties.ScrayProperties
import scray.common.properties.IntProperty
import scray.common.properties.predefined.PredefinedProperties
import scray.service.qservice.thrifscala.ScrayCombinedStatefulTService
import scray.service.qservice.thrifscala.ScrayTServiceEndpoint
import scray.core.service.properties.ScrayServicePropertiesRegistration
import scray.service.qmodel.thrifscala.ScrayUUID
import com.typesafe.scalalogging.LazyLogging
import org.apache.thrift.protocol.TBinaryProtocol
import com.twitter.finagle.builder.ServerBuilder
import com.twitter.finagle.http._
import org.apache.thrift.transport.TSocket
import com.twitter.finagle.thrift.ThriftServerFramedCodec
abstract class ScrayCombinedStatefulTServer extends KryoPoolRegistration with App with LazyLogging {
// abstract functions to be customized
def initializeResources: Unit
def destroyResources: Unit
def configureProperties
// read properties
configureProperties
// kryo pool registrars
registerSerializers
// custom init
initializeResources
// launch combined service
val server = Thrift.server.serveIface(SCRAY_QUERY_LISTENING_ENDPOINT, ScrayCombinedStatefulTServiceImpl())
def getVersion: String = "0.9.2"
// endpoint registration refresh timer
private val refreshTimer = new JavaTimer(isDaemon = false) {
override def logError(t: Throwable) {
logger.error("Could not refresh.", t)
}
}
var client: Option[ScrayCombinedStatefulTService.FutureIface] = None
// refresh task handle
private var refreshTask: Option[TimerTask] = None
// this endpoint
val endpoint = ScrayTServiceEndpoint(SCRAY_QUERY_HOST_ENDPOINT.getHostString, SCRAY_QUERY_HOST_ENDPOINT.getPort)
val refreshPeriod = EXPIRATION * 2 / 3
def addrStr(): String =
s"${SCRAY_QUERY_HOST_ENDPOINT.getHostString}:${SCRAY_QUERY_HOST_ENDPOINT.getPort}"
// register this endpoint with all seeds and schedule regular refresh
// the refresh loop keeps the server running
SCRAY_SEEDS.map(inetAddr2EndpointString(_)).foreach { seedAddr =>
try {
if (Await.result(getClient(seedAddr).ping(), Duration(20, TimeUnit.SECONDS))) {
logger.debug(s"$addrStr adding local service endpoint ($endpoint) to $seedAddr.")
val _ep = Await.result(getClient(seedAddr).addServiceEndpoint(endpoint), Duration(20, TimeUnit.SECONDS))
// refreshTask = Some(refreshTimer.schedule(refreshPeriod.fromNow, refreshPeriod)(refresh(_ep.endpointId.get)))
refreshTask = Some(refreshTimer.schedule(refreshPeriod.fromNow, refreshPeriod)(refresh(_ep.endpointId.get)))
}
    } catch {
      case ex: Exception =>
        // Registration with this seed failed; log it and keep going so the
        // refresh loop can retry later.
        logger.warn(s"Could not register endpoint with seed $seedAddr.", ex)
    }
}
println(s"Scray Combined Server (Version ${getVersion}) started on ${addrStr}. Waiting for client requests...")
private def getClient(seedAddr: String): ScrayCombinedStatefulTService.FutureIface = {
client.getOrElse {
logger.info("Initializing thrift-client ")
val clientIface = Thrift.client.newIface[ScrayCombinedStatefulTService.FutureIface](seedAddr)
client = Some(clientIface)
clientIface
}
}
/**
* Refresh the registry entry
*/
def refresh(id: ScrayUUID, time: Int = 1): Unit = {
SCRAY_SEEDS.map(inetAddr2EndpointString(_)).foreach { seedAddr =>
try {
logger.trace(s"$addrStr trying to refresh service endpoint ($id).")
if (Await.result(getClient(seedAddr).ping(), Duration(20, TimeUnit.SECONDS))) {
logger.debug(s"$addrStr refreshing service endpoint ($id).")
// client.refreshServiceEndpoint(id)
Await.result(getClient(seedAddr).addServiceEndpoint(endpoint), Duration(20, TimeUnit.SECONDS))
}
} catch {
case ex: Exception =>
client = None
getClient(seedAddr)
if(time < 4) {
logger.warn(s"Endpoint refresh failed, time $time: $ex", ex)
Thread.sleep(10000)
refresh(id, time + 1)
} else {
logger.warn("Endpoint refresh failed. Retry maximum exceeded. Exiting.")
}
}
}
}
override def finalize = {
client = None
destroyResources
}
}
|
scray/scray
|
scray-service/src/main/scala/scray/core/service/ScrayCombinedStatefulTServer.scala
|
Scala
|
apache-2.0
| 5,456 |
/**
 * Mapping Selector
* Copyright (C) 01/04/16 echinopsii
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package net.echinopsii.ariane.community.core.mapping.ds.sdsl.parser
import scala.util.parsing.combinator.JavaTokenParsers
import net.echinopsii.ariane.community.core.mapping.ds.tools.ParserUtils
class UtilsTP extends ParserUtils with JavaTokenParsers {
val selectorkeywords = List(
"and","like","=","!=","<>",">","<",">=","<=","=~"
)
def notAKeyword: Parser[String] =
not(ignoreCases(selectorkeywords: _*)) ~> ident | ignoreCases(selectorkeywords: _*) ~> failure("invalid keyword usage.")
def propertyKey: Parser[String] = identifier
}
|
echinopsii/net.echinopsii.ariane.community.core.mapping
|
ds/dsl/src/main/scala/net/echinopsii/ariane/community/core/mapping/ds/sdsl/parser/UtilsTP.scala
|
Scala
|
agpl-3.0
| 1,311 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.job
import org.apache.samza.SamzaException
import org.apache.samza.config.Config
import org.apache.samza.config.JobConfig.Config2Job
import org.apache.samza.coordinator.stream.messages.{Delete, SetConfig}
import org.apache.samza.job.ApplicationStatus.Running
import org.apache.samza.util.CommandLine
import org.apache.samza.util.Logging
import org.apache.samza.util.Util
import scala.collection.JavaConversions._
import org.apache.samza.metrics.MetricsRegistryMap
import org.apache.samza.coordinator.stream.{CoordinatorStreamSystemProducer, CoordinatorStreamSystemFactory}
object JobRunner {
val SOURCE = "job-runner"
def main(args: Array[String]) {
val cmdline = new CommandLine
val options = cmdline.parser.parse(args: _*)
val config = cmdline.loadConfig(options)
new JobRunner(config).run()
}
}
/**
 * JobRunner is a helper class that sets up and executes a Samza job based
* on a config URI. The configFactory is instantiated, fed the configPath,
* and returns a Config, which is used to execute the job.
*/
class JobRunner(config: Config) extends Logging {
/**
* This function submits the samza job.
* @param resetJobConfig This flag indicates whether or not to reset the job configurations when submitting the job.
   *                       If this value is set to true, all configs previously written to the coordinator stream will be
   *                       deleted, and only the configs in the input config file will take effect. Otherwise, any
   *                       config that is not deleted will still take effect.
* By default this value is set to true.
* @return The job submitted
*/
def run(resetJobConfig: Boolean = true) = {
debug("config: %s" format (config))
val jobFactoryClass = config.getStreamJobFactoryClass match {
case Some(factoryClass) => factoryClass
case _ => throw new SamzaException("no job factory class defined")
}
val jobFactory = Class.forName(jobFactoryClass).newInstance.asInstanceOf[StreamJobFactory]
info("job factory: %s" format (jobFactoryClass))
val factory = new CoordinatorStreamSystemFactory
val coordinatorSystemConsumer = factory.getCoordinatorStreamSystemConsumer(config, new MetricsRegistryMap)
val coordinatorSystemProducer = factory.getCoordinatorStreamSystemProducer(config, new MetricsRegistryMap)
// Create the coordinator stream if it doesn't exist
info("Creating coordinator stream")
val (coordinatorSystemStream, systemFactory) = Util.getCoordinatorSystemStreamAndFactory(config)
val systemAdmin = systemFactory.getAdmin(coordinatorSystemStream.getSystem, config)
systemAdmin.createCoordinatorStream(coordinatorSystemStream.getStream)
if (resetJobConfig) {
info("Storing config in coordinator stream.")
coordinatorSystemProducer.register(JobRunner.SOURCE)
coordinatorSystemProducer.start
coordinatorSystemProducer.writeConfig(JobRunner.SOURCE, config)
}
info("Loading old config from coordinator stream.")
coordinatorSystemConsumer.register
coordinatorSystemConsumer.start
coordinatorSystemConsumer.bootstrap
coordinatorSystemConsumer.stop
val oldConfig = coordinatorSystemConsumer.getConfig()
if (resetJobConfig) {
info("Deleting old configs that are no longer defined: %s".format(oldConfig.keySet -- config.keySet))
(oldConfig.keySet -- config.keySet).foreach(key => {
coordinatorSystemProducer.send(new Delete(JobRunner.SOURCE, key, SetConfig.TYPE))
})
}
coordinatorSystemProducer.stop
// Create the actual job, and submit it.
val job = jobFactory.getJob(config).submit
info("waiting for job to start")
// Wait until the job has started, then exit.
Option(job.waitForStatus(Running, 500)) match {
case Some(appStatus) => {
if (Running.equals(appStatus)) {
info("job started successfully - " + appStatus)
} else {
warn("unable to start job successfully. job has status %s" format (appStatus))
}
}
case _ => warn("unable to start job successfully.")
}
info("exiting")
job
}
}
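// Illustrative usage sketch, not part of the original file: submitting a job
// programmatically mirrors what JobRunner.main does above -- build a Config via
// CommandLine and call run(). Only the object and method names below are
// invented for the example; resetJobConfig is the flag documented on run().
object JobRunnerUsageExample {
  def submit(args: Array[String]) = {
    val cmdline = new CommandLine
    val options = cmdline.parser.parse(args: _*)
    val config = cmdline.loadConfig(options)
    // Passing resetJobConfig = false keeps configs already present in the
    // coordinator stream instead of deleting those missing from `config`.
    new JobRunner(config).run(resetJobConfig = false)
  }
}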
|
zcan/samza
|
samza-core/src/main/scala/org/apache/samza/job/JobRunner.scala
|
Scala
|
apache-2.0
| 5,010 |
package com.github.pedrovgs.haveaniceday.smiles.storage
import javax.inject.Inject
import com.github.pedrovgs.haveaniceday.extensions.sqldate._
import com.github.pedrovgs.haveaniceday.smiles.model.{SmilesGeneration, SmilesGenerationResult}
import com.github.pedrovgs.haveaniceday.smiles.storage.codec._
import com.github.pedrovgs.haveaniceday.utils.Clock
import slick.Database
import slick.Tables.{SmilesGenerationRow, SmilesGenerationTable}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class SmilesGenerationsRepository @Inject()(database: Database, clock: Clock) {
import database.config.profile.api._
def saveLastGenerationStorage(result: SmilesGenerationResult): Future[SmilesGenerationResult] = {
val row = result match {
case Right(smile) => SmilesGenerationRow(0, clock.now, Some(smile.id), None)
case Left(error) => SmilesGenerationRow(0, clock.now, None, Some(error.message))
}
val insertQuery = SmilesGenerationTable returning SmilesGenerationTable
.map(_.id) into ((row, id) => row.copy(id = id))
database.db.run(insertQuery += row).map(_ => result)
}
def getGenerations(): Future[Seq[SmilesGeneration]] =
database.db.run(SmilesGenerationTable.result).map(asSmilesGeneration)
}
|
pedrovgs/HaveANiceDay
|
src/main/scala/com/github/pedrovgs/haveaniceday/smiles/storage/SmilesGenerationsRepository.scala
|
Scala
|
gpl-3.0
| 1,288 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services
import connectors.HmrcTierConnector
import controllers.FakePBIKApplication
import controllers.actions.MinimalAuthAction
import models._
import org.mockito.ArgumentMatchers.{any, eq => argEq}
import org.mockito.Mockito._
import org.scalatest.OptionValues
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import play.api.Application
import play.api.i18n.{I18nSupport, Messages, MessagesApi}
import play.api.inject.bind
import play.api.inject.guice.GuiceApplicationBuilder
import play.api.libs.json
import play.api.mvc.{AnyContent, AnyContentAsEmpty}
import play.api.test.FakeRequest
import play.api.test.Helpers.{contentAsString, defaultAwaitTimeout, status}
import support.TestAuthUser
import uk.gov.hmrc.auth.core.retrieve.Name
import uk.gov.hmrc.http.{HeaderCarrier, SessionId}
import utils.{TaxDateUtils, TestMinimalAuthAction}
import views.html.registration.NextTaxYear
import scala.concurrent.Future
class RegistrationServiceSpec
extends AnyWordSpecLike with Matchers with OptionValues with TestAuthUser with FakePBIKApplication
with I18nSupport {
override val messagesApi: MessagesApi = app.injector.instanceOf[MessagesApi]
override lazy val fakeApplication: Application = GuiceApplicationBuilder(
disabled = Seq(classOf[com.kenshoo.play.metrics.PlayModule])
).configure(config)
.overrides(bind[MinimalAuthAction].to(classOf[TestMinimalAuthAction]))
.overrides(bind[BikListService].toInstance(mock(classOf[BikListService])))
.overrides(bind[HmrcTierConnector].toInstance(mock(classOf[HmrcTierConnector])))
.build()
val registrationService: RegistrationService = {
val service = app.injector.instanceOf[RegistrationService]
lazy val CYCache: List[Bik] = List.tabulate(5)(n => Bik("" + (n + 1), 10))
when(service.bikListService.pbikHeaders).thenReturn(Map(HeaderTags.ETAG -> "0", HeaderTags.X_TXID -> "1"))
when(service.bikListService.registeredBenefitsList(any[Int], any[EmpRef])(any[String])(any[HeaderCarrier]))
.thenReturn(Future.successful(CYCache))
// Return instance where not all Biks have been registered for CY
when(
service.tierConnector.genericGetCall[List[Bik]](
any[String],
any[String],
any[EmpRef],
argEq(injected[TaxDateUtils].getCurrentTaxYear()))(any[HeaderCarrier], any[json.Format[List[Bik]]]))
.thenReturn(Future.successful(CYCache.filter { x: Bik =>
Integer.parseInt(x.iabdType) <= 3
}))
// Return instance where not all Biks have been registered for CYP1
when(
service.tierConnector.genericGetCall[List[Bik]](
any[String],
any[String],
any[EmpRef],
argEq(injected[TaxDateUtils].getCurrentTaxYear() + 1))(any[HeaderCarrier], any[json.Format[List[Bik]]]))
.thenReturn(Future.successful(CYCache.filter { x: Bik =>
Integer.parseInt(x.iabdType) <= 5
}))
service
}
"When generating a page which allows registrations, the service" should {
"return the selection page" in {
val request: FakeRequest[AnyContentAsEmpty.type] = mockrequest
val nextTaxYearView = app.injector.instanceOf[NextTaxYear]
implicit val authenticatedRequest: AuthenticatedRequest[AnyContent] =
AuthenticatedRequest(EmpRef("taxOfficeNumber", "taxOfficeReference"), UserName(Name(None, None)), request)
implicit val hc: HeaderCarrier = HeaderCarrier(sessionId = Some(SessionId(sessionId)))
val taxDateUtils = injected[TaxDateUtils]
val YEAR_RANGE = taxDateUtils.getTaxYearRange()
val result =
registrationService.generateViewForBikRegistrationSelection(
YEAR_RANGE.cyminus1,
"add",
nextTaxYearView(_, additive = true, YEAR_RANGE, _, _, _, _, _, EmpRef.empty))
status(result) shouldBe 200
contentAsString(result) should include(Messages("AddBenefits.Heading"))
contentAsString(result) should include(Messages("BenefitInKind.label.4"))
}
}
}
|
hmrc/pbik-frontend
|
test/services/RegistrationServiceSpec.scala
|
Scala
|
apache-2.0
| 4,623 |
/*
* Copyright (c) 1995, 2008, Oracle and/or its affiliates. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Neither the name of Oracle or the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package scala.swing.examples.tutorials.layout
/*
* 1.2+ version. Used by CustomLayoutDemo.scala.
*/
import java.awt._
//remove if not needed
import scala.collection.JavaConversions._
class DiagonalLayout(private var vgap: Int) extends LayoutManager {
private var minWidth: Int = 0
private var minHeight: Int = 0
private var preferredWidth: Int = 0
private var preferredHeight: Int = 0
private var sizeUnknown: Boolean = true
def this() {
this(5)
}
/* Required by LayoutManager. */
def addLayoutComponent(name: String, comp: Component) {
}
def removeLayoutComponent(comp: Component) {
}
private def setSizes(parent: Container) {
val nComps = parent.getComponentCount
var d: Dimension = null
//Reset preferred/minimum width and height.
preferredWidth = 0
preferredHeight = 0
minWidth = 0
minHeight = 0
for (i <- 0 until nComps) {
val c = parent.getComponent(i)
if (c.isVisible) {
d = c.getPreferredSize
if (i > 0) {
preferredWidth += d.width / 2
preferredHeight += vgap
} else {
preferredWidth = d.width
}
preferredHeight += d.height
minWidth = Math.max(c.getMinimumSize.width, minWidth)
minHeight = preferredHeight
}
}
}
/* Required by LayoutManager. */
def preferredLayoutSize(parent: Container): Dimension = {
val dim = new Dimension(0, 0)
val nComps = parent.getComponentCount
setSizes(parent)
val insets = parent.getInsets
dim.width = preferredWidth + insets.left + insets.right
dim.height = preferredHeight + insets.top + insets.bottom
sizeUnknown = false
dim
}
/* Required by LayoutManager. */
def minimumLayoutSize(parent: Container): Dimension = {
val dim = new Dimension(0, 0)
val nComps = parent.getComponentCount
val insets = parent.getInsets
dim.width = minWidth + insets.left + insets.right
dim.height = minHeight + insets.top + insets.bottom
sizeUnknown = false
dim
}
/* Required by LayoutManager. */
/*
* This is called when the panel is first displayed,
* and every time its size changes.
* Note: You CAN'T assume preferredLayoutSize or
* minimumLayoutSize will be called -- in the case
* of applets, at least, they probably won't be.
*/
def layoutContainer(parent: Container) {
val insets = parent.getInsets
val maxWidth = parent.getWidth - (insets.left + insets.right)
val maxHeight = parent.getHeight - (insets.top + insets.bottom)
val nComps = parent.getComponentCount
var previousWidth = 0
var previousHeight = 0
var x = 0
var y = insets.top
val rowh = 0
val start = 0
var xFudge = 0
var yFudge = 0
var oneColumn = false
// Go through the components' sizes, if neither
// preferredLayoutSize nor minimumLayoutSize has
// been called.
if (sizeUnknown) {
setSizes(parent)
}
if (maxWidth <= minWidth) {
oneColumn = true
}
if (maxWidth != preferredWidth) {
xFudge = (maxWidth - preferredWidth) / (nComps - 1)
}
if (maxHeight > preferredHeight) {
yFudge = (maxHeight - preferredHeight) / (nComps - 1)
}
for (i <- 0 until nComps) {
val c = parent.getComponent(i)
if (c.isVisible) {
val d = c.getPreferredSize
// increase x and y, if appropriate
if (i > 0) {
if (!oneColumn) {
x += previousWidth / 2 + xFudge
}
y += previousHeight + vgap + yFudge
}
        // If x is too large, pull the component back so it stays inside the container.
if ((!oneColumn) && (x + d.width) > (parent.getWidth - insets.right)) {
x = parent.getWidth - insets.bottom - d.width
}
// If y is too large,
if ((y + d.height) > (parent.getHeight - insets.bottom)) {
// do nothing.
// Another choice would be to do what we do to x.
}
// Set the component's size and position.
c.setBounds(x, y, d.width, d.height)
previousWidth = d.width
previousHeight = d.height
}
}
}
  override def toString(): String =
    getClass.getName + "[vgap=" + vgap + "]"
}
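// Illustrative usage sketch, not part of the original file: installing this
// layout on a Swing container, roughly what CustomLayoutDemo.scala does. The
// button labels are arbitrary.
object DiagonalLayoutExample {
  def buildPanel(): javax.swing.JPanel = {
    val panel = new javax.swing.JPanel(new DiagonalLayout())
    // Components are placed diagonally: each is offset by half the previous
    // component's width and separated vertically by vgap pixels.
    panel.add(new javax.swing.JButton("Button 1"))
    panel.add(new javax.swing.JButton("Button 2"))
    panel.add(new javax.swing.JButton("Button 3"))
    panel
  }
}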
|
benhutchison/scala-swing
|
examples/src/main/scala/scala/swing/examples/tutorials/layout/DiagonalLayout.scala
|
Scala
|
bsd-3-clause
| 5,832 |
package com.scalaAsm.x86
package Instructions
package System
// Description: Read from Model Specific Register
// Category: general
trait RDMSR extends InstructionDefinition {
val mnemonic = "RDMSR"
}
object RDMSR extends ZeroOperands[RDMSR] with RDMSRImpl
trait RDMSRImpl extends RDMSR {
implicit object _0 extends NoOp{
val opcode: TwoOpcodes = (0x0F, 0x32)
override def hasImplicitOperand = true
}
}
|
bdwashbu/scala-x86-inst
|
src/main/scala/com/scalaAsm/x86/Instructions/System/RDMSR.scala
|
Scala
|
apache-2.0
| 425 |
package org.nlogo.extensions.dnl
import org.scalatest.{ BeforeAndAfterAll, FunSuite, OneInstancePerTest }
import org.scalatest.concurrent.Timeouts
import org.scalatest.time.SpanSugar._
import org.zeromq.ZMQ, ZMQ.Context
class ServerThreadTest extends FunSuite with Timeouts with BeforeAndAfterAll {
val context = ZMQ.context(1)
val socketManager = new SocketManager(context)
override def afterAll() =
context.term()
test("That ServerThread can be stopped") {
val st = new ServerThread(socketManager, "inproc://test1", {
req =>
Thread.sleep(1000)
Messages.LogoObject("1")
})
st.start()
while(! st.bound) {}
failAfter(400 millis) {
st.close()
st.join(400)
}
}
}
|
mrerrormessage/dnl
|
src/test/scala/ServerThreadTest.scala
|
Scala
|
cc0-1.0
| 742 |
package org.codersunit.tn.output.limiter
trait Limiter {
def allowed(assoc: List[String], prob: Double, count: Int): Boolean
}
|
rnijveld/twitnetter
|
src/main/scala/output/limiter/Limiter.scala
|
Scala
|
bsd-3-clause
| 132 |
package piecewise
import scala.collection.mutable.ListBuffer
import scala.math._
/** Monotonic piecewise cubic curve for spline interpolation
  *
  * A curve used to approximate physical quantities for which monotonicity must be preserved
*
* @see Fritsch, F. N. Monotone piecewise cubic interpolation
* / F. N. Fritsch, R. E. Carlson // SIAM J. Numer. Anal. — 1980. — 17. № 2. — pp. 238 — 246.
* @version 0.5.0
* @author Даниил
*/
case class M1Hermite3(coefs: Array[Double], x0: Double) extends Hermite {
override def apply(x: Double): Double = PieceFunction.cubicRuleOfHorner(x - x0, coefs(0), coefs(1), coefs(2), coefs(3))
override def derivative(x: Double): Double = PieceFunction.cubicHornerDerivative(x - x0, coefs(0), coefs(1), coefs(2), coefs(3))
override def antider(x: Double): Double = PieceFunction.cubicHornerIntegral(x - x0, coefs(0), coefs(1), coefs(2), coefs(3))
private lazy val body = f"*(x${-x0}%+.7f)"
override lazy val toString = {
f"${coefs(3)}%1.4f" + body + f"^3 ${coefs(2)}%+1.4f" + body + f"^2 ${coefs(1)}%+1.4f" +
body + f" ${coefs(0)}%+1.4f"
}
//TODO get desired spline smoothness
//private[this] lazy val fi4 = if(2 * alpha + beta < 3.0 || alpha + 2 * beta < 3.0) true else false
// private[this] lazy val fi3 = if(alpha + beta < 3 || fi4 == false) true else false
// private[this] lazy val fi2 = if(sqrt(pow(alpha,2) + pow(beta,2)) < 3.0 || fi3 == false) true else false
// private[this] lazy val fi1 = if(alpha < 0.3 && beta < 0.3 || fi2 == false) true else false
/** Spline smoothness
*
* @return
* string with "Smooth", "Normal", "Coarse", "Coarsest", which depends on curve smoothness
*/
// def smoothness = {
// true match {
// case `fi1` => M1Hermite3 SMOOTH
// case `fi2` => M1Hermite3 NORMAL
// case `fi3` => M1Hermite3 COARSE
// case `fi4` => M1Hermite3 COARSEST
// case _ if isMonotone => "Monotone"
// case _ => "No monotone"
// }
// }
  /** Extrema of the function on the interval `[low, upp]`
    *
    * @return extrema of the function */
override protected def extremum(low: Double, upp: Double): List[Double] = ???
override def equals(obj: scala.Any): Boolean = {
obj match {
case sameType: Hermite3 => {
coefs.zip(sameType.coefs).forall(t => t._1.equals(t._2)) &&
x0.equals(sameType.x0)
}
case _ => false
}
}
override def area(x0: Double, x1: Double) = ???
}
object M1Hermite3 {
def constructSpline(source: Array[Double]): ((Double, Double), M1Hermite3) = {
val Array(yLow, yUpp, sdLow, sdUpp, xLow, xUpp) = source
val delta_ = delta(yLow, yUpp, xLow, xUpp)
val h_ = h(xLow, xUpp)
val coefs: Array[Double] = Array(
yLow, sdLow,
(-2.0 * sdLow - sdUpp + 3.0 * delta_) / h_,
(sdLow + sdUpp - 2.0 * delta_) / pow(h_, 2.0)
)
((xLow, xUpp), M1Hermite3(coefs, xLow))
}
def smoothness(prev: Array[Double],
next: Array[Double])
: Array[Double] = {
val Array(_, _, _, dLeft, _, _) = prev
val Array(_, _, dRight, _, _, _) = next
val der = signum(dLeft) * min(abs(dRight), abs(dLeft))
prev.update(3, der)
next.update(2, der)
prev
}
def apply(values: Iterator[(Double, Double)]
): Iterator[((Double, Double), M1Hermite3)] = {
import Hermite3._
val vals = values.toList
val sources = Hermite3.makeSources(vals, deriv(vals.head, vals.tail.head))
.map(monothone(_)(Normal))
if (sources.isEmpty) Iterator.empty
else {
def go(prevous: Array[Double],
iter: Iterator[Array[Double]],
acc: ListBuffer[Array[Double]]): Iterator[Array[Double]] = {
if (iter.isEmpty) {
acc += prevous
acc.result().iterator
} else {
val next = iter.next()
val transformed = smoothness(prevous, next)
acc += transformed
go(next, iter, acc)
}
}
val first = sources.next()
go(first, sources, ListBuffer.empty).map(constructSpline)
}
}
def apply(x: List[Double], y: List[Double]): List[M1Hermite3] = {
???
}
implicit def convert[S <: PieceFunction](low: Double,
upp: Double, fn: S): M1Hermite3 = {
fn match {
case line: Line => new M1Hermite3(Array(line.intercept, line.slope, 0.0, 0.0), 0.0)
case nonMonothone: Hermite3 => ???
}
}
}
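// Illustrative sketch, not part of the original file: building a single
// monotone segment and evaluating it. The source array layout
// (yLow, yUpp, sdLow, sdUpp, xLow, xUpp) matches the pattern match inside
// constructSpline; the numbers themselves are arbitrary example data.
object M1Hermite3Example {
  def midpointValue(): Double = {
    val source = Array(0.0, 1.0, 0.5, 0.5, 0.0, 2.0)
    val ((xLow, xUpp), segment) = M1Hermite3.constructSpline(source)
    // apply evaluates the cubic via Horner's rule relative to xLow.
    segment((xLow + xUpp) / 2.0)
  }
}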
|
daniil-timofeev/gridsplines
|
piecewise/src/main/scala/piecewise/M1Hermite3.scala
|
Scala
|
apache-2.0
| 4,503 |
/*
* Copyright (c) 2014, Brook 'redattack34' Heisler
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the ModularRayguns team nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package com.castlebravostudios.rayguns.items.chambers
import com.castlebravostudios.rayguns.api.items.ItemModule
import com.castlebravostudios.rayguns.entities.BaseBeamEntity
import com.castlebravostudios.rayguns.entities.effects.LightningBeamEntity
import com.castlebravostudios.rayguns.entities.effects.LightningEffect
import com.castlebravostudios.rayguns.mod.ModularRayguns
import com.castlebravostudios.rayguns.utils.BeamUtils
import com.castlebravostudios.rayguns.utils.Extensions.WorldExtension
import com.castlebravostudios.rayguns.utils.MidpointDisplacement
import com.castlebravostudios.rayguns.utils.RaytraceUtils
import com.castlebravostudios.rayguns.utils.Vector3
import net.minecraft.entity.player.EntityPlayer
import net.minecraft.world.World
import com.castlebravostudios.rayguns.items.emitters.Emitters
import com.castlebravostudios.rayguns.items.misc.Tier1EmptyChamber
object LightningChamber extends BaseChamber {
val moduleKey = "LightningChamber"
val powerModifier = 2.0
val shotEffect = LightningEffect
val nameSegmentKey = "rayguns.LightningChamber.segment"
def createItem() : ItemModule = new ItemChamber( this,
Emitters.lightningEmitter, Tier1EmptyChamber )
.setUnlocalizedName("rayguns.LightningChamber")
.setTextureName("rayguns:chamber_lightning")
.setCreativeTab( ModularRayguns.raygunsTab )
.setMaxStackSize(1)
def registerShotHandlers() : Unit = {
registerSingleShotHandlers()
registerScatterShotHandler()
registerChargedShotHandler()
registerPreciseShotHandler()
}
}
|
Redattack34/ModularRayguns
|
src/main/scala/com/castlebravostudios/rayguns/items/chambers/LightningChamber.scala
|
Scala
|
bsd-3-clause
| 3,153 |
package org.scaladebugger.api.lowlevel.breakpoints
import com.sun.jdi.request.BreakpointRequest
import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument
import scala.util.Try
/**
* Represents the manager for breakpoint requests.
*/
trait BreakpointManager {
/**
* Retrieves the list of breakpoints contained by this manager.
*
* @return The collection of breakpoints in the form of information
*/
def breakpointRequestList: Seq[BreakpointRequestInfo]
/**
* Retrieves the list of breakpoints contained by this manager.
*
* @return The collection of breakpoints by id
*/
def breakpointRequestListById: Seq[String]
/**
   * Creates and enables a breakpoint on the specified line of the file.
*
* @param requestId The id of the request used for lookup and removal
* @param fileName The name of the file to set a breakpoint
* @param lineNumber The number of the line to break
* @param extraArguments Any additional arguments to provide to the request
*
* @return Success(id) if successful, otherwise Failure
*/
def createBreakpointRequestWithId(
requestId: String,
fileName: String,
lineNumber: Int,
extraArguments: JDIRequestArgument*
): Try[String]
/**
   * Creates and enables a breakpoint on the specified line of the file.
*
* @param fileName The name of the file to set a breakpoint
* @param lineNumber The number of the line to break
* @param extraArguments Any additional arguments to provide to the request
*
* @return Success(id) if successful, otherwise Failure
*/
def createBreakpointRequest(
fileName: String,
lineNumber: Int,
extraArguments: JDIRequestArgument*
): Try[String] = createBreakpointRequestWithId(
newRequestId(),
fileName,
lineNumber,
extraArguments: _*
)
/**
* Creates and enables a breakpoint based on the specified information.
*
* @param breakpointRequestInfo The information used to create the breakpoint
* request
*
* @return Success(id) if successful, otherwise Failure
*/
def createBreakpointRequestFromInfo(
breakpointRequestInfo: BreakpointRequestInfo
): Try[String] = createBreakpointRequestWithId(
breakpointRequestInfo.requestId,
breakpointRequestInfo.fileName,
breakpointRequestInfo.lineNumber,
breakpointRequestInfo.extraArguments: _*
)
/**
   * Determines whether or not a breakpoint exists on the specified line of the file.
*
* @param fileName The name of the file whose line to reference
* @param lineNumber The number of the line to check for a breakpoint
*
* @return True if a breakpoint exists, otherwise false
*/
def hasBreakpointRequest(fileName: String, lineNumber: Int): Boolean
/**
* Determines whether or not the breakpoint with the specified id exists.
*
* @param requestId The id of the request
*
* @return True if a breakpoint exists, otherwise false
*/
def hasBreakpointRequestWithId(requestId: String): Boolean
/**
   * Returns the collection of breakpoint requests set on the specified line
   * of the file.
*
* @param fileName The name of the file whose line to reference
* @param lineNumber The number of the line to check for breakpoints
*
* @return Some collection of breakpoints for the specified line, or None if
* the specified line has no breakpoints
*/
def getBreakpointRequest(
fileName: String,
lineNumber: Int
): Option[Seq[BreakpointRequest]]
/**
* Returns the collection of breakpoints with the specified id.
*
* @param requestId The id of the request
*
* @return Some collection of breakpoints for the specified line, or None if
* the specified line has no breakpoints
*/
def getBreakpointRequestWithId(
requestId: String
): Option[Seq[BreakpointRequest]]
/**
* Returns the information for a breakpoint request with the specified id.
*
* @param requestId The id of the request
*
* @return Some breakpoint information if found, otherwise None
*/
def getBreakpointRequestInfoWithId(
requestId: String
): Option[BreakpointRequestInfo]
/**
* Removes the breakpoint on the specified line of the file.
*
* @param fileName The name of the file to remove the breakpoint
* @param lineNumber The number of the line to break
*
* @return True if successfully removed breakpoint, otherwise false
*/
def removeBreakpointRequest(
fileName: String,
lineNumber: Int
): Boolean
/**
* Removes the breakpoint with the specified id.
*
* @param requestId The id of the request
*
* @return True if successfully removed breakpoint, otherwise false
*/
def removeBreakpointRequestWithId(
requestId: String
): Boolean
/**
* Generates an id for a new request.
*
* @return The id as a string
*/
protected def newRequestId(): String = java.util.UUID.randomUUID().toString
}
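// Illustrative usage sketch, not part of the original file: a typical
// create/query/remove sequence against some concrete BreakpointManager
// implementation. The file name and line number are placeholders.
object BreakpointManagerUsageExample {
  def toggleBreakpoint(manager: BreakpointManager): Boolean = {
    // Set a breakpoint on line 42 of Example.scala; on success the Try holds
    // the generated request id.
    val requestId = manager.createBreakpointRequest("Example.scala", 42)
    requestId.foreach(id => assert(manager.hasBreakpointRequestWithId(id)))
    // Remove it again, this time by file and line rather than by id.
    manager.removeBreakpointRequest("Example.scala", 42)
  }
}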
|
ensime/scala-debugger
|
scala-debugger-api/src/main/scala/org/scaladebugger/api/lowlevel/breakpoints/BreakpointManager.scala
|
Scala
|
apache-2.0
| 4,986 |
package ahlers.michael.basic
import java.util.UUID.randomUUID
import ahlers.michael.basic.BasicActor._
import akka.actor.ActorSystem
import akka.testkit.{ImplicitSender, TestKit}
import org.scalatest.{BeforeAndAfterAll, FlatSpecLike, Matchers}
/**
* @author <a href="mailto:[email protected]">Michael Ahlers</a>
*/
class BasicActorSpec extends TestKit(ActorSystem()) with ImplicitSender
with FlatSpecLike with Matchers with BeforeAndAfterAll {
override def afterAll {
super.afterAll
TestKit.shutdownActorSystem(system)
}
val id = randomUUID
val datas = "foo" :: "bear" :: Nil map {
_.getBytes.toSeq
}
it must "accept commands" in {
val actor = system.actorOf(BasicActor.props(id))
datas map Command foreach {
actor ! _
}
actor ! Fetch
expectMsg(State(datas))
}
it must "restore state" in {
val actor = system.actorOf(BasicActor.props(id))
actor ! Fetch
expectMsg(State(datas))
}
}
|
michaelahlers/sandbox-akka-serialization
|
src/test/scala/ahlers/michael/basic/BasicActorSpec.scala
|
Scala
|
mit
| 973 |
package sangria.marshalling
import scala.annotation.implicitNotFound
import scala.language.higherKinds
@implicitNotFound(
"Type ${Val} cannot be used as an input. Please consider defining an implicit instance of `FromInput` for it.")
trait FromInput[Val] {
val marshaller: ResultMarshaller
def fromResult(node: marshaller.Node): Val
}
object FromInput {
private object ScalarFromInput extends FromInput[Any] {
val marshaller: CoercedScalaResultMarshaller = CoercedScalaResultMarshaller.default
def fromResult(node: marshaller.Node): marshaller.Node = node
}
class SeqFromInput[T](delegate: FromInput[T]) extends FromInput[Seq[T]] {
val marshaller: ResultMarshaller = delegate.marshaller
def fromResult(node: marshaller.Node): Seq[T] =
node
.asInstanceOf[Seq[Any]]
.map {
case optElem: Option[_] =>
optElem.map(elem => delegate.fromResult(elem.asInstanceOf[delegate.marshaller.Node]))
case elem =>
delegate.fromResult(elem.asInstanceOf[delegate.marshaller.Node])
}
.asInstanceOf[Seq[T]]
}
import sangria.util.tag._
implicit def coercedScalaInput[T]: FromInput[T @@ CoercedScalaResult] =
ScalarFromInput.asInstanceOf[FromInput[T @@ CoercedScalaResult]]
implicit def defaultInput[T]: FromInput[Map[String, Any]] =
ScalarFromInput.asInstanceOf[FromInput[Map[String, Any]]]
implicit def inputObjectResultInput[T](implicit
ev: FromInput[T]): FromInput[T @@ InputObjectResult] =
ev.asInstanceOf[FromInput[T @@ InputObjectResult]]
implicit def optionInput[T](implicit ev: FromInput[T]): FromInput[Option[T]] =
ev.asInstanceOf[FromInput[Option[T]]]
implicit def seqInput[T](implicit ev: FromInput[T]): SeqFromInput[T] = new SeqFromInput[T](ev)
trait CoercedScalaResult
trait InputObjectResult
}
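// Illustrative sketch, not part of the original file: a hand-written FromInput
// for a simple case class, built on the same coerced-scala marshaller used by
// ScalarFromInput above. With that marshaller an input object is expected to
// arrive as a Map[String, Any], with optional fields wrapped in Option. The
// Article type and its field names are examples only.
object FromInputExample {
  final case class Article(title: String, text: Option[String])
  implicit val articleFromInput: FromInput[Article] = new FromInput[Article] {
    val marshaller: CoercedScalaResultMarshaller = CoercedScalaResultMarshaller.default
    def fromResult(node: marshaller.Node): Article = {
      val fields = node.asInstanceOf[Map[String, Any]]
      Article(
        title = fields("title").asInstanceOf[String],
        text = fields.get("text").flatMap(_.asInstanceOf[Option[String]]))
    }
  }
}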
|
sangria-graphql/sangria-marshalling-api
|
src/main/scala/sangria/marshalling/FromInput.scala
|
Scala
|
apache-2.0
| 1,845 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.table
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{Slide, Tumble}
import org.apache.flink.table.planner.utils.{EmptyTableAggFunc, TableTestBase}
import org.junit.Test
class GroupWindowTableAggregateTest extends TableTestBase {
val util = streamTestUtil()
val table = util.addTableSource[(Long, Int, Long, Long)]('a, 'b, 'c, 'd.rowtime, 'e.proctime)
val emptyFunc = new EmptyTableAggFunc
@Test
def testSingleWindow(): Unit = {
val windowedTable = table
.window(Tumble over 5.milli on 'd as 'w)
.groupBy('w, 'c)
.flatAggregate(emptyFunc('a, 'b))
.select('f0, 'f1 + 1, 'w.start, 'w.end)
util.verifyPlan(windowedTable)
}
@Test
def testMultiWindow(): Unit = {
val windowedTable = table
.window(Tumble over 50.milli on 'e as 'w1)
.groupBy('w1, 'c)
.flatAggregate(emptyFunc('a, 'b))
.select('w1.proctime as 'proctime, 'c, 'f0, 'f1 + 1 as 'f1)
.window(Slide over 20.milli every 10.milli on 'proctime as 'w2)
.groupBy('w2)
.flatAggregate(emptyFunc('f0))
.select('w2.start, 'f1)
util.verifyPlan(windowedTable)
}
@Test
def testTimeMaterializer(): Unit = {
val windowedTable = table
.window(Tumble over 5.milli on 'd as 'w)
.groupBy('w, 'e)
.flatAggregate(emptyFunc('a, 'b))
.select('f0, 'f1 + 1, 'w.start, 'w.end)
util.verifyPlan(windowedTable)
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/GroupWindowTableAggregateTest.scala
|
Scala
|
apache-2.0
| 2,317 |
package almhirt.aggregates
/** Represents the states that an aggregate can advance through in its 'lifetime' */
sealed trait AggregateRootLifecycle[+T <: AggregateRoot] {
def version: AggregateRootVersion
def idOption: Option[AggregateRootId]
}
/** The aggregate root is either [[Vacat]] or [[Vivus]] */
sealed trait Antemortem[+T <: AggregateRoot] extends AggregateRootLifecycle[T]
/** The aggregate root is either [[Vivus]] or [[Mortuus]] */
sealed trait Postnatalis[+T <: AggregateRoot] extends AggregateRootLifecycle[T] {
def id: AggregateRootId
def idOption = Some(id)
}
/** The aggregate root is either [[Vacat]] or [[Mortuus]] */
sealed trait Transcendentia[+T <: AggregateRoot] extends AggregateRootLifecycle[T]
/** The aggregate root does not exist and hasn't died yet. */
case object Vacat extends Antemortem[Nothing] with Transcendentia[Nothing] {
val version: AggregateRootVersion = AggregateRootVersion(0L)
def idOption = None
}
/** The aggregate root exists. */
final case class Vivus[T <: AggregateRoot](ar: T) extends Postnatalis[T] with Antemortem[T] {
def version: AggregateRootVersion = ar.version
def id: AggregateRootId = ar.id
}
/** The aggregate root does not exists anymore. */
final case class Mortuus(id: AggregateRootId, version: AggregateRootVersion) extends Postnatalis[Nothing] with Transcendentia[Nothing]
object AggregateRootLifecycle {
implicit class LifecycleOps[T <: AggregateRoot](self: AggregateRootLifecycle[T]) {
import scalaz._, Scalaz._
import almhirt.common._
def toOption: Option[T] =
self match {
case Vivus(ar) ⇒ Some(ar)
case _ ⇒ None
}
def toAggregateRoot: AlmValidation[T] =
self match {
case Vivus(ar) ⇒ ar.success
case Vacat ⇒ NotFoundProblem("The aggregate root does not exist.").failure
case Mortuus(id, v) ⇒ AggregateRootDeletedProblem(id).failure
}
}
}
object Postnatalis {
def unapply(p: Postnatalis[_ <: AggregateRoot]): Option[(AggregateRootId, AggregateRootVersion)] =
Some((p.id, p.version))
}
object VivusRef {
def unapply(v: Vivus[_ <: AggregateRoot]): Option[(AggregateRootId, AggregateRootVersion)] =
Some((v.id, v.version))
}
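// Illustrative sketch, not part of the original file: the three concrete
// states (Vacat, Vivus, Mortuus) cover every lifecycle value, so matching on
// them is exhaustive. The describe helper is an example, not part of the API.
object AggregateRootLifecycleExample {
  def describe[T <: AggregateRoot](lifecycle: AggregateRootLifecycle[T]): String =
    lifecycle match {
      case Vacat => "not yet created"
      case Vivus(ar) => s"alive: ${ar.id} at version ${ar.version}"
      case Mortuus(id, version) => s"deleted: $id at version $version"
    }
}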
|
chridou/almhirt
|
almhirt-common/src/main/scala/almhirt/aggregates/AggregateRootLifecycle.scala
|
Scala
|
apache-2.0
| 2,219 |
package com.argcv.valhalla
/**
*
* @author Yu Jing <[email protected]> on 10/9/16
*/
package object ml {
}
|
yuikns/valhalla
|
src/main/scala/com/argcv/valhalla/ml/package.scala
|
Scala
|
mit
| 107 |
package dhg.ccg.parse.pcfg
import org.junit.Test
import org.junit.Assert._
import dhg.ccg.parse._
import dhg.ccg.prob._
import dhg.ccg.cat._
import dhg.ccg.tagdict.SimpleTagDictionary
import dhg.util._
import dhg.ccg.parse.pcfg.mcmc.PcfgProductionCounter
class SupPcfgTrainerTests {
val A: Cat = cat"A"
val B: Cat = cat"B"
val C: Cat = cat"C"
val D: Cat = cat"D"
val E: Cat = cat"E"
val F: Cat = cat"F"
val G: Cat = cat"G"
val H: Cat = cat"H"
val Z: Cat = cat"Z"
@Test
def test_UnsmoothedSupPcfgTrainer_train {
throw new NotImplementedError("Test not written")
}
@Test
def test_AlphaBetaSupPcfgTrainer {
type Word = String
val alphaRoot = 2.1
val alphaProd = 202.2
val alphaLctx = 213.3
val alphaRctx = 224.4
val s1: CcgTree = CcgLeaf(A, "something", "FAKEPOS")
val s2: CcgTree = CcgLeaf(B, "fake", "FAKEPOS")
val mockProductionFinder = new PcfgProductionCounter {
def rootCounts(t: CcgTree): Map[Cat, Double] = t match {
case _ if t == s1 => Map(A -> 11, B -> 12)
case _ if t == s2 => Map(B -> 13, C -> 14, D -> 15)
}
def binyCounts(t: CcgTree): Map[Cat, Map[BinaryProd, Double]] = ???
def unryCounts(t: CcgTree): Map[Cat, Map[UnaryProd, Double]] = ???
def termCounts(t: CcgTree): Map[Cat, Map[TermProd, Double]] = ???
def prodCounts(t: CcgTree): Map[Cat, Map[Prod, Double]] = t match {
case _ if t == s1 => Map(
A -> Map(BinaryProd(B, C) -> 21, UnaryProd(B) -> 24, TermProd("a1") -> 27),
C -> Map(BinaryProd(D, E) -> 22, UnaryProd(D) -> 25, TermProd("c1") -> 28, TermProd("c2") -> 29),
D -> Map(BinaryProd(B, C) -> 23, UnaryProd(B) -> 26))
case _ if t == s2 => Map(
A -> Map(BinaryProd(B, C) -> 24, BinaryProd(E, F) -> 26, UnaryProd(B) -> 64, UnaryProd(E) -> 66, TermProd("a1") -> 35, TermProd("a2") -> 36),
C -> Map(BinaryProd(D, E) -> 25, BinaryProd(E, F) -> 27, UnaryProd(D) -> 65, UnaryProd(E) -> 67, TermProd("c1") -> 37, TermProd("c3") -> 38),
E -> Map(TermProd("e1") -> 39))
}
}
val priorRootDist = new LogProbabilityDistribution[Cat] {
def apply(b: Cat): LogDouble = LogDouble(b match {
case A => 0.91
case B => 0.92
case C => 0.93
case D => 0.94
case E => 0.95
case F => 0.96
case G => 0.97
case H => 0.98
case Z => 0.99
})
def sample(): Cat = ???
def defaultProb: LogDouble = ???
}
val priorBinyDist = new ConditionalLogProbabilityDistribution[Cat, BinaryProd] {
def apply(x: BinaryProd, given: Cat): LogDouble = LogDouble((given, x) match {
case (A, BinaryProd(B, C)) => 0.11
case (A, BinaryProd(E, D)) => 0.12
case (A, BinaryProd(E, F)) => 0.13
case (A, BinaryProd(Z, Z)) => 0.38
case (C, BinaryProd(A, D)) => 0.14
case (C, BinaryProd(D, E)) => 0.15
case (C, BinaryProd(D, F)) => 0.16
case (C, BinaryProd(E, D)) => 0.17
case (C, BinaryProd(E, F)) => 0.18
case (D, BinaryProd(B, C)) => 0.19
case (E, BinaryProd(D, F)) => 0.21
case (E, BinaryProd(B, C)) => 0.22
case (Z, BinaryProd(Z, Z)) => 0.39
})
def sample(given: Cat): BinaryProd = ???
}
val priorUnryDist = new ConditionalLogProbabilityDistribution[Cat, UnaryProd] {
def apply(x: UnaryProd, given: Cat): LogDouble = LogDouble((given, x) match {
case (A, UnaryProd(B)) => 0.23
case (A, UnaryProd(D)) => 0.24
case (A, UnaryProd(E)) => 0.25
case (A, UnaryProd(Z)) => 0.41
case (C, UnaryProd(A)) => 0.26
case (C, UnaryProd(D)) => 0.27
case (C, UnaryProd(E)) => 0.28
case (D, UnaryProd(B)) => 0.42
case (E, UnaryProd(B)) => 0.43
case (E, UnaryProd(D)) => 0.44
case (Z, UnaryProd(Z)) => 0.45
})
def sample(given: Cat): UnaryProd = ???
}
val priorTermDist = new ConditionalLogProbabilityDistribution[Cat, TermProd] {
def apply(x: TermProd, given: Cat): LogDouble = LogDouble((given, x) match {
case (A, TermProd("a1")) => 0.31
case (A, TermProd("a2")) => 0.32
case (A, TermProd("z")) => 0.46
case (C, TermProd("c1")) => 0.33
case (C, TermProd("c2")) => 0.34
case (C, TermProd("c3")) => 0.35
case (D, TermProd("d2")) => 0.36
case (E, TermProd("e1")) => 0.37
case (Z, TermProd("z")) => 0.47
})
def sample(given: Cat): TermProd = ???
}
val mockResultingParser = new AbstractKBestGuideChartParser {
def parseAndProbKBestFromGuideChart(guideChart: CfgGuideChart, k: Int): Vector[(CcgTree, LogDouble)] = ???
}
val mockPcfgParserInstantiater = new PcfgParserInstantiater {
def apply(
rootDist: LogProbabilityDistribution[Cat],
prodDist: ConditionalLogProbabilityDistribution[Cat, Prod]) = {
/* ROOTS
* A: 11 + (2.1 * 0.91) = 12.911 / 72.77 = 0.17742201456644222
* B: (12+13) + (2.1 * 0.92) = 26.932 / 72.77 = 0.3700975676789886
* C: 14 + (2.1 * 0.93) = 15.953 / 72.77 = 0.2192249553387385
* D: 15 + (2.1 * 0.94) = 16.974 / 72.77 = 0.23325546241583073
* ------
* 72.77
         * E:  0 + (2.1 * 0.95) =  1.995 / 72.77 = 0.027415143603133157
         * F:  0 + (2.1 * 0.96) =
*/
assertEqualsLog(LogDouble((11 + (2.1 * 0.91)) / (2.1 + 65)), rootDist(A), 1e-9)
assertEqualsLog(LogDouble((25 + (2.1 * 0.92)) / (2.1 + 65)), rootDist(B), 1e-9)
assertEqualsLog(LogDouble((14 + (2.1 * 0.93)) / (2.1 + 65)), rootDist(C), 1e-9)
assertEqualsLog(LogDouble((15 + (2.1 * 0.94)) / (2.1 + 65)), rootDist(D), 1e-9)
assertEqualsLog(LogDouble((0 + (2.1 * 0.95)) / (2.1 + 65)), rootDist(E), 1e-9)
assertEqualsLog(LogDouble((0 + (2.1 * 0.96)) / (2.1 + 65)), rootDist(F), 1e-9)
assertEqualsLog(LogDouble((0 + (2.1 * 0.97)) / (2.1 + 65)), rootDist(G), 1e-9)
assertEqualsLog(LogDouble((0 + (2.1 * 0.98)) / (2.1 + 65)), rootDist(H), 1e-9)
assertEqualsLog(LogDouble((0 + (2.1 * 0.99)) / (2.1 + 65)), rootDist(Z), 1e-9)
/* PRODS
* A -> BC 45 + (202.2 * 0.11*0.5) = 56.1210 / 401.858 =
* A -> EF 26 + (202.2 * 0.13*0.5) = 39.1430 / 401.858 =
* A -> B 88 + (202.2 * 0.23*0.3) = 101.9518 / 401.858 =
* A -> E 66 + (202.2 * 0.25*0.3) = 81.1650 / 401.858 =
* A -> a1 62 + (202.2 * 0.31*0.2) = 74.5364 / 401.858 =
* A -> a2 36 + (202.2 * 0.32*0.2) = 48.9408 / 401.858 =
* --------
* 401.858
* A -> ED 0 + (202.2 * 0.12*0.5) =
* A -> D 0 + (202.2 * 0.24*0.3) =
*
*
* C -> DE 47 + (202.2 * 0.15*0.5) = 62.1650 / 470.9748 =
* C -> EF 27 + (202.2 * 0.18*0.5) = 45.1980 / 470.9748 =
* C -> D 90 + (202.2 * 0.27*0.3) = 106.3782 / 470.9748 =
* C -> E 67 + (202.2 * 0.28*0.3) = 83.9848 / 470.9748 =
* C -> c1 65 + (202.2 * 0.33*0.2) = 78.3452 / 470.9748 =
* C -> c2 29 + (202.2 * 0.34*0.2) = 42.7496 / 470.9748 =
* C -> c3 38 + (202.2 * 0.35*0.2) = 52.1540 / 470.9748 =
* --------
* 470.9748
* C -> AD 0 + (202.2 * 0.14*0.5) = 14.1540 / 470.9748 =
* C -> DF 0 + (202.2 * 0.16*0.5) = 16.1760 / 470.9748 =
* C -> ED 0 + (202.2 * 0.17*0.5) = 17.1870 / 470.9748 =
* C -> A 0 + (202.2 * 0.26*0.3) = 15.7716 / 470.9748 =
*
*
* D -> BC 23 + (202.2 * 0.19*0.5) = 42.2090 / 93.6862 =
* D -> B 26 + (202.2 * 0.42*0.3) = 51.4772 / 93.6862 =
* -------
* 93.6862
* D -> d2 0 + (202.2 * 0.36*0.2) = 14.5584 / 93.6862 = 0.2692589386776749
*
*
* E -> e1 39 + (202.2 * 0.37*0.2) = 53.9628 / 53.9628 = 1.0
* -------
* 53.9628
* E -> DF 0 + (202.2 * 0.21*0.5) = 21.2310 / 53.9628 =
* E -> BC 0 + (202.2 * 0.22*0.5) = 22.2420 / 53.9628 =
* E -> B 0 + (202.2 * 0.43*0.3) = 26.0838 / 53.9628 =
* E -> D 0 + (202.2 * 0.44*0.3) = 26.6904 / 53.9628 =
*/
assertEqualsLog(LogDouble((45 + (202.2 * 0.11 * 0.5)) / (202.2 + 323)), prodDist(BinaryProd(B, C), A), 1e-9)
assertEqualsLog(LogDouble((26 + (202.2 * 0.13 * 0.5)) / (202.2 + 323)), prodDist(BinaryProd(E, F), A), 1e-9)
assertEqualsLog(LogDouble((88 + (202.2 * 0.23 * 0.3)) / (202.2 + 323)), prodDist(UnaryProd(B), A), 1e-9)
assertEqualsLog(LogDouble((66 + (202.2 * 0.25 * 0.3)) / (202.2 + 323)), prodDist(UnaryProd(E), A), 1e-9)
assertEqualsLog(LogDouble((62 + (202.2 * 0.31 * 0.2)) / (202.2 + 323)), prodDist(TermProd("a1"), A), 1e-9)
assertEqualsLog(LogDouble((36 + (202.2 * 0.32 * 0.2)) / (202.2 + 323)), prodDist(TermProd("a2"), A), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.12 * 0.5)) / (202.2 + 323)), prodDist(BinaryProd(E, D), A), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.24 * 0.3)) / (202.2 + 323)), prodDist(UnaryProd(D), A), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.38 * 0.5)) / (202.2 + 323)), prodDist(BinaryProd(Z, Z), A), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.41 * 0.3)) / (202.2 + 323)), prodDist(UnaryProd(Z), A), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.46 * 0.2)) / (202.2 + 323)), prodDist(TermProd("z"), A), 1e-9)
assertEqualsLog(LogDouble((47 + (202.2 * 0.15 * 0.5)) / (202.2 + 363)), prodDist(BinaryProd(D, E), C), 1e-9)
assertEqualsLog(LogDouble((27 + (202.2 * 0.18 * 0.5)) / (202.2 + 363)), prodDist(BinaryProd(E, F), C), 1e-9)
assertEqualsLog(LogDouble((90 + (202.2 * 0.27 * 0.3)) / (202.2 + 363)), prodDist(UnaryProd(D), C), 1e-9)
assertEqualsLog(LogDouble((67 + (202.2 * 0.28 * 0.3)) / (202.2 + 363)), prodDist(UnaryProd(E), C), 1e-9)
assertEqualsLog(LogDouble((65 + (202.2 * 0.33 * 0.2)) / (202.2 + 363)), prodDist(TermProd("c1"), C), 1e-9)
assertEqualsLog(LogDouble((29 + (202.2 * 0.34 * 0.2)) / (202.2 + 363)), prodDist(TermProd("c2"), C), 1e-9)
assertEqualsLog(LogDouble((38 + (202.2 * 0.35 * 0.2)) / (202.2 + 363)), prodDist(TermProd("c3"), C), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.14 * 0.5)) / (202.2 + 363)), prodDist(BinaryProd(A, D), C), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.16 * 0.5)) / (202.2 + 363)), prodDist(BinaryProd(D, F), C), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.17 * 0.5)) / (202.2 + 363)), prodDist(BinaryProd(E, D), C), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.26 * 0.3)) / (202.2 + 363)), prodDist(UnaryProd(A), C), 1e-9)
assertEqualsLog(LogDouble((23 + (202.2 * 0.19 * 0.5)) / (202.2 + 49)), prodDist(BinaryProd(B, C), D), 1e-9)
assertEqualsLog(LogDouble((26 + (202.2 * 0.42 * 0.3)) / (202.2 + 49)), prodDist(UnaryProd(B), D), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.36 * 0.2)) / (202.2 + 49)), prodDist(TermProd("d2"), D), 1e-9)
assertEqualsLog(LogDouble((39 + (202.2 * 0.37 * 0.2)) / (202.2 + 39)), prodDist(TermProd("e1"), E), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.21 * 0.5)) / (202.2 + 39)), prodDist(BinaryProd(D, F), E), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.22 * 0.5)) / (202.2 + 39)), prodDist(BinaryProd(B, C), E), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.43 * 0.3)) / (202.2 + 39)), prodDist(UnaryProd(B), E), 1e-9)
assertEqualsLog(LogDouble((0 + (202.2 * 0.44 * 0.3)) / (202.2 + 39)), prodDist(UnaryProd(D), E), 1e-9)
assertEqualsLog(LogDouble(0.39 * 0.5), prodDist(BinaryProd(Z, Z), Z), 1e-9)
assertEqualsLog(LogDouble(0.45 * 0.3), prodDist(UnaryProd(Z), Z), 1e-9)
assertEqualsLog(LogDouble(0.47 * 0.2), prodDist(TermProd("z"), Z), 1e-9)
mockResultingParser
}
}
val sampledTrees = Vector[CcgTree](s1, s2)
val absct = new AlphaBetaSupPcfgTrainer(
priorRootDist, priorBinyDist, priorUnryDist, priorTermDist,
alphaRoot, alphaProd,
priorBinyProdMix = 0.5, priorUnryProdMix = 0.3, priorTermProdMix = 0.2,
mockProductionFinder,
mockPcfgParserInstantiater)
val parser: GuideChartParser = absct.train(sampledTrees)
assertSame(mockResultingParser, parser)
}
def assertEqualsLog(a: LogDouble, b: LogDouble, e: Double) {
assertEquals(a.toDouble, b.toDouble, e)
}
}
|
dhgarrette/2015-ccg-parsing
|
src/test/scala/dhg/ccg/parse/pcfg/SupPcfgTrainerTests.scala
|
Scala
|
apache-2.0
| 12,953 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.read
import org.bdgenomics.adam.rdd.ADAMContext._
import htsjdk.samtools.{ TextCigarCodec, ValidationStringency }
import org.bdgenomics.utils.misc.Logging
// NOTE(ryan): this is necessary for Spark <= 1.2.1.
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.ReferenceRegion
import org.bdgenomics.adam.util.{ ReferenceFile, MdTag }
import org.bdgenomics.formats.avro.AlignmentRecord
case class MDTagging(
reads: RDD[AlignmentRecord],
@transient referenceFile: ReferenceFile,
partitionSize: Long = 1000000,
overwriteExistingTags: Boolean = false,
validationStringency: ValidationStringency = ValidationStringency.STRICT) extends Logging {
@transient val sc = reads.sparkContext
val mdTagsAdded = sc.accumulator(0L, "MDTags Added")
val mdTagsExtant = sc.accumulator(0L, "MDTags Extant")
val numUnmappedReads = sc.accumulator(0L, "Unmapped Reads")
val incorrectMDTags = sc.accumulator(0L, "Incorrect Extant MDTags")
val taggedReads = addMDTagsBroadcast.cache
def maybeMDTagRead(read: AlignmentRecord, refSeq: String): AlignmentRecord = {
val cigar = TextCigarCodec.decode(read.getCigar)
val mdTag = MdTag(read.getSequence, refSeq, cigar, read.getStart)
if (read.getMismatchingPositions != null) {
mdTagsExtant += 1
if (mdTag.toString != read.getMismatchingPositions) {
incorrectMDTags += 1
if (overwriteExistingTags) {
read.setMismatchingPositions(mdTag.toString)
} else {
val exception = IncorrectMDTagException(read, mdTag.toString)
if (validationStringency == ValidationStringency.STRICT) {
throw exception
} else if (validationStringency == ValidationStringency.LENIENT) {
log.warn(exception.getMessage)
}
}
}
} else {
read.setMismatchingPositions(mdTag.toString)
mdTagsAdded += 1
}
read
}
def addMDTagsBroadcast(): RDD[AlignmentRecord] = {
val referenceFileB = sc.broadcast(referenceFile)
reads.map(read => {
(for {
contig <- Option(read.getContigName)
if read.getReadMapped
} yield {
maybeMDTagRead(read, referenceFileB.value.extract(ReferenceRegion(read)))
}).getOrElse({
numUnmappedReads += 1
read
})
})
}
}
object MDTagging {
def apply(
reads: RDD[AlignmentRecord],
referenceFile: String,
fragmentLength: Long,
overwriteExistingTags: Boolean,
validationStringency: ValidationStringency): RDD[AlignmentRecord] = {
val sc = reads.sparkContext
new MDTagging(
reads,
sc.loadReferenceFile(referenceFile, fragmentLength = fragmentLength),
partitionSize = fragmentLength,
overwriteExistingTags,
validationStringency
).taggedReads
}
}
case class IncorrectMDTagException(read: AlignmentRecord, mdTag: String) extends Exception {
override def getMessage: String =
s"Read: ${read.getReadName}, pos: ${read.getContigName}:${read.getStart}, cigar: ${read.getCigar}, existing MD tag: ${read.getMismatchingPositions}, correct MD tag: $mdTag"
}
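// Illustrative usage sketch, not part of the original file: re-tagging an
// already-loaded RDD of reads via the companion apply. The fragment length and
// stringency are example choices; referencePath must point at a reference the
// executors can read.
object MDTaggingExample {
  def retag(reads: RDD[AlignmentRecord], referencePath: String): RDD[AlignmentRecord] =
    MDTagging(
      reads,
      referencePath,
      fragmentLength = 1000000L,
      overwriteExistingTags = true,
      validationStringency = ValidationStringency.LENIENT)
}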
|
tdanford/adam
|
adam-core/src/main/scala/org/bdgenomics/adam/rdd/read/MDTagging.scala
|
Scala
|
apache-2.0
| 3,972 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.harness
import java.lang.{Integer => JInt}
import java.util.concurrent.ConcurrentLinkedQueue
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.api.bridge.scala._
import org.apache.flink.table.api.bridge.scala.internal.StreamTableEnvironmentImpl
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.planner.runtime.utils.StreamingWithStateTestBase.StateBackendMode
import org.apache.flink.table.planner.utils.{Top3WithMapView, Top3WithRetractInput}
import org.apache.flink.table.runtime.util.RowDataHarnessAssertor
import org.apache.flink.table.runtime.util.StreamRecordUtils.{deleteRecord, insertRecord}
import org.apache.flink.types.Row
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.{Before, Test}
import java.time.Duration
import scala.collection.mutable
@RunWith(classOf[Parameterized])
class TableAggregateHarnessTest(mode: StateBackendMode) extends HarnessTestBase(mode) {
@Before
override def before(): Unit = {
super.before()
val setting = EnvironmentSettings.newInstance().inStreamingMode().build()
val config = new TestTableConfig
this.tEnv = StreamTableEnvironmentImpl.create(env, setting, config)
}
val data = new mutable.MutableList[(Int, Int)]
@Test
def testTableAggregate(): Unit = {
val top3 = new Top3WithMapView
tEnv.registerFunction("top3", top3)
val source = env.fromCollection(data).toTable(tEnv, 'a, 'b)
val resultTable = source
.groupBy('a)
.flatAggregate(top3('b) as ('b1, 'b2))
.select('a, 'b1, 'b2)
tEnv.getConfig.setIdleStateRetention(Duration.ofSeconds(2))
val testHarness = createHarnessTester(
resultTable.toRetractStream[Row], "GroupTableAggregate")
val assertor = new RowDataHarnessAssertor(
Array(
DataTypes.INT().getLogicalType,
DataTypes.INT().getLogicalType,
DataTypes.INT().getLogicalType))
testHarness.open()
val expectedOutput = new ConcurrentLinkedQueue[Object]()
    // set TtlTimeProvider to 1
testHarness.setStateTtlProcessingTime(1)
// input with two columns: key and value
testHarness.processElement(insertRecord(1: JInt, 1: JInt))
    // output with three columns: key, value, value; each emitted value is in the top 3 for the key
expectedOutput.add(insertRecord(1: JInt, 1: JInt, 1: JInt))
testHarness.processElement(insertRecord(1: JInt, 2: JInt))
expectedOutput.add(deleteRecord(1: JInt, 1: JInt, 1: JInt))
expectedOutput.add(insertRecord(1: JInt, 1: JInt, 1: JInt))
expectedOutput.add(insertRecord(1: JInt, 2: JInt, 2: JInt))
testHarness.processElement(insertRecord(1: JInt, 3: JInt))
expectedOutput.add(deleteRecord(1: JInt, 1: JInt, 1: JInt))
expectedOutput.add(deleteRecord(1: JInt, 2: JInt, 2: JInt))
expectedOutput.add(insertRecord(1: JInt, 1: JInt, 1: JInt))
expectedOutput.add(insertRecord(1: JInt, 2: JInt, 2: JInt))
expectedOutput.add(insertRecord(1: JInt, 3: JInt, 3: JInt))
testHarness.processElement(insertRecord(1: JInt, 2: JInt))
expectedOutput.add(deleteRecord(1: JInt, 1: JInt, 1: JInt))
expectedOutput.add(deleteRecord(1: JInt, 2: JInt, 2: JInt))
expectedOutput.add(deleteRecord(1: JInt, 3: JInt, 3: JInt))
expectedOutput.add(insertRecord(1: JInt, 2: JInt, 2: JInt))
expectedOutput.add(insertRecord(1: JInt, 2: JInt, 2: JInt))
expectedOutput.add(insertRecord(1: JInt, 3: JInt, 3: JInt))
// ingest data with key value of 2
testHarness.processElement(insertRecord(2: JInt, 2: JInt))
expectedOutput.add(insertRecord(2: JInt, 2: JInt, 2: JInt))
    // set TtlTimeProvider to 3002 to trigger expired state cleanup
testHarness.setStateTtlProcessingTime(3002)
testHarness.processElement(insertRecord(1: JInt, 2: JInt))
expectedOutput.add(insertRecord(1: JInt, 2: JInt, 2: JInt))
val result = testHarness.getOutput
assertor.assertOutputEqualsSorted("result mismatch", expectedOutput, result)
testHarness.close()
}
@Test
def testTableAggregateWithRetractInput(): Unit = {
val top3 = new Top3WithRetractInput
tEnv.registerFunction("top3", top3)
val source = env.fromCollection(data).toTable(tEnv, 'a, 'b)
val resultTable = source
.groupBy('a)
.select('b.sum as 'b)
.flatAggregate(top3('b) as ('b1, 'b2))
.select('b1, 'b2)
tEnv.getConfig.setIdleStateRetention(Duration.ofSeconds(2))
val testHarness = createHarnessTester(
resultTable.toRetractStream[Row], "GroupTableAggregate")
val assertor = new RowDataHarnessAssertor(
Array(
DataTypes.INT().getLogicalType,
DataTypes.INT().getLogicalType))
testHarness.open()
val expectedOutput = new ConcurrentLinkedQueue[Object]()
    // set TtlTimeProvider to 1
testHarness.setStateTtlProcessingTime(1)
    // input with a single column: the retractable summed value
testHarness.processElement(insertRecord(1: JInt))
    // output with two columns: value, value; each emitted value is in the top 3
expectedOutput.add(insertRecord(1: JInt, 1: JInt))
testHarness.processElement(deleteRecord(1: JInt))
expectedOutput.add(deleteRecord(1: JInt, 1: JInt))
testHarness.processElement(insertRecord(3: JInt))
expectedOutput.add(insertRecord(3: JInt, 3: JInt))
testHarness.processElement(insertRecord(4: JInt))
expectedOutput.add(deleteRecord(3: JInt, 3: JInt))
expectedOutput.add(insertRecord(3: JInt, 3: JInt))
expectedOutput.add(insertRecord(4: JInt, 4: JInt))
testHarness.processElement(deleteRecord(3: JInt))
expectedOutput.add(deleteRecord(3: JInt, 3: JInt))
expectedOutput.add(deleteRecord(4: JInt, 4: JInt))
expectedOutput.add(insertRecord(4: JInt, 4: JInt))
testHarness.processElement(insertRecord(5: JInt))
expectedOutput.add(deleteRecord(4: JInt, 4: JInt))
expectedOutput.add(insertRecord(4: JInt, 4: JInt))
expectedOutput.add(insertRecord(5: JInt, 5: JInt))
val result = testHarness.getOutput
assertor.assertOutputEqualsSorted("result mismatch", expectedOutput, result)
testHarness.close()
}
}
|
clarkyzl/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/harness/TableAggregateHarnessTest.scala
|
Scala
|
apache-2.0
| 7,003 |
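The harness above only references Top3WithMapView and Top3WithRetractInput from the planner test utilities. For orientation, here is a minimal sketch of a top-3 style TableAggregateFunction that emits (value, value) pairs like the expected records above; it is an illustrative stand-in, not the actual test utility:

import java.lang.{Integer => JInt}
import org.apache.flink.api.java.tuple.{Tuple2 => JTuple2}
import org.apache.flink.table.functions.TableAggregateFunction
import org.apache.flink.util.Collector

// Accumulator holding the three largest values seen so far.
class Top3Accumulator {
  var values: List[Int] = Nil
}

class Top3 extends TableAggregateFunction[JTuple2[JInt, JInt], Top3Accumulator] {
  override def createAccumulator(): Top3Accumulator = new Top3Accumulator

  // Called once per input row; keeps only the three largest values.
  def accumulate(acc: Top3Accumulator, value: JInt): Unit = {
    acc.values = (value.intValue :: acc.values).sorted(Ordering[Int].reverse).take(3)
  }

  // Emits one (value, value) row per retained value, mirroring the expected records above.
  def emitValue(acc: Top3Accumulator, out: Collector[JTuple2[JInt, JInt]]): Unit = {
    acc.values.foreach(v => out.collect(JTuple2.of(v: JInt, v: JInt)))
  }
}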
package offGridOrcs
final case class Tile(
position: Vec2,
structure: Tile.Structure,
orc: Option[Reference.Orc],
demon: Option[Reference.Demon],
building: Option[Reference.Building],
goal: Option[Reference.Goal],
stock: Stock
)
object Tile {
sealed trait Structure
final case class Trees(shade: TreesShade) extends Structure
final case class Grass(shade: GrassShade) extends Structure
final case class Building(stage: BuildingStage) extends Structure
sealed trait TreesShade
sealed trait GrassShade
final case class NoShade() extends TreesShade with GrassShade
final case class HardHighlight() extends TreesShade
final case class SoftHighlight() extends TreesShade with GrassShade
final case class SoftShadow() extends TreesShade with GrassShade
final case class HardShadow() extends GrassShade
sealed trait BuildingStage
final case class Flooring() extends BuildingStage
final case class Walls() extends BuildingStage
final case class Roof() extends BuildingStage
final case class Decal() extends BuildingStage
}
|
dcecile/off-grid-orcs
|
src/Tile.scala
|
Scala
|
mit
| 1,065 |
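Since Tile.Structure is a sealed hierarchy, matches over it are exhaustiveness-checked by the compiler. A small hedged sketch (the describe helper is hypothetical, not part of the project):

def describe(structure: Tile.Structure): String = structure match {
  case Tile.Trees(shade)    => s"trees with $shade"
  case Tile.Grass(shade)    => s"grass with $shade"
  case Tile.Building(stage) => s"building at stage $stage"
}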
package com.dedup
import com.dedup.PartitionEstimator
import com.dedup.{DeDupPairRDD, SerializableHelpers}
import com.dedup.SimpleModuloPartitioner
import org.apache.hadoop.fs.Path
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.expressions.GenericRowWithSchema
import org.apache.spark.sql.{DataFrame, Row, SQLContext}
import org.apache.spark.{Partitioner, SparkContext}
import scala.collection.immutable.HashSet
import scala.util.{Failure, Success, Try}
class DeDuplicateMergeData(sparkContext: SparkContext, sqlContext: SQLContext) {
/**
* Steps in this ETL.
* 1 Partitioning and deduplicating new data
* - Prepare (make a pair rdd) and partition (spark memory partition) the new data.
* - Remove duplicates using Hash table for new data. (Remove duplicates present in incoming raw data)
* - Create a HashSet of the new data.
   * - Check if it is the first load; if so, return the deduplicated data and the HashSet and stop.
* 2 Get ready to merge with old data. -> We know it is not a first load
   * - Load the HashSet of the old data; reading should return empty if the index is missing (it may have been deleted).
   * - If the existing HashSet is not present, rebuild it from the existing data.
* 3 Merge
   * - If no old data is available, this is the first load; load the new data as is.
   * - Co-group the two data sets and drop records that were already loaded, based on the available HashSet.
* - Now we have the actual data that needs to be appended
* 4 Create/Update HashSet index
* - Create the HashSet Index, for the incoming data set.
* - If this is the first run, store this HashSet index.
* - If HashSet already existed for old data, update and overwrite the new HashSet.
* - Handle failures for writing and storing this HashSet.
*
* Returns
* - The deduplicated data that should be appended
* - The total HashSet index that would be valid after appending the data.
* - --- Since the input and output are data frames, hence the HashSet is not overwritten by this etl function.
* - --- A helper function is provided to over the HashSet index to disk. However appending Data Frame should
* - --- happen first, and then the HashSet should be written. If writing HashSet fails, it should be deleted
* - --- to prevent having an invalid HashSet Index. It will get re-created in the next run.
*
*/
// TODO Document this method
// TODO - Return type is the final merged data (to be appended) and the MERGED (Complete) HashSet
// The entry point for the ETL
def runEtl(existingData: DataFrame,
newData: DataFrame,
hashSetIndexLocation: String,
partitionColumn: String,
primaryKeys: Seq[String],
newDataPartitionEstimator: PartitionEstimator): (DataFrame, RDD[(Int, HashSet[Seq[String]])]) = {
// 1 Prepare new Data
// Step 1.1 - Prepare and partition the new data
val newDataPairRDD = makePairRDD(newData, partitionColumn)
// Step 1.1 - Partition the new data
val newDataNumPartitions = newDataPartitionEstimator.estimate
require(newDataNumPartitions > 0, "Number of partitions of new data cannot be 0")
val newDataPartitioner = new SimpleModuloPartitioner[Int, Row](newDataNumPartitions)
val newDataPartitionedRDD = newDataPairRDD.partitionBy(newDataPartitioner)
// Step 1.2 - Remove duplicates from new data.
val deDupedNewData = deDupPairRDDwithHashSet(newDataPartitionedRDD, primaryKeys = primaryKeys)
    // Step 1.3 - Create a HashSet Index for the new data
val newDataHashSet = createHashSetIndex(newDataPartitionedRDD, newDataPartitioner, primaryKeys)
.cache // This Hash Set should be cached as it would be saved later
// Step 1.4 - For first load, return the new data.
if (isFirstLoad(existingData)) {
// Existing data is not available, hence HashSet also not available, so no Merge necessary for HashSet Index
val deDupedNewDataDf = newData
.sqlContext
.createDataFrame(deDupedNewData.map(_._2), newData.schema)
return (deDupedNewDataDf, newDataHashSet)
}
// Step 2.1 - Read Or Rebuild old data HashSet
// It is not the first load now. Regular data load.
val existingHashSetRDD =
if (!isFirstLoad(existingData) && !isHashSetIndexAvailable(hashSetIndexLocation)) {
// Incremental Load but HashSet is not available. Rebuild from scratch for existing data.
// Rebuilding requires the knowledge of exact number of partitions, and hence is costly.
val existingPartitions = existingData.select(partitionColumn).distinct.count.toInt // TODO with Spark 1.5.2, the distinct doesn't convert to group by. Add custom strategy
val existingDataPartitioner = new SimpleModuloPartitioner[Int, Row](existingPartitions)
val existingPairRDD = makePairRDD(existingData, partitionColumn)
val existingPartitionedPairRDD = existingPairRDD.partitionBy(existingDataPartitioner)
createHashSetIndex(existingPartitionedPairRDD, existingDataPartitioner, primaryKeys)
}
else
readHashSetIndex(hashSetIndexLocation).get // We use get, since we know that the HashSet is defined now
existingHashSetRDD.cache // Since this would be used multiple times
// Step 3 - The actual merge operation
val filteredRdd = filterOldRecordsUsingHashSet(deDupedNewData, existingHashSetRDD, newDataPartitioner, primaryKeys)
// Convert the filtered data to a Data Frame
val mergedDf = newData
.sqlContext
.createDataFrame(filteredRdd.map(_._2), newData.schema)
// Step 4 - Merge the HashSets
val mergedPartitioner = new SimpleModuloPartitioner[Int, HashSet[Seq[String]]](filteredRdd.partitions.length)
val mergedHashSet = mergeHashSetIndex(Seq(existingHashSetRDD, newDataHashSet), mergedPartitioner)
(mergedDf, mergedHashSet)
}
private def isHashSetIndexAvailable(hashSetIndexLocation: String): Boolean = {
val hashSetIndex = readHashSetIndex(hashSetIndexLocation)
hashSetIndex.isDefined && hashSetIndex.nonEmpty
}
private def isFirstLoad(existingData: DataFrame) = existingData.take(1).isEmpty
private def readHashSetIndex(hashSetIndexLocation: String): Option[RDD[(Int, HashSet[Seq[String]])]] = {
val filesystemPath = new Path(hashSetIndexLocation)
val fs = filesystemPath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)
if (!fs.exists(filesystemPath)) None
else {
val existingHashSetRDD = sparkContext.objectFile[(Int, HashSet[Seq[String]])](hashSetIndexLocation)
Some(existingHashSetRDD)
}
}
private def deDupPairRDDwithHashSet(dataWithDuplicates: RDD[(Int, Row)],
primaryKeys: Seq[String]): DeDupPairRDD[Int, Row] =
new DeDupPairRDD[Int, Row](dataWithDuplicates, primaryKeys)
private def makePairRDD(dataFrame: DataFrame, partitionColumn: String): RDD[(Int, Row)] =
dataFrame.rdd.keyBy(row => row.getAs[Int](partitionColumn))
private def createHashSetIndex(partitionedPairRDD: RDD[(Int, Row)],
partitioner: Partitioner,
primaryKeys: Seq[String]): RDD[(Int, HashSet[Seq[String]])] = {
// Create a HashSet (or a Bloom Filter) index for each partition
val createCombiner = (row: Row) => HashSet(SerializableHelpers.getKeysFromRow(row.asInstanceOf[GenericRowWithSchema], primaryKeys))
val mergeValue = (C: HashSet[Seq[String]], V: Row) => C + SerializableHelpers.getKeysFromRow(V.asInstanceOf[GenericRowWithSchema], primaryKeys)
val mergeCombiners = (C1: HashSet[Seq[String]], C2: HashSet[Seq[String]]) => C1 ++ C2
// Pass our partitioner to prevent repartitioning / shuffle
partitionedPairRDD.combineByKey(
createCombiner = createCombiner,
mergeValue = mergeValue,
mergeCombiners = mergeCombiners,
partitioner = partitioner)
}
private def filterOldRecordsUsingHashSet(deDupedNewData: RDD[(Int, Row)],
existingHashSetRDD: RDD[(Int, HashSet[Seq[String]])],
newDataPartitioner: Partitioner,
primaryKeys: Seq[String]): RDD[(Int, Row)] = {
// Step 3 - Merge - this is an incremental load, and old data is already available.
    // Step 3.1 - Cogroup the data. Passing the same partitioner as the new data should prevent repartitioning
val coGroupedRDD = deDupedNewData.cogroup(existingHashSetRDD, partitioner = newDataPartitioner)
// Step 3.2 - Remove duplicates using old data HashSet - the actual merge operation
coGroupedRDD.flatMapValues {
case (vs, Seq()) => // This is a new partition and this wasn't present in old data
vs.iterator
case (vs, ws) => // This is a partition which is there in old data as well as new data
val newRecordsIterator = vs.iterator
val existingHashSet = ws.iterator.toList.headOption.getOrElse(HashSet()) // We expect only one HashSet
newRecordsIterator.filter({ newRecord =>
// Filter already existing data
!existingHashSet.contains(SerializableHelpers.getKeysFromRow(newRecord.asInstanceOf[GenericRowWithSchema], primaryKeys))
})
// Ignore the case for only old partition with no new partition present -> case (Seq(), ws)
}
}
private def mergeHashSetIndex(rdds: Seq[RDD[(Int, HashSet[Seq[String]])]],
partitioner: Partitioner): RDD[(Int, HashSet[Seq[String]])] = {
// Apply reduce to union all the given rdds into one rdd.
// This would forget the previous partitioning and simply increase the number of partitions.
val unionedRDD = rdds.reduce(_.union(_))
    // The following function for merging two HashSets works for merging values as well as combiners
    val mergeValuesAndCombiners = (C1: HashSet[Seq[String]], C2: HashSet[Seq[String]]) => C1 ++ C2
    // Because after co-grouping we expect only one HashSet per key
    unionedRDD.combineByKey(createCombiner = (row: HashSet[Seq[String]]) => row,
      mergeValue = mergeValuesAndCombiners,
      mergeCombiners = mergeValuesAndCombiners,
partitioner = partitioner) // This is required to force repartitioning as union would have likely increased the partitions
}
def overwriteHashSetRDD(hashSetIndexLocation: String, rdd: RDD[(Int, HashSet[Seq[String]])]) = {
// TODO Should it write to a tmp folder underneath and on success move the files ???
// Delete already existing file
val filesystemPath = new Path(hashSetIndexLocation)
val fs = filesystemPath.getFileSystem(sqlContext.sparkContext.hadoopConfiguration)
if (fs.exists(filesystemPath))
fs.delete(filesystemPath, true)
// Write to the specified location
Try(rdd.saveAsObjectFile(hashSetIndexLocation))
match {
      case Failure(ex) => println(s"FATAL ERROR : Failed to save index (${ex.getMessage}). Index location will be deleted.")
if (fs.exists(filesystemPath))
fs.delete(filesystemPath, true)
case Success(_) =>
}
}
}
|
sum-coderepo/spark-scala
|
src/main/scala/com/dedup/DeDuplicateMergeData.scala
|
Scala
|
apache-2.0
| 11,151 |
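A hedged driver sketch for the ETL above, following the ordering the class documentation calls for (append the deduplicated DataFrame first, then persist the merged index); the paths, column names, input DataFrames, and estimator are placeholders:

val etl = new DeDuplicateMergeData(sc, sqlContext)

val (toAppend, mergedIndex) = etl.runEtl(
  existingData = existingDf,
  newData = newDf,
  hashSetIndexLocation = "/indexes/orders.hashset",
  partitionColumn = "partition_id",
  primaryKeys = Seq("order_id"),
  newDataPartitionEstimator = estimator)

// Append the new, deduplicated rows first ...
toAppend.write.mode("append").parquet("/data/orders")
// ... then persist the merged HashSet index; a failed write deletes the index
// so it can be rebuilt on the next run.
etl.overwriteHashSetRDD("/indexes/orders.hashset", mergedIndex)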