| code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64: 5–1M) |
|---|---|---|---|---|---|
package com.datastax.spark.connector.types
import scala.language.existentials
import scala.reflect.runtime.universe._
trait CollectionColumnType[T] extends ColumnType[T] {
def isCollection = true
protected def nestedElementTypeName(elemType: ColumnType[_]): String = elemType match {
case _: UserDefinedType => s"frozen<${elemType.cqlTypeName}>" // non-frozen user types are only supported at top-level
case _ => s"${elemType.cqlTypeName}"
}
}
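// Illustrative example (added comment; not part of the original file): nesting wraps the element's
// CQL name, so, assuming the connector's IntType renders as "int", ListType(SetType(IntType)).cqlTypeName
// would be "list<set<int>>", while a list of a user-defined type would render as "list<frozen<my_udt>>".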
case class ListType[T](
elemType: ColumnType[T],
override val isFrozen: Boolean = false) extends CollectionColumnType[Vector[T]] {
@transient
lazy val scalaTypeTag = {
implicit val elemTypeTag = elemType.scalaTypeTag
implicitly[TypeTag[Vector[T]]]
}
def cqlTypeName = s"list<${nestedElementTypeName(elemType)}>"
override def converterToCassandra: TypeConverter[_ <: AnyRef] =
new TypeConverter.OptionToNullConverter(TypeConverter.listConverter(elemType.converterToCassandra))
}
case class SetType[T](
elemType: ColumnType[T],
override val isFrozen: Boolean = false) extends CollectionColumnType[Set[T]] {
@transient
lazy val scalaTypeTag = {
implicit val elemTypeTag = elemType.scalaTypeTag
implicitly[TypeTag[Set[T]]]
}
def cqlTypeName = s"set<${nestedElementTypeName(elemType)}>"
override def converterToCassandra: TypeConverter[_ <: AnyRef] =
new TypeConverter.OptionToNullConverter(TypeConverter.setConverter(elemType.converterToCassandra))
}
case class MapType[K, V](
keyType: ColumnType[K],
valueType: ColumnType[V],
override val isFrozen: Boolean = false) extends CollectionColumnType[Map[K, V]] {
@transient
lazy val scalaTypeTag = {
implicit val keyTypeTag = keyType.scalaTypeTag
implicit val valueTypeTag = valueType.scalaTypeTag
implicitly[TypeTag[Map[K, V]]]
}
def cqlTypeName = s"map<${nestedElementTypeName(keyType)}, ${nestedElementTypeName(valueType)}>"
override def converterToCassandra: TypeConverter[_ <: AnyRef] =
new TypeConverter.OptionToNullConverter(
TypeConverter.mapConverter(keyType.converterToCassandra, valueType.converterToCassandra))
}
|
datastax/spark-cassandra-connector
|
driver/src/main/scala/com/datastax/spark/connector/types/CollectionColumnType.scala
|
Scala
|
apache-2.0
| 2,132 |
/*
* IJ-Plugins
* Copyright (C) 2002-2021 Jarek Sacha
* Author's email: jpsacha at gmail dot com
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Latest release available at https://github.com/ij-plugins/ijp-toolkit/
*/
package ij_plugins.toolkit.ui.progress
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should
/**
* @author Jarek Sacha
*/
class ProgressReporterSpec extends AnyFlatSpec with should.Matchers {
private class CounterWithProgress extends ProgressReporter {
def count(max: Int): Unit = {
for (i <- 1 to max)
notifyProgressListeners(i, max)
}
}
"ProgressReporter" should "accept progress listener as a lambda" in {
var testCounter = 0
testCounter should equal(0)
val c = new CounterWithProgress()
c.addProgressListener(_ => testCounter += 1)
c.count(7)
testCounter should equal(7)
}
"ProgressReporter" should "accept progress listener as an instance of ProgressListener" in {
var testCounter = 0
testCounter should equal(0)
val c = new CounterWithProgress()
c.addProgressListener(new ProgressListener {
override def progressNotification(e: ProgressEvent): Unit = {
testCounter += 2
}
})
c.count(7)
testCounter should equal(14)
}
}
|
ij-plugins/ijp-toolkit
|
src/test/scala/ij_plugins/toolkit/ui/progress/ProgressReporterSpec.scala
|
Scala
|
lgpl-2.1
| 2,005 |
package mesosphere.mesos
import mesosphere.marathon.core.launcher.impl.ResourceLabels
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.state.{ AppDefinition, ResourceRole }
import mesosphere.marathon.tasks.{ PortsMatch, PortsMatcher }
import mesosphere.mesos.protos.Resource
import org.apache.mesos.Protos
import org.apache.mesos.Protos.Offer
import org.slf4j.LoggerFactory
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.collection.immutable.Seq
import scala.collection.mutable
object ResourceMatcher {
type Role = String
private[this] val log = LoggerFactory.getLogger(getClass)
/**
* A successful match result of the [[ResourceMatcher]].matchResources method.
*/
case class ResourceMatch(scalarMatches: Iterable[ScalarMatch], portsMatch: PortsMatch) {
lazy val hostPorts: Seq[Int] = portsMatch.hostPorts
def scalarMatch(name: String): Option[ScalarMatch] = scalarMatches.find(_.resourceName == name)
def resources: Iterable[org.apache.mesos.Protos.Resource] =
scalarMatches.flatMap(_.consumedResources) ++ portsMatch.resources
}
/**
* Restricts which resources are considered for matching.
*
* Disk resources are always discarded, since we do not want to match them by
* accident.
*
* @param acceptedRoles contains all Mesos resource roles that are accepted
* @param reserved if reserved is true, only resources with a ReservationInfo
* are considered. If reserved is false, only resources without
* a ReservationInfo are considered.
* @param requiredLabels only resources with the given keys/values are matched.
*/
case class ResourceSelector(
acceptedRoles: Set[String], reserved: Boolean, requiredLabels: ResourceLabels = ResourceLabels.empty) {
def apply(resource: Protos.Resource): Boolean = {
// resources with disks are matched by the VolumeMatcher or not at all
val noAssociatedDisk = !resource.hasDisk
def hasRequiredLabels: Boolean = {
val labelMap: Map[String, String] =
if (!resource.hasReservation || !resource.getReservation.hasLabels)
Map.empty
else {
import scala.collection.JavaConverters._
resource.getReservation.getLabels.getLabelsList.asScala.iterator.map { label =>
label.getKey -> label.getValue
}.toMap
}
requiredLabels.labels.forall { case (k, v) => labelMap.get(k).contains(v) }
}
noAssociatedDisk && acceptedRoles(resource.getRole) && resource.hasReservation == reserved && hasRequiredLabels
}
override def toString: String = {
val reservedString = if (reserved) "RESERVED" else "unreserved"
val rolesString = acceptedRoles.mkString(", ")
val labelStrings = if (requiredLabels.labels.nonEmpty) s" and labels $requiredLabels" else ""
s"Considering $reservedString resources with roles {$rolesString}$labelStrings"
}
}
object ResourceSelector {
/** Match unreserved resources for which role == '*' applies (default) */
def wildcard: ResourceSelector = ResourceSelector(Set(ResourceRole.Unreserved), reserved = false)
}
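// Illustrative usage (added comment; not in the original file): the default selector matches
// unreserved "*" resources, while a selector for re-launching resident tasks on reserved resources
// could be built as follows ("slave_public" is a hypothetical Mesos role used only for illustration):
//   val unreserved = ResourceSelector.wildcard
//   val reserved   = ResourceSelector(Set("slave_public"), reserved = true)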
/**
* Checks whether the given offer contains enough resources to launch a task of the given app
* or to make a reservation for a task.
*
* If a task uses local volumes, this method is typically called twice for every launch: once
* for the reservation on UNRESERVED resources and once for every (re-)launch on RESERVED resources.
*
* If matching on RESERVED resources as specified by the ResourceSelector, resources for volumes
* have to be matched separately (e.g. by the [[PersistentVolumeMatcher]]). If matching on UNRESERVED
* resources, the disk resources for the local volumes are included since they must become part of
* the reservation.
*/
def matchResources(offer: Offer, app: AppDefinition, runningTasks: => Iterable[Task],
selector: ResourceSelector): Option[ResourceMatch] = {
val groupedResources: Map[Role, mutable.Buffer[Protos.Resource]] = offer.getResourcesList.asScala.groupBy(_.getName)
val scalarResourceMatch = matchScalarResource(groupedResources, selector) _
// Local volumes only need to be matched if we are making a reservation for resident tasks --
// that means if the resources that are matched are still unreserved.
val diskMatch = if (!selector.reserved && app.diskForPersistentVolumes > 0)
scalarResourceMatch(Resource.DISK, app.disk + app.diskForPersistentVolumes,
ScalarMatchResult.Scope.IncludingLocalVolumes)
else
scalarResourceMatch(Resource.DISK, app.disk, ScalarMatchResult.Scope.ExcludingLocalVolumes)
val scalarMatchResults = Iterable(
scalarResourceMatch(Resource.CPUS, app.cpus, ScalarMatchResult.Scope.NoneDisk),
scalarResourceMatch(Resource.MEM, app.mem, ScalarMatchResult.Scope.NoneDisk),
diskMatch
).filter(_.requiredValue != 0)
logUnsatisfiedResources(offer, selector, scalarMatchResults)
def portsMatchOpt: Option[PortsMatch] = new PortsMatcher(app, offer, selector).portsMatch
def meetsAllConstraints: Boolean = {
lazy val tasks = runningTasks.filter(_.launched.exists(_.appVersion >= app.versionInfo.lastConfigChangeVersion))
val badConstraints = app.constraints.filterNot { constraint =>
Constraints.meetsConstraint(tasks, offer, constraint)
}
if (badConstraints.nonEmpty && log.isInfoEnabled) {
log.info(
s"Offer [${offer.getId.getValue}]. Constraints for app [${app.id}] not satisfied.\\n" +
s"The conflicting constraints are: [${badConstraints.mkString(", ")}]"
)
}
badConstraints.isEmpty
}
if (scalarMatchResults.forall(_.matches)) {
for {
portsMatch <- portsMatchOpt
if meetsAllConstraints
} yield ResourceMatch(scalarMatchResults.collect { case m: ScalarMatch => m }, portsMatch)
}
else {
None
}
}
private[this] def matchScalarResource(
groupedResources: Map[Role, mutable.Buffer[Protos.Resource]], selector: ResourceSelector)(
name: String, requiredValue: Double,
scope: ScalarMatchResult.Scope = ScalarMatchResult.Scope.NoneDisk): ScalarMatchResult = {
require(scope == ScalarMatchResult.Scope.NoneDisk || name == Resource.DISK)
@tailrec
def findMatches(
valueLeft: Double,
resourcesLeft: Iterable[Protos.Resource],
resourcesConsumed: List[ScalarMatch.Consumption] = List.empty): ScalarMatchResult = {
if (valueLeft <= 0) {
ScalarMatch(name, requiredValue, resourcesConsumed, scope = scope)
}
else {
resourcesLeft.headOption match {
case None => NoMatch(name, requiredValue, requiredValue - valueLeft, scope = scope)
case Some(nextResource) =>
val consume = Math.min(valueLeft, nextResource.getScalar.getValue)
val newValueLeft = valueLeft - consume
val reservation = if (nextResource.hasReservation) Option(nextResource.getReservation) else None
val consumedValue = ScalarMatch.Consumption(consume, nextResource.getRole, reservation)
findMatches(newValueLeft, resourcesLeft.tail, consumedValue :: resourcesConsumed)
}
}
}
val resourcesForName = groupedResources.getOrElse(name, Iterable.empty)
val matchingScalarResources = resourcesForName.filter(selector(_))
findMatches(requiredValue, matchingScalarResources)
}
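// Worked example (added comment; values are hypothetical): matching a requirement of 2.5 cpus
// against two offered scalar resources of 1.0 and 2.0 consumes 1.0 from the first and 1.5 from the
// second, producing a ScalarMatch with two Consumptions; if only 2.0 cpus were offered in total,
// findMatches would return NoMatch(cpus, 2.5, 2.0).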
private[this] def logUnsatisfiedResources(offer: Offer,
selector: ResourceSelector,
scalarMatchResults: Iterable[ScalarMatchResult]): Unit = {
if (log.isInfoEnabled) {
if (scalarMatchResults.exists(!_.matches)) {
val basicResourceString = scalarMatchResults.mkString(", ")
log.info(
s"Offer [${offer.getId.getValue}]. " +
s"$selector. " +
s"Not all basic resources satisfied: $basicResourceString")
}
}
}
}
|
ss75710541/marathon
|
src/main/scala/mesosphere/mesos/ResourceMatcher.scala
|
Scala
|
apache-2.0
| 8,175 |
/*
* Copyright (C) 2005, The OpenURP Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.code.geo.model
import org.beangle.data.orm.MappingModule
class DefaultMapping extends MappingModule {
def binding(): Unit = {
bind[Division].declare { e =>
e.children.is(depends("parent"), orderby("code"))
}
bind[Country].declare { e =>
e.alpha2Code is length(2)
e.alpha3Code is length(3)
e.shortName is length(50)
}
bind[RailwayStation].declare { e =>
e.jianpin is length(50)
}
all.cacheAll()
}
}
|
openurp/api
|
code/src/main/scala/org/openurp/code/geo/model/mapping.scala
|
Scala
|
lgpl-3.0
| 1,207 |
package es.upm.oeg.epnoi.matching.metrics.similarity
import breeze.linalg.DenseVector
import es.upm.oeg.epnoi.matching.metrics.feature.LuceneTokenizer
import es.upm.oeg.epnoi.matching.metrics.utils.SparkWrapper
import org.apache.spark.mllib.feature.{Word2Vec, Word2VecModel}
import org.apache.spark.mllib.linalg
import org.apache.spark.mllib.linalg.Vectors
/**
* Created by cbadenes on 20/07/15.
*/
object WordVecCorpusExample {
// def createModel(path: String): Word2VecModel={
// val input = SparkWrapper
// .readCorpus(path)
// .map{case (f,x)=>LuceneTokenizer(x)}
// val word2vec = new Word2Vec()
// val model = word2vec.fit(input)
// model.save(SparkWrapper.sc, path)
// return model
// }
//
// def loadModel(path: String): Word2VecModel={
// return Word2VecModel.load(SparkWrapper.sc,path)
// }
def loadModel(path: String): Word2VecModel={
try{
return Word2VecModel.load(SparkWrapper.sc,path)
} catch{
case e: Exception =>{
val input = SparkWrapper
.readCorpus(path)
.map{case (f,x)=>LuceneTokenizer(x)}
val word2vec = new Word2Vec()
val model = word2vec.fit(input)
model.save(SparkWrapper.sc, path)
return model
}
}
}
def main(args: Array[String]): Unit = {
val path = "/Users/cbadenes/Documents/Academic/MUIA/TFM/ressist/resources/publications/oaipmh/**/**/*.txt"
val model = loadModel(path)
// val patient : linalg.Vector = model.transform("patient")
// val man : linalg.Vector = model.transform("man")
//
// val bv1 = new DenseVector(patient.toArray)
// val bv2 = new DenseVector(man.toArray)
//
// val vectout = Vectors.dense((bv1 + bv2).toArray)
model.getVectors.keys.foreach { word =>
print(s"$word,")
}
// model.getVectors.foreach{ case v =>
//
// println( v._1 + " -> " + v._2.toList )
//
// }
// val synonyms = model.findSynonyms(vectout,20)
//
// for ((synonym, cosineSimilarity) <- synonyms){
// println(s"$synonym $cosineSimilarity")
// }
}
}
|
cbadenes/epnoi-matching-metrics
|
src/test/scala/es/upm/oeg/epnoi/matching/metrics/similarity/WordVecCorpusExample.scala
|
Scala
|
apache-2.0
| 2,082 |
package chapter.four
object ExerciseEight extends App {
//todo: preconditions and generic
def minmax(values: Array[Int]) = {
(values.min, values.max)
}
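// Sketch of the generic variant hinted at by the TODO above (illustrative only, not the book's
// official solution; the Ordering-based signature is an assumption):
//   def minmax[T: Ordering](values: Array[T]): (T, T) = {
//     require(values.nonEmpty, "values must not be empty")
//     (values.min, values.max)
//   }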
}
|
deekim/impatient-scala
|
src/main/scala/chapter/four/ExerciseEight.scala
|
Scala
|
apache-2.0
| 168 |
package com.github.eerohele.expek
import net.sf.saxon.s9api._
import org.w3c.dom.{Node => DomNode}
/** A trait that contains XPath-related methods. */
trait XPathSupport {
import utils.Tap
val xpathCompiler: XPathCompiler
val builder: DocumentBuilder
object XPath {
/** Evaluate the given XPath query on the given context item.
*
* Note: this method isn't suitable for dealing with atomic values, since it's set to always return a node.
*
* The primary use case is to test XSLT templates that access nodes outside the current node. This is subject
* to change.
*
* Example:
*
* {{{
* "Apply a template that accesses an ancestor node" in {
* applying {
* // The ancestor element is set as the context node for the transformation.
* <ancestor copied="value"><descendant/></ancestor>,
* // Use XPath to select the element that you want to apply the templates for.
* XPath.select("ancestor/descendant")
* } must produce (<descendant copied="value"/>)
* }
* }}}
*/
def select(query: String)(contextItem: XdmItem): XdmNode = {
xpathCompiler.compile(query).load.tap(_.setContextItem(contextItem)).evaluate.asInstanceOf[XdmNode]
}
/** Check whether an item matches an XPath expression.
*
* Use to filter nodes when comparing XML trees.
*
* Example:
*
* {{{
* "Ignore an attribute" >> {
* applying(<x/>) must produce(<y/>)(filterAttr(!XPath.matches("@id", _)))
* }
* }}}
*/
def matches(query: String, contextItem: XdmItem): Boolean = {
val selector = xpathCompiler.compilePattern(query).load
selector.setContextItem(contextItem)
selector.effectiveBooleanValue
}
def matches(query: String, contextItem: DomNode): Boolean = {
matches(query, builder.wrap(contextItem))
}
}
}
|
eerohele/expek
|
src/main/scala/com/github/eerohele/expek/XPathSupport.scala
|
Scala
|
mit
| 2,159 |
/**
* Copyright (c) 2014-2016 Tim Bruijnzeels
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of this software, nor the names of its contributors, nor
* the names of the contributors' employers may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package nl.bruijnzeels.tim.rpki.ca.provisioning
import java.util.UUID
import javax.security.auth.x500.X500Principal
import net.ripe.rpki.commons.provisioning.identity.ChildIdentitySerializer
import org.scalatest.{FunSuite, Matchers}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class MyIdentityTest extends FunSuite with Matchers {
test("Should create identity") {
val id = UUID.randomUUID()
val myIdentity = MyIdentity.create(id)
myIdentity.id should equal(id)
myIdentity.identityCertificate.getSubject() should equal(new X500Principal("CN=" + id.toString))
myIdentity.keyPair should not be (null)
}
test("Should convert to rpki-commons ChildIdentity") {
val id = UUID.randomUUID()
val myIdentity = MyIdentity.create(id)
val childIdentity = new ChildIdentitySerializer().deserialize(myIdentity.toChildXml)
childIdentity.getIdentityCertificate() should equal (myIdentity.identityCertificate)
childIdentity.getHandle() should equal (id.toString)
}
}
|
timbru/rpki-ca
|
src/test/scala/nl/bruijnzeels/tim/rpki/ca/provisioning/MyIdentityTest.scala
|
Scala
|
bsd-3-clause
| 2,672 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.learn.hooks
import org.platanios.tensorflow.api.learn.ModelInstance
/** Represents hooks that may depend on the constructed model.
*
* This class offers the `modelInstance` field that sub-classes can access and that contains information specific to
* the created model. It is only updated when the model graph is constructed (i.e., it is not updated while recovering
* failed sessions).
*
* For example, a hook that logs the loss function value depends on the created loss op, or an evaluation hook may
* depend on multiple ops created as part of the model.
*
* @author Emmanouil Antonios Platanios
*/
trait ModelDependentHook[In, TrainIn, Out, TrainOut, Loss, EvalIn] extends Hook {
protected var modelInstance: ModelInstance[In, TrainIn, Out, TrainOut, Loss, EvalIn] = _
/** This method will be called by estimators at graph construction time, before `begin()`. It will **not** be called
* again if a session fails and is recovered. */
private[learn] final def setModelInstance(
modelInstance: ModelInstance[In, TrainIn, Out, TrainOut, Loss, EvalIn]
): Unit = {
this.modelInstance = modelInstance
}
}
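// Illustrative sketch (added comment; not part of the original source): a concrete hook could read
// `modelInstance` once the graph has been built, e.g. from the `begin()` callback referenced above.
// Everything except the `modelInstance` field itself is an assumption here.
//
//   class LogModelHook[In, TrainIn, Out, TrainOut, Loss, EvalIn]
//       extends ModelDependentHook[In, TrainIn, Out, TrainOut, Loss, EvalIn] {
//     override def begin(): Unit = println(s"Model instance available: $modelInstance")
//   }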
|
eaplatanios/tensorflow_scala
|
modules/api/src/main/scala/org/platanios/tensorflow/api/learn/hooks/ModelDependentHook.scala
|
Scala
|
apache-2.0
| 1,827 |
package yandex.metrika
import org.specs2.mutable._
import org.specs2.specification._
import play.api.libs.json._
class MetrikaSpec extends Specification with AllExpectations {
//initialize Metrika with FAKE parameters
var user_login = "api-metrika2" //fake login
var token = "05dd3dd84ff948fdae2bc4fb91f13e22" //fake token
var counter_id = 2215573 //fake counter id
var goal_id = 334423 //fake goal_id
var filter_id = 66943 //fake filter_id
var operation_id = 66955 //fake operation_id
var id = 2138128 //fake id
val m = Metrika(user_login, token)
/**
* Counters
*/
"getCounterList" should {
sequential
"send TRUE request" in {
val res = m.getCounterList()
res \\ "rows" must_== (JsNumber(2))
res \\\\ "code_status" map (_.as[String]) must_== (List("CS_NOT_FOUND", "CS_NOT_FOUND_HOME_LOAD_DATA"))
res \\\\ "id" map (_.as[String]) must_== (List("2215573", "2138128"))
}
}
"getCounter" should {
sequential
"send TRUE request" in {
val res = m.getCounter(counter_id)
val c = res \\ "counter"
c \\ "code_status" must_== (JsString("CS_NOT_FOUND"))
c \\ "permission" must_== (JsString("own"))
(c \\ "monitoring" \\ "emails").as[List[String]] must_== (List("[email protected]"))
}
}
/**
* Goals
*/
"getCounterGoalList" should {
sequential
"send TRUE request" in {
val res = m.getCounterGoalList(counter_id)
res \\ "goal" must_!= (JsNull)
(res \\\\ "detailed_statistics").length must_== (4)
res \\\\ "id" map (_.as[String]) must_== (List("334420", "334423", "334426", "334429"))
}
}
"getCounterGoal" should {
sequential
"send TRUE request" in {
val res = m.getCounterGoal(counter_id, goal_id)
res \\ "goal" must_!= (JsNull)
res \\ "goal" \\ "detailed_statistics" must_== (JsNumber(1))
res \\ "goal" \\ "flag" must_== (JsString("basket"))
res \\ "goal" \\ "id" must_== (JsString("334423"))
}
}
/**
* Filters
*/
"getCounterFilterList" should {
sequential
"send TRUE request" in {
val res = m.getCounterFilterList(counter_id)
res \\ "filters" must_!= (JsNull)
(res \\\\ "status").length must_== (6)
res \\\\ "id" map (_.as[String]) must_== (List("66940", "66928", "66943", "66946", "66949", "66952"))
}
}
"getCounterFilter" should {
sequential
"send TRUE request" in {
val res = m.getCounterFilter(counter_id, filter_id)
res \\ "filter" must_!= (JsNull)
res \\ "filter" \\ "status" must_== (JsString("active"))
res \\ "filter" \\ "end_ip" must_== (JsString("192.168.0.255"))
res \\ "filter" \\ "id" must_== (JsString("66943"))
}
}
/**
* Operations
*/
"getCounterOperationList" should {
sequential
"send TRUE request" in {
val res = m.getCounterOperationList(counter_id)
res \\ "operations" must_!= (JsNull)
(res \\\\ "status").length must_== (2)
res \\\\ "id" map (_.as[String]) must_== (List("66955", "66958"))
}
}
"getCounterOperation" should {
sequential
"send TRUE request" in {
val res = m.getCounterOperation(counter_id, operation_id)
res \\ "operation" must_!= (JsNull)
res \\ "operation" \\ "status" must_== (JsString("active"))
res \\ "operation" \\ "value" must_== (JsString("debug"))
res \\ "operation" \\ "id" must_== (JsString("66955"))
}
}
/**
* Grants
*/
"getCounterGrantList" should {
sequential
"send TRUE request" in {
val res = m.getCounterGrantList(counter_id)
res \\ "grants" must_!= (JsNull)
(res \\\\ "user_login").length must_== (2)
res \\\\ "perm" map (_.as[String]) must_== (List("public_stat", "view"))
}
}
"getCounterGrant" should {
sequential
"send TRUE request" in {
val res = m.getCounterGrant(counter_id, user_login)
res \\ "grant" must_!= (JsNull)
res \\ "grant" \\ "user_login" must_== (JsString("api-metrika2"))
res \\ "grant" \\ "created_at" must_== (JsString("2010-12-08 20:02:01"))
res \\ "grant" \\ "perm" must_== (JsString("view"))
}
}
/**
* Delegates
*/
"getDelegates" should {
sequential
"send TRUE request" in {
val res = m.getDelegates
res \\ "delegates" must_!= (JsNull)
(res \\\\ "user_login").length must_== (1)
res \\\\ "created_at" map (_.as[String]) must_== (List("2010-12-08 19:33:00"))
}
}
/**
* Accounts
*/
"getAccounts" should {
sequential
"send TRUE request" in {
val res = m.getAccounts
res \\ "accounts" must_!= (JsNull)
(res \\\\ "user_login").length must_== (1)
res \\\\ "created_at" map (_.as[String]) must_== (List("2010-12-08 19:32:03"))
}
}
/**
* Statistics
*/
/** Statistics Traffic **/
"getStatTrafficSummary" should {
sequential
"send TRUE request" in {
val res = m.getStatTrafficSummary(OParameters(id = Some(id)))
res \\ "id" must_== (JsString("2138128"))
res \\ "rows" must_== (JsNumber(7))
res \\ "date1" must_!= (JsNull)
res \\ "date2" must_!= (JsNull)
res \\ "totals" \\ "visits" must_!= (JsNull)
val data = (res \\ "data").as[List[JsValue]]
data.length must_== (7)
data.head \\ "denial" must_!= (JsNull)
}
}
/** Statistics Sources **/
"getStatSourcesPhrases" should {
sequential
"send TRUE request" in {
val res = m.getStatSourcesPhrases(OParameters(id = Some(id)))
res \\ "id" must_== (JsString("2138128"))
res \\ "rows" must_!= (JsNull)
res \\ "date1" must_!= (JsNull)
res \\ "date2" must_!= (JsNull)
res \\ "totals" \\ "visits" must_!= (JsNull)
val data = (res \\ "data").as[List[JsValue]]
data.length must_== ((res \\ "rows").as[Int])
data.head \\ "denial" must_!= (JsNull)
data.head \\ "id" must_== (JsString("1097347136981725315"))
data.head \\ "phrase" must_!= (JsNull)
}
}
"getStatSourcesDirectSummary" should {
sequential
"send TRUE request" in {
val res = m.getStatSourcesDirectSummary(OParameters(id = Some(id)))
res \\ "errors" must_!= (JsNull)
val data = (res \\ "errors").as[List[JsValue]]
data.length must_== (1)
data.head \\ "code" must_== (JsString("ERR_NO_DATA"))
}
}
/** Statistics Content **/
/** Statistics Geography **/
/** Statistics Demography **/
/** Statistics Tech **/
}
|
krispo/yandex-metrika
|
src/test/scala/yandex/metrika/MetrikaSpec.scala
|
Scala
|
mit
| 6,437 |
/*
* Copyright (C) 2014 GRNET S.A.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package gr.grnet.egi.vmcatcher.image.handler
import gr.grnet.egi.vmcatcher.event.ImageEvent
/**
*
*/
class JustLogHandler extends DequeueHandler {
def handle(event: ImageEvent, data: HandlerData): Unit =
data.log.info(s"event = $event")
}
|
grnet/snf-vmcatcher
|
src/main/scala/gr/grnet/egi/vmcatcher/image/handler/JustLogHandler.scala
|
Scala
|
gpl-3.0
| 939 |
package net.selenate.server
import scala.collection.mutable.HashMap
object MapCache {
def empty[K, V] = new MapCache[K, V]()
}
class MapCache[K, V]() {
private case object Lock
private val cache = HashMap.empty[K, V]
def add(key: K, value: V) = Lock.synchronized {
cache.put(key, value)
}
def remove(key: K) = Lock.synchronized {
cache.remove(key)
}
def get(key: K) = Lock.synchronized {
cache.get(key)
}
}
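// Illustrative usage (added comment; not in the original file):
//   val cache = MapCache.empty[String, Int]
//   cache.add("answer", 42)
//   cache.get("answer")    // Some(42)
//   cache.remove("answer") // Some(42); a subsequent get returns None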
|
tferega/selenate
|
code/Server/src/main/scala/net/selenate/server/MapCache.scala
|
Scala
|
bsd-3-clause
| 443 |
package org.akoshterek.backgammon.eval
object Gammons {
/* gammon possible by side on roll */
val G_POSSIBLE: Int = 0x1
/* backgammon possible by side on roll */
val BG_POSSIBLE: Int = 0x2
/* gammon possible by side not on roll */
val OG_POSSIBLE: Int = 0x4
/* backgammon possible by side not on roll */
val OBG_POSSIBLE: Int = 0x8
}
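// Illustrative use of the bit flags (added comment; values are hypothetical):
//   val flags = Gammons.G_POSSIBLE | Gammons.OG_POSSIBLE          // 0x5: gammon possible for either side
//   val backgammonPossible = (flags & Gammons.BG_POSSIBLE) != 0   // false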
|
akoshterek/MultiGammonJava
|
multi-gammon-core/src/main/java/org/akoshterek/backgammon/eval/Gammons.scala
|
Scala
|
gpl-3.0
| 350 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.fs.tools.stats
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.fs.FileSystemDataStore
import org.locationtech.geomesa.fs.tools.stats.FsStatsBoundsCommand.FsStatsBoundsParams
import org.locationtech.geomesa.fs.tools.{FsDataStoreCommand, FsParams}
import org.locationtech.geomesa.tools.RequiredTypeNameParam
import org.locationtech.geomesa.tools.stats.{StatsBoundsCommand, StatsBoundsParams}
class FsStatsBoundsCommand extends StatsBoundsCommand[FileSystemDataStore] with FsDataStoreCommand {
override val params = new FsStatsBoundsParams
}
object FsStatsBoundsCommand {
@Parameters(commandDescription = "View or calculate bounds on attributes in a GeoMesa feature type")
class FsStatsBoundsParams extends StatsBoundsParams with FsParams with RequiredTypeNameParam
}
|
boundlessgeo/geomesa
|
geomesa-fs/geomesa-fs-tools/src/main/scala/org/locationtech/geomesa/fs/tools/stats/FsStatsBoundsCommand.scala
|
Scala
|
apache-2.0
| 1,295 |
package com.productfoundry.akka.cqrs.publish
import com.productfoundry.akka.cqrs.AggregateEventRecord
import com.productfoundry.akka.messaging.MessageSubscriber
/**
* Indicates this actor receives event publications.
*/
trait EventSubscriber extends MessageSubscriber {
type ReceiveEventRecord = PartialFunction[AggregateEventRecord, Unit]
/**
* Can be used as default receive function to handle published events.
*/
def receivePublishedEvent: Receive = {
case publication: EventPublication =>
publication.confirmIfRequested()
eventReceived.applyOrElse(publication.eventRecord, unhandled)
case eventRecord: AggregateEventRecord if eventReceived.isDefinedAt(eventRecord) =>
eventReceived(eventRecord)
}
/**
* Partial function to handle published aggregate event records.
*/
def eventReceived: ReceiveEventRecord = PartialFunction.empty
}
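// Illustrative subscriber (added comment; assumes the mixing-in class is an Akka Actor, which the
// Receive type and the call to unhandled above imply):
//   class ReadModelProjector extends Actor with EventSubscriber {
//     override def receive: Receive = receivePublishedEvent
//     override def eventReceived: ReceiveEventRecord = {
//       case eventRecord => // update the read model from eventRecord
//     }
//   }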
|
odd/akka-cqrs
|
core/src/main/scala/com/productfoundry/akka/cqrs/publish/EventSubscriber.scala
|
Scala
|
apache-2.0
| 895 |
/*
* OpenURP, Open University Resouce Planning
*
* Copyright (c) 2013-2014, OpenURP Software.
*
* OpenURP is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* OpenURP is distributed in the hope that it will be useful.
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Beangle. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openurp.ws.services.teach.attendance.domain
import java.{util => ju}
import org.beangle.commons.lang.Objects
/**
* Sign-in information.
*
* @author chaostone
* @version 1.0, 2014/03/22
* @since 0.0.1
*/
class SigninData(val devId: Int, val cardId: String, val signinAt: ju.Date, val params: String) {
override def toString(): String = {
Objects.toStringBuilder(this).add("devId", devId).add("cardId", cardId).add("signinAt", DateFormats.toDateTimeStr(signinAt)).toString
}
}
|
openurp/edu-attendance-core
|
attendance/src/main/scala/org/openurp/ws/services/teach/attendance/domain/SigninData.scala
|
Scala
|
gpl-3.0
| 1,256 |
package com.lookout.borderpatrol.auth
import com.lookout.borderpatrol.ServiceIdentifier
import com.lookout.borderpatrol.sessionx.SessionId
import com.twitter.finagle.Service
/**
* The purpose of this module is to transpose an incoming identified request into a request that the `AccessIssuer`
* understands, send that request to the `AccessIssuer` and receive a reply with access granted or not.
*
* We want to be able to transform the (identity, service, request) tuple into an understandable form for the
* `AccessIssuer`
*
* {{{
* case class Credential(user: String, password: String)
*
* def httpBasicAccess(cred: Credential, req: httpx.Request): AccessRequest[String] =
* new AccessRequest[String] {
* val identity = s"${cred.user}:${cred.password}"
* val serviceId = "example"
* val request = req
*
* def basicRequest: Request = {
* request.headers += ("Basic" -> Base64Encoder(credential))
* request
* }
* }
*
* case class ApiToken(token: String)
* case class TokenAccessResponse(access: Option[ApiToken], reply: httpx.Response) extends AccessResponse[ApiToken]
*
* case class ApiTokenIssuer(remote: Service[httpx.Request, httpx.Response])
* extends AccessIssuer[String, ApiToken] {
*
* def apply(req: AccessRequest[String]): Future[AccessResponse[ApiToken]] =
* remote(req.baseRequest).map(res => TokenAccessResponse(res.body.as[ApiToken], res))
* }
* }}}
*/
/**
* Abstraction for some access data, e.g. service token, grant, role, scope
*/
trait Access[A] {
val access: A
}
object Access {
def apply[A](a: A): Access[A] =
new Access[A] {val access = a}
}
/**
* The identification information needed by the [[com.lookout.borderpatrol.auth.AccessIssuer AccessIssuer]]
* to issue access data for your request
*
* This can be thought of as a function (A, ServiceIdentifier) => Req
*/
trait AccessRequest[A] {
val identity: Id[A]
val serviceId: ServiceIdentifier
val sessionId: SessionId
}
object AccessRequest {
def apply[A](id: Id[A], servId: ServiceIdentifier, sessId: SessionId): AccessRequest[A] =
new AccessRequest[A] {
val identity = id
val serviceId = servId
val sessionId = sessId
}
}
/**
* This response contains the access data needed by an authenticated endpoint, e.g. grants, tokens, api keys
*/
trait AccessResponse[A] {
val access: Access[A]
}
/**
* Describes a service that acts as an Access issuing endpoint, this would be something like an OAuth2 token
* service, or an LDAP server, or a database that holds access tokens for user credentials
*/
trait AccessIssuer[A, B] extends Service[AccessRequest[A], AccessResponse[B]]
|
jamescway/borderpatrol
|
core/src/main/scala/com/lookout/borderpatrol/auth/Access.scala
|
Scala
|
mit
| 2,733 |
package info.armado.ausleihe.admin.transport.dataobjects
import javax.xml.bind.annotation.{XmlAccessType, XmlAccessorType, XmlRootElement}
@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
case class GameDTO(var barcode: String,
var title: String,
var author: String,
var publisher: String,
var minAge: Integer,
var playerCount: PlayerCountDTO,
var duration: DurationDTO,
var releaseYear: Integer,
var comment: String,
var activated: Boolean) {
def this() = this(null, null, null, null, null, null, null, null, null, false)
}
|
Spielekreis-Darmstadt/lending
|
lending-admin-interfaces/src/main/scala/info/armado/ausleihe/admin/transport/dataobjects/GameDTO.scala
|
Scala
|
apache-2.0
| 704 |
package com.socrata.bq.soql.bqreps
import com.rojoma.json.v3.ast.{JNumber, JString, JNull, JValue}
import com.socrata.bq.soql.{BigqueryType, BBQRep}
import com.socrata.soql.types.{SoQLNull, SoQLDouble, SoQLType, SoQLValue}
class DoubleRep extends BBQRep[SoQLType, SoQLValue] {
override def repType: SoQLType = SoQLDouble
override val bigqueryType = BigqueryType.Float
override def SoQL(cols: Seq[String]): SoQLValue = {
if (cols.head == null) SoQLNull
else SoQLDouble(cols.head.toDouble)
}
override def jvalue(value: SoQLValue): JValue = {
if (value == SoQLNull) JNull
else {
val soqlDouble = value.asInstanceOf[SoQLDouble].value
if (soqlDouble.isInfinite || soqlDouble.isNaN) JString(soqlDouble.toString)
else JNumber(soqlDouble)
}
}
override val numColumns: Int = 1
}
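// Example behavior (added comment): jvalue(SoQLDouble(1.5)) yields JNumber(1.5), while
// jvalue(SoQLDouble(Double.NaN)) yields JString("NaN") because JSON has no literal for NaN/Infinity;
// SoQL(Seq("1.5")) parses back to SoQLDouble(1.5) and SoQL(Seq(null)) maps to SoQLNull.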
|
socrata-platform/soql-bigquery-adapter
|
common-bq/src/main/scala/com/socrata/bq/soql/bqreps/DoubleRep.scala
|
Scala
|
apache-2.0
| 829 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \__ \ ____ / /|_/ // __ `// ___// __ \
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \__,_/ \___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti s_mach.concurrent
.t1i .,::;;; ;1tt Copyright (c) 2017 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: [email protected]
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.concurrent
/* WARNING: Generated code. To modify see s_mach.concurrent.codegen.TupleAsyncTaskRunnerTestCodeGen */
import scala.util.{Random, Success, Failure}
import org.scalatest.{FlatSpec, Matchers}
import s_mach.concurrent.TestBuilder._
import util._
class Tuple18AsyncTaskRunnerTest extends FlatSpec with Matchers with ConcurrentTestCommon {
"Tuple18AsyncTaskRunner-t0" must "wait on all Futures to complete concurrently" in {
val results =
test repeat TEST_COUNT run {
implicit val ctc = mkConcurrentTestContext()
import ctc._
sched.addEvent("start")
val items = IndexedSeq.fill(18)(Random.nextInt)
val fa = success(items(0))
val fb = success(items(1))
val fc = success(items(2))
val fd = success(items(3))
val fe = success(items(4))
val ff = success(items(5))
val fg = success(items(6))
val fh = success(items(7))
val fi = success(items(8))
val fj = success(items(9))
val fk = success(items(10))
val fl = success(items(11))
val fm = success(items(12))
val fn = success(items(13))
val fo = success(items(14))
val fp = success(items(15))
val fq = success(items(16))
val fr = success(items(17))
val result = async.par.run(fa,fb,fc,fd,fe,ff,fg,fh,fi,fj,fk,fl,fm,fn,fo,fp,fq,fr)
waitForActiveExecutionCount(0)
sched.addEvent("end")
result.awaitTry should be(Success((items(0),items(1),items(2),items(3),items(4),items(5),items(6),items(7),items(8),items(9),items(10),items(11),items(12),items(13),items(14),items(15),items(16),items(17))))
isConcurrentSchedule(Vector(items(0),items(1),items(2),items(3),items(4),items(5),items(6),items(7),items(8),items(9),items(10),items(11),items(12),items(13),items(14),items(15),items(16),items(17)), sched)
}
val concurrentPercent = results.count(_ == true) / results.size.toDouble
concurrentPercent should be >= MIN_CONCURRENCY_PERCENT
}
"TupleAsyncTaskRunner-t1" must "complete immediately after any Future fails" in {
test repeat TEST_COUNT run {
implicit val ctc = mkConcurrentTestContext()
import ctc._
sched.addEvent("start")
val endLatch = Latch()
val fb = fail(2)
// Note1: without hooking the end latch here there would be a race condition here between success 1,3,4,5,6
// and end. The latch is used to create a serialization schedule that can be reliably tested
// Note2: Due to this design, a bug in merge that does not complete immediately on failure will cause a
// deadlock here instead of a failing test
val fa = endLatch happensBefore success(1)
val fc = endLatch happensBefore success(3)
val fd = endLatch happensBefore success(4)
val fe = endLatch happensBefore success(5)
val ff = endLatch happensBefore success(6)
val fg = endLatch happensBefore success(7)
val fh = endLatch happensBefore success(8)
val fi = endLatch happensBefore success(9)
val fj = endLatch happensBefore success(10)
val fk = endLatch happensBefore success(11)
val fl = endLatch happensBefore success(12)
val fm = endLatch happensBefore success(13)
val fn = endLatch happensBefore success(14)
val fo = endLatch happensBefore success(15)
val fp = endLatch happensBefore success(16)
val fq = endLatch happensBefore success(17)
val fr = endLatch happensBefore success(18)
val result = async.par.run(fa,fb,fc,fd,fe,ff,fg,fh,fi,fj,fk,fl,fm,fn,fo,fp,fq,fr)
waitForActiveExecutionCount(0)
sched.addEvent("end")
endLatch.set()
waitForActiveExecutionCount(0)
result.awaitTry shouldBe a [Failure[_]]
result.awaitTry.failed.get shouldBe a [AsyncParThrowable]
sched.happensBefore("start","fail-2") should equal(true)
sched.happensBefore("fail-2","end") should equal(true)
(1 to 18).filter(_ != 2).foreach { i =>
sched.happensBefore("end", s"success-$i") should equal(true)
}
}
}
"TupleAsyncTaskRunner-t2" must "throw AsyncParThrowable which can wait for all failures" in {
test repeat TEST_COUNT run {
implicit val ctc = mkConcurrentTestContext()
import ctc._
val failures = Random.shuffle(Seq(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18)).take(2)
def call(i: Int) = if(failures.contains(i)) {
fail(i)
} else {
success(i)
}
val result = async.par.run(call(1),call(2),call(3),call(4),call(5),call(6),call(7),call(8),call(9),call(10),call(11),call(12),call(13),call(14),call(15),call(16),call(17),call(18))
waitForActiveExecutionCount(0)
val thrown = result.failed.await.asInstanceOf[AsyncParThrowable]
// Even though there are two worker threads, it technically is a race condition to see which failure happens
// first. This actually happens in about 1/1000 runs: worker one stalls while processing fail-1, and
// worker 2 is able to complete success-2, success-3 and fail-4 before fail-1 finishes
thrown.firstFailure.toString.startsWith("java.lang.RuntimeException: fail-") should equal(true)
thrown.allFailure.await.map(_.toString) should contain theSameElementsAs(
failures.map(failIdx => new RuntimeException(s"fail-$failIdx").toString)
)
}
}
}
|
S-Mach/s_mach.concurrent
|
src/test/scala/s_mach/concurrent/Tuple18AsyncTaskRunnerTest.scala
|
Scala
|
mit
| 6,195 |
import org.specs2.execute.FailureException
import scalaxb.compiler.Config
import scalaxb.compiler.ConfigEntry._
import scala.io.Source
class UseListsTest extends TestBase {
val schema = resource("useLists.xsd")
lazy val generatedWithSeqs = module.processFiles(
List(schema),
Config.default.update(PackageNames(Map(Some("http://simple/main") -> Some("uselists"), None -> Some("default")))).
update(Outdir(tmp)))
lazy val generatedWithLists = module.processFiles(
List(schema),
Config.default.update(PackageNames(Map(Some("http://simple/main") -> Some("uselists"), None -> Some("default")))).
update(Outdir(tmp)).
update(UseLists))
lazy val fileContentWithSeqs: String = generatedWithSeqs.find(fi => fi.getName.contains("useLists.scala")) match {
case Some(enumFile) =>
Source.fromFile(enumFile).mkString
case None => throw FailureException(failure("Could not find generated file: useLists.scala"))
}
lazy val fileContentWithLists: String = generatedWithLists.find(fi => fi.getName.contains("useLists.scala")) match {
case Some(enumFile) =>
Source.fromFile(enumFile).mkString
case None => throw FailureException(failure("Could not find generated file: useLists.scala"))
}
"Use Seq[T] by default" in {
fileContentWithSeqs must contain("Seq[")
fileContentWithSeqs must not(contain("List["))
}
"Use List[T] when configured" in {
fileContentWithLists must contain("List[")
fileContentWithLists must not(contain("Seq["))
}
}
|
eed3si9n/scalaxb
|
integration/src/test/scala/UseListsTest.scala
|
Scala
|
mit
| 1,532 |
package com.lonelyplanet.openplanet.client.apis
import com.lonelyplanet.openplanet.client.{Annotate, IncludeParameter, OpenPlanetClient}
import spray.json.JsValue
import scala.collection.immutable.Seq
trait OpAnnotate extends Annotate {
val client: OpenPlanetClient
override def annotationRequest(body: String): JsValue = {
client.post("/annotations", body)
}
override def annotationMatches(id: String, include: Seq[IncludeParameter] = Seq.empty): JsValue = {
client.getSingle(s"/annotations/$id/matches")
}
}
|
lonelyplanet/open-planet-scala-client
|
src/main/scala/com/lonelyplanet/openplanet/client/apis/OpAnnotate.scala
|
Scala
|
mit
| 534 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.twofishes.indexer.importers.geonames
import io.fsq.twofishes.indexer.util.FsqSimpleFeatureImplicits._
import io.fsq.twofishes.indexer.util.ShapefileIterator
import java.io.FileWriter
import org.slf4s.Logging
// Tool to flatten NaturalEarth Populated Places Shapefile to a single text file
// to simplify scalding index build
// run from twofishes root directory using the following command:
// ./sbt "indexer/run-main io.fsq.twofishes.indexer.importers.geonames.NaturalEarthAttributesFlattener"
//
// NOTE: This is a temporary workaround until I find/write an implementation
// of FileInputFormat and RecordReader for shapefiles that can split
// https://github.com/mraad/Shapefile works but cannot split yet
object NaturalEarthAttributesFlattener extends Logging {
def main(args: Array[String]): Unit = {
val fileWriter = new FileWriter("src/jvm/io/fsq/twofishes/indexer/data/downloaded/flattenedAttributes.txt", false)
var features = 0
val iterator = new ShapefileIterator(
"src/jvm/io/fsq/twofishes/indexer/data/downloaded/ne_10m_populated_places_simple.shp"
)
for {
f <- iterator
geonameidString <- f.propMap.get("geonameid").toList
// remove .000
geonameId = geonameidString.toDouble.toInt
if geonameId != -1
adm0cap = f.propMap.getOrElse("adm0cap", "0").toDouble.toInt
worldcity = f.propMap.getOrElse("worldcity", "0").toDouble.toInt
scalerank = f.propMap.getOrElse("scalerank", "20").toInt
natscale = f.propMap.getOrElse("natscale", "0").toInt
labelrank = f.propMap.getOrElse("labelrank", "0").toInt
} {
fileWriter.write("%d\t%d\t%d\t%d\t%d\t%d\n".format(geonameId, adm0cap, worldcity, scalerank, natscale, labelrank))
features += 1
if (features % 1000 == 0) {
log.info("processed %d features".format(features))
}
}
fileWriter.close()
log.info("Done.")
}
}
|
foursquare/fsqio
|
src/jvm/io/fsq/twofishes/indexer/importers/geonames/NaturalEarthAttributesFlattener.scala
|
Scala
|
apache-2.0
| 1,984 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka010
import java.{ util => ju }
import java.io.File
import scala.collection.JavaConverters._
import scala.util.Random
import kafka.common.TopicAndPartition
import kafka.log._
import kafka.message._
import kafka.utils.Pool
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.scalatest.BeforeAndAfterAll
import org.apache.spark._
import org.apache.spark.scheduler.ExecutorCacheTaskLocation
import org.apache.spark.streaming.kafka010.mocks.MockTime
class KafkaRDDSuite extends SparkFunSuite with BeforeAndAfterAll {
private var kafkaTestUtils: KafkaTestUtils = _
private val sparkConf = new SparkConf().setMaster("local[4]")
.setAppName(this.getClass.getSimpleName)
private var sc: SparkContext = _
override def beforeAll {
sc = new SparkContext(sparkConf)
kafkaTestUtils = new KafkaTestUtils
kafkaTestUtils.setup()
}
override def afterAll {
if (sc != null) {
sc.stop
sc = null
}
if (kafkaTestUtils != null) {
kafkaTestUtils.teardown()
kafkaTestUtils = null
}
}
private def getKafkaParams() = Map[String, Object](
"bootstrap.servers" -> kafkaTestUtils.brokerAddress,
"key.deserializer" -> classOf[StringDeserializer],
"value.deserializer" -> classOf[StringDeserializer],
"group.id" -> s"test-consumer-${Random.nextInt}-${System.currentTimeMillis}"
).asJava
private val preferredHosts = LocationStrategies.PreferConsistent
private def compactLogs(topic: String, partition: Int, messages: Array[(String, String)]) {
val mockTime = new MockTime()
// The LogCleaner in the 0.10 version of Kafka still expects the old TopicAndPartition API
val logs = new Pool[TopicAndPartition, Log]()
val logDir = kafkaTestUtils.brokerLogDir
val dir = new File(logDir, topic + "-" + partition)
dir.mkdirs()
val logProps = new ju.Properties()
logProps.put(LogConfig.CleanupPolicyProp, LogConfig.Compact)
logProps.put(LogConfig.MinCleanableDirtyRatioProp, java.lang.Float.valueOf(0.1f))
val log = new Log(
dir,
LogConfig(logProps),
0L,
mockTime.scheduler,
mockTime
)
messages.foreach { case (k, v) =>
val msg = new ByteBufferMessageSet(
NoCompressionCodec,
new Message(v.getBytes, k.getBytes, Message.NoTimestamp, Message.CurrentMagicValue))
log.append(msg)
}
log.roll()
logs.put(TopicAndPartition(topic, partition), log)
val cleaner = new LogCleaner(CleanerConfig(), logDirs = Array(dir), logs = logs)
cleaner.startup()
cleaner.awaitCleaned(topic, partition, log.activeSegment.baseOffset, 1000)
cleaner.shutdown()
mockTime.scheduler.shutdown()
}
test("basic usage") {
val topic = s"topicbasic-${Random.nextInt}-${System.currentTimeMillis}"
kafkaTestUtils.createTopic(topic)
val messages = Array("the", "quick", "brown", "fox")
kafkaTestUtils.sendMessages(topic, messages)
val kafkaParams = getKafkaParams()
val offsetRanges = Array(OffsetRange(topic, 0, 0, messages.size))
val rdd = KafkaUtils.createRDD[String, String](sc, kafkaParams, offsetRanges, preferredHosts)
.map(_.value)
val received = rdd.collect.toSet
assert(received === messages.toSet)
// size-related method optimizations return sane results
assert(rdd.count === messages.size)
assert(rdd.countApprox(0).getFinalValue.mean === messages.size)
assert(!rdd.isEmpty)
assert(rdd.take(1).size === 1)
assert(rdd.take(1).head === messages.head)
assert(rdd.take(messages.size + 10).size === messages.size)
val emptyRdd = KafkaUtils.createRDD[String, String](
sc, kafkaParams, Array(OffsetRange(topic, 0, 0, 0)), preferredHosts)
assert(emptyRdd.isEmpty)
// invalid offset ranges throw exceptions
val badRanges = Array(OffsetRange(topic, 0, 0, messages.size + 1))
intercept[SparkException] {
val result = KafkaUtils.createRDD[String, String](sc, kafkaParams, badRanges, preferredHosts)
.map(_.value)
.collect()
}
}
test("compacted topic") {
val compactConf = sparkConf.clone()
compactConf.set("spark.streaming.kafka.allowNonConsecutiveOffsets", "true")
sc.stop()
sc = new SparkContext(compactConf)
val topic = s"topiccompacted-${Random.nextInt}-${System.currentTimeMillis}"
val messages = Array(
("a", "1"),
("a", "2"),
("b", "1"),
("c", "1"),
("c", "2"),
("b", "2"),
("b", "3")
)
val compactedMessages = Array(
("a", "2"),
("b", "3"),
("c", "2")
)
compactLogs(topic, 0, messages)
val props = new ju.Properties()
props.put("cleanup.policy", "compact")
props.put("flush.messages", "1")
props.put("segment.ms", "1")
props.put("segment.bytes", "256")
kafkaTestUtils.createTopic(topic, 1, props)
val kafkaParams = getKafkaParams()
val offsetRanges = Array(OffsetRange(topic, 0, 0, messages.size))
val rdd = KafkaUtils.createRDD[String, String](
sc, kafkaParams, offsetRanges, preferredHosts
).map(m => m.key -> m.value)
val received = rdd.collect.toSet
assert(received === compactedMessages.toSet)
// size-related method optimizations return sane results
assert(rdd.count === compactedMessages.size)
assert(rdd.countApprox(0).getFinalValue.mean === compactedMessages.size)
assert(!rdd.isEmpty)
assert(rdd.take(1).size === 1)
assert(rdd.take(1).head === compactedMessages.head)
assert(rdd.take(messages.size + 10).size === compactedMessages.size)
val emptyRdd = KafkaUtils.createRDD[String, String](
sc, kafkaParams, Array(OffsetRange(topic, 0, 0, 0)), preferredHosts)
assert(emptyRdd.isEmpty)
// invalid offset ranges throw exceptions
val badRanges = Array(OffsetRange(topic, 0, 0, messages.size + 1))
intercept[SparkException] {
val result = KafkaUtils.createRDD[String, String](sc, kafkaParams, badRanges, preferredHosts)
.map(_.value)
.collect()
}
}
test("iterator boundary conditions") {
// the idea is to find e.g. off-by-one errors between what kafka has available and the rdd
val topic = s"topicboundary-${Random.nextInt}-${System.currentTimeMillis}"
val sent = Map("a" -> 5, "b" -> 3, "c" -> 10)
kafkaTestUtils.createTopic(topic)
val kafkaParams = getKafkaParams()
// this is the "lots of messages" case
kafkaTestUtils.sendMessages(topic, sent)
var sentCount = sent.values.sum
val rdd = KafkaUtils.createRDD[String, String](sc, kafkaParams,
Array(OffsetRange(topic, 0, 0, sentCount)), preferredHosts)
val ranges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
val rangeCount = ranges.map(o => o.untilOffset - o.fromOffset).sum
assert(rangeCount === sentCount, "offset range didn't include all sent messages")
assert(rdd.map(_.offset).collect.sorted === (0 until sentCount).toArray,
"didn't get all sent messages")
// this is the "0 messages" case
val rdd2 = KafkaUtils.createRDD[String, String](sc, kafkaParams,
Array(OffsetRange(topic, 0, sentCount, sentCount)), preferredHosts)
// shouldn't get anything, since message is sent after rdd was defined
val sentOnlyOne = Map("d" -> 1)
kafkaTestUtils.sendMessages(topic, sentOnlyOne)
assert(rdd2.map(_.value).collect.size === 0, "got messages when there shouldn't be any")
// this is the "exactly 1 message" case, namely the single message from sentOnlyOne above
val rdd3 = KafkaUtils.createRDD[String, String](sc, kafkaParams,
Array(OffsetRange(topic, 0, sentCount, sentCount + 1)), preferredHosts)
// send lots of messages after rdd was defined, they shouldn't show up
kafkaTestUtils.sendMessages(topic, Map("extra" -> 22))
assert(rdd3.map(_.value).collect.head === sentOnlyOne.keys.head,
"didn't get exactly one message")
}
test("executor sorting") {
val kafkaParams = new ju.HashMap[String, Object](getKafkaParams())
kafkaParams.put("auto.offset.reset", "none")
val rdd = new KafkaRDD[String, String](
sc,
kafkaParams,
Array(OffsetRange("unused", 0, 1, 2)),
ju.Collections.emptyMap[TopicPartition, String](),
true)
val a3 = ExecutorCacheTaskLocation("a", "3")
val a4 = ExecutorCacheTaskLocation("a", "4")
val b1 = ExecutorCacheTaskLocation("b", "1")
val b2 = ExecutorCacheTaskLocation("b", "2")
val correct = Array(b2, b1, a4, a3)
correct.permutations.foreach { p =>
assert(p.sortWith(rdd.compareExecutors) === correct)
}
}
}
|
bravo-zhang/spark
|
external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaRDDSuite.scala
|
Scala
|
apache-2.0
| 9,481 |
package at.logic.gapt.formats.llk
import at.logic.gapt.formats.llk.ast.LambdaAST
import at.logic.gapt.expr._
import at.logic.gapt.formats.ClasspathInputFile
import org.specs2.mutable._
class LLKTest extends Specification {
val p1 =
"""\\AX{T,MON(h_1,\\alpha)}{MON(h_1,\\alpha) }
|\\AX{ NOCC(h_1,\\alpha,\\sigma)}{NOCC(h_1,\\alpha,\\sigma)}
|\\EXR{}{ NOCC(h_1,\\alpha,\\sigma)}{(exists s NOCC(h_1,\\alpha,s))}
|\\ANDR{T,MON(h_1,\\alpha), NOCC(h_1,\\alpha,\\sigma)}{MON(h_1,\\alpha) & (exists s NOCC(h_1,\\alpha,s))}
|\\EXR{}{T,MON(h_1,\\alpha), NOCC(h_1,\\alpha,\\sigma)}{(exists h (MON(h,\\alpha) & (exists s NOCC(h,\\alpha,s))))}
|\\ANDL{T, MON(h_1,\\sigma) & NOCC(h_1,\\sigma,x)}{(exists h (MON(h,\\alpha) & (exists s NOCC(h,\\alpha,s))))}
|\\EXL{}{T, (exists h (MON(h,\\sigma) & NOCC(h,\\sigma,x)))}{(exists h (MON(h,\\alpha) & (exists s NOCC(h,\\alpha,s))))}
|\\ALLL{}{T, (all n exists h (MON(h,n) & NOCC(h,n,x)))}{(exists h (MON(h,\\alpha) & (exists s NOCC(h,\\alpha,s))))}
|\\DEF{T,A(\\sigma)}{(exists h (MON(h,\\alpha) & (exists s NOCC(h,\\alpha,s))))}
|\\ALLR{}{T,A(\\sigma)}{(all n exists h (MON(h,n) & (exists s NOCC(h,n,s))))}
|\\DEF{T,A(\\sigma)}{C}
|\\CONTINUEWITH{\\rho(\\sigma)}
|""".stripMargin
def checkformula( f: String ) = {
LLKProofParser.parseAll( LLKProofParser.formula, f ) match {
case LLKProofParser.Success( r, _ ) =>
ok( r.toString )
case LLKProofParser.NoSuccess( msg, input ) =>
ko( "Error at " + input.pos + ": " + msg )
}
}
def llkFromClasspath( filename: String ) =
LLKProofParser( ClasspathInputFile( filename ) )
"Hybrid Latex-GAPT" should {
"correctly handle latex macros in formulas (1)" in {
checkformula( "\\\\benc{j_1<n+1}" )
ok
}
"correctly handle latex macros in formulas (2)" in {
checkformula( "\\\\ite{\\\\benc{j_1<n+1}}{h'(j_1)}{\\\\alpha}" )
ok
}
"correctly handle latex macros in formulas (3)" in {
checkformula( "\\\\ite{\\\\ienc{j_1<n+1}}{h'(j_1)}{\\\\alpha}" )
ok
}
"correctly handle latex macros in formulas (4)" in {
checkformula( "\\\\ite{\\\\benc{j_1<n+1}}{h'(j_1)}{\\\\alpha} = 0" )
ok
}
"accept the proof outline" in {
//println(p1)
LLKProofParser.parseAll( LLKProofParser.rules, p1 ) match {
case LLKProofParser.Success( r: List[Token], _ ) =>
//println(r)
val lterms: List[LambdaAST] = r.flatMap( _ match {
case RToken( _, _, a, s, _ ) => a ++ s
case TToken( _, _, _ ) => Nil
case AToken( _, _, a, s ) => a ++ s
} )
//println(lterms.flatMap(_.varnames).toSet)
ok( "successfully parsed " + r )
case LLKProofParser.NoSuccess( msg, input ) =>
ko( "parsing error at " + input.pos + ": " + msg )
}
ok
}
"accept the proof outline with the parse interface" in {
val r = LLKProofParser.parse( p1 )
ok
}
"correctly infer replacement terms in equalities" in {
import EquationVerifier.{ Different, Equal, EqualModuloEquality, checkReplacement }
val List( a ) = List( "a" ) map ( x => Const( x, Ti ) )
val List( f, g ) = List( "f", "g" ) map ( x => Const( x, Ti -> Ti ) )
val List( p ) = List( "p" ) map ( x => Const( x, Ti -> ( Ti -> ( Ti -> To ) ) ) )
val t1 = App( p, List( App( f, a ), App( f, App( g, App( f, a ) ) ), a ) )
val t2 = App( p, List( App( f, a ), App( f, App( g, App( g, a ) ) ), a ) )
val fa = App( f, a )
val ga = App( g, a )
checkReplacement( fa, ga, t1, t2 ) match {
case Equal => ko( "Terms " + t1 + " and " + t2 + " considered as equal, but they differ!" )
case Different => ko( "Terms " + t1 + " and t2 considered as (completely) different, but they differ only modulo one replacement!" )
case EqualModuloEquality( path ) =>
//println("Path:"+path)
ok
}
checkReplacement( fa, ga, t1, t1 ) match {
case Equal => ok
case Different => ko( "Terms " + t1 + " and t2 considered as (completely) different, but they are equal!" )
case EqualModuloEquality( path ) => ko( "Found an equality modulo " + Eq( fa.asInstanceOf[Expr], ga.asInstanceOf[Expr] ) + " but should be equal!" )
}
ok
}
"load the simple example from file and parse it" in {
val p = llkFromClasspath( "simple.llk" )
//println(p)
ok
}
"load the commutativity of + proof from file and parse it" in {
val p = llkFromClasspath( "komm.llk" )
( p.proof( "THEPROOF" ) ) must not throwAn () //exception
ok
}
"load the 3-2 pigeon hole example from file and parse it" in {
val p = llkFromClasspath( "pigeon32.llk" )
( p.proof( "PROOF" ) ) must not throwAn () //exception
ok
}
"load the tape3 proof from file" in {
val p = llkFromClasspath( "tape3.llk" )
p.proofs.length must be_>( 0 )
p.Definitions.toList.length must be_>( 0 )
p.axioms.length must be_>( 0 )
ok
}
}
"Tactics" should {
"correctly prove the instance of an axiom" in {
val vmap = Map[String, Ty]( "x" -> Ti, "y" -> Ti, "z" -> Ti )
val cmap = Map[String, Ty]( "a" -> Ti, "1" -> Ti, "+" -> ( Ti -> ( Ti -> Ti ) ) )
val naming: String => Expr = x => {
if ( vmap contains x ) Var( x, vmap( x ) ) else
Const( x, cmap( x ) )
}
val axiom = LLKFormulaParser.ASTtoHOL( naming, LLKProofParser.parseFormula( "(all x all y all z (x+(y+z)=(x+y)+z))" ) )
val instance = LLKFormulaParser.ASTtoHOL( naming, LLKProofParser.parseFormula( "a+((1+x)+y)=(a+(1+x))+y" ) )
val t1 = HOLFunction( Const( "+", Ti -> ( Ti -> Ti ) ), List(
Const( "1", Ti ),
Var( "x", Ti )
) )
val t2 = Const( "a", Ti )
val x = Var( "x", Ti )
val y = Var( "y", Ti )
val z = Var( "z", Ti )
val sub = Substitution( List( ( x, t2 ), ( y, t1 ), ( z, y ) ) )
val p = LLKProofParser.proveInstance( axiom.asInstanceOf[Formula], instance.asInstanceOf[Formula], sub )
p.endSequent.formulas must haveSize( 2 )
p.endSequent.antecedent must haveSize( 1 )
p.endSequent.succedent must haveSize( 1 )
p.endSequent.antecedent( 0 ) mustEqual ( axiom )
p.endSequent.succedent( 0 ) mustEqual ( instance )
}
}
}
|
gebner/gapt
|
tests/src/test/scala/at/logic/gapt/formats/llk/HybridLatexParserTest.scala
|
Scala
|
gpl-3.0
| 6,440 |
package improbable.bridgesettings
import improbable.fapi.bridge._
import improbable.fapi.network.RakNetLinkSettings
import improbable.unity.fabric.AuthoritativeEntityOnly
import improbable.unity.fabric.bridge.FSimAssetContextDiscriminator
import improbable.unity.fabric.engine.EnginePlatform
import improbable.unity.fabric.satisfiers.SatisfyPhysics
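/**
* Resolves bridge settings for Unity FSim workers: requests for the Unity FSim engine type are
* answered with one fixed [[BridgeSettings]] instance (RakNet link, physics satisfier,
* authoritative entities only); any other engine type is left unresolved.
*/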
object UnityFSimBridgeSettings extends BridgeSettingsResolver {
private val fSimEngineBridgeSettings = BridgeSettings(
FSimAssetContextDiscriminator(),
RakNetLinkSettings(),
EnginePlatform.UNITY_FSIM_ENGINE,
SatisfyPhysics,
AuthoritativeEntityOnly(),
MetricsEngineLoadPolicy,
PerEntityOrderedStateUpdateQos
)
override def engineTypeToBridgeSettings(engineType: String, metadata: String): Option[BridgeSettings] = {
if (engineType == EnginePlatform.UNITY_FSIM_ENGINE) {
Some(fSimEngineBridgeSettings)
} else {
None
}
}
}
|
timtroendle/spatial-cimo
|
workers/gsim/src/main/scala/improbable/bridgesettings/UnityFSimBridgeSettings.scala
|
Scala
|
mit
| 932 |
package test;
trait Test3 {
trait MatchableImpl {
trait MatchImpl;
}
trait BracePairImpl {
trait BraceImpl extends MatchableImpl {
private object MyMatch1 extends MatchImpl;
protected def match0 : MatchImpl = MyMatch1;
}
}
}
|
yusuke2255/dotty
|
tests/untried/pos/t651.scala
|
Scala
|
bsd-3-clause
| 260 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs._
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScValue
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScValueStubImpl
import org.jetbrains.plugins.scala.lang.psi.stubs.index.ScalaIndexKeys.VALUE_NAME_KEY
/**
* User: Alexander Podkhalyuzin
* Date: 17.10.2008
*/
abstract class ScValueElementType[V <: ScValue](debugName: String)
extends ScValueOrVariableElementType[ScValueStub, ScValue](debugName) {
override protected val key = VALUE_NAME_KEY
override def serialize(stub: ScValueStub, dataStream: StubOutputStream): Unit = {
super.serialize(stub, dataStream)
dataStream.writeBoolean(stub.isImplicit)
}
override def deserialize(dataStream: StubInputStream,
parentStub: StubElement[_ <: PsiElement]) = new ScValueStubImpl(
parentStub,
this,
isDeclaration = dataStream.readBoolean,
namesRefs = dataStream.readNames,
typeTextRef = dataStream.readOptionName,
bodyTextRef = dataStream.readOptionName,
containerTextRef = dataStream.readOptionName,
isLocal = dataStream.readBoolean,
isImplicit = dataStream.readBoolean
)
override def createStubImpl(value: ScValue,
parentStub: StubElement[_ <: PsiElement]) = new ScValueStubImpl(
parentStub,
this,
isDeclaration = isDeclaration(value),
namesRefs = names(value),
typeTextRef = typeText(value),
bodyTextRef = bodyText(value),
containerTextRef = containerText(value),
isLocal = isLocal(value),
isImplicit = value.hasModifierProperty("implicit")
)
override def indexStub(stub: ScValueStub, sink: IndexSink): Unit = {
super.indexStub(stub, sink)
if (stub.isImplicit) {
this.indexImplicit(sink)
}
}
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/ScValueElementType.scala
|
Scala
|
apache-2.0
| 1,903 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util
abstract class AbstractSequentialList[E] protected ()
extends AbstractList[E] {
def get(index: Int): E = {
val iter = listIterator(index)
if (iter.hasNext()) iter.next()
else throw new IndexOutOfBoundsException(index.toString)
}
override def set(index: Int, element: E): E = {
val iter = listIterator(index)
if (!iter.hasNext())
throw new IndexOutOfBoundsException
val ret = iter.next()
iter.set(element)
ret
}
override def add(index: Int, element: E): Unit =
listIterator(index).add(element)
override def remove(index: Int): E = {
val iter = listIterator(index)
if (!iter.hasNext())
throw new IndexOutOfBoundsException
val ret = iter.next()
iter.remove()
ret
}
override def addAll(index: Int, c: Collection[_ <: E]): Boolean = {
val iter = listIterator(index)
val citer = c.iterator()
val changed = citer.hasNext()
while (citer.hasNext()) {
iter.add(citer.next())
}
changed
}
def listIterator(index: Int): ListIterator[E]
}
|
scala-js/scala-js
|
javalib/src/main/scala/java/util/AbstractSequentialList.scala
|
Scala
|
apache-2.0
| 1,346 |
package com.faacets
package consolidate
import org.scalatest.{FunSuite, Inside, Matchers}
import cats.data.{Validated, ValidatedNel, NonEmptyList => NEL}
import Result.{Failed, Same, Updated}
import cats.implicits._
import com.faacets.consolidate.implicits._
class PersonSuite extends FunSuite with Matchers with Inside {
import PersonSuite._
test("Merge syntax and results") {
val a = Person("Jack", None, None)
val b = Person("Jack", Some(40), None)
(a merge a) shouldBe Same(a)
inside(a merge b) { case Updated(c, _) => c shouldBe b }
(b merge a) shouldBe Same(b)
}
test("Map merge") {
val a = Person("Jack", None, None)
val b = Person("Jack", Some(40), None)
inside(Map("a" -> a) merge Map("b" -> b)) {
case Updated(newMap, _) => newMap shouldBe Map("a" -> a, "b" -> b)
}
inside(Map("a" -> a) merge Map("a" -> b)) {
case Updated(newMap, _) => newMap shouldBe Map("a" -> b)
}
}
}
object PersonSuite {
case class Person(name: String, age: Option[Int] = None, retired: Option[Boolean] = None)
object Person {
def validated(name: String, age: Option[Int], retired: Option[Boolean]): ValidatedNel[String, Person] = {
import Validated.{invalidNel, valid}
def validName(name: String) = if (name.isEmpty) invalidNel("Name cannot be empty") else valid(name)
def validAge(age: Option[Int]) = age match {
case Some(a) if a < 0 => invalidNel("Age cannot be negative")
case _ => valid(age)
}
(validName(name), validAge(age), valid(retired)).mapN(Person.apply)
}
implicit def StringMerge: Merge[String] = Merge.fromEquals[String]
implicit def IntMerge: Merge[Int] = Merge.fromEquals[Int]
implicit def BooleanMerge: Merge[Boolean] = Merge.fromEquals[Boolean]
implicit val PersonMerge: Merge[Person] = Auto.derive[Person].validated((Person.validated _).tupled)
}
}
|
denisrosset/consolidate
|
src/test/scala/com.faacets/consolidate/PersonSuite.scala
|
Scala
|
mit
| 1,873 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.aggregate
import java.lang.Iterable
import org.apache.flink.api.common.functions.RichGroupReduceFunction
import org.apache.flink.configuration.Configuration
import org.apache.flink.table.codegen.{Compiler, GeneratedAggregationsFunction}
import org.apache.flink.types.Row
import org.apache.flink.util.Collector
import org.slf4j.LoggerFactory
/**
* [[RichGroupReduceFunction]] to compute the final result of a pre-aggregated aggregation
* for batch (DataSet) queries.
*
* @param genAggregations Code-generated [[GeneratedAggregations]]
*/
class DataSetFinalAggFunction(
private val genAggregations: GeneratedAggregationsFunction)
extends RichGroupReduceFunction[Row, Row]
with Compiler[GeneratedAggregations] {
private var output: Row = _
private var accumulators: Row = _
val LOG = LoggerFactory.getLogger(this.getClass)
private var function: GeneratedAggregations = _
override def open(config: Configuration) {
LOG.debug(s"Compiling AggregateHelper: $genAggregations.name \\n\\n " +
s"Code:\\n$genAggregations.code")
val clazz = compile(
getClass.getClassLoader,
genAggregations.name,
genAggregations.code)
LOG.debug("Instantiating AggregateHelper.")
function = clazz.newInstance()
output = function.createOutputRow()
accumulators = function.createAccumulators()
}
override def reduce(records: Iterable[Row], out: Collector[Row]): Unit = {
val iterator = records.iterator()
// reset first accumulator
function.resetAccumulator(accumulators)
var record: Row = null
while (iterator.hasNext) {
record = iterator.next()
// accumulate
function.mergeAccumulatorsPair(accumulators, record)
}
// set group keys value to final output
function.setForwardedFields(record, output)
// get final aggregate value and set to output.
function.setAggregationResults(accumulators, output)
// set grouping set flags to output
function.setConstantFlags(output)
out.collect(output)
}
}
|
hongyuhong/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/runtime/aggregate/DataSetFinalAggFunction.scala
|
Scala
|
apache-2.0
| 2,886 |
package com.infinitemule.hopperhack.poursquare.service
import com.infinitemule.hopperhack.poursquare.data.PoursquareDaoMongo
import org.springframework.stereotype.Component
import org.springframework.beans.factory.annotation.Autowired
import com.infinitemule.hopperhack.poursquare.domain._
/*
* A service that in this case just forwards calls to a DAO.
*/
trait PoursquareService {
def createCheckin(json: String)
def mostRecentCheckins(limit: Int): List[Checkin]
def mostRecentInUsa(limit: Int, laterThan: Int): List[Checkin]
def checkinsByCountry(): List[CheckinCountryCount]
}
@Component
class PoursquareDataService extends PoursquareService {
@Autowired
var dao: PoursquareDaoMongo = _
override def createCheckin(json: String) = {
dao.createCheckin(json)
}
override def mostRecentInUsa(limit: Int, laterThan: Int): List[Checkin] = {
dao.mostRecentInUsa(limit, laterThan)
}
override def mostRecentCheckins(limit: Int): List[Checkin] = {
dao.mostRecentCheckins(limit)
}
override def checkinsByCountry(): List[CheckinCountryCount] = {
dao.checkinsByCountry()
}
}
|
infinitemule/poursquare-client
|
src/main/scala/com/infinitemule/hopperhack/poursquare/service/PoursquareService.scala
|
Scala
|
mit
| 1,143 |
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package examples.graphs
import api._
import model._
import Ordering.Implicits._
@emma.lib
object ConnectedComponents {
def apply[V: Ordering : Meta.Tag](edges: DataBag[Edge[V]]): DataBag[LVertex[V, V]] = {
val vertices = edges.map(_.src).distinct
// initial state
val state = MutableBag(vertices.map(v => v -> v))
// collection of vertices whose label changed in the last iteration
var delta = state.bag()
while (delta.nonEmpty) {
val messages = for {
Tuple2(k, v) <- delta
Edge(src, dst) <- edges
if k == src
} yield Message(dst, v)
delta = state.update(
messages.groupBy(_.tgt)
)((_, vOpt, ms) => for {
v <- vOpt
m = ms.map(_.payload).max
if m > v
} yield m)
}
// return solution set
for ((vid, cmp) <- state.bag()) yield LVertex(vid, cmp)
}
}
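// Usage sketch (illustrative only; assumes a Seq-backed DataBag constructor is available):
// val edges = DataBag(Seq(Edge(1, 2), Edge(2, 1), Edge(3, 4), Edge(4, 3)))
// val components = ConnectedComponents(edges)
// // one LVertex(vertex, label) per vertex; labels only ever grow during propagation, so each
// // vertex ends up labelled with the largest vertex id in its connected component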
|
aalexandrov/emma
|
emma-examples/emma-examples-library/src/main/scala/org/emmalanguage/examples/graphs/ConnectedComponents.scala
|
Scala
|
apache-2.0
| 1,526 |
package se.lu.nateko.cp.data.streams.geo
import scala.collection.mutable.Buffer
import scala.collection.mutable.Map
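/**
* Mutable working state for point reduction: the raw latitude/longitude buffers, a running
* bounding box, the indices of the points kept so far (`shortList`) and their associated costs.
*/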
class PointReducerState{
val lats, lons = Buffer.empty[Double]
val bbox = new BBox
val shortList = Buffer.empty[Int]
val costs = Map.empty[Int, Double]
val inheritedCosts = Map.empty[Int, Double]
def latLongs = shortList.map(i => (lats(i), lons(i)))
}
|
ICOS-Carbon-Portal/data
|
src/main/scala/se/lu/nateko/cp/data/streams/geo/PointReducerState.scala
|
Scala
|
gpl-3.0
| 379 |
package org.jetbrains.plugins.scala
package lang
package parser
package parsing
package statements
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
import org.jetbrains.plugins.scala.lang.parser.parsing.types.Type
/**
* @author Alexander Podkhalyuzin
* Date: 11.02.2008
*/
/*
* FunDcl ::= FunSig [':' Type]
*/
object FunDcl extends FunDcl {
override protected def funSig = FunSig
override protected def `type` = Type
}
trait FunDcl {
protected def funSig: FunSig
protected def `type`: Type
def parse(builder: ScalaPsiBuilder): Boolean = {
//val returnMarker = builder.mark
builder.getTokenType match {
case ScalaTokenTypes.kDEF =>
builder.advanceLexer //Ate def
case _ =>
//returnMarker.drop
return false
}
if (!(funSig parse builder)) {
//returnMarker.drop
return false
}
builder.getTokenType match {
case ScalaTokenTypes.tCOLON =>
builder.advanceLexer //Ate :
if (`type`.parse(builder)) {
//returnMarker.drop
return true
}
else {
builder error ScalaBundle.message("wrong.type")
//returnMarker.drop
return true
}
case _ =>
//returnMarker.drop
return true
}
}
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/parser/parsing/statements/FunDcl.scala
|
Scala
|
apache-2.0
| 1,366 |
package me.axiometry.blocknet.minecraft.net
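/** A Minecraft protocol message, identified by its name and the direction in which it travels. */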
trait Message {
def name: String
def direction: Direction
}
object Message {
object ToClient {
}
object ToServer {
}
}
|
Axiometry/Blocknet
|
blocknet-minecraft/src/main/scala/me/axiometry/blocknet/minecraft/net/Message.scala
|
Scala
|
bsd-2-clause
| 177 |
package com.netscout.aion2
import com.github.racc.tscg.TypesafeConfigModule
import com.google.inject.{AbstractModule, Guice}
import com.netscout.aion2.inject._
import com.netscout.aion2.model.DataSource
import javax.ws.rs.core.{Application => JAXRSApplication, Response}
import net.codingwell.scalaguice.ScalaModule
import org.glassfish.jersey.server.ResourceConfig
import org.glassfish.jersey.test.JerseyTest
import org.mockito.{Matchers => MockitoMatchers}
import org.mockito.Matchers._
import org.mockito.Mockito._
import org.mockito.BDDMockito._
import org.scalatest._
import org.scalatest.mock.MockitoSugar
object ApplicationSpec {
import com.typesafe.config.ConfigFactory
/**
* Gets a config file with a specific name
*/
def namedConfig(name: String) = ConfigFactory.parseResources(this.getClass, name ++ ".json")
}
class ApplicationSpec extends FlatSpec with Matchers with MockitoSugar {
import com.typesafe.config.ConfigFactory
import scala.collection.JavaConversions._
import javax.ws.rs.core.Response.Status.Family._
import javax.ws.rs.core.MediaType._
class ApplicationJerseyTest (
val application: JAXRSApplication
) extends JerseyTest(application)
class TestModule (
val name: String
) extends AbstractModule with ScalaModule {
val resourceConfig = new ResourceConfig
val dataSource = mock[DataSource]
def setupTestDataTypes {
import java.util.UUID
doReturn(classOf[String], Seq.empty : _*).when(dataSource).classOfType("text")
doReturn(classOf[UUID], Seq.empty : _*).when(dataSource).classOfType("timeuuid")
doReturn(classOf[Array[Byte]], Seq.empty : _*).when(dataSource).classOfType("blob")
}
override def configure {
bind[ResourceConfig].toInstance(resourceConfig)
bind[SchemaProvider].toInstance(new AionConfig(classOf[ApplicationSpec].getResourceAsStream(s"schema-${name}.yml")))
bind[DataSource].toInstance(dataSource)
}
}
def namedApplication(name: String, testModule: Option[TestModule] = None) = {
import net.codingwell.scalaguice.InjectorExtensions._
val tModule = testModule match {
case Some(m) => m
case None => new TestModule(name)
}
val injector = Guice.createInjector(
TypesafeConfigModule.fromConfig(ApplicationSpec.namedConfig(name)),
JacksonModule,
Slf4jLoggerModule,
AionResourceModule,
tModule)
injector.instance[Application]
}
def namedFixture(name: String) =
new {
val testModule = new TestModule(name)
val app = namedApplication(name, Some(testModule))
val test = new ApplicationJerseyTest(testModule.resourceConfig)
test.setUp()
}
def defaultFixture = namedFixture("defaults")
def defaultApplication = namedApplication("defaults")
implicit class TestApplicationHelper(val app: JAXRSApplication) {
def resourceCount = app.getClasses.size + app.getSingletons.size
}
implicit class TestResult(val response: Response) {
def shouldBeOfFamily(family: Response.Status.Family) {
response.getStatusInfo.getFamily shouldBe family
}
}
"An Application" should "be initializable with minimal configuration" in {
val uut = defaultApplication
uut should not be (null)
}
it should s"register only hard coded resources with no objects" in {
val f = defaultFixture
val uut = f.app
f.testModule.resourceConfig.getClasses should not be (null)
f.testModule.resourceConfig.getSingletons should not be (null)
// The extra one here is for JacksonFeature
f.testModule.resourceConfig.resourceCount shouldBe (f.app.hardCodedResources.size + 1)
}
it should "register resources of complete schema" in {
val f = namedFixture("complete")
val uut = f.app
val resourceConfig = f.testModule.resourceConfig
val registeredResources = resourceConfig.getResources
val resourcePaths = registeredResources.map(r => (r.getPath, r)).toMap
val expectedPaths = Seq(
"/foo/single_partition/{partition}",
"/foo/double_partition/{partition}/{range}",
"/foo/no_partition"
)
for (p <- expectedPaths) {
resourcePaths.contains(p) shouldBe true
}
}
it should "respond to schema requests" in {
val f = namedFixture("complete")
val result: Response = f.test.target("/schema").request().get()
result shouldBeOfFamily SUCCESSFUL
f.test.tearDown
}
it should "respond to version requests" in {
val f = defaultFixture
val result: Response = f.test.target("/version").request.get
result shouldBeOfFamily SUCCESSFUL
result.getMediaType shouldBe TEXT_PLAIN_TYPE
f.test.tearDown
}
it should "ask DataStore to initialize schema on startup" in {
val f = defaultFixture
verify(f.testModule.dataSource).initializeSchema(anyObject())
}
"The schema resource" should "report accurate schema information" in {
val f = namedFixture("complete")
val result: Response = f.test.target("/schema").request().get()
result shouldBeOfFamily SUCCESSFUL
val jsonResult = result.readEntity(classOf[String])
val schemaMap = f.app.mapper.readValue(jsonResult, classOf[Map[String, Map[String, String]]])
schemaMap shouldEqual Map (
"foo" -> Map (
"partition" -> "text",
"range" -> "text",
"time" -> "timeuuid",
"data" -> "blob",
"datam" -> "map<text,blob>"
)
)
}
"The resource resource" should "ask the DataSource for data on HTTP GET on an index with multiple partition keys" in {
import java.time.Instant
import java.time.temporal.ChronoUnit._
val f = namedFixture("complete")
// given
f.testModule.setupTestDataTypes
given(f.testModule.dataSource.executeQuery(anyObject(), anyObject(), anyObject(), anyObject())).willReturn(Seq())
// when
val now = Instant.now
val end = now.plus(1, HOURS)
val result: Response = f.test.target(s"/foo/single_partition/somePartition").queryParam("from", now.toString).queryParam("to", end.toString).request().get()
// then
result.getStatus shouldBe 200
verify(f.testModule.dataSource).executeQuery(
anyObject(),
anyObject(),
anyObject(),
MockitoMatchers.eq(Map("partition" -> "somePartition"))
) // TODO: better matching of the QueryStrategy
}
it should "ask the DataSource for data on HTTP GET on a full index with range keys" in {
import java.time.Instant
import java.time.temporal.ChronoUnit._
val f = namedFixture("complete")
// given
f.testModule.setupTestDataTypes
given(f.testModule.dataSource.executeQuery(anyObject(), anyObject(), anyObject(), anyObject())).willReturn(Seq())
// when
val now = Instant.now
val end = now.plus(1, HOURS)
val result: Response = f.test.target(s"/foo/double_partition/somePartition/someRange").queryParam("from", now.toString).queryParam("to", end.toString).request().get()
// then
result.getStatus shouldBe 200
verify(f.testModule.dataSource).executeQuery(
anyObject(),
anyObject(),
anyObject(),
MockitoMatchers.eq(Map("partition" -> "somePartition", "range" -> "someRange"))
) // TODO: better matching of the QueryStrategy
}
it should "return 404 for data on HTTP GET on a full index without partition keys" in {
import java.time.Instant
import java.time.temporal.ChronoUnit._
val f = namedFixture("complete")
// given
f.testModule.setupTestDataTypes
given(f.testModule.dataSource.executeQuery(anyObject(), anyObject(), anyObject(), anyObject())).willReturn(Seq())
// when
val now = Instant.now
val end = now.plus(1, HOURS)
val result: Response = f.test.target("/foo/single_partition").queryParam("from", now.toString).queryParam("to", now.toString).request().get()
// then
result.getStatus shouldBe 404
}
it should "ask the DataSource for data on HTTP GET with no partition / range keys" in {
import java.time.Instant
import java.time.temporal.ChronoUnit._
val f = namedFixture("complete")
// given
f.testModule.setupTestDataTypes
given(f.testModule.dataSource.executeQuery(anyObject(), anyObject(), anyObject(), anyObject())).willReturn(Seq())
// when
val now = Instant.now
val end = now.plus(1, HOURS)
val result: Response = f.test.target(s"/foo/no_partition").queryParam("from", now.toString).queryParam("to", end.toString).request().get()
// then
result.getStatus shouldBe 200
verify(f.testModule.dataSource).executeQuery(anyObject(), anyObject(), anyObject(), MockitoMatchers.eq(Map())) // TODO: better matching of the QueryStrategy
f.test.tearDown
}
"The index resource" should "insert into the DataSource on HTTP PUT with no range keys" in {
import com.datastax.driver.core.utils.UUIDs
import javax.ws.rs.client.Entity
val f = namedFixture("complete")
f.testModule.setupTestDataTypes
val result: Response = f.test.target(s"/foo").request().post(Entity.json(s"""{
"partition": "somePartition",
"time": "${UUIDs.timeBased}",
"data": ""
}"""))
result.getStatus shouldBe 201
verify(f.testModule.dataSource).insertQuery(anyObject(), anyObject())
f.test.tearDown
}
it should "return 400 bad request on bad JSON input" in {
import javax.ws.rs.client.Entity
val f = namedFixture("complete")
f.testModule.setupTestDataTypes
val result: Response = f.test.target("/foo").request().post(Entity.json("not really json"))
result.getStatus shouldBe 400
}
}
|
FlukeNetworks/aion
|
src/test/scala/com/netscout/aion2/ApplicationSpec.scala
|
Scala
|
apache-2.0
| 9,580 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.nodes.physical.batch
import org.apache.flink.table.planner.plan.nodes.physical.FlinkPhysicalRel
/**
* Base class for batch physical relational expression.
*/
trait BatchPhysicalRel extends FlinkPhysicalRel {
}
|
lincoln-lil/flink
|
flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/plan/nodes/physical/batch/BatchPhysicalRel.scala
|
Scala
|
apache-2.0
| 1,069 |
package com.twitter.finagle.zipkin.core
import com.twitter.conversions.time._
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.tracing.{SpanId, TraceId}
import com.twitter.util.{Future, MockTimer, Time}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class DeadlineSpanMapTest extends FunSuite {
test("DeadlineSpanMap should expire and log spans") {
Time.withCurrentTimeFrozen { tc =>
var spansLogged: Boolean = false
val logger: Seq[Span] => Future[Unit] = { _ =>
spansLogged = true
Future.Done
}
val timer = new MockTimer
val map = new DeadlineSpanMap(logger, 1.milliseconds, NullStatsReceiver, timer)
val traceId = TraceId(Some(SpanId(123)), Some(SpanId(123)), SpanId(123), None)
val span = map.update(traceId)(_.setServiceName("service").setName("name"))
tc.advance(10.seconds) // advance timer
timer.tick() // execute scheduled event
// span must have been logged
assert(spansLogged)
}
}
}
|
koshelev/finagle
|
finagle-zipkin-core/src/test/scala/com/twitter/finagle/zipkin/core/DeadlineSpanMapTest.scala
|
Scala
|
apache-2.0
| 1,108 |
/*
Copyright 2013 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.summingbird.scalding
import com.twitter.scalding.{Args, Hdfs, RichDate, DateParser}
import com.twitter.summingbird.scalding.store.HDFSMetadata
import com.twitter.summingbird.{ Env, Summer, TailProducer, AbstractJob }
import com.twitter.summingbird.batch.{ BatchID, Batcher, Timestamp }
import com.twitter.summingbird.builder.{ SourceBuilder, Reducers, CompletedBuilder }
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.util.GenericOptionsParser
import java.util.TimeZone
/**
* @author Oscar Boykin
* @author Sam Ritchie
* @author Ashu Singhal
*/
// If present, after the groupAndSum we store
// the intermediate key-values for using a store as a service
// in another job.
// Prefer using .write in the -core API.
case class StoreIntermediateData[K, V](sink: ScaldingSink[(K,V)]) extends java.io.Serializable
// TODO (https://github.com/twitter/summingbird/issues/69): Add
// documentation later describing command-line args. start-time,
// batches, reducers, Hadoop-specific arguments and where they go. We
// might pull this argument-parsing out into its own class with all
// arguments defined to make it easier to understand (and add to
// later).
case class ScaldingEnv(override val jobName: String, inargs: Array[String])
extends Env(jobName) {
override lazy val args = {
// pull out any hadoop specific args
Args(new GenericOptionsParser(new Configuration, inargs).getRemainingArgs)
}
def tz = TimeZone.getTimeZone("UTC")
// Summingbird's Scalding mode needs some way to figure out the very
// first batch to grab. This particular implementation gets the
// --start-time option from the command line, asks the batcher for
// the relevant Time, converts that to a Batch and sets this as the
// initial batch to process. All runs after the first batch
// (incremental updates) will use the batch of the previous run as
// the starting batch, rendering this unnecessary.
def startDate: Option[Timestamp] =
args.optional("start-time")
.map(RichDate(_)(tz, DateParser.default).value)
def initialBatch(b: Batcher): Option[BatchID] = startDate.map(b.batchOf(_))
// The number of batches to process in this particular run. Imagine
// a batch size of one hour; For big recomputations, one might want
// to process a day's worth of data with each Hadoop run. Do this by
// setting --batches to "24" until the recomputation's finished.
def batches : Int = args.getOrElse("batches","1").toInt
// The number of reducers to use for the Scalding piece of the
// Summingbird job.
def reducers : Int = args.getOrElse("reducers","20").toInt
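// Putting the options above together (values are illustrative): a run launched with
// --start-time 2013-04-01 --batches 24 --reducers 40
// starts from that date, processes 24 batches in this run and uses 40 reducers for the
// Scalding stages.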
// Used to insert a write just before the store so the store
// can be used as a Service
private def addDeltaWrite(snode: Summer[Scalding, Any, Any],
sink: ScaldingSink[(Any, Any)]): Summer[Scalding, Any, Any] = {
val Summer(prod, store, monoid) = snode
Summer(prod.write(sink), store, monoid)
}
case class Built(platform: Scalding,
toRun: TailProducer[Scalding, (Any, (Option[Any], Any))],
stateFn: (Configuration) => VersionedState)
lazy val build: Built = {
// Calling abstractJob's constructor and binding it to a variable
// forces any side effects caused by that constructor (building up
// of the environment and defining the builder).
val ajob = abstractJob
val scaldingBuilder = builder.asInstanceOf[CompletedBuilder[Scalding, Any, Any]]
val name = args.optional("name").getOrElse(ajob.getClass.getName)
// Perform config transformations before Hadoop job submission
val opts = SourceBuilder.adjust(
scaldingBuilder.opts, scaldingBuilder.id)(_.set(Reducers(reducers)))
// Support for the old setting based writing
val toRun: TailProducer[Scalding, (Any, (Option[Any], Any))] =
(for {
opt <- opts.get(scaldingBuilder.id)
stid <- opt.get[StoreIntermediateData[Any, Any]]
} yield addDeltaWrite(scaldingBuilder.node, stid.sink))
.getOrElse(scaldingBuilder.node)
.name(scaldingBuilder.id)
val scald = Scalding(name, opts)
.withRegistrars(ajob.registrars ++ builder.registrar.getRegistrars.asScala)
.withConfigUpdater {
// Set these before the user settings, so that the user
// can change them if needed
// Make sure we use block compression from mappers to reducers
_.put("mapred.output.compression.type", "BLOCK")
.put("io.compression.codec.lzo.compression.level", "3")
.put("mapred.output.compress", "true")
.put("mapred.compress.map.output", "true")
}
.withConfigUpdater{ c =>
c.updated(ajob.transformConfig(c.toMap))
}
def getStatePath(ss: ScaldingStore[_, _]): Option[String] =
ss match {
case store: VersionedBatchStore[_, _, _, _] => Some(store.rootPath)
case initstore: InitialBatchedStore[_, _] => getStatePath(initstore.proxy)
case _ => None
}
// VersionedState needs this
implicit val batcher = scaldingBuilder.batcher
val stateFn = { (conf: Configuration) =>
val statePath = getStatePath(scaldingBuilder.node.store).getOrElse {
sys.error("You must use a VersionedBatchStore with the old Summingbird API!")
}
VersionedState(HDFSMetadata(conf, statePath), startDate, batches)
};
Built(scald, toRun, stateFn)
}
def run = run(build)
def run(b: Built) {
val Built(scald, toRun, stateFn) = b
val conf = new Configuration
// Add the generic options
new GenericOptionsParser(conf, inargs)
try {
scald.run(stateFn(conf), Hdfs(true, conf), toRun)
}
catch {
case f@FlowPlanException(errs) =>
/* This is generally due to data not being ready, don't give a failed error code */
if(!args.boolean("scalding.nothrowplan")) {
println("use: --scalding.nothrowplan to not give a failing error code in this case")
throw f
}
else {
println("[ERROR]: ========== FlowPlanException =========")
errs.foreach { println(_) }
println("========== FlowPlanException =========")
}
}
}
}
|
sengt/summingbird-batch
|
summingbird-builder/src/main/scala/com/twitter/summingbird/scalding/ScaldingEnv.scala
|
Scala
|
apache-2.0
| 6,805 |
package at.nonblocking.cliwix.integrationtest
import at.nonblocking.cliwix.core.ExecutionContext
import at.nonblocking.cliwix.core.command.{CompanyInsertCommand, SiteInsertCommand}
import at.nonblocking.cliwix.core.handler._
import at.nonblocking.cliwix.core.util.GroupUtil
import at.nonblocking.cliwix.integrationtest.TestEntityFactory._
import at.nonblocking.cliwix.model._
import org.junit.Assert._
import org.junit._
import org.junit.runner.RunWith
import scala.beans.BeanProperty
@RunWith(classOf[CliwixIntegrationTestRunner])
class GroupUtilIntegrationTest {
@BeanProperty
var dispatchHandler: DispatchHandler = _
@BeanProperty
var groupUtil: GroupUtil = _
@Test
@TransactionalRollback
def testSiteGroup() {
val company = createTestCompany()
val insertedCompany = this.dispatchHandler.execute(CompanyInsertCommand(company)).result
ExecutionContext.updateCompanyContext(insertedCompany)
val site = createTestSite()
site.getSiteConfiguration.setDescription("My test site")
val insertedSite = this.dispatchHandler.execute(SiteInsertCommand(insertedCompany.getCompanyId, site)).result
val groupEntityAndId = this.groupUtil.getLiferayEntityForGroupId(insertedSite.getSiteId)
assertEquals(classOf[Site], groupEntityAndId.get._1)
assertEquals(insertedSite.getSiteId, groupEntityAndId.get._2)
}
}
|
nonblocking/cliwix
|
cliwix-test-integration/src/test/scala/at/nonblocking/cliwix/integrationtest/GroupUtilIntegrationTest.scala
|
Scala
|
agpl-3.0
| 1,360 |
package de.ftrossbach.dcos.config.reader.parser
case class Parse(name: String, version: String, json: String)
|
ftrossbach/dcos-config-tool
|
src/main/scala/de/ftrossbach/dcos/config/reader/parser/Common.scala
|
Scala
|
apache-2.0
| 110 |
package com.gw.events.json
import org.joda.time.DateTime
import play.api.libs.json.Reads._
import play.api.libs.json._
import play.api.libs.functional.syntax._
import com.gw.events._
object Parsers {
import Storage._
val eventAttributesReads: Reads[EventAttribute] = (
(JsPath \ "key").read[String] and (JsPath \ "value").read[String]
)(EventAttribute)
val eventReads: Reads[Event] = (
(JsPath \ "id").read[String] and
(JsPath \ "description").read[String] and
(JsPath \ "generatedAt").read[DateTime] and
(JsPath \ "attributes").lazyRead(list[EventAttribute](eventAttributesReads))
)(Event)
val parseAddEvent: String => Option[AddEvent] = {
implicit val addEventReads: Reads[AddEvent] = (
JsPath.read[Event](eventReads) and
(JsPath \ "childEvents").readNullable(list[Event](eventReads))
)(AddEvent)
(data: String) => Json.parse(data).validate[AddEvent] match {
case JsSuccess(event, _) => Some(event)
case JsError(errors) =>
println(errors)
None
}
}
val parseUpdateEvent: String => Option[UpdateEvent] = {
implicit val updateEventReads: Reads[UpdateEvent] = (
(JsPath \ "id").read[String] and
(JsPath \ "childEvents").read(list[Event](eventReads))
)(UpdateEvent)
(data: String) => Json.parse(data).validate[UpdateEvent] match {
case JsSuccess(event, _) => Some(event)
case JsError(errors) =>
println(errors)
None
}
}
implicit val eventAttributeWrites: Writes[EventAttribute] = (
(JsPath \ "key").write[String] and
(JsPath \ "value").write[String]
)(unlift(EventAttribute.unapply))
implicit val eventsWrites: Writes[Event] = (
(JsPath \ "id").write[String] and
(JsPath \ "description").write[String] and
(JsPath \ "generatedAt").write[DateTime] and
(JsPath \ "attributes").write(Writes.seq(eventAttributeWrites))
)(unlift(Event.unapply))
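// Usage sketch (illustrative; the accepted "generatedAt" encoding depends on the implicit
// Reads[DateTime] in scope, epoch milliseconds being the safest choice here):
// parseAddEvent("""{"id":"1","description":"d","generatedAt":1420070400000,"attributes":[]}""")
// // => Some(AddEvent(...)) when the JSON validates, otherwise None (with the errors printed)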
}
|
grzesiekw/events
|
events-store/src/main/scala/com/gw/events/json/Parsers.scala
|
Scala
|
apache-2.0
| 1,963 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class AC45(value: Option[Int]) extends CtBoxIdentifier(name = "Tangible Assets (previous PoA)")
with CtOptionalInteger
with Input
with ValidatableBox[Frs102AccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value, min = 0)
)
}
}
|
liquidarmour/ct-calculations
|
src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC45.scala
|
Scala
|
apache-2.0
| 1,121 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.util
/**
* Utils for table sources and sinks.
*/
object TableConnectorUtil {
/** Returns the table connector name used for log and web UI */
def generateRuntimeName(clazz: Class[_], fields: Array[String]): String = {
val className = clazz.getSimpleName
if (null == fields) {
s"$className(*)"
} else {
s"$className(${fields.mkString(", ")})"
}
}
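// For example, generateRuntimeName(classOf[MyTableSource], Array("a", "b")) returns
// "MyTableSource(a, b)" and a null field array returns "MyTableSource(*)"
// (the class name is purely illustrative).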
}
|
mylog00/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/util/TableConnectorUtil.scala
|
Scala
|
apache-2.0
| 1,225 |
package com.wavesplatform.it.sync.grpc
import com.google.protobuf.ByteString
import com.typesafe.config.Config
import com.wavesplatform.api.grpc.BlockRangeRequest
import com.wavesplatform.block.Block
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.crypto
import com.wavesplatform.it.api.SyncGrpcApi._
import com.wavesplatform.it.sync.activation.ActivationStatusRequest
import com.wavesplatform.it.{GrpcIntegrationSuiteWithThreeAddress, NodeConfigs}
import org.scalatest._
import scala.concurrent.duration._
class BlockV5GrpcSuite
extends freespec.AnyFreeSpec
with ActivationStatusRequest
with OptionValues
with GrpcIntegrationSuiteWithThreeAddress {
override def nodeConfigs: Seq[Config] =
NodeConfigs.newBuilder
.overrideBase(_.quorum(0))
.withDefault(1)
.withSpecial(1, _.nonMiner)
.buildNonConflicting()
"block v5 appears and blockchain grows" - {
"when feature activation happened" in {
sender.waitForHeight(sender.height + 1, 2.minutes)
val currentHeight = sender.height
val blockV5 = sender.blockAt(currentHeight)
val blockV5ById = sender.blockById(ByteString.copyFrom(blockV5.id().arr))
blockV5.header.version shouldBe Block.ProtoBlockVersion
blockV5.id().arr.length shouldBe crypto.DigestLength
blockV5.signature.arr.length shouldBe crypto.SignatureLength
blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
blockV5ById.header.version shouldBe Block.ProtoBlockVersion
blockV5ById.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
assert(blockV5ById.transactionsRootValid(), "transactionsRoot is not valid")
sender.waitForHeight(currentHeight + 1, 2.minutes)
val blockAfterVRFUsing = sender.blockAt(currentHeight + 1)
val blockAfterVRFUsingById = sender.blockById(ByteString.copyFrom(blockAfterVRFUsing.id().arr))
blockAfterVRFUsing.header.version shouldBe Block.ProtoBlockVersion
blockAfterVRFUsing.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
ByteStr(sender.blockHeaderAt(currentHeight + 1).reference.toByteArray) shouldBe blockV5.id()
blockAfterVRFUsingById.header.version shouldBe Block.ProtoBlockVersion
blockAfterVRFUsingById.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
assert(blockAfterVRFUsingById.transactionsRootValid(), "transactionsRoot is not valid")
val blockSeqOfBlocksV5 = sender.blockSeq(currentHeight, currentHeight + 2)
for (blockV5 <- blockSeqOfBlocksV5) {
blockV5.header.version shouldBe Block.ProtoBlockVersion
blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
}
val blockSeqOfBlocksV5ByAddress = sender.blockSeqByAddress(miner.address, currentHeight, currentHeight + 2)
for (blockV5 <- blockSeqOfBlocksV5ByAddress) {
blockV5.header.generator shouldBe miner.keyPair.publicKey
blockV5.header.version shouldBe Block.ProtoBlockVersion
blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
}
val blockSeqOfBlocksV5ByPKGrpc = NodeExtGrpc(sender).blockSeq(
currentHeight,
currentHeight + 2,
BlockRangeRequest.Filter.GeneratorPublicKey(ByteString.copyFrom(miner.keyPair.publicKey.arr))
)
for (blockV5 <- blockSeqOfBlocksV5ByPKGrpc) {
blockV5.header.generator shouldBe miner.keyPair.publicKey
blockV5.header.version shouldBe Block.ProtoBlockVersion
blockV5.header.generationSignature.arr.length shouldBe Block.GenerationVRFSignatureLength
assert(blockV5.transactionsRootValid(), "transactionsRoot is not valid")
}
}
}
}
|
wavesplatform/Waves
|
node-it/src/test/scala/com/wavesplatform/it/sync/grpc/BlockV5GrpcSuite.scala
|
Scala
|
mit
| 4,084 |
/* sbt -- Simple Build Tool
* Copyright 2010 Mark Harrah
*/
package sbt
import java.io.{File, PrintWriter}
final case class GlobalLogging(full: Logger, backed: ConsoleLogger, backing: GlobalLogBacking)
final case class GlobalLogBacking(file: File, last: Option[File], newLogger: (PrintWriter, GlobalLogBacking) => GlobalLogging, newBackingFile: () => File)
{
def shift(newFile: File) = GlobalLogBacking(newFile, Some(file), newLogger, newBackingFile)
def shiftNew() = shift(newBackingFile())
def unshift = GlobalLogBacking(last getOrElse file, None, newLogger, newBackingFile)
}
object GlobalLogBacking
{
def apply(newLogger: (PrintWriter, GlobalLogBacking) => GlobalLogging, newBackingFile: => File): GlobalLogBacking =
GlobalLogBacking(newBackingFile, None, newLogger, newBackingFile _)
}
object GlobalLogging
{
@deprecated("Explicitly specify standard out.", "0.13.0")
def initial(newLogger: (PrintWriter, GlobalLogBacking) => GlobalLogging, newBackingFile: => File): GlobalLogging =
initial(newLogger, newBackingFile, ConsoleLogger.systemOut)
def initial(newLogger: (PrintWriter, GlobalLogBacking) => GlobalLogging, newBackingFile: => File, console: ConsoleOut): GlobalLogging =
{
val log = ConsoleLogger(console)
GlobalLogging(log, log, GlobalLogBacking(newLogger, newBackingFile))
}
}
|
harrah/xsbt
|
util/log/src/main/scala/sbt/GlobalLogging.scala
|
Scala
|
bsd-3-clause
| 1,312 |
package no.mesan.hipchatparse.utils
import org.junit.runner.RunWith
import org.scalatest._
import Tools._
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class ToolsSpec extends FlatSpec with Matchers {
"ToolsSpec.urlParse" should "handle empty strings" in {
urlParse("") shouldBe empty
}
it should "handle a string containing no URLs" in {
val result= urlParse("no URLs")
result should have length 1
result.head should be (false, "no URLs")
}
it should "handle a pure URL" in {
val result= urlParse("http://fagblogg.mesan.no/?s=test")
result should have length 1
result.head should be (true, "http://fagblogg.mesan.no/?s=test")
}
it should "handle a string that ends with a URL" in {
val result= urlParse("search: https://fagblogg.mesan.no/?s=test")
result should have length 2
result.head should be (false, "search: ")
result(1) should be (true, "https://fagblogg.mesan.no/?s=test")
}
it should "handle a string that starts with a URL" in {
val result= urlParse("http://fagblogg.mesan.no/?s=test found")
result should have length 2
result.head should be (true, "http://fagblogg.mesan.no/?s=test")
result(1) should be (false, " found")
}
it should "handle a string that contains a URL" in {
val result= urlParse("search: http://fagblogg.mesan.no/?s=test found")
result should have length 3
result.head should be (false, "search: ")
result(1) should be (true, "http://fagblogg.mesan.no/?s=test")
result(2) should be (false, " found")
}
it should "handle multiple URLs" in {
val result= urlParse("http://fagblogg.mesan.no/?s=test mailto:[email protected] ftp://files/1")
result should have length 5
result.head should be (true, "http://fagblogg.mesan.no/?s=test")
result(1) should be (false, " ")
result(2) should be (true, "mailto:[email protected]")
result(3) should be (false, " ")
result(4) should be (true, "ftp://files/1")
}
}
|
lre-mesan/hipchatparse
|
src/test/scala/no/mesan/hipchatparse/utils/ToolsSpec.scala
|
Scala
|
mit
| 1,997 |
/**
* Copyright 2009 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS-IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.appjet.bodylock;
import java.io.{StringWriter, StringReader}
import net.appjet.common.util.BetterFile;
object compressor {
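/**
* Runs the YUI JavaScript compressor over the given source and returns the minified code.
* Warnings are printed to stderr; a compile error terminates the JVM via System.exit(1).
*/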
def compress(code: String): String = {
import yuicompressor.org.mozilla.javascript.{ErrorReporter, EvaluatorException};
object MyErrorReporter extends ErrorReporter {
def warning(message:String, sourceName:String, line:Int, lineSource:String, lineOffset:Int) {
if (message startsWith "Try to use a single 'var' statement per scope.") return;
if (line < 0) System.err.println("\\n[WARNING] " + message);
else System.err.println("\\n[WARNING] " + line + ':' + lineOffset + ':' + message);
}
def error(message:String, sourceName:String, line:Int, lineSource:String, lineOffset:Int) {
if (line < 0) System.err.println("\\n[ERROR] " + message);
else System.err.println("\\n[ERROR] " + line + ':' + lineOffset + ':' + message);
java.lang.System.exit(1);
}
def runtimeError(message:String, sourceName:String, line:Int, lineSource:String, lineOffset:Int): EvaluatorException = {
error(message, sourceName, line, lineSource, lineOffset);
return new EvaluatorException(message);
}
}
val munge = true;
val verbose = false;
val optimize = true;
val wrap = true;
val compressor = new com.yahoo.platform.yui.compressor.JavaScriptCompressor(new StringReader(code), MyErrorReporter);
val writer = new StringWriter;
compressor.compress(writer, if (wrap) 100 else -1, munge, verbose, true, optimize);
writer.toString;
}
def main(args: Array[String]) {
for (fname <- args) {
try {
val src = BetterFile.getFileContents(fname);
val obfSrc = compress(src);
val fw = (new java.io.FileWriter(new java.io.File(fname)));
fw.write(obfSrc, 0, obfSrc.length);
fw.close();
} catch {
case e => {
println("Failed to compress: "+fname+". Quitting.");
e.printStackTrace();
System.exit(1);
}
}
}
}
}
// ignore these:
// import java.io._;
// def doMake {
// lazy val isEtherPad = (args.length >= 2 && args(1) == "etherpad");
// lazy val isNoHelma = (args.length >= 2 && args(1) == "nohelma");
// def getFile(path:String): String = {
// val builder = new StringBuilder(1000);
// val reader = new BufferedReader(new FileReader(path));
// val buf = new Array[Char](1024);
// var numRead = 0;
// while({ numRead = reader.read(buf); numRead } != -1) {
// builder.append(buf, 0, numRead);
// }
// reader.close;
// return builder.toString;
// }
// def putFile(str: String, path: String): Unit = {
// val writer = new FileWriter(path);
// writer.write(str);
// writer.close;
// }
// def writeToString(func:(Writer=>Unit)): String = {
// val writer = new StringWriter;
// func(writer);
// return writer.toString;
// }
// def compressJS(code: String, wrap: Boolean): String = {
// import org.mozilla.javascript.{ErrorReporter, EvaluatorException};
// object MyErrorReporter extends ErrorReporter {
// def warning(message:String, sourceName:String, line:Int, lineSource:String, lineOffset:Int) {
// if (message startsWith "Try to use a single 'var' statement per scope.") return;
// if (line < 0) System.err.println("\\n[WARNING] " + message);
// else System.err.println("\\n[WARNING] " + line + ':' + lineOffset + ':' + message);
// }
// def error(message:String, sourceName:String, line:Int, lineSource:String, lineOffset:Int) {
// if (line < 0) System.err.println("\\n[ERROR] " + message);
// else System.err.println("\\n[ERROR] " + line + ':' + lineOffset + ':' + message);
// }
// def runtimeError(message:String, sourceName:String, line:Int, lineSource:String, lineOffset:Int): EvaluatorException = {
// error(message, sourceName, line, lineSource, lineOffset);
// return new EvaluatorException(message);
// }
// }
// val munge = true;
// val verbose = false;
// val optimize = true;
// val compressor = new com.yahoo.platform.yui.compressor.JavaScriptCompressor(new StringReader(code), MyErrorReporter);
// return writeToString(compressor.compress(_, if (wrap) 100 else -1, munge, verbose, true, !optimize));
// }
// def compressCSS(code: String, wrap: Boolean): String = {
// val compressor = new com.yahoo.platform.yui.compressor.CssCompressor(new StringReader(code));
// return writeToString(compressor.compress(_, if (wrap) 100 else -1));
// }
// import java.util.regex.{Pattern, Matcher, MatchResult};
// def stringReplace(orig: String, regex: String, groupReferences:Boolean, func:(MatchResult=>String)): String = {
// val buf = new StringBuffer;
// val m = Pattern.compile(regex).matcher(orig);
// while (m.find) {
// var str = func(m);
// if (! groupReferences) {
// str = str.replace("\\\\", "\\\\\\\\").replace("$", "\\\\$");
// }
// m.appendReplacement(buf, str);
// }
// m.appendTail(buf);
// return buf.toString;
// }
// def stringToExpression(str: String): String = {
// val contents = str.replace("\\\\", "\\\\\\\\").replace("'", "\\\\'").replace("<", "\\\\x3c").replace("\\n", "\\\\n").
// replace("\\r", "\\\\n").replace("\\t", "\\\\t");
// return "'"+contents+"'";
// }
// val srcDir = "www";
// val destDir = "build";
// var code = getFile(srcDir+"/ace2_outer.js");
// val useCompression = true; //if (isEtherPad) false else true;
// code = stringReplace(code, "\\\\$\\\\$INCLUDE_([A-Z_]+)\\\\([\\"']([^\\"']+)[\\"']\\\\)", false, (m:MatchResult) => {
// val includeType = m.group(1);
// val path = m.group(2);
// includeType match {
// case "JS" => {
// var subcode = getFile(srcDir+"/"+path);
// subcode = subcode.replaceAll("var DEBUG=true;//\\\\$\\\\$[^\\n\\r]*", "var DEBUG=false;");
// if (useCompression) subcode = compressJS(subcode, false);
// "('<script type=\\"text/javascript\\">//<!--\\\\n'+" + stringToExpression(subcode) +
// "+'//-->\\\\n</script>')";
// }
// case "CSS" => {
// var subcode = getFile(srcDir+"/"+path);
// if (useCompression) subcode = compressCSS(subcode, false);
// "('<style type=\\"text/css\\">'+" + stringToExpression(subcode) + "+'</style>')";
// }
// case "JS_Q" => {
// var subcode = getFile(srcDir+"/"+path);
// subcode = subcode.replaceAll("var DEBUG=true;//\\\\$\\\\$[^\\n\\r]*", "var DEBUG=false;");
// if (useCompression) subcode = compressJS(subcode, false);
// "('(\\\\'<script type=\\"text/javascript\\">//<!--\\\\\\\\n\\\\'+'+" +
// stringToExpression(stringToExpression(subcode)) +
// "+'+\\\\'//-->\\\\\\\\n\\\\\\\\x3c/script>\\\\')')";
// }
// case "CSS_Q" => {
// var subcode = getFile(srcDir+"/"+path);
// if (useCompression) subcode = compressCSS(subcode, false);
// "('(\\\\'<style type=\\"text/css\\">\\\\'+'+" + stringToExpression(stringToExpression(subcode)) +
// "+'+\\\\'\\\\\\\\x3c/style>\\\\')')";
// }
// case ("JS_DEV" | "CSS_DEV") => "''";
// case ("JS_Q_DEV" | "CSS_Q_DEV") => "'\\\\'\\\\''";
// case _ => "$$INCLUDE_"+includeType+"(\\"../www/"+path+"\\")";
// }
// });
// if (useCompression) code = compressJS(code, true);
// putFile(code, destDir+"/ace2bare.js");
// var wrapper = getFile(srcDir+"/ace2_wrapper.js");
// if (useCompression) wrapper = compressJS(wrapper, true);
// putFile(wrapper+"\\n"+code, destDir+"/ace2.js");
// var index = getFile(srcDir+"/index.html");
// index = index.replaceAll("<!--\\\\s*DEBUG\\\\s*-->\\\\s*([\\\\s\\\\S]+?)\\\\s*<!--\\\\s*/DEBUG\\\\s*-->", "");
// index = index.replaceAll("<!--\\\\s*PROD:\\\\s*([\\\\s\\\\S]+?)\\\\s*-->", "$1");
// putFile(index, destDir+"/index.html");
// putFile(getFile(srcDir+"/testcode.js"), destDir+"/testcode.js");
// def copyFile(fromFile: String, toFile: String) {
// if (0 != Runtime.getRuntime.exec("cp "+fromFile+" "+toFile).waitFor) {
// printf("copy failed (%s -> %s).\\n", fromFile, toFile);
// }
// }
// if (isEtherPad) {
// copyFile("build/ace2.js", "../../../etherpad/src/static/js/ace.js");
// val easysync = getFile(srcDir+"/easy_sync.js");
// putFile(easysync, "../../../etherpad/src/etherpad/collab/easysync.js");
// }
// else if (! isNoHelma) {
// copyFile("build/ace2.js", "../helma_apps/appjet/protectedStatic/js/ace.js");
// }
// }
// def remakeLoop {
// def getStamp: Long = {
// return new java.io.File("www").listFiles.
// filter(! _.getName.endsWith("~")).
// filter(! _.getName.endsWith("#")).
// filter(! _.getName.startsWith(".")).map(_.lastModified).
// reduceLeft(Math.max(_:Long,_:Long));
// }
// var madeStamp:Long = 0;
// var errorStamp:Long = 0;
// while (true) {
// Thread.sleep(500);
// val s = getStamp;
// if (s > madeStamp && s != errorStamp) {
// Thread.sleep(1000);
// if (getStamp == s) {
// madeStamp = s;
// print("Remaking... ");
// try {
// doMake;
// println("OK");
// }
// catch { case e => {
// println("ERROR");
// errorStamp = s;
// } }
// }
// }
// }
// }
// if (args.length >= 1 && args(0) == "auto") {
// remakeLoop;
// }
// else {
// doMake;
// }
|
railscook/etherpad
|
infrastructure/net.appjet.bodylock/compressor.scala
|
Scala
|
apache-2.0
| 9,723 |
package eu.inn.binders.value
import java.util.Date
import eu.inn.binders.core.Serializer
import eu.inn.binders.naming.Converter
import scala.language.experimental.macros
class ValueSerializeException(message: String) extends RuntimeException(message)
trait ValueSerializerBaseTrait[C <: Converter] extends Serializer[C] {
def asValue: Value
}
class ValueSerializerBase[C <: Converter, F <: ValueSerializerBaseTrait[C]] extends ValueSerializerBaseTrait[C]{
protected var value: Value = null
protected var map: scala.collection.mutable.Map[String, ValueSerializerBaseTrait[C]] = null
protected var seq: scala.collection.mutable.ArrayBuffer[Value] = null
def getFieldSerializer(fieldName: String): Option[F] = {
if (map == null) {
throw new ValueSerializeException("Can't get field serializer for nonmap: "+ fieldName)
}
val f = createFieldSerializer()
map += fieldName -> f
Some(f)
}
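  /** Factory for nested-field serializers; concrete subclasses (e.g. ValueSerializer below) override this. */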
protected def createFieldSerializer(): F = ???
def writeNull() = writeDynamicObject(Null)
def writeString(value: String) = if(value == null) writeNull() else writeDynamicObject(Text(value))
def writeBoolean(value: Boolean) = writeDynamicObject(Bool(value))
def writeBigDecimal(value: BigDecimal) = if(value == null) writeNull() else writeDynamicObject(Number(value))
def writeInt(value: Int) = writeDynamicObject(Number(value))
def writeLong(value: Long) = writeDynamicObject(Number(value))
def writeFloat(value: Float) = writeDynamicObject(Number(BigDecimal(value)))
def writeDouble(value: Double) = writeDynamicObject(Number(value))
def writeDate(value: Date) = if(value == null) writeNull() else writeDynamicObject(Number(value.getTime))
def writeDynamicObject(value: Value): Unit = {
if (seq != null)
seq += value
else
this.value = value
}
def beginObject(): Unit = {
map = new scala.collection.mutable.HashMap[String, ValueSerializerBaseTrait[C]]()
}
def endObject(): Unit = {
value = Obj(map.toMap.map(kv => (kv._1, kv._2.asValue)))
map = null
}
def beginArray(): Unit = {
seq = new scala.collection.mutable.ArrayBuffer[Value]()
}
def endArray(): Unit = {
value = Lst(seq)
seq = null
}
def asValue: Value = value
}
class ValueSerializer[C <: Converter] extends ValueSerializerBase[C, ValueSerializer[C]]{
protected override def createFieldSerializer(): ValueSerializer[C] = new ValueSerializer[C]()
}
|
InnovaCo/binders
|
src/main/scala/eu/inn/binders/value/ValueSerializer.scala
|
Scala
|
bsd-3-clause
| 2,432 |
package com.classification
import io.prediction.controller.EngineFactory
import io.prediction.controller.Engine
class Query(
val attr0 : Double,
val attr1 : Double,
val attr2 : Double
) extends Serializable
class PredictedResult(
val label: Double
) extends Serializable
class ActualResult(
val label: Double
) extends Serializable
object ClassificationEngine extends EngineFactory {
def apply() = {
new Engine(
classOf[DataSource],
classOf[Preparator],
Map("naive" -> classOf[NaiveBayesAlgorithm]),
classOf[Serving])
}
}
|
PredictionIO/open-academy
|
AminManna/MyClassification/src/main/scala/Engine.scala
|
Scala
|
apache-2.0
| 568 |
package com.rasterfoundry
import com.rasterfoundry.database.filter.Filterables
import com.rasterfoundry.database.meta.RFMeta
import com.rasterfoundry.datamodel.Credential
import com.rasterfoundry.datamodel.ExportAssetType
import cats.data.NonEmptyList
import doobie.Get
import doobie.Meta
import doobie.Put
package object database {
object Implicits extends RFMeta with Filterables {
def fromString(s: String): Credential = {
Credential.apply(Some(s))
}
def toString(c: Credential): String = {
c.token match {
case Some(s) => s
case _ => ""
}
}
implicit val credMeta2: Meta[Credential] =
Meta[String].timap(fromString)(toString)
implicit def nelExportAssetTypesGet(implicit ev: Get[List[String]]) =
ev.map { (list: List[String]) =>
NonEmptyList.fromListUnsafe(list.map {
ExportAssetType.unsafeFromString(_)
})
}
implicit def nelExportAssetTypesPut(implicit ev: Put[List[String]]) =
ev.contramap { (nel: NonEmptyList[ExportAssetType]) =>
nel.toList.map { _.toString }
}
}
}
|
raster-foundry/raster-foundry
|
app-backend/db/src/main/scala/package.scala
|
Scala
|
apache-2.0
| 1,118 |
package com.argcv.valhalla.ml.common
//import spire.syntax.std.{ArrayOps => SArrayOps}
import spire.implicits.cfor
import scala.collection.{ AbstractSeq, Iterable, Seq, Traversable, TraversableOnce, immutable, mutable }
import scala.reflect.ClassTag
/**
*
* @author Yu Jing <[email protected]> on 10/9/16
*/
@SerialVersionUID(1457976278L)
case class Vec(value: Array[Double]) extends Serializable {
//lazy val ops = new SArrayOps[Double](value)
lazy val sum: Double = sum(0, length)
lazy val avg: Double = avg(0, length)
lazy val min: (Int, Double) = min(0, length)
lazy val max: (Int, Double) = max(0, length)
lazy val variance: Double = variance(0, length)
lazy val standardDeviation: Double = standardDeviation(0, length)
lazy val sd: Double = standardDeviation
def min(start: Int, end: Int): (Int, Double) = {
val rs = 0 max start
val re = length min end
var o = rs
var v = value(o)
cfor(rs + 1)(_ < re, _ + 1) { i =>
if (value(i) < v) {
o = i
v = value(i)
}
}
(o, v)
}
def max(start: Int, end: Int): (Int, Double) = {
val rs = 0 max start
val re = length min end
var o = rs
var v = value(o)
cfor(rs + 1)(_ < re, _ + 1) { i =>
if (value(i) > v) {
o = i
v = value(i)
}
}
(o, v)
}
def standardDeviation(start: Int, end: Int): Double = {
Math.sqrt(variance(start, end))
}
def variance(start: Int, end: Int): Double = {
val rs = 0 max start
val re = length min end
val avg = this.avg(rs, re)
var result: Double = 0.0
cfor(rs)(_ < re, _ + 1) { i =>
result += Math.pow(value(i) - avg, 2)
}
result
}
def avg(start: Int, end: Int): Double = {
if (length == 0) 0.0
else sum(start, end) / length
}
def sum(start: Int, end: Int): Double = {
val rs = 0 max start
val re = length min end
var result: Double = 0.0
cfor(rs)(_ < re, _ + 1) { i =>
result += value(i)
}
result
}
def length = value.length
def size = value.length
def map(f: Double => Double) = Vec(value.map(f))
def zipWithIndex: Array[(Double, Int)] = value.zipWithIndex
def foreach[U](f: Double => U) = value.foreach(f)
def hasDefiniteSize: Boolean = true
def exists(p: Double => Boolean): Boolean = value.exists(p)
def find(p: Double => Boolean): Option[Double] = value.find(p)
def nonEmpty: Boolean = !isEmpty
def count(p: Double => Boolean): Int = value.count(p)
def collectFirst[B](pf: PartialFunction[Double, B]): Option[B] = value.collectFirst[B](pf)
def /:[B](z: B)(op: (B, Double) => B): B = foldLeft(z)(op)
def :\\[B](z: B)(op: (Double, B) => B): B = foldRight(z)(op)
def foldRight[B](z: B)(op: (Double, B) => B): B =
value.foldRight[B](z)(op)
def reduceRightOption[B >: Double](op: (Double, B) => B): Option[B] = if (isEmpty) None else Some(reduceRight(op))
def reduceRight[B >: Double](op: (Double, B) => B): B = value.reduceRight[B](op)
def isEmpty: Boolean = value.isEmpty
def reduce[A1 >: Double](op: (A1, A1) => A1): A1 = reduceLeft(op)
def reduceLeft[B >: Double](op: (B, Double) => B): B =
value.reduceLeft[B](op)
def reduceOption[A1 >: Double](op: (A1, A1) => A1): Option[A1] =
reduceLeftOption(op)
def reduceLeftOption[B >: Double](op: (B, Double) => B): Option[B] =
if (isEmpty) None else Some(reduceLeft(op))
def fold[A1 >: Double](z: A1)(op: (A1, A1) => A1): A1 = foldLeft(z)(op)
def foldLeft[B](z: B)(op: (B, Double) => B): B = {
var result = z
value foreach (x => result = op(result, x))
result
}
def aggregate[B](z: => B)(seqop: (B, Double) => B, combop: (B, B) => B): B =
foldLeft(z)(seqop)
def seq: TraversableOnce[Double] = value.seq
def toArray[B >: Double: ClassTag]: Array[B] = value.toArray[B]
def toBuffer[B >: Double]: mutable.Buffer[B] = value.toBuffer[B]
def toTraversable: Traversable[Double] = value.toTraversable
def toList: List[Double] = value.toList
def toIterable: Iterable[Double] = value.toIterable
def toSeq: Seq[Double] = value.toSeq
def toIndexedSeq: immutable.IndexedSeq[Double] = value.toIndexedSeq
def toSet[B >: Double]: immutable.Set[B] = value.toSet
def toVector: scala.Vector[Double] = value.toVector
def toMap[T, U](implicit ev: Double <:< (T, U)): immutable.Map[T, U] = value.toMap[T, U](ev)
def mkString: String = mkString("")
override def toString = s"[${mkString(", ")}]"
def mkString(sep: String): String = mkString("", sep, "")
def mkString(start: String, sep: String, end: String): String = value.mkString(start, sep, end)
def apply(q: Int) = value(q)
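  /** Renders the vector in sparse LIBSVM style: space-separated, 1-based "index:value" pairs for the non-zero entries. */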
def toSVMString = {
val buffer = new StringBuffer()
val re = length
cfor(0)(_ < re, _ + 1) { i =>
if (value(i) != 0) {
buffer.append(s"${i + 1}:${value(i)} ")
}
}
buffer.toString.trim
}
}
object Vec {
//def apply(value: Array[Double]): Vec = new Vec(value)
def apply(value: AbstractSeq[Double]): Vec = new Vec(value.toArray)
}
|
yuikns/valhalla
|
src/main/scala/com/argcv/valhalla/ml/common/Vec.scala
|
Scala
|
mit
| 5,038 |
package mesosphere.marathon.api.v2
import mesosphere.marathon.api.TestAuthFixture
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.state.{ AppDefinition, Group, PathId }
import mesosphere.marathon.test.{ MarathonSpec, Mockito }
import mesosphere.marathon.upgrade.DeploymentManager.DeploymentStepInfo
import mesosphere.marathon.upgrade.{ DeploymentPlan, DeploymentStep }
import mesosphere.marathon.{ MarathonConf, MarathonSchedulerService }
import org.scalatest.{ GivenWhenThen, Matchers }
import scala.concurrent.Future
import scala.collection.immutable.Seq
class DeploymentsResourceTest extends MarathonSpec with GivenWhenThen with Matchers with Mockito {
test("access without authentication is denied") {
Given("An unauthenticated request")
auth.authenticated = false
val req = auth.request
val app = AppDefinition(PathId("/test"))
val targetGroup = Group.empty.copy(apps = Map(app.id -> app))
val deployment = DeploymentStepInfo(DeploymentPlan(Group.empty, targetGroup), DeploymentStep(Seq.empty), 1)
service.listRunningDeployments() returns Future.successful(Seq(deployment))
When("the index is fetched")
val running = deploymentsResource.running(req)
Then("we receive a NotAuthenticated response")
running.getStatus should be(auth.NotAuthenticatedStatus)
When("one app version is fetched")
val cancel = deploymentsResource.cancel(deployment.plan.id, false, req)
Then("we receive a NotAuthenticated response")
cancel.getStatus should be(auth.NotAuthenticatedStatus)
}
test("access without authorization is denied") {
Given("An unauthorized request")
auth.authenticated = true
auth.authorized = false
val req = auth.request
val app = AppDefinition(PathId("/test"))
val targetGroup = Group.empty.copy(apps = Map(app.id -> app))
val deployment = DeploymentStepInfo(DeploymentPlan(Group.empty, targetGroup), DeploymentStep(Seq.empty), 1)
service.listRunningDeployments() returns Future.successful(Seq(deployment))
When("one app version is fetched")
val cancel = deploymentsResource.cancel(deployment.plan.id, false, req)
Then("we receive a not authorized response")
cancel.getStatus should be(auth.UnauthorizedStatus)
}
var service: MarathonSchedulerService = _
var groupManager: GroupManager = _
var config: MarathonConf = _
var deploymentsResource: DeploymentsResource = _
var auth: TestAuthFixture = _
before {
auth = new TestAuthFixture
groupManager = mock[GroupManager]
config = mock[MarathonConf]
service = mock[MarathonSchedulerService]
deploymentsResource = new DeploymentsResource(service, groupManager, auth.auth, auth.auth, config)
}
}
|
timcharper/marathon
|
src/test/scala/mesosphere/marathon/api/v2/DeploymentsResourceTest.scala
|
Scala
|
apache-2.0
| 2,736 |
package instrumentti
import processing.core.PVector
object NodeO {
val nodeDisplayRadius = 10
}
class Node(var location: PVector) extends Element(ElementCollection.getNextNodeId) {
ElementCollection.addNode(this)
val main = InstrumenttiMain
def display = {
main.stroke(0)
main.noFill()
main.ellipse(location, NodeO.nodeDisplayRadius)
}
def displaySelected = {
main.fill(255, 0, 0, 75);
main.noStroke();
main.ellipse(location, NodeO.nodeDisplayRadius + 5);
}
def displayTempConnection = {
main.stroke(255, 0, 0);
main.line(location, InstrumenttiMain.mousePos());
main.noFill();
main.ellipse(location, ElementCreator.defaultLinkLength);
}
def dist(other: Node): Float = dist(other.location)
def dist(other: PVector): Float = location.dist(other)
}
|
transfluxus/PublicInstrumentti
|
src/instrumentti/Node.scala
|
Scala
|
mit
| 820 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.plugin.security
import java.io.Serializable
import java.util.{Map => jMap}
import org.locationtech.geomesa.utils.audit.AuditProvider
import org.springframework.security.core.Authentication
import org.springframework.security.core.context.SecurityContextHolder
import org.springframework.security.core.userdetails.UserDetails
import scala.collection.JavaConverters._
class SpringAuditProvider extends AuditProvider {
override def getCurrentUserId: String = {
val principal = getAuth.flatMap(a => Option(a.getPrincipal)).getOrElse("unknown")
principal match {
case p: UserDetails => p.getUsername
case p => p.toString
}
}
override def getCurrentUserDetails: jMap[AnyRef, AnyRef] = {
getAuth match {
case None => Map.empty[AnyRef, AnyRef].asJava
case Some(auth) =>
Map[AnyRef, AnyRef](
SpringAuditProvider.AUTHORITIES -> auth.getAuthorities,
SpringAuditProvider.DETAILS -> auth.getDetails,
SpringAuditProvider.CREDENTIALS -> auth.getCredentials,
SpringAuditProvider.AUTHENTICATED -> auth.isAuthenticated.asInstanceOf[AnyRef]
).asJava
}
}
override def configure(params: jMap[String, Serializable]): Unit = {}
private def getAuth: Option[Authentication] =
Option(SecurityContextHolder.getContext).flatMap(c => Option(c.getAuthentication))
}
object SpringAuditProvider {
val AUTHORITIES = "authorities"
val DETAILS = "details"
val CREDENTIALS = "credentials"
val AUTHENTICATED = "authenticated"
}
|
tkunicki/geomesa
|
geomesa-accumulo/geomesa-accumulo-gs-plugin/src/main/scala/org/locationtech/geomesa/plugin/security/SpringAuditProvider.scala
|
Scala
|
apache-2.0
| 2,037 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "optionometer"
val appVersion = "1.0.1"
val appDependencies = Seq(
jdbc,
anorm,
"postgresql" % "postgresql" % "9.1-901.jdbc4",
"com.github.nscala-time" %% "nscala-time" % "0.6.0"
)
val main = play.Project(appName, appVersion, appDependencies).settings(
scalaVersion := "2.10.2"
)
}
|
Exupery/optionometer
|
project/Build.scala
|
Scala
|
mit
| 436 |
package com.twitter.finagle.netty3.codec
import com.twitter.finagle.Failure
import com.twitter.io.{Buf, Charsets}
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.handler.codec.embedder.{CodecEmbedderException, DecoderEmbedder, EncoderEmbedder}
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BufCodecTest extends FunSuite {
test("decode") {
val de = new DecoderEmbedder[Buf](new BufCodec)
de.offer(ChannelBuffers.wrappedBuffer("hello".getBytes(Charsets.Utf8)))
assert(de.size() == 1)
assert(de.poll() == Buf.Utf8("hello"))
assert(de.size() == 0)
val exc = intercept[CodecEmbedderException] { de.offer(new Object) }
assert(exc.getCause.isInstanceOf[Failure])
}
test("encode") {
val ee = new EncoderEmbedder[ChannelBuffer](new BufCodec)
ee.offer(Buf.Utf8("hello"))
assert(ee.size == 1)
val cb = ee.poll()
assert(cb.toString(Charsets.Utf8) == "hello")
assert(ee.size == 0)
val cf = ee.getPipeline.getChannel.write(new Object)
assert(cf.getCause.isInstanceOf[Failure])
}
}
|
adriancole/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/netty3/codec/BufCodecTest.scala
|
Scala
|
apache-2.0
| 1,169 |
package io.sqooba.oss.timeseries.immutable
import io.sqooba.oss.timeseries.TimeSeriesTestBench
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
class VectorTimeSeriesSpec extends AnyFlatSpec with Matchers with TimeSeriesTestBench {
"A VectorTimeSeries (unsafe)" should behave like nonEmptyNonSingletonDoubleTimeSeries(
VectorTimeSeries.ofOrderedEntriesUnsafe(_)
)
it should behave like nonEmptyNonSingletonDoubleTimeSeriesWithCompression(
VectorTimeSeries.ofOrderedEntriesUnsafe(_)
)
it should behave like nonEmptyNonSingletonGenericTimeSeries(
VectorTimeSeries.ofOrderedEntriesUnsafe(_)
)
"A VectorTimeSeries (safe)" should behave like nonEmptyNonSingletonDoubleTimeSeries(
VectorTimeSeries.ofOrderedEntriesSafe(_)
)
it should behave like nonEmptyNonSingletonDoubleTimeSeriesWithCompression(
VectorTimeSeries.ofOrderedEntriesSafe(_)
)
it should behave like nonEmptyNonSingletonGenericTimeSeries(
VectorTimeSeries.ofOrderedEntriesSafe(_)
)
"trimRight" should "correctly keep the compressed and continuous flags on continuous entries" in {
// Two contiguous entries
// isContinuous = true, isCompressed = true
val contig2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)))
// Two contiguous entries:
assert(contig2.trimRight(22).isCompressed)
assert(contig2.trimRight(22).isDomainContinuous)
// On the second entry
assert(contig2.trimRight(20).isCompressed)
assert(contig2.trimRight(20).isDomainContinuous)
// On the boundary
assert(contig2.trimRight(11).isCompressed)
assert(contig2.trimRight(11).isDomainContinuous)
// On the first entry
assert(contig2.trimRight(10).isCompressed)
assert(contig2.trimRight(10).isDomainContinuous)
assert(contig2.trimRight(2).isCompressed)
assert(contig2.trimRight(2).isDomainContinuous)
// Before the first entry
// We expect the timeseries to be Empty, which is NOT compressed and NOT continuous
assert(contig2.trimRight(1).isCompressed === false)
assert(contig2.trimRight(0).isDomainContinuous === false)
}
it should "correctly keep the compressed and continuous flags on discontinuous entries" in {
// Two entries with a gap in between
// isContinuous = false, isCompressed = true
val discon2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10)))
assert(discon2.isDomainContinuous === false)
// Right of the domain
assert(discon2.trimRight(22).isCompressed)
assert(discon2.trimRight(22).isDomainContinuous === false)
// On the second entry
assert(discon2.trimRight(20).isCompressed)
assert(discon2.trimRight(20).isDomainContinuous === false)
// On the boundary
assert(discon2.trimRight(11).isCompressed)
// This should now be true because we removed the right part
assert(discon2.trimRight(11).isDomainContinuous)
// On the first entry
assert(discon2.trimRight(10).isCompressed)
assert(discon2.trimRight(10).isDomainContinuous)
assert(discon2.trimRight(2).isCompressed)
assert(discon2.trimRight(2).isDomainContinuous)
// Before the first entry
// We expect the timeseries to be Empty, which is NOT compressed and NOT continuous
assert(discon2.trimRight(1).isCompressed === false)
assert(discon2.trimRight(0).isDomainContinuous === false)
}
"trimLeft" should "correctly keep the compressed and continuous flags on contiguous entries" in {
// Two contiguous entries
// isContinuous = true, isCompressed = true
val contig2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)))
// Two contiguous entries:
// Left of the domain (that should not change the timeseries):
assert(contig2.trimLeft(0).isCompressed)
assert(contig2.trimLeft(0).isDomainContinuous)
// On the second entry
assert(contig2.trimLeft(20).isCompressed)
assert(contig2.trimLeft(20).isDomainContinuous)
// On the boundary
assert(contig2.trimLeft(11).isCompressed)
assert(contig2.trimLeft(11).isDomainContinuous)
// On the first entry
assert(contig2.trimLeft(10).isCompressed)
assert(contig2.trimLeft(10).isDomainContinuous)
assert(contig2.trimLeft(2).isCompressed)
assert(contig2.trimLeft(2).isDomainContinuous)
}
it should "correctly keep the compressed and continuous flags on noncontiguous entries" in {
// Two entries with a gap in between
// isContinuous = false, isCompressed = true
val discon2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10)))
assert(discon2.isDomainContinuous === false)
// Left of the domain (that should not change the timeseries):
assert(discon2.trimLeft(0).isCompressed)
assert(discon2.trimLeft(0).isDomainContinuous === false)
// On the second entry
assert(discon2.trimLeft(20).isCompressed)
assert(discon2.trimLeft(20).isDomainContinuous)
// On the boundary
// Discontinuous entries
assert(discon2.trimLeft(11).isCompressed)
// This should now be true because we removed the left part
assert(discon2.trimLeft(11).isDomainContinuous)
// On the first entry
    // This only removes the left part, so we still have the gap between the first and the second TSEntry
assert(discon2.trimLeft(10).isCompressed)
assert(discon2.trimLeft(10).isDomainContinuous === false)
assert(discon2.trimLeft(2).isCompressed)
assert(discon2.trimLeft(2).isDomainContinuous === false)
}
"trimRightDiscrete" should "correctly keep the compressed and continuous flags on continuous entries" in {
// Two contiguous entries
// isContinuous = true, isCompressed = true
val contig2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)))
// Right of the domain:
assert(contig2.trimRightDiscrete(22, true).isCompressed)
assert(contig2.trimRightDiscrete(22, false).isDomainContinuous)
// On the second entry
assert(contig2.trimRightDiscrete(20, true).isCompressed)
assert(contig2.trimRightDiscrete(20, true).isDomainContinuous)
// On the boundary
assert(contig2.trimRightDiscrete(11, true).isCompressed)
assert(contig2.trimRightDiscrete(11, false).isDomainContinuous)
// On the first entry
assert(contig2.trimRightDiscrete(10, true).isCompressed)
assert(contig2.trimRightDiscrete(10, true).isDomainContinuous)
}
it should "correctly keep the compressed and continuous flags on discontinuous entries" in {
    // Two entries with a gap in between
// isContinuous = false, isCompressed = true
val discon2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10)))
assert(discon2.isDomainContinuous === false)
// Right of the domain:
assert(discon2.trimRightDiscrete(22, true).isCompressed)
assert(discon2.trimRightDiscrete(22, false).isDomainContinuous === false)
// On the second entry
assert(discon2.trimRightDiscrete(20, true).isCompressed)
assert(discon2.trimRightDiscrete(20, true).isDomainContinuous === false)
// On the boundary
assert(discon2.trimRightDiscrete(11, true).isCompressed)
assert(discon2.trimRightDiscrete(11, false).isDomainContinuous)
// On the first entry
assert(discon2.trimRightDiscrete(10, true).isCompressed)
assert(discon2.trimRightDiscrete(10, true).isDomainContinuous)
}
"trimLeftDiscret" should "correctly keep the compressed and continuous flags on contiguous entries" in {
// Two contiguous entries
// isContinuous = true, isCompressed = true
val contig2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(11, 222d, 10)))
// Two contiguous entries:
// Left of the domain (that should not change the timeseries):
assert(contig2.trimLeftDiscrete(0).isCompressed)
assert(contig2.trimLeftDiscrete(0).isDomainContinuous)
// On the second entry
assert(contig2.trimLeftDiscrete(20).isCompressed)
assert(contig2.trimLeftDiscrete(20).isDomainContinuous)
// On the boundary
assert(contig2.trimLeftDiscrete(11).isCompressed)
assert(contig2.trimLeftDiscrete(11).isDomainContinuous)
// On the first entry
assert(contig2.trimLeftDiscrete(10).isCompressed)
assert(contig2.trimLeftDiscrete(10).isDomainContinuous)
assert(contig2.trimLeftDiscrete(2).isCompressed)
assert(contig2.trimLeftDiscrete(2).isDomainContinuous)
}
it should "correctly keep the compressed and continuous flags on noncontiguous entries" in {
// Two entries with a gap in between
// isContinuous = false, isCompressed = true
val discon2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 111d, 10), TSEntry(12, 222d, 10)))
assert(discon2.isDomainContinuous === false)
// Left of the domain (that should not change the timeseries):
assert(discon2.trimLeftDiscrete(0).isCompressed)
assert(discon2.trimLeftDiscrete(0).isDomainContinuous === false)
// On the second entry
assert(discon2.trimLeftDiscrete(20).isCompressed)
assert(discon2.trimLeftDiscrete(20).isDomainContinuous)
// On the boundary
// Discontinuous entries
assert(discon2.trimLeftDiscrete(11).isCompressed)
// This should now be true because we removed the left part
assert(discon2.trimLeftDiscrete(11).isDomainContinuous)
// On the first entry
    // This only removes the left part, so we still have the gap between the first and the second TSEntry
assert(discon2.trimLeftDiscrete(10).isCompressed)
assert(discon2.trimLeftDiscrete(10).isDomainContinuous === false)
assert(discon2.trimLeftDiscrete(2).isCompressed)
assert(discon2.trimLeftDiscrete(2).isDomainContinuous === false)
}
/*
  TODO: This test is not passing yet.
  Here we have three entries; the first two are contiguous and the last one is not. When trimming right at the boundary between the
  second and the third entry, the result SHOULD BE continuous (because we removed the gap).
  This is currently not the case because trimRight uses the `VectorTimeSeries` constructor (or ofOrderedEntriesUnsafe), which
  does not recompute the domain.
  I (@gui) don't know what the performance implications would be of using `ofOrderedEntriesSafe`, or of a helper method that
  determines whether the entries of a VectorTimeSeries are continuous (this should be linear, and an early exit when a gap is
  found would keep it cheap).
  The problem is exactly the same with trimLeft (but with a gap between the first and the second entry, and the second and third entries being contiguous).
*/
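  /*
  A minimal sketch (an assumption, not part of the library) of the helper suggested above: a linear scan over the ordered
  entries that exits early at the first gap. The name `entriesAreContinuous` and the TSEntry field names `timestamp` and
  `validity` are assumed here. trimRight/trimLeft could use something like this to recompute the continuity flag instead
  of inheriting it from the unsafe constructor.

  private def entriesAreContinuous(entries: Seq[TSEntry[_]]): Boolean =
    entries.sliding(2).forall {
      case Seq(a, b) => a.timestamp + a.validity == b.timestamp // contiguous: previous entry ends where the next starts
      case _         => true // zero or one entry is trivially continuous
    }
  */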
/* it should "correctly keep the compressed and continuous flags on discontinuous entries" in {
// Two entries with a gap in between
// isContinuous = false, isCompressed = true
val discon2 = VectorTimeSeries.ofOrderedEntriesSafe(Seq(TSEntry(1, 000d, 7), TSEntry(8, 111d, 3), TSEntry(12, 222d, 10)))
assert(discon2.isDomainContinuous === false)
// On the second boundary
assert(discon2.trimRight(11).isCompressed)
// This should now be true because we removed the right part
assert(discon2.trimRight(11).isDomainContinuous)
} */
}
|
Shastick/scala-timeseries-lib
|
src/test/scala/io/sqooba/oss/timeseries/immutable/VectorTimeSeriesSpec.scala
|
Scala
|
apache-2.0
| 11,363 |
package edu.gemini.p2checker.util
import edu.gemini.p2checker.api.ObservationElements
import edu.gemini.spModel.target.offset.OffsetPosBase
import edu.gemini.spModel.target.offset.OffsetPosList
import edu.gemini.spModel.target.offset.OffsetUtil
import scala.collection.JavaConverters._
/**
* Created with IntelliJ IDEA.
* User: sraaphor
* Date: 4/11/14
* Time: 11:59 AM
* To change this template use File | Settings | File Templates.
*/
object PositionOffsetChecker {
val PROBLEM_CODE = "LGS_MAX_DIST"
val PROBLEM_MESSAGE = "The maximum offset when using LGS is 5 arcminutes. Please reduce the size of the offset or make a separate observation."
def hasBadOffsets(elements: ObservationElements): Boolean = {
    // Note that the bizarre cast is required here in order to make the Array type match OffsetPosList[] in Java.
val lists = OffsetUtil.allOffsetPosLists(elements.getObservationNode).asScala
val arrays = lists.toArray.asInstanceOf[Array[OffsetPosList[_ <: OffsetPosBase]]]
val offsets = OffsetUtil.getOffsets(arrays).asScala.toSet
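    // 5.0 arcminutes is the LGS offset limit referenced in PROBLEM_MESSAGE above.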
offsets.exists(_.distance.toArcmins.toPositive.getMagnitude > 5.0)
}
}
|
arturog8m/ocs
|
bundle/edu.gemini.p2checker/src/main/scala/edu/gemini/p2checker/util/PositionOffsetChecker.scala
|
Scala
|
bsd-3-clause
| 1,152 |
package com.stulsoft.serialization
/**
* @author Yuriy Stul.
*/
case class Message2(content: String)
|
ysden123/poc
|
pserialization/src/main/scala/com/stulsoft/serialization/Message2.scala
|
Scala
|
mit
| 105 |
package weeks.second
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import weeks.second.HighOrderFun._
@RunWith(classOf[JUnitRunner])
class HighOrderFunSuite extends FunSuite {
private def id(x: Int) = x
private def square(x: Int) = x * x
test("sum of numbers from 1 to 100 is 5050") {
assert(sum(id)(1, 100) == 5050)
}
test("ascending and descending order return the same result") {
assert(sum(id)(20, 75) == sum(id)(75, 20))
}
test("sum of squares from 10 to 1 is 385") {
assert(sum(square)(1, 10) == 385)
}
test("product of numbers from 3 to 7 is 2520") {
assert(product(id)(3, 7) == 2520)
}
test("factorial of 5 is 120") {
assert(factorial(5) == 120)
}
test("factorial of 10 is 3628800") {
assert(factorial(10) == 3628800)
}
}
|
unbowed/progfun
|
src/test/scala/weeks/second/HighOrderFunSuite.scala
|
Scala
|
mit
| 847 |
package tests.test
private[test] trait Private {
def /*caret*/foo = ???
}
class Public extends Private
|
triplequote/intellij-scala
|
scala/scala-impl/testdata/rename3/privatePackageClassInheritor/before/tests/test/Private.scala
|
Scala
|
apache-2.0
| 107 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.metadata
import org.apache.flink.table.planner.plan.nodes.physical.batch.BatchPhysicalRank
import org.apache.calcite.sql.fun.SqlStdOperatorTable
import org.apache.calcite.util.ImmutableBitSet
import org.junit.Assert._
import org.junit.Test
class FlinkRelMdPopulationSizeTest extends FlinkRelMdHandlerTestBase {
@Test
def testGetPopulationSizeOnTableScan(): Unit = {
Array(studentLogicalScan, studentBatchScan, studentStreamScan).foreach { scan =>
assertEquals(1.0, mq.getPopulationSize(scan, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(scan, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(scan, ImmutableBitSet.of(1)))
assertEquals(20.0, mq.getPopulationSize(scan, ImmutableBitSet.of(2)))
assertEquals(7.0, mq.getPopulationSize(scan, ImmutableBitSet.of(3)))
assertEquals(35.0, mq.getPopulationSize(scan, ImmutableBitSet.of(4)))
assertEquals(2.0, mq.getPopulationSize(scan, ImmutableBitSet.of(5)))
assertNull(mq.getPopulationSize(scan, ImmutableBitSet.of(6)))
assertEquals(50.0, mq.getPopulationSize(scan, ImmutableBitSet.of(0, 2)))
assertEquals(50.0, mq.getPopulationSize(scan, ImmutableBitSet.of(2, 3)))
assertEquals(14.0, mq.getPopulationSize(scan, ImmutableBitSet.of(3, 5)))
assertEquals(50.0, mq.getPopulationSize(scan, ImmutableBitSet.of(0, 6)))
}
Array(empLogicalScan, empBatchScan, empStreamScan).foreach { scan =>
assertEquals(1.0, mq.getPopulationSize(scan, ImmutableBitSet.of()))
assertNull(mq.getPopulationSize(scan, ImmutableBitSet.of(0)))
}
}
@Test
def testGetPopulationSizeOnValues(): Unit = {
assertEquals(2.0, mq.getPopulationSize(logicalValues, ImmutableBitSet.of()))
assertEquals(2.0, mq.getPopulationSize(logicalValues, ImmutableBitSet.of(0)))
assertEquals(2.0, mq.getPopulationSize(logicalValues, ImmutableBitSet.of(1)))
assertEquals(2.0, mq.getPopulationSize(logicalValues, ImmutableBitSet.of(0, 1)))
assertEquals(1.0, mq.getPopulationSize(emptyValues, ImmutableBitSet.of(0)))
assertEquals(1.0, mq.getPopulationSize(emptyValues, ImmutableBitSet.of(1)))
assertEquals(1.0, mq.getPopulationSize(emptyValues, ImmutableBitSet.of(0, 1)))
}
@Test
def testGetPopulationSizeOnProject(): Unit = {
assertEquals(1.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(1)))
assertEquals(16.43,
mq.getPopulationSize(logicalProject, ImmutableBitSet.of(2)), 1e-2)
assertEquals(6.99,
mq.getPopulationSize(logicalProject, ImmutableBitSet.of(3)), 1e-2)
assertEquals(20.37,
mq.getPopulationSize(logicalProject, ImmutableBitSet.of(4)), 1e-2)
assertEquals(20.37,
mq.getPopulationSize(logicalProject, ImmutableBitSet.of(5)), 1e-2)
assertEquals(35.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(6)))
assertEquals(5.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(7)), 1e-2)
assertEquals(1.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(8)))
assertEquals(1.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(9)))
assertEquals(1.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(10)))
assertEquals(16.43,
mq.getPopulationSize(logicalProject, ImmutableBitSet.of(11)), 1e-2)
assertEquals(50.0, mq.getPopulationSize(logicalProject, ImmutableBitSet.of(0, 1)))
assertEquals(31.24,
mq.getPopulationSize(logicalProject, ImmutableBitSet.of(1, 8)), 1e-2)
}
@Test
def testGetPopulationSizeOnFilter(): Unit = {
assertEquals(1.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(1)))
assertEquals(20.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(2)))
assertEquals(7.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(3)))
assertEquals(35.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(4)))
assertEquals(2.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(5)))
assertNull(mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(6)))
assertEquals(50.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(0, 2)))
assertEquals(50.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(2, 3)))
assertEquals(14.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(3, 5)))
assertEquals(50.0, mq.getPopulationSize(logicalFilter, ImmutableBitSet.of(0, 6)))
}
@Test
def testGetPopulationSizeOnCalc(): Unit = {
assertEquals(1.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(1)))
assertEquals(11.22,
mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(2)), 1e-2)
assertEquals(6.67,
mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(3)), 1e-2)
assertEquals(12.30,
mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(4)), 1e-2)
assertEquals(12.30,
mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(5)), 1e-2)
assertEquals(35.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(6)))
assertEquals(2.5, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(7)), 1e-2)
assertEquals(1.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(8)))
assertEquals(1.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(9)))
assertEquals(1.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(10)))
assertEquals(11.22,
mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(11)), 1e-2)
assertEquals(50.0, mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(0, 1)))
assertEquals(19.64,
mq.getPopulationSize(logicalCalc, ImmutableBitSet.of(1, 8)), 1e-2)
}
@Test
def testGetPopulationSizeOnExpand(): Unit = {
assertEquals(1.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(1)))
assertEquals(20.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(2)))
assertEquals(7.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(3)))
assertEquals(35.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(4)))
assertEquals(2.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(5)))
assertNull(mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(6)))
assertEquals(3.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(7)))
assertEquals(50.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(0, 1)))
assertEquals(14.0, mq.getPopulationSize(logicalExpand, ImmutableBitSet.of(3, 5)))
}
@Test
def testGetPopulationSizeOnExchange(): Unit = {
Array(batchExchange, streamExchange).foreach {
exchange =>
assertEquals(1.0, mq.getPopulationSize(exchange, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(1)))
assertEquals(20.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(2)))
assertEquals(7.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(3)))
assertEquals(35.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(4)))
assertEquals(2.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(5)))
assertNull(mq.getPopulationSize(exchange, ImmutableBitSet.of(6)))
assertEquals(50.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(0, 2)))
assertEquals(50.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(2, 3)))
assertEquals(14.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(3, 5)))
assertEquals(50.0, mq.getPopulationSize(exchange, ImmutableBitSet.of(0, 6)))
}
}
@Test
def testGetPopulationSizeOnRank(): Unit = {
Array(logicalRank, flinkLogicalRank, batchLocalRank, batchGlobalRank, streamRank).foreach {
rank =>
assertEquals(1.0, mq.getPopulationSize(rank, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(rank, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(rank, ImmutableBitSet.of(1)))
assertEquals(20.0, mq.getPopulationSize(rank, ImmutableBitSet.of(2)))
assertEquals(7.0, mq.getPopulationSize(rank, ImmutableBitSet.of(3)))
assertEquals(35.0, mq.getPopulationSize(rank, ImmutableBitSet.of(4)))
assertEquals(2.0, mq.getPopulationSize(rank, ImmutableBitSet.of(5)))
assertNull(mq.getPopulationSize(rank, ImmutableBitSet.of(6)))
assertEquals(50.0, mq.getPopulationSize(rank, ImmutableBitSet.of(0, 2)))
rank match {
case r: BatchPhysicalRank =>
// local batch rank does not output rank func
// TODO re-check this
if (r.isGlobal) {
assertEquals(1.0, mq.getPopulationSize(rank, ImmutableBitSet.of(7)))
assertEquals(1.0, mq.getPopulationSize(rank, ImmutableBitSet.of(0, 7)))
assertEquals(1.0, mq.getPopulationSize(rank, ImmutableBitSet.of(3, 7)))
}
case _ =>
assertEquals(5.0, mq.getPopulationSize(rank, ImmutableBitSet.of(7)))
assertEquals(5.0, mq.getPopulationSize(rank, ImmutableBitSet.of(0, 7)))
assertEquals(5.0, mq.getPopulationSize(rank, ImmutableBitSet.of(3, 7)))
}
}
}
@Test
def testGetPopulationSizeOnSort(): Unit = {
Array(logicalSort, flinkLogicalSort, batchSort, streamSort,
logicalLimit, flinkLogicalLimit, batchLimit, batchLocalLimit, batchGlobalLimit, streamLimit,
logicalSortLimit, flinkLogicalSortLimit, batchSortLimit, batchLocalSortLimit,
batchGlobalSortLimit, streamSortLimit).foreach {
sort =>
assertEquals(1.0, mq.getPopulationSize(sort, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(sort, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(sort, ImmutableBitSet.of(1)))
assertEquals(20.0, mq.getPopulationSize(sort, ImmutableBitSet.of(2)))
assertEquals(7.0, mq.getPopulationSize(sort, ImmutableBitSet.of(3)))
assertEquals(35.0, mq.getPopulationSize(sort, ImmutableBitSet.of(4)))
assertEquals(2.0, mq.getPopulationSize(sort, ImmutableBitSet.of(5)))
assertNull(mq.getPopulationSize(sort, ImmutableBitSet.of(6)))
assertEquals(50.0, mq.getPopulationSize(sort, ImmutableBitSet.of(0, 2)))
assertEquals(50.0, mq.getPopulationSize(sort, ImmutableBitSet.of(2, 3)))
assertEquals(14.0, mq.getPopulationSize(sort, ImmutableBitSet.of(3, 5)))
assertEquals(50.0, mq.getPopulationSize(sort, ImmutableBitSet.of(0, 6)))
}
}
@Test
def testGetPopulationSizeOnAggregate(): Unit = {
Array(logicalAgg, flinkLogicalAgg, batchGlobalAggWithLocal, batchGlobalAggWithoutLocal,
batchLocalAgg).foreach { agg =>
assertEquals(1.0, mq.getPopulationSize(agg, ImmutableBitSet.of()))
assertEquals(7.0, mq.getPopulationSize(agg, ImmutableBitSet.of(0)))
assertEquals(2.0, mq.getPopulationSize(agg, ImmutableBitSet.of(1)))
assertEquals(2.0, mq.getPopulationSize(agg, ImmutableBitSet.of(2)))
assertEquals(3.5, mq.getPopulationSize(agg, ImmutableBitSet.of(3)))
assertEquals(3.5, mq.getPopulationSize(agg, ImmutableBitSet.of(4)))
assertEquals(10.0, mq.getPopulationSize(agg, ImmutableBitSet.of(5)))
assertEquals(7.0, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 1)))
assertEquals(7.0, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 5)))
}
}
@Test
def testGetPopulationSizeOnWindowAgg(): Unit = {
Array(logicalWindowAgg, flinkLogicalWindowAgg, batchGlobalWindowAggWithoutLocalAgg,
batchGlobalWindowAggWithLocalAgg).foreach { agg =>
assertEquals(30D, mq.getPopulationSize(agg, ImmutableBitSet.of(0)))
assertEquals(5D, mq.getPopulationSize(agg, ImmutableBitSet.of(1)))
assertEquals(50D, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 1)))
assertEquals(50D, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 2)))
assertEquals(null, mq.getPopulationSize(agg, ImmutableBitSet.of(3)))
assertEquals(null, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 3)))
assertEquals(null, mq.getPopulationSize(agg, ImmutableBitSet.of(1, 3)))
assertEquals(null, mq.getPopulationSize(agg, ImmutableBitSet.of(2, 3)))
}
assertEquals(30D, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(0)))
assertEquals(5D, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(1)))
assertEquals(null, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(2)))
assertEquals(50D, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(0, 1)))
assertEquals(null, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(0, 2)))
assertEquals(10D, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(3)))
assertEquals(50D, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(0, 3)))
assertEquals(50D, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(1, 3)))
assertEquals(null, mq.getPopulationSize(batchLocalWindowAgg, ImmutableBitSet.of(2, 3)))
Array(logicalWindowAggWithAuxGroup, flinkLogicalWindowAggWithAuxGroup,
batchGlobalWindowAggWithoutLocalAggWithAuxGroup,
batchGlobalWindowAggWithLocalAggWithAuxGroup).foreach { agg =>
assertEquals(50D, mq.getPopulationSize(agg, ImmutableBitSet.of(0)))
assertEquals(48D, mq.getPopulationSize(agg, ImmutableBitSet.of(1)))
assertEquals(10D, mq.getPopulationSize(agg, ImmutableBitSet.of(2)))
assertEquals(null, mq.getPopulationSize(agg, ImmutableBitSet.of(3)))
assertEquals(50D, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 1)))
assertEquals(50D, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 1, 2)))
      assertEquals(null, mq.getPopulationSize(agg, ImmutableBitSet.of(0, 1, 3)))
}
assertEquals(50D, mq.getPopulationSize(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(0)))
assertNull(mq.getPopulationSize(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(1)))
assertEquals(48D, mq.getPopulationSize(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(2)))
assertEquals(10D, mq.getPopulationSize(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(3)))
assertNull(mq.getPopulationSize(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(0, 1)))
assertEquals(50D,
mq.getPopulationSize(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(0, 2)))
assertNull(mq.getPopulationSize(batchLocalWindowAggWithAuxGroup, ImmutableBitSet.of(0, 1, 3)))
}
@Test
def testGetPopulationSizeOnOverAgg(): Unit = {
Array(flinkLogicalOverAgg, batchOverAgg).foreach { agg =>
assertEquals(1.0, mq.getPopulationSize(agg, ImmutableBitSet.of()))
assertEquals(50.0, mq.getPopulationSize(agg, ImmutableBitSet.of(0)))
assertEquals(48.0, mq.getPopulationSize(agg, ImmutableBitSet.of(1)))
assertEquals(20.0, mq.getPopulationSize(agg, ImmutableBitSet.of(2)))
assertEquals(7.0, mq.getPopulationSize(agg, ImmutableBitSet.of(3)))
(4 until 11).foreach { idx =>
assertNull(mq.getPopulationSize(agg, ImmutableBitSet.of(idx)))
}
assertNull(mq.getPopulationSize(agg, ImmutableBitSet.of(0, 6)))
}
}
@Test
def testGetPopulationSizeOnJoin(): Unit = {
assertEquals(1.0, mq.getPopulationSize(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of()))
assertEquals(49.999938,
mq.getPopulationSize(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0)), 1e-6)
assertEquals(49.999998,
mq.getPopulationSize(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(1)), 1e-6)
assertEquals(50.0,
mq.getPopulationSize(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(1, 5)), 1e-6)
assertEquals(49.999991,
mq.getPopulationSize(logicalInnerJoinOnUniqueKeys, ImmutableBitSet.of(0, 6)), 1e-6)
assertEquals(1.0, mq.getPopulationSize(logicalLeftJoinNotOnUniqueKeys, ImmutableBitSet.of()))
assertEquals(2.0E7, mq.getPopulationSize(logicalLeftJoinNotOnUniqueKeys, ImmutableBitSet.of(0)))
assertEquals(5.0569644545E8,
mq.getPopulationSize(logicalLeftJoinNotOnUniqueKeys, ImmutableBitSet.of(1)), 1e-2)
assertEquals(8.0E8,
mq.getPopulationSize(logicalLeftJoinNotOnUniqueKeys, ImmutableBitSet.of(1, 5)), 1e-2)
assertEquals(7.9377199253E8,
mq.getPopulationSize(logicalLeftJoinNotOnUniqueKeys, ImmutableBitSet.of(0, 6)), 1e-2)
assertEquals(1.0,
mq.getPopulationSize(logicalRightJoinOnLHSUniqueKeys, ImmutableBitSet.of()))
assertEquals(1.264241136E7,
mq.getPopulationSize(logicalRightJoinOnLHSUniqueKeys, ImmutableBitSet.of(0)), 1e-2)
assertEquals(1.975207027E7,
mq.getPopulationSize(logicalRightJoinOnLHSUniqueKeys, ImmutableBitSet.of(1)), 1e-2)
assertEquals(2.0E7,
mq.getPopulationSize(logicalRightJoinOnLHSUniqueKeys, ImmutableBitSet.of(1, 5)), 1e-2)
assertEquals(1.999606902E7,
mq.getPopulationSize(logicalRightJoinOnLHSUniqueKeys, ImmutableBitSet.of(0, 6)), 1e-2)
assertEquals(1.0, mq.getPopulationSize(logicalFullJoinWithoutEquiCond, ImmutableBitSet.of()))
assertEquals(2.0E7, mq.getPopulationSize(logicalFullJoinWithoutEquiCond, ImmutableBitSet.of(0)))
assertEquals(8.0E8, mq.getPopulationSize(logicalFullJoinWithoutEquiCond, ImmutableBitSet.of(1)))
assertEquals(8.0E15,
mq.getPopulationSize(logicalFullJoinWithoutEquiCond, ImmutableBitSet.of(1, 5)))
assertEquals(5.112E10,
mq.getPopulationSize(logicalFullJoinWithoutEquiCond, ImmutableBitSet.of(0, 6)))
assertEquals(1.0, mq.getPopulationSize(logicalSemiJoinOnUniqueKeys, ImmutableBitSet.of()))
assertEquals(2.0E7, mq.getPopulationSize(logicalSemiJoinOnLHSUniqueKeys, ImmutableBitSet.of(0)))
assertEquals(8.0E8, mq.getPopulationSize(logicalSemiJoinNotOnUniqueKeys, ImmutableBitSet.of(1)))
assertEquals(8.0E8, mq.getPopulationSize(logicalSemiJoinOnUniqueKeys, ImmutableBitSet.of(0, 1)))
assertEquals(8.0E8,
mq.getPopulationSize(logicalSemiJoinNotOnUniqueKeys, ImmutableBitSet.of(0, 2)))
assertEquals(1.0, mq.getPopulationSize(logicalAntiJoinNotOnUniqueKeys, ImmutableBitSet.of()))
assertEquals(2.0E7, mq.getPopulationSize(logicalAntiJoinOnUniqueKeys, ImmutableBitSet.of(0)))
assertEquals(8.0E8, mq.getPopulationSize(logicalAntiJoinOnLHSUniqueKeys, ImmutableBitSet.of(1)))
assertEquals(8.0E8, mq.getPopulationSize(logicalAntiJoinOnUniqueKeys, ImmutableBitSet.of(0, 1)))
assertEquals(8.0E8,
mq.getPopulationSize(logicalAntiJoinNotOnUniqueKeys, ImmutableBitSet.of(0, 2)))
}
@Test
def testGetPopulationSizeOnUnion(): Unit = {
    Array(logicalUnion, logicalUnionAll).foreach { union =>
      assertEquals(2.0, mq.getPopulationSize(union, ImmutableBitSet.of()))
      assertEquals(4.0E7, mq.getPopulationSize(union, ImmutableBitSet.of(0)))
      assertEquals(8.00002556E8, mq.getPopulationSize(union, ImmutableBitSet.of(1)))
      assertEquals(2263.0, mq.getPopulationSize(union, ImmutableBitSet.of(2)))
      assertEquals(8.2E8, mq.getPopulationSize(union, ImmutableBitSet.of(0, 2)))
    }
}
@Test
def testGetPopulationSizeOnDefault(): Unit = {
assertNull(mq.getPopulationSize(testRel, ImmutableBitSet.of()))
assertNull(mq.getPopulationSize(testRel, ImmutableBitSet.of(1)))
}
@Test
def testGetPopulationSizeOnLargeDomainSize(): Unit = {
relBuilder.clear()
val rel = relBuilder
.scan("MyTable1")
.project(
relBuilder.field(0),
relBuilder.field(1),
relBuilder.call(SqlStdOperatorTable.SUBSTRING, relBuilder.field(3), relBuilder.literal(10)))
.build()
assertEquals(
7.999999964933156E8,
mq.getPopulationSize(rel, ImmutableBitSet.of(0, 1, 2)),
1e-2)
}
}
|
tillrohrmann/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/metadata/FlinkRelMdPopulationSizeTest.scala
|
Scala
|
apache-2.0
| 21,124 |
package org.airpnp.ui
import javax.swing.JPanel
import java.awt.BorderLayout
import org.airpnp.upnp.Device
import javax.swing.JLabel
import javax.swing.JTree
import javax.swing.tree.TreeModel
import javax.swing.JSplitPane
import javax.swing.tree.DefaultTreeModel
import java.awt.Dimension
import javax.swing.event.TreeModelListener
import java.awt.Container
import javax.swing.event.TreeSelectionListener
import javax.swing.event.TreeSelectionEvent
object AirPnpPanel {
val PREFERRED_HEIGHT = 600
val PREFERRED_WIDTH = 800
}
class AirPnpPanel(devices: Seq[Device]) extends JPanel {
setLayout(new BorderLayout)
add({
val right = new JPanel()
right.setLayout(new BorderLayout())
val left = new JTree(createTreeModel)
left.addTreeSelectionListener(new SelectionListener(right))
val pane = new JSplitPane(JSplitPane.HORIZONTAL_SPLIT, left, right)
pane.setResizeWeight(0.3)
pane
}, BorderLayout.CENTER)
setPreferredSize(new Dimension(AirPnpPanel.PREFERRED_WIDTH, AirPnpPanel.PREFERRED_HEIGHT))
private def createTreeModel = new DefaultTreeModel(new RootNode(devices))
private class SelectionListener(c: Container) extends TreeSelectionListener {
def valueChanged(e: TreeSelectionEvent) {
c.removeAll()
val path = e.getNewLeadSelectionPath
if (path != null) {
path.getLastPathComponent match {
case node: MasterNode =>
val page = node.getDetailPage
if (page != null) {
c.add(page, BorderLayout.NORTH)
}
case _ =>
}
}
c.revalidate()
c.repaint()
}
}
}
|
provegard/ScAirPnp
|
src/main/scala/org/airpnp/ui/AirPnpPanel.scala
|
Scala
|
mit
| 1,626 |
package levd
import leon._
import mem._
import lang._
import annotation._
import instrumentation._
import invariant._
import collection._
object LevenshteinDistance {
@ignore
var xstring = Array[BigInt]()
@ignore
var ystring = Array[BigInt]()
@extern
def lookup(i: BigInt, j: BigInt) = {
(xstring(i.toInt), ystring(j.toInt))
} ensuring (_ => steps <= 1)
  // deps and its lemmas
def deps(i: BigInt, j: BigInt): Boolean = {
require(i >= 0 && j >= 0)
cached(levDist(i, j)) &&
(if (i <= 0 && j <= 0) true
else if (i <= 0) deps(i, j - 1)
else if (j <= 0) deps(i - 1, j)
else deps(i - 1, j) && deps(i, j - 1))
}
@invisibleBody
@traceInduct
def depsMono(i: BigInt, j: BigInt, st1: Set[Fun[BigInt]], st2: Set[Fun[BigInt]]) = {
require(i >= 0 && j >= 0)
(st1.subsetOf(st2) && (deps(i, j) in st1)) ==> (deps(i, j) in st2)
} holds
@traceInduct
def depsLem(i: BigInt, j: BigInt, m: BigInt, n: BigInt) = {
require(i >= 0 && j >= 0 && m >= 0 && n >= 0)
(i <= m && j <= n && deps(m, n)) ==> deps(i, j)
} holds
@invstate
@memoize
@invisibleBody
def levDist(i: BigInt, j: BigInt): BigInt = {
require((i >=0 && j >= 0) && (i == 0 || deps(i - 1, j)) && (j == 0 || deps(i, j-1)))
if (i == 0) j
else if(j == 0) i
else {
val (xi, yj) = lookup(i, j)
val dprev = levDist(i - 1, j - 1)
val a1 = if (xi == yj) dprev else dprev + 1
val a2 = {
val s1 = levDist(i - 1, j)
val s2 = levDist(i, j - 1)
if (s1 >= s2) s1 else s2
}
if (a1 >= a2) a1 else a2
}
} ensuring (_ => steps <= ?)
@invisibleBody
def invoke(i: BigInt, j: BigInt, n: BigInt) = {
require((i >=0 && j >= 0 && n >= j) && (i == 0 || deps(i - 1, j)) && (j == 0 || deps(i, j-1)))
levDist(i, j)
} ensuring (res => {
val in = inSt[BigInt]
val out = outSt[BigInt]
(i == 0 || (depsMono(i - 1, j, in, out) && depsMono(i - 1, n, in, out))) &&
(j == 0 || depsMono(i, j - 1, in, out)) &&
deps(i, j) &&
steps <= ?
})
/**
* Given a m x n DP problem, the following function solves the subproblems by traversing the problem space
* from right to left, and bottom to top.
* @param m - number of rows remaining
* @param n - max. number of columns
* @param j - number of columns remaining (initially set to n)
   * @return a list of solutions for each sub-problem (the size of the resulting list will be quadratic)
*/
def bottomup(m: BigInt, j: BigInt, n: BigInt): List[BigInt] = {
require(0 <= m && 0 <= j && j <= n)
if (m == 0 && j == 0) {
Cons(invoke(m, j, n), Nil[BigInt]())
}
else if(j == 0) {
val tail = bottomup(m - 1, n, n)
Cons(invoke(m, j, n), tail)
}
else {
val tail = bottomup(m, j - 1, n)
Cons(invoke(m, j, n), tail)
}
} ensuring {_ =>
bottomUpPost(m, j, n) &&
steps <= ? * (m * n) + ? * m + ? * j + ?
}
@invisibleBody
def bottomUpPost(m: BigInt, j: BigInt, n: BigInt): Boolean = {
require(m >= 0 && n >= j && j >= 0)
(m == 0 || (deps(m - 1, n) && (j == n || depsLem(m - 1, j + 1, m - 1, n)))) && deps(m, j) &&
depsLem(m, 0, m, j)
}
def levDistSols(m: BigInt, n: BigInt): List[BigInt] = {
require(0 <= m && 0 <= n)
bottomup(m, n, n)
} ensuring(_ => steps <= ? * (m * n) + ? * m + ? * n + ?)
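  // Illustration (an assumption about one concrete run, not part of the verified benchmark): levDistSols(1, 1) calls
  // bottomup(1, 1, 1), which recurses through bottomup(1, 0, 1), bottomup(0, 1, 1) and bottomup(0, 0, 1) before any
  // invoke runs, so the sub-problems are solved bottom-up in the order (0,0), (0,1), (1,0), (1,1) and the returned
  // list holds the solutions most-recent-first.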
}
|
epfl-lara/leon
|
testcases/web/memresources/02_LevenshteinDistance.scala
|
Scala
|
gpl-3.0
| 3,394 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package expr
import com.intellij.lang.ASTNode
import org.jetbrains.plugins.scala.lang.psi.api.expr._
/**
* @author Alexander Podkhalyuzin
* Date: 06.03.2008
*/
class ScMethodCallImpl(node: ASTNode) extends ScExpressionImplBase(node) with ScMethodCall {
def getInvokedExpr: ScExpression = findChildByClassScala(classOf[ScExpression])
def argumentExpressions: Seq[ScExpression] = if (args != null) args.exprs else Nil
override def getEffectiveInvokedExpr: ScExpression = {
findChildByClassScala(classOf[ScExpression]) match {
case x: ScParenthesisedExpr => x.expr.getOrElse(x)
case x => x
}
}
override def argumentExpressionsIncludeUpdateCall: Seq[ScExpression] = {
updateExpression() match {
case Some(expr) => argumentExpressions ++ Seq(expr)
case _ => argumentExpressions
}
}
override def toString: String = "MethodCall"
}
|
triplequote/intellij-scala
|
scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScMethodCallImpl.scala
|
Scala
|
apache-2.0
| 972 |
package com.ignition.frame
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class KafkaInputSpec extends FrameFlowSpecification {
"KafkaInput" should {
"construct with defaults" in {
val step = KafkaInput("zk", "topic", "group")
step.zkUrl === "zk"
step.topic === "topic"
step.groupId === "group"
step.kafkaProperties must beEmpty
step.maxRows must beSome(100)
step.maxTimeout must beSome(60000)
step.field === "payload"
}
"build with helpers" in {
val step = KafkaInput("zk", "topic", "group") properties ("a" -> "b") maxRows (10) noMaxTimeout
step.zkUrl === "zk"
step.topic === "topic"
step.groupId === "group"
step.kafkaProperties === Map("a" -> "b")
step.maxRows must beSome(10)
step.maxTimeout must beNone
step.field === "payload"
}
"fail without max- condition" in {
(KafkaInput("zk", "topic", "group").noMaxRows.noMaxTimeout) must throwA[IllegalArgumentException]
}
"save to/load from xml" in {
val k1 = KafkaInput("zk", "topic", "group") properties ("a" -> "b") maxRows (10) noMaxTimeout ()
k1.toXml must ==/(
<kafka-input maxRows="10">
<zkUrl>zk</zkUrl>
<topic>topic</topic>
<groupId>group</groupId>
<field>payload</field>
<kafkaProperties>
<property name="a">b</property>
</kafkaProperties>
</kafka-input>)
KafkaInput.fromXml(k1.toXml) === k1
val k2 = KafkaInput("zk", "topic", "group") maxTimeout (200) field ("data")
k2.toXml must ==/(
<kafka-input maxRows="100" maxTimeout="200">
<zkUrl>zk</zkUrl>
<topic>topic</topic>
<groupId>group</groupId>
<field>data</field>
</kafka-input>)
KafkaInput.fromXml(k2.toXml) === k2
}
"save to/load from json" in {
import org.json4s.JsonDSL._
val k1 = KafkaInput("zk", "topic", "group") properties ("a" -> "b") maxRows (10) noMaxTimeout ()
k1.toJson === ("tag" -> "kafka-input") ~ ("topic" -> "topic") ~ ("zkUrl" -> "zk") ~
("groupId" -> "group") ~ ("field" -> "payload") ~ ("maxRows" -> 10) ~ ("maxTimeout" -> jNone) ~
("kafkaProperties" -> List(("name" -> "a") ~ ("value" -> "b")))
KafkaInput.fromJson(k1.toJson) === k1
val k2 = KafkaInput("zk", "topic", "group") maxTimeout (200) field ("data")
k2.toJson === ("tag" -> "kafka-input") ~ ("topic" -> "topic") ~ ("zkUrl" -> "zk") ~
("groupId" -> "group") ~ ("field" -> "data") ~ ("maxRows" -> 100) ~ ("maxTimeout" -> 200) ~
("kafkaProperties" -> jNone)
KafkaInput.fromJson(k2.toJson) === k2
}
"be unserializable" in assertUnserializable(KafkaInput("zk", "topic", "group"))
}
}
|
uralian/ignition
|
src/test/scala/com/ignition/frame/KafkaInputSpec.scala
|
Scala
|
apache-2.0
| 2,835 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.serializer
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.nio.ByteBuffer
import com.esotericsoftware.kryo.io.{Output, Input}
import org.apache.avro.{SchemaBuilder, Schema}
import org.apache.avro.generic.GenericData.Record
import org.apache.spark.{SparkFunSuite, SharedSparkContext}
class GenericAvroSerializerSuite extends SparkFunSuite with SharedSparkContext {
conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
val schema : Schema = SchemaBuilder
.record("testRecord").fields()
.requiredString("data")
.endRecord()
val record = new Record(schema)
record.put("data", "test data")
test("schema compression and decompression") {//模式压缩与解压缩
val genericSer = new GenericAvroSerializer(conf.getAvroSchema)
assert(schema === genericSer.decompress(ByteBuffer.wrap(genericSer.compress(schema))))
}
test("record serialization and deserialization") {//记录序列化和反序列化
val genericSer = new GenericAvroSerializer(conf.getAvroSchema)
val outputStream = new ByteArrayOutputStream()
val output = new Output(outputStream)
genericSer.serializeDatum(record, output)
output.flush()
output.close()
val input = new Input(new ByteArrayInputStream(outputStream.toByteArray))
assert(genericSer.deserializeDatum(input) === record)
}
  // uses schema fingerprint to decrease message size
test("uses schema fingerprint to decrease message size") {
val genericSerFull = new GenericAvroSerializer(conf.getAvroSchema)
val output = new Output(new ByteArrayOutputStream())
val beginningNormalPosition = output.total()
genericSerFull.serializeDatum(record, output)
output.flush()
val normalLength = output.total - beginningNormalPosition
conf.registerAvroSchemas(schema)
val genericSerFinger = new GenericAvroSerializer(conf.getAvroSchema)
val beginningFingerprintPosition = output.total()
genericSerFinger.serializeDatum(record, output)
val fingerprintLength = output.total - beginningFingerprintPosition
assert(fingerprintLength < normalLength)
}
test("caches previously seen schemas") {//缓存之前模式
val genericSer = new GenericAvroSerializer(conf.getAvroSchema)
val compressedSchema = genericSer.compress(schema)
    val decompressedSchema = genericSer.decompress(ByteBuffer.wrap(compressedSchema))
    assert(compressedSchema.eq(genericSer.compress(schema)))
    assert(decompressedSchema.eq(genericSer.decompress(ByteBuffer.wrap(compressedSchema))))
}
}
|
tophua/spark1.52
|
core/src/test/scala/org/apache/spark/serializer/GenericAvroSerializerSuite.scala
|
Scala
|
apache-2.0
| 3,380 |
package com.blinkbox.books.marvin.ui
import akka.actor.ActorRefFactory
import com.blinkbox.books.config.ApiConfig
import com.blinkbox.books.logging.DiagnosticExecutionContext
import com.blinkbox.books.spray.v2.Implicits.throwableMarshaller
import com.blinkbox.books.spray.{Directives => CommonDirectives, _}
import com.typesafe.scalalogging.StrictLogging
import org.slf4j.LoggerFactory
import spray.http.HttpHeaders._
import spray.http.StatusCodes
import spray.routing.directives.FileAndResourceDirectives
import spray.routing.{HttpService, Route}
import spray.http.Uri.Path
trait PublicApiRoutes extends HttpService {
def mappings: Route
}
class PublicApi(config: ApiConfig)
(implicit val actorRefFactory: ActorRefFactory) extends PublicApiRoutes with CommonDirectives with StrictLogging with FileAndResourceDirectives {
implicit val executionContext = DiagnosticExecutionContext(actorRefFactory.dispatcher)
implicit val timeout = config.timeout
implicit val log = LoggerFactory.getLogger(classOf[PublicApi])
val mappings = get {
path(Rest) { path =>
val defaultPath = if (path.isEmpty) "index.html" else path
getFromResource(s"site/${defaultPath}")
}
}
val routes = rootPath(config.localUrl.path) {
monitor(logger, throwableMarshaller) {
respondWithHeader(RawHeader("Vary", "Accept, Accept-Encoding")) {
mappings
}
}
}
}
|
blinkboxbooks/marvin-frontend.js
|
src/main/scala/com/blinkbox/books/marvin/ui/PublicApi.scala
|
Scala
|
mit
| 1,407 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.internal
import monix.execution.schedulers.CanBlock
import scala.concurrent.{Await, Awaitable}
import scala.concurrent.duration.Duration
import scala.util.Try
private[monix] object Platform {
/**
* Returns `true` in case Monix is running on top of Scala.js,
* or `false` otherwise.
*/
final val isJS = false
/**
* Returns `true` in case Monix is running on top of the JVM,
* or `false` otherwise.
*/
final val isJVM = true
/**
* Reads environment variable in a platform-specific way.
*/
def getEnv(key: String): Option[String] =
Option(System.getenv(key)).map(_.trim).filter(_.nonEmpty)
/** Recommended batch size used for breaking synchronous loops in
* asynchronous batches. When streaming value from a producer to
* a synchronous consumer it's recommended to break the streaming
* in batches as to not hold the current thread or run-loop
* indefinitely.
*
* Rounding up to the closest power of 2, because then for
* applying the modulo operation we can just do:
* {{{
* val modulus = Platform.recommendedBatchSize - 1
* // ...
* nr = (nr + 1) & modulus
* }}}
*
* Can be configured by setting Java properties:
*
* <pre>
    *   java -Dmonix.environment.batchSize=256 \
* ...
* </pre>
*/
val recommendedBatchSize: Int = {
Option(System.getProperty("monix.environment.batchSize", ""))
.filter(s => s != null && s.nonEmpty)
.flatMap(s => Try(s.toInt).toOption)
.map(math.nextPowerOf2)
.getOrElse(1024)
}
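  /* Illustrative sketch (not part of the original Monix source): because
   * `recommendedBatchSize` is normalized to a power of 2, the modulo used to
   * break a run-loop into batches reduces to a bitwise AND, as the comment
   * above describes. The method name is hypothetical.
   */
  private def nextBatchIndexSketch(nr: Int): Int = {
    val modulus = recommendedBatchSize - 1
    (nr + 1) & modulus
  }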
/** Recommended chunk size in unbounded buffer implementations that are chunked,
* or in chunked streaming.
*
* Examples:
*
* - the default when no `chunkSizeHint` is specified in
* [[monix.execution.BufferCapacity.Unbounded BufferCapacity.Unbounded]]
* - the chunk size used in
* [[monix.reactive.OverflowStrategy.Unbounded OverflowStrategy.Unbounded]]
* - the default in
* [[monix.tail.Iterant.fromConsumer Iterant.fromConsumer]] or in
* [[monix.tail.Iterant.fromConsumer Iterant.fromChannel]]
*
* Can be configured by setting Java properties:
*
* <pre>
    *   java -Dmonix.environment.bufferChunkSize=128 \
* ...
* </pre>
*
* Should be a power of 2 or it gets rounded to one.
*/
val recommendedBufferChunkSize: Int = {
Option(System.getProperty("monix.environment.bufferChunkSize", ""))
.filter(s => s != null && s.nonEmpty)
.flatMap(s => Try(s.toInt).toOption)
.map(math.nextPowerOf2)
.getOrElse(256)
}
  /** Default value for auto cancelable loops, set to `true`.
*
* On top of the JVM the default can be overridden by setting the following
* system property:
*
* `monix.environment.autoCancelableRunLoops`
*
* You can set the following values:
*
* - `true`, `yes` or `1` for enabling (the default)
* - `no`, `false` or `0` for disabling
*
   * NOTE: this value was `false` by default prior to the Monix 3.0.0
   * release. This changed along with the release of Cats-Effect 1.1.0,
   * which now recommends this default to be `true` due to the design
   * of its type classes.
*/
val autoCancelableRunLoops: Boolean = {
Option(System.getProperty("monix.environment.autoCancelableRunLoops", ""))
.map(_.toLowerCase)
.forall(v => v != "no" && v != "false" && v != "0")
}
/**
   * Default value for local context propagation is set to
   * `false`. On top of the JVM the default can be overridden by
* setting the following system property:
*
* - `monix.environment.localContextPropagation`
* (`true`, `yes` or `1` for enabling)
*/
val localContextPropagation: Boolean =
Option(System.getProperty("monix.environment.localContextPropagation", ""))
.map(_.toLowerCase)
.exists(v => v == "yes" || v == "true" || v == "1")
/** Blocks for the result of `fa`.
*
* This operation is only supported on top of the JVM, whereas for
* JavaScript a dummy is provided.
*/
def await[A](fa: Awaitable[A], timeout: Duration)(implicit permit: CanBlock): A =
Await.result(fa, timeout)
/** Composes multiple errors together, meant for those cases in which
* error suppression, due to a second error being triggered, is not
* acceptable.
*
* On top of the JVM this function uses `Throwable#addSuppressed`,
* available since Java 7. On top of JavaScript the function would return
* a `CompositeException`.
*/
def composeErrors(first: Throwable, rest: Throwable*): Throwable = {
for (e <- rest; if e ne first) first.addSuppressed(e)
first
}
/** Useful utility that combines an `Either` result, which is what
* `MonadError#attempt` returns.
*/
def composeErrors(first: Throwable, second: Either[Throwable, _]): Throwable =
second match {
case Left(e2) if first ne e2 =>
first.addSuppressed(e2)
first
case _ =>
first
}
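  /* Hypothetical usage sketch (not in the original source): the primary error
   * is returned with the secondary attached via `addSuppressed`, so neither
   * failure is silently dropped. The exception messages are made up.
   */
  private def composeErrorsSketch(): Throwable = {
    val primary = new RuntimeException("task failed")
    val cleanupFailure = new IllegalStateException("cleanup failed")
    composeErrors(primary, cleanupFailure) // primary now carries cleanupFailure as suppressed
  }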
/**
* Returns the current thread's ID.
*
* To be used for multi-threading optimizations. Note that
* in JavaScript this always returns the same value.
*/
def currentThreadId(): Long = {
Thread.currentThread().getId
}
/**
* For reporting errors when we don't have access to
* an error handler.
*/
def reportFailure(e: Throwable): Unit = {
val t = Thread.currentThread()
t.getUncaughtExceptionHandler match {
case null => DefaultUncaughtExceptionReporter.reportFailure(e)
case ref => ref.uncaughtException(t, e)
}
}
}
|
monixio/monix
|
monix-execution/jvm/src/main/scala/monix/execution/internal/Platform.scala
|
Scala
|
apache-2.0
| 6,360 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd.variant
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.util.ADAMFunSuite
class GenotypeRDDSuite extends ADAMFunSuite {
sparkTest("use broadcast join to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
val targets = sc.loadFeatures(targetsPath)
val jRdd = genotypes.broadcastRegionJoin(targets)
assert(jRdd.rdd.count === 9L)
}
sparkTest("use right outer broadcast join to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
val targets = sc.loadFeatures(targetsPath)
val jRdd = genotypes.rightOuterBroadcastRegionJoin(targets)
val c = jRdd.rdd.collect
assert(c.count(_._1.isEmpty) === 3)
assert(c.count(_._1.isDefined) === 9)
}
sparkTest("use shuffle join to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
.transform(_.repartition(1))
val targets = sc.loadFeatures(targetsPath)
.transform(_.repartition(1))
val jRdd = genotypes.shuffleRegionJoin(targets)
val jRdd0 = genotypes.shuffleRegionJoin(targets, optPartitions = Some(4))
// we can't guarantee that we get exactly the number of partitions requested,
// we get close though
assert(jRdd.rdd.partitions.length === 1)
assert(jRdd0.rdd.partitions.length === 5)
assert(jRdd.rdd.count === 9L)
assert(jRdd0.rdd.count === 9L)
}
sparkTest("use right outer shuffle join to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
.transform(_.repartition(1))
val targets = sc.loadFeatures(targetsPath)
.transform(_.repartition(1))
val jRdd = genotypes.rightOuterShuffleRegionJoin(targets)
val jRdd0 = genotypes.rightOuterShuffleRegionJoin(targets, optPartitions = Some(4))
// we can't guarantee that we get exactly the number of partitions requested,
// we get close though
assert(jRdd.rdd.partitions.length === 1)
assert(jRdd0.rdd.partitions.length === 5)
val c = jRdd.rdd.collect
val c0 = jRdd0.rdd.collect
assert(c.count(_._1.isEmpty) === 3)
assert(c0.count(_._1.isEmpty) === 3)
assert(c.count(_._1.isDefined) === 9)
assert(c0.count(_._1.isDefined) === 9)
}
sparkTest("use left outer shuffle join to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
.transform(_.repartition(1))
val targets = sc.loadFeatures(targetsPath)
.transform(_.repartition(1))
val jRdd = genotypes.leftOuterShuffleRegionJoin(targets)
val jRdd0 = genotypes.leftOuterShuffleRegionJoin(targets, optPartitions = Some(4))
// we can't guarantee that we get exactly the number of partitions requested,
// we get close though
assert(jRdd.rdd.partitions.length === 1)
assert(jRdd0.rdd.partitions.length === 5)
val c = jRdd.rdd.collect
val c0 = jRdd0.rdd.collect
assert(c.count(_._2.isEmpty) === 9)
assert(c0.count(_._2.isEmpty) === 9)
assert(c.count(_._2.isDefined) === 9)
assert(c0.count(_._2.isDefined) === 9)
}
sparkTest("use full outer shuffle join to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
.transform(_.repartition(1))
val targets = sc.loadFeatures(targetsPath)
.transform(_.repartition(1))
val jRdd = genotypes.fullOuterShuffleRegionJoin(targets)
val jRdd0 = genotypes.fullOuterShuffleRegionJoin(targets, optPartitions = Some(4))
// we can't guarantee that we get exactly the number of partitions requested,
// we get close though
assert(jRdd.rdd.partitions.length === 1)
assert(jRdd0.rdd.partitions.length === 5)
val c = jRdd.rdd.collect
val c0 = jRdd0.rdd.collect
assert(c.count(t => t._1.isEmpty && t._2.isEmpty) === 0)
assert(c0.count(t => t._1.isEmpty && t._2.isEmpty) === 0)
assert(c.count(t => t._1.isDefined && t._2.isEmpty) === 9)
assert(c0.count(t => t._1.isDefined && t._2.isEmpty) === 9)
assert(c.count(t => t._1.isEmpty && t._2.isDefined) === 3)
assert(c0.count(t => t._1.isEmpty && t._2.isDefined) === 3)
assert(c.count(t => t._1.isDefined && t._2.isDefined) === 9)
assert(c0.count(t => t._1.isDefined && t._2.isDefined) === 9)
}
sparkTest("use shuffle join with group by to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
.transform(_.repartition(1))
val targets = sc.loadFeatures(targetsPath)
.transform(_.repartition(1))
val jRdd = genotypes.shuffleRegionJoinAndGroupByLeft(targets)
val jRdd0 = genotypes.shuffleRegionJoinAndGroupByLeft(targets, optPartitions = Some(4))
// we can't guarantee that we get exactly the number of partitions requested,
// we get close though
assert(jRdd.rdd.partitions.length === 1)
assert(jRdd0.rdd.partitions.length === 5)
val c = jRdd.rdd.collect
val c0 = jRdd0.rdd.collect
assert(c.size === 9)
assert(c0.size === 9)
assert(c.forall(_._2.size == 1))
assert(c0.forall(_._2.size == 1))
}
sparkTest("use right outer shuffle join with group by to pull down genotypes mapped to targets") {
val genotypesPath = testFile("small.vcf")
val targetsPath = testFile("small.1.bed")
val genotypes = sc.loadGenotypes(genotypesPath)
.transform(_.repartition(1))
val targets = sc.loadFeatures(targetsPath)
.transform(_.repartition(1))
val jRdd = genotypes.rightOuterShuffleRegionJoinAndGroupByLeft(targets)
val jRdd0 = genotypes.rightOuterShuffleRegionJoinAndGroupByLeft(targets, optPartitions = Some(4))
// we can't guarantee that we get exactly the number of partitions requested,
// we get close though
assert(jRdd.rdd.partitions.length === 1)
assert(jRdd0.rdd.partitions.length === 5)
    val c = jRdd.rdd.collect
val c0 = jRdd0.rdd.collect
assert(c.count(_._1.isDefined) === 18)
assert(c0.count(_._1.isDefined) === 18)
assert(c.filter(_._1.isDefined).count(_._2.size == 1) === 9)
assert(c0.filter(_._1.isDefined).count(_._2.size == 1) === 9)
assert(c.filter(_._1.isDefined).count(_._2.isEmpty) === 9)
assert(c0.filter(_._1.isDefined).count(_._2.isEmpty) === 9)
assert(c.count(_._1.isEmpty) === 3)
assert(c0.count(_._1.isEmpty) === 3)
assert(c.filter(_._1.isEmpty).forall(_._2.size == 1))
assert(c0.filter(_._1.isEmpty).forall(_._2.size == 1))
}
sparkTest("convert back to variant contexts") {
val genotypesPath = testFile("small.vcf")
val genotypes = sc.loadGenotypes(genotypesPath)
val variantContexts = genotypes.toVariantContextRDD
assert(variantContexts.sequences.containsRefName("1"))
assert(variantContexts.samples.nonEmpty)
val vcs = variantContexts.rdd.collect
assert(vcs.size === 6)
val vc = vcs.head
assert(vc.position.referenceName === "1")
assert(vc.variant.variant.contigName === "1")
assert(vc.genotypes.nonEmpty)
}
}
|
massie/adam
|
adam-core/src/test/scala/org/bdgenomics/adam/rdd/variant/GenotypeRDDSuite.scala
|
Scala
|
apache-2.0
| 8,474 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.linker.backend.emitter
import scala.language.implicitConversions
import scala.collection.mutable
import org.scalajs.ir.ScalaJSVersions
import org.scalajs.ir.Position
import org.scalajs.ir.Names._
import org.scalajs.ir.OriginalName.NoOriginalName
import org.scalajs.ir.Trees.{JSUnaryOp, JSBinaryOp}
import org.scalajs.ir.Types._
import org.scalajs.linker.interface.{CheckedBehavior, ESVersion, ModuleKind}
import org.scalajs.linker.interface.unstable.RuntimeClassNameMapperImpl
import org.scalajs.linker.backend.javascript.Trees._
import EmitterNames._
import PolyfillableBuiltin._
private[emitter] object CoreJSLib {
def build(sjsGen: SJSGen, moduleContext: ModuleContext,
globalKnowledge: GlobalKnowledge): WithGlobals[Lib] = {
new CoreJSLibBuilder(sjsGen)(moduleContext, globalKnowledge).build()
}
/** A fully built CoreJSLib
*
* @param preObjectDefinitions The bulk of the CoreJSLib.
* Definitions that do not depend on any other Scala.js emitted code
* (notably Object and RuntimeLong). These must be available to all
* Scala.js emitted code.
*
* @param postObjectDefinitions Definitions coming after `j.l.Object`.
* Definitions that need the `$c_O` class to be defined, but nothing
* else. This notably includes the Array classes and everything that
* depends on them, such as the `$TypeData` class.
*
* @param initialization Things that depend on Scala.js generated classes.
* These must have class definitions (but not static fields) available.
*/
final class Lib private[CoreJSLib] (
val preObjectDefinitions: Tree,
val postObjectDefinitions: Tree,
val initialization: Tree)
private class CoreJSLibBuilder(sjsGen: SJSGen)(
implicit moduleContext: ModuleContext, globalKnowledge: GlobalKnowledge) {
import sjsGen._
import jsGen._
import config._
import nameGen._
import varGen._
import esFeatures._
import semantics._
import TreeDSL._
implicit private val noPosition: Position = Position.NoPosition
private var trackedGlobalRefs = Set.empty[String]
private def globalRef(name: String): VarRef = {
trackGlobalRef(name)
varRef(name)
}
private def trackGlobalRef(name: String): Unit = {
// We never access dangerous global refs from the core JS lib
assert(!GlobalRefUtils.isDangerousGlobalRef(name))
if (trackAllGlobalRefs)
trackedGlobalRefs += name
}
private def extractWithGlobals[A](withGlobals: WithGlobals[A]): A = {
withGlobals.globalVarNames.foreach(trackGlobalRef(_))
withGlobals.value
}
// Unconditional global references
private val ObjectRef = globalRef("Object")
private val ArrayRef = globalRef("Array")
private val StringRef = globalRef("String")
private val MathRef = globalRef("Math")
private val NumberRef = globalRef("Number")
private val TypeErrorRef = globalRef("TypeError")
private def BigIntRef = globalRef("BigInt")
private val SymbolRef = globalRef("Symbol")
// Conditional global references that we often use
private def ReflectRef = globalRef("Reflect")
private val classData = Ident("$classData")
private val orderedPrimRefsWithoutVoid = {
List(BooleanRef, CharRef, ByteRef, ShortRef, IntRef, LongRef,
FloatRef, DoubleRef)
}
private val orderedPrimRefs = VoidRef :: orderedPrimRefsWithoutVoid
private val specializedArrayTypeRefs: List[NonArrayTypeRef] =
ClassRef(ObjectClass) :: orderedPrimRefsWithoutVoid
def build(): WithGlobals[Lib] = {
val lib = new Lib(buildPreObjectDefinitions(),
buildPostObjectDefinitions(), buildInitializations())
WithGlobals(lib, trackedGlobalRefs)
}
private def buildPreObjectDefinitions(): Tree = Block(
defineLinkingInfo(),
defineJSBuiltinsSnapshotsAndPolyfills(),
declareCachedL0(),
definePropertyName(),
defineCharClass(),
defineRuntimeFunctions(),
defineObjectGetClassFunctions(),
defineDispatchFunctions(),
defineArithmeticOps(),
defineES2015LikeHelpers(),
defineModuleHelpers(),
defineIntrinsics(),
defineIsPrimitiveFunctions(),
defineBoxFunctions()
)
private def buildPostObjectDefinitions(): Tree = Block(
defineSpecializedArrayClasses(),
defineTypeDataClass(),
defineSpecializedIsArrayOfFunctions(),
defineSpecializedAsArrayOfFunctions(),
defineSpecializedTypeDatas()
)
private def buildInitializations(): Tree = Block(
assignCachedL0()
)
private def defineLinkingInfo(): Tree = {
// must be in sync with scala.scalajs.runtime.LinkingInfo
def objectFreeze(tree: Tree): Tree =
Apply(genIdentBracketSelect(ObjectRef, "freeze"), tree :: Nil)
val linkingInfo = objectFreeze(ObjectConstr(List(
str("esVersion") -> int(esVersion.edition),
str("assumingES6") -> bool(useECMAScript2015Semantics), // different name for historical reasons
str("productionMode") -> bool(productionMode),
str("linkerVersion") -> str(ScalaJSVersions.current),
str("fileLevelThis") -> This()
)))
extractWithGlobals(globalVarDef("linkingInfo", CoreVar, linkingInfo))
}
private def defineJSBuiltinsSnapshotsAndPolyfills(): Tree = {
def genPolyfillFor(builtin: PolyfillableBuiltin): Tree = builtin match {
case ObjectIsBuiltin =>
val x = varRef("x")
val y = varRef("y")
genArrowFunction(paramList(x, y), Return {
If(x === y, {
// +0.0 must be different from -0.0
(x !== 0) || ((int(1) / x) === (int(1) / y))
}, {
// NaN must be equal to NaN
(x !== x) && (y !== y)
})
})
case ImulBuiltin =>
val a = varRef("a")
val b = varRef("b")
val ah = varRef("ah")
val al = varRef("al")
val bh = varRef("bh")
val bl = varRef("bl")
genArrowFunction(paramList(a, b), Block(
const(ah, a >>> 16),
const(al, a & 0xffff),
const(bh, b >>> 16),
const(bl, b & 0xffff),
Return((al * bl) + (((ah * bl + al * bh) << 16) >>> 0) | 0)
))
case FroundBuiltin =>
val v = varRef("v")
if (!strictFloats) {
genArrowFunction(paramList(v), Return(+v))
} else {
val Float32ArrayRef = globalRef("Float32Array")
/* (function(array) {
* return function(v) {
* array[0] = v;
* return array[0];
* }
* })(new Float32Array(1))
*
* Allocating the Float32Array once and for all, and capturing it
* in an IIFE, is *much* faster than recreating it in every call of
* the polyfill (about an order of magnitude).
*/
val array = varRef("array")
val typedArrayPolyfillInner = genArrowFunction(paramList(v), {
Block(
BracketSelect(array, 0) := v,
Return(BracketSelect(array, 0))
)
})
val typedArrayPolyfill = Apply(
genArrowFunction(paramList(array), Return(typedArrayPolyfillInner)),
New(Float32ArrayRef, 1 :: Nil) :: Nil)
// scalastyle:off line.size.limit
/* Originally inspired by the Typed Array polyfills written by
* Joshua Bell:
* https://github.com/inexorabletash/polyfill/blob/a682f42c1092280bb01907c245979fb07219513d/typedarray.js#L150-L255
* Then simplified quite a lot because
* 1) we do not need to produce the actual bit string that serves
* as storage of the floats, and
* 2) we are only interested in the float32 case.
*
* Eventually, the last bits of the above were replaced by an
* application of Veltkamp's splitting (see below). The inspiration
* for that use case came from core-js' implementation at
* https://github.com/zloirock/core-js/blob/a3f591658e063a6e2c2594ec3c80eff16340a98d/packages/core-js/internals/math-fround.js
* The code does not mention Veltkamp's splitting, but the PR
* discussion that led to it does, although with a question mark,
* and without any explanation of how/why it works:
* https://github.com/paulmillr/es6-shim/pull/140#issuecomment-91787165
* We tracked down the descriptions and proofs relative to
* Veltkamp's splitting and re-derived an implementation from there.
*
* The direct tests for this polyfill are the tests for `toFloat`
* in org.scalajs.testsuite.compiler.DoubleTest.
*/
// scalastyle:on line.size.limit
val sign = varRef("sign")
val av = varRef("av")
val p = varRef("p")
val Inf = double(Double.PositiveInfinity)
val overflowThreshold = double(3.4028235677973366e38)
val normalThreshold = double(1.1754943508222875e-38)
val noTypedArrayPolyfill = genArrowFunction(paramList(v), Block(
v := +v, // turns `null` into +0, making sure not to deoptimize what follows
const(sign, If(v < 0, -1, 1)), // 1 for NaN, +0 and -0
const(av, sign * v), // abs(v), or -0 if v is -0
If(av >= overflowThreshold, { // also handles the case av === Infinity
Return(sign * Inf)
}, If(av >= normalThreshold, Block(
/* Here, we know that both the input and output are expressed
* in a Double normal form, so standard floating point
* algorithms from papers can be used.
*
* We use Veltkamp's splitting, as described and studied in
* Sylvie Boldo.
* Pitfalls of a Full Floating-Point Proof: Example on the
* Formal Proof of the Veltkamp/Dekker Algorithms
* https://dx.doi.org/10.1007/11814771_6
* Section 3, with β = 2, t = 53, s = 53 - 24 = 29, x = av.
* 53 is the number of effective mantissa bits in a Double;
* 24 in a Float.
*
* ◦ is the round-to-nearest operation with a tie-breaking
* rule (in our case, break-to-even).
*
* Let C = βˢ + 1 = 536870913
* p = ◦(x × C)
* q = ◦(x − p)
* x₁ = ◦(p + q)
*
* Boldo proves that x₁ is the (t-s)-bit float closest to x,
* using the same tie-breaking rule as ◦. Since (t-s) = 24,
* this is the closest float32 (with 24 mantissa bits), and
* therefore the correct result of `fround`.
*
* Boldo also proves that if the computation of x × C does not
* cause overflow, then none of the following operations will
* cause overflow. We know that x (av) is less than the
* overflowThreshold, and overflowThreshold × C does not
* overflow, so that computation can never cause an overflow.
*
* If the reader does not have access to Boldo's paper, they
* may refer instead to
* Claude-Pierre Jeannerod, Jean-Michel Muller, Paul Zimmermann.
* On various ways to split a floating-point number.
* ARITH 2018 - 25th IEEE Symposium on Computer Arithmetic,
* Jun 2018, Amherst (MA), United States.
* pp.53-60, 10.1109/ARITH.2018.8464793. hal-01774587v2
* available at
* https://hal.inria.fr/hal-01774587v2/document
* Section III, although that paper defers some theorems and
* proofs to Boldo's.
*/
const(p, av * 536870913),
Return(sign * (p + (av - p)))
), {
/* Here, the result is represented as a subnormal form in a
* float32 representation.
*
* We round `av` to the nearest multiple of the smallest
* positive Float value (i.e., `Float.MinPositiveValue`),
* breaking ties to an even multiple.
*
* We do this by leveraging the inherent loss of precision near
* the minimum positive *double* value: conceptually, we divide
* the value by
* Float.MinPositiveValue / Double.MinPositiveValue
* which will drop the excess precision, applying exactly the
* rounding strategy that we want. Then we multiply the value
* back by the same constant.
*
* However, `Float.MinPositiveValue / Double.MinPositiveValue`
* is not representable as a finite Double. Therefore, we
* instead use the *inverse* constant
* Double.MinPositiveValue / Float.MinPositiveValue
* and we first multiply by that constant, then divide by it.
*
* ---
*
* As an additional "hack", the input values NaN, +0 and -0
* also fall in this code path. For them, this computation
* happens to be an identity, and is therefore correct as well.
*/
val roundingFactor = double(Double.MinPositiveValue / Float.MinPositiveValue.toDouble)
Return(sign * ((av * roundingFactor) / roundingFactor))
}))
))
If(typeof(Float32ArrayRef) !== str("undefined"),
typedArrayPolyfill, noTypedArrayPolyfill)
}
case Clz32Builtin =>
val i = varRef("i")
val r = varRef("r")
genArrowFunction(paramList(i), Block(
// See Hacker's Delight, Section 5-3
If(i === 0, Return(32), Skip()),
let(r, 1),
If((i & 0xffff0000) === 0, Block(i := i << 16, r := r + 16), Skip()),
If((i & 0xff000000) === 0, Block(i := i << 8, r := r + 8), Skip()),
If((i & 0xf0000000) === 0, Block(i := i << 4, r := r + 4), Skip()),
If((i & 0xc0000000) === 0, Block(i := i << 2, r := r + 2), Skip()),
Return(r + (i >> 31))
))
case PrivateSymbolBuiltin =>
/* function privateJSFieldSymbol(description) {
* function rand32() {
* const s = ((Math.random() * 4294967296.0) | 0).toString(16);
* return "00000000".substring(s.length) + s;
* }
* return description + rand32() + rand32() + rand32() + rand32();
* }
*
* In production mode, we remove the `description` parameter.
*/
val description = varRef("description")
val rand32 = varRef("rand32")
val s = varRef("s")
val theParamList =
if (semantics.productionMode) Nil
else paramList(description)
genArrowFunction(theParamList, Block(
FunctionDef(rand32.ident, Nil, None, Block(
genLet(s.ident, mutable = false, {
val randomDouble =
Apply(genIdentBracketSelect(MathRef, "random"), Nil)
val randomInt =
(randomDouble * double(4294967296.0)) | 0
Apply(genIdentBracketSelect(randomInt, "toString"), 16 :: Nil)
}),
{
val padding = Apply(
genIdentBracketSelect(str("00000000"), "substring"),
genIdentBracketSelect(s, "length") :: Nil)
Return(padding + s)
}
)),
{
val callRand32 = Apply(rand32, Nil)
val rand128 = callRand32 + callRand32 + callRand32 + callRand32
val result =
if (semantics.productionMode) rand128
else description + rand128
Return(result)
}
))
case GetOwnPropertyDescriptorsBuiltin =>
/* getOwnPropertyDescriptors = (() => {
* // Fetch or polyfill Reflect.ownKeys
* var ownKeysFun;
* if (typeof Reflect != "undefined" && Reflect.ownKeys) {
* ownKeysFun = Reflect.ownKeys;
* } else {
* /* Fetch or polyfill Object.getOwnPropertySymbols.
* * We assume that if that function does not exist, then
* * symbols do not exist at all. Therefore, the result is
* * always an empty array.
* */
* var getOwnPropertySymbols = Object.getOwnPropertySymbols || (o => []);
*
* // Polyfill for Reflect.ownKeys
* ownKeysFun = o => Object.getOwnPropertyNames(o).concat(getOwnPropertySymbols(o));
* }
*
* // Polyfill for Object.getOwnPropertyDescriptors
* return (o => {
* var ownKeys = ownKeysFun(o);
* var descriptors = {};
* var len = ownKeys.length | 0;
* var i = 0;
* while (i !== len) {
* var key = ownKeys[i];
* /* Almost equivalent to
* * descriptors[key] = Object.getOwnPropertyDescriptor(descriptors, key);
* * except that `defineProperty` will bypass any existing setter for
* * the property `key` on `descriptors` or in its prototype chain.
* */
* Object.defineProperty(descriptors, key, {
* configurable: true,
* enumerable: true,
* writable: true,
* value: Object.getOwnPropertyDescriptor(o, key)
* });
* i = (i + 1) | 0;
* }
* return descriptors;
* });
* })();
*/
val o = varRef("o")
val ownKeysFun = varRef("ownKeysFun")
val getOwnPropertySymbols = varRef("getOwnPropertySymbols")
val ownKeys = varRef("ownKeys")
val descriptors = varRef("descriptors")
val len = varRef("len")
val i = varRef("i")
val key = varRef("key")
val funGenerator = genArrowFunction(Nil, Block(
VarDef(ownKeysFun.ident, None),
If((typeof(ReflectRef) !== str("undefined")) && genIdentBracketSelect(ReflectRef, "ownKeys"), {
ownKeysFun := genIdentBracketSelect(ReflectRef, "ownKeys")
}, Block(
const(getOwnPropertySymbols,
genIdentBracketSelect(ObjectRef, "getOwnPropertySymbols") ||
genArrowFunction(paramList(o), Return(ArrayConstr(Nil)))),
ownKeysFun := genArrowFunction(paramList(o), Return {
Apply(
genIdentBracketSelect(
Apply(genIdentBracketSelect(ObjectRef, "getOwnPropertyNames"), o :: Nil),
"concat"),
Apply(getOwnPropertySymbols, o :: Nil) :: Nil)
})
)),
Return(genArrowFunction(paramList(o), Block(
const(ownKeys, Apply(ownKeysFun, o :: Nil)),
const(descriptors, ObjectConstr(Nil)),
const(len, ownKeys.length | 0),
let(i, 0),
While(i !== len, Block(
const(key, BracketSelect(ownKeys, i)),
Apply(genIdentBracketSelect(ObjectRef, "defineProperty"), List(
descriptors,
key,
ObjectConstr(List(
str("configurable") -> bool(true),
str("enumerable") -> bool(true),
str("writable") -> bool(true),
str("value") -> {
Apply(
genIdentBracketSelect(ObjectRef, "getOwnPropertyDescriptor"),
o :: key :: Nil)
}
))
)),
i := (i + 1) | 0
)),
Return(descriptors)
)))
))
Apply(funGenerator, Nil)
}
val polyfillDefs = for {
builtin <- PolyfillableBuiltin.All
if esVersion < builtin.availableInESVersion
} yield {
val polyfill = genPolyfillFor(builtin)
val rhs = builtin match {
case builtin: GlobalVarBuiltin =>
// (typeof GlobalVar !== "undefined") ? GlobalVar : polyfill
val globalVarRef = globalRef(builtin.globalVar)
If(UnaryOp(JSUnaryOp.typeof, globalVarRef) !== str("undefined"),
globalVarRef, polyfill)
case builtin: NamespacedBuiltin =>
// NamespaceGlobalVar.builtinName || polyfill
genIdentBracketSelect(globalRef(builtin.namespaceGlobalVar), builtin.builtinName) || polyfill
}
extractWithGlobals(globalVarDef(builtin.builtinName, CoreVar, rhs))
}
Block(polyfillDefs)
}
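    /* Illustrative sketch (not emitted by the linker): a plain-Scala rendering
     * of the Veltkamp-splitting step that the `fround` polyfill above generates
     * for inputs in the Float normal range. Double arithmetic already rounds
     * each intermediate result to nearest, which is what the cited proofs rely
     * on. The method name and parameters are hypothetical.
     */
    private def froundNormalRangeSketch(av: Double, sign: Double): Double = {
      val p = av * 536870913.0 // C = 2^29 + 1, splits off the low 29 mantissa bits
      sign * (p + (av - p))    // nearest value representable with 24 mantissa bits
    }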
private def declareCachedL0(): Tree = {
condTree(!allowBigIntsForLongs)(
extractWithGlobals(globalVarDecl("L0", CoreVar))
)
}
private def assignCachedL0(): Tree = {
condTree(!allowBigIntsForLongs)(Block(
globalVar("L0", CoreVar) := genScalaClassNew(
LongImpl.RuntimeLongClass, LongImpl.initFromParts, 0, 0),
genClassDataOf(LongRef) DOT "zero" := globalVar("L0", CoreVar)
))
}
private def definePropertyName(): Tree = {
/* Encodes a property name for runtime manipulation.
*
* Usage:
* env.propertyName({someProp:0})
* Returns:
* "someProp"
* Useful when the property is renamed by a global optimizer (like
* Closure) but we must still get hold of a string of that name for
* runtime reflection.
*/
defineFunction1("propertyName") { obj =>
val prop = varRef("prop")
ForIn(genEmptyImmutableLet(prop.ident), obj, Return(prop))
}
}
private def defineCharClass(): Tree = {
val ctor = {
val c = varRef("c")
MethodDef(static = false, Ident("constructor"), paramList(c), None, {
This() DOT "c" := c
})
}
val toStr = {
MethodDef(static = false, Ident("toString"), Nil, None, {
Return(Apply(genIdentBracketSelect(StringRef, "fromCharCode"),
(This() DOT "c") :: Nil))
})
}
if (useClassesForRegularClasses) {
extractWithGlobals(globalClassDef("Char", CoreVar, None, ctor :: toStr :: Nil))
} else {
Block(
defineFunction("Char", ctor.args, ctor.body),
assignES5ClassMembers(globalVar("Char", CoreVar), List(toStr))
)
}
}
private def defineRuntimeFunctions(): Tree = Block(
condTree(asInstanceOfs != CheckedBehavior.Unchecked)(Block(
defineFunction2("throwClassCastException") { (instance, classFullName) =>
Throw(maybeWrapInUBE(asInstanceOfs, {
genScalaClassNew(ClassCastExceptionClass, StringArgConstructorName,
instance + str(" is not an instance of ") + classFullName)
}))
},
defineFunction3("throwArrayCastException") { (instance, classArrayEncodedName, depth) =>
Block(
While(depth.prefix_--, {
classArrayEncodedName := (str("[") + classArrayEncodedName)
}),
genCallHelper("throwClassCastException", instance, classArrayEncodedName)
)
}
)),
condTree(arrayIndexOutOfBounds != CheckedBehavior.Unchecked)(
defineFunction1("throwArrayIndexOutOfBoundsException") { i =>
Throw(maybeWrapInUBE(arrayIndexOutOfBounds, {
genScalaClassNew(ArrayIndexOutOfBoundsExceptionClass,
StringArgConstructorName,
If(i === Null(), Null(), str("") + i))
}))
}
),
condTree(moduleInit == CheckedBehavior.Fatal)(
defineFunction1("throwModuleInitError") { name =>
Throw(genScalaClassNew(UndefinedBehaviorErrorClass,
StringArgConstructorName, str("Initializer of ") + name +
str(" called before completion of its super constructor")))
}
),
defineFunction1("noIsInstance") { instance =>
Throw(New(TypeErrorRef,
str("Cannot call isInstance() on a Class representing a JS trait/object") :: Nil))
},
defineFunction2("newArrayObject") { (arrayClassData, lengths) =>
Return(genCallHelper("newArrayObjectInternal", arrayClassData, lengths, int(0)))
},
defineFunction3("newArrayObjectInternal") { (arrayClassData, lengths, lengthIndex) =>
val result = varRef("result")
val subArrayClassData = varRef("subArrayClassData")
val subLengthIndex = varRef("subLengthIndex")
val underlying = varRef("underlying")
val i = varRef("i")
Block(
const(result, New(arrayClassData DOT "constr",
BracketSelect(lengths, lengthIndex) :: Nil)),
If(lengthIndex < (lengths.length - 1), Block(
const(subArrayClassData, arrayClassData DOT "componentData"),
const(subLengthIndex, lengthIndex + 1),
const(underlying, result.u),
For(let(i, 0), i < underlying.length, i.++, {
BracketSelect(underlying, i) :=
genCallHelper("newArrayObjectInternal", subArrayClassData, lengths, subLengthIndex)
})
)),
Return(result)
)
},
defineFunction1("objectClone") { instance =>
// return Object.create(Object.getPrototypeOf(instance), $getOwnPropertyDescriptors(instance));
val callGetOwnPropertyDescriptors = genCallPolyfillableBuiltin(
GetOwnPropertyDescriptorsBuiltin, instance)
Return(Apply(genIdentBracketSelect(ObjectRef, "create"), List(
Apply(genIdentBracketSelect(ObjectRef, "getPrototypeOf"), instance :: Nil),
callGetOwnPropertyDescriptors)))
},
defineFunction1("objectOrArrayClone") { instance =>
// return instance.$classData.isArrayClass ? instance.clone__O() : $objectClone(instance);
Return(If(genIdentBracketSelect(instance DOT classData, "isArrayClass"),
Apply(instance DOT genName(cloneMethodName), Nil),
genCallHelper("objectClone", instance)))
}
)
private def defineObjectGetClassFunctions(): Tree = {
// objectGetClass and objectClassName
def defineObjectGetClassBasedFun(name: String,
constantClassResult: ClassName => Tree,
scalaObjectResult: VarRef => Tree, jsObjectResult: Tree): Tree = {
defineFunction1(name) { instance =>
Switch(typeof(instance), List(
str("string") -> {
Return(constantClassResult(BoxedStringClass))
},
str("number") -> {
Block(
If(genCallHelper("isInt", instance), {
If((instance << 24 >> 24) === instance, {
Return(constantClassResult(BoxedByteClass))
}, {
If((instance << 16 >> 16) === instance, {
Return(constantClassResult(BoxedShortClass))
}, {
Return(constantClassResult(BoxedIntegerClass))
})
})
}, {
if (strictFloats) {
If(genCallHelper("isFloat", instance), {
Return(constantClassResult(BoxedFloatClass))
}, {
Return(constantClassResult(BoxedDoubleClass))
})
} else {
Return(constantClassResult(BoxedFloatClass))
}
})
)
},
str("boolean") -> {
Return(constantClassResult(BoxedBooleanClass))
},
str("undefined") -> {
Return(constantClassResult(BoxedUnitClass))
}
), {
If(instance === Null(), {
Return(Apply(instance DOT genName(getClassMethodName), Nil))
}, {
If(genIsInstanceOfHijackedClass(instance, BoxedLongClass), {
Return(constantClassResult(BoxedLongClass))
}, {
If(genIsInstanceOfHijackedClass(instance, BoxedCharacterClass), {
Return(constantClassResult(BoxedCharacterClass))
}, {
If(genIsScalaJSObject(instance), {
Return(scalaObjectResult(instance))
}, {
Return(jsObjectResult)
})
})
})
})
})
}
}
Block(
/* We use isClassClassInstantiated as an over-approximation of whether
* the program contains any `GetClass` node. If `j.l.Class` is not
* instantiated, then we know that there is no `GetClass` node, and it is
* safe to omit the definition of `objectGetClass`. However, it is
* possible that we generate `objectGetClass` even if it is not
* necessary, in the case that `j.l.Class` is otherwise instantiated
* (i.e., through a `ClassOf` node).
*/
condTree(globalKnowledge.isClassClassInstantiated)(
defineObjectGetClassBasedFun("objectGetClass",
className => genClassOf(className),
instance => Apply(instance DOT classData DOT "getClassOf", Nil),
Null()
)
),
defineObjectGetClassBasedFun("objectClassName",
{ className =>
StringLiteral(RuntimeClassNameMapperImpl.map(
semantics.runtimeClassNameMapper, className.nameString))
},
instance => genIdentBracketSelect(instance DOT classData, "name"),
Apply(Null() DOT genName(getNameMethodName), Nil)
)
)
}
private def defineDispatchFunctions(): Tree = {
val instance = varRef("instance")
def defineDispatcher(methodName: MethodName, args: List[VarRef],
body: Tree): Tree = {
defineFunction("dp_" + genName(methodName),
paramList((instance :: args): _*), body)
}
/* A standard dispatcher performs a type test on the instance and then
* calls the relevant implementation which is either of:
*
* - A normal method call if the instance is a normal scala class.
* - A method in the relevant hijacked class.
* - The implementation in java.lang.Object (if this is a JS object).
*/
def defineStandardDispatcher(methodName: MethodName,
implementingClasses: Set[ClassName]): Tree = {
val args =
methodName.paramTypeRefs.indices.map(i => varRef("x" + i)).toList
val targetHijackedClasses =
subsetOfHijackedClassesOrderedForTypeTests(implementingClasses)
val implementedInObject = implementingClasses.contains(ObjectClass)
def hijackedClassNameToTypeof(className: ClassName): Option[String] = className match {
case BoxedStringClass => Some("string")
case BoxedDoubleClass => Some("number")
case BoxedBooleanClass => Some("boolean")
case BoxedUnitClass => Some("undefined")
case _ => None
}
def genHijackedMethodApply(className: ClassName): Tree =
Apply(globalVar("f", (className, methodName)), instance :: args)
def genBodyNoSwitch(hijackedClasses: List[ClassName]): Tree = {
val normalCall = Return(Apply(instance DOT genName(methodName), args))
def hijackedDispatch(default: Tree) = {
hijackedClasses.foldRight(default) { (className, next) =>
If(genIsInstanceOfHijackedClass(instance, className),
Return(genHijackedMethodApply(className)),
next)
}
}
if (implementedInObject) {
val staticObjectCall: Tree = {
val fun = globalVar("c", ObjectClass).prototype DOT genName(methodName)
Return(Apply(fun DOT "call", instance :: args))
}
If(genIsScalaJSObjectOrNull(instance),
normalCall,
hijackedDispatch(staticObjectCall))
} else {
hijackedDispatch(normalCall)
}
}
defineDispatcher(methodName, args, {
val (classesWithTypeof, otherClasses) =
targetHijackedClasses.span(hijackedClassNameToTypeof(_).isDefined)
if (classesWithTypeof.lengthCompare(1) > 0) {
// First switch on the typeof
Switch(typeof(instance), for (className <- classesWithTypeof) yield {
str(hijackedClassNameToTypeof(className).get) -> {
Return(genHijackedMethodApply(className))
}
}, {
genBodyNoSwitch(otherClasses)
})
} else {
genBodyNoSwitch(targetHijackedClasses)
}
})
}
val methodsInRepresentativeClasses =
globalKnowledge.methodsInRepresentativeClasses()
val dispatchers = for {
(methodName, implementingClasses) <- methodsInRepresentativeClasses
} yield {
if (methodName == toStringMethodName) {
// toString()java.lang.String is special as per IR spec.
defineDispatcher(toStringMethodName, Nil, {
Return(If(instance === Undefined(),
str("undefined"),
Apply(instance DOT "toString", Nil)))
})
} else {
defineStandardDispatcher(methodName, implementingClasses)
}
}
Block(dispatchers)
}
private def defineArithmeticOps(): Tree = {
val throwDivByZero = {
Throw(genScalaClassNew(ArithmeticExceptionClass,
StringArgConstructorName, str("/ by zero")))
}
def wrapBigInt64(tree: Tree): Tree =
Apply(genIdentBracketSelect(BigIntRef, "asIntN"), 64 :: tree :: Nil)
Block(
defineFunction2("intDiv") { (x, y) =>
If(y === 0, throwDivByZero, {
Return((x / y) | 0)
})
},
defineFunction2("intMod") { (x, y) =>
If(y === 0, throwDivByZero, {
Return((x % y) | 0)
})
},
defineFunction1("doubleToInt") { x =>
Return(If(x > 2147483647, 2147483647, If(x < -2147483648, -2147483648, x | 0)))
},
condTree(allowBigIntsForLongs)(Block(
defineFunction2("longDiv") { (x, y) =>
If(y === bigInt(0), throwDivByZero, {
Return(wrapBigInt64(x / y))
})
},
defineFunction2("longMod") { (x, y) =>
If(y === bigInt(0), throwDivByZero, {
Return(wrapBigInt64(x % y))
})
},
defineFunction1("doubleToLong")(x => Return {
If(x < double(-9223372036854775808.0), { // -2^63
bigInt(-9223372036854775808L)
}, {
If (x >= double(9223372036854775808.0), { // 2^63
bigInt(9223372036854775807L)
}, {
If (x !== x, { // NaN
bigInt(0L)
}, {
Apply(BigIntRef,
Apply(genIdentBracketSelect(MathRef, "trunc"), x :: Nil) :: Nil)
})
})
})
}),
defineFunction1("longToFloat") { x =>
val abs = varRef("abs")
val y = varRef("y")
val absR = varRef("absR")
// See RuntimeLong.toFloat for the strategy
Block(
const(abs, If(x < bigInt(0L), -x, x)),
const(y, If(abs <= bigInt(1L << 53) || (abs & bigInt(0xffffL)) === bigInt(0L), {
abs
}, {
(abs & bigInt(~0xffffL)) | bigInt(0x8000L)
})),
const(absR, Apply(NumberRef, y :: Nil)),
Return(genCallPolyfillableBuiltin(FroundBuiltin, If(x < bigInt(0L), -absR, absR)))
)
}
))
)
}
private def defineES2015LikeHelpers(): Tree = Block(
condTree(esVersion < ESVersion.ES2015)(
defineFunction2("newJSObjectWithVarargs") { (ctor, args) =>
val instance = varRef("instance")
val result = varRef("result")
// This basically emulates the ECMAScript specification for 'new'.
Block(
const(instance, Apply(genIdentBracketSelect(ObjectRef, "create"), ctor.prototype :: Nil)),
const(result, Apply(genIdentBracketSelect(ctor, "apply"), instance :: args :: Nil)),
Switch(typeof(result),
List("string", "number", "boolean", "undefined").map(str(_) -> Skip()) :+
str("symbol") -> Return(instance),
Return(If(result === Null(), instance, result)))
)
}
),
defineFunction2("resolveSuperRef") { (superClass, propName) =>
val getPrototypeOf = varRef("getPrototypeOf")
val getOwnPropertyDescriptor = varRef("getOwnPropertyDescriptor")
val superProto = varRef("superProto")
val desc = varRef("desc")
Block(
        const(getPrototypeOf, genIdentBracketSelect(ObjectRef, "getPrototypeOf")),
const(getOwnPropertyDescriptor, genIdentBracketSelect(ObjectRef, "getOwnPropertyDescriptor")),
let(superProto, superClass.prototype),
While(superProto !== Null(), Block(
const(desc, Apply(getOwnPropertyDescriptor, superProto :: propName :: Nil)),
If(desc !== Undefined(), Return(desc)),
superProto := Apply(getPrototypeOf, superProto :: Nil)
))
)
},
defineFunction3("superGet") { (superClass, self, propName) =>
val desc = varRef("desc")
val getter = varRef("getter")
Block(
const(desc, genCallHelper("resolveSuperRef", superClass, propName)),
If(desc !== Undefined(), Block(
const(getter, genIdentBracketSelect(desc, "get")),
Return(If(getter !== Undefined(),
Apply(genIdentBracketSelect(getter, "call"), self :: Nil),
genIdentBracketSelect(getter, "value")))
))
)
},
defineFunction4("superSet") { (superClass, self, propName, value) =>
val desc = varRef("desc")
val setter = varRef("setter")
Block(
const(desc, genCallHelper("resolveSuperRef", superClass, propName)),
If(desc !== Undefined(), Block(
const(setter, genIdentBracketSelect(desc, "set")),
If(setter !== Undefined(), Block(
Apply(genIdentBracketSelect(setter, "call"), self :: value :: Nil),
Return(Undefined())
))
)),
Throw(New(TypeErrorRef,
List(str("super has no setter '") + propName + str("'."))))
)
}
)
private def defineModuleHelpers(): Tree = {
condTree(moduleKind == ModuleKind.CommonJSModule)(
defineFunction1("moduleDefault") { m =>
Return(If(
m && (typeof(m) === str("object")) && (str("default") in m),
BracketSelect(m, str("default")),
m))
}
)
}
private def defineIntrinsics(): Tree = Block(
condTree(arrayIndexOutOfBounds != CheckedBehavior.Unchecked)(
defineFunction5("arraycopyCheckBounds") { (srcLen, srcPos, destLen, destPos, length) =>
If((srcPos < 0) || (destPos < 0) || (length < 0) ||
(srcPos > ((srcLen - length) | 0)) ||
(destPos > ((destLen - length) | 0)), {
genCallHelper("throwArrayIndexOutOfBoundsException", Null())
})
}
),
defineFunction5("arraycopyGeneric") { (srcArray, srcPos, destArray, destPos, length) =>
val i = varRef("i")
Block(
if (arrayIndexOutOfBounds != CheckedBehavior.Unchecked) {
genCallHelper("arraycopyCheckBounds", srcArray.length,
srcPos, destArray.length, destPos, length)
} else {
Skip()
},
If((srcArray !== destArray) || (destPos < srcPos) || (((srcPos + length) | 0) < destPos), {
For(let(i, 0), i < length, i := ((i + 1) | 0), {
BracketSelect(destArray, (destPos + i) | 0) := BracketSelect(srcArray, (srcPos + i) | 0)
})
}, {
For(let(i, (length - 1) | 0), i >= 0, i := ((i - 1) | 0), {
BracketSelect(destArray, (destPos + i) | 0) := BracketSelect(srcArray, (srcPos + i) | 0)
})
})
)
},
condTree(esVersion < ESVersion.ES2015)(
defineFunction5("systemArraycopy") { (src, srcPos, dest, destPos, length) =>
genCallHelper("arraycopyGeneric", src.u, srcPos, dest.u, destPos, length)
}
),
// systemIdentityHashCode
locally {
val WeakMapRef = globalRef("WeakMap")
val lastIDHash = fileLevelVar("lastIDHash")
val idHashCodeMap = fileLevelVar("idHashCodeMap")
val obj = varRef("obj")
val biHash = varRef("biHash")
val description = varRef("description")
val hash = varRef("hash")
def functionSkeleton(defaultImpl: Tree): Function = {
def genHijackedMethodApply(className: ClassName, arg: Tree): Tree =
Apply(globalVar("f", (className, hashCodeMethodName)), arg :: Nil)
def genReturnHijackedMethodApply(className: ClassName): Tree =
Return(genHijackedMethodApply(className, obj))
def genReturnBigIntHashCode(): Tree = {
/* Xor together all the chunks of 32 bits. For negative numbers,
* take their bitwise not first (otherwise we would go into an
* infinite loop).
*
* This is compatible with the specified hash code of j.l.Long,
* which is desirable: it means that the hashCode() of bigints does
* not depend on whether we implement Longs as BigInts or not.
* (By spec, x.hashCode() delegates to systemIdentityHashCode(x)
* for bigints unless they fit in a Long and we implement Longs as
* bigints.)
*
* let biHash = 0;
* if (obj < 0n)
* obj = ~obj;
* while (obj !== 0n) {
* biHash ^= Number(BigInt.asIntN(32, obj));
* obj >>= 32n;
* }
* return biHash;
*/
def biLit(x: Int): Tree =
if (esFeatures.allowBigIntsForLongs) bigInt(x)
else Apply(BigIntRef, x :: Nil)
def asInt32(arg: Tree): Tree =
Apply(genIdentBracketSelect(BigIntRef, "asIntN"), 32 :: arg :: Nil)
Block(
let(biHash, 0),
If(obj < biLit(0), obj := ~obj),
While(obj !== biLit(0), Block(
biHash := biHash ^ Apply(NumberRef, asInt32(obj) :: Nil),
obj := (obj >> biLit(32))
)),
Return(biHash)
)
}
def genReturnSymbolHashCode(): Tree = {
/* Hash the `description` field of the symbol, which is either
* `undefined` or a string.
*/
Block(
const(description, genIdentBracketSelect(obj, "description")),
Return(If(description === Undefined(), 0,
genHijackedMethodApply(BoxedStringClass, description)))
)
}
genArrowFunction(paramList(obj), {
Switch(typeof(obj), List(
str("string") -> genReturnHijackedMethodApply(BoxedStringClass),
str("number") -> genReturnHijackedMethodApply(BoxedDoubleClass),
str("bigint") -> genReturnBigIntHashCode(),
str("boolean") -> Return(If(obj, 1231, 1237)),
str("undefined") -> Return(0),
str("symbol") -> genReturnSymbolHashCode()
), defaultImpl)
})
}
def weakMapBasedFunction: Function = {
functionSkeleton {
If(obj === Null(), {
Return(0)
}, {
Block(
let(hash, Apply(genIdentBracketSelect(idHashCodeMap, "get"), obj :: Nil)),
If(hash === Undefined(), {
Block(
hash := ((lastIDHash + 1) | 0),
lastIDHash := hash,
Apply(genIdentBracketSelect(idHashCodeMap, "set"), obj :: hash :: Nil)
)
}, {
Skip()
}),
Return(hash)
)
})
}
}
def fieldBasedFunction: Function = {
functionSkeleton {
If(genIsScalaJSObject(obj), {
Block(
let(hash, genIdentBracketSelect(obj, "$idHashCode$0")),
If(hash !== Undefined(), {
Return(hash)
}, {
If(!Apply(genIdentBracketSelect(ObjectRef, "isSealed"), obj :: Nil), {
Block(
hash := ((lastIDHash + 1) | 0),
lastIDHash := hash,
genIdentBracketSelect(obj, "$idHashCode$0") := hash,
Return(hash)
)
}, {
Return(42)
})
})
)
}, {
If(obj === Null(), 0, 42)
})
}
}
Block(
let(lastIDHash, 0),
const(idHashCodeMap,
if (esVersion >= ESVersion.ES2015) New(WeakMapRef, Nil)
else If(typeof(WeakMapRef) !== str("undefined"), New(WeakMapRef, Nil), Null())),
if (esVersion >= ESVersion.ES2015) {
val f = weakMapBasedFunction
defineFunction("systemIdentityHashCode", f.args, f.body)
} else {
extractWithGlobals(globalVarDef("systemIdentityHashCode", CoreVar,
If(idHashCodeMap !== Null(), weakMapBasedFunction, fieldBasedFunction)))
}
)
}
)
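    /* Illustrative sketch (not emitted by the linker): the bigint hashCode
     * strategy described in the comment above, expressed on scala.math.BigInt.
     * Negative values are bitwise-complemented first so the shifting loop
     * terminates; each 32-bit chunk is then XORed into the result. The method
     * name is hypothetical.
     */
    private def bigIntHashSketch(value: BigInt): Int = {
      var obj = if (value < 0) ~value else value
      var biHash = 0
      while (obj != 0) {
        biHash ^= (obj & 0xffffffffL).toInt // low 32 bits, like BigInt.asIntN(32, obj)
        obj >>= 32
      }
      biHash
    }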
private def defineIsPrimitiveFunctions(): Tree = {
def defineIsIntLike(name: String, specificTest: VarRef => Tree): Tree = {
defineFunction1(name) { v =>
Return((typeof(v) === str("number")) && specificTest(v) &&
((int(1) / v) !== (int(1) / double(-0.0))))
}
}
Block(
defineIsIntLike("isByte", v => (v << 24 >> 24) === v),
defineIsIntLike("isShort", v => (v << 16 >> 16) === v),
defineIsIntLike("isInt", v => (v | 0) === v),
condTree(allowBigIntsForLongs)(
defineFunction1("isLong") { v =>
Return((typeof(v) === str("bigint")) &&
(Apply(genIdentBracketSelect(BigIntRef, "asIntN"), int(64) :: v :: Nil) === v))
}
),
condTree(strictFloats)(
defineFunction1("isFloat") { v =>
Return((typeof(v) === str("number")) &&
((v !== v) || (genCallPolyfillableBuiltin(FroundBuiltin, v) === v)))
}
)
)
}
private def defineBoxFunctions(): Tree = Block(
// Boxes for Chars
defineFunction1("bC") { c =>
Return(New(globalVar("Char", CoreVar), c :: Nil))
},
extractWithGlobals(globalVarDef("bC0", CoreVar, genCallHelper("bC", 0))),
if (asInstanceOfs != CheckedBehavior.Unchecked) {
// Unboxes for everything
def defineUnbox(name: String, boxedClassName: ClassName, resultExpr: VarRef => Tree): Tree = {
val fullName = boxedClassName.nameString
defineFunction1(name)(v => Return {
If(genIsInstanceOfHijackedClass(v, boxedClassName) || (v === Null()),
resultExpr(v),
genCallHelper("throwClassCastException", v, str(fullName)))
})
}
Block(
defineUnbox("uV", BoxedUnitClass, _ => Undefined()),
defineUnbox("uZ", BoxedBooleanClass, v => !(!v)),
defineUnbox("uC", BoxedCharacterClass, v => If(v === Null(), 0, v DOT "c")),
defineUnbox("uB", BoxedByteClass, _ | 0),
defineUnbox("uS", BoxedShortClass, _ | 0),
defineUnbox("uI", BoxedIntegerClass, _ | 0),
defineUnbox("uJ", BoxedLongClass, v => If(v === Null(), genLongZero(), v)),
/* Since the type test ensures that v is either null or a float, we can
* use + instead of fround.
*/
defineUnbox("uF", BoxedFloatClass, v => +v),
defineUnbox("uD", BoxedDoubleClass, v => +v),
defineUnbox("uT", BoxedStringClass, v => If(v === Null(), StringLiteral(""), v))
)
} else {
// Unboxes for Chars and Longs
Block(
defineFunction1("uC") { v =>
Return(If(v === Null(), 0, v DOT "c"))
},
defineFunction1("uJ") { v =>
Return(If(v === Null(), genLongZero(), v))
}
)
}
)
/** Define the array classes for primitive types and for `Object`.
*
* Other array classes are created dynamically from their TypeData's
* `initArray` initializer, and extend the array class for `Object`.
*/
private def defineSpecializedArrayClasses(): Tree = Block(
for (componentTypeRef <- specializedArrayTypeRefs) yield {
val ArrayClass = globalVar("ac", componentTypeRef)
val isTypedArray = usesUnderlyingTypedArray(componentTypeRef)
val ctor = {
val arg = varRef("arg")
MethodDef(static = false, Ident("constructor"), paramList(arg), None, {
Block(
if (useClassesForRegularClasses) Apply(Super(), Nil) else Skip(),
genArrayClassConstructorBody(arg, componentTypeRef)
)
})
}
val getAndSet = if (arrayIndexOutOfBounds != CheckedBehavior.Unchecked) {
val i = varRef("i")
val v = varRef("v")
val boundsCheck = {
If((i < 0) || (i >= This().u.length),
genCallHelper("throwArrayIndexOutOfBoundsException", i))
}
List(
MethodDef(static = false, Ident("get"), paramList(i), None, {
Block(
boundsCheck,
Return(BracketSelect(This().u, i))
)
}),
MethodDef(static = false, Ident("set"), paramList(i, v), None, {
Block(
boundsCheck,
BracketSelect(This().u, i) := v
)
})
)
} else {
Nil
}
val copyTo = if (esVersion >= ESVersion.ES2015) {
val srcPos = varRef("srcPos")
val dest = varRef("dest")
val destPos = varRef("destPos")
val length = varRef("length")
val methodDef = MethodDef(static = false, Ident("copyTo"),
paramList(srcPos, dest, destPos, length), None, {
if (isTypedArray) {
Block(
if (semantics.arrayIndexOutOfBounds != CheckedBehavior.Unchecked) {
genCallHelper("arraycopyCheckBounds", This().u.length,
srcPos, dest.u.length, destPos, length)
} else {
Skip()
},
Apply(genIdentBracketSelect(dest.u, "set"),
Apply(genIdentBracketSelect(This().u, "subarray"), srcPos :: ((srcPos + length) | 0) :: Nil) ::
destPos ::
Nil)
)
} else {
genCallHelper("arraycopyGeneric", This().u, srcPos,
dest.u, destPos, length)
}
})
methodDef :: Nil
} else {
Nil
}
val clone = MethodDef(static = false, Ident(genName(cloneMethodName)), Nil, None, {
Return(New(ArrayClass,
Apply(genIdentBracketSelect(This().u, "slice"), Nil) :: Nil))
})
val members = getAndSet ::: copyTo ::: clone :: Nil
if (useClassesForRegularClasses) {
extractWithGlobals(globalClassDef("ac", componentTypeRef,
Some(globalVar("c", ObjectClass)), ctor :: members))
} else {
val clsDef = Block(
extractWithGlobals(globalFunctionDef("ac", componentTypeRef,
ctor.args, ctor.restParam, ctor.body)),
(ArrayClass.prototype := New(globalVar("h", ObjectClass), Nil)),
(ArrayClass.prototype DOT "constructor" := ArrayClass),
assignES5ClassMembers(ArrayClass, members)
)
componentTypeRef match {
case _: ClassRef =>
Block(
clsDef,
extractWithGlobals(globalFunctionDef("ah", ObjectClass, Nil, None, Skip())),
(globalVar("ah", ObjectClass).prototype := ArrayClass.prototype)
)
case _: PrimRef =>
clsDef
}
}
}
)
private def genArrayClassConstructorBody(arg: VarRef,
componentTypeRef: NonArrayTypeRef): Tree = {
val i = varRef("i")
If(typeof(arg) === str("number"), {
getArrayUnderlyingTypedArrayClassRef(componentTypeRef) match {
case Some(typeArrayClassWithGlobalRefs) =>
This().u := New(extractWithGlobals(typeArrayClassWithGlobalRefs), arg :: Nil)
case None =>
Block(
This().u := New(ArrayRef, arg :: Nil),
For(let(i, 0), i < arg, i.++, {
BracketSelect(This().u, i) := genZeroOf(componentTypeRef)
})
)
}
}, {
// arg is a native array that we wrap
This().u := arg
})
}
private def defineTypeDataClass(): Tree = {
def privateFieldSet(fieldName: String, value: Tree): Tree =
This() DOT fieldName := value
def publicFieldSet(fieldName: String, value: Tree): Tree =
genIdentBracketSelect(This(), fieldName) := value
val ctor = {
MethodDef(static = false, Ident("constructor"), Nil, None, {
Block(
privateFieldSet("constr", Undefined()),
if (globalKnowledge.isParentDataAccessed)
privateFieldSet("parentData", Undefined())
else
Skip(),
privateFieldSet("ancestors", Null()),
privateFieldSet("componentData", Null()),
privateFieldSet("arrayBase", Null()),
privateFieldSet("arrayDepth", int(0)),
privateFieldSet("zero", Null()),
privateFieldSet("arrayEncodedName", str("")),
privateFieldSet("_classOf", Undefined()),
privateFieldSet("_arrayOf", Undefined()),
/* A lambda for the logic of the public `isAssignableFrom`,
* without its fast-path. See the comment on the definition of
* `isAssignableFrom` for the rationale of this decomposition.
*/
privateFieldSet("isAssignableFromFun", Undefined()),
privateFieldSet("wrapArray", Undefined()),
publicFieldSet("name", str("")),
publicFieldSet("isPrimitive", bool(false)),
publicFieldSet("isInterface", bool(false)),
publicFieldSet("isArrayClass", bool(false)),
publicFieldSet("isJSClass", bool(false)),
publicFieldSet("isInstance", Undefined())
)
})
}
val initPrim = {
val zero = varRef("zero")
val arrayEncodedName = varRef("arrayEncodedName")
val displayName = varRef("displayName")
val arrayClass = varRef("arrayClass")
val typedArrayClass = varRef("typedArrayClass")
val self = varRef("self")
val that = varRef("that")
val depth = varRef("depth")
val obj = varRef("obj")
MethodDef(static = false, Ident("initPrim"),
paramList(zero, arrayEncodedName, displayName, arrayClass, typedArrayClass), None, {
Block(
privateFieldSet("ancestors", ObjectConstr(Nil)),
privateFieldSet("zero", zero),
privateFieldSet("arrayEncodedName", arrayEncodedName),
const(self, This()), // capture `this` for use in arrow fun
privateFieldSet("isAssignableFromFun",
genArrowFunction(paramList(that), Return(that === self))),
publicFieldSet("name", displayName),
publicFieldSet("isPrimitive", bool(true)),
publicFieldSet("isInstance",
genArrowFunction(paramList(obj), Return(bool(false)))),
If(arrayClass !== Undefined(), { // it is undefined for void
privateFieldSet("_arrayOf",
Apply(New(globalVar("TypeData", CoreVar), Nil) DOT "initSpecializedArray",
List(This(), arrayClass, typedArrayClass)))
}),
Return(This())
)
})
}
val initClass = {
val internalNameObj = varRef("internalNameObj")
val isInterface = varRef("isInterface")
val fullName = varRef("fullName")
val ancestors = varRef("ancestors")
val isJSType = varRef("isJSType")
val parentData = varRef("parentData")
val isInstance = varRef("isInstance")
val internalName = varRef("internalName")
val that = varRef("that")
val depth = varRef("depth")
val obj = varRef("obj")
MethodDef(static = false, Ident("initClass"),
paramList(internalNameObj, isInterface, fullName, ancestors,
isJSType, parentData, isInstance), None, {
Block(
const(internalName, genCallHelper("propertyName", internalNameObj)),
if (globalKnowledge.isParentDataAccessed)
privateFieldSet("parentData", parentData)
else
Skip(),
privateFieldSet("ancestors", ancestors),
privateFieldSet("arrayEncodedName", str("L") + fullName + str(";")),
privateFieldSet("isAssignableFromFun", {
genArrowFunction(paramList(that), {
Return(!(!(BracketSelect(that DOT "ancestors", internalName))))
})
}),
privateFieldSet("isJSType", !(!isJSType)),
publicFieldSet("name", fullName),
publicFieldSet("isInterface", isInterface),
publicFieldSet("isInstance", isInstance || {
genArrowFunction(paramList(obj), {
Return(!(!(obj && (obj DOT classData) &&
BracketSelect(obj DOT classData DOT "ancestors", internalName))))
})
}),
Return(This())
)
})
}
def initArrayCommonBody(arrayClass: VarRef, componentData: VarRef,
arrayBase: VarRef, arrayDepth: Tree): Tree = {
val name = varRef("name")
Block(
arrayClass.prototype DOT classData := This(),
const(name, str("[") + (componentData DOT "arrayEncodedName")),
privateFieldSet("constr", arrayClass),
if (globalKnowledge.isParentDataAccessed)
privateFieldSet("parentData", genClassDataOf(ObjectClass))
else
Skip(),
privateFieldSet("ancestors", ObjectConstr(List(
Ident(genName(ObjectClass)) -> 1,
Ident(genName(CloneableClass)) -> 1,
Ident(genName(SerializableClass)) -> 1
))),
privateFieldSet("componentData", componentData),
privateFieldSet("arrayBase", arrayBase),
privateFieldSet("arrayDepth", arrayDepth),
privateFieldSet("arrayEncodedName", name),
publicFieldSet("name", name),
publicFieldSet("isArrayClass", bool(true))
)
}
val initSpecializedArray = {
val componentData = varRef("componentData")
val arrayClass = varRef("arrayClass")
val typedArrayClass = varRef("typedArrayClass")
val isAssignableFromFun = varRef("isAssignableFromFun")
val self = varRef("self")
val that = varRef("that")
val obj = varRef("obj")
val array = varRef("array")
MethodDef(static = false, Ident("initSpecializedArray"),
paramList(componentData, arrayClass, typedArrayClass, isAssignableFromFun), None, {
Block(
initArrayCommonBody(arrayClass, componentData, componentData, 1),
const(self, This()), // capture `this` for use in arrow fun
privateFieldSet("isAssignableFromFun", isAssignableFromFun || {
genArrowFunction(paramList(that), Return(self === that))
}),
privateFieldSet("wrapArray", {
If(typedArrayClass, {
genArrowFunction(paramList(array), {
Return(New(arrayClass, New(typedArrayClass, array :: Nil) :: Nil))
})
}, {
genArrowFunction(paramList(array), {
Return(New(arrayClass, array :: Nil))
})
})
}),
publicFieldSet("isInstance",
genArrowFunction(paramList(obj), Return(obj instanceof arrayClass))),
Return(This())
)
})
}
val initArray = {
val componentData = varRef("componentData")
val ArrayClass = varRef("ArrayClass")
val arrayBase = varRef("arrayBase")
val arrayDepth = varRef("arrayDepth")
val isAssignableFromFun = varRef("isAssignableFromFun")
val that = varRef("that")
val self = varRef("self")
val obj = varRef("obj")
val array = varRef("array")
MethodDef(static = false, Ident("initArray"),
paramList(componentData), None, {
val ArrayClassDef = {
val ctor = {
val arg = varRef("arg")
val i = varRef("i")
MethodDef(static = false, Ident("constructor"), paramList(arg), None, {
if (useClassesForRegularClasses)
Apply(Super(), arg :: Nil)
else
genArrayClassConstructorBody(arg, ClassRef(ObjectClass))
})
}
val copyTo = if (esVersion >= ESVersion.ES2015) {
val srcPos = varRef("srcPos")
val dest = varRef("dest")
val destPos = varRef("destPos")
val length = varRef("length")
val methodDef = MethodDef(static = false, Ident("copyTo"),
paramList(srcPos, dest, destPos, length), None, {
genCallHelper("arraycopyGeneric", This().u, srcPos,
dest.u, destPos, length)
})
methodDef :: Nil
} else {
Nil
}
val clone = MethodDef(static = false, Ident(genName(cloneMethodName)), Nil, None, {
Return(New(ArrayClass,
Apply(genIdentBracketSelect(This().u, "slice"), Nil) :: Nil))
})
val members = copyTo ::: clone :: Nil
if (useClassesForRegularClasses) {
ClassDef(Some(ArrayClass.ident), Some(globalVar("ac", ObjectClass)),
ctor :: members)
} else {
Block(
FunctionDef(ArrayClass.ident, ctor.args, ctor.restParam, ctor.body),
ArrayClass.prototype := New(globalVar("ah", ObjectClass), Nil),
ArrayClass.prototype DOT "constructor" := ArrayClass,
assignES5ClassMembers(ArrayClass, members)
)
}
}
Block(
ArrayClassDef,
const(arrayBase, (componentData DOT "arrayBase") || componentData),
const(arrayDepth, (componentData DOT "arrayDepth") + 1),
initArrayCommonBody(ArrayClass, componentData, arrayBase, arrayDepth),
const(isAssignableFromFun, {
genArrowFunction(paramList(that), {
val thatDepth = varRef("thatDepth")
Block(
const(thatDepth, that DOT "arrayDepth"),
Return(If(thatDepth === arrayDepth, {
Apply(arrayBase DOT "isAssignableFromFun", (that DOT "arrayBase") :: Nil)
}, {
(thatDepth > arrayDepth) && (arrayBase === genClassDataOf(ObjectClass))
}))
)
})
}),
privateFieldSet("isAssignableFromFun", isAssignableFromFun),
privateFieldSet("wrapArray", genArrowFunction(paramList(array), {
Return(New(ArrayClass, array :: Nil))
})),
const(self, This()), // don't rely on the lambda being called with `this` as receiver
publicFieldSet("isInstance", genArrowFunction(paramList(obj), {
val data = varRef("data")
Block(
const(data, obj && (obj DOT classData)),
Return(!(!data) && {
(data === self) || // fast path
Apply(isAssignableFromFun, data :: Nil)
})
)
})),
Return(This())
)
})
}
val getArrayOf = {
MethodDef(static = false, Ident("getArrayOf"), Nil, None, {
Block(
If(!(This() DOT "_arrayOf"),
This() DOT "_arrayOf" :=
Apply(New(globalVar("TypeData", CoreVar), Nil) DOT "initArray", This() :: Nil),
Skip()),
Return(This() DOT "_arrayOf")
)
})
}
def getClassOf = {
MethodDef(static = false, Ident("getClassOf"), Nil, None, {
Block(
If(!(This() DOT "_classOf"),
This() DOT "_classOf" := genScalaClassNew(ClassClass, ObjectArgConstructorName, This()),
Skip()),
Return(This() DOT "_classOf")
)
})
}
def isAssignableFrom = {
/* This is the public method called by j.l.Class.isAssignableFrom. It
* first performs a fast-path with `this === that`, and otherwise calls
* the internal `isAssignableFromFun` function.
* The reason for this decomposition (as opposed to performing the
* fast-path in each `isAssignableFromFun`) is to keep the fast-path
* monomorphic: on the happy path, the VM performs a monomorphic
* dispatch to this method, which performs the fast-path and returns.
* We only need a polymorphic dispatch in the slow path.
*/
val that = varRef("that")
MethodDef(static = false, StringLiteral("isAssignableFrom"),
paramList(that), None, {
Return(
(This() === that) || // fast path
Apply(This() DOT "isAssignableFromFun", that :: Nil))
})
}
def checkCast = {
val obj = varRef("obj")
MethodDef(static = false, StringLiteral("checkCast"), paramList(obj), None,
if (asInstanceOfs != CheckedBehavior.Unchecked) {
If((obj !== Null()) && !(This() DOT "isJSType") &&
!Apply(genIdentBracketSelect(This(), "isInstance"), obj :: Nil),
genCallHelper("throwClassCastException", obj, genIdentBracketSelect(This(), "name")),
Skip())
} else {
Skip()
}
)
}
def getSuperclass = {
MethodDef(static = false, StringLiteral("getSuperclass"), Nil, None, {
Return(If(This() DOT "parentData",
Apply(This() DOT "parentData" DOT "getClassOf", Nil),
Null()))
})
}
def getComponentType = {
MethodDef(static = false, StringLiteral("getComponentType"), Nil, None, {
Return(If(This() DOT "componentData",
Apply(This() DOT "componentData" DOT "getClassOf", Nil),
Null()))
})
}
def newArrayOfThisClass = {
val lengths = varRef("lengths")
val arrayClassData = varRef("arrayClassData")
val i = varRef("i")
MethodDef(static = false, StringLiteral("newArrayOfThisClass"),
paramList(lengths), None, {
Block(
let(arrayClassData, This()),
For(let(i, 0), i < lengths.length, i.++, {
arrayClassData := Apply(arrayClassData DOT "getArrayOf", Nil)
}),
Return(genCallHelper("newArrayObject", arrayClassData, lengths))
)
})
}
val members = List(
initPrim,
initClass,
initSpecializedArray,
initArray,
getArrayOf
) ::: (
if (globalKnowledge.isClassClassInstantiated) {
List(
getClassOf,
isAssignableFrom,
checkCast,
getSuperclass,
getComponentType,
newArrayOfThisClass
)
} else {
Nil
}
)
if (useClassesForRegularClasses) {
extractWithGlobals(globalClassDef("TypeData", CoreVar, None, ctor :: members))
} else {
Block(
defineFunction("TypeData", ctor.args, ctor.body),
assignES5ClassMembers(globalVar("TypeData", CoreVar), members)
)
}
}
private def defineSpecializedIsArrayOfFunctions(): Tree = {
// isArrayOf_O
val obj = varRef("obj")
val depth = varRef("depth")
val data = varRef("data")
val arrayDepth = varRef("arrayDepth")
val forObj = extractWithGlobals(globalFunctionDef("isArrayOf", ObjectClass, paramList(obj, depth), None, {
Block(
const(data, obj && (obj DOT "$classData")),
If(!data, {
Return(BooleanLiteral(false))
}, {
Block(
const(arrayDepth, data DOT "arrayDepth"),
Return(If(arrayDepth === depth, {
!genIdentBracketSelect(data DOT "arrayBase", "isPrimitive")
}, {
arrayDepth > depth
}))
)
})
)
}))
val forPrims = for (primRef <- orderedPrimRefsWithoutVoid) yield {
val obj = varRef("obj")
val depth = varRef("depth")
extractWithGlobals(globalFunctionDef("isArrayOf", primRef, paramList(obj, depth), None, {
Return(!(!(obj && (obj DOT classData) &&
((obj DOT classData DOT "arrayDepth") === depth) &&
((obj DOT classData DOT "arrayBase") === genClassDataOf(primRef)))))
}))
}
Block(forObj :: forPrims)
}
private def defineSpecializedAsArrayOfFunctions(): Tree = {
condTree(asInstanceOfs != CheckedBehavior.Unchecked)(Block(
for (typeRef <- specializedArrayTypeRefs) yield {
val encodedName = typeRef match {
case typeRef: PrimRef => typeRef.charCode.toString()
case _ => "L" + ObjectClass.nameString + ";"
}
val obj = varRef("obj")
val depth = varRef("depth")
extractWithGlobals(globalFunctionDef("asArrayOf", typeRef, paramList(obj, depth), None, {
If(Apply(globalVar("isArrayOf", typeRef), obj :: depth :: Nil) || (obj === Null()), {
Return(obj)
}, {
genCallHelper("throwArrayCastException", obj, str(encodedName), depth)
})
}))
}
))
}
private def defineSpecializedTypeDatas(): Tree = {
/* d_O must be first to correctly populate the parentData of array
     * classes. Unlike all other type datas, we assign the fields of d_O
* directly in the generated code, rather than through an `initXyz`
* method. That's because its initialization code does not follow the
* pattern of other type datas, and therefore the appropriate `initXyz`
* would be called only from here anyway.
*/
val obj = locally {
val fullName = RuntimeClassNameMapperImpl.map(
semantics.runtimeClassNameMapper, ObjectClass.nameString)
val that = varRef("that")
val obj = varRef("obj")
val typeDataVar = globalVar("d", ObjectClass)
def privateFieldSet(fieldName: String, value: Tree): Tree =
typeDataVar DOT fieldName := value
def publicFieldSet(fieldName: String, value: Tree): Tree =
genIdentBracketSelect(typeDataVar, fieldName) := value
Block(
extractWithGlobals(
globalVarDef("d", ObjectClass, New(globalVar("TypeData", CoreVar), Nil))),
privateFieldSet("ancestors", ObjectConstr(List((Ident(genName(ObjectClass)) -> 1)))),
privateFieldSet("arrayEncodedName", str("L" + fullName + ";")),
privateFieldSet("isAssignableFromFun", {
genArrowFunction(paramList(that), {
Return(!genIdentBracketSelect(that, "isPrimitive"))
})
}),
publicFieldSet("name", str(fullName)),
publicFieldSet("isInstance",
genArrowFunction(paramList(obj), Return(obj !== Null()))),
privateFieldSet("_arrayOf", {
Apply(New(globalVar("TypeData", CoreVar), Nil) DOT "initSpecializedArray", List(
typeDataVar,
globalVar("ac", ObjectClass),
Undefined(), // typedArray
genArrowFunction(paramList(that), {
val thatDepth = varRef("thatDepth")
Block(
const(thatDepth, that DOT "arrayDepth"),
Return(If(thatDepth === 1, {
!genIdentBracketSelect(that DOT "arrayBase", "isPrimitive")
}, {
(thatDepth > 1)
}))
)
})
))
}),
globalVar("c", ObjectClass).prototype DOT "$classData" := typeDataVar
)
}
val prims = for (primRef <- orderedPrimRefs) yield {
/* Zero value, for use by the intrinsified code of
* `scala.collection.mutable.ArrayBuilder.genericArrayBuilderResult`.
* This code is Scala-specific, and "unboxes" `null` as the zero of
* primitive types. For `void`, it is even more special, as it produces
* a boxed Unit value, which is `undefined` (although `VoidRef`/`NoType`
* doesn't have a zero value per se).
*/
val zero = primRef match {
case VoidRef => Undefined()
case LongRef if !allowBigIntsForLongs => Null() // set later when $L0 is initialized
case _ => genZeroOf(primRef)
}
val typedArrayClass = getArrayUnderlyingTypedArrayClassRef(primRef) match {
case Some(typedArrayClassWithGlobals) =>
extractWithGlobals(typedArrayClassWithGlobals)
case None =>
Undefined()
}
extractWithGlobals(globalVarDef("d", primRef, {
Apply(New(globalVar("TypeData", CoreVar), Nil) DOT "initPrim",
List(zero, str(primRef.charCode.toString()),
str(primRef.displayName),
if (primRef == VoidRef) Undefined()
else genArrayConstrOf(ArrayTypeRef(primRef, 1)),
typedArrayClass))
}))
}
Block(obj :: prims)
}
private def defineFunction(name: String, args: List[ParamDef], body: Tree): Tree =
extractWithGlobals(globalFunctionDef(name, CoreVar, args, None, body))
private val argRefs = List.tabulate(5)(i => varRef("arg" + i))
private def defineFunction1(name: String)(body: VarRef => Tree): Tree = {
val a :: _ = argRefs
defineFunction(name, paramList(a), body(a))
}
private def defineFunction2(name: String)(body: (VarRef, VarRef) => Tree): Tree = {
val a :: b :: _ = argRefs
defineFunction(name, paramList(a, b), body(a, b))
}
private def defineFunction3(name: String)(body: (VarRef, VarRef, VarRef) => Tree): Tree = {
val a :: b :: c :: _ = argRefs
defineFunction(name, paramList(a, b, c), body(a, b, c))
}
private def defineFunction4(name: String)(body: (VarRef, VarRef, VarRef, VarRef) => Tree): Tree = {
val a :: b :: c :: d :: _ = argRefs
defineFunction(name, paramList(a, b, c, d), body(a, b, c, d))
}
private def defineFunction5(name: String)(body: (VarRef, VarRef, VarRef, VarRef, VarRef) => Tree): Tree = {
val a :: b :: c :: d :: e :: _ = argRefs
defineFunction(name, paramList(a, b, c, d, e), body(a, b, c, d, e))
}
private def genArrowFunction(args: List[ParamDef], body: Tree): Function =
jsGen.genArrowFunction(args, None, body)
private def genCallPolyfillableBuiltin(builtin: PolyfillableBuiltin,
args: Tree*): Tree = {
extractWithGlobals(sjsGen.genCallPolyfillableBuiltin(builtin, args: _*))
}
private def maybeWrapInUBE(behavior: CheckedBehavior, exception: Tree): Tree = {
if (behavior == CheckedBehavior.Fatal) {
genScalaClassNew(UndefinedBehaviorErrorClass,
ThrowableArgConsructorName, exception)
} else {
exception
}
}
private def genIsScalaJSObject(obj: VarRef): Tree =
!(!(obj && (obj DOT classData)))
private def genIsScalaJSObjectOrNull(obj: VarRef): Tree =
genIsScalaJSObject(obj) || (obj === Null())
private def condTree(cond: Boolean)(tree: => Tree): Tree =
if (cond) tree
else Skip()
private def varRef(name: String): VarRef = VarRef(Ident(name))
private def const(ref: VarRef, rhs: Tree): LocalDef =
genConst(ref.ident, rhs)
private def let(ref: VarRef, rhs: Tree): LocalDef =
genLet(ref.ident, mutable = true, rhs)
private def paramList(refs: VarRef*): List[ParamDef] =
refs.toList.map(ref => ParamDef(ref.ident))
private def str(s: String): StringLiteral = StringLiteral(s)
private def bool(b: Boolean): BooleanLiteral = BooleanLiteral(b)
/* This one is implicit because there are *many* ints in the trees we
* created, so this helps readability.
*/
private implicit def int(i: Int): IntLiteral = IntLiteral(i)
private def double(d: Double): DoubleLiteral = DoubleLiteral(d)
private def bigInt(i: Long): BigIntLiteral = BigIntLiteral(i)
}
}
|
scala-js/scala-js
|
linker/shared/src/main/scala/org/scalajs/linker/backend/emitter/CoreJSLib.scala
|
Scala
|
apache-2.0
| 80,727 |
package toguru.impl
import java.util.concurrent.Executors
import net.jodah.failsafe.CircuitBreaker
import org.mockito.Mockito._
import org.mockito.scalatest.IdiomaticMockito
import org.scalatest.OptionValues
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
import sttp.client.testing.SttpBackendStub
import sttp.client.{Identity, NothingT, Response}
import sttp.model.{Header, MediaType}
import toguru.api.{Condition, DefaultActivations, Toggle}
import toguru.impl.RemoteActivationsProvider.{PollResponse, TogglePoller}
import scala.concurrent.duration.{FiniteDuration, _}
class RemoteActivationsProviderSpec extends AnyWordSpec with OptionValues with Matchers with IdiomaticMockito {
val toggleOne = Toggle("toggle-one")
val toggleTwo = Toggle("toggle-two")
val executor = Executors.newSingleThreadScheduledExecutor()
def poller(response: String, contentType: String = RemoteActivationsProvider.MimeApiV3): TogglePoller =
_ => PollResponse(200, contentType, response)
def createCircuitBreaker(): CircuitBreaker[Any] =
new CircuitBreaker[Any]().withFailureThreshold(1000).withDelay(java.time.Duration.ofMillis(100))
def createProvider(poller: TogglePoller): RemoteActivationsProvider = {
val provider = new RemoteActivationsProvider(poller, executor, circuitBreakerBuilder = createCircuitBreaker)
provider.close()
    provider
}
def createProvider(
response: String,
contentType: String = RemoteActivationsProvider.MimeApiV3
): RemoteActivationsProvider =
createProvider(poller(response, contentType))
def createProvider(
backend: SttpBackendStub[Identity, Nothing, NothingT]
): RemoteActivationsProvider =
RemoteActivationsProvider(
s"http://localhost:80",
pollInterval = 100.milliseconds,
circuitBreakerBuilder = createCircuitBreaker
)(backend)
"Fetching features from toggle endpoint" should {
def validateResponse(toggles: Seq[ToggleState]): Unit = {
val toggleStateOne = toggles.collectFirst { case t if t.id == toggleOne.id => t }.value
val toggleStateTwo = toggles.collectFirst { case t if t.id == toggleTwo.id => t }.value
toggleStateOne.id mustBe "toggle-one"
toggleStateOne.tags mustBe Map("services" -> "toguru")
toggleStateOne.condition mustBe Condition.Off
toggleStateTwo.id mustBe "toggle-two"
toggleStateTwo.tags mustBe Map("team" -> "Shared Services")
toggleStateTwo.condition mustBe Condition.UuidRange(1 to 20)
}
"send sequenceNo to server" in {
val poller = mock[TogglePoller]
when(poller.apply(Some(10))).thenReturn(PollResponse(200, "", ""))
val provider = createProvider(poller)
provider.fetchToggleStates(Some(10))
verify(poller).apply(Some(10))
}
"succeed if a toggle response is received" in {
val response =
"""
|{
| "sequenceNo": 10,
| "toggles": [
| { "id": "toggle-one", "tags": {"services": "toguru"}, "activations": [] },
| { "id": "toggle-two", "tags": {"team": "Shared Services"}, "activations": [ {"rollout": {"percentage": 20 }, "attributes": {} } ] }
| ]
|}
|""".stripMargin
val provider = createProvider(response)
val toggles = provider.fetchToggleStates(None).value.toggles
validateResponse(toggles)
}
"keeps latest sequence number in activation conditions" in {
val response =
"""
|{
| "sequenceNo": 10,
| "toggles": []
|}
""".stripMargin
val provider = createProvider(response)
provider.update()
val activations = provider.apply()
activations.stateSequenceNo mustBe Some(10)
}
"rejects stale toggle state updates" in {
val response =
"""
|{
| "sequenceNo": 5,
| "toggles": []
|}
""".stripMargin
val provider = createProvider(response)
val maybeToggleStates = provider.fetchToggleStates(Some(10))
maybeToggleStates mustBe None
}
"fails when server returns no sequence number, but client already has one" in {
val response = """[]"""
val provider = createProvider(response)
val maybeToggleStates = provider.fetchToggleStates(Some(10))
maybeToggleStates mustBe None
}
"fail if toggle endpoint returns 500" in {
val poller: TogglePoller = _ => PollResponse(500, "", "")
val provider = createProvider(poller)
provider.fetchToggleStates(None) mustBe None
provider.update()
provider.apply() mustBe DefaultActivations
}
"fail if toggle endpoint returns malformed json" in {
val provider = createProvider("ok")
provider.fetchToggleStates(None) mustBe None
provider.update()
provider.apply() mustBe DefaultActivations
}
"fail if poller throws exception" in {
val poller: TogglePoller = _ => throw new RuntimeException("boom")
val provider = createProvider(poller)
provider.fetchToggleStates(None) mustBe None
}
}
"Creating activation provider" should {
"succeed with a valid url" in {
val provider = RemoteActivationsProvider("http://localhost:9000")
provider.close()
}
}
"Created activation provider" should {
def toguruResponse(body: String): Response[String] =
Response
.ok(body)
.copy(headers =
List(
Header.contentType(
MediaType
.parse(RemoteActivationsProvider.MimeApiV3)
.getOrElse(throw new IllegalArgumentException)
)
)
)
"poll remote url" in {
val stub = SttpBackendStub.synchronous.whenAnyRequest.thenRespond(
toguruResponse(
"""
|{
| "sequenceNo": 10,
| "toggles": [{"id":"toggle-one","tags":{"team":"Toguru Team","services":"toguru"},"activations":[{"rollout":{"percentage":20}, "attributes":{}}]}]
|}
""".stripMargin
)
)
val provider = createProvider(stub)
val rolloutCondition = Condition.UuidRange(1 to 20)
waitFor(100, 100.millis) {
provider.apply() != DefaultActivations
}
provider.close()
val activations = provider.apply()
activations.apply(toggleOne) mustBe rolloutCondition
activations.togglesFor("toguru") mustBe Map(toggleOne.id -> rolloutCondition)
}
"sends accept header when polling remote url" in {
var acceptHeader: Option[String] = None
val stub =
SttpBackendStub.synchronous.whenAnyRequest.thenRespondWrapped { req =>
acceptHeader = req.headers
.find(_.name == "Accept")
.map(_.value)
toguruResponse("""{ "sequenceNo": 10, "toggles": [] }""")
}
val provider = createProvider(stub)
waitFor(100, 100.millis) {
acceptHeader.isDefined
}
provider.close()
acceptHeader mustBe Some(RemoteActivationsProvider.MimeApiV3)
}
"poll remote url with sequenceNo" in {
var maybeSeqNo: Option[String] = None
val stub =
SttpBackendStub.synchronous.whenAnyRequest.thenRespondWrapped { req =>
maybeSeqNo = req.uri.paramsMap.get("seqNo")
toguruResponse(
"""
|{
| "sequenceNo": 10,
| "toggles": [{"id":"toggle-one","tags":{"team":"Toguru Team","services":"toguru"},"activations":[{"rollout":{"percentage":20}, "attributes":{}}]}]
|}""".stripMargin
)
}
val provider = createProvider(stub)
waitFor(100, 100.millis)(maybeSeqNo.isDefined)
provider.close()
maybeSeqNo mustBe Some("10")
}
}
/**
* @param times how many times we want to try.
* @param wait how long to wait before the next try
* @param test returns true if test (finally) succeeded, false if we need to retry
*/
def waitFor(times: Int, wait: FiniteDuration = 2.second)(test: => Boolean): Unit = {
val success = (1 to times).exists { i =>
if (test)
true
else {
if (i < times)
Thread.sleep(wait.toMillis)
false
}
}
success mustBe true
}
}
|
AutoScout24/toguru-scala-client
|
core/src/test/scala/toguru/impl/RemoteActivationsProviderSpec.scala
|
Scala
|
mit
| 8,371 |
/*
* Copyright ixias.net All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license
* For the full copyright and license information,
* please view the LICENSE file that was distributed with this source code.
*/
package ixias.mail
import java.util.ArrayList
import javax.inject.Singleton
import scala.util.{ Success, Failure }
import scala.concurrent.{ Future, ExecutionContext }
import org.apache.http.NameValuePair
import org.apache.http.message.BasicNameValuePair
import com.twilio.sdk.TwilioRestClient
import com.twilio.sdk.resource.instance.Message
// Send an email (SMS) via Twilio REST API
//~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@Singleton
class EmailClientViaTwillio extends EmailClient with EmailConfig {
/**
* Send an email with the provided data.
*/
override def send(to: UserEmail, tpl: EmailTemplate[_])
(implicit ctx: ExecutionContext): Future[String] =
for {
from <- Future.fromTry(getTwillioFrom())
message <- send(to, UserEmail(from), tpl)
} yield message
/**
* Send an email with the provided data.
*/
override def send(to: UserEmail, from: UserEmail, tpl: EmailTemplate[_])
(implicit ctx: ExecutionContext): Future[String] =
Future.fromTry(for {
sid <- getTwillioSid()
token <- getTwillioAuthToken()
} yield {
val client = new TwilioRestClient(sid, token)
val mParams = new ArrayList[NameValuePair]()
mParams.add(new BasicNameValuePair("To", to.address))
mParams.add(new BasicNameValuePair("From", from.address))
mParams.add(new BasicNameValuePair("Body", tpl.getBodySMSText(to, from).get))
val message: Message = client.getAccount().getMessageFactory().create(mParams)
message.getBody()
}) andThen {
case Success(_) => logger.info("[SUCCESS] to=" + to.address)
case Failure(ex) => logger.error("[FAILURE] to=" + to.address, ex)
} recover {
case _: Throwable if tpl.silent => ""
}
}
|
sp1rytus/ixias
|
framework/ixias-mail/src/main/scala/ixias/EmailClientViaTwillio.scala
|
Scala
|
mit
| 1,995 |
package org.jetbrains.plugins.scala.scalai18n.codeInspection.i18n.internal
import com.intellij.codeInspection.LocalInspectionTool
import org.jetbrains.plugins.scala.DependencyManagerBase.RichStr
import org.jetbrains.plugins.scala.base.libraryLoaders.{IvyManagedLoader, LibraryLoader}
import org.jetbrains.plugins.scala.codeInspection.{ScalaInspectionBundle, ScalaInspectionTestBase}
class DynamicPropertyKeyInspectionTest extends ScalaInspectionTestBase {
override protected val classOfInspection: Class[_ <: LocalInspectionTool] =
classOf[DynamicPropertyKeyInspection]
override protected val description =
ScalaInspectionBundle.message("internal.only.pass.hardcoded.strings.as.property.keys")
override protected def librariesLoaders: Seq[LibraryLoader] = super.librariesLoaders ++ Seq(
IvyManagedLoader("org.jetbrains" % "annotations" % "22.0.0")
)
override protected def createTestText(text: String): String =
s"""import org.jetbrains.annotations.PropertyKey
|
|object MyBundle {
       |  private final val BUNDLE = "MyBundle"
|
| def message(@PropertyKey(resourceBundle = BUNDLE) key: String, params: Any*): String = ???
|}
|
|//noinspection ScalaUnresolvedPropertyKey
|class MyMainScala {
| $text
|}
|""".stripMargin
def test_no_errors_for_simple_string_literal(): Unit = {
checkTextHasNoErrors(raw"""MyBundle.message("my.key1")""")
}
def test_has_errors_for_interpolated_string_literal(): Unit = {
checkTextHasError(raw"""MyBundle.message(${START}s"my.interpolated.key1"$END)""")
}
def test_has_errors_non_string_literal(): Unit = {
checkTextHasError(
raw"""val key: String =
| if (???) "property.key.1"
| else "property.key.2"
|MyBundle.message(${START}key$END)
|""".stripMargin
)
}
def test_no_errors_for_value_definition(): Unit = {
checkTextHasNoErrors(
raw"""
|@PropertyKey(resourceBundle = "MyBundle")
|val value: String = "invalid.key"
|""".stripMargin
)
}
}
|
JetBrains/intellij-scala
|
scala/integration/properties/test/org/jetbrains/plugins/scala/scalai18n/codeInspection/i18n/internal/DynamicPropertyKeyInspectionTest.scala
|
Scala
|
apache-2.0
| 2,118 |
package de.htwg.zeta.server.module
import javax.inject.Singleton
import com.google.inject.Provides
import de.htwg.zeta.common.format.entity.BondedTaskFormat
import de.htwg.zeta.common.format.entity.EventDrivenTaskFormat
import de.htwg.zeta.common.format.entity.FileFormat
import de.htwg.zeta.common.format.entity.FilterFormat
import de.htwg.zeta.common.format.entity.GeneratorFormat
import de.htwg.zeta.common.format.entity.GeneratorImageFormat
import de.htwg.zeta.common.format.entity.TimedTaskFormat
import de.htwg.zeta.common.format.entity.UserFormat
import de.htwg.zeta.common.format.model.EdgeFormat
import de.htwg.zeta.common.format.model.GDSLInstanceProjectFormat
import de.htwg.zeta.common.format.model.GraphicalDslInstanceFormat
import de.htwg.zeta.common.format.model.NodeFormat
import de.htwg.zeta.common.format.project.AttributeFormat
import de.htwg.zeta.common.format.project.AttributeTypeFormat
import de.htwg.zeta.common.format.project.AttributeValueFormat
import de.htwg.zeta.common.format.project.ClassFormat
import de.htwg.zeta.common.format.project.ConceptFormat
import de.htwg.zeta.common.format.project.EnumFormat
import de.htwg.zeta.common.format.project.GdslProjectFormat
import de.htwg.zeta.common.format.project.GraphicalDslReleaseFormat
import de.htwg.zeta.common.format.project.MethodFormat
import de.htwg.zeta.common.format.project.ReferenceFormat
import de.htwg.zeta.common.format.project.gdsl.shape.ShapeFormat
import de.htwg.zeta.common.format.project.gdsl.DiagramsFormat
import de.htwg.zeta.common.format.project.gdsl.StylesFormat
import net.codingwell.scalaguice.ScalaModule
class JsonFormatModule extends ScalaModule {
private val sString = "String"
private val sBoolean = "Bool"
private val sInt = "Int"
private val sDouble = "Double"
override def configure(): Unit = {
bind[EventDrivenTaskFormat].toInstance(new EventDrivenTaskFormat)
bind[BondedTaskFormat].toInstance(new BondedTaskFormat)
bind[FileFormat].toInstance(new FileFormat)
bind[FilterFormat].toInstance(new FilterFormat)
bind[GeneratorImageFormat].toInstance(new GeneratorImageFormat(sSchema = s"$$schema", sRef = s"$$ref"))
bind[GeneratorFormat].toInstance(new GeneratorFormat)
bind[TimedTaskFormat].toInstance(new TimedTaskFormat)
bind[AttributeTypeFormat].toInstance(new AttributeTypeFormat(sString = sString, sBoolean = sBoolean, sInt = sInt, sDouble = sDouble, sUnit = "Unit"))
bind[AttributeValueFormat].toInstance(new AttributeValueFormat(sString = sString, sBoolean = sBoolean, sInt = sInt, sDouble = sDouble))
bind[EnumFormat].toInstance(new EnumFormat)
bind[UserFormat].toInstance(new UserFormat)
}
@Provides
@Singleton
def provideClassFormat(
attributeFormat: AttributeFormat,
methodFormat: MethodFormat
): ClassFormat = {
new ClassFormat(attributeFormat, methodFormat)
}
@Provides
@Singleton
def provideReferenceFormat(
attributeFormat: AttributeFormat,
methodFormat: MethodFormat
): ReferenceFormat = {
new ReferenceFormat(attributeFormat, methodFormat)
}
@Provides
@Singleton
def provideMetaModelFormat(
enumFormat: EnumFormat,
classFormat: ClassFormat,
referenceFormat: ReferenceFormat,
attributeFormat: AttributeFormat,
methodFormat: MethodFormat
): ConceptFormat = {
new ConceptFormat(enumFormat, classFormat, referenceFormat, attributeFormat, methodFormat)
}
@Provides
@Singleton
def provideMetaModelEntityFormat(
metaModelFormat: ConceptFormat
): GdslProjectFormat = {
new GdslProjectFormat(metaModelFormat)
}
@Provides
@Singleton
def provideMetaModelReleaseFormat(
metaModelFormat: ConceptFormat
): GraphicalDslReleaseFormat = {
new GraphicalDslReleaseFormat(metaModelFormat)
}
@Provides
@Singleton
def provideModelFormat(
nodeFormat: NodeFormat,
edgeFormat: EdgeFormat,
attributeFormat: AttributeFormat,
attributeValueFormat: AttributeValueFormat,
methodFormat: MethodFormat
): GraphicalDslInstanceFormat = {
new GraphicalDslInstanceFormat(nodeFormat, edgeFormat, attributeFormat, attributeValueFormat, methodFormat)
}
@Provides
@Singleton
def provideGDSLInstanceProjectFormat(
gDSLInstanceFormat: GraphicalDslInstanceFormat,
conceptFormat: ConceptFormat,
shapeFormat: ShapeFormat,
diagramFormat: DiagramsFormat,
styleFormat: StylesFormat
): GDSLInstanceProjectFormat = {
    new GDSLInstanceProjectFormat(gDSLInstanceFormat, conceptFormat, shapeFormat, diagramFormat, styleFormat)
}
@Provides
@Singleton
def provideNodeFormat(
attributeFormat: AttributeFormat,
attributeValueFormat: AttributeValueFormat,
methodFormat: MethodFormat
): NodeFormat = {
new NodeFormat(attributeFormat, attributeValueFormat, methodFormat)
}
@Provides
@Singleton
def provideEdgeFormat(
attributeFormat: AttributeFormat,
attributeValueFormat: AttributeValueFormat,
methodFormat: MethodFormat
): EdgeFormat = {
new EdgeFormat(attributeFormat, attributeValueFormat, methodFormat)
}
@Provides
@Singleton
def provideAttributeFormat(
attributeTypeFormat: AttributeTypeFormat,
attributeValueFormat: AttributeValueFormat
): AttributeFormat = {
new AttributeFormat(attributeTypeFormat, attributeValueFormat)
}
@Provides
@Singleton
def provideMethodFormat(
attributeTypeFormat: AttributeTypeFormat
): MethodFormat = {
new MethodFormat(attributeTypeFormat)
}
}
|
Zeta-Project/zeta
|
api/server/app/de/htwg/zeta/server/module/JsonFormatModule.scala
|
Scala
|
bsd-2-clause
| 5,547 |
package sbt
package inc
import xsbti.api.Source
import xsbt.api.SameAPI
import java.io.File
private final class IncrementalDefaultImpl(log: Logger, options: IncOptions) extends IncrementalCommon(log, options) {
  // Package objects are fragile: if they inherit from an invalidated source, we get a "class file needed by package is missing" error
// This might be too conservative: we probably only need package objects for packages of invalidated sources.
override protected def invalidatedPackageObjects(invalidated: Set[File], relations: Relations, apis: APIs): Set[File] =
invalidated flatMap relations.publicInherited.internal.reverse filter apis.hasPackageObject
override protected def sameAPI[T](src: T, a: Source, b: Source): Option[SourceAPIChange[T]] = {
if (SameAPI(a, b))
None
else {
val sourceApiChange = SourceAPIChange(src)
Some(sourceApiChange)
}
}
/** Invalidates sources based on initially detected 'changes' to the sources, products, and dependencies.*/
override protected def invalidateByExternal(relations: Relations, externalAPIChange: APIChange[String]): Set[File] = {
val modified = externalAPIChange.modified
// Propagate public inheritance dependencies transitively.
// This differs from normal because we need the initial crossing from externals to sources in this project.
val externalInheritedR = relations.publicInherited.external
val byExternalInherited = externalInheritedR.reverse(modified)
val internalInheritedR = relations.publicInherited.internal
val transitiveInherited = transitiveDeps(byExternalInherited)(internalInheritedR.reverse _)
// Get the direct dependencies of all sources transitively invalidated by inheritance
val directA = transitiveInherited flatMap relations.direct.internal.reverse
// Get the sources that directly depend on externals. This includes non-inheritance dependencies and is not transitive.
val directB = relations.direct.external.reverse(modified)
transitiveInherited ++ directA ++ directB
}
override protected def invalidateSource(relations: Relations, change: APIChange[File]): Set[File] = {
def reverse(r: Relations.Source) = r.internal.reverse _
val directDeps: File => Set[File] = reverse(relations.direct)
val publicInherited: File => Set[File] = reverse(relations.publicInherited)
log.debug("Invalidating by inheritance (transitively)...")
val transitiveInherited = transitiveDeps(Set(change.modified))(publicInherited)
log.debug("Invalidated by transitive public inheritance: " + transitiveInherited)
val direct = transitiveInherited flatMap directDeps
log.debug("Invalidated by direct dependency: " + direct)
transitiveInherited ++ direct
}
override protected def allDeps(relations: Relations): File => Set[File] =
f => relations.direct.internal.reverse(f)
}
|
som-snytt/xsbt
|
compile/inc/src/main/scala/sbt/inc/IncrementalDefaultImpl.scala
|
Scala
|
bsd-3-clause
| 2,882 |
package org.jetbrains.plugins.scala.testingSupport.utest.scala2_12.utest_0_7_4
import org.jetbrains.plugins.scala.testingSupport.utest.UTestNewSyntaxSimpleTest
class UTestSimpleTest_2_12_0_7_4 extends UTestTestBase_2_12_0_7_4 with UTestNewSyntaxSimpleTest
|
JetBrains/intellij-scala
|
scala/scala-impl/test/org/jetbrains/plugins/scala/testingSupport/utest/scala2_12/utest_0_7_4/UTestSimpleTest_2_12_0_7_4.scala
|
Scala
|
apache-2.0
| 257 |
package java.lang
trait AutoCloseable {
def close(): Unit
}
|
cedricviaccoz/scala-native
|
javalib/src/main/scala/java/lang/AutoCloseable.scala
|
Scala
|
bsd-3-clause
| 63 |
package demo
package pages
import demo.components.LeftNavPage
import demo.routes.{LeftRoute, ReactTreeViewRouteModule}
import japgolly.scalajs.react._
import japgolly.scalajs.react.extra.router.RouterCtl
object ReactTreeViewPage {
val component = ScalaComponent
.builder[Props]("ReactTreeViewPage")
.renderBackend[Backend]
.build
class Backend(t: BackendScope[Props, Unit]) {
def render(P: Props) =
LeftNavPage(ReactTreeViewRouteModule.menu, P.selectedPage, P.ctrl)
}
case class Props(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute])
def apply(selectedPage: LeftRoute, ctrl: RouterCtl[LeftRoute]) =
component(Props(selectedPage, ctrl))
}
|
aparo/scalajs-react-components
|
demo/src/main/scala/demo/pages/ReactTreeViewPage.scala
|
Scala
|
apache-2.0
| 684 |
// Copyright (C) 2011 Dmitri Nikulin
//
// This file is part of Vijil.
//
// Vijil is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Vijil is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with Vijil. If not, see <http://www.gnu.org/licenses/>.
//
// Repository: https://github.com/dnikulin/vijil
// Email: [email protected]
package com.dnikulin.vijil.tools
object TryOrNone {
def apply[T](work: => Option[T]): Option[T] = {
try {work}
catch {case _ => None}
}
}
object TryTraced {
def apply[T](work: => Option[T]): Option[T] = {
try {
work
} catch {
case ex: Throwable =>
ex.printStackTrace()
None
}
}
}
|
dnikulin/vijil
|
src/main/scala/com/dnikulin/vijil/tools/TryOrNone.scala
|
Scala
|
agpl-3.0
| 1,190 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.jms.check
import javax.jms.Message
import scala.annotation.implicitNotFound
import io.gatling.core.check.{ CheckBuilder, CheckProtocolProvider, FindCheckBuilder, ValidatorCheckBuilder }
import io.gatling.core.check.extractor.xpath.XmlParsers
import io.gatling.jms.JmsCheck
trait JmsCheckSupport {
def simpleCheck = JmsSimpleCheck
@implicitNotFound("Could not find a CheckProtocolProvider. This check might not be a valid JMS one.")
implicit def checkBuilder2JmsCheck[A, P, X](checkBuilder: CheckBuilder[A, P, X])(implicit provider: CheckProtocolProvider[A, JmsCheck, Message, P]): JmsCheck =
checkBuilder.build(provider)
@implicitNotFound("Could not find a CheckProtocolProvider. This check might not be a valid JMS one.")
implicit def validatorCheckBuilder2JmsCheck[A, P, X](validatorCheckBuilder: ValidatorCheckBuilder[A, P, X])(implicit provider: CheckProtocolProvider[A, JmsCheck, Message, P]): JmsCheck =
validatorCheckBuilder.exists
@implicitNotFound("Could not find a CheckProtocolProvider. This check might not be a valid JMS one.")
implicit def findCheckBuilder2JmsCheck[A, P, X](findCheckBuilder: FindCheckBuilder[A, P, X])(implicit provider: CheckProtocolProvider[A, JmsCheck, Message, P]): JmsCheck =
findCheckBuilder.find.exists
implicit def jmsXPathProvider(implicit xmlParsers: XmlParsers) = new JmsXPathProvider(xmlParsers)
}
|
wiacekm/gatling
|
gatling-jms/src/main/scala/io/gatling/jms/check/JmsCheckSupport.scala
|
Scala
|
apache-2.0
| 2,016 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.accumulo.security
import org.locationtech.geomesa.security.AuthorizationsProvider
import org.springframework.security.authentication.TestingAuthenticationToken
import org.springframework.security.core.context.SecurityContextHolder
class TestAuthorizationsProvider extends AuthorizationsProvider {
override def getAuthorizations: java.util.List[String] = {
import scala.collection.JavaConversions._
val authentication = SecurityContextHolder.getContext.getAuthentication.asInstanceOf[TestingAuthenticationToken]
new java.util.ArrayList[String](authentication.getAuthorities.map(_.getAuthority))
}
override def configure(params: java.util.Map[String, java.io.Serializable]): Unit = {}
}
|
MutahirKazmi/geomesa
|
geomesa-accumulo/geomesa-accumulo-security/src/test/scala/org/locationtech/geomesa/accumulo/security/TestAuthorizationsProvider.scala
|
Scala
|
apache-2.0
| 1,201 |
/*
* Copyright (C) 2016 Department for Business, Energy and Industrial Strategy
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package forms
import controllers.{FieldCheck, FieldChecks, JsonHelpers}
import forms.validation._
import models.Question
import play.api.libs.json.{JsObject, Json}
case class DateTimeRangeField(name: String, allowPast: Boolean, isEndDateMandatory: Boolean) extends Field with DateTimeFormats {
implicit val dvReads = Json.reads[DateValues]
implicit val dtrReads = Json.reads[DateTimeRangeValues]
val startDateField = DateField(s"$name.startDate", allowPast)
val endDateField = DateField(s"$name.endDate", allowPast)
val validator = DateTimeRangeValidator(allowPast = allowPast, isEndDateMandatory = isEndDateMandatory)
override val check: FieldCheck = FieldChecks.fromValidator(validator)
override def renderFormInput(questions: Map[String, Question], answers: JsObject, errs: Seq[FieldError], hints: Seq[FieldHint]) =
views.html.renderers.dateTimeRangeField(this, questions, answers, errs, hints)
override def renderPreview(questions: Map[String, Question], answers: JsObject) = {
val flattenedAnswers = JsonHelpers.flatten("", answers)
val startDateValues = dateValuesFor(s"${startDateField.name}", flattenedAnswers)
val endDateValues = dateValuesFor(s"${endDateField.name}", flattenedAnswers)
val vs = DateTimeRangeValues(Some(startDateValues), Some(endDateValues), endDateProvided = None)
validator.validate("", vs).map { dtr =>
views.html.renderers.preview.dateTimeRangeField(
this,
fmt.print(dtr.startDate),
accessFmt.print(dtr.startDate),
dtr.endDate.map(fmt.print),
dtr.endDate.map(accessFmt.print))
}.leftMap { errs =>
views.html.renderers.preview.dateTimeRangeField(this, "None", "None", None, None)
}.fold(identity, identity)
}
def dateValuesFor(name: String, answers: Map[String, String]): DateValues =
DateValues(answers.get(s"$name.day"), answers.get(s"$name.month"), answers.get(s"$name.year"))
}
|
UKGovernmentBEIS/rifs-frontend-play
|
src/main/scala/forms/DateTimeRangeField.scala
|
Scala
|
gpl-3.0
| 2,666 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.util.concurrent.TimeUnit
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.util.control.{ControlThrowable, NonFatal}
import com.codahale.metrics.{Counter, Gauge, MetricRegistry}
import org.apache.spark.internal.{config, Logging}
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.DECOMMISSION_ENABLED
import org.apache.spark.internal.config.Tests.TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED
import org.apache.spark.metrics.source.Source
import org.apache.spark.resource.ResourceProfile.UNKNOWN_RESOURCE_PROFILE_ID
import org.apache.spark.resource.ResourceProfileManager
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.dynalloc.ExecutorMonitor
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
/**
* An agent that dynamically allocates and removes executors based on the workload.
*
* The ExecutorAllocationManager maintains a moving target number of executors, for each
* ResourceProfile, which is periodically synced to the cluster manager. The target starts
* at a configured initial value and changes with the number of pending and running tasks.
*
* Decreasing the target number of executors happens when the current target is more than needed to
* handle the current load. The target number of executors is always truncated to the number of
* executors that could run all current running and pending tasks at once.
*
* Increasing the target number of executors happens in response to backlogged tasks waiting to be
* scheduled. If the scheduler queue is not drained in M seconds, then new executors are added. If
* the queue persists for another N seconds, then more executors are added and so on. The number
* added in each round increases exponentially from the previous round until an upper bound has been
* reached. The upper bound is based both on a configured property and on the current number of
* running and pending tasks, as described above.
*
* The rationale for the exponential increase is twofold: (1) Executors should be added slowly
* in the beginning in case the number of extra executors needed turns out to be small. Otherwise,
* we may add more executors than we need just to remove them later. (2) Executors should be added
* quickly over time in case the maximum number of executors is very high. Otherwise, it will take
* a long time to ramp up under heavy workloads.
*
* The remove policy is simpler and is applied on each ResourceProfile separately. If an executor
* for that ResourceProfile has been idle for K seconds and the number of executors is more
 * than what is needed for that ResourceProfile, meaning there are not enough tasks that could use
* the executor, then it is removed. Note that an executor caching any data
* blocks will be removed if it has been idle for more than L seconds.
*
* There is no retry logic in either case because we make the assumption that the cluster manager
* will eventually fulfill all requests it receives asynchronously.
*
* The relevant Spark properties are below. Each of these properties applies separately to
* every ResourceProfile. So if you set a minimum number of executors, that is a minimum
* for each ResourceProfile.
*
* spark.dynamicAllocation.enabled - Whether this feature is enabled
* spark.dynamicAllocation.minExecutors - Lower bound on the number of executors
* spark.dynamicAllocation.maxExecutors - Upper bound on the number of executors
* spark.dynamicAllocation.initialExecutors - Number of executors to start with
*
* spark.dynamicAllocation.executorAllocationRatio -
* This is used to reduce the parallelism of the dynamic allocation that can waste
* resources when tasks are small
*
* spark.dynamicAllocation.schedulerBacklogTimeout (M) -
* If there are backlogged tasks for this duration, add new executors
*
* spark.dynamicAllocation.sustainedSchedulerBacklogTimeout (N) -
* If the backlog is sustained for this duration, add more executors
* This is used only after the initial backlog timeout is exceeded
*
* spark.dynamicAllocation.executorIdleTimeout (K) -
* If an executor without caching any data blocks has been idle for this duration, remove it
*
* spark.dynamicAllocation.cachedExecutorIdleTimeout (L) -
* If an executor with caching data blocks has been idle for more than this duration,
* the executor will be removed
*
*/
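// Illustrative sketch only (not part of Spark's sources): one way the dynamic
// allocation properties described above could be set when building a SparkConf.
// The concrete values are assumptions chosen purely for this example; with such
// settings the add policy ramps up roughly exponentially (1, 2, 4, ... executors
// per round) between minExecutors and maxExecutors.
//
//   val conf = new SparkConf()
//     .set("spark.dynamicAllocation.enabled", "true")
//     .set("spark.dynamicAllocation.minExecutors", "2")
//     .set("spark.dynamicAllocation.maxExecutors", "20")
//     .set("spark.dynamicAllocation.initialExecutors", "2")
//     .set("spark.dynamicAllocation.schedulerBacklogTimeout", "1s")            // M
//     .set("spark.dynamicAllocation.sustainedSchedulerBacklogTimeout", "5s")   // N
//     .set("spark.dynamicAllocation.executorIdleTimeout", "60s")               // K
//     .set("spark.dynamicAllocation.cachedExecutorIdleTimeout", "600s")        // L
//     .set("spark.dynamicAllocation.executorAllocationRatio", "0.5")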
private[spark] class ExecutorAllocationManager(
client: ExecutorAllocationClient,
listenerBus: LiveListenerBus,
conf: SparkConf,
cleaner: Option[ContextCleaner] = None,
clock: Clock = new SystemClock(),
resourceProfileManager: ResourceProfileManager)
extends Logging {
allocationManager =>
import ExecutorAllocationManager._
// Lower and upper bounds on the number of executors.
private val minNumExecutors = conf.get(DYN_ALLOCATION_MIN_EXECUTORS)
private val maxNumExecutors = conf.get(DYN_ALLOCATION_MAX_EXECUTORS)
private val initialNumExecutors = Utils.getDynamicAllocationInitialExecutors(conf)
// How long there must be backlogged tasks for before an addition is triggered (seconds)
private val schedulerBacklogTimeoutS = conf.get(DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT)
// Same as above, but used only after `schedulerBacklogTimeoutS` is exceeded
private val sustainedSchedulerBacklogTimeoutS =
conf.get(DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT)
// During testing, the methods to actually kill and add executors are mocked out
private val testing = conf.get(DYN_ALLOCATION_TESTING)
private val executorAllocationRatio =
conf.get(DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO)
private val decommissionEnabled = conf.get(DECOMMISSION_ENABLED)
private val defaultProfileId = resourceProfileManager.defaultResourceProfile.id
validateSettings()
// Number of executors to add for each ResourceProfile in the next round
private[spark] val numExecutorsToAddPerResourceProfileId = new mutable.HashMap[Int, Int]
numExecutorsToAddPerResourceProfileId(defaultProfileId) = 1
// The desired number of executors at this moment in time. If all our executors were to die, this
// is the number of executors we would immediately want from the cluster manager.
  // Note that every profile is allowed to start with the initial number of executors;
  // we may want to make this configurable per ResourceProfile in the future.
private[spark] val numExecutorsTargetPerResourceProfileId = new mutable.HashMap[Int, Int]
numExecutorsTargetPerResourceProfileId(defaultProfileId) = initialNumExecutors
// A timestamp of when an addition should be triggered, or NOT_SET if it is not set
// This is set when pending tasks are added but not scheduled yet
private var addTime: Long = NOT_SET
// Polling loop interval (ms)
private val intervalMillis: Long = 100
// Listener for Spark events that impact the allocation policy
val listener = new ExecutorAllocationListener
// Executor that handles the scheduling task.
private val executor =
ThreadUtils.newDaemonSingleThreadScheduledExecutor("spark-dynamic-executor-allocation")
// Metric source for ExecutorAllocationManager to expose internal status to MetricsSystem.
val executorAllocationManagerSource = new ExecutorAllocationManagerSource(this)
val executorMonitor =
new ExecutorMonitor(conf, client, listenerBus, clock, executorAllocationManagerSource)
// Whether we are still waiting for the initial set of executors to be allocated.
// While this is true, we will not cancel outstanding executor requests. This is
// set to false when:
// (1) a stage is submitted, or
// (2) an executor idle timeout has elapsed.
@volatile private var initializing: Boolean = true
// Number of locality aware tasks for each ResourceProfile, used for executor placement.
private var numLocalityAwareTasksPerResourceProfileId = new mutable.HashMap[Int, Int]
numLocalityAwareTasksPerResourceProfileId(defaultProfileId) = 0
// ResourceProfile id to Host to possible task running on it, used for executor placement.
private var rpIdToHostToLocalTaskCount: Map[Int, Map[String, Int]] = Map.empty
/**
* Verify that the settings specified through the config are valid.
* If not, throw an appropriate exception.
*/
private def validateSettings(): Unit = {
if (minNumExecutors < 0 || maxNumExecutors < 0) {
      throw new SparkException(
        s"${DYN_ALLOCATION_MIN_EXECUTORS.key} and ${DYN_ALLOCATION_MAX_EXECUTORS.key} must be " +
          "non-negative!")
}
if (maxNumExecutors == 0) {
throw new SparkException(s"${DYN_ALLOCATION_MAX_EXECUTORS.key} cannot be 0!")
}
if (minNumExecutors > maxNumExecutors) {
throw new SparkException(s"${DYN_ALLOCATION_MIN_EXECUTORS.key} ($minNumExecutors) must " +
s"be less than or equal to ${DYN_ALLOCATION_MAX_EXECUTORS.key} ($maxNumExecutors)!")
}
if (schedulerBacklogTimeoutS <= 0) {
throw new SparkException(s"${DYN_ALLOCATION_SCHEDULER_BACKLOG_TIMEOUT.key} must be > 0!")
}
if (sustainedSchedulerBacklogTimeoutS <= 0) {
      throw new SparkException(
        s"${DYN_ALLOCATION_SUSTAINED_SCHEDULER_BACKLOG_TIMEOUT.key} must be > 0!")
}
if (!conf.get(config.SHUFFLE_SERVICE_ENABLED)) {
// If dynamic allocation shuffle tracking or worker decommissioning along with
// storage shuffle decommissioning is enabled we have *experimental* support for
// decommissioning without a shuffle service.
if (conf.get(config.DYN_ALLOCATION_SHUFFLE_TRACKING_ENABLED) ||
(decommissionEnabled &&
conf.get(config.STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED))) {
logWarning("Dynamic allocation without a shuffle service is an experimental feature.")
} else if (!testing) {
throw new SparkException("Dynamic allocation of executors requires the external " +
"shuffle service. You may enable this through spark.shuffle.service.enabled.")
}
}
if (executorAllocationRatio > 1.0 || executorAllocationRatio <= 0.0) {
throw new SparkException(
s"${DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO.key} must be > 0 and <= 1.0")
}
}
/**
* Register for scheduler callbacks to decide when to add and remove executors, and start
* the scheduling task.
*/
def start(): Unit = {
listenerBus.addToManagementQueue(listener)
listenerBus.addToManagementQueue(executorMonitor)
cleaner.foreach(_.attachListener(executorMonitor))
val scheduleTask = new Runnable() {
override def run(): Unit = {
try {
schedule()
} catch {
case ct: ControlThrowable =>
throw ct
case t: Throwable =>
logWarning(s"Uncaught exception in thread ${Thread.currentThread().getName}", t)
}
}
}
if (!testing || conf.get(TEST_DYNAMIC_ALLOCATION_SCHEDULE_ENABLED)) {
executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS)
}
    // copy the maps inside the synchronized block to ensure they are not modified concurrently
val (numExecutorsTarget, numLocalityAware) = synchronized {
val numTarget = numExecutorsTargetPerResourceProfileId.toMap
val numLocality = numLocalityAwareTasksPerResourceProfileId.toMap
(numTarget, numLocality)
}
client.requestTotalExecutors(numExecutorsTarget, numLocalityAware, rpIdToHostToLocalTaskCount)
}
/**
* Stop the allocation manager.
*/
def stop(): Unit = {
executor.shutdown()
executor.awaitTermination(10, TimeUnit.SECONDS)
}
/**
* Reset the allocation manager when the cluster manager loses track of the driver's state.
* This is currently only done in YARN client mode, when the AM is restarted.
*
   * This method forgets any state about existing executors, and forces the scheduler to
* re-evaluate the number of needed executors the next time it's run.
*/
def reset(): Unit = synchronized {
addTime = 0L
numExecutorsTargetPerResourceProfileId.keys.foreach { rpId =>
numExecutorsTargetPerResourceProfileId(rpId) = initialNumExecutors
}
numExecutorsToAddPerResourceProfileId.keys.foreach { rpId =>
numExecutorsToAddPerResourceProfileId(rpId) = 1
}
executorMonitor.reset()
}
/**
* The maximum number of executors, for the ResourceProfile id passed in, that we would need
* under the current load to satisfy all running and pending tasks, rounded up.
*/
private[spark] def maxNumExecutorsNeededPerResourceProfile(rpId: Int): Int = {
val pending = listener.totalPendingTasksPerResourceProfile(rpId)
val pendingSpeculative = listener.pendingSpeculativeTasksPerResourceProfile(rpId)
val unschedulableTaskSets = listener.pendingUnschedulableTaskSetsPerResourceProfile(rpId)
val running = listener.totalRunningTasksPerResourceProfile(rpId)
val numRunningOrPendingTasks = pending + running
val rp = resourceProfileManager.resourceProfileFromId(rpId)
val tasksPerExecutor = rp.maxTasksPerExecutor(conf)
logDebug(s"max needed for rpId: $rpId numpending: $numRunningOrPendingTasks," +
s" tasksperexecutor: $tasksPerExecutor")
val maxNeeded = math.ceil(numRunningOrPendingTasks * executorAllocationRatio /
tasksPerExecutor).toInt
val maxNeededWithSpeculationLocalityOffset =
if (tasksPerExecutor > 1 && maxNeeded == 1 && pendingSpeculative > 0) {
// If we have pending speculative tasks and only need a single executor, allocate one more
// to satisfy the locality requirements of speculation
maxNeeded + 1
} else {
maxNeeded
}
if (unschedulableTaskSets > 0) {
      // Request additional executors to account for task sets with tasks that are unschedulable
      // because executors have been excluded due to failures, even though the active executor
      // count has already reached the maximum we would normally request.
val maxNeededForUnschedulables = math.ceil(unschedulableTaskSets * executorAllocationRatio /
tasksPerExecutor).toInt
math.max(maxNeededWithSpeculationLocalityOffset,
executorMonitor.executorCountWithResourceProfile(rpId) + maxNeededForUnschedulables)
} else {
maxNeededWithSpeculationLocalityOffset
}
}
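  // Worked example (illustrative numbers, not defaults): with 14 pending and 2 running tasks,
  // executorAllocationRatio = 1.0 and 4 tasks per executor, the base need is
  // ceil(16 * 1.0 / 4) = 4 executors; the speculation and unschedulable-task-set adjustments
  // above can only raise that number, never lower it.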
private def totalRunningTasksPerResourceProfile(id: Int): Int = synchronized {
listener.totalRunningTasksPerResourceProfile(id)
}
/**
* This is called at a fixed interval to regulate the number of pending executor requests
* and number of executors running.
*
* First, adjust our requested executors based on the add time and our current needs.
* Then, if the remove time for an existing executor has expired, kill the executor.
*
* This is factored out into its own method for testing.
*/
private def schedule(): Unit = synchronized {
val executorIdsToBeRemoved = executorMonitor.timedOutExecutors()
if (executorIdsToBeRemoved.nonEmpty) {
initializing = false
}
// Update executor target number only after initializing flag is unset
updateAndSyncNumExecutorsTarget(clock.nanoTime())
if (executorIdsToBeRemoved.nonEmpty) {
removeExecutors(executorIdsToBeRemoved)
}
}
/**
* Updates our target number of executors for each ResourceProfile and then syncs the result
* with the cluster manager.
*
* Check to see whether our existing allocation and the requests we've made previously exceed our
* current needs. If so, truncate our target and let the cluster manager know so that it can
* cancel pending requests that are unneeded.
*
* If not, and the add time has expired, see if we can request new executors and refresh the add
* time.
*
* @return the delta in the target number of executors.
*/
private def updateAndSyncNumExecutorsTarget(now: Long): Int = synchronized {
if (initializing) {
      // Do not change our target while we are still initializing;
      // otherwise the first job may have to ramp up unnecessarily.
0
} else {
val updatesNeeded = new mutable.HashMap[Int, ExecutorAllocationManager.TargetNumUpdates]
// Update targets for all ResourceProfiles then do a single request to the cluster manager
numExecutorsTargetPerResourceProfileId.foreach { case (rpId, targetExecs) =>
val maxNeeded = maxNumExecutorsNeededPerResourceProfile(rpId)
if (maxNeeded < targetExecs) {
// The target number exceeds the number we actually need, so stop adding new
// executors and inform the cluster manager to cancel the extra pending requests
// We lower the target number of executors but don't actively kill any yet. Killing is
// controlled separately by an idle timeout. It's still helpful to reduce
// the target number in case an executor just happens to get lost (e.g., bad hardware,
// or the cluster manager preempts it) -- in that case, there is no point in trying
// to immediately get a new executor, since we wouldn't even use it yet.
decrementExecutorsFromTarget(maxNeeded, rpId, updatesNeeded)
} else if (addTime != NOT_SET && now >= addTime) {
addExecutorsToTarget(maxNeeded, rpId, updatesNeeded)
}
}
doUpdateRequest(updatesNeeded.toMap, now)
}
}
private def addExecutorsToTarget(
maxNeeded: Int,
rpId: Int,
updatesNeeded: mutable.HashMap[Int, ExecutorAllocationManager.TargetNumUpdates]): Int = {
updateTargetExecs(addExecutors, maxNeeded, rpId, updatesNeeded)
}
private def decrementExecutorsFromTarget(
maxNeeded: Int,
rpId: Int,
updatesNeeded: mutable.HashMap[Int, ExecutorAllocationManager.TargetNumUpdates]): Int = {
updateTargetExecs(decrementExecutors, maxNeeded, rpId, updatesNeeded)
}
private def updateTargetExecs(
updateTargetFn: (Int, Int) => Int,
maxNeeded: Int,
rpId: Int,
updatesNeeded: mutable.HashMap[Int, ExecutorAllocationManager.TargetNumUpdates]): Int = {
val oldNumExecutorsTarget = numExecutorsTargetPerResourceProfileId(rpId)
// update the target number (add or remove)
val delta = updateTargetFn(maxNeeded, rpId)
if (delta != 0) {
updatesNeeded(rpId) = ExecutorAllocationManager.TargetNumUpdates(delta, oldNumExecutorsTarget)
}
delta
}
private def doUpdateRequest(
updates: Map[Int, ExecutorAllocationManager.TargetNumUpdates],
now: Long): Int = {
// Only call cluster manager if target has changed.
if (updates.size > 0) {
val requestAcknowledged = try {
logDebug("requesting updates: " + updates)
testing ||
client.requestTotalExecutors(
numExecutorsTargetPerResourceProfileId.toMap,
numLocalityAwareTasksPerResourceProfileId.toMap,
rpIdToHostToLocalTaskCount)
} catch {
case NonFatal(e) =>
          // Use INFO level so the error doesn't show up by default in shells.
// Errors here are more commonly caused by YARN AM restarts, which is a recoverable
// issue, and generate a lot of noisy output.
logInfo("Error reaching cluster manager.", e)
false
}
if (requestAcknowledged) {
// have to go through all resource profiles that changed
var totalDelta = 0
updates.foreach { case (rpId, targetNum) =>
val delta = targetNum.delta
totalDelta += delta
if (delta > 0) {
val executorsString = "executor" + { if (delta > 1) "s" else "" }
logInfo(s"Requesting $delta new $executorsString because tasks are backlogged " +
s"(new desired total will be ${numExecutorsTargetPerResourceProfileId(rpId)} " +
s"for resource profile id: ${rpId})")
numExecutorsToAddPerResourceProfileId(rpId) =
if (delta == numExecutorsToAddPerResourceProfileId(rpId)) {
numExecutorsToAddPerResourceProfileId(rpId) * 2
} else {
1
}
logDebug(s"Starting timer to add more executors (to " +
s"expire in $sustainedSchedulerBacklogTimeoutS seconds)")
addTime = now + TimeUnit.SECONDS.toNanos(sustainedSchedulerBacklogTimeoutS)
} else {
logDebug(s"Lowering target number of executors to" +
s" ${numExecutorsTargetPerResourceProfileId(rpId)} (previously " +
s"${targetNum.oldNumExecutorsTarget} for resource profile id: ${rpId}) " +
"because not all requested executors " +
"are actually needed")
}
}
totalDelta
} else {
// request was for all profiles so we have to go through all to reset to old num
updates.foreach { case (rpId, targetNum) =>
logWarning("Unable to reach the cluster manager to request more executors!")
numExecutorsTargetPerResourceProfileId(rpId) = targetNum.oldNumExecutorsTarget
}
0
}
} else {
logDebug("No change in number of executors")
0
}
}
private def decrementExecutors(maxNeeded: Int, rpId: Int): Int = {
val oldNumExecutorsTarget = numExecutorsTargetPerResourceProfileId(rpId)
numExecutorsTargetPerResourceProfileId(rpId) = math.max(maxNeeded, minNumExecutors)
numExecutorsToAddPerResourceProfileId(rpId) = 1
numExecutorsTargetPerResourceProfileId(rpId) - oldNumExecutorsTarget
}
/**
* Update the target number of executors and figure out how many to add.
* If the cap on the number of executors is reached, give up and reset the
* number of executors to add next round instead of continuing to double it.
*
* @param maxNumExecutorsNeeded the maximum number of executors all currently running or pending
* tasks could fill
* @param rpId the ResourceProfile id of the executors
* @return the number of additional executors actually requested.
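   * For example (illustrative), starting from numExecutorsToAdd = 1 and a persistently
   * backlogged queue, successive rounds request roughly 1, 2, 4, 8, ... additional executors
   * (the doubling happens in doUpdateRequest), until either the current need or the configured
   * maximum is hit, at which point numExecutorsToAdd is reset to 1.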
*/
private def addExecutors(maxNumExecutorsNeeded: Int, rpId: Int): Int = {
val oldNumExecutorsTarget = numExecutorsTargetPerResourceProfileId(rpId)
// Do not request more executors if it would put our target over the upper bound
// this is doing a max check per ResourceProfile
if (oldNumExecutorsTarget >= maxNumExecutors) {
logDebug("Not adding executors because our current target total " +
s"is already ${oldNumExecutorsTarget} (limit $maxNumExecutors)")
numExecutorsToAddPerResourceProfileId(rpId) = 1
return 0
}
// There's no point in wasting time ramping up to the number of executors we already have, so
// make sure our target is at least as much as our current allocation:
var numExecutorsTarget = math.max(numExecutorsTargetPerResourceProfileId(rpId),
executorMonitor.executorCountWithResourceProfile(rpId))
// Boost our target with the number to add for this round:
numExecutorsTarget += numExecutorsToAddPerResourceProfileId(rpId)
// Ensure that our target doesn't exceed what we need at the present moment:
numExecutorsTarget = math.min(numExecutorsTarget, maxNumExecutorsNeeded)
// Ensure that our target fits within configured bounds:
numExecutorsTarget = math.max(math.min(numExecutorsTarget, maxNumExecutors), minNumExecutors)
val delta = numExecutorsTarget - oldNumExecutorsTarget
numExecutorsTargetPerResourceProfileId(rpId) = numExecutorsTarget
// If our target has not changed, do not send a message
// to the cluster manager and reset our exponential growth
if (delta == 0) {
numExecutorsToAddPerResourceProfileId(rpId) = 1
}
delta
}
/**
* Request the cluster manager to remove the given executors.
* Returns the list of executors which are removed.
*/
private def removeExecutors(executors: Seq[(String, Int)]): Seq[String] = synchronized {
val executorIdsToBeRemoved = new ArrayBuffer[String]
logDebug(s"Request to remove executorIds: ${executors.mkString(", ")}")
val numExecutorsTotalPerRpId = mutable.Map[Int, Int]()
executors.foreach { case (executorIdToBeRemoved, rpId) =>
if (rpId == UNKNOWN_RESOURCE_PROFILE_ID) {
if (testing) {
throw new SparkException("ResourceProfile Id was UNKNOWN, this is not expected")
}
logWarning(s"Not removing executor $executorIdToBeRemoved because the " +
"ResourceProfile was UNKNOWN!")
} else {
// get the running total as we remove or initialize it to the count - pendingRemoval
val newExecutorTotal = numExecutorsTotalPerRpId.getOrElseUpdate(rpId,
(executorMonitor.executorCountWithResourceProfile(rpId) -
executorMonitor.pendingRemovalCountPerResourceProfileId(rpId) -
executorMonitor.decommissioningPerResourceProfileId(rpId)
))
if (newExecutorTotal - 1 < minNumExecutors) {
          logDebug(s"Not removing idle executor $executorIdToBeRemoved because there " +
            s"are only $newExecutorTotal executor(s) left (minimum executor limit " +
            s"$minNumExecutors)")
} else if (newExecutorTotal - 1 < numExecutorsTargetPerResourceProfileId(rpId)) {
          logDebug(s"Not removing idle executor $executorIdToBeRemoved because there " +
            s"are only $newExecutorTotal executor(s) left (executor target number " +
            s"${numExecutorsTargetPerResourceProfileId(rpId)})")
} else {
executorIdsToBeRemoved += executorIdToBeRemoved
numExecutorsTotalPerRpId(rpId) -= 1
}
}
}
if (executorIdsToBeRemoved.isEmpty) {
return Seq.empty[String]
}
    // Send a request to the backend to kill these executors
val executorsRemoved = if (testing) {
executorIdsToBeRemoved
} else {
// We don't want to change our target number of executors, because we already did that
// when the task backlog decreased.
if (decommissionEnabled) {
val executorIdsWithoutHostLoss = executorIdsToBeRemoved.toSeq.map(
id => (id, ExecutorDecommissionInfo("spark scale down"))).toArray
client.decommissionExecutors(
executorIdsWithoutHostLoss,
adjustTargetNumExecutors = false,
triggeredByExecutor = false)
} else {
client.killExecutors(executorIdsToBeRemoved.toSeq, adjustTargetNumExecutors = false,
countFailures = false, force = false)
}
}
// [SPARK-21834] killExecutors api reduces the target number of executors.
// So we need to update the target with desired value.
client.requestTotalExecutors(
numExecutorsTargetPerResourceProfileId.toMap,
numLocalityAwareTasksPerResourceProfileId.toMap,
rpIdToHostToLocalTaskCount)
    // mark the removed executors in the executor monitor so they are no longer counted as active
if (testing || executorsRemoved.nonEmpty) {
if (decommissionEnabled) {
executorMonitor.executorsDecommissioned(executorsRemoved.toSeq)
} else {
executorMonitor.executorsKilled(executorsRemoved.toSeq)
}
logInfo(s"Executors ${executorsRemoved.mkString(",")} removed due to idle timeout.")
executorsRemoved.toSeq
} else {
      logWarning(s"Unable to reach the cluster manager to kill executor(s) " +
        s"${executorIdsToBeRemoved.mkString(",")} or no executor was eligible to kill!")
Seq.empty[String]
}
}
/**
* Callback invoked when the scheduler receives new pending tasks.
* This sets a time in the future that decides when executors should be added
* if it is not already set.
*/
private def onSchedulerBacklogged(): Unit = synchronized {
if (addTime == NOT_SET) {
logDebug(s"Starting timer to add executors because pending tasks " +
s"are building up (to expire in $schedulerBacklogTimeoutS seconds)")
addTime = clock.nanoTime() + TimeUnit.SECONDS.toNanos(schedulerBacklogTimeoutS)
}
}
/**
* Callback invoked when the scheduler queue is drained.
* This resets all variables used for adding executors.
*/
private def onSchedulerQueueEmpty(): Unit = synchronized {
logDebug("Clearing timer to add executors because there are no more pending tasks")
addTime = NOT_SET
numExecutorsToAddPerResourceProfileId.transform { case (_, _) => 1 }
}
private case class StageAttempt(stageId: Int, stageAttemptId: Int) {
override def toString: String = s"Stage $stageId (Attempt $stageAttemptId)"
}
/**
* A listener that notifies the given allocation manager of when to add and remove executors.
*
* This class is intentionally conservative in its assumptions about the relative ordering
* and consistency of events returned by the listener.
*/
private[spark] class ExecutorAllocationListener extends SparkListener {
private val stageAttemptToNumTasks = new mutable.HashMap[StageAttempt, Int]
// Number of running tasks per stageAttempt including speculative tasks.
// Should be 0 when no stages are active.
private val stageAttemptToNumRunningTask = new mutable.HashMap[StageAttempt, Int]
private val stageAttemptToTaskIndices = new mutable.HashMap[StageAttempt, mutable.HashSet[Int]]
// Number of speculative tasks pending/running in each stageAttempt
private val stageAttemptToNumSpeculativeTasks = new mutable.HashMap[StageAttempt, Int]
// The speculative tasks started in each stageAttempt
private val stageAttemptToSpeculativeTaskIndices =
new mutable.HashMap[StageAttempt, mutable.HashSet[Int]]
private val resourceProfileIdToStageAttempt =
new mutable.HashMap[Int, mutable.Set[StageAttempt]]
// Keep track of unschedulable task sets because of executor/node exclusions from too many task
    // failures. This is a Set of StageAttempts because we only track the last unschedulable
    // task in a task set, although there can be more. This is done in order to avoid costly
    // loops in the scheduling. Check TaskSetManager#getCompletelyExcludedTaskIfAny for details.
private val unschedulableTaskSets = new mutable.HashSet[StageAttempt]
    // Map from stageAttempt to a tuple of (the number of tasks with locality preferences,
    // a map where each pair is a node and the number of tasks that would like to be scheduled
    // on that node, and the resource profile id).
    // This maintains the executor placement hints for each stageAttempt, used by the resource
    // framework to better place the executors.
private val stageAttemptToExecutorPlacementHints =
new mutable.HashMap[StageAttempt, (Int, Map[String, Int], Int)]
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = {
initializing = false
val stageId = stageSubmitted.stageInfo.stageId
val stageAttemptId = stageSubmitted.stageInfo.attemptNumber()
val stageAttempt = StageAttempt(stageId, stageAttemptId)
val numTasks = stageSubmitted.stageInfo.numTasks
allocationManager.synchronized {
stageAttemptToNumTasks(stageAttempt) = numTasks
allocationManager.onSchedulerBacklogged()
// need to keep stage task requirements to ask for the right containers
val profId = stageSubmitted.stageInfo.resourceProfileId
logDebug(s"Stage resource profile id is: $profId with numTasks: $numTasks")
resourceProfileIdToStageAttempt.getOrElseUpdate(
profId, new mutable.HashSet[StageAttempt]) += stageAttempt
numExecutorsToAddPerResourceProfileId.getOrElseUpdate(profId, 1)
// Compute the number of tasks requested by the stage on each host
var numTasksPending = 0
val hostToLocalTaskCountPerStage = new mutable.HashMap[String, Int]()
stageSubmitted.stageInfo.taskLocalityPreferences.foreach { locality =>
if (!locality.isEmpty) {
numTasksPending += 1
locality.foreach { location =>
val count = hostToLocalTaskCountPerStage.getOrElse(location.host, 0) + 1
hostToLocalTaskCountPerStage(location.host) = count
}
}
}
stageAttemptToExecutorPlacementHints.put(stageAttempt,
(numTasksPending, hostToLocalTaskCountPerStage.toMap, profId))
// Update the executor placement hints
updateExecutorPlacementHints()
if (!numExecutorsTargetPerResourceProfileId.contains(profId)) {
numExecutorsTargetPerResourceProfileId.put(profId, initialNumExecutors)
if (initialNumExecutors > 0) {
logDebug(s"requesting executors, rpId: $profId, initial number is $initialNumExecutors")
// we need to trigger a schedule since we add an initial number here.
client.requestTotalExecutors(
numExecutorsTargetPerResourceProfileId.toMap,
numLocalityAwareTasksPerResourceProfileId.toMap,
rpIdToHostToLocalTaskCount)
}
}
}
}
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
val stageId = stageCompleted.stageInfo.stageId
val stageAttemptId = stageCompleted.stageInfo.attemptNumber()
val stageAttempt = StageAttempt(stageId, stageAttemptId)
allocationManager.synchronized {
// do NOT remove stageAttempt from stageAttemptToNumRunningTask
// because the attempt may still have running tasks,
// even after another attempt for the stage is submitted.
stageAttemptToNumTasks -= stageAttempt
stageAttemptToNumSpeculativeTasks -= stageAttempt
stageAttemptToTaskIndices -= stageAttempt
stageAttemptToSpeculativeTaskIndices -= stageAttempt
stageAttemptToExecutorPlacementHints -= stageAttempt
// Update the executor placement hints
updateExecutorPlacementHints()
// If this is the last stage with pending tasks, mark the scheduler queue as empty
// This is needed in case the stage is aborted for any reason
if (stageAttemptToNumTasks.isEmpty && stageAttemptToNumSpeculativeTasks.isEmpty) {
allocationManager.onSchedulerQueueEmpty()
}
}
}
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = {
val stageId = taskStart.stageId
val stageAttemptId = taskStart.stageAttemptId
val stageAttempt = StageAttempt(stageId, stageAttemptId)
val taskIndex = taskStart.taskInfo.index
allocationManager.synchronized {
stageAttemptToNumRunningTask(stageAttempt) =
stageAttemptToNumRunningTask.getOrElse(stageAttempt, 0) + 1
// If this is the last pending task, mark the scheduler queue as empty
if (taskStart.taskInfo.speculative) {
stageAttemptToSpeculativeTaskIndices.getOrElseUpdate(stageAttempt,
new mutable.HashSet[Int]) += taskIndex
} else {
stageAttemptToTaskIndices.getOrElseUpdate(stageAttempt,
new mutable.HashSet[Int]) += taskIndex
}
if (!hasPendingTasks) {
allocationManager.onSchedulerQueueEmpty()
}
}
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
val stageId = taskEnd.stageId
val stageAttemptId = taskEnd.stageAttemptId
val stageAttempt = StageAttempt(stageId, stageAttemptId)
val taskIndex = taskEnd.taskInfo.index
allocationManager.synchronized {
if (stageAttemptToNumRunningTask.contains(stageAttempt)) {
stageAttemptToNumRunningTask(stageAttempt) -= 1
if (stageAttemptToNumRunningTask(stageAttempt) == 0) {
stageAttemptToNumRunningTask -= stageAttempt
if (!stageAttemptToNumTasks.contains(stageAttempt)) {
val rpForStage = resourceProfileIdToStageAttempt.filter { case (k, v) =>
v.contains(stageAttempt)
}.keys
if (rpForStage.size == 1) {
                // Be careful about the removal here: because of late tasks, make sure the
                // stage is really complete and no tasks are left.
resourceProfileIdToStageAttempt(rpForStage.head) -= stageAttempt
} else {
logWarning(s"Should have exactly one resource profile for stage $stageAttempt," +
s" but have $rpForStage")
}
}
}
}
if (taskEnd.taskInfo.speculative) {
        stageAttemptToSpeculativeTaskIndices.get(stageAttempt).foreach(_.remove(taskIndex))
// If the previous task attempt succeeded first and it was the last task in a stage,
        // the stage may have been removed before handling this speculative TaskEnd event.
if (stageAttemptToNumSpeculativeTasks.contains(stageAttempt)) {
stageAttemptToNumSpeculativeTasks(stageAttempt) -= 1
}
}
taskEnd.reason match {
case Success | _: TaskKilled =>
case _ =>
if (!hasPendingTasks) {
// If the task failed (not intentionally killed), we expect it to be resubmitted
// later. To ensure we have enough resources to run the resubmitted task, we need to
// mark the scheduler as backlogged again if it's not already marked as such
// (SPARK-8366)
allocationManager.onSchedulerBacklogged()
}
if (!taskEnd.taskInfo.speculative) {
// If a non-speculative task is intentionally killed, it means the speculative task
// has succeeded, and no further task of this task index will be resubmitted. In this
// case, the task index is completed and we shouldn't remove it from
// stageAttemptToTaskIndices. Otherwise, we will have a pending non-speculative task
// for the task index (SPARK-30511)
stageAttemptToTaskIndices.get(stageAttempt).foreach {_.remove(taskIndex)}
}
}
}
}
override def onSpeculativeTaskSubmitted(speculativeTask: SparkListenerSpeculativeTaskSubmitted)
: Unit = {
val stageId = speculativeTask.stageId
val stageAttemptId = speculativeTask.stageAttemptId
val stageAttempt = StageAttempt(stageId, stageAttemptId)
allocationManager.synchronized {
stageAttemptToNumSpeculativeTasks(stageAttempt) =
stageAttemptToNumSpeculativeTasks.getOrElse(stageAttempt, 0) + 1
allocationManager.onSchedulerBacklogged()
}
}
override def onUnschedulableTaskSetAdded(
unschedulableTaskSetAdded: SparkListenerUnschedulableTaskSetAdded): Unit = {
val stageId = unschedulableTaskSetAdded.stageId
val stageAttemptId = unschedulableTaskSetAdded.stageAttemptId
val stageAttempt = StageAttempt(stageId, stageAttemptId)
allocationManager.synchronized {
unschedulableTaskSets.add(stageAttempt)
allocationManager.onSchedulerBacklogged()
}
}
override def onUnschedulableTaskSetRemoved(
unschedulableTaskSetRemoved: SparkListenerUnschedulableTaskSetRemoved): Unit = {
val stageId = unschedulableTaskSetRemoved.stageId
val stageAttemptId = unschedulableTaskSetRemoved.stageAttemptId
val stageAttempt = StageAttempt(stageId, stageAttemptId)
allocationManager.synchronized {
      // Clear unschedulableTaskSets since at least one task has become schedulable now
unschedulableTaskSets.remove(stageAttempt)
}
}
/**
* An estimate of the total number of pending tasks remaining for currently running stages. Does
* not account for tasks which may have failed and been resubmitted.
*
* Note: This is not thread-safe without the caller owning the `allocationManager` lock.
*/
def pendingTasksPerResourceProfile(rpId: Int): Int = {
val attempts = resourceProfileIdToStageAttempt.getOrElse(rpId, Set.empty).toSeq
attempts.map(attempt => getPendingTaskSum(attempt)).sum
}
def hasPendingRegularTasks: Boolean = {
val attemptSets = resourceProfileIdToStageAttempt.values
attemptSets.exists(attempts => attempts.exists(getPendingTaskSum(_) > 0))
}
private def getPendingTaskSum(attempt: StageAttempt): Int = {
val numTotalTasks = stageAttemptToNumTasks.getOrElse(attempt, 0)
val numRunning = stageAttemptToTaskIndices.get(attempt).map(_.size).getOrElse(0)
numTotalTasks - numRunning
}
def pendingSpeculativeTasksPerResourceProfile(rp: Int): Int = {
val attempts = resourceProfileIdToStageAttempt.getOrElse(rp, Set.empty).toSeq
attempts.map(attempt => getPendingSpeculativeTaskSum(attempt)).sum
}
def hasPendingSpeculativeTasks: Boolean = {
val attemptSets = resourceProfileIdToStageAttempt.values
attemptSets.exists { attempts =>
attempts.exists(getPendingSpeculativeTaskSum(_) > 0)
}
}
private def getPendingSpeculativeTaskSum(attempt: StageAttempt): Int = {
val numTotalTasks = stageAttemptToNumSpeculativeTasks.getOrElse(attempt, 0)
val numRunning = stageAttemptToSpeculativeTaskIndices.get(attempt).map(_.size).getOrElse(0)
numTotalTasks - numRunning
}
/**
     * Currently we only know when a task set has an unschedulable task, but not the exact
     * number. Since the allocation manager isn't tied closely to the scheduler, we use the
     * number of task sets that are unschedulable as a heuristic to add more executors.
*/
def pendingUnschedulableTaskSetsPerResourceProfile(rp: Int): Int = {
val attempts = resourceProfileIdToStageAttempt.getOrElse(rp, Set.empty).toSeq
attempts.count(attempt => unschedulableTaskSets.contains(attempt))
}
def hasPendingTasks: Boolean = {
hasPendingSpeculativeTasks || hasPendingRegularTasks
}
def totalPendingTasksPerResourceProfile(rp: Int): Int = {
pendingTasksPerResourceProfile(rp) + pendingSpeculativeTasksPerResourceProfile(rp)
}
/**
* The number of tasks currently running across all stages.
* Include running-but-zombie stage attempts
*/
def totalRunningTasks(): Int = {
stageAttemptToNumRunningTask.values.sum
}
def totalRunningTasksPerResourceProfile(rp: Int): Int = {
val attempts = resourceProfileIdToStageAttempt.getOrElse(rp, Set.empty).toSeq
// attempts is a Set, change to Seq so we keep all values
attempts.map { attempt =>
stageAttemptToNumRunningTask.getOrElseUpdate(attempt, 0)
}.sum
}
/**
* Update the Executor placement hints (the number of tasks with locality preferences,
* a map where each pair is a node and the number of tasks that would like to be scheduled
* on that node).
*
* These hints are updated when stages arrive and complete, so are not up-to-date at task
* granularity within stages.
*/
def updateExecutorPlacementHints(): Unit = {
val localityAwareTasksPerResourceProfileId = new mutable.HashMap[Int, Int]
// ResourceProfile id => map[host, count]
val rplocalityToCount = new mutable.HashMap[Int, mutable.HashMap[String, Int]]()
stageAttemptToExecutorPlacementHints.values.foreach {
case (numTasksPending, localities, rpId) =>
val rpNumPending =
localityAwareTasksPerResourceProfileId.getOrElse(rpId, 0)
localityAwareTasksPerResourceProfileId(rpId) = rpNumPending + numTasksPending
localities.foreach { case (hostname, count) =>
val rpBasedHostToCount =
rplocalityToCount.getOrElseUpdate(rpId, new mutable.HashMap[String, Int])
val newUpdated = rpBasedHostToCount.getOrElse(hostname, 0) + count
rpBasedHostToCount(hostname) = newUpdated
}
}
allocationManager.numLocalityAwareTasksPerResourceProfileId =
localityAwareTasksPerResourceProfileId
allocationManager.rpIdToHostToLocalTaskCount =
rplocalityToCount.map { case (k, v) => (k, v.toMap)}.toMap
}
}
}
/**
* Metric source for ExecutorAllocationManager to expose its internal executor allocation
* status to MetricsSystem.
 * Note: these metrics rely heavily on the internal implementation of
 * ExecutorAllocationManager; the metrics and their values may change when the internal
 * implementation changes, so they are not stable across Spark versions.
*/
private[spark] class ExecutorAllocationManagerSource(
executorAllocationManager: ExecutorAllocationManager) extends Source {
val sourceName = "ExecutorAllocationManager"
val metricRegistry = new MetricRegistry()
private def registerGauge[T](name: String, value: => T, defaultValue: T): Unit = {
metricRegistry.register(MetricRegistry.name("executors", name), new Gauge[T] {
override def getValue: T = synchronized { Option(value).getOrElse(defaultValue) }
})
}
private def getCounter(name: String): Counter = {
metricRegistry.counter(MetricRegistry.name("executors", name))
}
val gracefullyDecommissioned: Counter = getCounter("numberExecutorsGracefullyDecommissioned")
val decommissionUnfinished: Counter = getCounter("numberExecutorsDecommissionUnfinished")
val driverKilled: Counter = getCounter("numberExecutorsKilledByDriver")
val exitedUnexpectedly: Counter = getCounter("numberExecutorsExitedUnexpectedly")
// The metrics are going to return the sum for all the different ResourceProfiles.
registerGauge("numberExecutorsToAdd",
executorAllocationManager.numExecutorsToAddPerResourceProfileId.values.sum, 0)
registerGauge("numberExecutorsPendingToRemove",
executorAllocationManager.executorMonitor.pendingRemovalCount, 0)
registerGauge("numberAllExecutors",
executorAllocationManager.executorMonitor.executorCount, 0)
registerGauge("numberTargetExecutors",
executorAllocationManager.numExecutorsTargetPerResourceProfileId.values.sum, 0)
registerGauge("numberMaxNeededExecutors",
executorAllocationManager.numExecutorsTargetPerResourceProfileId.keys
.map(executorAllocationManager.maxNumExecutorsNeededPerResourceProfile(_)).sum, 0)
}
private object ExecutorAllocationManager {
val NOT_SET = Long.MaxValue
// helper case class for requesting executors, here to be visible for testing
private[spark] case class TargetNumUpdates(delta: Int, oldNumExecutorsTarget: Int)
}
|
BryanCutler/spark
|
core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
|
Scala
|
apache-2.0
| 47,326 |
package com.wavesplatform.state.diffs
import com.google.common.primitives.Ints
import com.wavesplatform.account.KeyPair
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.db.WithDomain
import com.wavesplatform.db.WithState.AddrWithBalance
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.features.BlockchainFeatures._
import com.wavesplatform.lagonaki.mocks.TestBlock
import com.wavesplatform.lang.contract.DApp
import com.wavesplatform.lang.contract.DApp.{CallableAnnotation, CallableFunction}
import com.wavesplatform.lang.directives.values._
import com.wavesplatform.lang.script.ContractScript.ContractScriptImpl
import com.wavesplatform.lang.script.v1.ExprScript
import com.wavesplatform.lang.script.{ContractScript, Script}
import com.wavesplatform.lang.v1.FunctionHeader.Native
import com.wavesplatform.lang.v1.compiler.Terms._
import com.wavesplatform.lang.v1.compiler.{Terms, TestCompiler}
import com.wavesplatform.protobuf.dapp.DAppMeta
import com.wavesplatform.settings.{FunctionalitySettings, TestFunctionalitySettings}
import com.wavesplatform.test.{PropSpec, _}
import com.wavesplatform.transaction.{GenesisTransaction, TxHelpers, TxVersion}
import com.wavesplatform.transaction.smart.SetScriptTransaction
import org.scalatest.Assertion
class SetScriptTransactionDiffTest extends PropSpec with WithDomain {
private val fs = TestFunctionalitySettings.Enabled.copy(
preActivatedFeatures = Map(BlockchainFeatures.SmartAccounts.id -> 0, BlockchainFeatures.Ride4DApps.id -> 0)
)
val preconditionsAndSetContract: (GenesisTransaction, SetScriptTransaction) =
preconditionsAndSetCustomContract(
ContractScript(
V3,
DApp(
DAppMeta(),
List.empty,
List(
CallableFunction(CallableAnnotation("sender"), Terms.FUNC("foo", List("a"), FUNCTION_CALL(Native(203), List(REF("a"), REF("sender")))))
),
None
)
).explicitGet()
)
private def preconditionsAndSetCustomContract(script: Script): (GenesisTransaction, SetScriptTransaction) = {
val master = TxHelpers.signer(1)
val genesis = TxHelpers.genesis(master.toAddress)
val setScript = TxHelpers.setScript(master, script)
(genesis, setScript)
}
property("setting script results in account state") {
val (genesis, setScript) = preconditionsAndSetContract
assertDiffAndState(Seq(TestBlock.create(Seq(genesis))), TestBlock.create(Seq(setScript)), fs) {
case (_, newState) =>
newState.accountScript(setScript.sender.toAddress).map(_.script) shouldBe setScript.script
}
}
property("setting contract results in account state") {
val (genesis, setScript) = preconditionsAndSetContract
assertDiffAndState(Seq(TestBlock.create(Seq(genesis))), TestBlock.create(Seq(setScript)), fs) {
case (_, newState) =>
newState.accountScript(setScript.sender.toAddress).map(_.script) shouldBe setScript.script
}
}
property("Script with BlockV2 only works after Ride4DApps feature activation") {
import com.wavesplatform.lagonaki.mocks.TestBlock.{create => block}
val settingsUnactivated = TestFunctionalitySettings.Enabled.copy(
preActivatedFeatures = Map(
BlockchainFeatures.Ride4DApps.id -> 3
)
)
val settingsActivated = TestFunctionalitySettings.Enabled.copy(
preActivatedFeatures = Map(
BlockchainFeatures.Ride4DApps.id -> 0
)
)
val setup = {
val master = TxHelpers.signer(1)
val genesis = TxHelpers.genesis(master.toAddress)
val expr = BLOCK(LET("x", CONST_LONG(3)), CONST_BOOLEAN(true))
val script = ExprScript(V1, expr, checkSize = false).explicitGet()
val tx = TxHelpers.setScript(master, script)
(genesis, tx)
}
val (genesis, tx) = setup
assertDiffEi(Seq(block(Seq(genesis))), block(Seq(tx)), settingsUnactivated) { blockDiffEi =>
blockDiffEi should produce("RIDE 4 DAPPS feature has not been activated yet")
}
assertDiffEi(Seq(block(Seq(genesis))), block(Seq(tx)), settingsActivated) { blockDiffEi =>
blockDiffEi.explicitGet()
}
}
property("verifier complexity limit 3000 from V4") {
val exprV3WithComplexityBetween2000And3000 =
TestCompiler(V3).compileExpression(
"""
| {-#STDLIB_VERSION 3 #-}
| {-#SCRIPT_TYPE ACCOUNT #-}
| {-#CONTENT_TYPE EXPRESSION #-}
|
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK')
""".stripMargin
)
val contractV3WithComplexityBetween2000And3000 = {
val script =
"""
| {-#STDLIB_VERSION 3 #-}
| {-#SCRIPT_TYPE ACCOUNT #-}
| {-#CONTENT_TYPE DAPP #-}
|
| @Verifier(tx)
| func verify() =
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| rsaVerify(SHA256, base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK')
""".stripMargin
TestCompiler(V3).compileContract(script)
}
val exprV4WithComplexityBetween2000And3000 =
TestCompiler(V4).compileExpression(
"""
| {-#STDLIB_VERSION 4 #-}
| {-#SCRIPT_TYPE ACCOUNT #-}
| {-#CONTENT_TYPE EXPRESSION #-}
|
| groth16Verify_5inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') || groth16Verify_1inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK')
""".stripMargin
)
val contractV4WithComplexityBetween2000And3000 = {
val script =
"""
| {-#STDLIB_VERSION 4 #-}
| {-#SCRIPT_TYPE ACCOUNT #-}
| {-#CONTENT_TYPE DAPP #-}
|
| @Verifier(tx)
| func verify() =
| groth16Verify_5inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') || groth16Verify_1inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK')
|
""".stripMargin
TestCompiler(V4).compileContract(script)
}
val contractV4WithCallableComplexityBetween3000And4000 = {
val script =
"""
| {-#STDLIB_VERSION 4 #-}
| {-#SCRIPT_TYPE ACCOUNT #-}
| {-#CONTENT_TYPE DAPP #-}
|
| @Callable(i)
| func default() = {
| if(groth16Verify_15inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK'))
| then [] else []
| }
|
""".stripMargin
TestCompiler(V4).compileContract(script)
}
val rideV3Activated = TestFunctionalitySettings.Enabled.copy(
preActivatedFeatures = Map(
BlockchainFeatures.Ride4DApps.id -> 0
)
)
val rideV4Activated = TestFunctionalitySettings.Enabled.copy(
preActivatedFeatures = Map(
BlockchainFeatures.Ride4DApps.id -> 0,
BlockchainFeatures.BlockV5.id -> 0
)
)
def assertSuccess(script: Script, settings: FunctionalitySettings): Unit = {
val (genesis, setScript) = preconditionsAndSetCustomContract(script)
assertDiffAndState(Seq(TestBlock.create(Seq(genesis))), TestBlock.create(Seq(setScript)), settings) {
case (_, newState) =>
newState.accountScript(setScript.sender.toAddress).map(_.script) shouldBe setScript.script
}
}
def assertFailure(script: Script, settings: FunctionalitySettings, errorMessage: String): Unit = {
val (genesis, setScript) = preconditionsAndSetCustomContract(script)
assertDiffEi(Seq(TestBlock.create(Seq(genesis))), TestBlock.create(Seq(setScript)), settings)(
_ should produce(errorMessage)
)
}
assertSuccess(exprV3WithComplexityBetween2000And3000, rideV3Activated)
assertSuccess(contractV3WithComplexityBetween2000And3000, rideV3Activated)
assertFailure(exprV3WithComplexityBetween2000And3000, rideV4Activated, "Script is too complex: 2134 > 2000")
assertFailure(exprV4WithComplexityBetween2000And3000, rideV4Activated, "Script is too complex: 2807 > 2000")
assertFailure(contractV3WithComplexityBetween2000And3000, rideV4Activated, "Contract verifier is too complex: 2134 > 2000")
assertFailure(contractV4WithComplexityBetween2000And3000, rideV4Activated, "Contract verifier is too complex: 2807 > 2000")
assertSuccess(contractV4WithCallableComplexityBetween3000And4000, rideV4Activated)
}
property("estimation overflow") {
val body = {
val n = 65
s"""
| func f0() = true
| ${(0 until n).map(i => s"func f${i + 1}() = if (f$i()) then f$i() else f$i()").mkString("\\n")}
| f$n()
""".stripMargin
}
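    // Each f(i+1) calls f(i) twice, so the estimated complexity roughly doubles per level;
    // with n = 65 levels the estimator's running total overflows to a negative value, which
    // is what the "Unexpected negative ... complexity" assertions below exercise.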
val verifier = TestCompiler(V3).compileExpression(body)
    // the complexity of a callable compiled naturally from this expression would not be negative, so build it manually
val callable = CallableFunction(CallableAnnotation("i"), FUNC("call", Nil, verifier.expr.asInstanceOf[EXPR]))
val dAppCallable = ContractScriptImpl(V4, DApp(DAppMeta(), Nil, List(callable), None))
val dAppVerifier = TestCompiler(V3).compileContract(
s"""
| @Verifier(tx)
| func verify() = {
| $body
| }
""".stripMargin
)
val sender = TxHelpers.signer(1)
val balances = AddrWithBalance.enoughBalances(sender)
def settings(checkNegative: Boolean = false, checkSumOverflow: Boolean = false): FunctionalitySettings = {
TestFunctionalitySettings
.withFeatures(BlockV5)
.copy(
estimationOverflowFixHeight = if (checkNegative) 0 else 999,
estimatorSumOverflowFixHeight = if (checkSumOverflow) 0 else 999
)
}
def assert(script: Script, checkNegativeMessage: String): Assertion = {
def setScript() = TxHelpers.setScript(sender, script)
withDomain(domainSettingsWithFS(settings()), balances) { db =>
val tx = setScript()
db.appendBlock(tx)
db.liquidDiff.errorMessage(tx.id()) shouldBe None
}
withDomain(domainSettingsWithFS(settings(checkNegative = true)), balances) { db =>
db.appendBlockE(setScript()) should produce(checkNegativeMessage)
}
withDomain(domainSettingsWithFS(settings(checkSumOverflow = true)), balances) { db =>
db.appendBlockE(setScript()) should produce("Illegal script")
}
}
Seq(
(verifier, "Unexpected negative verifier complexity"),
(dAppVerifier, "Unexpected negative verifier complexity"),
(dAppCallable, "Unexpected negative callable `call` complexity")
).foreach { case (script, message) => assert(script, message) }
}
property("illegal recursion in scripts is allowed before sumOverflow height") {
/*
func a1() = true
@Verifier(tx)
func a1() = a1()
*/
val verifier = "AAIFAAAAAAAAAA0IAhoJCgJhMRIDYTExAAAAAQEAAAACYTEAAAAABgAAAAAAAAABAAAAAnR4AQAAAAJhMQAAAAAJAQAAAAJhMQAAAAA1A+Ee"
/*
func a1() = true
func a1() = a1()
@Verifier(tx)
func a2() = a1()
*/
val userFunctions = "AAIFAAAAAAAAAA0IAhoJCgJhMRIDYTExAAAAAgEAAAACYTEAAAAABgEAAAACYTEAAAAACQEAAAACYTEAAAAAAAAAAAAAAAEAAAACdHgBAAAAAmEyAAAAAAkBAAAAAmExAAAAAIGVAL4="
/*
func a1() = true
func a2() = {
func a3() = {
func a11() = a1()
a11()
}
a3()
}
@Verifier(tx)
func a4() = a2()
*/
val innerOverlapWithVerifier = "AAIFAAAAAAAAAA0IAhoJCgJhMRIDYTExAAAAAgEAAAACYTEAAAAABgEAAAACYTIAAAAACgEAAAACYTMAAAAACgEAAAACYTEAAAAACQEAAAACYTEAAAAACQEAAAACYTEAAAAACQEAAAACYTMAAAAAAAAAAAAAAAEAAAACdHgBAAAAAmE0AAAAAAkBAAAAAmEyAAAAAEjFcsE="
/*
func a1() = true
func a2() = {
func a3() = {
func a11() = a1()
a11()
}
a3()
}
@Callable(i)
func a4() = {
strict a0 = a2()
[]
}
*/
val innerOverlapWithCallable = "AAIFAAAAAAAAAA8IAhIAGgkKAmExEgNhMTEAAAACAQAAAAJhMQAAAAAGAQAAAAJhMgAAAAAKAQAAAAJhMwAAAAAKAQAAAAJhMQAAAAAJAQAAAAJhMQAAAAAJAQAAAAJhMQAAAAAJAQAAAAJhMwAAAAAAAAABAAAAAWkBAAAAAmE0AAAAAAQAAAACYTAJAQAAAAJhMgAAAAADCQAAAAAAAAIFAAAAAmEwBQAAAAJhMAUAAAADbmlsCQAAAgAAAAECAAAAJFN0cmljdCB2YWx1ZSBpcyBub3QgZXF1YWwgdG8gaXRzZWxmLgAAAABEHCSy"
val keyPairs = Vector.tabulate(8)(i => KeyPair(Ints.toByteArray(i)))
val balances = keyPairs.map(acc => AddrWithBalance(acc.toAddress, 10.waves))
def setScript(keyPairIndex: Int, script: String): SetScriptTransaction =
TxHelpers.setScript(keyPairs(keyPairIndex), Script.fromBase64String(script).explicitGet(), version = TxVersion.V2)
val settings =
DomainPresets.RideV5.copy(blockchainSettings = DomainPresets.RideV5.blockchainSettings.copy(
functionalitySettings = DomainPresets.RideV5.blockchainSettings.functionalitySettings.copy(
estimatorSumOverflowFixHeight = 3
)
))
withDomain(settings, balances) { d =>
d.appendBlock(
setScript(0, verifier),
setScript(1, userFunctions),
setScript(2, innerOverlapWithVerifier),
setScript(3, innerOverlapWithCallable)
)
d.appendBlockE(setScript(4, verifier)) should produce("shadows preceding declaration")
d.appendBlockE(setScript(5, userFunctions)) should produce("shadows preceding declaration")
d.appendBlockE(setScript(6, innerOverlapWithVerifier)) should produce("shadows preceding declaration")
d.appendBlockE(setScript(7, innerOverlapWithCallable)) should produce("shadows preceding declaration")
}
}
}
|
wavesplatform/Waves
|
node/src/test/scala/com/wavesplatform/state/diffs/SetScriptTransactionDiffTest.scala
|
Scala
|
mit
| 14,386 |
/***********************************************************************
* Copyright (c) 2013-2015 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0 which
* accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.convert.text
import java.nio.charset.StandardCharsets
import com.google.common.io.Resources
import com.typesafe.config.ConfigFactory
import org.junit.runner.RunWith
import org.locationtech.geomesa.convert.SimpleFeatureConverters
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class DelimitedTextConverterTest extends Specification {
sequential
"DelimitedTextConverter" should {
val data =
"""
|1,hello,45.0,45.0
|2,world,90.0,90.0
|willfail,hello
""".stripMargin
val conf = ConfigFactory.parseString(
"""
| converter = {
| type = "delimited-text",
| format = "DEFAULT",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "oneup", transform = "$1" },
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "lit", transform = "'hello'" },
| { name = "geom", transform = "point($lat, $lon)" }
| { name = "l1", transform = "concat($lit, $lit)" }
| { name = "l2", transform = "concat($l1, $lit)" }
| { name = "l3", transform = "concat($l2, $lit)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
"be built from a conf" >> {
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val res = converter.processInput(data.split("\\n").toIterator.filterNot( s => "^\\\\s*$".r.findFirstIn(s).size > 0)).toList
converter.close()
"and process some data" >> {
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
}
"handle more derived fields than input fields" >> {
res(0).getAttribute("oneup").asInstanceOf[String] must be equalTo "1"
}
}
"handle tab delimited files" >> {
val conf = ConfigFactory.parseString(
"""
| converter = {
| type = "delimited-text",
| format = "TDF",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val res = converter.processInput(data.split("\\n").toIterator.filterNot( s => "^\\\\s*$".r.findFirstIn(s).size > 0).map(_.replaceAll(",", "\\t"))).toList
converter.close()
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
}
"handle line number transform and filename global parameter correctly " >> {
val conf = ConfigFactory.parseString(
"""
| converter = {
| type = "delimited-text",
| format = "TDF",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lineNr", transform = "lineNo()"},
| { name = "fn", transform = "$filename"},
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val input = data.split("\\n").toIterator.filterNot( s => "^\\\\s*$".r.findFirstIn(s).size > 0).map(_.replaceAll(",", "\\t"))
val res = converter.processInput(input, Map("filename"-> "/some/file/path/testfile.txt")).toList
converter.close()
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(0).getAttribute("lineNr").asInstanceOf[Int] must be equalTo 1
res(0).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
res(1).getAttribute("lineNr").asInstanceOf[Int] must be equalTo 2
res(1).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
}
"handle line number transform and filename global in id-field " >> {
val conf = ConfigFactory.parseString(
"""
| converter = {
| type = "delimited-text",
| format = "TDF",
| id-field = "concat($filename, lineNo())",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lineNr", transform = "lineNo()"},
| { name = "fn", transform = "$filename"},
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val input = data.split("\\n").toIterator.filterNot( s => "^\\\\s*$".r.findFirstIn(s).size > 0).map(_.replaceAll(",", "\\t"))
val res = converter.processInput(input, Map("filename"-> "/some/file/path/testfile.txt")).toList
converter.close()
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello"
res(0).getAttribute("lineNr").asInstanceOf[Int] must be equalTo 1
res(0).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
res(1).getAttribute("lineNr").asInstanceOf[Int] must be equalTo 2
res(1).getAttribute("fn").asInstanceOf[String] must be equalTo "/some/file/path/testfile.txt"
}
"handle projecting to just the attributes in the SFT (and associated input dependencies)" >> {
// l3 has cascading dependencies
val subsft = SimpleFeatureTypes.createType("subsettest", "l3:String,geom:Point:srid=4326")
val conv = SimpleFeatureConverters.build[String](subsft, conf)
val res = conv.processInput(data.split("\\n").toIterator.filterNot( s => "^\\\\s*$".r.findFirstIn(s).size > 0)).toList
conv.close()
res.length must be equalTo 2
}
"handle horrible quoting and nested separators" >> {
val conf = ConfigFactory.parseString(
"""
| converter = {
| type = "delimited-text",
| format = "EXCEL",
| id-field = "md5(string2bytes($0))",
| fields = [
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
import scala.collection.JavaConversions._
val data = Resources.readLines(Resources.getResource("messydata.csv"), StandardCharsets.UTF_8)
val sft = SimpleFeatureTypes.createType(ConfigFactory.load("sft_testsft.conf"))
val converter = SimpleFeatureConverters.build[String](sft, conf)
converter must not beNull
val res = converter.processInput(data.iterator()).toList
converter.close()
res.size must be equalTo 2
res(0).getAttribute("phrase").asInstanceOf[String] must be equalTo "1hello, \\"foo\\""
res(1).getAttribute("phrase").asInstanceOf[String] must be equalTo "2world"
}
"handle records bigger than buffer size" >> {
// set the buffer size to 16 bytes and try to write records that are bigger than the buffer size
val sizeConf = ConfigFactory.parseString(
"""
| converter = {
| type = "delimited-text",
| format = "DEFAULT",
| id-field = "md5(string2bytes($0))",
| pipe-size = 16 // 16 bytes
| fields = [
| { name = "oneup", transform = "$1" },
| { name = "phrase", transform = "concat($1, $2)" },
| { name = "lat", transform = "$3::double" },
| { name = "lon", transform = "$4::double" },
| { name = "lit", transform = "'hello'" },
| { name = "geom", transform = "point($lat, $lon)" }
| ]
| }
""".stripMargin)
val converter = SimpleFeatureConverters.build[String](sft, sizeConf)
val data =
"""
|1,hello,45.0,45.0
|2,world,90.0,90.0
|willfail,hello
""".stripMargin
      val nonEmptyData = data.split("\n").toIterator.filterNot(s => "^\\s*$".r.findFirstIn(s).size > 0)
val res = converter.processInput(nonEmptyData).toList
converter.close()
res.size must be greaterThan 0
}
}
}
|
giserh/geomesa
|
geomesa-convert/geomesa-convert-text/src/test/scala/org/locationtech/geomesa/convert/text/DelimitedTextConverterTest.scala
|
Scala
|
apache-2.0
| 10,537 |
package example
object Issue2144/*<=example.Issue2144.*/ {
class Test/*<=example.Issue2144.Test#*/(a/*<=example.Issue2144.Test#a.*/: Boolean/*=>scala.Boolean#*/, b/*<=example.Issue2144.Test#b.*/: Int/*=>scala.Int#*/ = 1, c/*<=example.Issue2144.Test#c.*/: Int/*=>scala.Int#*/ = 2)
val x/*<=example.Issue2144.x.*/ = new Test/*=>example.Issue2144.Test#*/(a/*=>example.Issue2144.Test#`<init>`().(a)*/ = true, c/*=>example.Issue2144.Test#`<init>`().(c)*/ = 1)
}
|
scalameta/scalameta
|
tests/jvm/src/test/resources/example/Issue2144.scala
|
Scala
|
bsd-3-clause
| 461 |
package com.geishatokyo.diffsql.parser
import scala.annotation.tailrec
import scala.util.matching.Regex
import scala.util.parsing.combinator.RegexParsers
import com.geishatokyo.diffsql.{Name, Definition}
import scala.language.implicitConversions
/**
* Created by takeshita on 14/02/14.
*/
trait SQLParser extends RegexParsers with SkippingParsers{
def skipComment : Boolean = true
// Make literal case insensitive
implicit override def literal(s: String): Parser[String] = new Parser[String] {
def apply(in: Input) = {
val source = in.source
val offset = in.offset
val start = handleWhiteSpace(source, offset)
var i = 0
var j = start
while (i < s.length && j < source.length && s.charAt(i).toUpper == source.charAt(j).toUpper) {
i += 1
j += 1
}
if (i == s.length)
Success(source.subSequence(start, j).toString, in.drop(j - offset))
else {
val found = if (start == source.length()) "end of source" else "`"+source.charAt(start)+"'"
Failure("`"+s+"' is expected but "+ found + " is found", in.drop(start - offset))
}
}
}
override protected def handleWhiteSpace(source: CharSequence, offset: Int): Int = {
if(skipComment){
val o1 = super.handleWhiteSpace(source,offset)
val o2 = handleComment(source,o1)
if(o1 != o2) handleWhiteSpace(source,o2)
else o2
}else{
super.handleWhiteSpace(source,offset)
}
}
def handleComment(source : CharSequence, offset : Int) : Int = {
val o1 = handleBlockComment(source,offset)
if(o1 != offset) o1
else handleInlineComment(source,offset)
}
def handleBlockComment(source : CharSequence,offset : Int) : Int = {
if(source.length() - offset < 4) {
return offset
}
val commentStart = source.subSequence(offset,offset + 2)
if(commentStart == "/*" && source.length > offset + 3 && source.subSequence(offset,offset + 3) != "/*!"){
val s = source
var i = offset + 2
while((i + 1 < s.length) && !(s.charAt(i) == '*' && s.charAt(i + 1) == '/')){
i += 1
}
if(i + 1 < s.length){
i + 2
}else{
offset
}
}else {
offset
}
}
def handleInlineComment(source : CharSequence,offset : Int) : Int = {
if(source.length() - offset < 2) {
return offset
}
val commentStart = source.subSequence(offset,offset + 2)
if(commentStart == "--" || commentStart.charAt(0) == '#'){
val s = source
var i = offset + 2
      while(i < s.length && (s.charAt(i) != '\n')){
i += 1
}
if(i < s.length){
i + 1
}else{
i
}
}else {
offset
}
}
// Default data structure
def bool : Parser[Boolean] = "true" ^^^ { true } | "false" ^^^ { false }
  def digits : Parser[Int] = """-?\d+""".r ^^ { case v => v.toInt}
  def floats : Parser[Double] = """-?\d+\.\d+""".r ^^ { case v => v.toDouble}
  def nonLiteralChars : Parser[String] = """[a-zA-Z0-9_]+""".r
  def literalChars : Parser[String] = """\w+""".r
def dataTypeChars = """[a-zA-Z0-9¥(¥)]+""".r
def value = ("`" ~> literalChars <~ "`") | nonLiteralChars
def name = value ^^ {
case n => Name(n)
}
  def stringLiteral = ("'" ~> """[^']*""".r <~ "'") | ("\"" ~> """[^"]*""".r <~ "\"")
  def allValueAsString = (bool ^^ {
    case b => b.toString
  }) | ("""-?\d+\.\d+""".r) | (digits ^^ {
case i => i.toString
}) | stringLiteral | value
def functionAsString : Parser[String] = """[a-zA-Z_0-9]+""".r ~ "(" ~ repsep(
functionAsString | allValueAsString,",") ~ ")" ^^ {
case a ~ b ~ args ~ d => {
a + b + args.mkString(",") + d
}
}
def createDefs : Parser[List[Definition]]
def parseSql(s : String) : List[Definition] = parseAll(createDefs,s) match {
case Success(result,_) => result
case noSuccess : NoSuccess => {
throw new Exception("Fail to parse:" + noSuccess)
}
}
}
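// Hedged usage sketch (illustrative only, not part of the original source).
// A concrete parser mixes SQLParser in and supplies `createDefs`; the object
// name and the stubbed definition list below are hypothetical.
//
//   object DemoParser extends SQLParser {
//     def createDefs: Parser[List[Definition]] = success(Nil) // stub for the demo
//   }
//
//   // Case-insensitive literals and comment skipping come from the trait:
//   DemoParser.parseAll(DemoParser.value, "/* a block comment */ `My_Table`")
//   // => Success("My_Table", ...)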
|
geishatokyo/diff-sql-table
|
parser/src/main/scala/com/geishatokyo/diffsql/parser/SQLParser.scala
|
Scala
|
mit
| 3,980 |
package org.randi3.model
object TrialSubjectIdentificationCreationType extends Enumeration {
val EXTERNAL, CONTINUOUS_COUNTER, TRIAL_ARM_COUNTER = Value
}
|
dschrimpf/randi3-core
|
src/main/scala/org/randi3/model/TrialSubjectIdentificationCreationType.scala
|
Scala
|
gpl-3.0
| 161 |
/*
* Copyright 2001-2011 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.StackDepthExceptionHelper.getStackDepthFun
/**
* Trait that provides an implicit conversion that adds a <code>valueAt</code> method
* to <code>PartialFunction</code>, which will return the value (result) of the function applied to the argument passed to <code>valueAt</code>,
* or throw <code>TestFailedException</code> if the partial function is not defined at the argument.
*
* <p>
* This construct allows you to express in one statement that a partial function should be defined for a particular input,
* and that its result value should meet some expectation. Here's an example:
* </p>
*
* <pre class="stHighlight">
* pf.valueAt("IV") should equal (4)
* </pre>
*
* <p>
* Or, using an assertion instead of a matcher expression:
* </p>
*
* <pre class="stHighlight">
* assert(pf.valueAt("IV") === 4)
* </pre>
*
* <p>
* Were you to simply invoke <code>apply</code> on the <code>PartialFunction</code>, passing in an input value,
* if the partial function wasn't defined at that input, it would throw some exception, but likely not one
* that provides a <a href="StackDepth.html">stack depth</a>:
* </p>
*
* <pre class="stHighlight">
* // Note: a Map[K, V] is a PartialFunction[K, V]
* val pf: PartialFunction[String, Int] = Map("I" -> 1, "II" -> 2, "III" -> 3, "IV" -> 4)
*
* pf("V") should equal (5) // pf("V") throws NoSuchElementException
* </pre>
*
* <p>
* The <code>NoSuchElementException</code> thrown in this situation would cause the test to fail, but without providing a stack depth pointing
* to the failing line of test code. This stack depth, provided by <a href="TestFailedException.html"><code>TestFailedException</code></a> (and a
* few other ScalaTest exceptions), makes it quicker for
* users to navigate to the cause of the failure. Without <code>PartialFunctionValues</code>, to get
* a stack depth exception you would need to make two statements, like this:
* </p>
*
* <pre class="stHighlight">
* val pf: PartialFunction[String, Int] = Map("I" -> 1, "II" -> 2, "III" -> 3, "IV" -> 4)
*
* pf.isDefinedAt("V") should be (true) // throws TestFailedException
* pf("V") should equal (5)
* </pre>
*
* <p>
* The <code>PartialFunctionValues</code> trait allows you to state that more concisely:
* </p>
*
* <pre class="stHighlight">
* val pf: PartialFunction[String, Int] = Map("I" -> 1, "II" -> 2, "III" -> 3, "IV" -> 4)
*
* pf.valueAt("V") should equal (5) // pf.valueAt("V") throws TestFailedException
* </pre>
*/
trait PartialFunctionValues {
/**
* Implicit conversion that adds a <code>valueAt</code> method to <code>PartialFunction</code>.
*
* @param pf the <code>PartialFunction</code> on which to add the <code>valueAt</code> method
*/
implicit def convertPartialFunctionToValuable[A, B](pf: PartialFunction[A, B]) = new Valuable(pf)
/**
* Wrapper class that adds a <code>valueAt</code> method to <code>PartialFunction</code>, allowing
* you to make statements like:
*
* <pre class="stHighlight">
* pf.valueAt("VI") should equal (6)
* </pre>
*
* @param pf An <code>PartialFunction</code> to convert to <code>Valuable</code>, which provides the <code>valueAt</code> method.
*/
class Valuable[A, B](pf: PartialFunction[A, B]) {
/**
* Returns the result of applying the wrapped <code>PartialFunction</code> to the passed input, if it is defined at that input, else
* throws <code>TestFailedException</code> with a detail message indicating the <code>PartialFunction</code> was not defined at the given input.
*/
def valueAt(input: A): B = {
if (pf.isDefinedAt(input)) {
pf.apply(input)
}
else
throw new TestFailedException(sde => Some(Resources("partialFunctionValueNotDefined", input.toString)), None, getStackDepthFun("PartialFunctionValues.scala", "valueAt"))
}
}
}
/**
* Companion object that facilitates the importing of <code>PartialFunctionValues</code> members as
* an alternative to mixing it in. One use case is to import <code>PartialFunctionValues</code>'s members so you can use
* the <code>valueAt</code> method on <code>PartialFunction</code> in the Scala interpreter:
*
* <pre class="stREPL">
* $ scala -cp scalatest-1.7.jar
* Welcome to Scala version 2.9.1.final (Java HotSpot(TM) 64-Bit Server VM, Java 1.6.0_29).
* Type in expressions to have them evaluated.
* Type :help for more information.
*
* scala> import org.scalatest._
* import org.scalatest._
*
* scala> import matchers.ShouldMatchers._
* import matchers.ShouldMatchers._
*
* scala> import PartialFunctionValues._
* import PartialFunctionValues._
*
* scala> val pf: PartialFunction[String, Int] = Map("I" -> 1, "II" -> 2, "III" -> 3, "IV" -> 4)
* pf: PartialFunction[String,Int] = Map(I -> 1, II -> 2, III -> 3, IV -> 4)
*
* scala> pf("IV") should equal (4)
*
* scala> pf("V") should equal (5)
* java.util.NoSuchElementException: key not found: V
* at scala.collection.MapLike$class.default(MapLike.scala:224)
* at scala.collection.immutable.Map$Map4.default(Map.scala:167)
* ...
* </pre>
*/
object PartialFunctionValues extends PartialFunctionValues
|
yyuu/scalatest
|
src/main/scala/org/scalatest/PartialFunctionValues.scala
|
Scala
|
apache-2.0
| 5,890 |
package com.seanshubin.templater.domain
import java.nio.file.Path
case class CopyFileCommand(origin: Path, destination: Path)
|
SeanShubin/generate-from-template
|
domain/src/main/scala/com/seanshubin/templater/domain/CopyFileCommand.scala
|
Scala
|
unlicense
| 128 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package matryoshka.instances.fixedpoint
import slamdata.Predef._
import matryoshka._
import matryoshka.implicits._
import matryoshka.scalacheck.arbitrary._
import scala.Predef.implicitly
import org.scalacheck._
import org.specs2.ScalaCheck
import org.specs2.mutable._
import org.specs2.scalaz.{ScalazMatchers}
import scalaz._, Scalaz._
import scalaz.scalacheck.{ScalazProperties => Props}
class PartialSpec extends Specification with ScalazMatchers with ScalaCheck {
/** For testing cases that should work with truly diverging functions. */
def sometimesNeverGen[A: Arbitrary]: Gen[Partial[A]] =
Gen.oneOf(Arbitrary.arbitrary[Partial[A]], Gen.const(Partial.never[A]))
"Partial laws" >> {
addFragments(properties(Props.equal.laws[Partial[Int]](Partial.equal, implicitly)))
addFragments(properties(Props.monad.laws[Partial](implicitly, implicitly, implicitly, implicitly, Partial.equal)))
addFragments(properties(Props.foldable.laws[Partial]))
}
// https://en.wikipedia.org/wiki/McCarthy_91_function
def mc91(n: Int): Partial[Int] =
if (n > 100) Partial.now(n - 10)
else mc91(n + 11) >>= mc91
"never" should {
"always have more steps" >> prop { (i: Conat) =>
Partial.never[Int].runFor(i) must beRightDisjunction
}
}
"runFor" should {
"return now immediately" in {
Partial.now(13).runFor(Nat.zero[Nat]) must beLeftDisjunction(13)
}
"return a value when it runs past the end" >> prop { (i: Conat) =>
i.transAna[Partial[Int]](Partial.delay(7)).runFor(i) must
beLeftDisjunction(7)
}
"return after multiple runs" >> prop { (a: Conat, b: Conat) =>
b > Nat.zero[Conat] ==> {
val first = (a + b).transAna[Partial[Int]](Partial.delay(27)).runFor(a)
first must beRightDisjunction
first.flatMap(_.runFor(b)) must beLeftDisjunction(27)
}
}
"still pending one short" >> prop { (a: Conat) =>
val first = (a + Nat.one[Conat]).transAna[Partial[Int]](Partial.delay(27)).runFor(a)
first must beRightDisjunction
first.flatMap(_.runFor(a + Nat.one[Conat])) must beLeftDisjunction(27)
}
"return exactly at the end" >> prop { (n: Conat, i: Int) =>
n.transAna[Partial[Int]](Partial.delay(i)).runFor(n) must
beLeftDisjunction(i)
}
}
"unsafePerformSync" should {
"return now immediately" in {
Partial.now(12).unsafePerformSync must equal(12)
}
"return a value when it gets to the end" in {
Partial.later(Partial.later(Partial.now(3))).unsafePerformSync must
equal(3)
}
// NB: This test will depend on the size of your stack, you may have to
// increase the initial value on larger stacks.
"return a value well after stack would overflow" in {
100000000.ana[Partial[Unit]](i => if (i ≟ 0) ().left else (i - 1).right)
.unsafePerformSync must
equal(())
}
// NB: This is because the following test doesn't always get close to the
// lower bound, so we make sure changes don't make things worse.
"check lower bound of mc91" in {
mc91(-150000).unsafePerformSync must equal(91)
}
// TODO: Should work with any Int, but stack overflows on big negatives.
"always terminate with mc91" >> prop { (n: Int) =>
n > -150000 ==>
(mc91(n).unsafePerformSync must equal(if (n <= 100) 91 else n - 10))
}
}
}
|
slamdata/matryoshka
|
tests/shared/src/test/scala/matryoshka/instances/fixedpoint/partial.scala
|
Scala
|
apache-2.0
| 4,006 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.bforms.repositories
import play.api.Logger
import play.api.libs.json._
import reactivemongo.api.DB
import reactivemongo.api.commands.WriteConcern
import reactivemongo.bson.BSONObjectID
import scala.concurrent.{ ExecutionContext, Future }
import uk.gov.hmrc.bforms.core.Opt
import uk.gov.hmrc.bforms.exceptions.InvalidState
import uk.gov.hmrc.bforms.models.{ FormTemplate, DbOperationResult, UpdateSuccess }
import uk.gov.hmrc.mongo.ReactiveRepository
class FormTemplateRepository(implicit mongo: () => DB)
extends ReactiveRepository[FormTemplate, BSONObjectID]("formTemplate", mongo, implicitly[Format[FormTemplate]]) {
def update(
selector: JsObject,
update: FormTemplate
)(
implicit
ex: ExecutionContext
): Future[Opt[DbOperationResult]] = {
val res = collection.update(selector = selector, update = update, writeConcern = WriteConcern.Default, upsert = true, multi = false)
checkUpdateResult(res)
}
def findOne(
selector: JsObject,
projection: JsObject
)(
implicit
ex: ExecutionContext
): Future[Option[FormTemplate]] = {
collection.find(selector = selector, projection = projection).one[FormTemplate]
}
}
|
VlachJosef/bforms
|
app/uk/gov/hmrc/bforms/repositories/FormTemplateRepository.scala
|
Scala
|
apache-2.0
| 1,803 |
/* __ *\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\___/_/ |_/____/_/ | | **
** |/ **
\* */
package scala.concurrent
import scala.concurrent.duration.Duration
/**
* An object that may eventually be completed with a result value of type `T` which may be
* awaited using blocking methods.
*
* The [[Await]] object provides methods that allow accessing the result of an `Awaitable`
* by blocking the current thread until the `Awaitable` has been completed or a timeout has
* occurred.
*/
trait Awaitable[+T] {
/**
* Await the "completed" state of this `Awaitable`.
*
* '''''This method should not be called directly; use [[Await.ready]] instead.'''''
*
* @param atMost
* maximum wait time, which may be negative (no waiting is done),
* [[scala.concurrent.duration.Duration.Inf Duration.Inf]] for unbounded waiting, or a finite positive
* duration
* @return this `Awaitable`
* @throws InterruptedException if the current thread is interrupted while waiting
* @throws TimeoutException if after waiting for the specified time this `Awaitable` is still not ready
* @throws IllegalArgumentException if `atMost` is [[scala.concurrent.duration.Duration.Undefined Duration.Undefined]]
*/
@throws(classOf[TimeoutException])
@throws(classOf[InterruptedException])
def ready(atMost: Duration)(implicit permit: CanAwait): this.type
/**
* Await and return the result (of type `T`) of this `Awaitable`.
*
* '''''This method should not be called directly; use [[Await.result]] instead.'''''
*
* @param atMost
* maximum wait time, which may be negative (no waiting is done),
* [[scala.concurrent.duration.Duration.Inf Duration.Inf]] for unbounded waiting, or a finite positive
* duration
* @return the result value if the `Awaitable` is completed within the specific maximum wait time
* @throws InterruptedException if the current thread is interrupted while waiting
* @throws TimeoutException if after waiting for the specified time this `Awaitable` is still not ready
* @throws IllegalArgumentException if `atMost` is [[scala.concurrent.duration.Duration.Undefined Duration.Undefined]]
*/
@throws(classOf[Exception])
def result(atMost: Duration)(implicit permit: CanAwait): T
}
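// Hedged usage sketch (illustrative, not part of the original source):
// `Awaitable` is normally consumed through the `Await` object rather than by
// calling `ready`/`result` directly, e.g. via a `Future` (which is an Awaitable):
//
//   import scala.concurrent.{ Await, Future }
//   import scala.concurrent.ExecutionContext.Implicits.global
//   import scala.concurrent.duration._
//
//   val f: Future[Int] = Future(21 * 2)
//   val n: Int = Await.result(f, 5.seconds) // blocks the caller for at most 5 seconds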
|
felixmulder/scala
|
src/library/scala/concurrent/Awaitable.scala
|
Scala
|
bsd-3-clause
| 2,779 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import scala.util.Random
class SoftPlusSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val softPlus = SoftPlus[Float]().setName("softPlus")
val input = Tensor[Float](10).apply1(_ => Random.nextFloat())
runSerializationTest(softPlus, input)
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/SoftPlusSpec.scala
|
Scala
|
apache-2.0
| 1,069 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.cloudml.zen.ml.clustering
import java.io.File
import java.util.Random
import LDADefines._
import com.github.cloudml.zen.ml.clustering.algorithm._
import com.github.cloudml.zen.ml.util.SharedSparkContext
import breeze.linalg.{DenseVector => BDV, SparseVector => BSV}
import breeze.linalg.functions.euclideanDistance
import breeze.stats.distributions.Poisson
import com.google.common.io.Files
import org.apache.spark.storage.StorageLevel
import org.scalatest.FunSuite
class LDASuite extends FunSuite with SharedSparkContext {
import LDASuite._
test("ZenLDA || Gibbs sampling") {
val model = generateRandomLDAModel(numTopics, numTerms)
val corpus = sampleCorpus(model, numDocs, numTerms, numTopics)
val data = sc.parallelize(corpus, 2)
data.cache()
val algo = new ZenLDA(numTopics, numThreads)
val docs = LDA.initializeCorpusEdges(data, "bow", numTopics, algo, storageLevel)
val pps = new Array[Double](incrementalLearning)
val lda = LDA(docs, numTopics, alpha, beta, alphaAS, algo, storageLevel)
var i = 0
val startedAt = System.currentTimeMillis()
while (i < incrementalLearning) {
lda.runGibbsSampling(totalIterations)
pps(i) = LDAMetrics(evalMetric, lda).getTotal
i += 1
}
println((System.currentTimeMillis() - startedAt) / 1e3)
pps.foreach(println)
val ppsDiff = pps.init.zip(pps.tail).map { case (lhs, rhs) => rhs - lhs }
assert(ppsDiff.count(_ > 0).toDouble / ppsDiff.length > 0.6)
assert(pps.last - pps.head > 0)
val ldaModel = lda.toLDAModel
val tempDir = Files.createTempDir()
tempDir.deleteOnExit()
val path = tempDir.toURI.toString + File.separator + "lda"
ldaModel.save(sc, path)
val sameModel = LDAModel.load(sc, path)
assert(sameModel.toLocalLDAModel.termTopicsArr === ldaModel.toLocalLDAModel.termTopicsArr)
assert(sameModel.alpha === ldaModel.alpha)
assert(sameModel.beta === ldaModel.beta)
assert(sameModel.alphaAS === ldaModel.alphaAS)
val localLdaModel = sameModel.toLocalLDAModel
val tempDir2 = Files.createTempDir()
tempDir2.deleteOnExit()
val path2 = tempDir2.toString + File.separator + "lda.txt"
localLdaModel.save(path2)
val loadLdaModel = LDAModel.loadLocalLDAModel(path2)
assert(localLdaModel.termTopicsArr === loadLdaModel.termTopicsArr)
assert(localLdaModel.alpha === loadLdaModel.alpha)
assert(localLdaModel.beta === loadLdaModel.beta)
assert(localLdaModel.alphaAS === loadLdaModel.alphaAS)
}
test("LightLDA || Metropolis Hasting sampling") {
val model = generateRandomLDAModel(numTopics, numTerms)
val corpus = sampleCorpus(model, numDocs, numTerms, numTopics)
val data = sc.parallelize(corpus, 2)
data.cache()
val algo = new LightLDA(numTopics, numThreads)
val docs = LDA.initializeCorpusEdges(data, "bow", numTopics, algo, storageLevel)
val pps = new Array[Double](incrementalLearning)
val lda = LDA(docs, numTopics, alpha, beta, alphaAS, algo, storageLevel)
var i = 0
val startedAt = System.currentTimeMillis()
while (i < incrementalLearning) {
lda.runGibbsSampling(totalIterations)
pps(i) = LDAMetrics(evalMetric, lda).getTotal
i += 1
}
println((System.currentTimeMillis() - startedAt) / 1e3)
pps.foreach(println)
val ppsDiff = pps.init.zip(pps.tail).map { case (lhs, rhs) => rhs - lhs }
assert(ppsDiff.count(_ > 0).toDouble / ppsDiff.length > 0.6)
assert(pps.last - pps.head > 0)
val ldaModel = lda.toLDAModel.toLocalLDAModel
data.collect().foreach { case (_, sv) =>
val a = ldaModel.inference(sv)
val b = ldaModel.inference(sv)
val sim: Double = euclideanDistance(a, b)
assert(sim < 0.1)
}
}
test("SparseLDA || Gibbs sampling") {
val model = generateRandomLDAModel(numTopics, numTerms)
val corpus = sampleCorpus(model, numDocs, numTerms, numTopics)
val data = sc.parallelize(corpus, 2)
data.cache()
val algo = new SparseLDA(numTopics, numThreads)
val docs = LDA.initializeCorpusEdges(data, "bow", numTopics, algo, storageLevel)
val pps = new Array[Double](incrementalLearning)
val lda = LDA(docs, numTopics, alpha, beta, alphaAS, algo, storageLevel)
var i = 0
val startedAt = System.currentTimeMillis()
while (i < incrementalLearning) {
lda.runGibbsSampling(totalIterations)
pps(i) = LDAMetrics(evalMetric, lda).getTotal
i += 1
}
println((System.currentTimeMillis() - startedAt) / 1e3)
pps.foreach(println)
val ppsDiff = pps.init.zip(pps.tail).map { case (lhs, rhs) => rhs - lhs }
assert(ppsDiff.count(_ > 0).toDouble / ppsDiff.length > 0.6)
assert(pps.last - pps.head > 0)
val ldaModel = lda.toLDAModel.toLocalLDAModel
data.collect().foreach { case (_, sv) =>
val a = ldaModel.inference(sv)
val b = ldaModel.inference(sv)
val sim: Double = euclideanDistance(a, b)
assert(sim < 0.1)
}
}
test("AliasLDA || Metropolis Hasting sampling") {
val model = generateRandomLDAModel(numTopics, numTerms)
val corpus = sampleCorpus(model, numDocs, numTerms, numTopics)
val data = sc.parallelize(corpus, 2)
data.cache()
val algo = new AliasLDA(numTopics, numThreads)
val docs = LDA.initializeCorpusEdges(data, "bow", numTopics, algo, storageLevel)
val pps = new Array[Double](incrementalLearning)
val lda = LDA(docs, numTopics, alpha, beta, alphaAS, algo, storageLevel)
var i = 0
val startedAt = System.currentTimeMillis()
while (i < incrementalLearning) {
lda.runGibbsSampling(totalIterations)
pps(i) = LDAMetrics(evalMetric, lda).getTotal
i += 1
}
println((System.currentTimeMillis() - startedAt) / 1e3)
pps.foreach(println)
val ppsDiff = pps.init.zip(pps.tail).map { case (lhs, rhs) => rhs - lhs }
assert(ppsDiff.count(_ > 0).toDouble / ppsDiff.length > 0.6)
assert(pps.last - pps.head > 0)
val ldaModel = lda.toLDAModel.toLocalLDAModel
data.collect().foreach { case (_, sv) =>
val a = ldaModel.inference(sv)
val b = ldaModel.inference(sv)
val sim: Double = euclideanDistance(a, b)
assert(sim < 0.1)
}
}
test("F+LDA || Gibbs sampling") {
val model = generateRandomLDAModel(numTopics, numTerms)
val corpus = sampleCorpus(model, numDocs, numTerms, numTopics)
val data = sc.parallelize(corpus, 2)
data.cache()
val algo = new FPlusLDA(numTopics, numThreads)
val docs = LDA.initializeCorpusEdges(data, "bow", numTopics, algo, storageLevel)
val pps = new Array[Double](incrementalLearning)
val lda = LDA(docs, numTopics, alpha, beta, alphaAS, algo, storageLevel)
var i = 0
val startedAt = System.currentTimeMillis()
while (i < incrementalLearning) {
lda.runGibbsSampling(totalIterations)
pps(i) = LDAMetrics(evalMetric, lda).getTotal
i += 1
}
println((System.currentTimeMillis() - startedAt) / 1e3)
pps.foreach(println)
val ppsDiff = pps.init.zip(pps.tail).map { case (lhs, rhs) => rhs - lhs }
assert(ppsDiff.count(_ > 0).toDouble / ppsDiff.length > 0.6)
assert(pps.last - pps.head > 0)
val ldaModel = lda.toLDAModel.toLocalLDAModel
data.collect().foreach { case (_, sv) =>
val a = ldaModel.inference(sv)
val b = ldaModel.inference(sv)
val sim: Double = euclideanDistance(a, b)
assert(sim < 0.1)
}
}
}
object LDASuite {
val numTopics = 50
val numTerms = 1000
val numDocs = 100
val expectedDocLength = 300
val alpha = 0.01
val alphaAS = 1D
val beta = 0.01
val totalIterations = 2
val numThreads = 2
val burnInIterations = 1
val incrementalLearning = 10
val storageLevel = StorageLevel.MEMORY_AND_DISK
val evalMetric = "llh"
/**
* Generate a random LDA model, i.e. the topic-term matrix.
*/
def generateRandomLDAModel(numTopics: Int, numTerms: Int): Array[BDV[Double]] = {
val model = new Array[BDV[Double]](numTopics)
val width = numTerms.toDouble / numTopics
var topic = 0
var i = 0
while (topic < numTopics) {
val topicCentroid = width * (topic + 1)
model(topic) = BDV.zeros[Double](numTerms)
i = 0
while (i < numTerms) {
// treat the term list as a circle, so the distance between the first one and the last one
// is 1, not n-1.
val distance = Math.abs(topicCentroid - i) % (numTerms / 2)
        // Probability decays with distance from the topic centroid
model(topic)(i) = 1D / (1D + Math.abs(distance))
i += 1
}
topic += 1
}
model
}
/**
* Sample one document given the topic-term matrix.
*/
def ldaSampler(
model: Array[BDV[Double]],
topicDist: BDV[Double],
numTermsPerDoc: Int): Array[Int] = {
val samples = new Array[Int](numTermsPerDoc)
val rand = new Random()
(0 until numTermsPerDoc).foreach { i =>
samples(i) = multinomialDistSampler(
rand,
model(multinomialDistSampler(rand, topicDist))
)
}
samples
}
/**
* Sample corpus (many documents) from a given topic-term matrix.
*/
def sampleCorpus(
model: Array[BDV[Double]],
numDocs: Int,
numTerms: Int,
numTopics: Int): Array[BOW] = {
(0 until numDocs).map { i =>
val rand = new Random()
val numTermsPerDoc = Poisson.distribution(expectedDocLength).sample()
val numTopicsPerDoc = rand.nextInt(numTopics / 2) + 1
val topicDist = BDV.zeros[Double](numTopics)
(0 until numTopicsPerDoc).foreach { _ =>
topicDist(rand.nextInt(numTopics)) += 1
}
val sv = BSV.zeros[Int](numTerms)
ldaSampler(model, topicDist, numTermsPerDoc).foreach { term => sv(term) += 1 }
(i.toLong, sv)
}.toArray
}
/**
* A multinomial distribution sampler, using roulette method to sample an Int back.
*/
def multinomialDistSampler(rand: Random, dist: BDV[Double]): Int = {
val distSum = rand.nextDouble() * breeze.linalg.sum[BDV[Double], Double](dist)
def loop(index: Int, accum: Double): Int = {
if (index == dist.length) return dist.length - 1
val sum = accum - dist(index)
if (sum <= 0) index else loop(index + 1, sum)
}
loop(0, distSum)
}
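  // Illustrative worked example (not in the original source): for unnormalised
  // weights dist = [0.2, 0.3, 0.5] and rand.nextDouble() = 0.6, distSum becomes
  // 0.6 * 1.0 = 0.6; the loop subtracts 0.2 and then 0.3 (accumulator still
  // positive), and subtracting 0.5 drives it below zero, so index 2 is returned.
  // Overall, index i is drawn with probability dist(i) / sum(dist).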
}
|
cloudml/zen
|
ml/src/test/scala/com/github/cloudml/zen/ml/clustering/LDASuite.scala
|
Scala
|
apache-2.0
| 11,064 |
package edu.duke.oit.vw.models
import org.apache.solr.client.solrj.SolrServer
import edu.duke.oit.vw.utils._
import java.util.Date
import org.slf4j.{Logger, LoggerFactory}
object Person extends SolrModel
with AttributeParams {
def find(uri: String, solr: SolrServer): Option[Person] = {
getDocumentByIdOrAlternateId(uri,solr) match {
case Some(sd) => Option(PersonExtraction(sd.get("json").toString))
case _ => None
}
}
  // NOTE: person and organization use Option[Date] rather than Date for updatedAt because some people
  // were indexed before that field existed; without Option[], extraction fails for those records
def build(uri:String,
active:Option[Boolean],
updatedAt:Option[Date],
personData:Map[Symbol,String],
pubs:List[Publication],
awards:List[Award],
artisticWorks:List[ArtisticWork],
artisticEvents:List[ArtisticEvent],
grants:List[Grant],
courses:List[Course],
professionalActivities:List[ProfessionalActivity],
positions:List[Position],
addresses:List[Address],
educations:List[Education],
researchAreas:List[ResearchArea],
webpages:List[Webpage],
geographicalFocus:List[GeographicFocus],
newsfeeds:List[Newsfeed],
cvInfo:Option[PersonCVInfo]): Person = {
new Person(uri,
active,
updatedAt,
vivoType = personData('type).stripBrackets(),
label = personData('label),
title = personData('title),
publications = pubs,
awards = awards,
artisticWorks = artisticWorks,
artisticEvents = artisticEvents,
grants = grants,
courses = courses,
professionalActivities = professionalActivities,
positions = positions,
addresses = addresses,
educations = educations,
researchAreas = researchAreas,
webpages = webpages,
geographicalFocus = geographicalFocus,
newsfeeds = newsfeeds,
cvInfo = cvInfo,
attributes = parseAttributes(personData, List('type,'label,'title)))
}
}
case class PersonCVInfo(gifts:List[Gift],
academicPositions:List[AcademicPosition],
licenses:List[License],
pastAppointments:List[PastAppointment]) {
}
case class Person(uri:String,
active:Option[Boolean],
updatedAt:Option[Date],
vivoType:String,
label:String,
title:String,
publications:List[Publication],
awards:List[Award],
artisticWorks:List[ArtisticWork],
artisticEvents:List[ArtisticEvent],
grants:List[Grant],
courses:List[Course],
professionalActivities:List[ProfessionalActivity],
positions:List[Position],
addresses:List[Address],
educations:List[Education],
researchAreas:List[ResearchArea],
webpages:List[Webpage],
geographicalFocus:List[GeographicFocus],
newsfeeds:List[Newsfeed],
cvInfo: Option[PersonCVInfo],
attributes:Option[Map[String, String]])
extends VivoAttributes(uri, vivoType, label, attributes)
with AddToJson
with Timestamped
{
val log = LoggerFactory.getLogger(getClass)
override def uris() = {
var results = (uri :: super.uris) ++
publications.foldLeft(List[String]()) {(u,publication) => u ++ publication.uris} ++
awards.foldLeft(List[String]()) {(u,award) => u ++ award.uris} ++
artisticWorks.foldLeft(List[String]()) {(u,artisticWork) => u ++ artisticWork.uris} ++
artisticEvents.foldLeft(List[String]()) {(u,artisticEvent) => u ++ artisticEvent.uris} ++
grants.foldLeft(List[String]()) {(u,grant) => u ++ grant.uris} ++
courses.foldLeft(List[String]()) {(u,course) => u ++ course.uris} ++
professionalActivities.foldLeft(List[String]()) {(u,professionalActivity) => u ++ professionalActivity.uris} ++
positions.foldLeft(List[String]()) {(u,position) => u ++ position.uris} ++
addresses.foldLeft(List[String]()) {(u,address) => u ++ address.uris} ++
educations.foldLeft(List[String]()) {(u,education) => u ++ education.uris} ++
researchAreas.foldLeft(List[String]()) {(u,area) => u ++ area.uris} ++
webpages.foldLeft(List[String]()) {(u,page) => u ++ page.uris} ++
geographicalFocus.foldLeft(List[String]()) {(u,focus) => u ++ focus.uris} ++
newsfeeds.foldLeft(List[String]()) {(u,newsfeed) => u ++ newsfeed.uris }
if (cvInfo.isDefined) {
log.debug("cvInfo.isDefined")
results = results ++ cvInfo.get.gifts.foldLeft(List[String]()) { (u, gift) => u ++ gift.uris }
results = results ++ cvInfo.get.academicPositions.foldLeft(List[String]()) {(u,academicPosition) => u ++ academicPosition.uris}
results = results ++ cvInfo.get.licenses.foldLeft(List[String]()) {(u,license) => u ++ license.uris}
results = results ++ cvInfo.get.pastAppointments.foldLeft(List[String]()) {(u,PastAppointment) => u ++ PastAppointment.uris}
}
results
}
def personAttributes() = {
this.attributes match {
case Some(attributes) => attributes ++ Map("uri" -> this.uri)
case _ => Map("uri" -> this.uri)
}
}
def academicPositions() = {
this.cvInfo match {
case Some(cvInfo: PersonCVInfo) => cvInfo.academicPositions
case _ => List()
}
}
def gifts() = {
this.cvInfo match {
case Some(cvInfo: PersonCVInfo) => cvInfo.gifts
case _ => List()
}
}
def licenses() = {
this.cvInfo match {
case Some(cvInfo: PersonCVInfo) => cvInfo.licenses
case _ => List()
}
}
def pastAppointments() = {
this.cvInfo match {
case Some(cvInfo: PersonCVInfo) => cvInfo.pastAppointments
case _ => List()
}
}
}
/**
* Wraps the lift-json parsing and extraction of a person.
*/
object PersonExtraction {
def apply(json:String) = {
import net.liftweb.json._
// Brings in default date formats etc.
implicit val formats = DefaultFormats
val j = JsonParser.parse(json)
j.extract[Person]
}
}
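// Hedged usage sketch (illustrative, not part of the original source). The JSON
// shown is abbreviated and hypothetical; a real document carries every field of
// the Person case class:
//
//   val json = """{"uri": "http://example.org/person/1", "label": "Jane Doe", ... }"""
//   val person = PersonExtraction(json) // lift-json extraction throws if required fields are missing
//   person.label                        // => "Jane Doe"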
|
OIT-ADS-Web/vivo_widgets
|
src/main/scala/models/Person.scala
|
Scala
|
bsd-3-clause
| 6,755 |
/**
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.stream.testkit
import java.io.PrintStream
import java.lang.management.{ ManagementFactory, ThreadInfo }
import java.util.Date
import java.util.concurrent.{ TimeoutException, CountDownLatch }
import org.scalatest.{ BeforeAndAfterAll, Suite }
import scala.annotation.tailrec
import scala.concurrent.{ Promise, Awaitable, CanAwait, Await }
import scala.concurrent.duration._
import scala.util.control.NonFatal
import akka.testkit.{ TestKit, TestDuration }
// TODO: until `akka-stream-testkit-experimental` package is released, we need a copy of this file for testing
/**
* The Coroner can be used to print a diagnostic report of the JVM state,
* including stack traces and deadlocks. A report can be printed directly, by
* calling `printReport`. Alternatively, the Coroner can be asked to `watch`
* the JVM and generate a report at a later time - unless the Coroner is cancelled
* by that time.
*
* The latter method is useful for printing diagnostics in the event that, for
* example, a unit test stalls and fails to cancel the Coroner in time. The
* Coroner will assume that the test has "died" and print a report to aid in
* debugging.
*/
object Coroner { // FIXME: remove once going back to project dependencies
/**
* Used to cancel the Coroner after calling `watch`.
* The result of this Awaitable will be `true` if it has been cancelled.
*/
trait WatchHandle extends Awaitable[Boolean] {
/**
* Will try to ensure that the Coroner has finished reporting.
*/
def cancel(): Unit
}
private class WatchHandleImpl(startAndStopDuration: FiniteDuration)
extends WatchHandle {
val cancelPromise = Promise[Boolean]
val startedLatch = new CountDownLatch(1)
val finishedLatch = new CountDownLatch(1)
def waitForStart(): Unit = {
startedLatch.await(startAndStopDuration.length, startAndStopDuration.unit)
}
def started(): Unit = startedLatch.countDown()
def finished(): Unit = finishedLatch.countDown()
def expired(): Unit = cancelPromise.trySuccess(false)
override def cancel(): Unit = {
cancelPromise.trySuccess(true)
finishedLatch.await(startAndStopDuration.length, startAndStopDuration.unit)
}
override def ready(atMost: Duration)(implicit permit: CanAwait): this.type = {
result(atMost)
this
}
override def result(atMost: Duration)(implicit permit: CanAwait): Boolean =
try { Await.result(cancelPromise.future, atMost) } catch { case _: TimeoutException ⇒ false }
}
val defaultStartAndStopDuration = 1.second
/**
* Ask the Coroner to print a report if it is not cancelled by the given deadline.
* The returned handle can be used to perform the cancellation.
*
* If displayThreadCounts is set to true, then the Coroner will print thread counts during start
* and stop.
*/
def watch(duration: FiniteDuration, reportTitle: String, out: PrintStream,
startAndStopDuration: FiniteDuration = defaultStartAndStopDuration,
displayThreadCounts: Boolean = false): WatchHandle = {
val watchedHandle = new WatchHandleImpl(startAndStopDuration)
def triggerReportIfOverdue(duration: Duration): Unit = {
val threadMx = ManagementFactory.getThreadMXBean()
val startThreads = threadMx.getThreadCount
if (displayThreadCounts) {
threadMx.resetPeakThreadCount()
out.println(s"Coroner Thread Count starts at $startThreads in $reportTitle")
}
watchedHandle.started()
try {
if (!Await.result(watchedHandle, duration)) {
watchedHandle.expired()
out.println(s"Coroner not cancelled after ${duration.toMillis}ms. Looking for signs of foul play...")
try printReport(reportTitle, out) catch {
case NonFatal(ex) ⇒ {
out.println("Error displaying Coroner's Report")
ex.printStackTrace(out)
}
}
}
} finally {
if (displayThreadCounts) {
val endThreads = threadMx.getThreadCount
out.println(s"Coroner Thread Count started at $startThreads, ended at $endThreads, peaked at ${threadMx.getPeakThreadCount} in $reportTitle")
}
out.flush()
watchedHandle.finished()
}
}
new Thread(new Runnable { def run = triggerReportIfOverdue(duration) }, "Coroner").start()
watchedHandle.waitForStart()
watchedHandle
}
/**
* Print a report containing diagnostic information.
*/
def printReport(reportTitle: String, out: PrintStream) {
import out.println
val osMx = ManagementFactory.getOperatingSystemMXBean()
val rtMx = ManagementFactory.getRuntimeMXBean()
val memMx = ManagementFactory.getMemoryMXBean()
val threadMx = ManagementFactory.getThreadMXBean()
println(s"""#Coroner's Report: $reportTitle
#OS Architecture: ${osMx.getArch()}
#Available processors: ${osMx.getAvailableProcessors()}
#System load (last minute): ${osMx.getSystemLoadAverage()}
#VM start time: ${new Date(rtMx.getStartTime())}
#VM uptime: ${rtMx.getUptime()}ms
#Heap usage: ${memMx.getHeapMemoryUsage()}
#Non-heap usage: ${memMx.getNonHeapMemoryUsage()}""".stripMargin('#'))
def dumpAllThreads: Seq[ThreadInfo] = {
threadMx.dumpAllThreads(
threadMx.isObjectMonitorUsageSupported,
threadMx.isSynchronizerUsageSupported)
}
def findDeadlockedThreads: (Seq[ThreadInfo], String) = {
val (ids, desc) = if (threadMx.isSynchronizerUsageSupported()) {
(threadMx.findDeadlockedThreads(), "monitors and ownable synchronizers")
} else {
(threadMx.findMonitorDeadlockedThreads(), "monitors, but NOT ownable synchronizers")
}
if (ids == null) {
(Seq.empty, desc)
} else {
val maxTraceDepth = 1000 // Seems deep enough
(threadMx.getThreadInfo(ids, maxTraceDepth), desc)
}
}
def printThreadInfos(threadInfos: Seq[ThreadInfo]) = {
if (threadInfos.isEmpty) {
println("None")
} else {
for (ti ← threadInfos.sortBy(_.getThreadName)) { println(threadInfoToString(ti)) }
}
}
def threadInfoToString(ti: ThreadInfo): String = {
val sb = new java.lang.StringBuilder
      sb.append("\"")
      sb.append(ti.getThreadName)
      sb.append("\" Id=")
sb.append(ti.getThreadId)
sb.append(" ")
sb.append(ti.getThreadState)
if (ti.getLockName != null) {
sb.append(" on " + ti.getLockName)
}
if (ti.getLockOwnerName != null) {
        sb.append(" owned by \"")
        sb.append(ti.getLockOwnerName)
        sb.append("\" Id=")
sb.append(ti.getLockOwnerId)
}
if (ti.isSuspended) {
sb.append(" (suspended)")
}
if (ti.isInNative) {
sb.append(" (in native)")
}
      sb.append('\n')
      def appendMsg(msg: String, o: Any) = {
        sb.append(msg)
        sb.append(o)
        sb.append('\n')
}
val stackTrace = ti.getStackTrace
for (i ← 0 until stackTrace.length) {
val ste = stackTrace(i)
        appendMsg("\tat ", ste)
if (i == 0 && ti.getLockInfo != null) {
import java.lang.Thread.State._
ti.getThreadState match {
            case BLOCKED       ⇒ appendMsg("\t- blocked on ", ti.getLockInfo)
            case WAITING       ⇒ appendMsg("\t- waiting on ", ti.getLockInfo)
            case TIMED_WAITING ⇒ appendMsg("\t- waiting on ", ti.getLockInfo)
case _ ⇒
}
}
for (mi ← ti.getLockedMonitors if mi.getLockedStackDepth == i)
        appendMsg("\t- locked ", mi)
}
val locks = ti.getLockedSynchronizers
if (locks.length > 0) {
        appendMsg("\n\tNumber of locked synchronizers = ", locks.length)
        for (li ← locks) appendMsg("\t- ", li)
      }
      sb.append('\n')
return sb.toString
}
println("All threads:")
printThreadInfos(dumpAllThreads)
val (deadlockedThreads, deadlockDesc) = findDeadlockedThreads
println(s"Deadlocks found for $deadlockDesc:")
printThreadInfos(deadlockedThreads)
}
}
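// Hedged usage sketch (illustrative, not part of the original source): ask the
// Coroner to report if the step below has not been cancelled within 30 seconds.
// `runLongStep()` is a hypothetical workload.
//
//   import scala.concurrent.duration._
//
//   val handle = Coroner.watch(30.seconds, "long-running step", System.err)
//   try runLongStep()
//   finally handle.cancel() // cancelling in time suppresses the report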
/**
* Mixin for tests that should be watched by the Coroner. The `startCoroner`
* and `stopCoroner` methods should be called before and after the test runs.
* The Coroner will display its report if the test takes longer than the
* (dilated) `expectedTestDuration` to run.
*
* If displayThreadCounts is set to true, then the Coroner will print thread
* counts during start and stop.
*/
trait WatchedByCoroner {
self: TestKit ⇒
@volatile private var coronerWatch: Coroner.WatchHandle = _
final def startCoroner() {
coronerWatch = Coroner.watch(expectedTestDuration.dilated, getClass.getName, System.err,
startAndStopDuration.dilated, displayThreadCounts)
}
final def stopCoroner() {
coronerWatch.cancel()
coronerWatch = null
}
def expectedTestDuration: FiniteDuration
def displayThreadCounts: Boolean = true
def startAndStopDuration: FiniteDuration = Coroner.defaultStartAndStopDuration
}
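// Hedged usage sketch (illustrative, not part of the original source). A TestKit
// based suite would typically wire the Coroner in around the whole run; the spec
// name and the 60-second budget are hypothetical:
//
//   class MyStreamSpec extends TestKit(ActorSystem("MyStreamSpec"))
//       with WordSpecLike with BeforeAndAfterAll with WatchedByCoroner {
//     override def expectedTestDuration: FiniteDuration = 60.seconds
//     override def beforeAll(): Unit = startCoroner()
//     override def afterAll(): Unit = stopCoroner()
//     // ... tests; if the suite hangs past the budget, the Coroner prints its report
//   }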
|
imace/open-muvr
|
server/exercise/src/test/scala/akka/stream/testkit/Coroner.scala
|
Scala
|
apache-2.0
| 9,262 |