code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---
package com.mariotti.urlshortener.dao.persistence
import com.mariotti.urlshortener.model.Stats
object MapperWriterDB {
def put(id:Int, value:String){
Persistence.put(id, value)
}
def putStats(id:Int, stats:Stats){
Persistence.putStats(id, stats)
}
}
|
LupoDiRoma/miscellanea
|
scala/url_shortener/src/main/scala/com/mariotti/urlshortener/dao/persistence/MapperWriterDB.scala
|
Scala
|
apache-2.0
| 267 |
package com.cloudray.scalapress.plugin.ecommerce.tag
import org.scalatest.{OneInstancePerTest, FunSuite}
import org.scalatest.mock.MockitoSugar
import com.cloudray.scalapress.plugin.ecommerce.domain.Order
import javax.servlet.http.HttpServletRequest
import com.cloudray.scalapress.plugin.ecommerce.tags.InvoiceAccountNameTag
import com.cloudray.scalapress.account.Account
import com.cloudray.scalapress.framework.{ScalapressRequest, ScalapressContext}
/** @author Stephen Samuel */
class InvoiceAccountNameTagTest extends FunSuite with MockitoSugar with OneInstancePerTest {
val order = new Order
order.id = 51
order.vatable = true
order.deliveryDetails = "superfast delivery"
order.account = new Account
order.account.name = "sammy"
order.account.email = "[email protected]"
val tag = new InvoiceAccountNameTag()
val req = mock[HttpServletRequest]
val context = mock[ScalapressContext]
val sreq = new ScalapressRequest(req, context).withOrder(order)
test("tag renders email from account") {
val actual = tag.render(sreq, Map.empty)
assert("sammy" === actual.get)
}
}
|
vidyacraghav/scalapress
|
src/test/scala/com/cloudray/scalapress/plugin/ecommerce/tag/InvoiceAccountNameTagTest.scala
|
Scala
|
apache-2.0
| 1,109 |
package org.alitouka.spark.dbscan.spatial.rdd
import org.apache.spark.rdd.{RDD, ShuffledRDD}
import org.alitouka.spark.dbscan.spatial.{PointSortKey, BoxCalculator, Box, Point}
import org.alitouka.spark.dbscan.PairOfAdjacentBoxIds
/**
* An RDD that stores points which are close to each other but
* reside in different density-based partitions of the original data set
* (these partitions are referred to as "boxes" below)
*
* Each partition of this RDD contains points from 2 adjacent boxes.
* Each point may appear multiple times in this RDD: once for each box adjacent to the box in which
* it resides.
*
* @param prev An RDD where each entry contains a pair of IDs of adjacent boxes and a point which resides in one of
* these boxes
* @param adjacentBoxIdPairs A collection of distinct pairs of box IDs
*/
private [dbscan] class PointsInAdjacentBoxesRDD (prev: RDD[(PairOfAdjacentBoxIds, Point)], val adjacentBoxIdPairs: Array[PairOfAdjacentBoxIds])
extends ShuffledRDD [PairOfAdjacentBoxIds, Point, Point] (prev, new AdjacentBoxesPartitioner(adjacentBoxIdPairs))
private [dbscan] object PointsInAdjacentBoxesRDD {
def apply (points: RDD[Point], boxesWithAdjacentBoxes: Iterable[Box]): PointsInAdjacentBoxesRDD = {
val adjacentBoxIdPairs = BoxCalculator.generateDistinctPairsOfAdjacentBoxIds(boxesWithAdjacentBoxes).toArray
val broadcastBoxIdPairs = points.sparkContext.broadcast(adjacentBoxIdPairs)
val pointsKeyedByPairOfBoxes = points.mapPartitions {
it => {
val boxIdPairs = broadcastBoxIdPairs.value
for (p <- it; pair <- boxIdPairs; if p.boxId == pair._1 || p.boxId == pair._2)
yield (pair, p)
}
}
new PointsInAdjacentBoxesRDD(pointsKeyedByPairOfBoxes, adjacentBoxIdPairs)
}
}
|
isaacboucinha/CardioStream
|
web-app/src/main/scala/org/alitouka/spark/dbscan/spatial/rdd/PointsInAdjacentBoxesRDD.scala
|
Scala
|
apache-2.0
| 1,802 |
package com.plista.solrindexer.persistence
case class SolrConfig(url: String, username: String, password: String, solr5url: String)
|
rvegas/solrindexer
|
src/main/scala/com/plista/solrindexer/persistence/SolrConfig.scala
|
Scala
|
unlicense
| 133 |
package de.htwg.zeta.persistence.actorCache
import java.util.UUID
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
import scala.util.Failure
import scala.util.Success
import akka.actor.ActorRef
import akka.actor.ActorSystem
import akka.pattern.ask
import akka.routing.ConsistentHashingPool
import akka.routing.ConsistentHashingRouter.ConsistentHashMapping
import akka.util.Timeout
import de.htwg.zeta.common.models.entity.{Entity, User}
import de.htwg.zeta.persistence.actorCache.EntityCacheActor.Create
import de.htwg.zeta.persistence.actorCache.EntityCacheActor.Delete
import de.htwg.zeta.persistence.actorCache.EntityCacheActor.Read
import de.htwg.zeta.persistence.actorCache.EntityCacheActor.Update
import de.htwg.zeta.persistence.general.AccessAuthorisationRepository
import de.htwg.zeta.persistence.general.BondedTaskRepository
import de.htwg.zeta.persistence.general.EntityRepository
import de.htwg.zeta.persistence.general.EventDrivenTaskRepository
import de.htwg.zeta.persistence.general.FilterImageRepository
import de.htwg.zeta.persistence.general.FilterRepository
import de.htwg.zeta.persistence.general.GdslProjectRepository
import de.htwg.zeta.persistence.general.GeneratorImageRepository
import de.htwg.zeta.persistence.general.GeneratorRepository
import de.htwg.zeta.persistence.general.GraphicalDslInstanceRepository
import de.htwg.zeta.persistence.general.GraphicalDslReleaseRepository
import de.htwg.zeta.persistence.general.LogRepository
import de.htwg.zeta.persistence.general.SettingsRepository
import de.htwg.zeta.persistence.general.TimedTaskRepository
import de.htwg.zeta.persistence.general.UserRepository
import javax.inject.Inject
import javax.inject.Singleton
/**
* Actor Cache Implementation of EntityPersistence.
*/
sealed abstract class ActorCacheEntityRepository[E <: Entity](
underlying: EntityRepository[E],
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
implicit val timeout: Timeout
)(implicit manifest: Manifest[E]) extends EntityRepository[E] {
protected def hashMapping: ConsistentHashMapping = {
case Create(entity) => entity.id.hashCode
case Read(id) => id.hashCode
case Update(id, _) => id.hashCode
case Delete(id) => id.hashCode
}
private val router: ActorRef = system.actorOf(
ConsistentHashingPool(
nrOfInstances = numberActorsPerEntityType,
hashMapping = hashMapping
).props(
EntityCacheActor.props(underlying, cacheDuration)
),
entityTypeName
)
override def create(entity: E): Future[E] = {
(router ? Create(entity)).flatMap {
case Success(entity: E) => Future.successful(entity)
case Failure(e) => Future.failed(e)
}
}
override def read(id: UUID): Future[E] = {
(router ? Read(id)).flatMap {
case Success(entity: E) => Future.successful(entity)
case Failure(e) => Future.failed(e)
}
}
override def update(id: UUID, updateEntity: E => E): Future[E] = {
(router ? Update(id, updateEntity)).flatMap {
case Success(entity: E) => Future.successful(entity)
case Failure(e) => Future.failed(e)
}
}
override def delete(id: UUID): Future[Unit] = {
(router ? Delete(id)).flatMap {
case Success(()) => Future.successful(())
case Failure(e) => Future.failed(e)
}
}
override def readAllIds(): Future[Set[UUID]] = {
underlying.readAllIds()
}
}
@Singleton
class ActorCacheAccessAuthorisationRepository @Inject()(
underlying: AccessAuthorisationRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with AccessAuthorisationRepository
@Singleton
class ActorCacheBondedTaskRepository @Inject()(
underlying: BondedTaskRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with BondedTaskRepository
@Singleton
class ActorCacheEventDrivenTaskRepository @Inject()(
underlying: EventDrivenTaskRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with EventDrivenTaskRepository
@Singleton
class ActorCacheFilterRepository @Inject()(
underlying: FilterRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with FilterRepository
@Singleton
class ActorCacheFilterImageRepository @Inject()(
underlying: FilterImageRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with FilterImageRepository
@Singleton
class ActorCacheGeneratorRepository @Inject()(
underlying: GeneratorRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with GeneratorRepository
@Singleton
class ActorCacheGeneratorImageRepository @Inject()(
underlying: GeneratorImageRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with GeneratorImageRepository
@Singleton
class ActorCacheLogRepository @Inject()(
underlying: LogRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with LogRepository
@Singleton
class ActorCacheGraphicalDslRepository @Inject()(
underlying: GdslProjectRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with GdslProjectRepository
@Singleton
class ActorCacheGraphicalDslReleaseRepository @Inject()(
underlying: GraphicalDslReleaseRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with GraphicalDslReleaseRepository
@Singleton
class ActorCacheGraphicalDslInstanceRepository @Inject()(
underlying: GraphicalDslInstanceRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with GraphicalDslInstanceRepository
@Singleton
class ActorCacheSettingsRepository @Inject()(
underlying: SettingsRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with SettingsRepository
@Singleton
class ActorCacheTimedTaskRepository @Inject()(
underlying: TimedTaskRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with TimedTaskRepository
@Singleton
class ActorCacheUserRepository @Inject()(
underlying: UserRepository,
system: ActorSystem,
numberActorsPerEntityType: Int,
cacheDuration: FiniteDuration,
timeout: Timeout
) extends ActorCacheEntityRepository(underlying, system, numberActorsPerEntityType, cacheDuration, timeout)
with UserRepository {
/** Get a user by email
*
* @param email The email of the user
* @return Future containing the read entity
*/
override def readByEmail(email: String): Future[User] = {
// TODO
Future.failed(new NotImplementedError)
}
}
|
Zeta-Project/zeta
|
api/persistence/src/main/scala/de/htwg/zeta/persistence/actorCache/ActorCacheEntityRepository.scala
|
Scala
|
bsd-2-clause
| 8,724 |
/**
* Problem: http://www.codechef.com/SPT2015/problems/SPIT1
* GitHub: https://github.com/amezhenin/codechef_problems
*/
import scala.collection.mutable.ListBuffer
object Main {
/**
* Checkout https://github.com/amezhenin/codechef_scala_template to test your solutions with sbt-doctest
* {{{
* >>> Main.alg(List(("mahi", 3), ("vicky", 5), ("mahi", 2)))
* vicky
*
* >>> Main.alg(List(("mahi", 3), ("mahi", 2), ("vicky", 5), ("ankit", 5), ("pooja", 4)))
* mahi
*
* }}}
* */
def alg(a: List[(String, Int)]):String = {
val g = a
.zipWithIndex
.map(x => (x._1._1, x._1._2, x._2))
.groupBy(_._1)
.map(x => (x._1, x._2.map(y => y._2).sum, x._2.map(y => y._3).max))
val max = g.map(_._2).max
g.filter(_._2 == max).toList.sortBy(_._3).head._1
}
def main(args : Array[String]) = {
val a = ListBuffer.empty[(String, Int)]
for (i <- 0 until readInt()){
val r = readLine().split(" ")
a += Pair(r(0), r(1).toInt)
}
val res = alg(a.toList)
println(res)
}
}
|
amezhenin/codechef_problems
|
contests/SPT2015/spit1.scala
|
Scala
|
mit
| 1,063 |
/* Copyright (C) 2008-2016 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.bib.parser
import scala.annotation.tailrec
import scala.collection.immutable.TreeMap
object Dom {
final case class Document(
comments: List[String],
preambles: List[String],
entries: Map[String, Entry])
final case class Entry(
ty: String,
citationKey: String,
crossReference: Option[Entry], // todo: maybe remove this field - I haven't seen a "crossref" yet
authors: Option[List[Name]],
editors: Option[List[Name]],
otherFields: Map[String, String])
final case class Name(
first: String,
von: String,
last: String,
jr: String)
def stringToDom(str: String, expandAbbreviations: Boolean = true): Either[String, Document] =
try {
DocumentParser.parseString(str).right.map(astToDom(_, expandAbbreviations))
} catch {
case e: Exception => Left(e.toString)
}
private[parser] def astToDom(astDoc: AST.Document, expandAbbreviations: Boolean = true): Document = {
// NOTE: some fields (acknowledgement, etc) don't quote their string inputs so we just pass through
// symbols that we can't find unchanged. month abbreviations also are not really useful to expand
val standardEnvironment = Map.empty[String, String].withDefault(identity)
val emptyDocument = Document(Nil, Nil, Map.empty)
def evalValue(value: AST.Value, env: Map[String, String]): String = value match {
case AST.Literal(str) => str
case AST.Abbrev(id) => if (expandAbbreviations) env(id) else id
case AST.Concat(l, r) => evalValue(l, env) + evalValue(r, env)
}
@tailrec def loop(
currentDoc: Document = emptyDocument,
astEntries: List[AST.Entry] = astDoc.entries,
env: Map[String, String] = standardEnvironment
): Document = astEntries match {
case Nil => currentDoc
case entry :: rest => entry match {
case AST.StringEntry(name, value) =>
loop(currentDoc, rest, env + (name -> evalValue(value, env)))
case AST.CommentEntry(comment) =>
val newComments =
if (comment.trim.isEmpty) currentDoc.comments
else currentDoc.comments :+ comment
loop(currentDoc.copy(comments = newComments), rest, env)
case AST.PreambleEntry(pre) =>
loop(currentDoc.copy(preambles = currentDoc.preambles :+ evalValue(pre, env)), rest, env)
case AST.RegularEntry(ty, citationKey, tags) =>
val insensitiveMap = new TreeMap[String, String]()(CaseInsensitiveCompare)
val evaldTags = tags.foldLeft(insensitiveMap)((acc, el) => acc + (el._1 -> evalValue(el._2, env)))
val crossRefEntry = for {
referenceName <- evaldTags.get("crossref")
referenceEntry <- currentDoc.entries.get(referenceName)
} yield referenceEntry
def namesForField(fieldName: String) =
evaldTags.get(fieldName).map(NameParser.stringToNames(_)).toList.flatten
val remainingTags = evaldTags - "crossref" - "author" - "editor"
val authorNames = Some(namesForField("author"))
val editorNames = Some(namesForField("editor"))
val entry = Entry(ty, citationKey, crossRefEntry, authorNames, editorNames, remainingTags)
loop(currentDoc.copy(entries = currentDoc.entries + (entry.citationKey -> entry)), rest, env)
}
}
loop()
}
object CaseInsensitiveCompare extends Ordering[String] {
def compare(x: String, y: String) = x.compareToIgnoreCase(y)
}
}
|
strubell/factorie
|
src/main/scala/cc/factorie/app/bib/parser/Dom.scala
|
Scala
|
apache-2.0
| 5,095 |
package org.improving.fuse
import com.sun.jna._
import jnajava.C._
import jnajava.Fuse._
import java.util.{ Map, HashMap }
import jnajava.Fuse.Operations
import java.{ lang => jl }
import jl.reflect.{ Method, InvocationHandler }
object Fuse {
// fuse_main is implemented as a macro:
// #define fuse_main(argc, argv, op, user_data) fuse_main_real(argc, argv, op, sizeof(*(op)), user_data)
final val invocationMapper: InvocationMapper = new InvocationMapper() {
def getInvocationHandler(lib: NativeLibrary, m: Method): InvocationHandler = {
if (m.getName != "fuse_main") null
else {
val f: Function = lib getFunction "fuse_main_real"
new InvocationHandler {
def invoke(proxy: Object, method: Method, args: Array[Object]): Object = {
val newArgs = new Array[Object](5)
newArgs(0) = args(0).asInstanceOf[jl.Integer]
newArgs(1) = args(1).asInstanceOf[Array[String]]
newArgs(2) = args(2)
newArgs(3) = new jl.Integer(args(2).asInstanceOf[Operations].size)
newArgs(4) = args(3)
new jl.Integer(f invokeInt newArgs)
//
// int argc = (Integer)args[0];
// String[] argv = (String[])args[1];
// Object[] newArgs = new Object[5];
// FuseLibrary.FuseOperations fops = (FuseLibrary.FuseOperations)args[2];
// newArgs[0] = new Integer(argc);
// newArgs[1] = argv;
// newArgs[2] = args[2];
// newArgs[3] = ((FuseLibrary.FuseOperations)args[2]).size();
// newArgs[4] = args[3];
//
// return f.invokeInt(newArgs);
}
}
}
}
}
final val libOptions: java.util.Map[String, Object] =
new java.util.HashMap[String, Object]
libOptions.put(Library.OPTION_INVOCATION_MAPPER, invocationMapper)
final val LIBFUSE: FuseLibrary =
Native.loadLibrary("fuse", classOf[FuseLibrary], libOptions).asInstanceOf[FuseLibrary]
abstract trait FuseLibrary extends Library {
def fuse_main(argc: Int, argv: Array[String], op: Operations, user_data: Pointer)
def fuse_get_context(): StructFuseContext
}
}
// package org.improving.fuse;
//
// import static org.improving.fuse.CommonStruct.*;
// import static org.improving.fuse.CommonFuse.*;
// import com.sun.jna.*;
// import java.util.HashMap;
// import java.util.Map;
//
// public interface FuseLibrary extends Library {
// FuseLibrary LIBFUSE = (FuseLibrary)Native.loadLibrary("fuse", FuseLibrary.class, Fuse.libOptions);
// // FuseLibrary SYNC_INSTANCE = (FuseLibrary)Native.synchronizedLibrary(LIBFUSE);
//
// // public static class ScalaCallback extends Structure { public Callback cb; }
// public static class FuseOperations extends Structure {
// public ScalaCallback[] callbacks = new ScalaCallback[FUSE_OPERATION_CALLBACKS];
// }
//
// // int fuse_main(int argc, String argv[], const struct fuse_operations *op, void *user_data);
// public int fuse_main(int argc, String argv[], FuseOperations op, Pointer user_data);
//
// // struct fuse_context *fuse_get_context(void);
// public StructFuseContext fuse_get_context();
// }
|
tonosaman/jna-fuse-scala
|
src/scala/lib/Fuse.scala
|
Scala
|
isc
| 3,241 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.mv.datamap
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.analysis.{UnresolvedAlias, UnresolvedAttribute}
import org.apache.spark.sql.catalyst.expressions.{Alias, ScalaUDF}
import org.apache.spark.sql.catalyst.plans.logical.{Command, DeserializeToObject, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.datasources.LogicalRelation
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.datamap.DataMapStoreManager
import org.apache.carbondata.core.metadata.schema.datamap.DataMapClassProvider
import org.apache.carbondata.core.metadata.schema.table.DataMapSchema
import org.apache.carbondata.datamap.DataMapManager
import org.apache.carbondata.mv.rewrite.{SummaryDataset, SummaryDatasetCatalog}
/**
* Analyzer rule to rewrite the query for MV datamap
*
* @param sparkSession
*/
class MVAnalyzerRule(sparkSession: SparkSession) extends Rule[LogicalPlan] {
// TODO Find way better way to get the provider.
private val dataMapProvider =
DataMapManager.get().getDataMapProvider(null,
new DataMapSchema("", DataMapClassProvider.MV.getShortName), sparkSession)
private val LOGGER = LogServiceFactory.getLogService(classOf[MVAnalyzerRule].getName)
override def apply(plan: LogicalPlan): LogicalPlan = {
var needAnalysis = true
plan.transformAllExpressions {
// first check whether any preAgg scala UDF is present in the plan; if it is, the call comes from
// the create-preaggregate-table flow, so there is no need to transform the query plan
// TODO Add different UDF name
case al@Alias(udf: ScalaUDF, name) if name.equalsIgnoreCase("preAgg") =>
needAnalysis = false
al
// in case of a query, if any unresolved alias is present then wait for the plan to be resolved;
// return the same plan, as we can transform the plan only when everything is resolved
case unresolveAlias@UnresolvedAlias(_, _) =>
needAnalysis = false
unresolveAlias
case attr@UnresolvedAttribute(_) =>
needAnalysis = false
attr
}
val catalog = DataMapStoreManager.getInstance().getDataMapCatalog(dataMapProvider,
DataMapClassProvider.MV.getShortName).asInstanceOf[SummaryDatasetCatalog]
if (needAnalysis && catalog != null && isValidPlan(plan, catalog)) {
val modularPlan = catalog.mvSession.sessionState.rewritePlan(plan).withMVTable
if (modularPlan.find (_.rewritten).isDefined) {
val compactSQL = modularPlan.asCompactSQL
LOGGER.audit(s"\n$compactSQL\n")
val analyzed = sparkSession.sql(compactSQL).queryExecution.analyzed
analyzed
} else {
plan
}
} else {
plan
}
}
def isValidPlan(plan: LogicalPlan, catalog: SummaryDatasetCatalog): Boolean = {
!plan.isInstanceOf[Command] && !isDataMapExists(plan, catalog.listAllSchema()) &&
!plan.isInstanceOf[DeserializeToObject]
}
/**
* Check whether datamap table already updated in the query.
*
* @param plan
* @param mvs
* @return
*/
def isDataMapExists(plan: LogicalPlan, mvs: Array[SummaryDataset]): Boolean = {
val catalogs = plan collect {
case l: LogicalRelation => l.catalogTable
}
catalogs.isEmpty || catalogs.exists { c =>
mvs.exists { mv =>
val identifier = mv.dataMapSchema.getRelationIdentifier
identifier.getTableName.equals(c.get.identifier.table) &&
identifier.getDatabaseName.equals(c.get.database)
}
}
}
}
|
jatin9896/incubator-carbondata
|
datamap/mv/core/src/main/scala/org/apache/carbondata/mv/datamap/MVAnalyzerRule.scala
|
Scala
|
apache-2.0
| 4,385 |
package com.twitter.finagle.cacheresolver
import _root_.java.io.ByteArrayInputStream
import _root_.java.net.{SocketAddress, InetSocketAddress}
import com.google.gson.GsonBuilder
import com.twitter.common.io.{Codec,JsonCodec}
import com.twitter.common.zookeeper._
import com.twitter.concurrent.Spool
import com.twitter.concurrent.Spool.*::
import com.twitter.conversions.time._
import com.twitter.finagle.{Group, Resolver, Addr, WeightedInetSocketAddress}
import com.twitter.finagle.builder.Cluster
import com.twitter.finagle.stats.{ClientStatsReceiver, StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.zookeeper.{ZkGroup, DefaultZkClientFactory, ZookeeperServerSetCluster}
import com.twitter.finagle.{Group, Resolver}
import com.twitter.thrift.ServiceInstance
import com.twitter.thrift.Status.ALIVE
import com.twitter.util._
import scala.collection.mutable
// Type definition representing a cache node
case class CacheNode(host: String, port: Int, weight: Int, key: Option[String] = None) extends SocketAddress {
// Use overloads to keep the same ABI
def this(host: String, port: Int, weight: Int) = this(host, port, weight, None)
}
/**
* Indicates that an error occurred while resolving a cache address.
* See [[com.twitter.finagle.memcached.TwitterCacheResolver]] for details.
*/
class TwitterCacheResolverException(msg: String) extends Exception(msg)
/**
* A [[com.twitter.finagle.Resolver]] for resolving destination names associated
* with Twitter cache pools.
*/
class TwitterCacheResolver extends Resolver {
val scheme = "twcache"
def bind(arg: String) = {
arg.split("!") match {
// twcache!<host1>:<port>:<weight>:<key>,<host2>:<port>:<weight>:<key>,<host3>:<port>:<weight>:<key>
case Array(hosts) =>
val group = CacheNodeGroup(hosts) map {
case node: CacheNode => node: SocketAddress
}
group.set map { newSet => Addr.Bound(newSet) }
// twcache!zkhost:2181!/twitter/service/cache/<stage>/<name>
case Array(zkHosts, path) =>
val zkClient = DefaultZkClientFactory.get(DefaultZkClientFactory.hostSet(zkHosts))._1
val group = CacheNodeGroup.newZkCacheNodeGroup(
path, zkClient, ClientStatsReceiver.scope(scheme).scope(path)
) map { case c: CacheNode => c: SocketAddress }
group.set map { newSet => Addr.Bound(newSet) }
case _ =>
throw new TwitterCacheResolverException(
"Invalid twcache format \\"%s\\"".format(arg))
}
}
}
// TODO: Rewrite Memcache cluster representation in terms of Var[Addr].
object CacheNodeGroup {
// <host1>:<port>:<weight>:<key>,<host2>:<port>:<weight>:<key>,<host3>:<port>:<weight>:<key>
def apply(hosts: String) = {
val hostSeq = hosts.split(Array(' ', ','))
.filter((_ != ""))
.map(_.split(":"))
.map {
case Array(host) => (host, 11211, 1, None)
case Array(host, port) => (host, port.toInt, 1, None)
case Array(host, port, weight) => (host, port.toInt, weight.toInt, None)
case Array(host, port, weight, key) => (host, port.toInt, weight.toInt, Some(key))
}
newStaticGroup(hostSeq.map {
case (host, port, weight, key) => new CacheNode(host, port, weight, key)
}.toSet)
}
def apply(group: Group[SocketAddress], useOnlyResolvedAddress: Boolean = false) = group collect {
case node: CacheNode => node
// TODO: should we use the weights propagated here? The weights passed
// by WeightedInetSocketAddress are doubles -- should we discretize these?
case WeightedInetSocketAddress(ia, weight)
if useOnlyResolvedAddress && !ia.isUnresolved =>
// Note: unresolved addresses won't be added, even if they become resolvable after being added
new CacheNode(ia.getHostName, ia.getPort, 1,
Some(ia.getAddress.getHostAddress + ":" + ia.getPort))
case WeightedInetSocketAddress(ia, weight) if !useOnlyResolvedAddress =>
new CacheNode(ia.getHostName, ia.getPort, 1, None)
}
def newStaticGroup(cacheNodeSet: Set[CacheNode]) = Group(cacheNodeSet.toSeq:_*)
def newZkCacheNodeGroup(
path: String, zkClient: ZooKeeperClient, statsReceiver: StatsReceiver = NullStatsReceiver
) = new ZookeeperCacheNodeGroup(zkPath = path, zkClient = zkClient, statsReceiver = statsReceiver)
}
/**
* Cache specific cluster implementation.
* - A cache pool is a Cluster of cache nodes.
* - a cache pool requires an underlying pool manager as the source of the cache nodes
* - the underlying pool manager encapsulates logic of monitoring the cache node changes and
* deciding when to update the cache pool cluster
*/
object CachePoolCluster {
val timer = new JavaTimer(isDaemon = true)
/**
* Cache pool based on a static list
* @param cacheNodeSet static set of cache nodes to construct the cluster
*/
def newStaticCluster(cacheNodeSet: Set[CacheNode]) = new StaticCachePoolCluster(cacheNodeSet)
/**
* Zookeeper based cache pool cluster.
* The cluster will monitor the underlying serverset changes and report the detected underlying
* pool size. The cluster snapshot will be updated during cache-team's managed operation, and
* the Future spool will be updated with corresponding changes
*
* @param zkPath the zookeeper path representing the cache pool
* @param zkClient zookeeper client talking to the zookeeper, it will only be used to read zookeeper
* @param backupPool Optional, the backup static pool to use in case of ZK failure. Empty pool means
* the same as no backup pool.
* @param statsReceiver Optional, the destination to report the stats to
*/
def newZkCluster(zkPath: String, zkClient: ZooKeeperClient, backupPool: Option[Set[CacheNode]] = None, statsReceiver: StatsReceiver = NullStatsReceiver) =
new ZookeeperCachePoolCluster(zkPath, zkClient, backupPool, statsReceiver)
/**
* Zookeeper based cache pool cluster.
* The cluster will monitor the underlying serverset changes and report the detected underlying
* pool size. The cluster snapshot is unmanaged in a way that any serverset change will be immediately
* reflected.
*
* @param zkPath the zookeeper path representing the cache pool
* @param zkClient zookeeper client talking to the zookeeper, it will only be used to read zookeeper
*/
def newUnmanagedZkCluster(
zkPath: String,
zkClient: ZooKeeperClient
) = new ZookeeperServerSetCluster(
ServerSets.create(zkClient, ZooKeeperUtils.EVERYONE_READ_CREATOR_ALL, zkPath)
) map { case addr: InetSocketAddress =>
CacheNode(addr.getHostName, addr.getPort, 1)
}
}
trait CachePoolCluster extends Cluster[CacheNode] {
/**
* Cache pool snapshot and future changes
* These two should only change when a key-ring rehashing is needed (e.g. cache pool
* initialization, migration, expansion, etc), thus we only let the underlying pool manager
* to change them
*/
private[this] val cachePool = new mutable.HashSet[CacheNode]
private[this] var cachePoolChanges = new Promise[Spool[Cluster.Change[CacheNode]]]
def snap: (Seq[CacheNode], Future[Spool[Cluster.Change[CacheNode]]]) = cachePool synchronized {
(cachePool.toSeq, cachePoolChanges)
}
/**
* TODO: pick up new rev of Cluster once it's ready
* Soon enough the Cluster will be defined in a way that we can directly managing the managers
* in a more flexible way, by then we should be able to do batch update we want here. For now,
* the updating pool is still done one by one.
*/
final protected[this] def updatePool(newSet: Set[CacheNode]) = cachePool synchronized {
val added = newSet &~ cachePool
val removed = cachePool &~ newSet
// modify cachePool and cachePoolChanges
removed foreach { node =>
cachePool -= node
appendUpdate(Cluster.Rem(node))
}
added foreach { node =>
cachePool += node
appendUpdate(Cluster.Add(node))
}
}
private[this] def appendUpdate(update: Cluster.Change[CacheNode]) = cachePool synchronized {
val newTail = new Promise[Spool[Cluster.Change[CacheNode]]]
cachePoolChanges() = Return(update *:: newTail)
cachePoolChanges = newTail
}
}
/**
* Cache pool config data object
*/
object CachePoolConfig {
val jsonCodec: Codec[CachePoolConfig] =
JsonCodec.create(classOf[CachePoolConfig],
new GsonBuilder().setExclusionStrategies(JsonCodec.getThriftExclusionStrategy()).create())
}
/**
* Cache pool config data format
* Currently this data format is only used by ZookeeperCachePoolManager to read the config data
* from zookeeper serverset parent node, and the expected cache pool size is the only attribute
* we need for now. In the future this can be extended for other config attributes like cache
* pool migrating state, backup cache servers list, or replication role, etc
*/
case class CachePoolConfig(cachePoolSize: Int, detectKeyRemapping: Boolean = false)
/**
* Cache pool based on a static list
* @param cacheNodeSet static set of cache nodes to construct the cluster
*/
class StaticCachePoolCluster(cacheNodeSet: Set[CacheNode]) extends CachePoolCluster {
// The cache pool will be updated once and only once, as the underlying pool never changes
updatePool(cacheNodeSet)
}
/**
* ZooKeeper based cache pool cluster companion object
*/
object ZookeeperCachePoolCluster {
private val CachePoolWaitCompleteTimeout = 10.seconds
private val BackupPoolFallBackTimeout = 10.seconds
}
/**
* Zookeeper based cache pool cluster with a serverset as the underlying pool.
* It will monitor the underlying serverset changes and report the detected underlying pool size.
* It will also monitor the serverset parent node for cache pool config data, cache pool cluster
* update will be triggered whenever cache config data change event happens.
*
* @param zkPath the zookeeper path representing the cache pool
* @param zkClient zookeeper client talking to the zookeeper, it will only be used to read zookeeper
* @param backupPool Optional, the backup static pool to use in case of ZK failure. Empty pool means
* the same as no backup pool.
* @param statsReceiver Optional, the destination to report the stats to
*/
class ZookeeperCachePoolCluster private[cacheresolver](
protected val zkPath: String,
protected val zkClient: ZooKeeperClient,
backupPool: Option[Set[CacheNode]] = None,
protected val statsReceiver: StatsReceiver = NullStatsReceiver)
extends CachePoolCluster with ZookeeperStateMonitor {
import ZookeeperCachePoolCluster._
private[this] val zkServerSetCluster =
new ZookeeperServerSetCluster(
ServerSets.create(zkClient, ZooKeeperUtils.EVERYONE_READ_CREATOR_ALL, zkPath)) map {
case addr: InetSocketAddress =>
CacheNode(addr.getHostName, addr.getPort, 1)
}
@volatile private[this] var underlyingSize = 0
zkServerSetCluster.snap match {
case (current, changes) =>
underlyingSize = current.size
changes foreach { spool =>
spool foreach {
case Cluster.Add(node) => underlyingSize += 1
case Cluster.Rem(node) => underlyingSize -= 1
}
}
}
// continuously gauging underlying cluster size
private[this] val underlyingSizeGauge = statsReceiver.addGauge("underlyingPoolSize") {
underlyingSize
}
// Fall back to the backup pool (if provided) after a certain timeout.
// Meanwhile, the first pool update will still proceed once it has successfully read the
// underlying pool config data and a complete set of pool members is ready, at which point it
// will overwrite the backup pool.
// This backup pool is mainly provided in case of a long zookeeper outage during which the
// cache client needs to be restarted.
backupPool foreach { pool =>
if (!pool.isEmpty) {
ready within (CachePoolCluster.timer, BackupPoolFallBackTimeout) onFailure {
_ => updatePool(pool)
}
}
}
override def applyZKData(data: Array[Byte]): Unit = {
if(data != null) {
val cachePoolConfig = CachePoolConfig.jsonCodec.deserialize(new ByteArrayInputStream(data))
// apply the cache pool config to the cluster
val expectedClusterSize = cachePoolConfig.cachePoolSize
val (snapshotSeq, snapshotChanges) = zkServerSetCluster.snap
// TODO: this can be blocking or non-blocking, depending on the protocol
// for now I'm making it blocking call as the current known scenario is that cache config data
// should be always exactly matching existing memberships, controlled by cache-team operator.
// It will only block for 10 seconds after which it should trigger alerting metrics and schedule
// another try
val newSet = Await.result(waitForClusterComplete(snapshotSeq.toSet, expectedClusterSize, snapshotChanges),
CachePoolWaitCompleteTimeout)
updatePool(newSet)
}
}
/**
* Wait for the current set to contain expected size of members.
* If the underlying zk cluster change is triggered by operator (for migration/expansion etc), the
* config data change should always happen after the operator has verified that this zk pool manager
* already see expected size of members, in which case this method would immediately return;
* however during the first time this pool manager is initialized, it's possible that the zkServerSetCluster
* hasn't caught up all existing members yet hence this method may need to wait for the future changes.
*/
private[this] def waitForClusterComplete(
currentSet: Set[CacheNode],
expectedSize: Int,
spoolChanges: Future[Spool[Cluster.Change[CacheNode]]]
): Future[Set[CacheNode]] = {
if (expectedSize == currentSet.size) {
Future.value(currentSet)
} else spoolChanges flatMap { spool =>
spool match {
case Cluster.Add(node) *:: tail =>
waitForClusterComplete(currentSet + node, expectedSize, tail)
case Cluster.Rem(node) *:: tail =>
// this should not happen in general as this code generally is only for first time pool
// manager initialization
waitForClusterComplete(currentSet - node, expectedSize, tail)
}
}
}
}
/**
* Zookeeper based cache node group with a serverset as the underlying pool.
* It will monitor the underlying serverset changes and report the detected underlying pool size.
* It will monitor the serverset parent node for cache pool config data, cache node group
* update will be triggered whenever cache config data change event happens.
*
* @param zkPath the zookeeper path representing the cache pool
* @param zkClient zookeeper client talking to the zookeeper, it will only be used to read zookeeper
* @param statsReceiver Optional, the destination to report the stats to
*/
class ZookeeperCacheNodeGroup(
protected val zkPath: String,
protected val zkClient: ZooKeeperClient,
protected val statsReceiver: StatsReceiver = NullStatsReceiver
) extends Group[CacheNode] with ZookeeperStateMonitor {
protected[finagle] val set = Var(Set[CacheNode]())
@volatile private var detectKeyRemapping = false
private val zkGroup =
new ZkGroup(new ServerSetImpl(zkClient, zkPath), zkPath) collect {
case inst if inst.getStatus == ALIVE =>
val ep = inst.getServiceEndpoint
val shardInfo = if (inst.isSetShard) Some(inst.getShard.toString) else None
CacheNode(ep.getHost, ep.getPort, 1, shardInfo)
}
private[this] val underlyingSizeGauge = statsReceiver.addGauge("underlyingPoolSize") {
zkGroup.members.size
}
def applyZKData(data: Array[Byte]) {
if(data != null) {
val cachePoolConfig = CachePoolConfig.jsonCodec.deserialize(new ByteArrayInputStream(data))
detectKeyRemapping = cachePoolConfig.detectKeyRemapping
// apply the cache pool config to the cluster
val expectedGroupSize = cachePoolConfig.cachePoolSize
if (expectedGroupSize != zkGroup.members.size)
throw new IllegalStateException("Underlying group size not equal to expected size")
set() = zkGroup.members
}
}
// when enabled, monitor and apply new members in case of pure cache node key remapping
override def applyZKChildren(children: List[String]) = if (detectKeyRemapping) {
val newMembers = zkGroup.members
if (newMembers.size != children.size)
throw new IllegalStateException("Underlying children size not equal to expected children size")
if (newMembers.size == members.size) {
val removed = (members &~ newMembers)
val added = (newMembers &~ members)
// pick up the diff only if new members contains exactly the same set of cache node keys,
// e.g. certain cache node key is re-assigned to another host
if (removed.forall(_.key.isDefined) && added.forall(_.key.isDefined) &&
removed.size == added.size && removed.map(_.key.get) == added.map(_.key.get)) {
set() = newMembers
}
}
}
}
|
jay-johnson/finagle
|
finagle-cacheresolver/src/main/scala/com/twitter/finagle/cacheresolver/CachePoolCluster.scala
|
Scala
|
apache-2.0
| 17,012 |
package org.lancegatlin
import scala.language.higherKinds
import scala.language.existentials
object Try4b {
case class Person(id: Int, name: String, age: Int)
trait Schema[C] {
class Field[A](val unapply: C => A) { self =>
def name: String = {
// org.lancegatlin.Try1$PersonSchema$id$
val name = getClass.getName
// Try1$PersonSchema$id
val simpleName = name.substring(name.lastIndexOf('.') + 1).dropRight(1)
// id
simpleName.substring(simpleName.lastIndexOf('$') + 1)
}
// Note: bug in this call for objects
//getClass.getSimpleName
override def toString = s"Field($name)"
}
def fields: Seq[Field[_]]
}
implicit object PersonSchema extends Schema[Person] {
object id extends Field(_.id)
object name extends Field(_.name)
object age extends Field(_.age)
val fields = Seq(id,name,age)
}
trait ToDialect[A,R] extends (A => R)
sealed trait Ops
case object Equals extends Ops
case object NotEquals extends Ops
case object LessThan extends Ops
case object LessThanEquals extends Ops
case object GreaterThan extends Ops
case object GreatThanEquals extends Ops
sealed trait Relator
case object And extends Relator
case object Or extends Relator
sealed trait Value[D,A]
case class Literal[D,A](value: A)(implicit val toDialect: ToDialect[A,D]) extends Value[D,A]
case class Field[D,A](field: Schema[_]#Field[A]) extends Value[D,A]
sealed trait Ast[D]
case class Test[D,A](op: Ops, _1: Value[D,A], _2: Value[D,A]) extends Ast[D]
case class R[D](r:Relator,_1: Ast[D], _2:Ast[D]) extends Ast[D]
implicit class PimpMyField[A](val self: Schema[_]#Field[A]) extends AnyVal {
def ===[D](value: A)(implicit d:ToDialect[A,D]) = Test(Equals, Field[D,A](self),Literal[D,A](value))
def <[D](value: A)(implicit d:ToDialect[A,D]) = Test(LessThan,Field[D,A](self),Literal[D,A](value))
}
implicit class PimpMyAst[D](val self: Ast[D]) extends AnyVal {
def and(other: Ast[D]) = R(And,self, other)
def or(other: Ast[D]) = R(Or,self, other)
}
case class Sql(value: String)
object SqlDialect {
implicit object sql_Int extends ToDialect[Int,Sql] {
override def apply(v1: Int): Sql = Sql(v1.toString)
}
implicit object sql_String extends ToDialect[String,Sql] {
override def apply(v1: String): Sql = Sql(s""""$v1"""")
}
}
val ast =
{
import PersonSchema._
import SqlDialect._
id === 1 and name === "asdf" and age < 30
}
def astToSql(e: Ast[Sql]) : String = {
val builder = new StringBuilder(256)
def valToString(_val: Value[Sql,_]) : String = {
_val match {
case l@Literal(value) => l.toDialect(value).value
case Field(field) => field.name
}
}
def opToString(op: Ops) : String = {
op match {
case Equals => " = "
case NotEquals => " != "
case LessThan => " < "
case LessThanEquals => " <= "
case GreaterThan => " > "
case GreatThanEquals => " >= "
}
}
def relatorToString(r: Relator) : String = {
r match {
case And => " AND "
case Or => " OR "
}
}
def loop(ast: Ast[Sql]) : Unit = {
ast match {
case Test(op,_1,_2) =>
builder
.append(valToString(_1))
.append(opToString(op))
.append(valToString(_2))
case R(relator,_1,_2) =>
loop(_1)
builder.append(relatorToString(relator))
loop(_2)
}
}
loop(e)
builder.result()
}
// TODO: this doesn't work if there is more than one schema involved
// such as when a joined collection returns a tuple
// TODO: for in-memory db, translating this to a C => Boolean is preferred
}
|
lancegatlin/caseclass_ql
|
src/main/scala/org/lancegatlin/Try4b.scala
|
Scala
|
mit
| 3,807 |
package com.github.j5ik2o.spetstore.adaptor.aggregate
import akka.actor.Props
import com.github.j5ik2o.spetstore.adaptor.eventbus.EventBus
import com.github.j5ik2o.spetstore.domain.purchase.CartAggregateProtocol.Query.{ GetStateRequest, GetStateResponse }
import com.github.j5ik2o.spetstore.domain.purchase.CartAggregateProtocol.Create.{ CartCreateCommandRequest, CartCreateEvent, CreateFailed, CreateSucceeded }
import com.github.j5ik2o.spetstore.domain.purchase.CartAggregateProtocol.Update.{ CartUpdateCommandRequest, CartUpdateEvent, UpdateFailed, UpdateSucceeded }
import com.github.j5ik2o.spetstore.domain.purchase.{ Cart, CartId }
import com.github.j5ik2o.spetstore.infrastructure.domainsupport.{ EntityFactory, EntityProtocol }
import com.github.j5ik2o.spetstore.infrastructure.domainsupport.EntityProtocol._
import scala.reflect.ClassTag
object CartAggregate {
def name(id: CartId): String = s"cart-${id.value}"
def props(eventBus: EventBus, id: CartId): Props = Props(new CartAggregate(eventBus, id))
}
final class CartAggregate(eventBus: EventBus, id: CartId)
extends AbstractAggregate[CartId, Cart, CartCreateEvent, CartUpdateEvent](eventBus, id, CartAggregate.name) {
override protected val entityFactory: EntityFactory[CartId, Cart, CartCreateEvent, CartUpdateEvent] = Cart
override def getSucceeded[Q <: EntityProtocol.GetStateRequest[CartId]: ClassTag](queryRequest: Q): GetStateResponse =
GetStateResponse(QueryResponseId(), queryRequest.id, id, state)
override def createSucceeded[C <: EntityProtocol.CommandRequest[CartId]: ClassTag](commandRequest: C): CommandSucceeded[CartId, Cart] =
CreateSucceeded(CommandResponseId(), commandRequest.id, id)
override def createFailed[C <: EntityProtocol.CommandRequest[CartId]: ClassTag](commandRequest: C): CommandFailed[CartId] =
CreateFailed(CommandResponseId(), commandRequest.id, id, new Exception)
override def updateSucceeded[C <: EntityProtocol.CommandRequest[CartId] : ClassTag](commandRequest: C): CommandSucceeded[CartId, Cart] =
UpdateSucceeded(CommandResponseId(), commandRequest.id, id)
override def updateFailed[C <: EntityProtocol.CommandRequest[CartId] : ClassTag](commandRequest: C): CommandFailed[CartId] =
UpdateFailed(CommandResponseId(), commandRequest.id, id, new Exception)
override def receiveRecover: Receive = {
case createEvent: CartCreateEvent => applyCreateEvent(createEvent)
case updateEvent: CartUpdateEvent => applyUpdateEvent(updateEvent)
}
override def receiveCommand: Receive = {
case queryRequest: GetStateRequest => getState(queryRequest)
case updateRequest: CartUpdateCommandRequest => updateState(updateRequest)
case createRequest: CartCreateCommandRequest => createState(createRequest)
}
}
|
j5ik2o/spetstore-cqrs-es-akka
|
write-interface/src/main/scala/com/github/j5ik2o/spetstore/adaptor/aggregate/CartAggregate.scala
|
Scala
|
mit
| 2,781 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.utils
import iht.config.AppConfig
import iht.constants.Constants
import play.api.mvc.{Request, Session}
object SessionHelper {
def ensureSessionHasNino(session: Session, userNino: Option[String])(implicit appConfig: AppConfig): Session =
CommonHelper.withValue(StringHelperFixture().getNino(userNino)) { currentNino =>
val optionSession = session.get(Constants.NINO).fold[Option[Session]](
None
) { foundNino =>
if (foundNino == currentNino) {
Option(session)
} else {
None
}
}
optionSession.fold(session + (Constants.NINO -> currentNino))(identity)
}
def getNinoFromSession(request:Request[_]): Option[String] = request.session.get(Constants.NINO)
}
|
hmrc/iht-frontend
|
app/iht/utils/SessionHelper.scala
|
Scala
|
apache-2.0
| 1,362 |
/*
Copyright 2011 Andrew Fowler <[email protected]>
This file is part of Terinology2ODM Terminology2ODMConverter.
Terminology2ODMConverter is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Terminology2ODMConverter is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Terminology2ODMConverter. If not, see <http://www.gnu.org/licenses/>.
*/
package model
trait OIDEntity {
var oid : String = null
}
|
rwynne/ops-data-conversion
|
ODM_Converter/src/model/OIDEntity.scala
|
Scala
|
bsd-3-clause
| 853 |
/*
* Copyright 2007-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package mapper
import java.util.Locale
import org.specs2.mutable.Specification
import org.specs2.specification.BeforeExample
import common._
import json._
import util._
import Helpers._
import http.LiftRules
import http.provider.HTTPRequest
/**
* Systems under specification for Mapper. The model classes here are
* defined in MapperSpecsModel.scala
*/
class MapperSpec extends Specification with BeforeExample {
"Mapper Specification".title
// Do everything in order.
sequential
// Make sure we have everything configured first
MapperSpecsModel.setup()
def providers = DbProviders.H2MemoryProvider :: Nil
/*
private def logDBStuff(log: DBLog, len: Long) {
println(" in log stuff "+log.getClass.getName)
log match {
case null =>
case _ => println(log.allEntries)
}
}
DB.addLogFunc(logDBStuff)
*/
// if (!DB.loggingEnabled_? && doLog) DB.addLogFunc(logDBStuff)
def before = MapperSpecsModel.cleanup() // before each example
providers.foreach(provider => {
try {
provider.setupDB
("Mapper for " + provider.name) should {
"schemify" in {
val elwood = SampleModel.find(By(SampleModel.firstName, "Elwood")).open_!
val madeline = SampleModel.find(By(SampleModel.firstName, "Madeline")).open_!
val archer = SampleModel.find(By(SampleModel.firstName, "Archer")).open_!
val notNull = SampleModel.find(By(SampleModel.firstName, "NotNull")).open_!
elwood.firstName.is must_== "Elwood"
madeline.firstName.is must_== "Madeline"
archer.firstName.is must_== "Archer"
archer.moose.is must_== Empty
notNull.moose.is must_== Full(99L)
val disabled = SampleModel.find(By(SampleModel.status, SampleStatus.Disabled))
val meow = SampleTag.find(By(SampleTag.tag, "Meow")).open_!
meow.tag.is must_== "Meow"
elwood.id.is must be_<(madeline.id.is).eventually
}
"non-snake connection should lower case default table & column names" in {
SampleModel.firstName.name must_== "firstName"
SampleModel.firstName.dbColumnName must_== "firstname"
SampleModel.dbTableName must_== "samplemodel"
}
"should use displayNameCalculator for displayName" in {
val localeCalculator = LiftRules.localeCalculator
SampleModel.firstName.displayName must_== "DEFAULT:SampleModel.firstName"
LiftRules.localeCalculator = (request: Box[HTTPRequest]) => request.flatMap(_.locale)
.openOr(new Locale("da", "DK"))
SampleModel.firstName.displayName must_== "da_DK:SampleModel.firstName"
LiftRules.localeCalculator = localeCalculator
success
}
"snake connection should snakify default table & column names" in {
SampleModelSnake.firstName.name must_== "firstName"
SampleModelSnake.firstName.dbColumnName must_== "first_name"
SampleModelSnake.dbTableName must_== "sample_model_snake"
}
"user defined names are not changed" in {
SampleTag.extraColumn.name must_== "extraColumn"
SampleTag.extraColumn.dbColumnName must_== "AnExtraColumn"
Mixer.dbTableName must_== "MIXME_UP"
}
"basic JSON encoding/decoding works" in {
val m = SampleModel.findAll().head
val json = m.encodeAsJson()
val rebuilt = SampleModel.buildFromJson(json)
m must_== rebuilt
}
"basic JSON encoding/decoding works with snake_case" in {
val m = SampleModelSnake.findAll().head
val json = m.encodeAsJson()
val rebuilt = SampleModelSnake.buildFromJson(json)
m must_== rebuilt
}
"Can JSON decode and write back" in {
val m = SampleModel.find(2).open_!
val json = m.encodeAsJson()
val rebuilt = SampleModel.buildFromJson(json)
rebuilt.firstName("yak").save
val recalled = SampleModel.find(2).open_!
recalled.firstName.is must_== "yak"
}
"You can put stuff in a Set" in {
val m1 = SampleModel.find(1).open_!
val m2 = SampleModel.find(1).open_!
(m1 == m2) must_== true
val s1 = Set(SampleModel.findAll: _*)
s1.contains(m1) must_== true
val s2 = s1 ++ SampleModel.findAll
s1.size must_== s2.size
}
"Like works" in {
val oo = SampleTag.findAll(Like(SampleTag.tag, "%oo%"))
(oo.length > 0) must beTrue
for (t <- oo)
(t.tag.is.indexOf("oo") >= 0) must beTrue
for (t <- oo)
t.model.cached_? must beFalse
val mm = SampleTag.findAll(Like(SampleTag.tag, "M%"))
(mm.length > 0) must beTrue
for (t <- mm)
(t.tag.is.startsWith("M")) must beTrue
for (t <- mm) yield {
t.model.cached_? must beFalse
t.model.obj
t.model.cached_? must beTrue
}
}
"Nullable Long works" in {
SampleModel.create.firstName("fruit").moose(Full(77L)).save
SampleModel.findAll(By(SampleModel.moose, Empty)).length must_== 3L
SampleModel.findAll(NotBy(SampleModel.moose, Empty)).length must_== 2L
SampleModel.findAll(NotNullRef(SampleModel.moose)).length must_== 2L
SampleModel.findAll(NullRef(SampleModel.moose)).length must_== 3L
}
"enforce NOT NULL" in {
val nullString: String = null
SampleModel.create.firstName("Not Null").notNull(nullString).save must throwA[java.sql.SQLException]
}
"enforce FK constraint on DefaultConnection" in {
val supportsFK = DB.use(DefaultConnectionIdentifier) { conn => conn.driverType.supportsForeignKeys_? }
if (!supportsFK) skipped("Driver %s does not support FK constraints".format(provider))
SampleTag.create.model(42).save must throwA[java.sql.SQLException]
}
"not enforce FK constraint on SnakeConnection" in {
SampleTagSnake.create.model(42).save must_== true
}
"Precache works" in {
val oo = SampleTag.findAll(By(SampleTag.tag, "Meow"), PreCache(SampleTag.model))
(oo.length > 0) must beTrue
for (t <- oo) yield t.model.cached_? must beTrue
}
"Precache works with OrderBy" in {
if ((provider ne DbProviders.DerbyProvider)
&& (provider ne DbProviders.MySqlProvider)) {
// this doesn't work for Derby, but it's a derby bug
// nor does it work in MySQL, but it's a MySQL limitation
// try { provider.setupDB } catch { case e => skip(e.getMessage) }
val dogs = Dog.findAll(By(Dog.name, "fido"), OrderBy(Dog.name, Ascending), PreCache(Dog.owner))
val oo = SampleTag.findAll(OrderBy(SampleTag.tag, Ascending), MaxRows(2), PreCache(SampleTag.model))
(oo.length > 0) must beTrue
for (t <- oo) t.model.cached_? must beTrue
}
success
}
"Non-deterministic Precache works" in {
val dogs = Dog.findAll(By(Dog.name, "fido"), PreCache(Dog.owner, false))
val oo = SampleTag.findAll(By(SampleTag.tag, "Meow"), PreCache(SampleTag.model, false))
(oo.length > 0) must beTrue
for (t <- oo) yield t.model.cached_? must beTrue
}
"Non-deterministic Precache works with OrderBy" in {
val dogs = Dog.findAll(By(Dog.name, "fido"), OrderBy(Dog.name, Ascending), PreCache(Dog.owner, false))
val oo = SampleTag.findAll(OrderBy(SampleTag.tag, Ascending), MaxRows(2), PreCache(SampleTag.model, false))
(oo.length > 0) must beTrue
for (t <- oo) yield t.model.cached_? must beTrue
}
"work with Mixed case" in {
val elwood = Mixer.find(By(Mixer.name, "Elwood")).open_!
val madeline = Mixer.find(By(Mixer.name, "Madeline")).open_!
val archer = Mixer.find(By(Mixer.name, "Archer")).open_!
elwood.name.is must_== "Elwood"
madeline.name.is must_== "Madeline"
archer.name.is must_== "Archer"
elwood.weight.is must_== 33
madeline.weight.is must_== 44
archer.weight.is must_== 105
}
"work with Mixed case update and delete" in {
val elwood = Mixer.find(By(Mixer.name, "Elwood")).open_!
elwood.name.is must_== "Elwood"
elwood.name("FruitBar").weight(966).save
val fb = Mixer.find(By(Mixer.weight, 966)).open_!
fb.name.is must_== "FruitBar"
fb.weight.is must_== 966
fb.delete_!
Mixer.find(By(Mixer.weight, 966)).isDefined must_== false
Mixer.find(By(Mixer.name, "FruitBar")).isDefined must_== false
Mixer.find(By(Mixer.name, "Elwood")).isDefined must_== false
}
"work with Mixed case update and delete for Dog2" in {
val elwood = Dog2.find(By(Dog2.name, "Elwood")).open_!
elwood.name.is must_== "Elwood"
elwood.name("FruitBar").actualAge(966).save
val fb = Dog2.find(By(Dog2.actualAge, 966)).open_!
fb.name.is must_== "FruitBar"
fb.actualAge.is must_== 966
fb.delete_!
Dog2.find(By(Dog2.actualAge, 966)).isDefined must_== false
Dog2.find(By(Dog2.name, "FruitBar")).isDefined must_== false
Dog2.find(By(Dog2.name, "Elwood")).isDefined must_== false
}
"Non-autogenerated primary key items should be savable after a field has been changed" in {
val item = TstItem.create.tmdbId(1L).saveMe
item.name("test").save must_== true
}
"we can read and write String primary keys" in {
val i1 = Thing.create.name("frog").saveMe
val i2 = Thing.create.name("dog").saveMe
Thing.find(By(Thing.thing_id, i1.thing_id.is)).open_!.name.is must_== "frog"
Thing.find(By(Thing.thing_id, i2.thing_id.is)).open_!.name.is must_== "dog"
}
"Precache works with OrderBy with Mixed Case" in {
if ((provider ne DbProviders.DerbyProvider)
&& (provider ne DbProviders.MySqlProvider)) {
// this doesn't work for Derby, but it's a derby bug
// nor does it work in MySQL, but it's a MySQL limitation
// try { provider.setupDB } catch { case e => skip(e.getMessage) }
val dogs = Dog2.findAll(By(Dog2.name, "fido"), OrderBy(Dog2.name, Ascending), PreCache(Dog2.owner))
val oo = SampleTag.findAll(OrderBy(SampleTag.tag, Ascending), MaxRows(2), PreCache(SampleTag.model))
(oo.length > 0) must beTrue
for (t <- oo) yield t.model.cached_? must beTrue
}
success
}
"Non-deterministic Precache works with Mixed Case" in {
val dogs = Dog2.findAll(By(Dog2.name, "fido"), PreCache(Dog2.owner, false))
val oo = SampleTag.findAll(By(SampleTag.tag, "Meow"), PreCache(SampleTag.model, false))
(oo.length > 0) must beTrue
for (t <- oo) yield t.model.cached_? must beTrue
}
"CreatedAt and UpdatedAt work" in {
val now = Helpers.now
val dog = Dog2.find().open_!
val oldUpdate = dog.updatedAt.is
val d1 = (now.getTime - dog.createdAt.get.getTime) / 100000L
d1 must_== 0L
val d2 = (now.getTime - dog.updatedAt.get.getTime) / 100000L
d2 must_== 0L
dog.name("ralph").save
val dog2 = Dog2.find(dog.dog2id.is).open_!
dog.createdAt.is.getTime must_== dog2.createdAt.is.getTime
oldUpdate.getTime must_!= dog2.updatedAt.is.getTime
}
"Non-deterministic Precache works with OrderBy with Mixed Case" in {
val dogs = Dog2.findAll(By(Dog2.name, "fido"), OrderBy(Dog2.name, Ascending), PreCache(Dog2.owner, false))
val oo = SampleTag.findAll(OrderBy(SampleTag.tag, Ascending), MaxRows(2), PreCache(SampleTag.model, false))
(oo.length > 0) must beTrue
for (t <- oo) yield t.model.cached_? must beTrue
}
"Save flag results in update rather than insert" in {
val elwood = SampleModel.find(By(SampleModel.firstName, "Elwood")).open_!
elwood.firstName.is must_== "Elwood"
elwood.firstName("Frog").save
val frog = SampleModel.find(By(SampleModel.firstName, "Frog")).open_!
frog.firstName.is must_== "Frog"
SampleModel.findAll().length must_== 4
SampleModel.find(By(SampleModel.firstName, "Elwood")).isEmpty must_== true
}
"accept a Seq[T] as argument to ByList query parameter" in {
// See http://github.com/dpp/liftweb/issues#issue/77 for original request
val seq: Seq[String] = List("Elwood", "Archer")
val result = SampleModel.findAll(ByList(SampleModel.firstName, seq))
result.length must_== 2
}
}
} catch {
case e if !provider.required_? => skipped("Provider %s not available: %s".format(provider, e))
case _ => skipped
}
})
}
|
pbrant/framework
|
persistence/mapper/src/test/scala/net/liftweb/mapper/MapperSpec.scala
|
Scala
|
apache-2.0
| 13,422 |
package com.arcusys.valamis.gradebook.storage
import com.arcusys.valamis.gradebook.model.PackageGrade
trait PackageGradesStorage {
def get(userId: Long, packageId: Long): Option[PackageGrade]
def delete(userId: Long, packageId: Long): Unit
def modify(entity: PackageGrade): PackageGrade
def create(entity: PackageGrade): PackageGrade
}
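// Editorial sketch, not part of the original file: a naive in-memory implementation of the
// trait above, handy for tests. It assumes PackageGrade exposes the userId/packageId used as
// the key, which is not shown in this file.
//
//   class InMemoryPackageGradesStorage extends PackageGradesStorage {
//     private val grades = scala.collection.concurrent.TrieMap.empty[(Long, Long), PackageGrade]
//     def get(userId: Long, packageId: Long): Option[PackageGrade] = grades.get((userId, packageId))
//     def delete(userId: Long, packageId: Long): Unit = grades.remove((userId, packageId))
//     def modify(entity: PackageGrade): PackageGrade = { grades((entity.userId, entity.packageId)) = entity; entity }
//     def create(entity: PackageGrade): PackageGrade = modify(entity)
//   }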
|
ViLPy/Valamis
|
valamis-gradebook/src/main/scala/com/arcusys/valamis/gradebook/storage/PackageGradesStorage.scala
|
Scala
|
lgpl-3.0
| 349 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.shuffle
import org.apache.spark.{ShuffleDependency, TaskContext}
/**
* Pluggable interface for shuffle systems. A ShuffleManager is created in SparkEnv on the driver
* and on each executor, based on the spark.shuffle.manager setting. The driver registers shuffles
* with it, and executors (or tasks running locally in the driver) can ask to read and write data.
*
* NOTE: this will be instantiated by SparkEnv so its constructor can take a SparkConf and
* boolean isDriver as parameters.
*/
private[spark] trait ShuffleManager {
/**
* Register a shuffle with the manager and obtain a handle for it to pass to tasks.
*/
def registerShuffle[K, V, C](
shuffleId: Int,
dependency: ShuffleDependency[K, V, C]): ShuffleHandle
/** Get a writer for a given partition. Called on executors by map tasks. */
def getWriter[K, V](
handle: ShuffleHandle,
mapId: Long,
context: TaskContext,
metrics: ShuffleWriteMetricsReporter): ShuffleWriter[K, V]
/**
* Get a reader for a range of reduce partitions (startPartition to endPartition-1, inclusive).
* Called on executors by reduce tasks.
*/
def getReader[K, C](
handle: ShuffleHandle,
startPartition: Int,
endPartition: Int,
context: TaskContext,
metrics: ShuffleReadMetricsReporter): ShuffleReader[K, C]
/**
* Get a reader for a range of reduce partitions (startPartition to endPartition-1, inclusive)
* that are produced by one specific mapper. Called on executors by reduce tasks.
*/
def getReaderForOneMapper[K, C](
handle: ShuffleHandle,
mapIndex: Int,
startPartition: Int,
endPartition: Int,
context: TaskContext,
metrics: ShuffleReadMetricsReporter): ShuffleReader[K, C]
/**
* Remove a shuffle's metadata from the ShuffleManager.
   * @return true if the metadata was removed successfully, otherwise false.
*/
def unregisterShuffle(shuffleId: Int): Boolean
/**
* Return a resolver capable of retrieving shuffle block data based on block coordinates.
*/
def shuffleBlockResolver: ShuffleBlockResolver
/** Shut down this ShuffleManager. */
def stop(): Unit
}
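// A minimal sketch of how an implementation is selected (the class name is hypothetical;
// Spark instantiates the configured class reflectively on the driver and each executor):
//
//   val conf = new org.apache.spark.SparkConf()
//     .set("spark.shuffle.manager", "org.example.MyShuffleManager")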
|
jkbradley/spark
|
core/src/main/scala/org/apache/spark/shuffle/ShuffleManager.scala
|
Scala
|
apache-2.0
| 3,004 |
package xbee
import akka.io._
import akka.util.ByteString
import akka.actor.{Props, Actor, ActorRef}
import com.github.jodersky.flow.Serial.Received
import java.nio.ByteBuffer
class LengthFieldFrame extends SymmetricPipelineStage[PipelineContext, ByteString, ByteString] {
var buffer = None: Option[ByteString]
val HEADER_LENGTH = 4
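  // Frame layout handled below: 0x7e start delimiter, two length bytes (MSB, LSB) covering
  // the API identifier and payload, the frame data itself, and a trailing checksum byte.
  // HEADER_LENGTH accounts for the delimiter, the two length bytes and the checksum.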
def extractFrames(bs: ByteString, acc: List[ByteString]): (Option[ByteString], Seq[ByteString]) = {
    def size(a: Byte, b: Byte): Int = ((a & 0xFF) << 8) | (b & 0xFF) // mask out sign extension of the length bytes
bs.toList match {
case 0x7e :: Nil => (Some(bs), acc)
      case 0x7e :: rest if rest.size < 3 => (Some(bs), acc)
case 0x7e :: a :: b :: api :: rest if rest.size < size(a, b) => (Some(bs), acc)
case 0x7e :: s0:: s1 :: api :: rest if rest.size >= size(s0, s1) => {
val frameLength = size(s0, s1) + HEADER_LENGTH
val frame = bs.take(frameLength)
extractFrames(bs.drop(frameLength), acc ++ Some(frame))
}
case Nil => (None, acc)
case _ => extractFrames(bs.drop(1), acc)
}
}
def apply(ctx: PipelineContext) = new SymmetricPipePair[ByteString, ByteString] {
override def commandPipeline = {
bs: ByteString ⇒
???
}
def eventPipeline = {
bs: ByteString ⇒ {
val data = if (buffer.isEmpty) bs else buffer.get ++ bs
val (nb, frames) = extractFrames(data, Nil)
buffer = nb
frames match {
case Nil ⇒ Nil
case one :: Nil ⇒ ctx.singleEvent(one)
case many ⇒ many reverseMap (Left(_))
}
}
}
}
}
class XBeeFrame extends SymmetricPipelineStage[PipelineContext, Response, ByteString] {
def apply(ctx: PipelineContext) = new SymmetricPipePair[Response, ByteString] {
def eventPipeline = {
bs: ByteString ⇒ {
ctx.singleEvent(XBeePacket2.unapply(bs))
}
}
override def commandPipeline = {
bs: Response ⇒
???
}
}
}
sealed trait Response
case class AT(frame: Array[Byte]) extends Response
case class AT_RESPONSE(frame: ByteString) extends Response
//class XBeePacket(r: Response)
object XBeePacket2 {
def unapply(
s: ByteString) = {
val a = s.toArray
if (a.head != 0x7e) throw new Error
val frameSize = ByteBuffer.wrap(a.slice(1, 3)).asShortBuffer().get()
val apiIdentifier = a(3)
val frame = a.slice(4, frameSize + 3)
val checksum = a.last
val verify = frame.foldLeft(apiIdentifier) { (a, b) =>
( b + a).toByte
} + checksum.toByte
val t = verify.toByte == 0xFF.toByte
apiIdentifier match {
case 0x08 => AT(frame)
}
}
}
//class XBeeProcessor(cmds: ActorRef, evts: ActorRef) extends Actor {
//
// import context._
//
// val reader = actorOf(Props[Lo])
//
// val ctx = new HasActorContext {
// def getContext = XBeeProcessor.this.context
// }
//
// val pipeline = PipelineFactory.buildWithSinkFunctions(ctx,
// new XBeeFrame >> new LengthFieldFrame
// )(cmd ⇒ reader ! cmd.get,
// evt ⇒ reader ! evt.get)
//
// def receive = {
// //case m: Message ⇒ pipeline.injectCommand(m)
// case b: ByteString ⇒ pipeline.injectEvent(b)
// case t: TickGenerator.Trigger ⇒ pipeline.managementCommand(t)
// case Received(data) => pipeline.injectEvent(data)
// }
//}
|
charroch/scala-bee
|
src/main/scala/xbee/Frame.scala
|
Scala
|
apache-2.0
| 3,352 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive
import java.net.{InetAddress, InterfaceAddress, NetworkInterface}
import scala.collection.JavaConverters._
import scala.util.control.Breaks._
import org.apache.spark.SparkContext
import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
import org.apache.carbondata.common.logging.LogServiceFactory
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.block.Distributable
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.processing.util.CarbonLoaderUtil
object DistributionUtil {
@transient
val LOGGER = LogServiceFactory.getLogService(this.getClass.getCanonicalName)
/*
* minimum required registered resource for starting block distribution
*/
lazy val minRegisteredResourceRatio: Double = {
val value: String = CarbonProperties.getInstance()
.getProperty(CarbonCommonConstants.CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO,
CarbonCommonConstants.CARBON_SCHEDULER_MIN_REGISTERED_RESOURCES_RATIO_DEFAULT)
java.lang.Double.parseDouble(value)
}
/*
* node registration wait time
*/
lazy val dynamicAllocationSchTimeOut: Integer = {
val value: String = CarbonProperties.getInstance()
.getProperty(CarbonCommonConstants.CARBON_DYNAMIC_ALLOCATION_SCHEDULER_TIMEOUT,
CarbonCommonConstants.CARBON_DYNAMIC_ALLOCATION_SCHEDULER_TIMEOUT_DEFAULT)
// milli second
java.lang.Integer.parseInt(value) * 1000
}
  /*
   * This method will return the list of executors in the cluster.
   * For this we take the memory status of all nodes with getExecutorMemoryStatus
   * and extract the keys. getExecutorMemoryStatus also returns the driver memory.
   * In client mode the driver runs on localhost, and an executor can be spawned on the
   * same driver node, so we remove the first occurrence of localhost when retrieving
   * the executor list.
   */
def getNodeList(sparkContext: SparkContext): Array[String] = {
val arr = sparkContext.getExecutorMemoryStatus.map { kv =>
kv._1.split(":")(0)
}.toSeq
val localhostIPs = getLocalhostIPs
val selectedLocalIPList = localhostIPs.filter(arr.contains(_))
val nodelist: List[String] = withoutDriverIP(arr.toList)(selectedLocalIPList.contains(_))
val masterMode = sparkContext.getConf.get("spark.master")
if (nodelist.nonEmpty) {
// Specific for Yarn Mode
if ("yarn-cluster".equals(masterMode) || "yarn-client".equals(masterMode)) {
val nodeNames = nodelist.map { x =>
val addr = InetAddress.getByName(x)
addr.getHostName
}
nodeNames.toArray
} else {
// For Standalone cluster, node IPs will be returned.
nodelist.toArray
}
} else {
Seq(InetAddress.getLocalHost.getHostName).toArray
}
}
private def getLocalhostIPs = {
val iface = NetworkInterface.getNetworkInterfaces
var addresses: List[InterfaceAddress] = List.empty
while (iface.hasMoreElements) {
addresses = iface.nextElement().getInterfaceAddresses.asScala.toList ++ addresses
}
val inets = addresses.map(_.getAddress.getHostAddress)
inets
}
  /*
   * This method will remove the first occurrence of any of the IPs matched by the predicate.
   * E.g. l = List(Master, slave1, Master, slave2, slave3) is the list of nodes where the first
   * Master is the driver node.
   * Calling withoutDriverIP(l)(x => x == "Master") removes that first occurrence of Master.
   * The resulting list is List(slave1, Master, slave2, slave3).
   */
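  // For example, with hypothetical IPs as they come back from getExecutorMemoryStatus:
  //   withoutDriverIP(List("10.0.0.1", "10.0.0.2", "10.0.0.1"))(_ == "10.0.0.1")
  //   // => List("10.0.0.2", "10.0.0.1") -- only the first (driver) occurrence is dropped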
def withoutDriverIP[A](xs: List[A])(p: A => Boolean): List[A] = {
xs match {
case x :: rest => if (p(x)) {
rest
} else {
x :: withoutDriverIP(rest)(p)
}
case _ => Nil
}
}
  /**
   * Checks whether the number of nodes holding the identified blocks exceeds the configured
   * number of executors; if it does, only the configured number of executors is requested.
   *
   * @param blockList the blocks in the identified segments
   * @param sparkContext
   * @return
   */
def ensureExecutorsAndGetNodeList(blockList: Seq[Distributable],
sparkContext: SparkContext): Seq[String] = {
val nodeMapping = CarbonLoaderUtil.nodeBlockMapping(blockList.asJava)
ensureExecutorsByNumberAndGetNodeList(nodeMapping, blockList, sparkContext)
}
def ensureExecutorsByNumberAndGetNodeList(nodesOfData: Int,
sparkContext: SparkContext): Seq[String] = {
val confExecutors: Int = getConfiguredExecutors(sparkContext)
LOGGER.info(s"Executors configured : $confExecutors")
val requiredExecutors = if (nodesOfData < 1 || nodesOfData > confExecutors) {
confExecutors
} else {
nodesOfData
}
// request for starting the number of required executors
ensureExecutors(sparkContext, requiredExecutors)
getDistinctNodesList(sparkContext, requiredExecutors)
}
/**
* This method will ensure that the required/configured number of executors are requested
* for processing the identified blocks
*
* @param nodeMapping
* @param blockList
* @param sparkContext
* @return
*/
private def ensureExecutorsByNumberAndGetNodeList(
nodeMapping: java.util.Map[String, java.util.List[Distributable]],
blockList: Seq[Distributable],
sparkContext: SparkContext): Seq[String] = {
val nodesOfData = nodeMapping.size()
val confExecutors: Int = getConfiguredExecutors(sparkContext)
LOGGER.info(s"Executors configured : $confExecutors")
val requiredExecutors = if (nodesOfData < 1) {
1
} else if (nodesOfData > confExecutors) {
confExecutors
} else if (confExecutors > nodesOfData) {
var totalExecutorsToBeRequested = nodesOfData
// If total number of blocks are greater than the nodes identified then ensure
// that the configured number of max executors can be opened based on the difference of
// block list size and nodes identified
if (blockList.size > nodesOfData) {
// e.g 1. blockList size = 40, confExecutors = 6, then all 6 executors
// need to be opened
// 2. blockList size = 4, confExecutors = 6, then
// total 4 executors need to be opened
if (blockList.size > confExecutors) {
totalExecutorsToBeRequested = confExecutors
} else {
totalExecutorsToBeRequested = blockList.size
}
}
LOGGER.info(s"Total executors requested: $totalExecutorsToBeRequested")
totalExecutorsToBeRequested
} else {
nodesOfData
}
// request for starting the number of required executors
ensureExecutors(sparkContext, requiredExecutors, blockList.size)
getDistinctNodesList(sparkContext, requiredExecutors)
}
/**
* This method will return the configured executors
*
* @param sparkContext
* @return
*/
private def getConfiguredExecutors(sparkContext: SparkContext): Int = {
var confExecutors: Int = 0
if (sparkContext.getConf.getBoolean("spark.dynamicAllocation.enabled", false)) {
// default value for spark.dynamicAllocation.maxExecutors is infinity
confExecutors = sparkContext.getConf.getInt("spark.dynamicAllocation.maxExecutors", 1)
LOGGER.info(s"spark.dynamicAllocation.maxExecutors property is set to = $confExecutors")
} else {
// default value for spark.executor.instances is 2
confExecutors = sparkContext.getConf.getInt("spark.executor.instances", 1)
LOGGER.info(s"spark.executor.instances property is set to = $confExecutors")
}
confExecutors
}
/**
* This method will return the distinct nodes list
*
* @param sparkContext
* @param requiredExecutors
* @return
*/
private def getDistinctNodesList(sparkContext: SparkContext,
requiredExecutors: Int): Seq[String] = {
val startTime = System.currentTimeMillis()
var nodes = DistributionUtil.getNodeList(sparkContext)
// calculate the number of times loop has to run to check for starting
// the requested number of executors
val threadSleepTime =
CarbonCommonConstants.CARBON_DYNAMIC_ALLOCATION_SCHEDULER_THREAD_SLEEP_TIME
val maxRetryCount = calculateMaxRetry
var maxTimes = maxRetryCount
breakable {
while (nodes.length < requiredExecutors && maxTimes > 0) {
Thread.sleep(threadSleepTime);
nodes = DistributionUtil.getNodeList(sparkContext)
maxTimes = maxTimes - 1;
val resourceRatio = (nodes.length.toDouble / requiredExecutors)
if (resourceRatio.compareTo(minRegisteredResourceRatio) >= 0) {
break
}
}
}
val timDiff = System.currentTimeMillis() - startTime
LOGGER.info(s"Total Time taken to ensure the required executors : $timDiff")
LOGGER.info(s"Time elapsed to allocate the required executors: " +
s"${(maxRetryCount - maxTimes) * threadSleepTime}")
nodes.distinct.toSeq
}
/**
* Requesting the extra executors other than the existing ones.
*
* @param sc sparkContext
* @param requiredExecutors required number of executors to be requested
* @param localityAwareTasks The number of pending tasks which is locality required
* @param hostToLocalTaskCount A map to store hostname with its possible task number running on it
* @return
*/
def ensureExecutors(sc: SparkContext,
requiredExecutors: Int,
localityAwareTasks: Int = 0,
hostToLocalTaskCount: Map[String, Int] = Map.empty): Boolean = {
sc.schedulerBackend match {
case b: CoarseGrainedSchedulerBackend =>
if (requiredExecutors > 0) {
LOGGER.info(s"Requesting total executors: $requiredExecutors")
b.requestTotalExecutors(requiredExecutors, localityAwareTasks, hostToLocalTaskCount)
}
true
case _ =>
false
}
}
/**
* This method will calculate how many times a loop will run with an interval of given sleep
* time to wait for requested executors to come up
*
* @return The max retry count
*/
def calculateMaxRetry(): Int = {
val remainder = dynamicAllocationSchTimeOut % CarbonCommonConstants
.CARBON_DYNAMIC_ALLOCATION_SCHEDULER_THREAD_SLEEP_TIME
val retryCount: Int = dynamicAllocationSchTimeOut / CarbonCommonConstants
.CARBON_DYNAMIC_ALLOCATION_SCHEDULER_THREAD_SLEEP_TIME
if (remainder > 0) {
retryCount + 1
} else {
retryCount
}
}
}
|
manishgupta88/carbondata
|
integration/spark-common/src/main/scala/org/apache/spark/sql/hive/DistributionUtil.scala
|
Scala
|
apache-2.0
| 11,268 |
package akka.persistence.jdbc.state
import scala.concurrent.Future
import akka.actor.ActorSystem
import akka.Done
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.matchers.should.Matchers
object ScaladslSnippets extends ScalaFutures with Matchers {
def create(): Unit = {
// #create
import akka.persistence.jdbc.testkit.scaladsl.SchemaUtils
implicit val system: ActorSystem = ActorSystem("example")
val _: Future[Done] = SchemaUtils.createIfNotExists()
// #create
}
def durableStatePlugin(): Unit = {
implicit val system: ActorSystem = ActorSystem()
// #jdbc-durable-state-store
import akka.persistence.state.DurableStateStoreRegistry
import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore
val store = DurableStateStoreRegistry
.get(system)
.durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)
// #jdbc-durable-state-store
}
def getObject(): Unit = {
implicit val system: ActorSystem = ActorSystem()
// #get-object
import akka.persistence.state.DurableStateStoreRegistry
import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore
import akka.persistence.state.scaladsl.GetObjectResult
val store = DurableStateStoreRegistry
.get(system)
.durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)
val futureResult: Future[GetObjectResult[String]] = store.getObject("InvalidPersistenceId")
futureResult.futureValue.value shouldBe None
// #get-object
}
def upsertAndGetObject(): Unit = {
implicit val system: ActorSystem = ActorSystem()
implicit val e = system.dispatcher
// #upsert-get-object
import akka.persistence.state.DurableStateStoreRegistry
import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore
import akka.persistence.state.scaladsl.GetObjectResult
val store = DurableStateStoreRegistry
.get(system)
.durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)
val v: Future[GetObjectResult[String]] =
for {
n <- store.upsertObject("p234", 1, "a valid string", "t123")
_ = n shouldBe akka.Done
g <- store.getObject("p234")
_ = g.value shouldBe Some("a valid string")
u <- store.upsertObject("p234", 2, "updated valid string", "t123")
_ = u shouldBe akka.Done
h <- store.getObject("p234")
} yield h
v.futureValue.value shouldBe Some("updated valid string")
// #upsert-get-object
}
def deleteObject(): Unit = {
implicit val system: ActorSystem = ActorSystem()
// #delete-object
import akka.persistence.state.DurableStateStoreRegistry
import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore
val store = DurableStateStoreRegistry
.get(system)
.durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)
store.deleteObject("p123").futureValue shouldBe Done
store.getObject("p123").futureValue.value shouldBe None
// #delete-object
}
def currentChanges(): Unit = {
implicit val system: ActorSystem = ActorSystem()
// #current-changes
import akka.NotUsed
import akka.stream.scaladsl.Source
import akka.persistence.state.DurableStateStoreRegistry
import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore
import akka.persistence.query.{ DurableStateChange, NoOffset }
val store = DurableStateStoreRegistry
.get(system)
.durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)
val willCompleteTheStream: Source[DurableStateChange[String], NotUsed] =
store.currentChanges("tag-1", NoOffset)
// #current-changes
}
def changes(): Unit = {
implicit val system: ActorSystem = ActorSystem()
// #changes
import akka.NotUsed
import akka.stream.scaladsl.Source
import akka.persistence.state.DurableStateStoreRegistry
import akka.persistence.jdbc.state.scaladsl.JdbcDurableStateStore
import akka.persistence.query.{ DurableStateChange, NoOffset }
val store = DurableStateStoreRegistry
.get(system)
.durableStateStoreFor[JdbcDurableStateStore[String]](JdbcDurableStateStore.Identifier)
val willNotCompleteTheStream: Source[DurableStateChange[String], NotUsed] =
store.changes("tag-1", NoOffset)
// #changes
}
}
|
dnvriend/akka-persistence-jdbc
|
core/src/test/scala/akka/persistence/jdbc/state/ScaladslSnippets.scala
|
Scala
|
apache-2.0
| 4,446 |
package de.hpi.asg.breezetestgen
import akka.actor.Props
import de.hpi.asg.breezetestgen.actors.Simulator
import Simulator.RunTest
import de.hpi.asg.breezetestgen.testing.TestRunner.{TestFailed, TestResult, TestSucceeded}
import fixtures.gcdNetlist
/** Tests a complete simulation run: provide the netlist to a Simulator(-Actor) and run tests against it. */
class SimulationSpec extends baseclasses.AkkaIntegrationSpec("SimulationSpec") {
val netlist = gcdNetlist()
val workingTest = fixtures.GCDTest(12, 8, 4)
val workingTest2 = fixtures.GCDTest(4, 8, 4)
val brokenTest = fixtures.GCDTest(12, 8, 3)
def newSimulator() = system.actorOf(Props(classOf[Simulator], netlist))
"The simulator" should "answer with a result" in {
val simulator = newSimulator()
simulator ! RunTest(workingTest)
expectMsgType[TestResult]
}
it should "answer with TestSucceeded for working tests" in {
val simulator = newSimulator()
simulator ! RunTest(workingTest)
expectMsg(TestSucceeded)
}
it should "answer with TestFailed for broken tests" in {
val simulator = newSimulator()
simulator ! RunTest(brokenTest)
expectMsgClass(classOf[TestFailed])
}
it should "be able to run multiple tests" in {
val simulator = newSimulator()
simulator ! RunTest(workingTest)
expectMsg(TestSucceeded)
simulator ! RunTest(workingTest2)
expectMsg(TestSucceeded)
simulator ! RunTest(brokenTest)
expectMsgClass(classOf[TestFailed])
}
}
|
0x203/BreezeTestGen
|
src/test/scala/de/hpi/asg/breezetestgen/SimulationSpec.scala
|
Scala
|
mit
| 1,467 |
package jp.co.bizreach.elasticsearch4s
import org.elasticsearch.action.search.SearchRequestBuilder
import ESClient._
import org.slf4j.LoggerFactory
import org.elasticsearch.client.support.AbstractClient
import scala.reflect.ClassTag
import scala.annotation.tailrec
import com.ning.http.client.{AsyncHttpClient, AsyncHttpClientConfig}
/**
 * Helper for accessing Elasticsearch.
*/
object ESClient {
private val logger = LoggerFactory.getLogger(classOf[ESClient])
private var httpClient: AsyncHttpClient = null
/**
 * This is the entry point of processing using Elasticsearch.
 * Give the Elasticsearch URL and your function, which takes an ESClient as an argument.
*/
def using[T](url: String)(f: ESClient => T): T = {
val httpClient = new AsyncHttpClient()
val client = new ESClient(httpClient, url)
try {
f(client)
} finally {
httpClient.close()
}
}
/**
* Initialize AsyncHttpClient. ESClient is available by calling this method.
*/
def init(): Unit = {
httpClient = HttpUtils.createHttpClient()
}
/**
* Return ESClient instance.
*/
def apply(url: String): ESClient = {
if(httpClient == null){
throw new IllegalStateException("ESClient has not been initialized.")
}
new ESClient(httpClient, url)
}
/**
* Initialize AsyncHttpClient with given configuration. ESClient is available by calling this method.
*/
def init(config: AsyncHttpClientConfig): Unit = {
httpClient = HttpUtils.createHttpClient(config)
}
/**
* Shutdown AsyncHttpClient. ESClient is disabled by calling this method.
*/
def shutdown() = {
httpClient.close()
httpClient = null
}
}
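// A hedged usage sketch (URL, index/type names, the document type and the exact ESConfig
// constructor are illustrative assumptions, not taken from this file):
//
//   ESClient.init()
//   val client = ESClient("http://localhost:9200")
//   val config = ESConfig("blog", Some("post"))
//   case class Post(title: String)
//   client.insert(config, Post("hello"))
//   val posts = client.findAllAsList[Post](config) { searcher =>
//     searcher.setQuery(org.elasticsearch.index.query.QueryBuilders.matchAllQuery())
//   }
//   ESClient.shutdown()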
class ESClient(httpClient: AsyncHttpClient, url: String) {
private val queryClient = new QueryBuilderClient()
def insertJson(config: ESConfig, json: String): Either[Map[String, Any], Map[String, Any]] = {
    logger.debug(s"insertJson:\n${json}")
    logger.debug(s"insertRequest:\n${json}")
val resultJson = HttpUtils.post(httpClient, config.url(url), json)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def insert(config: ESConfig, entity: AnyRef): Either[Map[String, Any], Map[String, Any]] = {
insertJson(config, JsonUtils.serialize(entity))
}
def updateJson(config: ESConfig, id: String, json: String): Either[Map[String, Any], Map[String, Any]] = {
    logger.debug(s"updateJson:\n${json}")
    logger.debug(s"updateRequest:\n${json}")
val resultJson = HttpUtils.put(httpClient, config.url(url) + "/" + id, json)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def update(config: ESConfig, id: String, entity: AnyRef): Either[Map[String, Any], Map[String, Any]] = {
updateJson(config, id, JsonUtils.serialize(entity))
}
def delete(config: ESConfig, id: String): Either[Map[String, Any], Map[String, Any]] = {
    logger.debug(s"delete id:\n${id}")
val resultJson = HttpUtils.delete(httpClient, config.url(url) + "/" + id)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def deleteByQuery(config: ESConfig)(f: SearchRequestBuilder => Unit): Either[Map[String, Any], Map[String, Any]] = {
logger.debug("******** ESConfig:" + config.toString)
val searcher = queryClient.prepareSearch(config.indexName)
config.typeName.foreach(x => searcher.setTypes(x))
f(searcher)
logger.debug(s"deleteByQuery:${searcher.toString}")
val resultJson = HttpUtils.delete(httpClient, config.url(url) + "/_query", searcher.toString)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def count(config: ESConfig)(f: SearchRequestBuilder => Unit): Either[Map[String, Any], Map[String, Any]] = {
logger.debug("******** ESConfig:" + config.toString)
val searcher = queryClient.prepareSearch(config.indexName)
config.typeName.foreach(x => searcher.setTypes(x))
f(searcher)
logger.debug(s"countRequest:${searcher.toString}")
val resultJson = HttpUtils.post(httpClient, config.preferenceUrl(url, "_count"), searcher.toString)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def countAsInt(config: ESConfig)(f: SearchRequestBuilder => Unit): Int = {
count(config)(f) match {
case Left(x) => throw new RuntimeException(x("error").toString)
case Right(x) => x("count").asInstanceOf[Int]
}
}
def search(config: ESConfig)(f: SearchRequestBuilder => Unit): Either[Map[String, Any], Map[String, Any]] = {
logger.debug("******** ESConfig:" + config.toString)
val searcher = queryClient.prepareSearch(config.indexName)
config.typeName.foreach(x => searcher.setTypes(x))
f(searcher)
logger.debug(s"searchRequest:${searcher.toString}")
val resultJson = HttpUtils.post(httpClient, config.preferenceUrl(url, "_search"), searcher.toString)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def searchAll(config: ESConfig)(f: SearchRequestBuilder => Unit): Either[Map[String, Any], Map[String, Any]] = {
count(config)(f) match {
case Left(x) => Left(x)
case Right(x) => {
val total = x("count").asInstanceOf[Int]
search(config) { searcher =>
f(searcher)
searcher.setFrom(0)
searcher.setSize(total)
}
}
}
}
def searchByTemplate(config: ESConfig)(lang: String, template: String, params: AnyRef, options: Option[String] = None): Either[Map[String, Any], Map[String, Any]] = {
logger.debug("******** ESConfig:" + config.toString)
val json = JsonUtils.serialize(
Map(
"lang" -> lang,
"template" -> Map(
"file" -> template
),
"params" -> params
)
)
logger.debug(s"searchRequest:${json}")
val resultJson = HttpUtils.post(httpClient, config.urlWithParameters(url, "_search/template" + options.getOrElse("")), json)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def find[T](config: ESConfig)(f: SearchRequestBuilder => Unit)(implicit c: ClassTag[T]): Option[(String, T)] = {
search(config)(f) match {
case Left(x) => throw new RuntimeException(x("error").toString)
case Right(x) => {
val hits = x("hits").asInstanceOf[Map[String, Any]]("hits").asInstanceOf[Seq[Map[String, Any]]]
if(hits.length == 0){
None
} else {
Some((hits.head("_id").toString, JsonUtils.deserialize[T](JsonUtils.serialize(getDocumentMap(hits.head)))))
}
}
}
}
def findAsList[T](config: ESConfig)(f: SearchRequestBuilder => Unit)(implicit c: ClassTag[T]): List[(String, T)] = {
search(config)(f) match {
case Left(x) => throw new RuntimeException(x("error").toString)
case Right(x) => createESSearchResult(x).list.map { x => (x.id, x.doc) }
}
}
def findAllAsList[T](config: ESConfig)(f: SearchRequestBuilder => Unit)(implicit c: ClassTag[T]): List[(String, T)] = {
findAsList(config){ searcher =>
f(searcher)
searcher.setFrom(0)
searcher.setSize(countAsInt(config)(f))
}
}
def list[T](config: ESConfig)(f: SearchRequestBuilder => Unit)(implicit c: ClassTag[T]): ESSearchResult[T] = {
search(config)(f) match {
case Left(x) => throw new RuntimeException(x("error").toString)
case Right(x) => createESSearchResult(x)
}
}
def listAll[T](config: ESConfig)(f: SearchRequestBuilder => Unit)(implicit c: ClassTag[T]): ESSearchResult[T] = {
list(config){ searcher =>
f(searcher)
searcher.setFrom(0)
searcher.setSize(countAsInt(config)(f))
}
}
def listByTemplate[T](config: ESConfig)(lang: String, template: String, params: AnyRef)(implicit c: ClassTag[T]): ESSearchResult[T] = {
searchByTemplate(config)(lang, template, params) match {
case Left(x) => throw new RuntimeException(x("error").toString)
case Right(x) => createESSearchResult(x)
}
}
def countByTemplate(config: ESConfig)(lang: String, template: String, params: AnyRef): Either[Map[String, Any], Map[String, Any]] = {
searchByTemplate(config)(lang, template, params, Some("?search_type=count"))
}
def countByTemplateAsInt(config: ESConfig)(lang: String, template: String, params: AnyRef): Int = {
countByTemplate(config)(lang: String, template: String, params: AnyRef) match {
case Left(x) => throw new RuntimeException(x("error").toString)
case Right(x) => x("hits").asInstanceOf[Map[String, Any]]("total").asInstanceOf[Int]
}
}
def refresh(config: ESConfig)(): Either[Map[String, Any], Map[String, Any]] = {
val resultJson = HttpUtils.post(httpClient, s"${url}/${config.indexName}/_refresh", "")
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
def scroll[T, R](config: ESConfig)(f: SearchRequestBuilder => Unit)(p: (String, T) => R)(implicit c1: ClassTag[T], c2: ClassTag[R]): Stream[R] = {
@tailrec
def scroll0[R](init: Boolean, searchUrl: String, body: String, stream: Stream[R], invoker: (String, Map[String, Any]) => R): Stream[R] = {
val resultJson = HttpUtils.post(httpClient, searchUrl + "?scroll=5m" + (if(init) "&search_type=scan" else ""), body)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
if(map.get("error").isDefined){
throw new RuntimeException(map("error").toString)
} else {
val scrollId = map("_scroll_id").toString
val list = map("hits").asInstanceOf[Map[String, Any]]("hits").asInstanceOf[List[Map[String, Any]]]
list match {
case Nil if init == false => stream
case Nil => scroll0(false, s"${url}/_search/scroll", scrollId, stream, invoker)
case list => scroll0(false, s"${url}/_search/scroll", scrollId, list.map { map => invoker(map("_id").toString, getDocumentMap(map)) }.toStream #::: stream, invoker)
}
}
}
logger.debug("******** ESConfig:" + config.toString)
val searcher = queryClient.prepareSearch(config.indexName)
config.typeName.foreach(x => searcher.setTypes(x))
f(searcher)
logger.debug(s"searchRequest:${searcher.toString}")
scroll0(true, config.url(url) + "/_search", searcher.toString, Stream.empty,
(_id: String, map: Map[String, Any]) => p(_id, JsonUtils.deserialize[T](JsonUtils.serialize(map))))
}
def scrollChunk[T, R](config: ESConfig)(f: SearchRequestBuilder => Unit)(p: (Seq[(String, T)]) => R)(implicit c1: ClassTag[T], c2: ClassTag[R]): Stream[R] = {
@tailrec
def scroll0[R](init: Boolean, searchUrl: String, body: String, stream: Stream[R], invoker: (Seq[(String, Map[String, Any])]) => R): Stream[R] = {
val resultJson = HttpUtils.post(httpClient, searchUrl + "?scroll=5m" + (if(init) "&search_type=scan" else ""), body)
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
if(map.get("error").isDefined){
throw new RuntimeException(map("error").toString)
} else {
val scrollId = map("_scroll_id").toString
val list = map("hits").asInstanceOf[Map[String, Any]]("hits").asInstanceOf[List[Map[String, Any]]]
list match {
case Nil if init == false => stream
case Nil => scroll0(false, s"${url}/_search/scroll", scrollId, stream, invoker)
case list => scroll0(false, s"${url}/_search/scroll", scrollId, Seq(invoker(list.map { map => (map("_id").toString, getDocumentMap(map)) })).toStream #::: stream, invoker)
}
}
}
logger.debug("******** ESConfig:" + config.toString)
val searcher = queryClient.prepareSearch(config.indexName)
config.typeName.foreach(x => searcher.setTypes(x))
f(searcher)
logger.debug(s"searchRequest:${searcher.toString}")
scroll0(true, config.url(url) + "/_search", searcher.toString, Stream.empty,
(maps: Seq[(String, Map[String, Any])]) => p(maps.map { case (id, map) =>
(id, JsonUtils.deserialize[T](JsonUtils.serialize(map)))
})
)
}
// def scrollAsMap[R](config: ESConfig)(f: SearchRequestBuilder => Unit)(p: Map[String, Any] => R)(implicit c: ClassTag[R]): Stream[R] = {
// logger.debug("******** ESConfig:" + config.toString)
// val searcher = queryClient.prepareSearch(config.indexName)
// config.typeName.foreach(x => searcher.setTypes(x))
// f(searcher)
// logger.debug(s"searchRequest:${searcher.toString}")
//
// scroll0(config.url(url) + "/_search", searcher.toString, Stream.empty, (_id: String, map: Map[String, Any]) => p(map))
// }
def bulk[T](actions: Seq[BulkAction]): Either[Map[String, Any], Map[String, Any]] = {
    val resultJson = HttpUtils.post(httpClient, s"${url}/_bulk", actions.map(_.jsonString).mkString("", "\n", "\n"))
val map = JsonUtils.deserialize[Map[String, Any]](resultJson)
map.get("error").map { case message: String => Left(map) }.getOrElse(Right(map))
}
private def getDocumentMap(hit: Map[String, Any]): Map[String, Any] = {
hit.get("_source").map(_.asInstanceOf[Map[String, Any]])
.getOrElse(structuredMap(hit("fields").asInstanceOf[Map[String, Any]]))
}
private def createESSearchResult[T](x: Map[String, Any])(implicit c: ClassTag[T]): ESSearchResult[T] = {
val total = x("hits").asInstanceOf[Map[String, Any]]("total").asInstanceOf[Int]
val took = x("took").asInstanceOf[Int]
val hits = x("hits").asInstanceOf[Map[String, Any]]("hits").asInstanceOf[Seq[Map[String, Any]]]
ESSearchResult(
total,
took,
hits.map { hit =>
ESSearchResultItem(
hit("_id").asInstanceOf[String],
hit("_score").asInstanceOf[Double],
JsonUtils.deserialize[T](JsonUtils.serialize(getDocumentMap(hit))),
hit.get("highlight").asInstanceOf[Option[Map[String, List[String]]]].getOrElse(Map.empty),
hit.get("_explanation").asInstanceOf[Option[Map[String, Any]]].getOrElse(Map.empty)
)
}.toList,
x.get("facets").asInstanceOf[Option[Map[String, Map[String, Any]]]].getOrElse(Map.empty),
x.get("aggregations").asInstanceOf[Option[Map[String, Any]]].getOrElse(Map.empty),
x
)
}
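  // structuredMap rebuilds nested documents from dotted "fields" keys,
  // e.g. Map("a.b" -> 1, "a.c" -> 2) becomes Map("a" -> Map("b" -> 1, "c" -> 2)).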
private def structuredMap(map: Map[String, Any]): Map[String, Any] = {
def structuredMap0(group: List[(List[String], Any)]): Any = {
group.groupBy { case (key, value) => key.head }.map { case (key, value) =>
key -> (if(value.head._1.length == 1){
value.head._2
} else {
structuredMap0(value.map { case (key, value) => key.tail -> value })
})
}
}
    val list = map.map { case (key, value) => key.split("\\.").toList -> value }.toList
structuredMap0(list).asInstanceOf[Map[String, Any]]
}
}
|
saito400/elastic-scala-httpclient
|
elastic-scala-httpclient/src/main/scala/jp/co/bizreach/elasticsearch4s/ESClient.scala
|
Scala
|
apache-2.0
| 15,350 |
package org.jetbrains.plugins.scala
package codeInspection.booleans
import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder}
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnPsiElement, AbstractInspection}
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScInfixExpr, ScParenthesisedExpr, ScPrefixExpr}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory.createExpressionFromText
import scala.annotation.tailrec
import scala.collection.mutable
/**
* Nikolay.Tropin
* 4/23/13
*/
class DoubleNegationInspection extends AbstractInspection("DoubleNegation", "Double negation"){
def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
case expr: ScExpression if DoubleNegationUtil.hasDoubleNegation(expr) =>
holder.registerProblem(expr, "Double negation", ProblemHighlightType.GENERIC_ERROR_OR_WARNING, new DoubleNegationQuickFix(expr))
case _ =>
}
}
class DoubleNegationQuickFix(expr: ScExpression)
extends AbstractFixOnPsiElement("Remove double negation", expr){
def doApplyFix(project: Project) {
val scExpr = getElement
if (!scExpr.isValid || !DoubleNegationUtil.hasDoubleNegation(scExpr)) return
val newExpr = DoubleNegationUtil.removeDoubleNegation(scExpr)
scExpr.replaceExpression(newExpr, removeParenthesis = true)
}
}
object DoubleNegationUtil {
def hasDoubleNegation(expr: ScExpression): Boolean = {
if (hasNegation(expr))
expr match {
case ScPrefixExpr(_, operand) => hasNegation(operand)
case ScInfixExpr(left, _, right) => hasNegation(left) || hasNegation(right)
case _ => false
}
else
expr match {
case ScInfixExpr(left, operation, right) => operation.refName == "==" && hasNegation(left) && hasNegation(right)
case _ => false
}
}
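  // Illustrative rewrites produced by removeDoubleNegation for the cases accepted above:
  //   !(!a)     => a
  //   !(a != b) => a == b
  //   !a == !b  => a == b
  //   !a != b   => a == b
  //   !a != !b  => a != b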
def removeDoubleNegation(expr: ScExpression): ScExpression = {
val text: String = stripParentheses(expr) match {
case ScPrefixExpr(_, operand) => invertedNegationText(operand)
case infix @ ScInfixExpr(left, _, right) =>
val hasNegLeft = hasNegation(left)
val hasNegRight = hasNegation(right)
val hasNegInfix = hasNegation(infix)
val builder = new mutable.StringBuilder()
builder.append(if (hasNegLeft) invertedNegationText(left) else left.getText)
builder.append(if (hasNegLeft && hasNegInfix && hasNegRight) " != " else " == ")
builder.append(if (hasNegRight) invertedNegationText(right) else right.getText)
builder.toString()
}
createExpressionFromText(text)(expr.getManager)
}
@tailrec
private def stripParentheses(expr: ScExpression): ScExpression = expr match {
case ScParenthesisedExpr(inner) => stripParentheses(inner)
case expr: ScExpression => expr
}
private def hasNegation(expr: ScExpression): Boolean = {
val withoutParentheses = stripParentheses(expr)
withoutParentheses match {
case ScPrefixExpr(operation, _) => operation.refName == "!"
case ScInfixExpr(_, operation, _) => operation.refName == "!="
case _ => false
}
}
private def invertedNegationText(expr: ScExpression): String = {
require(hasNegation(expr))
val withoutParentheses = stripParentheses(expr)
withoutParentheses match {
case ScPrefixExpr(_, operand) => operand.getText
case ScInfixExpr(left, _, right) => left.getText + "==" + right.getText
}
}
}
|
ilinum/intellij-scala
|
src/org/jetbrains/plugins/scala/codeInspection/booleans/DoubleNegationInspection.scala
|
Scala
|
apache-2.0
| 3,550 |
package com.dwolla.cloudflare.domain.dto.accounts
import io.circe.Codec
import io.circe.generic.semiauto.deriveCodec
case class NewAccountMemberDTO (
email: String,
roles: Seq[String],
status: Option[String] = None
)
object NewAccountMemberDTO {
implicit val newAccountMemberDTOCodec: Codec[NewAccountMemberDTO] = deriveCodec
}
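// A minimal round-trip sketch (values are illustrative):
//
//   import io.circe.syntax._
//   val json = NewAccountMemberDTO("user@example.com", Seq("role-id")).asJson.noSpaces
//   val back = io.circe.parser.decode[NewAccountMemberDTO](json) // Right(NewAccountMemberDTO(...))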
|
Dwolla/scala-cloudflare
|
dto/src/main/scala/com/dwolla/cloudflare/domain/dto/accounts/NewAccountMemberDTO.scala
|
Scala
|
mit
| 339 |
package ps.tricerato.pureimage.test
import org.specs2.mutable._
import ps.tricerato.pureimage._
/**
* Created by michael.schiff on 6/12/14.
*/
class Output extends Specification {
  "RGB image output" in {
val Right(RGBImage(image)) = Input(zardozJpeg)
Output(image, PNG).length must_!= 0
}
"Gray image output" in {
val Right(RGBImage(image)) = Input(zardozJpeg)
val gray = new Image[Gray] {
def width = image.width
def height = image.height
def apply(x:Int, y:Int):Gray = Gray((image(x,y).red + image(x,y).green + image(x,y).blue) / 3)
}
Output(gray, PNG).length must_!= 0
}
}
|
stephenjudkins/pureimage
|
src/test/scala/ps/tricerato/pureimage/test/Output.scala
|
Scala
|
mit
| 633 |
package com.holmesprocessing.analytics.actors
import java.util.UUID
import scala.concurrent.duration._
import akka.actor.{ Actor, ActorLogging, ActorRef, Props }
import akka.util.Timeout
import com.holmesprocessing.analytics.types.{AnalyticEngineProtocol, GenericAnalyticService}
object Job {
def props(id: UUID, name: String, analyticEngine: ActorRef, analyticService: GenericAnalyticService, servicesPath: String, parameters: Map[String, String]): Props = Props(new Job(id, name, analyticEngine, analyticService, servicesPath, parameters))
}
object JobProtocol {
final case class GetId()
final case class GetName()
final case class GetStatus()
final case class GetResult()
}
class Job(id: UUID, name: String, analyticEngine: ActorRef, analyticService: GenericAnalyticService, servicesPath: String, parameters: Map[String, String]) extends Actor with ActorLogging {
override def preStart(): Unit = log.info("Job " + name + " started")
override def postStop(): Unit = log.info("Job " + name + " stopped")
// no reason to wait, start the job
this.start()
override def receive = {
case msg: JobProtocol.GetId =>
sender() ! this.id
case msg: JobProtocol.GetName =>
sender() ! this.name
case msg: JobProtocol.GetStatus =>
analyticEngine forward AnalyticEngineProtocol.GetStatus()
case msg: JobProtocol.GetResult =>
analyticEngine forward AnalyticEngineProtocol.GetResult()
case x => log.warning("Received unknown message: {}", x)
}
def start(): Unit = {
implicit val timeout: Timeout = 10.minutes
//TODO: catch build errors here
analyticService.build(servicesPath)
val objPath = analyticService.getObjPath(servicesPath)
analyticEngine ! AnalyticEngineProtocol.Execute(objPath)
}
def stop(): Unit = {
analyticEngine ! AnalyticEngineProtocol.Stop
}
}
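// A hedged usage sketch (the actor system, engine ref, service instance and path are
// assumptions, not taken from this file):
//
//   import akka.pattern.ask
//   import akka.util.Timeout
//   import scala.concurrent.duration._
//   implicit val timeout: Timeout = 5.seconds
//   val job = system.actorOf(Job.props(UUID.randomUUID(), "my-job", engineRef, service, "/path/to/services", Map.empty))
//   val status = job ? JobProtocol.GetStatus()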
|
cynexit/Holmes-Analytics
|
src/main/scala/com/holmesprocessing/analytics/actors/Job.scala
|
Scala
|
apache-2.0
| 1,812 |
package pl.newicom.dddd.monitoring
import akka.actor.Actor
import pl.newicom.dddd.aggregate.BusinessEntity
import pl.newicom.dddd.messaging.event.EventSourceProvider
import pl.newicom.dddd.monitoring.Stage._
trait ReceptorMonitoring extends EventSourceProvider with TraceContextSupport {
this: Actor =>
override abstract def eventSource(es: EventStore, observable: BusinessEntity, fromPosExcl: Option[Long]): EventSource =
super.eventSource(es, observable, fromPosExcl) map {
entry =>
/**
* Record elapsed time since the event was persisted in the event store
*/
def recordCreationToReceptionPeriod() =
newTraceContext(
name = Reception_Of_Event.traceContextName(observable, entry.msg),
startedOnMillis = entry.created.get.getMillis
).foreach(
_.finish()
)
recordCreationToReceptionPeriod()
entry.copy(msg = entry.msg.withMetaAttribute(Reaction_On_Event.shortName, System.nanoTime()))
}
}
|
AndreyLadniy/akka-ddd
|
akka-ddd-monitoring/src/main/scala/pl/newicom/dddd/monitoring/ReceptorMonitoring.scala
|
Scala
|
mit
| 1,041 |
package im.actor.bots
import derive.key
import upickle.Js
import upickle.default._
object BotMessages {
sealed trait BotMessage
sealed trait BotMessageIn extends BotMessage
sealed trait BotMessageOut extends BotMessage
object Services {
val KeyValue = "keyvalue"
val Messaging = "messaging"
}
final case class FileLocation(
fileId: Long,
accessHash: Long
)
final case class AvatarImage(
fileLocation: FileLocation,
width: Int,
height: Int,
fileSize: Int
)
final case class Avatar(
smallImage: Option[AvatarImage],
largeImage: Option[AvatarImage],
fullImage: Option[AvatarImage]
)
final case class User(
id: Int,
accessHash: Long,
name: String,
sex: Option[Int],
about: Option[String],
avatar: Option[Avatar],
username: Option[String],
isBot: Option[Boolean]
) {
def isMale = sex.contains(1)
def isFemale = sex.contains(2)
def isABot = isBot.contains(true)
}
final case class GroupMember(
userId: Int,
inviterUserId: Int,
memberSince: Long,
isAdmin: Option[Boolean]
)
final case class Group(
id: Int,
accessHash: Long,
title: String,
about: Option[String],
avatar: Option[Avatar],
isMember: Boolean,
creatorUserId: Int,
members: Seq[GroupMember]
)
final object OutPeer {
def privat(id: Int, accessHash: Long) = OutPeer(1, id, accessHash)
def user(id: Int, accessHash: Long) = privat(id, accessHash)
def group(id: Int, accessHash: Long) = OutPeer(2, id, accessHash)
}
final case class OutPeer(
`type`: Int,
id: Int,
accessHash: Long
) {
final def isPrivate = `type` == 1
final def isUser = isPrivate
final def isGroup = `type` == 2
}
final case class UserOutPeer(
id: Int,
accessHash: Long
) {
val asOutPeer = OutPeer(1, id, accessHash)
}
final case class Peer(
`type`: Int,
id: Int
)
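  // Each request below binds its response type, names the service that handles it,
  // and knows how to decode a successful reply (see e.g. SendTextMessage further down).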
sealed trait RequestBody {
type Response <: ResponseBody
val service: String
def readResponse(obj: Js.Obj): Response
}
trait ResponseBody
@key("Request")
final case class BotRequest(
id: Long,
service: String,
body: RequestBody
) extends BotMessageIn
@key("Response")
final case class BotResponse(
id: Long,
body: BotResponseBody
) extends BotMessageOut
sealed trait BotResponseBody
sealed trait BotUpdate extends BotMessageOut {
val seq: Int
val body: UpdateBody
}
sealed trait UpdateBody
@key("SeqUpdate")
final case class BotSeqUpdate(
seq: Int,
body: UpdateBody
) extends BotUpdate
@key("FatSeqUpdate")
final case class BotFatSeqUpdate(
seq: Int,
body: UpdateBody,
users: Map[Int, User],
groups: Map[Int, Group]
) extends BotUpdate
@key("Error")
case class BotError(code: Int, tag: String, data: Js.Obj, retryIn: Option[Int]) extends RuntimeException with BotResponseBody
@key("Success")
case class BotSuccess(obj: Js.Obj) extends BotResponseBody
implicit val objWriter = Writer[Js.Obj] {
case obj => obj
}
implicit val objReader = Reader[Js.Obj] {
case obj: Js.Obj => obj
}
implicit val botSuccessWriter = upickle.default.Writer[BotSuccess] {
case BotSuccess(obj) => obj
}
implicit val botSuccessReader = upickle.default.Reader[BotSuccess] {
case obj: Js.Obj => BotSuccess(obj)
}
implicit val botErrorWriter = upickle.default.Writer[BotError] {
case BotError(code, tag, data, retryInOpt) =>
Js.Obj(
"code" -> Js.Num(code.toDouble),
"tag" -> Js.Str(tag),
"data" -> data,
"retryIn" -> retryInOpt.map(n => Js.Num(n.toDouble)).getOrElse(Js.Null)
)
}
final case class Container[T](value: T) extends ResponseBody
trait Void extends ResponseBody
final case object Void extends Void
implicit val voidReader = upickle.default.Reader[Void] {
case Js.Obj() => Void
}
implicit val voidWriter = upickle.default.Writer[Void] {
case _ => Js.Obj()
}
@key("SendMessage")
final case class SendTextMessage(
peer: OutPeer,
randomId: Long,
text: String
) extends RequestBody {
override type Response = MessageSent
override val service = Services.Messaging
override def readResponse(obj: Js.Obj) = readJs[MessageSent](obj)
}
@key("SetValue")
final case class SetValue(
keyspace: String,
key: String,
value: String
) extends RequestBody {
override type Response = Void
override val service = Services.KeyValue
override def readResponse(obj: Js.Obj) = readJs[Void](obj)
}
@key("GetValue")
final case class GetValue(
keyspace: String,
key: String
) extends RequestBody {
override type Response = Container[Option[String]]
override val service = Services.KeyValue
override def readResponse(obj: Js.Obj) = readJs[Container[Option[String]]](obj)
}
@key("DeleteValue")
final case class DeleteValue(
keyspace: String,
key: String
) extends RequestBody {
override type Response = Void
override val service = Services.KeyValue
override def readResponse(obj: Js.Obj) = readJs[Void](obj)
}
@key("GetKeys")
final case class GetKeys(keyspace: String) extends RequestBody {
override type Response = Container[Seq[String]]
override val service = Services.KeyValue
override def readResponse(obj: Js.Obj) = readJs[Container[Seq[String]]](obj)
}
final case class MessageSent(date: Long) extends ResponseBody
@key("TextMessage")
final case class TextMessage(
peer: OutPeer,
sender: UserOutPeer,
date: Long,
randomId: Long,
text: String
) extends UpdateBody
}
|
lzpfmh/actor-platform
|
actor-server/actor-bots-shared/src/main/scala/im/actor/bots/BotMessages.scala
|
Scala
|
mit
| 7,775 |
package builder.api_json
import core.{Importer, TypeValidator, TypesProvider, TypesProviderEnum, TypesProviderField, TypesProviderModel, TypesProviderUnion}
import lib.{DatatypeResolver, Kind}
private[api_json] case class InternalServiceFormTypesProvider(internal: InternalServiceForm) extends TypesProvider {
override def enums = internal.enums.map { enum =>
TypesProviderEnum(
namespace = internal.namespace.getOrElse(""),
name = enum.name,
plural = enum.plural,
values = enum.values.flatMap(_.name)
)
}
override def unions = internal.unions.map { u =>
TypesProviderUnion(
namespace = internal.namespace.getOrElse(""),
name = u.name,
plural = u.plural,
types = u.types.flatMap(_.datatype).map(_.name).map { core.TypesProviderUnionType(_) }
)
}
override def models = internal.models.map { m =>
TypesProviderModel(
namespace = internal.namespace.getOrElse(""),
name = m.name,
plural = m.plural,
fields = m.fields.filter(!_.name.isEmpty).filter(!_.datatype.isEmpty) map { f =>
TypesProviderField(
name = f.name.get,
`type` = f.datatype.get.label
)
}
)
}
}
/**
* Takes an internal service form and recursively builds up a type
* provider for all enums and all models specified in the service or
* in any of the imports. Takes care to avoid importing the same
* service multiple times (based on uniqueness of the import URIs)
*/
private[api_json] case class RecursiveTypesProvider(
internal: InternalServiceForm
) extends TypesProvider {
override def enums = providers.map(_.enums).flatten
override def unions = providers.map(_.unions).flatten
override def models = providers.map(_.models).flatten
private lazy val providers = Seq(InternalServiceFormTypesProvider(internal)) ++ resolve(internal.imports.flatMap(_.uri))
private def resolve(
importUris: Seq[String],
imported: Set[String] = Set.empty
): Seq[TypesProvider] = {
importUris.headOption match {
case None => Seq.empty
case Some(uri) => {
if (imported.contains(uri.toLowerCase.trim)) {
// already imported
resolve(importUris.drop(1), imported)
} else {
val importer = Importer(internal.fetcher, uri)
importer.validate match {
case Nil => {
Seq(TypesProvider.FromService(importer.service)) ++ resolve(importUris.drop(1), imported ++ Set(uri))
}
case errors => {
// There are errors w/ this import - skip it
resolve(importUris.drop(1), imported ++ Set(uri))
}
}
}
}
}
}
}
private[api_json] case class TypeResolver(
defaultNamespace: Option[String],
provider: TypesProvider
) {
private val resolver = DatatypeResolver(
enumNames = provider.enums.map(_.name),
modelNames = provider.models.map(_.name),
unionNames = provider.unions.map(_.name)
)
private lazy val validator = TypeValidator(
defaultNamespace = defaultNamespace,
provider.enums
)
def toType(name: String): Option[Kind] = {
resolver.parse(name)
}
def parseWithError(internal: InternalDatatype): Kind = {
parse(internal).getOrElse {
sys.error(s"Unrecognized datatype[${internal.label}]")
}
}
/**
* Resolves the type name into instances of a first class Type.
*/
def parse(internal: InternalDatatype): Option[Kind] = {
resolver.parse(internal.label)
}
def assertValidDefault(kind: Kind, value: String) {
validate(kind, value) match {
case None => {}
case Some(msg) => sys.error(msg)
}
}
def validate(
kind: Kind,
value: String,
errorPrefix: Option[String] = None
): Option[String] = {
validator.validate(kind, value, errorPrefix)
}
}
|
Seanstoppable/apidoc
|
core/src/main/scala/core/builder/api_json/Types.scala
|
Scala
|
mit
| 3,864 |
package ee.cone.c4gate
import com.typesafe.scalalogging.LazyLogging
import ee.cone.c4actor.{RawQSender, _}
import ee.cone.c4assemble.Single
import ee.cone.c4di.c4
import scala.annotation.tailrec
import scala.collection.immutable.Seq
class TestQRecordImpl(val topic: TxLogName, val value: Array[Byte], val headers: Seq[RawHeader]) extends QRecord
@c4("KafkaLatTestApp") final class TestRootProducerImpl(
rawQSender: RawQSender, toUpdate: ToUpdate,
currentTxLogName: CurrentTxLogName,
) extends Executable with LazyLogging {
def run(): Unit = {
iteration()
}
@tailrec private def iteration(): Unit = {
val updates = Nil //LEvent.update(S_Firstborn(actorName,offset)).toList.map(toUpdate.toUpdate)
val (bytes, headers) = toUpdate.toBytes(updates)
val offset = rawQSender.send(new TestQRecordImpl(currentTxLogName,bytes,headers))
logger.info(s"pushed $offset")
Thread.sleep(1000)
iteration()
}
}
@c4("KafkaLatTestApp") final class TestRootConsumerImpl(consuming: Consuming) extends Executable with LazyLogging {
def run(): Unit = {
consuming.process("0" * OffsetHexSize(), consumer => iteration(consumer))
}
@tailrec private def iteration(consumer: Consumer): Unit = {
val events = consumer.poll()
logger.info(s"poll-ed ${events.size}")
iteration(consumer)
}
}
|
conecenter/c4proto
|
base_examples/src/main/scala/ee/cone/c4gate/KafkaLatTestApp.scala
|
Scala
|
apache-2.0
| 1,326 |
/*
* Copyright 2012-2013 Stephane Godbillon (@sgodbillon)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package play.modules.reactivemongo.json.commands
import play.api.libs.json.{
JsError,
JsNumber,
JsObject,
JsResult,
JsSuccess,
JsValue,
Reads
}
import reactivemongo.api.commands.{ CommandError, UnitBox }
import play.modules.reactivemongo.json.JSONSerializationPack
object CommonImplicits {
implicit object UnitBoxReader extends Reads[UnitBox.type] {
private val Success = JsSuccess(UnitBox)
def reads(doc: JsValue): JsResult[UnitBox.type] = Success
}
}
trait JSONCommandError extends CommandError {
def originalDocument: JsObject
}
case class DefaultJSONCommandError(
code: Option[Int],
errmsg: Option[String],
originalDocument: JsObject) extends JSONCommandError {
override def getMessage = s"CommandError[code=${code.getOrElse("<unknown>")}, errmsg=${errmsg.getOrElse("<unknown>")}, doc: ${originalDocument}]"
}
private[commands] trait DealingWithGenericCommandErrorsReader[A]
extends Reads[A] {
def readResult(doc: JsObject): A
final def reads(json: JsValue): JsResult[A] = json match {
case doc: JsObject => {
      if (!(doc \ "ok").asOpt[JsNumber].exists(_.value.toInt == 1)) {
        JsError(new DefaultJSONCommandError(
          code = (doc \ "code").asOpt[Int],
          errmsg = (doc \ "errmsg").asOpt[String],
originalDocument = doc).getMessage())
} else JsSuccess(readResult(doc))
}
case v => JsError(s"Expecting a ReactiveMongo document, found: $v")
}
}
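// Hedged sketch (added; not part of the original source): a hypothetical reader built on the
// trait above. Documents whose "ok" field is not 1 become a JsError carrying the command error
// message; otherwise readResult is applied to the document.
//   object DropResultReader extends DealingWithGenericCommandErrorsReader[Unit] {
//     def readResult(doc: JsObject): Unit = ()
//   }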
|
duncancrawford/Play-Json-ReactiveMongo
|
src/main/scala/play/modules/reactivemongo/jsoncommands.scala
|
Scala
|
apache-2.0
| 2,079 |
package io.viper.common
import collection.mutable.ListBuffer
import java.io.File
import collection.mutable
import org.clapper.classutil.{ClassFinder, ClassInfo}
import io.viper.common.FileWalker.FileHandler
import java.net.URLClassLoader
class DynamicContainerApp(val port: Int = 80, val path: String = ".") extends MultiHostServerApp(port) {
def loadClass[T](jar: File, name: String): T = {
val child = new URLClassLoader(Array(jar.toURL()), this.getClass().getClassLoader())
Class.forName(name, true, child).newInstance.asInstanceOf[T]
}
println("looking for jars in: " + path)
DynamicLoader.load(path).foreach { info =>
println("loading location: %s, name: %s".format(info.location, info.name))
val runner = loadClass[VirtualServerRunner](info.location, info.name)
println("adding: %s".format(runner.hostname))
route(runner)
}
}
class DynamicContainer(port: Int = 80, path: String = ".") extends MultiHostServer(port) {
def loadClass[T](jar: File, name: String): T = {
val child = new URLClassLoader(Array(jar.toURL()), this.getClass().getClassLoader())
Class.forName(name, true, child).newInstance.asInstanceOf[T]
}
override def run {
DynamicLoader.load(path).foreach { info =>
println("loading location: %s, name: %s".format(info.location, info.name))
val runner = loadClass[VirtualServerRunner](info.location, info.name)
println("adding: %s".format(runner.hostname))
route(runner)
}
super.run
}
}
object DynamicLoader {
def load(path: String): List[ClassInfo] = {
val files = new ListBuffer[File]
FileWalker.enumerateFolders(path, new FileHandler {
def process(file: File) {
if (file.getName.endsWith(".jar")) {
files.append(file)
}
}
})
load(files.toList)
}
def load(jarFiles: List[File]): List[ClassInfo] = {
val finder = ClassFinder(jarFiles)
val classesMap = ClassFinder.classInfoMap(finder.getClasses())
val plugins = ClassFinder.concreteSubclasses("io.viper.common.VirtualServerRunner", classesMap).toList
val filtered = plugins.filter(_.name != "io.viper.common.VirtualServer")
filtered.foreach(println(_))
filtered
}
}
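// Hedged usage sketch (added; not part of the original source); "./plugins" is a hypothetical folder:
//   val runners: List[ClassInfo] = DynamicLoader.load("./plugins")
//   runners.foreach(info => println(s"${info.name} @ ${info.location}"))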
object FileWalker {
def enumerateFolders(startFolder: String, handler: FileWalker.FileHandler) {
val rootDir = new File(startFolder)
if (!rootDir.exists) {
throw new IllegalArgumentException("file does not exist: " + startFolder)
}
val stack = new mutable.Stack[File]
stack.push(rootDir)
while (!stack.isEmpty) {
val curFile = stack.pop()
val subFiles = curFile.listFiles
if (subFiles != null) {
val toPush = List() ++ subFiles.par.flatMap {
file =>
if (file.isDirectory) {
if (handler.skipDir(file)) {
Nil
} else {
List(file)
}
} else {
handler.process(file)
Nil
}
}
toPush.foreach(stack.push)
}
}
}
trait FileHandler {
def skipDir(file: File): Boolean = false
def process(file: File)
}
}
|
briangu/viper.io
|
core/src/main/scala/io/viper/common/DynamicContainerApp.scala
|
Scala
|
apache-2.0
| 3,152 |
package com.crobox.clickhouse.balancing
import akka.http.scaladsl.model.Uri
package object discovery {
// TODO we might want to provide the ability to specify a different port when using the hostnames from the cluster table
case class ConnectionConfig(host: Uri, cluster: String)
}
|
crobox/clickhouse-scala-client
|
client/src/main/scala/com.crobox.clickhouse/balancing/discovery/package.scala
|
Scala
|
lgpl-3.0
| 289 |
/**
* Copyright (C) 2013-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package akka.testkit
import org.scalatest.{ WordSpec, BeforeAndAfterAll }
import org.scalatest.Matchers
import akka.actor.ActorSystem
// @note IMPLEMENT IN SCALA.JS @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class ImplicitSenderSpec
extends WordSpec with Matchers with BeforeAndAfterAll with TestKitBase with ImplicitSender {
implicit lazy val system = ActorSystem("AkkaCustomSpec")
override def afterAll = system.shutdown
"An ImplicitSender" should {
"have testActor as its self" in {
self should be(testActor)
}
}
}
|
jmnarloch/akka.js
|
akka-js-testkit/js/src/test/scala/akka/testkit/ImplicitSenderSpec.scala
|
Scala
|
bsd-3-clause
| 646 |
package controllers
import play.api.libs.json._
import play.api.mvc._
import lila.api.Context
import lila.app._
import lila.common.{ LilaCookie, HTTPRequest }
import views._
object Lobby extends LilaController {
def home = Open { implicit ctx =>
negotiate(
html = renderHome(Results.Ok).map(_.withHeaders(
CACHE_CONTROL -> "no-cache", PRAGMA -> "no-cache"
)),
api = _ => fuccess {
Ok(Json.obj(
"lobby" -> Json.obj(
"version" -> Env.lobby.history.version)
))
}
)
}
def handleStatus(req: RequestHeader, status: Results.Status): Fu[Result] =
reqToCtx(req) flatMap { ctx => renderHome(status)(ctx) }
def renderHome(status: Results.Status)(implicit ctx: Context): Fu[Result] =
Env.current.preloader(
posts = Env.forum.recent(ctx.me, Env.team.cached.teamIds),
tours = Env.tournament promotable true,
simuls = Env.simul allCreatedFeaturable true
) map (html.lobby.home.apply _).tupled map { template =>
// the session cookie is required for anon lobby filter storage
ctx.req.session.data.contains(LilaCookie.sessionId).fold(
status(template),
status(template) withCookies LilaCookie.makeSessionId(ctx.req)
)
}
def seeks = Open { implicit ctx =>
negotiate(
html = fuccess(NotFound),
api = _ => ctx.me.fold(Env.lobby.seekApi.forAnon)(Env.lobby.seekApi.forUser) map { seeks =>
Ok(JsArray(seeks.map(_.render)))
}
)
}
def socket(apiVersion: Int) = SocketOption[JsValue] { implicit ctx =>
get("sri") ?? { uid =>
Env.lobby.socketHandler(uid = uid, user = ctx.me) map some
}
}
def timeline = Auth { implicit ctx =>
me =>
Env.timeline.entryRepo.userEntries(me.id) map { html.timeline.entries(_) }
}
def timelineMore = Auth { implicit ctx =>
me =>
Env.timeline.entryRepo.moreUserEntries(me.id) map { html.timeline.more(_) }
}
}
|
danilovsergey/i-bur
|
app/controllers/Lobby.scala
|
Scala
|
mit
| 1,971 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.accumulo.commands
import com.beust.jcommander.{JCommander, Parameter, Parameters}
import com.typesafe.scalalogging.LazyLogging
import org.locationtech.geomesa.tools.accumulo.GeoMesaConnectionParams
import org.locationtech.geomesa.tools.accumulo.commands.GetSftCommand._
import org.locationtech.geomesa.tools.common.FeatureTypeNameParam
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
class GetSftCommand(parent: JCommander) extends CommandWithCatalog(parent) with LazyLogging {
override val command = "getsft"
override val params = new GetSftParameters
override def execute() = {
logger.info(s"Getting SFT for feature ${params.featureName} from catalog $catalog")
try {
params.format.toLowerCase match {
case "typesafe" =>
println(SimpleFeatureTypes.toConfigString(ds.getSchema(params.featureName), !params.excludeUserData, params.concise))
case "spec" =>
println(SimpleFeatureTypes.encodeType(ds.getSchema(params.featureName), !params.excludeUserData))
case _ =>
logger.error(s"Unknown config format: ${params.format}")
}
} catch {
case npe: NullPointerException =>
logger.error(s"Error: feature '${params.featureName}' not found. Check arguments...", npe)
case e: Exception =>
logger.error(s"Error describing feature '${params.featureName}':", e)
} finally {
ds.dispose()
}
}
}
object GetSftCommand {
@Parameters(commandDescription = "Get the SimpleFeatureType of a feature")
class GetSftParameters extends GeoMesaConnectionParams with FeatureTypeNameParam {
@Parameter(names = Array("--concise"), description = "Render in concise format", required = false)
var concise: Boolean = false
@Parameter(names = Array("--format"), description = "Formats for sft (comma separated string, allowed values are typesafe, spec)", required = false)
var format: String = "typesafe"
@Parameter(names = Array("--exclude-user-data"), description = "Exclude user data", required = false)
var excludeUserData: Boolean = false
}
}
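// Hedged CLI sketch (added; not part of the original source). Connection and feature-name flags
// come from GeoMesaConnectionParams / FeatureTypeNameParam and are elided as "..." here:
//   geomesa getsft ... --format spec --exclude-user-data
//   geomesa getsft ... --format typesafe --concise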
|
mdzimmerman/geomesa
|
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/accumulo/commands/GetSftCommand.scala
|
Scala
|
apache-2.0
| 2,597 |
/*
* The MIT License
*
* Copyright (c) 2011 John Svazic
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package net.auxesia
import scala.collection.immutable.List
import scala.math.round
import org.scalatest.WordSpec
import org.scalatest.matchers.MustMatchers
class PopulationSpec extends WordSpec with MustMatchers {
"A Population" should {
"provide a valid value for the crossover ratio" in {
val p1 = Population(1024, 0.8f, 0.1f, 0.05f)
val p2 = Population(1024, 0.0f, 0.1f, 0.05f)
val p3 = Population(1024, 1.0f, 0.1f, 0.05f)
(p1.crossover * 100).intValue must be === 80
(p2.crossover * 100).intValue must be === 0
(p3.crossover * 100).intValue must be === 100
}
"provide a valid value for the elitism ratio" in {
val p1 = Population(1024, 0.8f, 0.1f, 0.05f)
val p2 = Population(1024, 0.8f, 0.0f, 0.05f)
val p3 = Population(1024, 0.8f, 0.99f, 0.05f)
(p1.elitism * 100).intValue must be === 10
(p2.elitism * 100).intValue must be === 0
(p3.elitism * 100).intValue must be === 99
}
"provide a valid value for the mutation ratio" in {
val p1 = Population(1024, 0.8f, 0.1f, 0.05f)
val p2 = Population(1024, 0.8f, 0.1f, 0.0f)
val p3 = Population(1024, 0.8f, 0.1f, 1.0f)
(p1.mutation * 100).intValue must be === 5
(p2.mutation * 100).intValue must be === 0
(p3.mutation * 100).intValue must be === 100
}
"provide a valid initial population" in {
val pop = Population(1024, 0.8f, 0.1f, 0.05f)
pop.population must have length (1024)
val list = (Vector() ++ pop.population).sortWith((s, t) => s.fitness < t.fitness)
list must have length (1024)
list.sameElements(pop.population) must be === true
}
"be able to evlove successfully" in {
val pop = Population(1024, 0.8f, 0.1f, 0.05f)
val list = Vector() ++ pop.population
list must have length (1024)
pop.evolve
pop.population must have length (1024)
(pop.crossover * 100).intValue must be === 80
(pop.elitism * 100).intValue must be === 10
(pop.mutation * 100).intValue must be === 5
val elitisimMaxIdx = round(pop.population.length * pop.elitism)
var counter = 0
for (ch <- list) {
if (pop.population contains ch) {
counter += 1
}
}
counter must be >= round(pop.population.length * pop.elitism)
counter must be < pop.population.length
      pop.population(elitisimMaxIdx).fitness must be >= list(elitisimMaxIdx).fitness
}
"use elitism properly" in {
val pop = Population(1024, 0.8f, 0.1f, 0.05f)
val list = Vector() ++ pop.population
list must have length (1024)
for (i <- 1 to 100) {
pop.evolve
}
val elitisimMaxIdx = round(pop.population.length * pop.elitism)
      pop.population(elitisimMaxIdx).fitness must be >= list(elitisimMaxIdx).fitness
}
}
}
|
jboissard/GAHelloWorld
|
scala/src/test/scala/net/auxesia/PopulationSpec.scala
|
Scala
|
mit
| 4,073 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.httpclient
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.RawHeader
import akka.stream.BidiShape
import akka.stream.scaladsl._
import akka.util.ByteString
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.flatspec.AsyncFlatSpec
import org.scalatest.matchers.should.Matchers
import org.squbs.pipeline.{Context, PipelineFlow, PipelineFlowFactory, RequestContext}
import org.squbs.resolver.ResolverRegistry
import org.squbs.testkit.Timeouts._
import scala.concurrent.{Await, Future}
import scala.util.{Success, Try}
object ClientFlowPipelineSpec {
val config = ConfigFactory.parseString(
s"""
|dummyFlow {
| type = squbs.pipelineflow
| factory = org.squbs.httpclient.DummyFlow
|}
|
|preFlow {
| type = squbs.pipelineflow
| factory = org.squbs.httpclient.PreFlow
|}
|
|postFlow {
| type = squbs.pipelineflow
| factory = org.squbs.httpclient.PostFlow
|}
|
|squbs.pipeline.client.default {
| pre-flow = preFlow
| post-flow = postFlow
|}
|
|clientWithCustomPipelineWithDefaults {
| type = squbs.httpclient
| pipeline = dummyFlow
|}
|
|clientWithOnlyDefaults {
| type = squbs.httpclient
|}
|
|clientWithCustomPipelineWithoutDefaults {
| type = squbs.httpclient
| pipeline = dummyFlow
| defaultPipeline = off
|}
|
|clientWithNoPipeline {
| type = squbs.httpclient
| defaultPipeline = off
|}
""".stripMargin
)
implicit val system: ActorSystem = ActorSystem("ClientFlowPipelineSpec", config)
import akka.http.scaladsl.server.Directives._
val route =
path("hello") {
extract(_.request.headers) { headers =>
// Filter any non-test headers
complete(headers.filter(_.name.startsWith("key")).sortBy(_.name).mkString(","))
}
}
val serverBinding = Await.result(Http().newServerAt("localhost", 0).bind(route), awaitMax)
val port = serverBinding.localAddress.getPort
}
class ClientFlowPipelineSpec extends AsyncFlatSpec with Matchers with BeforeAndAfterAll {
import ClientFlowPipelineSpec._
override def afterAll(): Unit = {
serverBinding.unbind() map {_ => system.terminate()}
}
ResolverRegistry(system).register[HttpEndpoint]("LocalhostEndpointResolver")
{ (_, _) => Some(HttpEndpoint(s"http://localhost:$port")) }
it should "build the flow with defaults" in {
val expectedResponseHeaders = Seq(
RawHeader("keyD", "valD"),
RawHeader("keyPreOutbound", "valPreOutbound"),
RawHeader("keyPostOutbound", "valPostOutbound"))
val expectedEntity = Seq(
RawHeader("keyA", "valA"),
RawHeader("keyB", "valB"),
RawHeader("keyC", "valC"),
RawHeader("keyPreInbound", "valPreInbound"),
RawHeader("keyPostInbound", "valPostInbound")).sortBy(_.name).mkString(",")
assertPipeline("clientWithCustomPipelineWithDefaults", expectedResponseHeaders, expectedEntity)
}
it should "build the flow only with defaults" in {
val expectedResponseHeaders = Seq(
RawHeader("keyPreOutbound", "valPreOutbound"),
RawHeader("keyPostOutbound", "valPostOutbound"))
val expectedEntity = Seq(
RawHeader("keyPreInbound", "valPreInbound"),
RawHeader("keyPostInbound", "valPostInbound")).sortBy(_.name).mkString(",")
assertPipeline("clientWithOnlyDefaults", expectedResponseHeaders, expectedEntity)
}
it should "build the flow without defaults" in {
val expectedResponseHeaders = Seq(RawHeader("keyD", "valD"))
val expectedEntity = Seq(
RawHeader("keyA", "valA"),
RawHeader("keyB", "valB"),
RawHeader("keyC", "valC")).sortBy(_.name).mkString(",")
assertPipeline("clientWithCustomPipelineWithoutDefaults", expectedResponseHeaders, expectedEntity)
}
it should "not build a pipeline" in {
assertPipeline("clientWithNoPipeline", Seq.empty[RawHeader], "")
}
// TODO Add tests to make sure do not change the type of userContext
// it should "keep the user context"
private def assertPipeline(clientName: String, expectedResponseHeaders: Seq[RawHeader], expectedEntity: String) = {
val clientFlow = ClientFlow[Int](clientName)
val responseFuture: Future[(Try[HttpResponse], Int)] =
Source.single(HttpRequest(uri = "/hello") -> 42)
.via(clientFlow)
.runWith(Sink.head)
val (Success(response), userContext) = Await.result(responseFuture, awaitMax)
userContext shouldBe 42 // Make sure we keep user context
response.status should be (StatusCodes.OK)
response.headers.filter(_.name.startsWith("key")) should contain theSameElementsAs expectedResponseHeaders
val entity = response.entity.dataBytes.runFold(ByteString(""))(_ ++ _) map(_.utf8String)
entity map { e => e shouldEqual expectedEntity }
}
}
class DummyFlow extends PipelineFlowFactory {
override def create(context: Context)(implicit system: ActorSystem): PipelineFlow = {
BidiFlow.fromGraph(GraphDSL.create() { implicit b =>
import GraphDSL.Implicits._
val stageA = b.add(Flow[RequestContext].map { rc => rc.withRequestHeader(RawHeader("keyA", "valA")) })
val stageB = b.add(Flow[RequestContext].map { rc => rc.withRequestHeader(RawHeader("keyB", "valB")) })
val stageC = b.add(dummyBidi)
val stageD = b.add(Flow[RequestContext].map { rc => rc.withResponseHeader(RawHeader("keyD", "valD")) })
stageA ~> stageB ~> stageC.in1
stageD <~ stageC.out2
BidiShape(stageA.in, stageC.out1, stageC.in2, stageD.out)
})
}
val requestFlow = Flow[RequestContext].map { rc => rc.withRequestHeader(RawHeader("keyC", "valC")) }
val dummyBidi = BidiFlow.fromFlows(requestFlow, Flow[RequestContext])
}
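// Added note (not part of the original source): the graph above wires the request path as
// stageA ~> stageB ~> dummyBidi, so outgoing requests gain the headers keyA, keyB and keyC,
// while responses flow back through stageD and gain keyD, matching the expectations asserted
// in ClientFlowPipelineSpec above.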
class PreFlow extends PipelineFlowFactory {
override def create(context: Context)(implicit system: ActorSystem): PipelineFlow = {
val inbound = Flow[RequestContext].map { rc => rc.withRequestHeader(RawHeader("keyPreInbound", "valPreInbound")) }
val outbound = Flow[RequestContext].map { rc => rc.withResponseHeader(RawHeader("keyPreOutbound", "valPreOutbound")) }
BidiFlow.fromFlows(inbound, outbound)
}
}
class PostFlow extends PipelineFlowFactory {
override def create(context: Context)(implicit system: ActorSystem): PipelineFlow = {
val inbound = Flow[RequestContext].map { rc => rc.withRequestHeader(RawHeader("keyPostInbound", "valPostInbound")) }
val outbound = Flow[RequestContext].map { rc => rc.withResponseHeader(RawHeader("keyPostOutbound", "valPostOutbound")) }
BidiFlow.fromFlows(inbound, outbound)
}
}
|
paypal/squbs
|
squbs-httpclient/src/test/scala/org/squbs/httpclient/ClientFlowPipelineSpec.scala
|
Scala
|
apache-2.0
| 7,448 |
package com.mind_era.optimization
// Based on https://github.com/scalanlp/breeze/blob/releases/v0.13.1/math/src/main/scala/breeze/optimize/LBFGS.scala
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//import breeze.linalg._
//import breeze.linalg.operators.OpMulMatrix
//import breeze.math.MutableInnerProductModule
//import breeze.optimize.FirstOrderMinimizer.{ConvergenceCheck, ConvergenceReason}
//import breeze.optimize.linear.PowerMethod
//import breeze.util.SerializableLogging
import com.mind_era.math.norm
import com.mind_era.optimization.FirstOrderMinimizer.ConvergenceCheck
import scribe.Logging
import spire.algebra.{Eq, Field, Order, Signed}
/**
* Port of LBFGS to Scala.
*
* Special note for LBFGS:
* If you use it in published work, you must cite one of:
* * J. Nocedal. Updating Quasi-Newton Matrices with Limited Storage
* (1980), Mathematics of Computation 35, pp. 773-782.
 *  * D.C. Liu and J. Nocedal. On the Limited Memory BFGS Method for Large
* Scale Optimization (1989), Mathematical Programming B, 45, 3,
* pp. 503-528.
*
* @param m: The memory of the search. 3 to 7 is usually sufficient.
*/
//class LBFGS[T, R: Eq: Order: Signed: Field](convergenceCheck: ConvergenceCheck[T, R], m: Int)(implicit space: MutableInnerProductModule[T, Double]) extends FirstOrderMinimizer[T, R, DiffFunction[T]](convergenceCheck) /*with SerializableLogging*/ with Logging {
//
// def this(maxIter: Int = -1, m: Int=7, tolerance: Double=1E-9)
// (implicit space: MutableInnerProductModule[T, Double]) = this(FirstOrderMinimizer.defaultConvergenceCheck(maxIter, tolerance), m )
// import space._
// require(m > 0)
//
// type History = LBFGS.ApproximateInverseHessian[T]
//
// override protected def adjustFunction(f: DiffFunction[T]): DiffFunction[T] = f.cached
//
// protected def takeStep(state: State, dir: T, stepSize: Double) = state.x + dir * stepSize
// protected def initialHistory(f: DiffFunction[T], x: T): History = new LBFGS.ApproximateInverseHessian(m)
// protected def chooseDescentDirection(state: State, fn: DiffFunction[T]):T = {
// state.history * state.grad
// }
//
// protected def updateHistory(newX: T, newGrad: T, newVal: Double, f: DiffFunction[T], oldState: State): History = {
// oldState.history.updated(newX - oldState.x, newGrad -:- oldState.grad)
// }
//
// /**
// * Given a direction, perform a line search to find
// * a direction to descend. At the moment, this just executes
// * backtracking, so it does not fulfill the wolfe conditions.
// *
// * @param state the current state
// * @param f The objective
// * @param dir The step direction
// * @return stepSize
// */
// protected def determineStepSize(state: State, f: DiffFunction[T], dir: T) = {
// val x = state.x
// val grad = state.grad
//
// val ff = LineSearch.functionFromSearchDirection(f, x, dir)
// val search = new StrongWolfeLineSearch(maxZoomIter = 10, maxLineSearchIter = 10) // TODO: Need good default values here.
// val alpha = search.minimize(ff, if(state.iter == 0.0) 1.0/norm(dir) else 1.0)
//
// if(alpha * norm(grad) < 1E-10)
// throw new StepSizeUnderflow
// alpha
// }
//}
//
//object LBFGS {
// case class ApproximateInverseHessian[T](m: Int,
// private[LBFGS] val memStep: IndexedSeq[T] = IndexedSeq.empty,
// private[LBFGS] val memGradDelta: IndexedSeq[T] = IndexedSeq.empty)
// (implicit space: MutableInnerProductModule[T, Double]) extends NumericOps[ApproximateInverseHessian[T]] {
//
// import space._
//
// def repr: ApproximateInverseHessian[T] = this
//
// def updated(step: T, gradDelta: T) = {
// val memStep = (step +: this.memStep) take m
// val memGradDelta = (gradDelta +: this.memGradDelta) take m
//
// new ApproximateInverseHessian(m, memStep,memGradDelta)
// }
//
//
// def historyLength = memStep.length
//
// def *(grad: T) = {
// val diag = if(historyLength > 0) {
// val prevStep = memStep.head
// val prevGradStep = memGradDelta.head
// val sy = prevStep dot prevGradStep
// val yy = prevGradStep dot prevGradStep
// if(sy < 0 || sy.isNaN) throw new NaNHistory
// sy/yy
// } else {
// 1.0
// }
//
// val dir = space.copy(grad)
// val as = new Array[Double](m)
// val rho = new Array[Double](m)
//
// for(i <- 0 until historyLength) {
// rho(i) = (memStep(i) dot memGradDelta(i))
// as(i) = (memStep(i) dot dir)/rho(i)
// if(as(i).isNaN) {
// throw new NaNHistory
// }
// axpy(-as(i), memGradDelta(i), dir)
// }
//
// dir *= diag
//
// for(i <- (historyLength - 1) to 0 by (-1)) {
// val beta = (memGradDelta(i) dot dir)/rho(i)
// axpy(as(i) - beta, memStep(i), dir)
// }
//
// dir *= -1.0
// dir
// }
// }
//
// implicit def multiplyInverseHessian[T](implicit vspace: MutableInnerProductModule[T, Double]):OpMulMatrix.Impl2[ApproximateInverseHessian[T], T, T] = {
// new OpMulMatrix.Impl2[ApproximateInverseHessian[T], T, T] {
// def apply(a: ApproximateInverseHessian[T], b: T): T = a * b
// }
// }
//}
|
aborg0/arima
|
src/main/scala/com/mind_era/optimization/LBFGS.scala
|
Scala
|
agpl-3.0
| 5,811 |
import S99.P16._
import org.scalatest._
class P16Spec extends FlatSpec {
"duplicateN()" should "duplicate the elements of a list a given number of times" in {
assertResult(List('a, 'b, 'd, 'e, 'g, 'h, 'j, 'k)) {
drop(3, List('a, 'b, 'c, 'd, 'e, 'f, 'g, 'h, 'i, 'j, 'k))
}
assertResult(Nil) {
drop(3, Nil)
}
assertResult(List('a, 'b)) {
drop(3, List('a, 'b, 'c))
}
assertResult(List('a, 'b)) {
drop(3, List('a, 'b))
}
}
}
|
gcanti/S-99
|
src/test/scala/P16Spec.scala
|
Scala
|
mit
| 483 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.examples.saic
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.examples.util.ExampleUtils
import java.util.Date
object SaicNoFilterDescOrderQuery {
def main(args: Array[String]) {
val cc = ExampleUtils.createCarbonContext("CarbonExample")
val testData = ExampleUtils.currentPath + "/src/main/resources/rx5_parquet_10m.csv"
// Specify timestamp format based on raw data
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy-MM-dd hh:mm:ss")
cc.sql("""
select
*
from rx5_tbox_parquet_all
order by vin
limit 10000
""").show(10)
var start = System.currentTimeMillis()
cc.sql("""
select
vin,
gnsstime,
vehsyspwrmod,
vehdoorfrontpas,
vehdoorfrontdrv,
vehdoorrearleft,
vehdoorrearright,
vehbonnet,
vehboot
from rx5_tbox_parquet_all
order by vin desc
limit 50000
""").show(500000)
var end = System.currentTimeMillis()
print("limit 50000 query time: " + (end - start))
start = System.currentTimeMillis()
cc.sql("""
select
vin,
gnsstime,
vehsyspwrmod,
vehdoorfrontpas,
vehdoorfrontdrv,
vehdoorrearleft,
vehdoorrearright,
vehbonnet,
vehboot
from rx5_tbox_parquet_all
order by vin desc
limit 10000
""").show(500000)
end = System.currentTimeMillis()
print(" limit 10000 query time: " + (end - start))
start = System.currentTimeMillis()
cc.sql("""
select
vin,
gnsstime,
vehsyspwrmod,
vehdoorfrontpas,
vehdoorfrontdrv,
vehdoorrearleft,
vehdoorrearright,
vehbonnet,
vehboot,
vehwindowfrontleft,
vehwindowrearleft,
vehwindowfrontright,
vehwindowrearright
from rx5_tbox_parquet_all
order by vin desc
limit 1000
""").show(500000)
end = System.currentTimeMillis()
print("limit 1000 query time: " + (end - start))
/* for (index <- 1 to 1) {
var start = System.currentTimeMillis()
cc.sql("""
select
vin,
gnsstime,
vehsyspwrmod,
vehdoorfrontpas,
vehdoorfrontdrv,
vehdoorrearleft,
vehdoorrearright,
vehbonnet,
vehboot,
vehwindowfrontleft,
vehwindowrearleft,
vehwindowfrontright,
vehwindowrearright,
vehsunroof,
vehcruiseactive,
vehcruiseenabled,
vehseatbeltdrv
from rx5_tbox_parquet_all
order by vin
limit 100002
""").show(1000000)
var end = System.currentTimeMillis()
print("query time: " + (end - start))
start = System.currentTimeMillis()
cc.sql("""
select
gnsstime,
vin,
vehsyspwrmod,
vehdoorfrontpas,
vehdoorfrontdrv,
vehdoorrearleft,
vehdoorrearright,
vehbonnet,
vehboot,
vehwindowfrontleft,
vehwindowrearleft,
vehwindowfrontright,
vehwindowrearright,
vehsunroof,
vehcruiseactive,
vehcruiseenabled,
vehseatbeltdrv
from rx5_tbox_parquet_all
order by gnsstime
limit 100002
""").show(1000000)
end = System.currentTimeMillis()
print("query time: " + (end - start))
}*/
}
}
|
mayunSaicmotor/incubator-carbondata
|
examples/spark/src/main/scala/org/apache/carbondata/examples/saic/SaicNoFilterDescOrderQuery.scala
|
Scala
|
apache-2.0
| 5,033 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package expr
import com.intellij.lang.ASTNode
import com.intellij.psi.{PsiElementVisitor, PsiField, ResolveState}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScClassParameter
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.types.Compatibility.Expression
import org.jetbrains.plugins.scala.lang.psi.types.api.Unit
import org.jetbrains.plugins.scala.lang.psi.types.result.{Success, TypingContext}
import org.jetbrains.plugins.scala.lang.resolve.processor.MethodResolveProcessor
import org.jetbrains.plugins.scala.lang.resolve.{ScalaResolveResult, StdKinds}
import org.jetbrains.plugins.scala.macroAnnotations.{Cached, ModCount}
/**
* @author Alexander Podkhalyuzin
*/
class ScAssignStmtImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScAssignStmt {
override def toString: String = "AssignStatement"
protected override def innerType(ctx: TypingContext) = {
getLExpression match {
case call: ScMethodCall => call.getType(ctx)
case _ =>
resolveAssignment match {
case Some(resolveResult) =>
mirrorMethodCall match {
case Some(call) => call.getType(TypingContext.empty)
case None => Success(Unit, Some(this))
}
case _ => Success(Unit, Some(this))
}
}
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case visitor: ScalaElementVisitor => super.accept(visitor)
case _ => super.accept(visitor)
}
}
@Cached(synchronized = false, ModCount.getBlockModificationCount, this)
def resolveAssignment: Option[ScalaResolveResult] = resolveAssignmentInner(shapeResolve = false)
@Cached(synchronized = false, ModCount.getBlockModificationCount, this)
def shapeResolveAssignment: Option[ScalaResolveResult] = resolveAssignmentInner(shapeResolve = true)
@Cached(synchronized = false, ModCount.getBlockModificationCount, this)
def mirrorMethodCall: Option[ScMethodCall] = {
getLExpression match {
case ref: ScReferenceExpression =>
val text = s"${ref.refName}_=(${getRExpression.map(_.getText).getOrElse("")})"
val mirrorExpr = ScalaPsiElementFactory.createExpressionWithContextFromText(text, getContext, this)
mirrorExpr match {
case call: ScMethodCall =>
call.getInvokedExpr.asInstanceOf[ScReferenceExpression].setupResolveFunctions(
() => resolveAssignment.toArray, () => shapeResolveAssignment.toArray
)
Some(call)
case _ => None
}
case methodCall: ScMethodCall =>
val invokedExpr = methodCall.getInvokedExpr
val text = s"${invokedExpr.getText}.update(${methodCall.args.exprs.map(_.getText).mkString(",")}," +
s" ${getRExpression.map(_.getText).getOrElse("")}"
val mirrorExpr = ScalaPsiElementFactory.createExpressionWithContextFromText(text, getContext, this)
//todo: improve performance: do not re-evaluate resolve to "update" method
mirrorExpr match {
case call: ScMethodCall => Some(call)
case _ => None
}
case _ => None
}
}
private def resolveAssignmentInner(shapeResolve: Boolean): Option[ScalaResolveResult] = {
getLExpression match {
case ref: ScReferenceExpression =>
ref.bind() match {
case Some(r: ScalaResolveResult) =>
ScalaPsiUtil.nameContext(r.element) match {
case v: ScVariable => None
case c: ScClassParameter if c.isVar => None
case f: PsiField => None
case fun: ScFunction if ScalaPsiUtil.isViableForAssignmentFunction(fun) =>
val processor = new MethodResolveProcessor(ref, fun.name + "_=",
getRExpression.map(expr => List(Seq(new Expression(expr)))).getOrElse(Nil), Nil, ref.getPrevTypeInfoParams,
isShapeResolve = shapeResolve, kinds = StdKinds.methodsOnly)
r.fromType match {
case Some(tp) => processor.processType(tp, ref)
case None =>
fun.getContext match {
case d: ScDeclarationSequenceHolder =>
d.processDeclarations(processor, ResolveState.initial(), fun, ref)
case _ =>
}
}
val candidates = processor.candidatesS
if (candidates.size == 1) Some(candidates.toArray.apply(0))
else None
case _ => None
}
case _ => None
}
case _ => None
}
}
}
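// Added note (not part of the original source), illustrating the rewriting performed by
// mirrorMethodCall above on hypothetical user code:
//   - a reference assignment such as `x.prop = 1` is mirrored as the call `prop_=(1)`
//   - an indexed assignment such as `arr(0) = 1` is mirrored as `arr.update(0, 1)`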
|
whorbowicz/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/impl/expr/ScAssignStmtImpl.scala
|
Scala
|
apache-2.0
| 4,875 |
package vggames.shared
import br.com.caelum.vraptor.{Get, Resource, Result}
import br.com.caelum.vraptor.ioc.Component
import br.com.caelum.vraptor.view.Results
import vggames.shared.player.PlayerSession
import vggames.shared.vraptor.RequestData
@Resource
class GameLegacyUrls(data : RequestData, game : Game, result : Result, session : PlayerSession) {
@Get(Array("/play/{gameName}/task/{i}"))
def play(gameName : String, i : Int) = {
result.permanentlyRedirectTo(s"/aprenda/$gameName/task/$i")
result.use(Results.nothing)
}
@Get(Array("/play/{gameName}"))
def play(gameName : String) = {
result.permanentlyRedirectTo(s"/aprenda/$gameName")
result.use(Results.nothing)
}
@Get(Array("/play/{gameName}/resource/{resource}"))
def findGameResource(gameName: String, resource: String) = {
result.permanentlyRedirectTo(s"/aprenda/$gameName/resource/$resource")
result.use(Results.nothing)
}
@Get(Array("/theory/{gameName}"))
def theory(gameName : String) = {
result.permanentlyRedirectTo(s"/aprenda/$gameName")
result.use(Results.nothing)
}
@Get(Array("/reference/{gameName}"))
def reference(gameName : String) = {
result.permanentlyRedirectTo(s"/aprenda/$gameName")
result.use(Results.nothing)
}
}
|
vidageek/games
|
web/src/main/scala/vggames/shared/GameLegacyUrls.scala
|
Scala
|
gpl-3.0
| 1,273 |
package skinny.oauth2.client
import org.apache.oltu.oauth2.common.message.types.{ GrantType => OltuGrantType }
/**
* Grant Type.
*/
case class GrantType(value: String) {
def toOltuEnum(): OltuGrantType = value match {
case v if v == OltuGrantType.AUTHORIZATION_CODE.name() => OltuGrantType.AUTHORIZATION_CODE
case v if v == OltuGrantType.CLIENT_CREDENTIALS.name() => OltuGrantType.CLIENT_CREDENTIALS
case v if v == OltuGrantType.PASSWORD.name() => OltuGrantType.PASSWORD
case v if v == OltuGrantType.REFRESH_TOKEN.name() => OltuGrantType.REFRESH_TOKEN
}
}
object GrantType {
def apply(t: OltuGrantType): GrantType = new GrantType(t.name())
val AuthorizationCode = GrantType(OltuGrantType.AUTHORIZATION_CODE)
val ClientCredentials = GrantType(OltuGrantType.CLIENT_CREDENTIALS)
val Password = GrantType(OltuGrantType.PASSWORD)
val RefreshToken = GrantType(OltuGrantType.REFRESH_TOKEN)
}
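// Hedged usage sketch (added; not part of the original source):
//   GrantType.Password.toOltuEnum()   // expected to yield OltuGrantType.PASSWORD
//   GrantType.RefreshToken.value      // the underlying Oltu enum name as a String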
|
Kuchitama/skinny-framework
|
oauth2/src/main/scala/skinny/oauth2/client/GrantType.scala
|
Scala
|
mit
| 922 |
package special
import org.apache.spark.{SparkContext, SparkConf}
object BasicJoin {
def main (args: Array[String]) {
val conf = new SparkConf().setAppName("HashJoin").setMaster("local[4]")
val sc = new SparkContext(conf)
val smallRDD = sc.parallelize(Seq(("V1", '1'), ("V2", '2')), 4)
val largeRDD = sc.parallelize(Seq(("V1", '1'), ("V1", '2')), 4)
// val joined = largeRDD.join(smallRDD)
val joined = smallRDD.join(largeRDD)
joined.collect().foreach(println)
}
}
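// Added note (not part of the original source): with the inputs above, only the key "V1" appears
// in both RDDs, so the expected join output is ("V1",('1','1')) and ("V1",('1','2')); "V2" has no
// match in largeRDD and is dropped.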
|
chocolateBlack/LearningSpark
|
src/main/scala/special/BasicJoin.scala
|
Scala
|
mit
| 501 |
package com.github.libsml.optimization
import com.github.libsml.math.linalg.Vector
import com.github.libsml.math.function.Function
/**
* Created by huangyu on 15/8/23.
*/
object OptimizerUtils {
val shortFullMap: Map[String, String] = Map("tron" -> "com.github.libsml.optimization.liblinear.Tron",
"Tron" -> "com.github.libsml.optimization.liblinear.Tron",
"FixedPoint" -> "com.github.libsml.feature.engineering.smooth.FixedPointDirichletMultinomial",
"median" -> "com.github.libsml.feature.engineering.smooth.MedianDirichletMultinomial",
"lbfgs" -> "com.github.libsml.optimization.lbfgs.LBFGS",
"fixedPoint" -> "com.github.libsml.feature.engineering.smooth.FixedPointDirichletMultinomial")
private[this] def fullClassName(className: String): String = {
shortFullMap.getOrElse(className, className)
}
// Create an instance of the class with the given name
def instantiateOptimizer(className: String): Optimizer[Vector] = {
instantiateOptimizer(fullClassName(className), Map[String, String]())
}
// Create an instance of the class with the given name
def instantiateOptimizer(_className: String, map: Map[String, String]): Optimizer[Vector] = {
val className = fullClassName(_className)
val cls = Class.forName(className)
try {
cls.getConstructor(classOf[Map[String, String]])
.newInstance(map)
.asInstanceOf[Optimizer[Vector]]
} catch {
case _: NoSuchMethodException =>
cls.getConstructor().newInstance().asInstanceOf[Optimizer[Vector]]
}
}
def instantiateOptimizer(_className: String, weight: Vector): Optimizer[Vector] = {
val className = fullClassName(_className)
val cls = Class.forName(className)
try {
cls.getConstructor(classOf[Vector])
.newInstance(weight)
.asInstanceOf[Optimizer[Vector]]
} catch {
case _: NoSuchMethodException =>
cls.getConstructor().newInstance().asInstanceOf[Optimizer[Vector]].prior(weight)
}
}
def instantiateOptimizer(_className: String, function: Function[Vector]): Optimizer[Vector] = {
val className = fullClassName(_className)
val cls = Class.forName(className)
try {
cls.getConstructor(classOf[Function[Vector]])
.newInstance(function)
.asInstanceOf[Optimizer[Vector]]
} catch {
case _: NoSuchMethodException =>
cls.getConstructor().newInstance().asInstanceOf[Optimizer[Vector]].setFunction(function)
}
}
def instantiateOptimizer(_className: String, weight: Vector, map: Map[String, String]): Optimizer[Vector] = {
val className = fullClassName(_className)
val cls = Class.forName(className)
try {
cls.getConstructor(classOf[Vector], classOf[Map[String, String]])
.newInstance(weight, map)
.asInstanceOf[Optimizer[Vector]]
} catch {
case _: NoSuchMethodException =>
try {
instantiateOptimizer(className, weight)
} catch {
case _: NoSuchMethodException =>
cls.getConstructor(classOf[Map[String, String]])
.newInstance(map)
.asInstanceOf[Optimizer[Vector]].prior(weight)
}
}
}
def instantiateOptimizer(_className: String, weight: Vector, function: Function[Vector]): Optimizer[Vector] = {
val className = fullClassName(_className)
val cls = Class.forName(className)
try {
cls.getConstructor(classOf[Vector], classOf[Function[Vector]])
.newInstance(weight, function)
.asInstanceOf[Optimizer[Vector]]
} catch {
case _: NoSuchMethodException =>
try {
instantiateOptimizer(className, weight).setFunction(function)
} catch {
case _: NoSuchMethodException =>
cls.getConstructor(classOf[Function[Vector]])
.newInstance(function)
.asInstanceOf[Optimizer[Vector]]
}
}
}
def instantiateOptimizer(_className: String, map: Map[String, String], function: Function[Vector]): Optimizer[Vector] = {
val className = fullClassName(_className)
val cls = Class.forName(className)
try {
cls.getConstructor(classOf[Map[String, String]], classOf[Function[Vector]])
.newInstance(map, function)
.asInstanceOf[Optimizer[Vector]]
} catch {
case _: NoSuchMethodException =>
try {
instantiateOptimizer(className, map).setFunction(function)
} catch {
case _: NoSuchMethodException =>
cls.getConstructor(classOf[Function[Vector]])
.newInstance(function)
.asInstanceOf[Optimizer[Vector]]
}
}
}
def instantiateOptimizer(_className: String, weight: Vector, map: Map[String, String], function: Function[Vector]): Optimizer[Vector] = {
val className = fullClassName(_className)
val cls = Class.forName(className)
try {
cls.getConstructor(classOf[Vector], classOf[Map[String, String]], classOf[Function[Vector]])
.newInstance(weight, map, function)
.asInstanceOf[Optimizer[Vector]]
} catch {
case _: NoSuchMethodException =>
try {
instantiateOptimizer(className, weight, map)
} catch {
case _: NoSuchMethodException =>
try {
instantiateOptimizer(className, weight, function)
} catch {
case _: NoSuchMethodException =>
instantiateOptimizer(className, map, function)
}
}
}
}
}
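// Hedged usage sketch (added; not part of the original source); `weight`, `loss` and the map
// entries are hypothetical:
//   val opt = OptimizerUtils.instantiateOptimizer("lbfgs", weight, Map("maxIter" -> "100"), loss)
//   // "lbfgs" is first expanded via shortFullMap to com.github.libsml.optimization.lbfgs.LBFGS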
|
libsml/libsml
|
optimization/src/main/scala/com/github/libsml/optimization/OptimizerUtils.scala
|
Scala
|
apache-2.0
| 5,499 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.ops
import com.intel.analytics.bigdl.dllib.tensor.{DenseType, SparseType, Tensor}
import com.intel.analytics.bigdl.dllib.utils.serializer.ModuleSerializationTest
import com.intel.analytics.bigdl.dllib.utils.{T, Table}
import org.scalatest.{FlatSpec, Matchers}
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
class Kv2TensorSpec extends FlatSpec with Matchers {
protected def randDoubles(length: Int,
lp: Double = 0.0,
up: Double = 1.0): Array[Double] = {
(1 to length).map(_ => lp + (up - lp) * Random.nextDouble()).toArray
}
protected def randKVMap(size: Int,
numActive: Int,
lp: Double = 0.0,
up: Double = 1.0): Map[Int, Double] = {
require(numActive <= size)
val keys = Random.shuffle((0 until size).toList).take(numActive)
val values = randDoubles(numActive, lp, up)
keys.zip(values).toMap
}
val batchLen = 3
val numActive = Array(2, 3, 5)
val feaLen = 8
val originData = new ArrayBuffer[String]()
val originArr = new ArrayBuffer[Table]()
val indices0 = new ArrayBuffer[Int]()
val indices1 = new ArrayBuffer[Int]()
val values = new ArrayBuffer[Double]()
for (i <- 0 until batchLen) {
val kvMap = randKVMap(feaLen, numActive(i))
val kvStr = kvMap.map(data => s"${data._1}:${data._2}").mkString(",")
originData += kvStr
originArr += T(kvStr)
indices0 ++= ArrayBuffer.fill(numActive(i))(i)
val kvArr = kvMap.toArray
indices1 ++= kvArr.map(kv => kv._1)
values ++= kvArr.map(kv => kv._2)
}
val originTable = T.array(originArr.toArray)
val indices = Array(indices0.toArray, indices1.toArray)
val shape = Array(batchLen, feaLen)
"Kv2Tensor operation kvString to SparseTensor" should "work correctly" in {
val input =
T(
Tensor[String](originTable),
Tensor[Int](Array(feaLen), shape = Array[Int]())
)
val expectOutput =
Tensor.sparse[Double](
indices = indices,
values = values.toArray,
shape = shape
)
val output = Kv2Tensor[Double, Double](transType = 1)
.forward(input)
output should be(expectOutput)
}
"Kv2Tensor operation kvString to DenseTensor" should "work correctly" in {
val input =
T(
Tensor[String](originTable),
Tensor[Int](Array(feaLen), shape = Array[Int]())
)
val expectOutput =
Tensor.dense(Tensor.sparse[Double](
indices = indices,
values = values.toArray,
shape = shape
))
val output = Kv2Tensor[Double, Double](transType = 0)
.forward(input)
output should be(expectOutput)
}
}
class Kv2TensorSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val kv2tensor = Kv2Tensor[Float, Float](
kvDelimiter = ",", itemDelimiter = ":", transType = 0
).setName("kv2tensor")
val input = T(
Tensor[String](
T(T("0:0.1,1:0.2"), T("1:0.3,3:0.5"), T("2:0.15,4:0.25"))),
Tensor[Int](Array(5), shape = Array[Int]())
)
runSerializationTest(kv2tensor, input)
}
}
|
intel-analytics/BigDL
|
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/nn/ops/Kv2TensorSpec.scala
|
Scala
|
apache-2.0
| 3,809 |
package io.swagger.client.model
import org.joda.time.DateTime
case class InputFile (
/* Unique identifier for the file. */
id: String,
/* How the file has been generated. */
_type: String,
  /* The source of the file to be used: either an external URL, an identifier for a file uploaded to the server, or an identifier for another job. */
source: String,
/* Filename of the file. */
filename: String,
/* Size of the file in bytes. */
size: Integer,
/* Date and time when the job was created. */
created_at: DateTime,
/* Date and time when the job was last modified. */
modified_at: DateTime)
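// Hedged construction sketch (added; not part of the original source; all values are hypothetical):
//   InputFile(id = "f1", _type = "remote", source = "http://example.com/in.png",
//     filename = "in.png", size = 1024,
//     created_at = DateTime.now(), modified_at = DateTime.now())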
|
onlineconvert/onlineconvert-api-sdk-scala
|
src/main/scala/io/swagger/client/model/InputFile.scala
|
Scala
|
apache-2.0
| 637 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.checkpoint.kafka
import kafka.admin.AdminUtils
import kafka.common.{InvalidMessageSizeException, UnknownTopicOrPartitionException}
import kafka.message.InvalidMessageException
import kafka.server.{KafkaConfig, KafkaServer, ConfigType}
import kafka.utils.{CoreUtils, TestUtils, ZkUtils}
import kafka.integration.KafkaServerTestHarness
import org.apache.kafka.common.security.JaasUtils
import org.apache.kafka.clients.producer.{KafkaProducer, Producer, ProducerConfig, ProducerRecord}
import org.apache.samza.checkpoint.Checkpoint
import org.apache.samza.config.{JobConfig, KafkaProducerConfig, MapConfig}
import org.apache.samza.container.TaskName
import org.apache.samza.container.grouper.stream.GroupByPartitionFactory
import org.apache.samza.serializers.CheckpointSerde
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.util.{ClientUtilTopicMetadataStore, KafkaUtilException, TopicMetadataStore}
import org.apache.samza.{Partition, SamzaException}
import org.junit.Assert._
import org.junit._
import scala.collection.JavaConversions._
import scala.collection._
class TestKafkaCheckpointManager extends KafkaServerTestHarness {
protected def numBrokers: Int = 3
def generateConfigs() = {
val props = TestUtils.createBrokerConfigs(numBrokers, zkConnect, true)
props.map(KafkaConfig.fromProps)
}
val checkpointTopic = "checkpoint-topic"
val serdeCheckpointTopic = "checkpoint-topic-invalid-serde"
val checkpointTopicConfig = KafkaCheckpointManagerFactory.getCheckpointTopicProperties(null)
val zkSecure = JaasUtils.isZkSecurityEnabled()
val partition = new Partition(0)
val partition2 = new Partition(1)
val cp1 = new Checkpoint(Map(new SystemStreamPartition("kafka", "topic", partition) -> "123"))
val cp2 = new Checkpoint(Map(new SystemStreamPartition("kafka", "topic", partition) -> "12345"))
var producerConfig: KafkaProducerConfig = null
var metadataStore: TopicMetadataStore = null
var failOnTopicValidation = true
val systemStreamPartitionGrouperFactoryString = classOf[GroupByPartitionFactory].getCanonicalName
@Before
override def setUp {
super.setUp
TestUtils.waitUntilTrue(() => servers.head.metadataCache.getAliveBrokers.size == numBrokers, "Wait for cache to update")
val config = new java.util.HashMap[String, Object]()
val brokers = brokerList.split(",").map(p => "localhost" + p).mkString(",")
config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokers)
config.put("acks", "all")
config.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1")
config.put(ProducerConfig.RETRIES_CONFIG, (new Integer(java.lang.Integer.MAX_VALUE-1)).toString)
config.putAll(KafkaCheckpointManagerFactory.INJECTED_PRODUCER_PROPERTIES)
producerConfig = new KafkaProducerConfig("kafka", "i001", config)
metadataStore = new ClientUtilTopicMetadataStore(brokers, "some-job-name")
}
@After
override def tearDown() {
if (servers != null) {
servers.foreach(_.shutdown())
servers.foreach(server => CoreUtils.delete(server.config.logDirs))
}
super.tearDown
}
private def writeCheckpoint(taskName: TaskName, checkpoint: Checkpoint, cpTopic: String = checkpointTopic) = {
val producer: Producer[Array[Byte], Array[Byte]] = new KafkaProducer(producerConfig.getProducerProperties)
val record = new ProducerRecord(
cpTopic,
0,
KafkaCheckpointLogKey.getCheckpointKey(taskName).toBytes(),
new CheckpointSerde().toBytes(checkpoint)
)
try {
producer.send(record).get()
} catch {
case e: Exception => println(e.getMessage)
} finally {
producer.close()
}
}
private def createCheckpointTopic(cpTopic: String = checkpointTopic, partNum: Int = 1) = {
val zkClient = ZkUtils(zkConnect, 6000, 6000, zkSecure)
try {
AdminUtils.createTopic(
zkClient,
cpTopic,
partNum,
1,
checkpointTopicConfig)
} catch {
case e: Exception => println(e.getMessage)
} finally {
zkClient.close
}
}
@Test
def testCheckpointShouldBeNullIfCheckpointTopicDoesNotExistShouldBeCreatedOnWriteAndShouldBeReadableAfterWrite {
val kcm = getKafkaCheckpointManager
val taskName = new TaskName(partition.toString)
kcm.register(taskName)
createCheckpointTopic()
kcm.kafkaUtil.validateTopicPartitionCount(checkpointTopic, "kafka", metadataStore, 1)
// check that log compaction is enabled.
val zkClient = ZkUtils(zkConnect, 6000, 6000, zkSecure)
val topicConfig = AdminUtils.fetchEntityConfig(zkClient, ConfigType.Topic, checkpointTopic)
zkClient.close
assertEquals("compact", topicConfig.get("cleanup.policy"))
assertEquals("26214400", topicConfig.get("segment.bytes"))
// read before topic exists should result in a null checkpoint
var readCp = kcm.readLastCheckpoint(taskName)
assertNull(readCp)
// create topic the first time around
writeCheckpoint(taskName, cp1)
readCp = kcm.readLastCheckpoint(taskName)
assertEquals(cp1, readCp)
// should get an exception if partition doesn't exist
try {
readCp = kcm.readLastCheckpoint(new TaskName(new Partition(1).toString))
fail("Expected a SamzaException, since only one partition (partition 0) should exist.")
} catch {
case e: SamzaException => None // expected
case _: Exception => fail("Expected a SamzaException, since only one partition (partition 0) should exist.")
}
// writing a second message should work, too
writeCheckpoint(taskName, cp2)
readCp = kcm.readLastCheckpoint(taskName)
assertEquals(cp2, readCp)
kcm.stop
}
@Test
def testUnrecoverableKafkaErrorShouldThrowKafkaCheckpointManagerException {
val exceptions = List("InvalidMessageException", "InvalidMessageSizeException", "UnknownTopicOrPartitionException")
exceptions.foreach { exceptionName =>
val kcm = getKafkaCheckpointManagerWithInvalidSerde(exceptionName)
val taskName = new TaskName(partition.toString)
kcm.register(taskName)
createCheckpointTopic(serdeCheckpointTopic)
kcm.kafkaUtil.validateTopicPartitionCount(serdeCheckpointTopic, "kafka", metadataStore, 1)
writeCheckpoint(taskName, cp1, serdeCheckpointTopic)
// because serde will throw unrecoverable errors, it should result a KafkaCheckpointException
try {
kcm.readLastCheckpoint(taskName)
fail("Expected a KafkaUtilException.")
} catch {
case e: KafkaUtilException => None
}
kcm.stop
}
}
@Test
def testFailOnTopicValidation {
// first case - default case, we should fail on validation
failOnTopicValidation = true
val checkpointTopic8 = checkpointTopic + "8";
val kcm = getKafkaCheckpointManagerWithParam(checkpointTopic8)
val taskName = new TaskName(partition.toString)
kcm.register(taskName)
createCheckpointTopic(checkpointTopic8, 8) // create topic with the wrong number of partitions
try {
kcm.start
fail("Expected a KafkaUtilException for invalid number of partitions in the topic.")
    } catch {
case e: KafkaUtilException => None
}
kcm.stop
// same validation but ignore the validation error (pass 'false' to validate..)
failOnTopicValidation = false
    val kcm1 = getKafkaCheckpointManagerWithParam(checkpointTopic8)
kcm1.register(taskName)
try {
kcm1.start
    } catch {
case e: KafkaUtilException => fail("Did not expect a KafkaUtilException for invalid number of partitions in the topic.")
}
kcm1.stop
}
private def getKafkaCheckpointManagerWithParam(cpTopic: String) = new KafkaCheckpointManager(
clientId = "some-client-id",
checkpointTopic = cpTopic,
systemName = "kafka",
replicationFactor = 3,
socketTimeout = 30000,
bufferSize = 64 * 1024,
fetchSize = 300 * 1024,
metadataStore = metadataStore,
connectProducer = () => new KafkaProducer(producerConfig.getProducerProperties),
connectZk = () => ZkUtils(zkConnect, 6000, 6000, zkSecure),
systemStreamPartitionGrouperFactoryString = systemStreamPartitionGrouperFactoryString,
failOnCheckpointValidation = failOnTopicValidation,
checkpointTopicProperties = KafkaCheckpointManagerFactory.getCheckpointTopicProperties(new MapConfig(Map[String, String]())))
// CheckpointManager with a specific checkpoint topic
private def getKafkaCheckpointManager = getKafkaCheckpointManagerWithParam(checkpointTopic)
// inject serde. Kafka exceptions will be thrown when serde.fromBytes is called
private def getKafkaCheckpointManagerWithInvalidSerde(exception: String) = new KafkaCheckpointManager(
clientId = "some-client-id-invalid-serde",
checkpointTopic = serdeCheckpointTopic,
systemName = "kafka",
replicationFactor = 3,
socketTimeout = 30000,
bufferSize = 64 * 1024,
fetchSize = 300 * 1024,
metadataStore = metadataStore,
connectProducer = () => new KafkaProducer(producerConfig.getProducerProperties),
connectZk = () => ZkUtils(zkConnect, 6000, 6000, zkSecure),
systemStreamPartitionGrouperFactoryString = systemStreamPartitionGrouperFactoryString,
failOnCheckpointValidation = failOnTopicValidation,
serde = new InvalideSerde(exception),
checkpointTopicProperties = KafkaCheckpointManagerFactory.getCheckpointTopicProperties(new MapConfig(Map[String, String]())))
class InvalideSerde(exception: String) extends CheckpointSerde {
override def fromBytes(bytes: Array[Byte]): Checkpoint = {
exception match {
case "InvalidMessageException" => throw new InvalidMessageException
case "InvalidMessageSizeException" => throw new InvalidMessageSizeException
case "UnknownTopicOrPartitionException" => throw new UnknownTopicOrPartitionException
}
}
}
}
|
nickpan47/samza
|
samza-kafka/src/test/scala/org/apache/samza/checkpoint/kafka/TestKafkaCheckpointManager.scala
|
Scala
|
apache-2.0
| 10,708 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher
import org.junit.{After, Before}
import scala.collection.JavaConverters._
import org.scalatest.junit.JUnitSuite
import collection.Map
import org.neo4j.graphdb._
import org.neo4j.test.ImpermanentGraphDatabase
import org.neo4j.kernel.GraphDatabaseAPI
class GraphDatabaseTestBase extends JUnitSuite {
var graph: GraphDatabaseAPI with Snitch = null
var refNode: Node = null
var nodes: List[Node] = null
@Before
def baseInit() {
graph = new ImpermanentGraphDatabase() with Snitch
refNode = graph.getReferenceNode
}
@After
def cleanUp() {
if (graph != null) graph.shutdown()
}
def indexNode(n: Node, idxName: String, key: String, value: String) {
inTx(() => n.getGraphDatabase.index.forNodes(idxName).add(n, key, value))
}
def indexRel(r: Relationship, idxName: String, key: String, value: String) {
inTx(() => r.getGraphDatabase.index.forRelationships(idxName).add(r, key, value))
}
def createNode(): Node = createNode(Map[String, Any]())
def createNode(name: String): Node = createNode(Map[String, Any]("name" -> name))
def createNode(props: Map[String, Any]): Node = {
inTx(() => {
val node = graph.createNode()
props.foreach((kv) => node.setProperty(kv._1, kv._2))
node
}).asInstanceOf[Node]
}
def createNode(values: (String, Any)*): Node = createNode(values.toMap)
def inTx[T](f: () => T): T = {
val tx = graph.beginTx
val result = f.apply()
tx.success()
tx.finish()
result
}
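  // Example usage (illustrative): inTx commits the transaction around the supplied function,
  // so graph mutations can be done in a single expression, e.g.
  //   val n = inTx(() => graph.createNode())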
def nodeIds = nodes.map(_.getId).toArray
def relate(a: Node, b: Node): Relationship = relate(a, b, "REL")
def relate(a: Node, b: Node, pk:(String,Any)*): Relationship = relate(a, b, "REL", pk.toMap)
def relate(n1: Node, n2: Node, relType: String, name: String): Relationship = relate(n1, n2, relType, Map("name" -> name))
def relate(a: Node, b: Node, c: Node*) {
(Seq(a, b) ++ c).reduce((n1, n2) => {
relate(n1, n2)
n2
})
}
def relate(n1: Node, n2: Node, relType: String, props: Map[String, Any] = Map()): Relationship = {
inTx(() => {
val r = n1.createRelationshipTo(n2, DynamicRelationshipType.withName(relType))
props.foreach((kv) => r.setProperty(kv._1, kv._2))
r
})
}
def relate(x: ((String, String), String)): Relationship = inTx(() => {
x match {
case ((from, relType), to) => {
val f = node(from)
val t = node(to)
f.createRelationshipTo(t, DynamicRelationshipType.withName(relType))
}
}
})
def node(name: String): Node = nodes.find(_.getProperty("name") == name).get
def relType(name: String): RelationshipType = graph.getRelationshipTypes.asScala.find(_.name() == name).get
def createNodes(names: String*): List[Node] = {
nodes = names.map(x => createNode(Map("name" -> x))).toList
nodes
}
def createDiamond(): (Node, Node, Node, Node) = {
// Graph:
// (a)
// / \\
// v v
// (b) (c)
// \\ /
// v v
// (d)
val a = createNode("a")
val b = createNode("b")
val c = createNode("c")
val d = createNode("d")
relate(a, b)
relate(b, d)
relate(a, c)
relate(c, d)
(a, b, c, d)
}
}
trait Snitch extends GraphDatabaseService {
val createdNodes = collection.mutable.Queue[Node]()
abstract override def createNode(): Node = {
val n = super.createNode()
createdNodes.enqueue(n)
n
}
}
|
dksaputra/community
|
cypher/src/test/scala/org/neo4j/cypher/GraphDatabaseTestBase.scala
|
Scala
|
gpl-3.0
| 4,307 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.javaapi.message
import org.junit.Assert._
import org.scalatest.junit.JUnitSuite
import org.junit.Test
import kafka.message.{CompressionCodec, DefaultCompressionCodec, Message, NoCompressionCodec}
import org.apache.kafka.test.TestUtils
import scala.collection.JavaConverters._
trait BaseMessageSetTestCases extends JUnitSuite {
val messages = Array(new Message("abcd".getBytes()), new Message("efgh".getBytes()))
def createMessageSet(messages: Seq[Message], compressed: CompressionCodec = NoCompressionCodec): MessageSet
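  // Illustrative note: concrete test classes implement createMessageSet, typically by wrapping
  // `messages` in a concrete MessageSet implementation (for example a ByteBufferMessageSet)
  // built with the requested compression codec.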
@Test
def testWrittenEqualsRead(): Unit = {
val messageSet = createMessageSet(messages)
assertEquals(messages.toSeq, messageSet.asScala.map(m => m.message))
}
@Test
def testIteratorIsConsistent() {
val m = createMessageSet(messages)
// two iterators over the same set should give the same results
TestUtils.checkEquals(m, m)
}
@Test
def testIteratorIsConsistentWithCompression() {
val m = createMessageSet(messages, DefaultCompressionCodec)
// two iterators over the same set should give the same results
TestUtils.checkEquals(m, m)
}
@Test
def testSizeInBytes() {
assertEquals("Empty message set should have 0 bytes.",
0,
createMessageSet(Array[Message]()).sizeInBytes)
assertEquals("Predicted size should equal actual size.",
kafka.message.MessageSet.messageSetSize(messages),
createMessageSet(messages).sizeInBytes)
}
@Test
def testSizeInBytesWithCompression () {
assertEquals("Empty message set should have 0 bytes.",
0, // overhead of the GZIP output stream
createMessageSet(Array[Message](), DefaultCompressionCodec).sizeInBytes)
}
}
|
themarkypantz/kafka
|
core/src/test/scala/unit/kafka/javaapi/message/BaseMessageSetTestCases.scala
|
Scala
|
apache-2.0
| 2,579 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution.{BinaryExecNode, CodegenSupport,
ExternalAppendOnlyUnsafeRowArray, RowIterator, SparkPlan}
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics}
import org.apache.spark.util.collection.BitSet
/**
* Performs a sort merge join of two child relations.
*/
case class SortMergeJoinExec(
leftKeys: Seq[Expression],
rightKeys: Seq[Expression],
joinType: JoinType,
condition: Option[Expression],
left: SparkPlan,
right: SparkPlan) extends BinaryExecNode with CodegenSupport {
override lazy val metrics = Map(
"numOutputRows" -> SQLMetrics.createMetric(sparkContext, "number of output rows"))
override def output: Seq[Attribute] = {
joinType match {
case _: InnerLike =>
left.output ++ right.output
case LeftOuter =>
left.output ++ right.output.map(_.withNullability(true))
case RightOuter =>
left.output.map(_.withNullability(true)) ++ right.output
case FullOuter =>
(left.output ++ right.output).map(_.withNullability(true))
case j: ExistenceJoin =>
left.output :+ j.exists
case LeftExistence(_) =>
left.output
case x =>
throw new IllegalArgumentException(
s"${getClass.getSimpleName} should not take $x as the JoinType")
}
}
override def outputPartitioning: Partitioning = joinType match {
case _: InnerLike =>
PartitioningCollection(Seq(left.outputPartitioning, right.outputPartitioning))
// For left and right outer joins, the output is partitioned by the streamed input's join keys.
case LeftOuter => left.outputPartitioning
case RightOuter => right.outputPartitioning
case FullOuter => UnknownPartitioning(left.outputPartitioning.numPartitions)
case LeftExistence(_) => left.outputPartitioning
case x =>
throw new IllegalArgumentException(
s"${getClass.getSimpleName} should not take $x as the JoinType")
}
override def requiredChildDistribution: Seq[Distribution] =
ClusteredDistribution(leftKeys) :: ClusteredDistribution(rightKeys) :: Nil
override def outputOrdering: Seq[SortOrder] = joinType match {
// For inner join, orders of both sides keys should be kept.
case _: InnerLike =>
val leftKeyOrdering = getKeyOrdering(leftKeys, left.outputOrdering)
val rightKeyOrdering = getKeyOrdering(rightKeys, right.outputOrdering)
leftKeyOrdering.zip(rightKeyOrdering).map { case (lKey, rKey) =>
// Also add the right key and its `sameOrderExpressions`
SortOrder(lKey.child, Ascending, lKey.sameOrderExpressions + rKey.child ++ rKey
.sameOrderExpressions)
}
// For left and right outer joins, the output is ordered by the streamed input's join keys.
case LeftOuter => getKeyOrdering(leftKeys, left.outputOrdering)
case RightOuter => getKeyOrdering(rightKeys, right.outputOrdering)
// There are null rows in both streams, so there is no order.
case FullOuter => Nil
case LeftExistence(_) => getKeyOrdering(leftKeys, left.outputOrdering)
case x =>
throw new IllegalArgumentException(
s"${getClass.getSimpleName} should not take $x as the JoinType")
}
/**
* The utility method to get output ordering for left or right side of the join.
*
   * Returns the required ordering for the left or right child if childOutputOrdering does not
   * satisfy the required ordering; otherwise (i.e. the child does not need to be sorted again),
   * returns the required ordering for this child with the extra "sameOrderExpressions" from
   * the child's outputOrdering.
*/
private def getKeyOrdering(keys: Seq[Expression], childOutputOrdering: Seq[SortOrder])
: Seq[SortOrder] = {
val requiredOrdering = requiredOrders(keys)
if (SortOrder.orderingSatisfies(childOutputOrdering, requiredOrdering)) {
keys.zip(childOutputOrdering).map { case (key, childOrder) =>
SortOrder(key, Ascending, childOrder.sameOrderExpressions + childOrder.child - key)
}
} else {
requiredOrdering
}
}
override def requiredChildOrdering: Seq[Seq[SortOrder]] =
requiredOrders(leftKeys) :: requiredOrders(rightKeys) :: Nil
private def requiredOrders(keys: Seq[Expression]): Seq[SortOrder] = {
// This must be ascending in order to agree with the `keyOrdering` defined in `doExecute()`.
keys.map(SortOrder(_, Ascending))
}
private def createLeftKeyGenerator(): Projection =
UnsafeProjection.create(leftKeys, left.output)
private def createRightKeyGenerator(): Projection =
UnsafeProjection.create(rightKeys, right.output)
private def getSpillThreshold: Int = {
sqlContext.conf.sortMergeJoinExecBufferSpillThreshold
}
private def getInMemoryThreshold: Int = {
sqlContext.conf.sortMergeJoinExecBufferInMemoryThreshold
}
protected override def doExecute(): RDD[InternalRow] = {
val numOutputRows = longMetric("numOutputRows")
val spillThreshold = getSpillThreshold
val inMemoryThreshold = getInMemoryThreshold
left.execute().zipPartitions(right.execute()) { (leftIter, rightIter) =>
val boundCondition: (InternalRow) => Boolean = {
condition.map { cond =>
newPredicate(cond, left.output ++ right.output).eval _
}.getOrElse {
(r: InternalRow) => true
}
}
// An ordering that can be used to compare keys from both sides.
val keyOrdering = newNaturalAscendingOrdering(leftKeys.map(_.dataType))
val resultProj: InternalRow => InternalRow = UnsafeProjection.create(output, output)
joinType match {
case _: InnerLike =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] var currentRightMatches: ExternalAppendOnlyUnsafeRowArray = _
private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold
)
private[this] val joinRow = new JoinedRow
if (smjScanner.findNextInnerJoinRows()) {
currentRightMatches = smjScanner.getBufferedMatches
currentLeftRow = smjScanner.getStreamedRow
rightMatchesIterator = currentRightMatches.generateIterator()
}
override def advanceNext(): Boolean = {
while (rightMatchesIterator != null) {
if (!rightMatchesIterator.hasNext) {
if (smjScanner.findNextInnerJoinRows()) {
currentRightMatches = smjScanner.getBufferedMatches
currentLeftRow = smjScanner.getStreamedRow
rightMatchesIterator = currentRightMatches.generateIterator()
} else {
currentRightMatches = null
currentLeftRow = null
rightMatchesIterator = null
return false
}
}
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
numOutputRows += 1
return true
}
}
false
}
override def getRow: InternalRow = resultProj(joinRow)
}.toScala
case LeftOuter =>
val smjScanner = new SortMergeJoinScanner(
streamedKeyGenerator = createLeftKeyGenerator(),
bufferedKeyGenerator = createRightKeyGenerator(),
keyOrdering,
streamedIter = RowIterator.fromScala(leftIter),
bufferedIter = RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold
)
val rightNullRow = new GenericInternalRow(right.output.length)
new LeftOuterIterator(
smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows).toScala
case RightOuter =>
val smjScanner = new SortMergeJoinScanner(
streamedKeyGenerator = createRightKeyGenerator(),
bufferedKeyGenerator = createLeftKeyGenerator(),
keyOrdering,
streamedIter = RowIterator.fromScala(rightIter),
bufferedIter = RowIterator.fromScala(leftIter),
inMemoryThreshold,
spillThreshold
)
val leftNullRow = new GenericInternalRow(left.output.length)
new RightOuterIterator(
smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows).toScala
case FullOuter =>
val leftNullRow = new GenericInternalRow(left.output.length)
val rightNullRow = new GenericInternalRow(right.output.length)
val smjScanner = new SortMergeFullOuterJoinScanner(
leftKeyGenerator = createLeftKeyGenerator(),
rightKeyGenerator = createRightKeyGenerator(),
keyOrdering,
leftIter = RowIterator.fromScala(leftIter),
rightIter = RowIterator.fromScala(rightIter),
boundCondition,
leftNullRow,
rightNullRow)
new FullOuterIterator(
smjScanner,
resultProj,
numOutputRows).toScala
case LeftSemi =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold
)
private[this] val joinRow = new JoinedRow
override def advanceNext(): Boolean = {
while (smjScanner.findNextInnerJoinRows()) {
val currentRightMatches = smjScanner.getBufferedMatches
currentLeftRow = smjScanner.getStreamedRow
if (currentRightMatches != null && currentRightMatches.length > 0) {
val rightMatchesIterator = currentRightMatches.generateIterator()
while (rightMatchesIterator.hasNext) {
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
numOutputRows += 1
return true
}
}
}
}
false
}
override def getRow: InternalRow = currentLeftRow
}.toScala
case LeftAnti =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold
)
private[this] val joinRow = new JoinedRow
override def advanceNext(): Boolean = {
while (smjScanner.findNextOuterJoinRows()) {
currentLeftRow = smjScanner.getStreamedRow
val currentRightMatches = smjScanner.getBufferedMatches
if (currentRightMatches == null || currentRightMatches.length == 0) {
numOutputRows += 1
return true
}
var found = false
val rightMatchesIterator = currentRightMatches.generateIterator()
while (!found && rightMatchesIterator.hasNext) {
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
found = true
}
}
if (!found) {
numOutputRows += 1
return true
}
}
false
}
override def getRow: InternalRow = currentLeftRow
}.toScala
case j: ExistenceJoin =>
new RowIterator {
private[this] var currentLeftRow: InternalRow = _
private[this] val result: InternalRow = new GenericInternalRow(Array[Any](null))
private[this] val smjScanner = new SortMergeJoinScanner(
createLeftKeyGenerator(),
createRightKeyGenerator(),
keyOrdering,
RowIterator.fromScala(leftIter),
RowIterator.fromScala(rightIter),
inMemoryThreshold,
spillThreshold
)
private[this] val joinRow = new JoinedRow
override def advanceNext(): Boolean = {
while (smjScanner.findNextOuterJoinRows()) {
currentLeftRow = smjScanner.getStreamedRow
val currentRightMatches = smjScanner.getBufferedMatches
var found = false
if (currentRightMatches != null && currentRightMatches.length > 0) {
val rightMatchesIterator = currentRightMatches.generateIterator()
while (!found && rightMatchesIterator.hasNext) {
joinRow(currentLeftRow, rightMatchesIterator.next())
if (boundCondition(joinRow)) {
found = true
}
}
}
result.setBoolean(0, found)
numOutputRows += 1
return true
}
false
}
override def getRow: InternalRow = resultProj(joinRow(currentLeftRow, result))
}.toScala
case x =>
throw new IllegalArgumentException(
s"SortMergeJoin should not take $x as the JoinType")
}
}
}
override def supportCodegen: Boolean = {
joinType.isInstanceOf[InnerLike]
}
override def inputRDDs(): Seq[RDD[InternalRow]] = {
left.execute() :: right.execute() :: Nil
}
private def createJoinKey(
ctx: CodegenContext,
row: String,
keys: Seq[Expression],
input: Seq[Attribute]): Seq[ExprCode] = {
ctx.INPUT_ROW = row
ctx.currentVars = null
keys.map(BindReferences.bindReference(_, input).genCode(ctx))
}
private def copyKeys(ctx: CodegenContext, vars: Seq[ExprCode]): Seq[ExprCode] = {
vars.zipWithIndex.map { case (ev, i) =>
ctx.addBufferedState(leftKeys(i).dataType, "value", ev.value)
}
}
private def genComparison(ctx: CodegenContext, a: Seq[ExprCode], b: Seq[ExprCode]): String = {
val comparisons = a.zip(b).zipWithIndex.map { case ((l, r), i) =>
s"""
|if (comp == 0) {
| comp = ${ctx.genComp(leftKeys(i).dataType, l.value, r.value)};
|}
""".stripMargin.trim
}
s"""
|comp = 0;
|${comparisons.mkString("\\n")}
""".stripMargin
}
/**
* Generate a function to scan both left and right to find a match, returns the term for
* matched one row from left side and buffered rows from right side.
*/
private def genScanner(ctx: CodegenContext): (String, String) = {
// Create class member for next row from both sides.
// Inline mutable state since not many join operations in a task
val leftRow = ctx.addMutableState("InternalRow", "leftRow", forceInline = true)
val rightRow = ctx.addMutableState("InternalRow", "rightRow", forceInline = true)
// Create variables for join keys from both sides.
val leftKeyVars = createJoinKey(ctx, leftRow, leftKeys, left.output)
val leftAnyNull = leftKeyVars.map(_.isNull).mkString(" || ")
val rightKeyTmpVars = createJoinKey(ctx, rightRow, rightKeys, right.output)
val rightAnyNull = rightKeyTmpVars.map(_.isNull).mkString(" || ")
// Copy the right key as class members so they could be used in next function call.
val rightKeyVars = copyKeys(ctx, rightKeyTmpVars)
// A list to hold all matched rows from right side.
val clsName = classOf[ExternalAppendOnlyUnsafeRowArray].getName
val spillThreshold = getSpillThreshold
val inMemoryThreshold = getInMemoryThreshold
// Inline mutable state since not many join operations in a task
val matches = ctx.addMutableState(clsName, "matches",
v => s"$v = new $clsName($inMemoryThreshold, $spillThreshold);", forceInline = true)
// Copy the left keys as class members so they could be used in next function call.
val matchedKeyVars = copyKeys(ctx, leftKeyVars)
ctx.addNewFunction("findNextInnerJoinRows",
s"""
|private boolean findNextInnerJoinRows(
| scala.collection.Iterator leftIter,
| scala.collection.Iterator rightIter) {
| $leftRow = null;
| int comp = 0;
| while ($leftRow == null) {
| if (!leftIter.hasNext()) return false;
| $leftRow = (InternalRow) leftIter.next();
| ${leftKeyVars.map(_.code).mkString("\\n")}
| if ($leftAnyNull) {
| $leftRow = null;
| continue;
| }
| if (!$matches.isEmpty()) {
| ${genComparison(ctx, leftKeyVars, matchedKeyVars)}
| if (comp == 0) {
| return true;
| }
| $matches.clear();
| }
|
| do {
| if ($rightRow == null) {
| if (!rightIter.hasNext()) {
| ${matchedKeyVars.map(_.code).mkString("\\n")}
| return !$matches.isEmpty();
| }
| $rightRow = (InternalRow) rightIter.next();
| ${rightKeyTmpVars.map(_.code).mkString("\\n")}
| if ($rightAnyNull) {
| $rightRow = null;
| continue;
| }
| ${rightKeyVars.map(_.code).mkString("\\n")}
| }
| ${genComparison(ctx, leftKeyVars, rightKeyVars)}
| if (comp > 0) {
| $rightRow = null;
| } else if (comp < 0) {
| if (!$matches.isEmpty()) {
| ${matchedKeyVars.map(_.code).mkString("\\n")}
| return true;
| }
| $leftRow = null;
| } else {
| $matches.add((UnsafeRow) $rightRow);
       |            $rightRow = null;
| }
| } while ($leftRow != null);
| }
| return false; // unreachable
|}
""".stripMargin, inlineToOuterClass = true)
(leftRow, matches)
}
/**
* Creates variables and declarations for left part of result row.
*
   * In order to defer the access until after the condition and to access each column only once
   * in the loop, the variables are declared separately from the code that reads the columns, so
   * we can't use the codegen of BoundReference here.
*/
private def createLeftVars(ctx: CodegenContext, leftRow: String): (Seq[ExprCode], Seq[String]) = {
ctx.INPUT_ROW = leftRow
left.output.zipWithIndex.map { case (a, i) =>
val value = ctx.freshName("value")
val valueCode = ctx.getValue(leftRow, a.dataType, i.toString)
val javaType = ctx.javaType(a.dataType)
val defaultValue = ctx.defaultValue(a.dataType)
if (a.nullable) {
val isNull = ctx.freshName("isNull")
val code =
s"""
|$isNull = $leftRow.isNullAt($i);
|$value = $isNull ? $defaultValue : ($valueCode);
""".stripMargin
val leftVarsDecl =
s"""
|boolean $isNull = false;
|$javaType $value = $defaultValue;
""".stripMargin
(ExprCode(code, isNull, value), leftVarsDecl)
} else {
val code = s"$value = $valueCode;"
val leftVarsDecl = s"""$javaType $value = $defaultValue;"""
(ExprCode(code, "false", value), leftVarsDecl)
}
}.unzip
}
/**
   * Creates the variables for the right part of the result row, using BoundReference, since the
   * right part is accessed inside the loop.
*/
private def createRightVar(ctx: CodegenContext, rightRow: String): Seq[ExprCode] = {
ctx.INPUT_ROW = rightRow
right.output.zipWithIndex.map { case (a, i) =>
BoundReference(i, a.dataType, a.nullable).genCode(ctx)
}
}
/**
   * Splits variables based on whether they are used by the condition or not, and returns the
   * code to create these variables before the condition and after the condition.
   *
   * If only a few columns are used by the condition, we can skip accessing the columns that are
   * not needed by the condition for the rows it filters out.
*/
private def splitVarsByCondition(
attributes: Seq[Attribute],
variables: Seq[ExprCode]): (String, String) = {
if (condition.isDefined) {
val condRefs = condition.get.references
val (used, notUsed) = attributes.zip(variables).partition{ case (a, ev) =>
condRefs.contains(a)
}
val beforeCond = evaluateVariables(used.map(_._2))
val afterCond = evaluateVariables(notUsed.map(_._2))
(beforeCond, afterCond)
} else {
(evaluateVariables(variables), "")
}
}
override def needCopyResult: Boolean = true
override def doProduce(ctx: CodegenContext): String = {
// Inline mutable state since not many join operations in a task
val leftInput = ctx.addMutableState("scala.collection.Iterator", "leftInput",
v => s"$v = inputs[0];", forceInline = true)
val rightInput = ctx.addMutableState("scala.collection.Iterator", "rightInput",
v => s"$v = inputs[1];", forceInline = true)
val (leftRow, matches) = genScanner(ctx)
// Create variables for row from both sides.
val (leftVars, leftVarDecl) = createLeftVars(ctx, leftRow)
val rightRow = ctx.freshName("rightRow")
val rightVars = createRightVar(ctx, rightRow)
val iterator = ctx.freshName("iterator")
val numOutput = metricTerm(ctx, "numOutputRows")
val (beforeLoop, condCheck) = if (condition.isDefined) {
// Split the code of creating variables based on whether it's used by condition or not.
val loaded = ctx.freshName("loaded")
val (leftBefore, leftAfter) = splitVarsByCondition(left.output, leftVars)
val (rightBefore, rightAfter) = splitVarsByCondition(right.output, rightVars)
// Generate code for condition
ctx.currentVars = leftVars ++ rightVars
val cond = BindReferences.bindReference(condition.get, output).genCode(ctx)
// evaluate the columns those used by condition before loop
val before = s"""
|boolean $loaded = false;
|$leftBefore
""".stripMargin
val checking = s"""
|$rightBefore
|${cond.code}
|if (${cond.isNull} || !${cond.value}) continue;
|if (!$loaded) {
| $loaded = true;
| $leftAfter
|}
|$rightAfter
""".stripMargin
(before, checking)
} else {
(evaluateVariables(leftVars), "")
}
s"""
|while (findNextInnerJoinRows($leftInput, $rightInput)) {
| ${leftVarDecl.mkString("\\n")}
| ${beforeLoop.trim}
| scala.collection.Iterator<UnsafeRow> $iterator = $matches.generateIterator();
| while ($iterator.hasNext()) {
| InternalRow $rightRow = (InternalRow) $iterator.next();
| ${condCheck.trim}
| $numOutput.add(1);
| ${consume(ctx, leftVars ++ rightVars)}
| }
| if (shouldStop()) return;
|}
""".stripMargin
}
}
/**
* Helper class that is used to implement [[SortMergeJoinExec]].
*
* To perform an inner (outer) join, users of this class call [[findNextInnerJoinRows()]]
* ([[findNextOuterJoinRows()]]), which returns `true` if a result has been produced and `false`
* otherwise. If a result has been produced, then the caller may call [[getStreamedRow]] to return
* the matching row from the streamed input and may call [[getBufferedMatches]] to return the
* sequence of matching rows from the buffered input (in the case of an outer join, this will return
* an empty sequence if there are no matches from the buffered input). For efficiency, both of these
* methods return mutable objects which are re-used across calls to the `findNext*JoinRows()`
* methods.
*
* @param streamedKeyGenerator a projection that produces join keys from the streamed input.
* @param bufferedKeyGenerator a projection that produces join keys from the buffered input.
* @param keyOrdering an ordering which can be used to compare join keys.
* @param streamedIter an input whose rows will be streamed.
* @param bufferedIter an input whose rows will be buffered to construct sequences of rows that
* have the same join key.
* @param inMemoryThreshold Threshold for number of rows guaranteed to be held in memory by
* internal buffer
* @param spillThreshold Threshold for number of rows to be spilled by internal buffer
*/
private[joins] class SortMergeJoinScanner(
streamedKeyGenerator: Projection,
bufferedKeyGenerator: Projection,
keyOrdering: Ordering[InternalRow],
streamedIter: RowIterator,
bufferedIter: RowIterator,
inMemoryThreshold: Int,
spillThreshold: Int) {
private[this] var streamedRow: InternalRow = _
private[this] var streamedRowKey: InternalRow = _
private[this] var bufferedRow: InternalRow = _
// Note: this is guaranteed to never have any null columns:
private[this] var bufferedRowKey: InternalRow = _
/**
* The join key for the rows buffered in `bufferedMatches`, or null if `bufferedMatches` is empty
*/
private[this] var matchJoinKey: InternalRow = _
/** Buffered rows from the buffered side of the join. This is empty if there are no matches. */
private[this] val bufferedMatches =
new ExternalAppendOnlyUnsafeRowArray(inMemoryThreshold, spillThreshold)
// Initialization (note: do _not_ want to advance streamed here).
advancedBufferedToRowWithNullFreeJoinKey()
// --- Public methods ---------------------------------------------------------------------------
def getStreamedRow: InternalRow = streamedRow
def getBufferedMatches: ExternalAppendOnlyUnsafeRowArray = bufferedMatches
/**
* Advances both input iterators, stopping when we have found rows with matching join keys.
* @return true if matching rows have been found and false otherwise. If this returns true, then
* [[getStreamedRow]] and [[getBufferedMatches]] can be called to construct the join
* results.
*/
final def findNextInnerJoinRows(): Boolean = {
while (advancedStreamed() && streamedRowKey.anyNull) {
// Advance the streamed side of the join until we find the next row whose join key contains
// no nulls or we hit the end of the streamed iterator.
}
if (streamedRow == null) {
// We have consumed the entire streamed iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
// The new streamed row has the same join key as the previous row, so return the same matches.
true
} else if (bufferedRow == null) {
// The streamed row's join key does not match the current batch of buffered rows and there are
// no more rows to read from the buffered iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
// Advance both the streamed and buffered iterators to find the next pair of matching rows.
var comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
do {
if (streamedRowKey.anyNull) {
advancedStreamed()
} else {
assert(!bufferedRowKey.anyNull)
comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
if (comp > 0) advancedBufferedToRowWithNullFreeJoinKey()
else if (comp < 0) advancedStreamed()
}
} while (streamedRow != null && bufferedRow != null && comp != 0)
if (streamedRow == null || bufferedRow == null) {
        // We have hit the end of one of the iterators, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
        // The streamed row's join key matches the current buffered row's join key, so walk
        // through the buffered iterator to buffer the rest of the matching rows.
assert(comp == 0)
bufferMatchingRows()
true
}
}
}
/**
* Advances the streamed input iterator and buffers all rows from the buffered input that
* have matching keys.
* @return true if the streamed iterator returned a row, false otherwise. If this returns true,
* then [[getStreamedRow]] and [[getBufferedMatches]] can be called to produce the outer
* join results.
*/
final def findNextOuterJoinRows(): Boolean = {
if (!advancedStreamed()) {
// We have consumed the entire streamed iterator, so there can be no more matches.
matchJoinKey = null
bufferedMatches.clear()
false
} else {
if (matchJoinKey != null && keyOrdering.compare(streamedRowKey, matchJoinKey) == 0) {
// Matches the current group, so do nothing.
} else {
// The streamed row does not match the current group.
matchJoinKey = null
bufferedMatches.clear()
if (bufferedRow != null && !streamedRowKey.anyNull) {
// The buffered iterator could still contain matching rows, so we'll need to walk through
// it until we either find matches or pass where they would be found.
var comp = 1
do {
comp = keyOrdering.compare(streamedRowKey, bufferedRowKey)
} while (comp > 0 && advancedBufferedToRowWithNullFreeJoinKey())
if (comp == 0) {
// We have found matches, so buffer them (this updates matchJoinKey)
bufferMatchingRows()
} else {
// We have overshot the position where the row would be found, hence no matches.
}
}
}
// If there is a streamed input then we always return true
true
}
}
// --- Private methods --------------------------------------------------------------------------
/**
* Advance the streamed iterator and compute the new row's join key.
* @return true if the streamed iterator returned a row and false otherwise.
*/
private def advancedStreamed(): Boolean = {
if (streamedIter.advanceNext()) {
streamedRow = streamedIter.getRow
streamedRowKey = streamedKeyGenerator(streamedRow)
true
} else {
streamedRow = null
streamedRowKey = null
false
}
}
/**
* Advance the buffered iterator until we find a row with join key that does not contain nulls.
* @return true if the buffered iterator returned a row and false otherwise.
*/
private def advancedBufferedToRowWithNullFreeJoinKey(): Boolean = {
var foundRow: Boolean = false
while (!foundRow && bufferedIter.advanceNext()) {
bufferedRow = bufferedIter.getRow
bufferedRowKey = bufferedKeyGenerator(bufferedRow)
foundRow = !bufferedRowKey.anyNull
}
if (!foundRow) {
bufferedRow = null
bufferedRowKey = null
false
} else {
true
}
}
/**
* Called when the streamed and buffered join keys match in order to buffer the matching rows.
*/
private def bufferMatchingRows(): Unit = {
assert(streamedRowKey != null)
assert(!streamedRowKey.anyNull)
assert(bufferedRowKey != null)
assert(!bufferedRowKey.anyNull)
assert(keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
// This join key may have been produced by a mutable projection, so we need to make a copy:
matchJoinKey = streamedRowKey.copy()
bufferedMatches.clear()
do {
bufferedMatches.add(bufferedRow.asInstanceOf[UnsafeRow])
advancedBufferedToRowWithNullFreeJoinKey()
} while (bufferedRow != null && keyOrdering.compare(streamedRowKey, bufferedRowKey) == 0)
}
}
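// Illustrative sketch of the calling protocol described in the scaladoc above, assuming
// `scanner` is a SortMergeJoinScanner built from two sorted RowIterators (as in
// SortMergeJoinExec.doExecute):
//
//   while (scanner.findNextInnerJoinRows()) {
//     val streamed = scanner.getStreamedRow
//     val matches = scanner.getBufferedMatches.generateIterator()
//     while (matches.hasNext) {
//       // join `streamed` with matches.next(), evaluate the bound condition, emit the row
//     }
//   }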
/**
* An iterator for outputting rows in left outer join.
*/
private class LeftOuterIterator(
smjScanner: SortMergeJoinScanner,
rightNullRow: InternalRow,
boundCondition: InternalRow => Boolean,
resultProj: InternalRow => InternalRow,
numOutputRows: SQLMetric)
extends OneSideOuterIterator(
smjScanner, rightNullRow, boundCondition, resultProj, numOutputRows) {
protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
}
/**
* An iterator for outputting rows in right outer join.
*/
private class RightOuterIterator(
smjScanner: SortMergeJoinScanner,
leftNullRow: InternalRow,
boundCondition: InternalRow => Boolean,
resultProj: InternalRow => InternalRow,
numOutputRows: SQLMetric)
extends OneSideOuterIterator(smjScanner, leftNullRow, boundCondition, resultProj, numOutputRows) {
protected override def setStreamSideOutput(row: InternalRow): Unit = joinedRow.withRight(row)
protected override def setBufferedSideOutput(row: InternalRow): Unit = joinedRow.withLeft(row)
}
/**
* An abstract iterator for sharing code between [[LeftOuterIterator]] and [[RightOuterIterator]].
*
* Each [[OneSideOuterIterator]] has a streamed side and a buffered side. Each row on the
* streamed side will output 0 or many rows, one for each matching row on the buffered side.
* If there are no matches, then the buffered side of the joined output will be a null row.
*
* In left outer join, the left is the streamed side and the right is the buffered side.
* In right outer join, the right is the streamed side and the left is the buffered side.
*
* @param smjScanner a scanner that streams rows and buffers any matching rows
* @param bufferedSideNullRow the default row to return when a streamed row has no matches
* @param boundCondition an additional filter condition for buffered rows
* @param resultProj how the output should be projected
* @param numOutputRows an accumulator metric for the number of rows output
*/
private abstract class OneSideOuterIterator(
smjScanner: SortMergeJoinScanner,
bufferedSideNullRow: InternalRow,
boundCondition: InternalRow => Boolean,
resultProj: InternalRow => InternalRow,
numOutputRows: SQLMetric) extends RowIterator {
// A row to store the joined result, reused many times
protected[this] val joinedRow: JoinedRow = new JoinedRow()
// Index of the buffered rows, reset to 0 whenever we advance to a new streamed row
private[this] var rightMatchesIterator: Iterator[UnsafeRow] = null
// This iterator is initialized lazily so there should be no matches initially
assert(smjScanner.getBufferedMatches.length == 0)
// Set output methods to be overridden by subclasses
protected def setStreamSideOutput(row: InternalRow): Unit
protected def setBufferedSideOutput(row: InternalRow): Unit
/**
* Advance to the next row on the stream side and populate the buffer with matches.
* @return whether there are more rows in the stream to consume.
*/
private def advanceStream(): Boolean = {
rightMatchesIterator = null
if (smjScanner.findNextOuterJoinRows()) {
setStreamSideOutput(smjScanner.getStreamedRow)
if (smjScanner.getBufferedMatches.isEmpty) {
// There are no matching rows in the buffer, so return the null row
setBufferedSideOutput(bufferedSideNullRow)
} else {
        // Find the next row in the buffer that satisfies the bound condition
if (!advanceBufferUntilBoundConditionSatisfied()) {
setBufferedSideOutput(bufferedSideNullRow)
}
}
true
} else {
// Stream has been exhausted
false
}
}
/**
* Advance to the next row in the buffer that satisfies the bound condition.
* @return whether there is such a row in the current buffer.
*/
private def advanceBufferUntilBoundConditionSatisfied(): Boolean = {
var foundMatch: Boolean = false
if (rightMatchesIterator == null) {
rightMatchesIterator = smjScanner.getBufferedMatches.generateIterator()
}
while (!foundMatch && rightMatchesIterator.hasNext) {
setBufferedSideOutput(rightMatchesIterator.next())
foundMatch = boundCondition(joinedRow)
}
foundMatch
}
override def advanceNext(): Boolean = {
val r = advanceBufferUntilBoundConditionSatisfied() || advanceStream()
if (r) numOutputRows += 1
r
}
override def getRow: InternalRow = resultProj(joinedRow)
}
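// Illustrative note: per the contract above, each streamed row produces either one joined row
// for every buffered match that passes boundCondition, or a single row padded with
// bufferedSideNullRow when no buffered match passes, which is the expected outer-join behavior.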
private class SortMergeFullOuterJoinScanner(
leftKeyGenerator: Projection,
rightKeyGenerator: Projection,
keyOrdering: Ordering[InternalRow],
leftIter: RowIterator,
rightIter: RowIterator,
boundCondition: InternalRow => Boolean,
leftNullRow: InternalRow,
rightNullRow: InternalRow) {
private[this] val joinedRow: JoinedRow = new JoinedRow()
private[this] var leftRow: InternalRow = _
private[this] var leftRowKey: InternalRow = _
private[this] var rightRow: InternalRow = _
private[this] var rightRowKey: InternalRow = _
private[this] var leftIndex: Int = 0
private[this] var rightIndex: Int = 0
private[this] val leftMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
private[this] val rightMatches: ArrayBuffer[InternalRow] = new ArrayBuffer[InternalRow]
private[this] var leftMatched: BitSet = new BitSet(1)
private[this] var rightMatched: BitSet = new BitSet(1)
advancedLeft()
advancedRight()
// --- Private methods --------------------------------------------------------------------------
/**
* Advance the left iterator and compute the new row's join key.
* @return true if the left iterator returned a row and false otherwise.
*/
private def advancedLeft(): Boolean = {
if (leftIter.advanceNext()) {
leftRow = leftIter.getRow
leftRowKey = leftKeyGenerator(leftRow)
true
} else {
leftRow = null
leftRowKey = null
false
}
}
/**
* Advance the right iterator and compute the new row's join key.
* @return true if the right iterator returned a row and false otherwise.
*/
private def advancedRight(): Boolean = {
if (rightIter.advanceNext()) {
rightRow = rightIter.getRow
rightRowKey = rightKeyGenerator(rightRow)
true
} else {
rightRow = null
rightRowKey = null
false
}
}
/**
* Populate the left and right buffers with rows matching the provided key.
* This consumes rows from both iterators until their keys are different from the matching key.
*/
private def findMatchingRows(matchingKey: InternalRow): Unit = {
leftMatches.clear()
rightMatches.clear()
leftIndex = 0
rightIndex = 0
while (leftRowKey != null && keyOrdering.compare(leftRowKey, matchingKey) == 0) {
leftMatches += leftRow.copy()
advancedLeft()
}
while (rightRowKey != null && keyOrdering.compare(rightRowKey, matchingKey) == 0) {
rightMatches += rightRow.copy()
advancedRight()
}
if (leftMatches.size <= leftMatched.capacity) {
leftMatched.clearUntil(leftMatches.size)
} else {
leftMatched = new BitSet(leftMatches.size)
}
if (rightMatches.size <= rightMatched.capacity) {
rightMatched.clearUntil(rightMatches.size)
} else {
rightMatched = new BitSet(rightMatches.size)
}
}
/**
* Scan the left and right buffers for the next valid match.
*
* Note: this method mutates `joinedRow` to point to the latest matching rows in the buffers.
* If a left row has no valid matches on the right, or a right row has no valid matches on the
* left, then the row is joined with the null row and the result is considered a valid match.
*
* @return true if a valid match is found, false otherwise.
*/
private def scanNextInBuffered(): Boolean = {
while (leftIndex < leftMatches.size) {
while (rightIndex < rightMatches.size) {
joinedRow(leftMatches(leftIndex), rightMatches(rightIndex))
if (boundCondition(joinedRow)) {
leftMatched.set(leftIndex)
rightMatched.set(rightIndex)
rightIndex += 1
return true
}
rightIndex += 1
}
rightIndex = 0
if (!leftMatched.get(leftIndex)) {
// the left row has never matched any right row, join it with null row
joinedRow(leftMatches(leftIndex), rightNullRow)
leftIndex += 1
return true
}
leftIndex += 1
}
while (rightIndex < rightMatches.size) {
if (!rightMatched.get(rightIndex)) {
// the right row has never matched any left row, join it with null row
joinedRow(leftNullRow, rightMatches(rightIndex))
rightIndex += 1
return true
}
rightIndex += 1
}
// There are no more valid matches in the left and right buffers
false
}
// --- Public methods --------------------------------------------------------------------------
def getJoinedRow(): JoinedRow = joinedRow
def advanceNext(): Boolean = {
// If we already buffered some matching rows, use them directly
if (leftIndex <= leftMatches.size || rightIndex <= rightMatches.size) {
if (scanNextInBuffered()) {
return true
}
}
if (leftRow != null && (leftRowKey.anyNull || rightRow == null)) {
joinedRow(leftRow.copy(), rightNullRow)
advancedLeft()
true
} else if (rightRow != null && (rightRowKey.anyNull || leftRow == null)) {
joinedRow(leftNullRow, rightRow.copy())
advancedRight()
true
} else if (leftRow != null && rightRow != null) {
      // Both rows are present and neither join key contains null values,
// so we populate the buffers with rows matching the next key
val comp = keyOrdering.compare(leftRowKey, rightRowKey)
if (comp <= 0) {
findMatchingRows(leftRowKey.copy())
} else {
findMatchingRows(rightRowKey.copy())
}
scanNextInBuffered()
true
} else {
// Both iterators have been consumed
false
}
}
}
private class FullOuterIterator(
smjScanner: SortMergeFullOuterJoinScanner,
resultProj: InternalRow => InternalRow,
numRows: SQLMetric) extends RowIterator {
private[this] val joinedRow: JoinedRow = smjScanner.getJoinedRow()
override def advanceNext(): Boolean = {
val r = smjScanner.advanceNext()
if (r) numRows += 1
r
}
override def getRow: InternalRow = resultProj(joinedRow)
}
|
saltstar/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/joins/SortMergeJoinExec.scala
|
Scala
|
apache-2.0
| 44,439 |
package com.twitter.server
import com.twitter.util.logging.Logger
import java.net.URL
import java.util.Properties
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
/**
 * A simple utility for loading information from a build.properties file. The ClassLoader for the
* given object is used to load the build.properties file, which is first searched for relative to
* the given object's class's package (class-package-name/build.properties), and if not found there,
* then it is searched for with an absolute path ("/build.properties").
*/
private[twitter] object BuildProperties {
private[this] val log = Logger(BuildProperties.getClass)
private[this] val basicServerInfo: Map[String, String] =
Map(
"name" -> "unknown",
"version" -> "0.0",
"build" -> "unknown",
"build_revision" -> "unknown",
"build_branch_name" -> "unknown",
"merge_base" -> "unknown",
"merge_base_commit_date" -> "unknown",
"scm_repository" -> "unknown"
)
private[this] val properties: Map[String, String] = {
val buildProperties = new Properties
try {
buildProperties.load(BuildProperties.getClass.getResource("build.properties").openStream)
} catch {
case NonFatal(_) =>
try {
BuildProperties.getClass.getResource("/build.properties") match {
case resource: URL =>
buildProperties.load(resource.openStream)
case _ => // do nothing
}
} catch {
case NonFatal(e) =>
log.warn("Unable to load build.properties file from classpath. " + e.getMessage)
}
}
basicServerInfo ++ buildProperties.asScala
}
/**
   * Returns the [[String]] value associated with this key, or throws a `NoSuchElementException`
   * if there is no mapping from the given key to a value.
*
* @param key the key
   * @return the value associated with the given key, if present; otherwise a `NoSuchElementException` is thrown.
*/
def get(key: String): String = all(key)
/**
* Returns the value associated with a key, or a default value if the key is not contained in the map.
*
* @param key the key
* @param defaultValue a default value in case no binding for `key` is found in the map.
* @return the value associated with `key` if it exists, otherwise the `defaultValue`.
*/
def get(key: String, defaultValue: String): String = all.getOrElse(key, defaultValue)
/**
* Return all build properties.
*/
def all: Map[String, String] = properties
}
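// Illustrative usage sketch; the keys below are defaults defined in basicServerInfo, so these
// calls fall back gracefully when no build.properties file is found on the classpath:
//
//   val version  = BuildProperties.get("version", "0.0")
//   val revision = BuildProperties.get("build_revision", "unknown")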
|
twitter/twitter-server
|
server/src/main/scala/com/twitter/server/BuildProperties.scala
|
Scala
|
apache-2.0
| 2,524 |
package mesosphere.marathon
package core.storage.store
import java.io.File
import java.time.{Clock, OffsetDateTime}
import akka.Done
import akka.http.scaladsl.marshalling.Marshaller
import akka.http.scaladsl.unmarshalling.Unmarshaller
import akka.stream.scaladsl.{FileIO, Keep, Sink}
import mesosphere.AkkaUnitTest
import mesosphere.marathon.core.storage.backup.impl.TarBackupFlow
import mesosphere.marathon.core.storage.repository.RepositoryConstants
import mesosphere.marathon.core.storage.store.impl.BasePersistenceStore
import mesosphere.marathon.storage.migration.{Migration, StorageVersions}
import mesosphere.marathon.test.SettableClock
import scala.concurrent.Future
import scala.concurrent.duration._
case class TestClass1(str: String, int: Int, version: OffsetDateTime)
object TestClass1 {
def apply(str: String, int: Int)(implicit clock: Clock): TestClass1 = {
TestClass1(str, int, OffsetDateTime.now(clock))
}
}
private[storage] trait PersistenceStoreTest { this: AkkaUnitTest =>
def basicPersistenceStore[K, C, Serialized](name: String, newStore: => PersistenceStore[K, C, Serialized])(implicit
ir: IdResolver[String, TestClass1, C, K],
m: Marshaller[TestClass1, Serialized],
um: Unmarshaller[Serialized, TestClass1]
): Unit = {
name should {
"is open" in {
val store = newStore
store.isOpen shouldBe true
}
"cannot be opened twice" in {
val store = newStore
val thrown = the[IllegalStateException] thrownBy store.markOpen()
thrown.getMessage shouldBe "it was opened before"
}
"cannot be reopened" in {
val store = newStore
store.markClosed()
val thrown = the[IllegalStateException] thrownBy store.markOpen()
thrown.getMessage shouldBe "it was opened before"
}
"cannot be closed twice" in {
val store = newStore
store.markClosed()
val thrown = the[IllegalStateException] thrownBy store.markClosed()
thrown.getMessage shouldBe "attempt to close while not being opened"
}
"have no ids" in {
val store = newStore
store.ids().runWith(Sink.seq).futureValue should equal(Nil)
}
"have no keys" in {
val store = newStore
store match {
case s: BasePersistenceStore[_, _, _] =>
s.allKeys().runWith(Sink.seq).futureValue should equal(Nil)
case _ =>
}
}
"not fail if the key doesn't exist" in {
val store = newStore
store.get("task-1").futureValue should be('empty)
}
"create and list an object" in {
implicit val clock = new SettableClock()
val store = newStore
val tc = TestClass1("abc", 1)
store.store("task-1", tc).futureValue should be(Done)
store.get("task-1").futureValue.value should equal(tc)
store.ids().runWith(Sink.seq).futureValue should contain theSameElementsAs Seq("task-1")
store.versions("task-1").runWith(Sink.seq).futureValue should contain theSameElementsAs Seq(tc.version)
}
"update an object" in {
implicit val clock = new SettableClock()
val store = newStore
val original = TestClass1("abc", 1)
clock.advanceBy(1.minute)
val updated = TestClass1("def", 2)
store.store("task-1", original).futureValue should be(Done)
store.store("task-1", updated).futureValue should be(Done)
store.get("task-1").futureValue.value should equal(updated)
store.get("task-1", original.version).futureValue.value should equal(original)
store.versions("task-1").runWith(Sink.seq).futureValue should contain theSameElementsAs
Seq(original.version, updated.version)
      }
      "delete idempotently" in {
implicit val clock = new SettableClock()
val store = newStore
store.deleteAll("task-1").futureValue should be(Done)
store.store("task-2", TestClass1("def", 2)).futureValue should be(Done)
store.deleteAll("task-2").futureValue should be(Done)
store.deleteAll("task-2").futureValue should be(Done)
}
"store the multiple versions of the old values" in {
val clock = new SettableClock()
val versions = 0.until(10).map { i =>
clock.advanceBy(1.minute)
TestClass1("abc", i, OffsetDateTime.now(clock))
}
val store = newStore
versions.foreach { v =>
store.store("task", v).futureValue should be(Done)
}
clock.advanceBy(1.hour)
val newestVersion = TestClass1("def", 3, OffsetDateTime.now(clock))
store.store("task", newestVersion).futureValue should be(Done)
        // no previously written versions should have been dropped.
val storedVersions = store.versions("task").runWith(Sink.seq).futureValue
// the current version is listed too.
storedVersions should contain theSameElementsAs newestVersion.version +: versions.map(_.version)
versions.foreach { v =>
store.get("task", v.version).futureValue.value should equal(v)
}
}
"allow storage of a value at a specific version even if the value doesn't exist in an unversioned slot" in {
val store = newStore
implicit val clock = new SettableClock()
val tc = TestClass1("abc", 1)
store.store("test", tc, tc.version).futureValue should be(Done)
store.ids().runWith(Sink.seq).futureValue should contain theSameElementsAs Seq("test")
store.get("test").futureValue should be('empty)
store.get("test", tc.version).futureValue.value should be(tc)
store.versions("test").runWith(Sink.seq).futureValue should contain theSameElementsAs Seq(tc.version)
store.deleteVersion("test", tc.version).futureValue should be(Done)
store.versions("test").runWith(Sink.seq).futureValue should be('empty)
}
"allow storage of a value at a specific version without replacing the existing one" in {
val store = newStore
implicit val clock = new SettableClock()
val tc = TestClass1("abc", 1)
val old = TestClass1("def", 2, OffsetDateTime.now(clock).minusHours(1))
store.store("test", tc).futureValue should be(Done)
store.store("test", old, old.version).futureValue should be(Done)
store.versions("test").runWith(Sink.seq).futureValue should contain theSameElementsAs
Seq(tc.version, old.version)
store.get("test").futureValue.value should equal(tc)
store.get("test", old.version).futureValue.value should equal(old)
store.deleteAll("test").futureValue should be(Done)
store.get("test").futureValue should be('empty)
}
}
}
def backupRestoreStore[K, C, Serialized](name: String, newStore: => PersistenceStore[K, C, Serialized])(implicit
ir: IdResolver[String, TestClass1, C, K],
m: Marshaller[TestClass1, Serialized],
um: Unmarshaller[Serialized, TestClass1]
): Unit = {
name should {
"be able to backup and restore the state" in {
Given("a persistent store with some data in some version")
val store = newStore
store.setStorageVersion(StorageVersions(1, 1, 1)).futureValue
implicit val clock = new SettableClock()
val numEntries = 3
val content = 0.until(numEntries).map(num => TestClass1(s"name-$num", num))
Future.sequence(content.map(item => store.store(item.str, item))).futureValue
val file = File.createTempFile("marathon-zipfile", ".zip")
file.deleteOnExit()
val tarSink = TarBackupFlow.tar.toMat(FileIO.toPath(file.toPath))(Keep.right)
When("a backup is created")
store.backup().runWith(tarSink).futureValue
Then("the content of the store can be removed completely")
store.ids().map(store.deleteAll(_)).mapAsync(RepositoryConstants.maxConcurrency)(identity).runWith(Sink.ignore).futureValue
store.setStorageVersion(StorageVersions(0, 0, 0)).futureValue
When("the state is read from the backup")
val tarSource = FileIO.fromPath(file.toPath).via(TarBackupFlow.untar)
tarSource.runWith(store.restore()).futureValue
Then("the state is restored completely")
val children = store.backup().runWith(Sink.seq).futureValue
children.size should be >= numEntries
children.exists(_.key == Migration.StorageVersionName) should be(true)
content.foreach { item =>
store.get(item.str).futureValue should be(Some(item))
}
file.delete()
And("the storage version is also restored correctly")
store.storageVersion().futureValue should be(Some(StorageVersions(1, 1, 1)))
}
}
}
}
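// Illustrative sketch (hypothetical names): a concrete suite mixes in the trait above and runs
// the shared behaviours with its own store factory, with the implicit IdResolver, Marshaller
// and Unmarshaller in scope:
//
//   class MyStoreTest extends AkkaUnitTest with PersistenceStoreTest {
//     basicPersistenceStore("MyStore", newMyStore())  // newMyStore() is a hypothetical factory
//     backupRestoreStore("MyStore", newMyStore())
//   }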
|
mesosphere/marathon
|
src/test/scala/mesosphere/marathon/core/storage/store/PersistenceStoreTest.scala
|
Scala
|
apache-2.0
| 8,749 |
package com.webtrends.harness.utils
import org.specs2.mutable.SpecificationWithJUnit
import scala.concurrent.{Await, Future}
import scala.util.{Failure, Success}
import org.specs2.concurrent.ExecutionEnv
import scala.concurrent.duration.Duration
class FutureExtensionsSpec(implicit ee: ExecutionEnv) extends SpecificationWithJUnit {
case class FutureException(message: String) extends Exception(message)
import com.webtrends.harness.utils.FutureExtensions._
val duration = Duration.fromNanos(10000000000L)
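  // Illustrative note: mapAll and flatMapAll are enrichments provided by FutureExtensions that
  // let a single partial function handle both outcomes of a Future, e.g.
  //   Future.successful(1).mapAll {
  //     case Success(v) => v + 1
  //     case Failure(_) => 0
  //   }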
"flatMapAll" should {
"Successfully flatMap Success case" in {
val f = Future.successful("").flatMapAll {
case Success(_) => Future.successful("success")
case Failure(_) => throw new Exception()
}
Await.result(f, duration) must be equalTo "success"
}
"Successfully flatMap Failure case" in {
val f = Future.failed[String](new Exception()).flatMapAll {
case Success(_) => Future.successful("")
case Failure(_) => Future.successful("success")
}
Await.result(f, duration) must be equalTo "success"
}
"return failed future if exception is thrown in Success case" in {
val f = Future.successful("").flatMapAll {
case Success(_) => throw FutureException("Failed")
case Failure(_) => Future.successful("success")
}
Await.result(f, duration) must throwAn[FutureException]
}
"return failed future if exception is thrown in Failure case" in {
val f = Future.failed[String](new Exception()).flatMapAll {
case Success(_) => Future.successful("success")
case Failure(_) => throw FutureException("Failed")
}
Await.result(f, duration) must throwAn[FutureException]
}
}
"mapAll" should {
"Successfully map Success case" in {
val f = Future.successful("").mapAll {
case Success(_) => "success"
case Failure(_) => throw new Exception()
}
Await.result(f, duration) must be equalTo "success"
}
"Successfully map Failure case" in {
val f = Future.failed[String](new Exception()).mapAll {
case Success(_) => throw new Exception()
case Failure(_) => "success"
}
Await.result(f, duration) must be equalTo "success"
}
"Return failed future if exception is thrown in Success case" in {
val f = Future.successful("").mapAll {
case Success(_) => throw FutureException("Failed")
case Failure(_) => "success"
}
Await.result(f, duration) must throwAn[FutureException]
}
"Return failed future if exception is thrown in Failure case" in {
val f = Future.failed[String](new Exception()).mapAll {
case Success(_) => "success"
case Failure(_) => throw FutureException("Failed")
}
Await.result(f, duration) must throwAn[FutureException]
}
}
}
|
Webtrends/wookiee
|
wookiee-core/src/test/scala/com/webtrends/harness/utils/FutureExtensionsSpec.scala
|
Scala
|
apache-2.0
| 2,876 |
package models
case class NewsstandShardConfig(shards:Long)
|
guardian/mobile-n10n
|
common/src/main/scala/models/NewsstandShardConfig.scala
|
Scala
|
apache-2.0
| 61 |
/* sbt -- Simple Build Tool
* Copyright 2008, 2009, 2010 Mark Harrah, Viktor Klang, Ross McDonald
*/
package sbt
import Using._
import ErrorHandling.translate
import java.io.{ BufferedReader, ByteArrayOutputStream, BufferedWriter, File, FileInputStream, InputStream, OutputStream, PrintWriter }
import java.io.{ ObjectInputStream, ObjectStreamClass }
import java.net.{ URI, URISyntaxException, URL }
import java.nio.charset.Charset
import java.util.Properties
import java.util.jar.{ Attributes, JarEntry, JarFile, JarInputStream, JarOutputStream, Manifest }
import java.util.zip.{ CRC32, GZIPOutputStream, ZipEntry, ZipFile, ZipInputStream, ZipOutputStream }
import scala.collection.immutable.TreeSet
import scala.collection.mutable.{ HashMap, HashSet }
import scala.reflect.{ Manifest => SManifest }
import Function.tupled
/** A collection of File, URL, and I/O utility methods.*/
object IO {
/** The maximum number of attempts made to create a uniquely named temporary file or directory before giving up.*/
private val MaximumTries = 10
/** The producer of randomness for unique name generation.*/
private lazy val random = new java.util.Random
val temporaryDirectory = new File(System.getProperty("java.io.tmpdir"))
/** The size of the byte or char buffer used in various methods.*/
private val BufferSize = 8192
/** File scheme name */
private[sbt] val FileScheme = "file"
/** The newline string for this system, as obtained by the line.separator system property. */
val Newline = System.getProperty("line.separator")
val utf8 = Charset.forName("UTF-8")
/**
* Returns a URL for the directory or jar containing the class file `cl`.
* If the location cannot be determined, an error is generated.
* Note that Java standard library classes typically do not have a location associated with them.
*/
def classLocation(cl: Class[_]): URL =
{
val codeSource = cl.getProtectionDomain.getCodeSource
if (codeSource == null) sys.error("No class location for " + cl)
else codeSource.getLocation
}
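// Illustrative usage, not part of the original source (the class chosen is arbitrary):
//   IO.classLocation[Option[Int]]  // typically a file: URL pointing at the scala-library jar on the classpath
// Classes loaded from the JDK itself usually have no code source and hit the error branch above.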
/**
* Returns the directory or jar file containing the class file `cl`.
* If the location cannot be determined or it is not a file, an error is generated.
* Note that Java standard library classes typically do not have a location associated with them.
*/
def classLocationFile(cl: Class[_]): File = toFile(classLocation(cl))
/**
* Returns a URL for the directory or jar containing the class file for type `T` (as determined by an implicit Manifest).
* If the location cannot be determined, an error is generated.
* Note that Java standard library classes typically do not have a location associated with them.
*/
def classLocation[T](implicit mf: SManifest[T]): URL = classLocation(mf.runtimeClass)
/**
* Returns the directory or jar file containing the class file for type `T` (as determined by an implicit Manifest).
* If the location cannot be determined, an error is generated.
* Note that Java standard library classes typically do not have a location associated with them.
*/
def classLocationFile[T](implicit mf: SManifest[T]): File = classLocationFile(mf.runtimeClass)
/**
* Constructs a File corresponding to `url`, which must have a scheme of `file`.
* This method properly works around an issue with a simple conversion to URI and then to a File.
*/
def toFile(url: URL): File =
try { new File(url.toURI) }
catch { case _: URISyntaxException => new File(url.getPath) }
/** Converts the given URL to a File. If the URL is for an entry in a jar, the File for the jar is returned. */
def asFile(url: URL): File = urlAsFile(url) getOrElse sys.error("URL is not a file: " + url)
def urlAsFile(url: URL): Option[File] =
url.getProtocol match {
case FileScheme => Some(toFile(url))
case "jar" =>
val path = url.getPath
val end = path.indexOf('!')
Some(uriToFile(if (end == -1) path else path.substring(0, end)))
case _ => None
}
private[this] def uriToFile(uriString: String): File =
{
val uri = new URI(uriString)
assert(uri.getScheme == FileScheme, "Expected protocol to be '" + FileScheme + "' in URI " + uri)
if (uri.getAuthority eq null)
new File(uri)
else {
/* https://github.com/sbt/sbt/issues/564
* http://blogs.msdn.com/b/ie/archive/2006/12/06/file-uris-in-windows.aspx
* http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=5086147
* The specific problem here is that `uri` will have a defined authority component for UNC names like //foo/bar/some/path.jar
* but the File constructor requires URIs with an undefined authority component.
*/
new File(uri.getSchemeSpecificPart)
}
}
def assertDirectory(file: File) { assert(file.isDirectory, (if (file.exists) "Not a directory: " else "Directory not found: ") + file) }
def assertDirectories(file: File*) { file.foreach(assertDirectory) }
// "base.extension" -> (base, extension)
/**
* Splits the given string into base and extension strings.
* If `name` contains no period, the base string is the input string and the extension is the empty string.
* Otherwise, the base is the substring up until the last period (exclusive) and
* the extension is the substring after the last period.
*
* For example, `split("Build.scala") == ("Build", "scala")`
*/
def split(name: String): (String, String) =
{
val lastDot = name.lastIndexOf('.')
if (lastDot >= 0)
(name.substring(0, lastDot), name.substring(lastDot + 1))
else
(name, "")
}
/**
* Each input file in `files` is created if it doesn't exist.
* If a file already exists, the last modified time is set to the current time.
* It is not guaranteed that all files will have the same last modified time after this call.
*/
def touch(files: Traversable[File]): Unit = files.foreach(f => touch(f))
/**
* Creates a file at the given location if it doesn't exist.
* If the file already exists and `setModified` is true, this method sets the last modified time to the current time.
*/
def touch(file: File, setModified: Boolean = true) {
val absFile = file.getAbsoluteFile
createDirectory(absFile.getParentFile)
val created = translate("Could not create file " + absFile) { absFile.createNewFile() }
if (created || absFile.isDirectory)
()
else if (setModified && !absFile.setLastModified(System.currentTimeMillis))
sys.error("Could not update last modified time for file " + absFile)
}
/** Creates directories `dirs` and all parent directories. It tries to work around a race condition in `File.mkdirs()` by retrying up to a limit.*/
def createDirectories(dirs: Traversable[File]): Unit =
dirs.foreach(createDirectory)
/** Creates directory `dir` and all parent directories. It tries to work around a race condition in `File.mkdirs()` by retrying up to a limit.*/
def createDirectory(dir: File): Unit =
{
def failBase = "Could not create directory " + dir
// Need a retry because mkdirs() has a race condition
var tryCount = 0
while (!dir.exists && !dir.mkdirs() && tryCount < 100) { tryCount += 1 }
if (dir.isDirectory)
()
else if (dir.exists) {
sys.error(failBase + ": file exists and is not a directory.")
} else
sys.error(failBase)
}
/** Gzips the file 'in' and writes it to 'out'. 'in' cannot be the same file as 'out'. */
def gzip(in: File, out: File) {
require(in != out, "Input file cannot be the same as the output file.")
Using.fileInputStream(in) { inputStream =>
Using.fileOutputStream()(out) { outputStream =>
gzip(inputStream, outputStream)
}
}
}
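// Illustrative round-trip, not part of the original source (file names are hypothetical and the input is assumed to exist):
//   IO.gzip(new File("data.txt"), new File("data.txt.gz"))
//   IO.gunzip(new File("data.txt.gz"), new File("data-copy.txt"))
// Both methods refuse to operate in place, so distinct input and output files are required.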
/** Gzips the InputStream 'in' and writes it to 'output'. Neither stream is closed.*/
def gzip(input: InputStream, output: OutputStream): Unit =
gzipOutputStream(output) { gzStream => transfer(input, gzStream) }
/** Gunzips the file 'in' and writes it to 'out'. 'in' cannot be the same file as 'out'. */
def gunzip(in: File, out: File) {
require(in != out, "Input file cannot be the same as the output file.")
Using.fileInputStream(in) { inputStream =>
Using.fileOutputStream()(out) { outputStream =>
gunzip(inputStream, outputStream)
}
}
}
/** Gunzips the InputStream 'input' and writes it to 'output'. Neither stream is closed.*/
def gunzip(input: InputStream, output: OutputStream): Unit =
gzipInputStream(input) { gzStream => transfer(gzStream, output) }
def unzip(from: File, toDirectory: File, filter: NameFilter = AllPassFilter, preserveLastModified: Boolean = true): Set[File] =
fileInputStream(from)(in => unzipStream(in, toDirectory, filter, preserveLastModified))
def unzipURL(from: URL, toDirectory: File, filter: NameFilter = AllPassFilter, preserveLastModified: Boolean = true): Set[File] =
urlInputStream(from)(in => unzipStream(in, toDirectory, filter, preserveLastModified))
def unzipStream(from: InputStream, toDirectory: File, filter: NameFilter = AllPassFilter, preserveLastModified: Boolean = true): Set[File] =
{
createDirectory(toDirectory)
zipInputStream(from) { zipInput => extract(zipInput, toDirectory, filter, preserveLastModified) }
}
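// Illustrative usage, not part of the original source (hypothetical paths; GlobFilter is the NameFilter used elsewhere in this file):
//   val extracted: Set[File] = IO.unzip(new File("archive.zip"), new File("out"), GlobFilter("*.class"))
// The returned set contains the files that were written; directories created along the way are not included.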
private def extract(from: ZipInputStream, toDirectory: File, filter: NameFilter, preserveLastModified: Boolean) =
{
val set = new HashSet[File]
def next() {
val entry = from.getNextEntry
if (entry == null)
()
else {
val name = entry.getName
if (filter.accept(name)) {
val target = new File(toDirectory, name)
//log.debug("Extracting zip entry '" + name + "' to '" + target + "'")
if (entry.isDirectory)
createDirectory(target)
else {
set += target
translate("Error extracting zip entry '" + name + "' to '" + target + "': ") {
fileOutputStream(false)(target) { out => transfer(from, out) }
}
}
if (preserveLastModified)
target.setLastModified(entry.getTime)
} else {
//log.debug("Ignoring zip entry '" + name + "'")
}
from.closeEntry()
next()
}
}
next()
Set() ++ set
}
/** Retrieves the content of the given URL and writes it to the given File. */
def download(url: URL, to: File) =
Using.urlInputStream(url) { inputStream =>
transfer(inputStream, to)
}
/** Copies the contents of `in` to `out`.*/
def transfer(in: File, out: File): Unit =
fileInputStream(in) { in => transfer(in, out) }
/**
* Copies the contents of the input file `in` to the `out` stream.
* The output stream is not closed by this method.
*/
def transfer(in: File, out: OutputStream): Unit =
fileInputStream(in) { in => transfer(in, out) }
/** Copies all bytes from the given input stream to the given File. The input stream is not closed by this method.*/
def transfer(in: InputStream, to: File): Unit =
Using.fileOutputStream()(to) { outputStream =>
transfer(in, outputStream)
}
/**
* Copies all bytes from the given input stream to the given output stream.
* Neither stream is closed.
*/
def transfer(in: InputStream, out: OutputStream): Unit = transferImpl(in, out, false)
/**
* Copies all bytes from the given input stream to the given output stream. The
* input stream is closed after the method completes.
*/
def transferAndClose(in: InputStream, out: OutputStream): Unit = transferImpl(in, out, true)
private def transferImpl(in: InputStream, out: OutputStream, close: Boolean) {
try {
val buffer = new Array[Byte](BufferSize)
def read() {
val byteCount = in.read(buffer)
if (byteCount >= 0) {
out.write(buffer, 0, byteCount)
read()
}
}
read()
} finally { if (close) in.close }
}
/**
* Creates a temporary directory and provides its location to the given function. The directory
* is deleted after the function returns.
*/
def withTemporaryDirectory[T](action: File => T): T =
{
val dir = createTemporaryDirectory
try { action(dir) }
finally { delete(dir) }
}
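// Illustrative usage, not part of the original source (the body is a hypothetical sketch):
//   IO.withTemporaryDirectory { dir =>
//     IO.write(new File(dir, "scratch.txt"), "temporary data")
//   } // the directory and everything written into it is deleted once the function returns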
/** Creates a directory in the default temporary directory with a name generated from a random integer. */
def createTemporaryDirectory: File = createUniqueDirectory(temporaryDirectory)
/** Creates a directory in `baseDirectory` with a name generated from a random integer */
def createUniqueDirectory(baseDirectory: File): File =
{
def create(tries: Int): File =
{
if (tries > MaximumTries)
sys.error("Could not create temporary directory.")
else {
val randomName = "sbt_" + java.lang.Integer.toHexString(random.nextInt)
val f = new File(baseDirectory, randomName)
try { createDirectory(f); f }
catch { case e: Exception => create(tries + 1) }
}
}
create(0)
}
/**
* Creates a file in the default temporary directory, calls `action` with the file, deletes the file, and returns the result of calling `action`.
* The name of the file will begin with `prefix`, which must be at least three characters long, and end with `postfix`, which has no minimum length.
*/
def withTemporaryFile[T](prefix: String, postfix: String)(action: File => T): T =
{
val file = File.createTempFile(prefix, postfix)
try { action(file) }
finally { file.delete() }
}
private[sbt] def jars(dir: File): Iterable[File] = listFiles(dir, GlobFilter("*.jar"))
/** Deletes all empty directories in the set. Any non-empty directories are ignored. */
def deleteIfEmpty(dirs: collection.Set[File]): Unit =
{
val isEmpty = new HashMap[File, Boolean]
def visit(f: File): Boolean = isEmpty.getOrElseUpdate(f, dirs(f) && f.isDirectory && (f.listFiles forall visit))
dirs foreach visit
for ((f, true) <- isEmpty) f.delete
}
/** Deletes each file or directory (recursively) in `files`.*/
def delete(files: Iterable[File]): Unit = files.foreach(delete)
/** Deletes each file or directory in `files` recursively. Any empty parent directories are deleted, recursively.*/
def deleteFilesEmptyDirs(files: Iterable[File]): Unit =
{
def isEmptyDirectory(dir: File) = dir.isDirectory && listFiles(dir).isEmpty
def parents(fs: Set[File]) = fs flatMap { f => Option(f.getParentFile) }
def deleteEmpty(dirs: Set[File]) {
val empty = dirs filter isEmptyDirectory
if (empty.nonEmpty) // looks funny, but this is true if at least one of `dirs` is an empty directory
{
empty foreach { _.delete() }
deleteEmpty(parents(empty))
}
}
delete(files)
deleteEmpty(parents(files.toSet))
}
/** Deletes `file`, recursively if it is a directory. */
def delete(file: File) {
translate("Error deleting file " + file + ": ") {
val deleted = file.delete()
if (!deleted && file.isDirectory) {
delete(listFiles(file))
file.delete
}
}
}
/** Returns the children of directory `dir` that match `filter` in a non-null array.*/
def listFiles(filter: java.io.FileFilter)(dir: File): Array[File] = wrapNull(dir.listFiles(filter))
/** Returns the children of directory `dir` that match `filter` in a non-null array.*/
def listFiles(dir: File, filter: java.io.FileFilter): Array[File] = wrapNull(dir.listFiles(filter))
/** Returns the children of directory `dir` in a non-null array.*/
def listFiles(dir: File): Array[File] = wrapNull(dir.listFiles())
private[sbt] def wrapNull(a: Array[File]) =
if (a == null)
new Array[File](0)
else
a
/**
* Creates a jar file.
* @param sources The files to include in the jar file paired with the entry name in the jar. Only the pairs explicitly listed are included.
* @param outputJar The file to write the jar to.
* @param manifest The manifest for the jar.
*/
def jar(sources: Traversable[(File, String)], outputJar: File, manifest: Manifest): Unit =
archive(sources.toSeq, outputJar, Some(manifest))
/**
* Creates a zip file.
* @param sources The files to include in the zip file paired with the entry name in the zip. Only the pairs explicitly listed are included.
* @param outputZip The file to write the zip to.
*/
def zip(sources: Traversable[(File, String)], outputZip: File): Unit =
archive(sources.toSeq, outputZip, None)
private def archive(sources: Seq[(File, String)], outputFile: File, manifest: Option[Manifest]) {
if (outputFile.isDirectory)
sys.error("Specified output file " + outputFile + " is a directory.")
else {
val outputDir = outputFile.getParentFile
createDirectory(outputDir)
withZipOutput(outputFile, manifest) { output =>
val createEntry: (String => ZipEntry) = if (manifest.isDefined) new JarEntry(_) else new ZipEntry(_)
writeZip(sources, output)(createEntry)
}
}
}
private def writeZip(sources: Seq[(File, String)], output: ZipOutputStream)(createEntry: String => ZipEntry) {
val files = sources.flatMap { case (file, name) => if (file.isFile) (file, normalizeName(name)) :: Nil else Nil }
val now = System.currentTimeMillis
// The CRC32 for an empty value, needed to store directories in zip files
val emptyCRC = new CRC32().getValue()
def addDirectoryEntry(name: String) {
output putNextEntry makeDirectoryEntry(name)
output.closeEntry()
}
def makeDirectoryEntry(name: String) =
{
// log.debug("\tAdding directory " + relativePath + " ...")
val e = createEntry(name)
e setTime now
e setSize 0
e setMethod ZipEntry.STORED
e setCrc emptyCRC
e
}
def makeFileEntry(file: File, name: String) =
{
// log.debug("\tAdding " + file + " as " + name + " ...")
val e = createEntry(name)
e setTime file.lastModified
e
}
def addFileEntry(file: File, name: String) {
output putNextEntry makeFileEntry(file, name)
transfer(file, output)
output.closeEntry()
}
//Calculate directories and add them to the generated Zip
allDirectoryPaths(files) foreach addDirectoryEntry
//Add all files to the generated Zip
files foreach { case (file, name) => addFileEntry(file, name) }
}
// map a path a/b/c to List("a", "b")
private def relativeComponents(path: String): List[String] =
path.split("/").toList.dropRight(1)
// map components List("a", "b", "c") to List("a/b/c/", "a/b/", "a/", "")
private def directories(path: List[String]): List[String] =
path.foldLeft(List(""))((e, l) => (e.head + l + "/") :: e)
// map a path a/b/c to List("a/b/", "a/")
private def directoryPaths(path: String): List[String] =
directories(relativeComponents(path)).filter(_.length > 1)
// produce a sorted list of all the subdirectories of all provided files
private def allDirectoryPaths(files: Iterable[(File, String)]) =
TreeSet[String]() ++ (files flatMap { case (file, name) => directoryPaths(name) })
private def normalizeDirName(name: String) =
{
val norm1 = normalizeName(name)
if (norm1.endsWith("/")) norm1 else (norm1 + "/")
}
private def normalizeName(name: String) =
{
val sep = File.separatorChar
if (sep == '/') name else name.replace(sep, '/')
}
private def withZipOutput(file: File, manifest: Option[Manifest])(f: ZipOutputStream => Unit) {
fileOutputStream(false)(file) { fileOut =>
val (zipOut, ext) =
manifest match {
case Some(mf) =>
{
import Attributes.Name.MANIFEST_VERSION
val main = mf.getMainAttributes
if (!main.containsKey(MANIFEST_VERSION))
main.put(MANIFEST_VERSION, "1.0")
(new JarOutputStream(fileOut, mf), "jar")
}
case None => (new ZipOutputStream(fileOut), "zip")
}
try { f(zipOut) }
finally { zipOut.close }
}
}
/**
* Returns the relative file for `file` relative to directory `base` or None if `base` is not a parent of `file`.
* If `file` or `base` are not absolute, they are first resolved against the current working directory.
*/
def relativizeFile(base: File, file: File): Option[File] = relativize(base, file).map { path => new File(path) }
/**
* Returns the path for `file` relative to directory `base` or None if `base` is not a parent of `file`.
* If `file` or `base` are not absolute, they are first resolved against the current working directory.
*/
def relativize(base: File, file: File): Option[String] =
{
val pathString = file.getAbsolutePath
baseFileString(base) flatMap
{
baseString =>
{
if (pathString.startsWith(baseString))
Some(pathString.substring(baseString.length))
else
None
}
}
}
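// Illustrative behaviour, not part of the original source (hypothetical POSIX-style paths; note that
// baseFileString below requires the base to exist on disk as a directory):
//   IO.relativize(new File("/base"), new File("/base/sub/a.txt"))  // expected: Some("sub/a.txt")
//   IO.relativize(new File("/base"), new File("/elsewhere/a.txt")) // expected: None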
private def baseFileString(baseFile: File): Option[String] =
{
if (baseFile.isDirectory) {
val cp = baseFile.getAbsolutePath
assert(cp.length > 0)
val normalized = if (cp.charAt(cp.length - 1) == File.separatorChar) cp else cp + File.separatorChar
Some(normalized)
} else
None
}
/**
* For each pair in `sources`, copies the contents of the first File (the source) to the location of the second File (the target).
*
* A source file is always copied if `overwrite` is true.
* If `overwrite` is false, the source is only copied if the target is missing or is older than the source file according to last modified times.
* If the source is a directory, the corresponding directory is created.
*
* If `preserveLastModified` is `true`, the last modified times are transferred as well.
* Any parent directories that do not exist are created.
* The set of all target files is returned, whether or not they were updated by this method.
*/
def copy(sources: Traversable[(File, File)], overwrite: Boolean = false, preserveLastModified: Boolean = false): Set[File] =
sources.map(tupled(copyImpl(overwrite, preserveLastModified))).toSet
private def copyImpl(overwrite: Boolean, preserveLastModified: Boolean)(from: File, to: File): File =
{
if (overwrite || !to.exists || from.lastModified > to.lastModified) {
if (from.isDirectory)
createDirectory(to)
else {
createDirectory(to.getParentFile)
copyFile(from, to, preserveLastModified)
}
}
to
}
/**
* Copies the contents of each file in the `source` directory to the corresponding file in the `target` directory.
* A source file is always copied if `overwrite` is true.
* If `overwrite` is false, the source is only copied if the target is missing or is older than the source file according to last modified times.
* Files in `target` without a corresponding file in `source` are left unmodified in any case.
* If `preserveLastModified` is `true`, the last modified times are transferred as well.
* Any parent directories that do not exist are created.
*/
def copyDirectory(source: File, target: File, overwrite: Boolean = false, preserveLastModified: Boolean = false): Unit =
copy((PathFinder(source) ***) x Path.rebase(source, target), overwrite, preserveLastModified)
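// Illustrative usage, not part of the original source (hypothetical directories):
//   IO.copyDirectory(new File("src-templates"), new File("target/templates"), overwrite = true)
// Files already present under the target but absent from the source are left untouched, as documented above.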
/**
* Copies the contents of `sourceFile` to the location of `targetFile`, overwriting any existing content.
* If `preserveLastModified` is `true`, the last modified time is transferred as well.
*/
def copyFile(sourceFile: File, targetFile: File, preserveLastModified: Boolean = false) {
// NOTE: when modifying this code, test with larger values of CopySpec.MaxFileSizeBits than default
require(sourceFile.exists, "Source file '" + sourceFile.getAbsolutePath + "' does not exist.")
require(!sourceFile.isDirectory, "Source file '" + sourceFile.getAbsolutePath + "' is a directory.")
fileInputChannel(sourceFile) { in =>
fileOutputChannel(targetFile) { out =>
// maximum bytes per transfer, according to http://dzone.com/snippets/java-filecopy-using-nio
val max = (64 * 1024 * 1024) - (32 * 1024)
val total = in.size
def loop(offset: Long): Long =
if (offset < total)
loop(offset + out.transferFrom(in, offset, max))
else
offset
val copied = loop(0)
if (copied != in.size)
sys.error("Could not copy '" + sourceFile + "' to '" + targetFile + "' (" + copied + "/" + in.size + " bytes copied)")
}
}
if (preserveLastModified)
copyLastModified(sourceFile, targetFile)
}
/** Transfers the last modified time of `sourceFile` to `targetFile`. */
def copyLastModified(sourceFile: File, targetFile: File) = {
val last = sourceFile.lastModified
// lastModified can return a negative number, but setLastModified doesn't accept it
// see Java bug #6791812
targetFile.setLastModified(math.max(last, 0L))
}
/** The default Charset used when not specified: UTF-8. */
def defaultCharset = utf8
/**
* Writes `content` to `file` using `charset` or UTF-8 if `charset` is not explicitly specified.
* If `append` is `false`, the existing contents of `file` are overwritten.
* If `append` is `true`, the new `content` is appended to the existing contents.
* If `file` or any parent directories do not exist, they are created.
*/
def write(file: File, content: String, charset: Charset = defaultCharset, append: Boolean = false): Unit =
writer(file, content, charset, append) { _.write(content) }
def writer[T](file: File, content: String, charset: Charset, append: Boolean = false)(f: BufferedWriter => T): T =
if (charset.newEncoder.canEncode(content))
fileWriter(charset, append)(file) { f }
else
sys.error("String cannot be encoded by charset " + charset.name)
def reader[T](file: File, charset: Charset = defaultCharset)(f: BufferedReader => T): T =
fileReader(charset)(file) { f }
/** Reads the full contents of `file` into a String using `charset` or UTF-8 if `charset` is not explicitly specified. */
def read(file: File, charset: Charset = defaultCharset): String =
{
val out = new ByteArrayOutputStream(file.length.toInt)
transfer(file, out)
out.toString(charset.name)
}
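// Illustrative write/append/read round-trip, not part of the original source (hypothetical file; UTF-8 defaults apply):
//   val f = new File("notes.txt")
//   IO.write(f, "first line\n")
//   IO.append(f, "second line\n")
//   IO.read(f) // expected: "first line\nsecond line\n"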
/** Reads the full contents of `in` into a String using `charset` or UTF-8 if `charset` is not explicitly specified. This method does not close `in`.*/
def readStream(in: InputStream, charset: Charset = defaultCharset): String =
{
val out = new ByteArrayOutputStream
transfer(in, out)
out.toString(charset.name)
}
/** Reads the full contents of `file` into a byte array. */
def readBytes(file: File): Array[Byte] = fileInputStream(file)(readBytes)
/** Reads the full contents of `in` into a byte array. This method does not close `in`. */
def readBytes(in: InputStream): Array[Byte] =
{
val out = new ByteArrayOutputStream
transfer(in, out)
out.toByteArray
}
/**
* Appends `content` to the existing contents of `file` using `charset` or UTF-8 if `charset` is not explicitly specified.
* If `file` does not exist, it is created, as are any parent directories.
*/
def append(file: File, content: String, charset: Charset = defaultCharset): Unit =
write(file, content, charset, true)
/**
* Appends `bytes` to the existing contents of `file`.
* If `file` does not exist, it is created, as are any parent directories.
*/
def append(file: File, bytes: Array[Byte]): Unit =
writeBytes(file, bytes, true)
/**
* Writes `bytes` to `file`, overwriting any existing content.
* If any parent directories do not exist, they are first created.
*/
def write(file: File, bytes: Array[Byte]): Unit =
writeBytes(file, bytes, false)
private def writeBytes(file: File, bytes: Array[Byte], append: Boolean): Unit =
fileOutputStream(append)(file) { _.write(bytes) }
/** Reads all of the lines from `url` using the provided `charset` or UTF-8 if `charset` is not explicitly specified. */
def readLinesURL(url: URL, charset: Charset = defaultCharset): List[String] =
urlReader(charset)(url)(readLines)
/** Reads all of the lines in `file` using the provided `charset` or UTF-8 if `charset` is not explicitly specified. */
def readLines(file: File, charset: Charset = defaultCharset): List[String] =
fileReader(charset)(file)(readLines)
/** Reads all of the lines from `in`. This method does not close `in`.*/
def readLines(in: BufferedReader): List[String] =
foldLines[List[String]](in, Nil)((accum, line) => line :: accum).reverse
/** Applies `f` to each line read from `in`. This method does not close `in`.*/
def foreachLine(in: BufferedReader)(f: String => Unit): Unit =
foldLines(in, ())((_, line) => f(line))
/**
* Applies `f` to each line read from `in` and the accumulated value of type `T`, with initial value `init`.
* This method does not close `in`.
*/
def foldLines[T](in: BufferedReader, init: T)(f: (T, String) => T): T =
{
def readLine(accum: T): T =
{
val line = in.readLine()
if (line eq null) accum else readLine(f(accum, line))
}
readLine(init)
}
/**
* Writes `lines` to `file` using the given `charset` or UTF-8 if `charset` is not explicitly specified.
* If `append` is `false`, the contents of the file are overwritten.
* If `append` is `true`, the lines are appended to the file.
* A newline is written after each line and NOT before the first line.
* If any parent directories of `file` do not exist, they are first created.
*/
def writeLines(file: File, lines: Seq[String], charset: Charset = defaultCharset, append: Boolean = false): Unit =
writer(file, lines.headOption.getOrElse(""), charset, append) { w =>
lines.foreach { line => w.write(line); w.newLine() }
}
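// Illustrative round-trip, not part of the original source (hypothetical file):
//   IO.writeLines(new File("hosts.txt"), Seq("alpha", "beta"))
//   IO.readLines(new File("hosts.txt")) // expected: List("alpha", "beta")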
/** Writes `lines` to `writer` using `writer`'s `println` method. */
def writeLines(writer: PrintWriter, lines: Seq[String]): Unit =
lines foreach writer.println
/**
* Writes `properties` to the File `to`, using `label` as the comment on the first line.
* If any parent directories of `to` do not exist, they are first created.
*/
def write(properties: Properties, label: String, to: File) =
fileOutputStream()(to) { output => properties.store(output, label) }
/** Reads the properties in `from` into `properties`. If `from` does not exist, `properties` is left unchanged.*/
def load(properties: Properties, from: File): Unit =
if (from.exists)
fileInputStream(from) { input => properties.load(input) }
/** A pattern used to split a String by path separator characters.*/
private val PathSeparatorPattern = java.util.regex.Pattern.compile(File.pathSeparator)
/** Splits a String around the platform's path separator characters. */
def pathSplit(s: String) = PathSeparatorPattern.split(s)
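// Illustrative usage, not part of the original source (assumes a platform whose path separator is ':'):
//   IO.pathSplit("a.jar:b.jar:lib/c.jar") // expected: Array("a.jar", "b.jar", "lib/c.jar")
// parseClasspath further below builds on this to turn a classpath String into Seq[File].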
/**
* Move the provided files to a temporary location.
* If 'f' returns normally, delete the files.
* If 'f' throws an Exception, return the files to their original location.
*/
def stash[T](files: Set[File])(f: => T): T =
withTemporaryDirectory { dir =>
val stashed = stashLocations(dir, files.toArray)
move(stashed)
try { f } catch {
case e: Exception =>
try { move(stashed.map(_.swap)); throw e }
catch { case _: Exception => throw e }
}
}
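// Illustrative usage, not part of the original source (regenerateSources() is a hypothetical placeholder):
//   IO.stash(Set(new File("gen/Model.scala"))) { regenerateSources() }
// If the body throws, the stashed files are moved back to their original locations; otherwise they are
// discarded along with the temporary stash directory.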
private def stashLocations(dir: File, files: Array[File]) =
for ((file, index) <- files.zipWithIndex) yield (file, new File(dir, index.toHexString))
// TODO: the reference to the other move overload does not resolve, probably due to a scaladoc bug
/**
* For each pair in `files`, moves the contents of the first File to the location of the second.
* See [[move(File,File)]] for the behavior of the individual move operations.
*/
def move(files: Traversable[(File, File)]): Unit =
files.foreach(Function.tupled(move))
/**
* Moves the contents of `a` to the location specified by `b`.
* This method deletes any content already at `b` and creates any parent directories of `b` if they do not exist.
* It will first try `File.renameTo` and if that fails, resort to copying and then deleting the original file.
* In either case, the original File will not exist on successful completion of this method.
*/
def move(a: File, b: File): Unit =
{
if (b.exists)
delete(b)
createDirectory(b.getParentFile)
if (!a.renameTo(b)) {
copyFile(a, b, true)
delete(a)
}
}
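// Illustrative usage, not part of the original source (hypothetical files):
//   IO.move(new File("build.tmp"), new File("build.properties"))
// On success the source no longer exists; if the rename fails (for example across filesystems),
// the method falls back to copy-then-delete.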
/**
* Applies `f` to a buffered gzip `OutputStream` for `file`.
* The streams involved are opened before calling `f` and closed after it returns.
* The result is the result of `f`.
*/
def gzipFileOut[T](file: File)(f: OutputStream => T): T =
Using.fileOutputStream()(file) { fout =>
Using.gzipOutputStream(fout) { outg =>
Using.bufferedOutputStream(outg)(f)
}
}
/**
* Applies `f` to a buffered gzip `InputStream` for `file`.
* The streams involved are opened before calling `f` and closed after it returns.
* The result is the result of `f`.
*/
def gzipFileIn[T](file: File)(f: InputStream => T): T =
Using.fileInputStream(file) { fin =>
Using.gzipInputStream(fin) { ing =>
Using.bufferedInputStream(ing)(f)
}
}
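// Illustrative pairing, not part of the original source (hypothetical file): write and read back gzip-compressed text.
//   IO.gzipFileOut(new File("log.gz")) { out => out.write("hello".getBytes(IO.utf8)) }
//   IO.gzipFileIn(new File("log.gz")) { in => IO.readStream(in) } // expected: "hello"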
/**
* Converts an absolute File to a URI. The File is converted to a URI (toURI),
* normalized (normalize), encoded (toASCIIString), and a forward slash ('/') is appended to the path component if
* it does not already end with a slash.
*/
def directoryURI(dir: File): URI =
{
assertAbsolute(dir)
directoryURI(dir.toURI.normalize)
}
/**
* Normalizes the given URI as a directory URI: the URI is encoded (toASCIIString), a forward slash ('/') is
* appended to the path component if the URI has the `file` scheme and does not already end with a slash,
* and the result is normalized (normalize). A URI that is not absolute is returned unchanged.
*/
def directoryURI(uri: URI): URI =
{
if (!uri.isAbsolute) return uri; //assertAbsolute(uri)
val str = uri.toASCIIString
val dirStr = if (str.endsWith("/") || uri.getScheme != FileScheme) str else str + "/"
(new URI(dirStr)).normalize
}
/** Converts the given File to a URI. If the File is relative, the URI is relative, unlike File.toURI*/
def toURI(f: File): URI =
// need to use the three argument URI constructor because the single argument version doesn't encode
if (f.isAbsolute) f.toURI else new URI(null, normalizeName(f.getPath), null)
/**
* Resolves `f` against `base`, which must be an absolute directory.
* The result is guaranteed to be absolute.
* If `f` is absolute, it is returned without changes.
*/
def resolve(base: File, f: File): File =
{
assertAbsolute(base)
val fabs = if (f.isAbsolute) f else new File(directoryURI(new File(base, f.getPath)))
assertAbsolute(fabs)
fabs
}
def assertAbsolute(f: File) = assert(f.isAbsolute, "Not absolute: " + f)
def assertAbsolute(uri: URI) = assert(uri.isAbsolute, "Not absolute: " + uri)
/** Parses a classpath String into File entries according to the current platform's path separator.*/
def parseClasspath(s: String): Seq[File] = IO.pathSplit(s).map(new File(_)).toSeq
/**
* Constructs an `ObjectInputStream` on `wrapped` that uses `loader` to load classes.
* See also [[https://github.com/sbt/sbt/issues/136 issue 136]].
*/
def objectInputStream(wrapped: InputStream, loader: ClassLoader): ObjectInputStream = new ObjectInputStream(wrapped) {
override def resolveClass(osc: ObjectStreamClass): Class[_] =
{
val c = Class.forName(osc.getName, false, loader)
if (c eq null) super.resolveClass(osc) else c
}
}
}
|
xeno-by/old-scalameta-sbt
|
util/io/src/main/scala/sbt/IO.scala
|
Scala
|
bsd-3-clause
| 36,335 |
package com.twitter.scrooge.java_generator
import com.twitter.scrooge.frontend._
import java.io._
import com.github.mustachejava.DefaultMustacheFactory
import com.twitter.mustache.ScalaObjectHandler
import com.google.common.base.Charsets
import com.google.common.io.CharStreams
import com.twitter.scrooge.ast._
import com.twitter.scrooge.java_generator.test.ApacheCompatibilityHelpers
import com.twitter.scrooge.frontend.{ResolvedDocument, TypeResolver}
import com.twitter.scrooge.testutil.Spec
import org.mockito.Mockito._
/**
* To generate the apache output for birdcage compatible thrift:
* ~/birdcage/maven-plugins/maven-finagle-thrift-plugin/src/main/resources/thrift/thrift-finagle.osx10.6
* --gen java -o /tmp/thrift test_thrift/empty_struct.thrift
*/
class ApacheJavaGeneratorSpec extends Spec {
def generateDoc(str: String) = {
val importer = Importer(Seq("src/test/resources/test_thrift", "scrooge-generator/src/test/resources/test_thrift"))
val parser = new ThriftParser(importer, true)
val doc = parser.parse(str, parser.document)
TypeResolver(allowStructRHS = true)(doc).document
}
def getGenerator(doc0: Document, genHashcode: Boolean = false) = {
new ApacheJavaGenerator(Map(), "thrift", genHashcode = genHashcode)
}
def getFileContents(resource: String) = {
val ccl = Thread.currentThread().getContextClassLoader
val is = ccl.getResourceAsStream(resource)
val br = new BufferedReader(new InputStreamReader(is, Charsets.UTF_8))
CharStreams.toString(br)
}
"Generator"should {
System.setProperty("mustache.debug", "true")
"populate enum controller" in {
val doc = generateDoc(getFileContents("test_thrift/enum.thrift"))
val controller = new EnumController(doc.enums(0), getGenerator(doc), doc.namespace("java"))
controller.name must be("test")
controller.constants(0).last must be(false)
controller.constants(1).last must be(true)
controller.constants(0).name must be("foo")
controller.namespace must be("com.twitter.thrift")
}
"generate enum code" in {
val controller = mock[EnumController]
when(controller.name) thenReturn "test"
when(controller.constants) thenReturn Seq(new EnumConstant("foo", 1, false), new EnumConstant("bar", 2, true))
when(controller.namespace) thenReturn "com.twitter.thrift"
when(controller.has_namespace) thenReturn true
val sw = renderMustache("enum.mustache", controller)
verify(sw, getFileContents("apache_output/enum.txt"))
}
"populate consts" in {
val doc = generateDoc(getFileContents("test_thrift/consts.thrift"))
val controller = new ConstController(doc.consts, getGenerator(doc), doc.namespace("java"))
val sw = renderMustache("consts.mustache", controller)
verify(sw, getFileContents("apache_output/consts.txt"))
}
"populate const map" in {
val doc = generateDoc(getFileContents("test_thrift/constant_map.thrift"))
val generator = getGenerator(doc, genHashcode = true)
val controller = new ConstController(doc.consts, generator, doc.namespace("java"))
val sw = renderMustache("consts.mustache", controller)
verify(sw, getFileContents("apache_output/constant_map.txt"))
}
"generate struct" in {
val doc = generateDoc(getFileContents("test_thrift/struct.thrift"))
val controller = new StructController(doc.structs(1), false, getGenerator(doc), doc.namespace("java"))
val sw = renderMustache("struct.mustache", controller)
verify(sw, getFileContents("apache_output/struct.txt"))
}
"generate struct with hashcode" in {
val doc = generateDoc(getFileContents("test_thrift/struct.thrift"))
val generator = getGenerator(doc, genHashcode = true)
val controller = new StructController(doc.structs(1), false, generator, doc.namespace("java"))
val sw = renderMustache("struct.mustache", controller)
verify(sw, getFileContents("apache_output/struct_with_hashcode.txt"))
}
"generate empty struct" in {
val doc = generateDoc(getFileContents("test_thrift/empty_struct.thrift"))
val controller = new StructController(doc.structs(0), false, getGenerator(doc), doc.namespace("java"))
val sw = renderMustache("struct.mustache", controller)
verify(sw, getFileContents("apache_output/empty_struct.txt"), false)
}
"generate exception" in {
val doc = generateDoc(getFileContents("test_thrift/service.thrift"))
val controller = new StructController(doc.structs(1), false, getGenerator(doc), doc.namespace("java"))
val sw = renderMustache("struct.mustache", controller)
verify(sw, getFileContents("apache_output/test_exception.txt"))
}
"generate union" in {
val doc = generateDoc(getFileContents("test_thrift/union.thrift"))
val controller = new StructController(doc.structs(0), false, getGenerator(doc), doc.namespace("java"))
val sw = renderMustache("struct.mustache", controller)
verify(sw, getFileContents("apache_output/union.txt"))
}
"generate union with hashcode" in {
val doc = generateDoc(getFileContents("test_thrift/union.thrift"))
val generator = getGenerator(doc, genHashcode = true)
val controller = new StructController(doc.structs(0), false, generator, doc.namespace("java"))
val sw = renderMustache("struct.mustache", controller)
verify(sw, getFileContents("apache_output/union_with_hashcode.txt"))
}
"generate service that extends parent" in {
val doc = generateDoc(getFileContents("test_thrift/service.thrift"))
val controller = new ServiceController(doc.services(1), getGenerator(doc), doc.namespace("java"))
val sw = renderMustache("service.mustache", controller)
verify(sw, getFileContents("apache_output/test_service.txt"))
}
"generate service that does not extend parent" in {
val doc = generateDoc(getFileContents("test_thrift/service_without_parent.thrift"))
val controller = new ServiceController(doc.services(0), getGenerator(doc), doc.namespace("java"))
val sw = renderMustache("service.mustache", controller)
verify(sw, getFileContents("apache_output/test_service_without_parent.txt"))
}
"generate service with a parent from a different namespace" in {
val baseDoc = mock[Document]
val parentDoc = mock[ResolvedDocument]
when(baseDoc.namespace("java")) thenReturn Some(QualifiedID(Seq("com", "twitter", "thrift")))
when(parentDoc.document) thenReturn baseDoc
val doc = generateDoc(getFileContents("test_thrift/service_with_parent_different_namespace.thrift"))
val generator = new ApacheJavaGenerator(Map("service" -> parentDoc), "thrift", false)
val controller = new ServiceController(doc.services(0), generator, doc.namespace("java"))
val sw = renderMustache("service.mustache", controller)
verify(sw, getFileContents("apache_output/other_service.txt"))
}
}
def verify(actual: String, expected: String, cleanEmptySemicolons: Boolean = true) {
val actualItems = ApacheCompatibilityHelpers.cleanWhitespace(actual, cleanEmptySemicolons)
val expectedItems = ApacheCompatibilityHelpers.cleanWhitespace(expected, cleanEmptySemicolons)
for (i <- 0 until actualItems.size) {
if (!actualItems(i).equals(expectedItems(i))) {
println("Actual: " + actualItems(i))
println("Expected: " + expectedItems(i))
// No idea why the one below doesn't make the test red
assert(actualItems(i) == expectedItems(i))
} else {
// println(expectedItems(i))
}
actualItems(i) must be(expectedItems(i))
}
}
def renderMustache(template: String, controller: Object) = {
val mf = new DefaultMustacheFactory("apachejavagen/")
mf.setObjectHandler(new ScalaObjectHandler)
val m = mf.compile(template)
val sw = new StringWriter()
m.execute(sw, controller).flush()
// Files.write(sw.toString, new File("/tmp/test"), Charsets.UTF_8)
sw.toString
}
}
|
elipoz/scrooge
|
scrooge-generator/src/test/scala/com/twitter/scrooge/java_generator/ApacheJavaGeneratorSpec.scala
|
Scala
|
apache-2.0
| 8,065 |
package akka.contrib.persistence.mongodb
import com.typesafe.config._
import akka.actor.ActorSystem
class MongoSettingsSpec extends BaseUnitTest {
def reference: Config = ConfigFactory.parseString(
"""
|akka.contrib.persistence.mongodb.mongo.driver = foo
""".stripMargin)
def withUri: Config = ConfigFactory.parseString(
"""
|akka.contrib.persistence.mongodb.mongo.mongouri = "mongodb://appuser:apppass@localhost:27017/sample_db_name"
|akka.contrib.persistence.mongodb.mongo.driver = foo
""".stripMargin)
def withMultiLegacy: Config = ConfigFactory.parseString(
"""
|akka.contrib.persistence.mongodb.mongo.urls = ["mongo1.example.com:27017","mongo2.example.com:27017"]
|akka.contrib.persistence.mongodb.mongo.driver = foo
""".stripMargin)
def withMultiLegacyAndCreds: Config = ConfigFactory.parseString(
"""
|akka.contrib.persistence.mongodb.mongo.urls = ["mongo1.example.com:27017","mongo2.example.com:27017","mongo3.example.com:27017"]
|akka.contrib.persistence.mongodb.mongo.username = my_user
|akka.contrib.persistence.mongodb.mongo.password = my_pass
|akka.contrib.persistence.mongodb.mongo.driver = foo
""".stripMargin)
def withCredentialsLegacy: Config = ConfigFactory.parseString(
"""
|akka.contrib.persistence.mongodb.mongo.urls = ["mongo1.example.com:27017"]
|akka.contrib.persistence.mongodb.mongo.username = user
|akka.contrib.persistence.mongodb.mongo.password = pass
|akka.contrib.persistence.mongodb.mongo.db = spec_db
|akka.contrib.persistence.mongodb.mongo.driver = foo
""".stripMargin)
def fixture[A](config: Config)(testCode: MongoSettings => A): A = {
testCode(MongoSettings(new ActorSystem.Settings(getClass.getClassLoader,config.withFallback(ConfigFactory.defaultReference()),"settings name")))
}
"A settings object" should "correctly load the defaults" in fixture(reference) { s =>
s.MongoUri shouldBe "mongodb://localhost:27017/akka-persistence"
}
it should "correctly load a uri" in fixture(withUri) { s =>
s.MongoUri shouldBe "mongodb://appuser:apppass@localhost:27017/sample_db_name"
}
it should "correctly load a replica set" in fixture(withMultiLegacy) { s =>
s.MongoUri shouldBe "mongodb://mongo1.example.com:27017,mongo2.example.com:27017/akka-persistence"
}
it should "correctly load a replica set with creds" in fixture(withMultiLegacyAndCreds) { s =>
s.MongoUri shouldBe "mongodb://my_user:[email protected]:27017,mongo2.example.com:27017,mongo3.example.com:27017/akka-persistence"
}
it should "correctly load legacy credentials" in fixture(withCredentialsLegacy) { s =>
s.MongoUri shouldBe "mongodb://user:[email protected]:27017/spec_db"
}
it should "allow for override" in fixture(withUri) { s =>
val overridden = ConfigFactory.parseString("""
|mongouri = "mongodb://localhost:27017/override"
""".stripMargin)
s.withOverride(overridden).MongoUri shouldBe "mongodb://localhost:27017/override"
s.withOverride(overridden).Implementation shouldBe "foo"
s.withOverride(overridden).JournalAutomaticUpgrade shouldBe false
}
}
|
scullxbones/akka-persistence-mongo
|
common/src/test/scala/akka/contrib/persistence/mongodb/MongoSettingsSpec.scala
|
Scala
|
apache-2.0
| 3,203 |
package wash
import scalaz._
import Scalaz._
import app.MConfig
import io.megam.common._
import io.megam.common.amqp._
import io.megam.common.amqp.request._
import io.megam.common.amqp.response._
import models.base.RequestResult
import controllers.Constants._
/**
* @author rajthilak
*
*/
case class AOneWasher(pq: models.Messageble) extends MessageContext {
def topic = (pq.topic(()).getOrElse(""))
val msg = pq.messages
def wash(): ValidationNel[Throwable, AMQPResponse] = {
execute(nsqClient.publish(msg))
}
}
case class PQd(f: Unit => Option[String], msg: String) extends models.Messageble {
override def topic(x: Unit): Option[String] = f(x)
override def messages = msg
}
object PQd {
def topic(x: Unit) = "testing".some
def empty: PQd = new PQd(topic, "")
}
|
indykish/vertice_gateway
|
app/wash/AOneWasher.scala
|
Scala
|
mit
| 797 |
/**
* Copyright (C) 2010-2011 LShift Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.lshift.diffa.agent.itest.support
import net.lshift.diffa.kernel.participants.{UpstreamMemoryParticipant, DownstreamMemoryParticipant, UpstreamParticipant, DownstreamParticipant}
import net.lshift.diffa.kernel.client._
import net.lshift.diffa.kernel.util.Placeholders
import org.joda.time.DateTime
import scala.collection.JavaConversions._
import net.lshift.diffa.kernel.differencing.AttributesUtil
import net.lshift.diffa.agent.itest.support.TestConstants._
import org.restlet.data.Protocol
import org.restlet.routing.Router
import org.restlet.resource.{ServerResource, Post}
import net.lshift.diffa.kernel.config._
import net.lshift.diffa.schema.servicelimits.ChangeEventRate
import org.restlet.{Application, Component}
import collection.mutable.HashMap
import net.lshift.diffa.agent.client._
import java.util.List
import net.lshift.diffa.adapter.scanning.{ScanAggregation, ScanConstraint}
import net.lshift.diffa.kernel.frontend._
import net.lshift.diffa.adapter.changes.ChangeEvent
import net.lshift.diffa.kernel.limiting.SystemClock
/**
* An assembled environment consisting of a downstream and upstream adapter. Provides a factory for the
* various parts, along with convenience methods for making the configuration valid.
*/
class TestEnvironment(val pairKey: String,
val participants: Participants,
changesClientBuilder: (TestEnvironment, String) => ChangesClient,
versionScheme: VersionScheme,
inboundURLBuilder:String => String = (_) => null) {
import TestEnvironment._
// Keep a tally of the number of requested resends
val entityResendTally = new HashMap[String,Int]
private val repairActionsComponent = {
val component = new Component
component.getServers.add(Protocol.HTTP, 8123)
component.getDefaultHost.attach("/repair", new RepairActionsApplication(entityResendTally))
component
}
def startActionServer() {
repairActionsComponent.start()
}
def stopActionServer() {
repairActionsComponent.stop()
}
def withActionsServer(op: => Unit) {
try {
startActionServer()
op
}
finally {
stopActionServer()
}
}
// Domain
val domain = TestEnvironment.domain
val serverRoot = TestEnvironment.serverRoot
// Version Generation
val versionForUpstream = versionScheme.upstreamVersionGen
val versionForDownstream = versionScheme.downstreamVersionGen
// Participants
val upstreamEpName = pairKey + "-us"
val downstreamEpName = pairKey + "-ds"
val upstream = new UpstreamMemoryParticipant(versionScheme.upstreamVersionGen) {
var queryResponseDelay = 0
override protected def doQuery(constraints: List[ScanConstraint], aggregations: List[ScanAggregation]) = {
if (queryResponseDelay > 0)
Thread.sleep(queryResponseDelay)
super.doQuery(constraints, aggregations)
}
}
val downstream = new DownstreamMemoryParticipant(versionScheme.upstreamVersionGen, versionScheme.downstreamVersionGen)
// Clients
lazy val upstreamChangesClient:ChangesClient = changesClientBuilder(this, upstreamEpName)
lazy val downstreamChangesClient:ChangesClient = changesClientBuilder(this, downstreamEpName)
lazy val configurationClient:ConfigurationRestClient = TestEnvironment.configurationClient
lazy val diffClient:DifferencesRestClient = TestEnvironment.diffClient
lazy val actionsClient:ActionsRestClient = TestEnvironment.actionsClient
lazy val escalationsClient:EscalationsRestClient = TestEnvironment.escalationsClient
lazy val usersClient:SecurityRestClient = TestEnvironment.usersClient
lazy val scanningClient:ScanningRestClient = TestEnvironment.scanningClient
lazy val systemConfig = TestEnvironment.systemConfig
lazy val inventoryClient = TestEnvironment.inventoryClient
// Helpers
val differencesHelper = new DifferencesHelper(pairKey, diffClient)
// Actions
val entityScopedActionName = "Resend Source"
val entityScopedActionUrl = "http://localhost:8123/repair/resend/{id}"
val pairScopedActionName = "Resend All"
val pairScopedActionUrl = "http://localhost:8123/repair/resend-all"
// Escalations
val escalationName = "Repair By Resending"
// Categories
val categories = Map("someDate" -> new RangeCategoryDescriptor("datetime"), "someString" -> new SetCategoryDescriptor(Set("ss", "tt")))
val views = Seq(EndpointViewDef(name = "tt-only", categories = Map("someString" -> new SetCategoryDescriptor(Set("tt")))))
// Participants' RPC server setup
participants.startUpstreamServer(upstream, upstream)
participants.startDownstreamServer(downstream, downstream, downstream)
// Ensure that the configuration exists
systemConfig.declareDomain(domain)
configurationClient.declareEndpoint(EndpointDef(name = upstreamEpName,
scanUrl = participants.upstreamScanUrl, contentRetrievalUrl = participants.upstreamContentUrl,
inboundUrl = inboundURLBuilder(upstreamEpName),
categories = categories,
views = views))
configurationClient.declareEndpoint(EndpointDef(name = downstreamEpName,
scanUrl = participants.downstreamScanUrl, contentRetrievalUrl = participants.downstreamContentUrl,
versionGenerationUrl = participants.downstreamVersionUrl,
inboundUrl = inboundURLBuilder(downstreamEpName),
categories = categories,
views = views))
createPair
configurationClient.declareRepairAction(entityScopedActionName, entityScopedActionUrl, RepairAction.ENTITY_SCOPE, pairKey)
configurationClient.declareEscalation(escalationName, pairKey, entityScopedActionName, EscalationActionType.REPAIR, "downstreamMissing", 0)
def createPair = configurationClient.declarePair(PairDef(key = pairKey,
versionPolicyName = versionScheme.policyName,
matchingTimeout = matchingTimeout,
upstreamName = upstreamEpName, downstreamName = downstreamEpName,
scanCronSpec = "0 15 10 15 * ?",
views = Seq(PairViewDef("tt-only"))))
def deletePair() {
configurationClient.deletePair(pairKey)
}
def createPairScopedAction = configurationClient.declareRepairAction(pairScopedActionName, pairScopedActionUrl, RepairAction.PAIR_SCOPE, pairKey)
val username = "foo"
val mail = "[email protected]"
usersClient.declareUser(UserDef(name = username,email = mail,superuser=false,password="bar"))
// Add a user to the domain so that at least 1 mail will be sent
configurationClient.makeDomainMember(username, "User")
/**
* Requests that the environment remove all stored state from the participants.
*/
def clearParticipants() {
upstream.clearEntities
downstream.clearEntities
}
// TODO: remove this when limiting can be configured via REST
private def pauseToAvoidRateLimitingFailure() {
def timeSince(t: Long) = SystemClock.currentTimeMillis - t
val t0 = SystemClock.currentTimeMillis
val minimumPauseInterval = 1000L / ChangeEventRate.defaultLimit
while (timeSince(t0) < minimumPauseInterval) {
try {
Thread.sleep(minimumPauseInterval)
} catch {
case _ =>
}
}
}
def addAndNotifyUpstream(id:String, content:String, someDate:DateTime, someString:String) {
pauseToAvoidRateLimitingFailure()
val attributes = pack(someDate = someDate, someString = someString)
upstream.addEntity(id, someDate, someString, Placeholders.dummyLastUpdated, content)
upstreamChangesClient.onChangeEvent(ChangeEvent.forChange(id, versionForUpstream(content), Placeholders.dummyLastUpdated, attributes))
}
def addAndNotifyDownstream(id:String, content:String, someDate:DateTime, someString:String) {
pauseToAvoidRateLimitingFailure()
val attributes = pack(someDate = someDate, someString = someString)
downstream.addEntity(id, someDate, someString, Placeholders.dummyLastUpdated, content)
versionScheme match {
case SameVersionScheme =>
downstreamChangesClient.onChangeEvent(ChangeEvent.forChange(id, versionForDownstream(content), Placeholders.dummyLastUpdated, attributes))
case CorrelatedVersionScheme =>
downstreamChangesClient.onChangeEvent(ChangeEvent.forTriggeredChange(id,
versionForUpstream(content), versionForDownstream(content), Placeholders.dummyLastUpdated, attributes))
}
}
def pack(someDate:DateTime, someString:String) = Map("someDate" -> someDate.toString(), "someString" -> someString)
}
object TestEnvironment {
// Domain
val domain = DomainDef(name="domain")
def serverRoot = agentURL
val matchingTimeout = 1 // 1 second
// Clients
lazy val configurationClient:ConfigurationRestClient = new ConfigurationRestClient(serverRoot, domain.name)
lazy val diffClient:DifferencesRestClient = new DifferencesRestClient(serverRoot, domain.name)
lazy val actionsClient:ActionsRestClient = new ActionsRestClient(serverRoot, domain.name)
lazy val escalationsClient:EscalationsRestClient = new EscalationsRestClient(serverRoot, domain.name)
lazy val usersClient:SecurityRestClient = new SecurityRestClient(serverRoot)
lazy val scanningClient:ScanningRestClient = new ScanningRestClient(serverRoot, domain.name)
lazy val systemConfig = new SystemConfigRestClient(serverRoot)
lazy val inventoryClient = new InventoryRestClient(serverRoot, domain.name)
}
class ResendAllResource extends ServerResource {
@Post def resendAll = "resending all"
}
class ResendEntityResource extends ServerResource {
/**
* Update the resend tally for each entity
*/
@Post def resend = {
val entityId = getRequest.getResourceRef.getLastSegment
val tally = getContext.getAttributes.get("tally").asInstanceOf[HashMap[String,Int]]
tally.synchronized {
tally.get(entityId) match {
case Some(x) => tally(entityId) = x + 1
case None => tally(entityId) = 1
}
tally.notifyAll
}
"resending entity"
}
}
class RepairActionsApplication(tally:HashMap[String,Int]) extends Application {
override def createInboundRoot = {
// Pass the tally to the underlying resource
// NOTE: This is due to Restlet's API - it feels like they should provide a resource that you can constructor-inject
getContext.setAttributes(Map("tally"-> tally))
val router = new Router(getContext)
router.attach("/resend-all", classOf[ResendAllResource])
router.attach("/resend/abc", classOf[ResendEntityResource])
router
}
}
abstract class VersionScheme {
def policyName:String
def upstreamVersionGen:Function1[String, String]
def downstreamVersionGen:Function1[String, String]
}
object SameVersionScheme extends VersionScheme {
val policyName = "same"
val upstreamVersionGen = (content) => "vsn_" + content
val downstreamVersionGen = (content) => "vsn_" + content
}
object CorrelatedVersionScheme extends VersionScheme {
val policyName = "correlated"
val upstreamVersionGen = (content) => "uvsn_" + content
val downstreamVersionGen = (content) => "dvsn_" + content
}
|
lshift/diffa
|
agent/src/test/scala/net/lshift/diffa/agent/itest/support/TestEnvironment.scala
|
Scala
|
apache-2.0
| 11,553 |
package eu.brosbit.opos.snippet.admin
import java.util.Date
import scala.xml.{NodeSeq, Text, XML, Unparsed}
import _root_.net.liftweb.util._
import _root_.net.liftweb.common._
import _root_.eu.brosbit.opos.model.page._
import _root_.eu.brosbit.opos.model._
import _root_.net.liftweb.http.{S, SHtml, RequestVar}
import _root_.net.liftweb.mapper.{Ascending, OrderBy, By}
import _root_.net.liftweb.http.js._
import JsCmds._
import JE._
import Helpers._
import org.bson.types.ObjectId
import _root_.net.liftweb.json.JsonDSL._
class AdminAddSlidesSn {
def addSlide() = {
var id = ""
var descript = ""
var src = ""
var htmlContent = ""
def save() {
val mainPageSlide = MainPageSlide.find(id) match {
case Some(mainPageSlide) => mainPageSlide
case _ => MainPageSlide.create
}
mainPageSlide.desc = descript
mainPageSlide.img = src
mainPageSlide.html = htmlContent
mainPageSlide.save
}
def delete() {
MainPageSlide.find(id) match {
case Some(mainPageSlide) => mainPageSlide.delete
case _ => println("not found sliders")
}
}
"#id" #> SHtml.text(id, id = _, "style" -> "display:none;") &
"#link" #> SHtml.text(src, src = _) &
"#htmlContent" #> SHtml.textarea(htmlContent, htmlContent = _) &
"#description" #> SHtml.text(descript, descript = _) &
"#save" #> SHtml.submit("Zapisz!", save) &
"#delete" #> SHtml.submit("Usuń!", delete,
"onclick"-> "return prompt('Na pewno usunąć?');")
}
def slideList() = {
val slides = MainPageSlide.findAll
"tr" #> slides.map(slide => {
<tr id={slide._id.toString} ondblclick="setData(this);">
<td>
<img src={slide.img} style="width:300px;height:100px;"/>
</td>
<td>
{slide.desc}
</td> <td>
{Unparsed(slide.html)}
</td>
</tr>
})
}
}
|
mikolajs/osp
|
src/main/scala/eu/brosbit/opos/snippet/admin/AdminAddSlidesSn.scala
|
Scala
|
agpl-3.0
| 1,917 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.inject
import com.google.inject.AbstractModule
import com.typesafe.config.Config
import org.specs2.mutable.Specification
import play.api.{ Configuration, Environment }
import play.{ Configuration => JavaConfiguration, Environment => JavaEnvironment }
class ModulesSpec extends Specification {
"Modules.locate" should {
"load simple Guice modules" in {
val env = Environment.simple()
val conf = Configuration("play.modules.enabled" -> Seq(
classOf[PlainGuiceModule].getName
))
      val located: Seq[Any] = Modules.locate(env, conf)
      located.size must_== 1
      located.head must beAnInstanceOf[PlainGuiceModule]
}
"load Guice modules that take a Scala Environment and Configuration" in {
val env = Environment.simple()
val conf = Configuration("play.modules.enabled" -> Seq(
classOf[ScalaGuiceModule].getName
))
val located: Seq[Any] = Modules.locate(env, conf)
located.size must_== 1
located.head must beLike {
case mod: ScalaGuiceModule =>
mod.environment must_== env
mod.configuration must_== conf
}
}
"load Guice modules that take a Java Environment and Configuration" in {
val env = Environment.simple()
val conf = Configuration("play.modules.enabled" -> Seq(
classOf[JavaGuiceConfigurationModule].getName
))
val located: Seq[Any] = Modules.locate(env, conf)
located.size must_== 1
located.head must beLike {
case mod: JavaGuiceConfigurationModule =>
mod.environment.underlying must_== env
mod.configuration.underlying must_== conf.underlying
}
}
"load Guice modules that take a Java Environment and Config" in {
val env = Environment.simple()
val conf = Configuration("play.modules.enabled" -> Seq(
classOf[JavaGuiceConfigModule].getName
))
val located: Seq[Any] = Modules.locate(env, conf)
located.size must_== 1
located.head must beLike {
case mod: JavaGuiceConfigModule =>
mod.environment.underlying must_== env
mod.config must_== conf.underlying
}
}
}
}
class PlainGuiceModule extends AbstractModule {
def configure(): Unit = ()
}
class ScalaGuiceModule(
val environment: Environment,
val configuration: Configuration) extends AbstractModule {
def configure(): Unit = ()
}
class JavaGuiceConfigModule(
val environment: JavaEnvironment,
val config: Config) extends AbstractModule {
def configure(): Unit = ()
}
class JavaGuiceConfigurationModule(
val environment: JavaEnvironment,
val configuration: JavaConfiguration) extends AbstractModule {
def configure(): Unit = ()
}
|
aradchykov/playframework
|
framework/src/play-guice/src/test/scala/play/api/inject/ModulesSpec.scala
|
Scala
|
apache-2.0
| 2,953 |
package com.trueaccord.scalapb
import org.scalatest._
import org.scalatest.prop.GeneratorDrivenPropertyChecks
class EncodingSpec extends PropSpec with GeneratorDrivenPropertyChecks with Matchers {
property("fromBase64 is the inverse of toBase64") {
forAll {
b: Array[Byte] =>
Encoding.fromBase64(Encoding.toBase64(b)) should be(b)
}
}
property("fromBase64 is compatible with javax.printBase64") {
forAll {
b: Array[Byte] =>
Encoding.fromBase64(javax.xml.bind.DatatypeConverter.printBase64Binary(b)) should be(b)
}
}
property("toBase64 is compatible with javax.parseBase64") {
forAll {
b: Array[Byte] =>
javax.xml.bind.DatatypeConverter.parseBase64Binary(
Encoding.toBase64(b)) should be(b)
}
}
}
|
eiennohito/ScalaPB
|
scalapb-runtime/shared/src/test/scala/com/trueaccord/scalapb/EncodingSpec.scala
|
Scala
|
apache-2.0
| 788 |
package com.github.mdr.mash.ns.os
import java.nio.charset.StandardCharsets
import java.nio.file.Path
import com.github.mdr.mash.completions.CompletionSpec
import com.github.mdr.mash.evaluator._
import com.github.mdr.mash.functions.{ BoundParams, MashFunction, Parameter, ParameterModel }
import com.github.mdr.mash.inference._
import com.github.mdr.mash.ns.core.UnitClass
import com.github.mdr.mash.runtime.{ MashBoolean, MashList, MashUnit, MashValue }
import org.apache.commons.io.FileUtils
import scala.collection.JavaConverters._
object WriteFunction extends MashFunction("os.write") {
object Params {
val Append = Parameter(
nameOpt = Some("append"),
summaryOpt = Some("Append to the end of the file, if it already exists"),
shortFlagOpt = Some('a'),
isFlag = true,
    defaultValueGeneratorOpt = Some(MashBoolean.False),
isBooleanFlag = true)
val File = Parameter(
nameOpt = Some("file"),
summaryOpt = Some("File to write to"))
val Data = Parameter(
nameOpt = Some("data"),
summaryOpt = Some("Data to write to the file"),
descriptionOpt = Some("""If the given data is a sequence, write a line to the file for each item.
Otherwise, write the item as a string."""))
}
import Params._
val params = ParameterModel(Append, File, Data)
def call(boundParams: BoundParams): MashUnit = {
val append = boundParams(Append).isTruthy
val path = boundParams.validatePath(File)
val data = boundParams(Data)
write(path, data, append)
MashUnit
}
def write(path: Path, data: MashValue, append: Boolean): Unit = {
val file = path.toFile
data match {
case xs: MashList ⇒
val lines = xs.elements.map(ToStringifier.stringify)
FileUtils.writeLines(file, lines.asJava, append)
case x ⇒
FileUtils.write(file, ToStringifier.stringify(x), StandardCharsets.UTF_8, append)
}
}
  override def typeInferenceStrategy = UnitClass
override def getCompletionSpecs(argPos: Int, arguments: TypedArguments) =
Seq(CompletionSpec.File)
override def summaryOpt = Some("Write an object or sequence of objects to a file as a string")
override def descriptionOpt = Some("""The default encoding is used to convert the strings to bytes.
If multiple lines are written, the default line separator is used.""")
}
|
mdr/mash
|
src/main/scala/com/github/mdr/mash/ns/os/WriteFunction.scala
|
Scala
|
mit
| 2,349 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.exchange
import java.util.Random
import java.util.function.Supplier
import scala.concurrent.Future
import org.apache.spark._
import org.apache.spark.internal.config
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.{ShuffleWriteMetricsReporter, ShuffleWriteProcessor}
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions.{Attribute, BoundReference, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.codegen.LazilyGeneratedOrdering
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.adaptive.{LocalShuffledRowRDD, SkewedShuffledRowRDD}
import org.apache.spark.sql.execution.metric.{SQLMetric, SQLMetrics, SQLShuffleReadMetricsReporter, SQLShuffleWriteMetricsReporter}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.MutablePair
import org.apache.spark.util.collection.unsafe.sort.{PrefixComparators, RecordComparator}
/**
* Performs a shuffle that will result in the desired partitioning.
*/
case class ShuffleExchangeExec(
override val outputPartitioning: Partitioning,
child: SparkPlan,
canChangeNumPartitions: Boolean = true) extends Exchange {
// NOTE: coordinator can be null after serialization/deserialization,
// e.g. it can be null on the Executor side
private lazy val writeMetrics =
SQLShuffleWriteMetricsReporter.createShuffleWriteMetrics(sparkContext)
private lazy val readMetrics =
SQLShuffleReadMetricsReporter.createShuffleReadMetrics(sparkContext)
override lazy val metrics = Map(
"dataSize" -> SQLMetrics.createSizeMetric(sparkContext, "data size")
) ++ readMetrics ++ writeMetrics
override def nodeName: String = "Exchange"
private val serializer: Serializer =
new UnsafeRowSerializer(child.output.size, longMetric("dataSize"))
@transient lazy val inputRDD: RDD[InternalRow] = child.execute()
  // 'mapOutputStatisticsFuture' is only needed when AQE is enabled.
@transient lazy val mapOutputStatisticsFuture: Future[MapOutputStatistics] = {
if (inputRDD.getNumPartitions == 0) {
Future.successful(null)
} else {
sparkContext.submitMapStage(shuffleDependency)
}
}
/**
* A [[ShuffleDependency]] that will partition rows of its child based on
* the partitioning scheme defined in `newPartitioning`. Those partitions of
* the returned ShuffleDependency will be the input of shuffle.
*/
@transient
lazy val shuffleDependency : ShuffleDependency[Int, InternalRow, InternalRow] = {
ShuffleExchangeExec.prepareShuffleDependency(
inputRDD,
child.output,
outputPartitioning,
serializer,
writeMetrics)
}
def createShuffledRDD(
partitionRanges: Option[Array[(Int, Int)]]): ShuffledRowRDD = {
new ShuffledRowRDD(shuffleDependency, readMetrics, partitionRanges)
}
def createLocalShuffleRDD(
partitionStartIndicesPerMapper: Array[Array[Int]]): LocalShuffledRowRDD = {
new LocalShuffledRowRDD(shuffleDependency, readMetrics, partitionStartIndicesPerMapper)
}
def createSkewedShuffleRDD(
partitionIndex: Int,
startMapIndex: Int,
endMapIndex: Int): SkewedShuffledRowRDD = {
new SkewedShuffledRowRDD(shuffleDependency,
partitionIndex, startMapIndex, endMapIndex, readMetrics)
}
/**
* Caches the created ShuffleRowRDD so we can reuse that.
*/
private var cachedShuffleRDD: ShuffledRowRDD = null
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
// Returns the same ShuffleRowRDD if this plan is used by multiple plans.
if (cachedShuffleRDD == null) {
cachedShuffleRDD = createShuffledRDD(None)
}
cachedShuffleRDD
}
}
object ShuffleExchangeExec {
/**
* Determines whether records must be defensively copied before being sent to the shuffle.
* Several of Spark's shuffle components will buffer deserialized Java objects in memory. The
* shuffle code assumes that objects are immutable and hence does not perform its own defensive
* copying. In Spark SQL, however, operators' iterators return the same mutable `Row` object. In
* order to properly shuffle the output of these operators, we need to perform our own copying
* prior to sending records to the shuffle. This copying is expensive, so we try to avoid it
* whenever possible. This method encapsulates the logic for choosing when to copy.
*
* In the long run, we might want to push this logic into core's shuffle APIs so that we don't
* have to rely on knowledge of core internals here in SQL.
*
* See SPARK-2967, SPARK-4479, and SPARK-7375 for more discussion of this issue.
*
* @param partitioner the partitioner for the shuffle
* @return true if rows should be copied before being shuffled, false otherwise
*/
private def needToCopyObjectsBeforeShuffle(partitioner: Partitioner): Boolean = {
// Note: even though we only use the partitioner's `numPartitions` field, we require it to be
// passed instead of directly passing the number of partitions in order to guard against
// corner-cases where a partitioner constructed with `numPartitions` partitions may output
// fewer partitions (like RangePartitioner, for example).
val conf = SparkEnv.get.conf
val shuffleManager = SparkEnv.get.shuffleManager
val sortBasedShuffleOn = shuffleManager.isInstanceOf[SortShuffleManager]
val bypassMergeThreshold = conf.get(config.SHUFFLE_SORT_BYPASS_MERGE_THRESHOLD)
val numParts = partitioner.numPartitions
if (sortBasedShuffleOn) {
if (numParts <= bypassMergeThreshold) {
// If we're using the original SortShuffleManager and the number of output partitions is
// sufficiently small, then Spark will fall back to the hash-based shuffle write path, which
// doesn't buffer deserialized records.
// Note that we'll have to remove this case if we fix SPARK-6026 and remove this bypass.
false
} else if (numParts <= SortShuffleManager.MAX_SHUFFLE_OUTPUT_PARTITIONS_FOR_SERIALIZED_MODE) {
// SPARK-4550 and SPARK-7081 extended sort-based shuffle to serialize individual records
// prior to sorting them. This optimization is only applied in cases where shuffle
// dependency does not specify an aggregator or ordering and the record serializer has
// certain properties and the number of partitions doesn't exceed the limitation. If this
// optimization is enabled, we can safely avoid the copy.
//
// Exchange never configures its ShuffledRDDs with aggregators or key orderings, and the
        // serializer in Spark SQL always satisfies the properties, so we only need to check whether
// the number of partitions exceeds the limitation.
false
} else {
// Spark's SortShuffleManager uses `ExternalSorter` to buffer records in memory, so we must
// copy.
true
}
} else {
// Catch-all case to safely handle any future ShuffleManager implementations.
true
}
}
/**
* Returns a [[ShuffleDependency]] that will partition rows of its child based on
* the partitioning scheme defined in `newPartitioning`. Those partitions of
* the returned ShuffleDependency will be the input of shuffle.
*/
def prepareShuffleDependency(
rdd: RDD[InternalRow],
outputAttributes: Seq[Attribute],
newPartitioning: Partitioning,
serializer: Serializer,
writeMetrics: Map[String, SQLMetric])
: ShuffleDependency[Int, InternalRow, InternalRow] = {
val part: Partitioner = newPartitioning match {
case RoundRobinPartitioning(numPartitions) => new HashPartitioner(numPartitions)
case HashPartitioning(_, n) =>
new Partitioner {
override def numPartitions: Int = n
// For HashPartitioning, the partitioning key is already a valid partition ID, as we use
// `HashPartitioning.partitionIdExpression` to produce partitioning key.
override def getPartition(key: Any): Int = key.asInstanceOf[Int]
}
case RangePartitioning(sortingExpressions, numPartitions) =>
        // Extract only the fields used for sorting to avoid collecting large fields that do not
        // affect the sorting result when deciding partition bounds in RangePartitioner
val rddForSampling = rdd.mapPartitionsInternal { iter =>
val projection =
UnsafeProjection.create(sortingExpressions.map(_.child), outputAttributes)
val mutablePair = new MutablePair[InternalRow, Null]()
// Internally, RangePartitioner runs a job on the RDD that samples keys to compute
// partition bounds. To get accurate samples, we need to copy the mutable keys.
iter.map(row => mutablePair.update(projection(row).copy(), null))
}
// Construct ordering on extracted sort key.
val orderingAttributes = sortingExpressions.zipWithIndex.map { case (ord, i) =>
ord.copy(child = BoundReference(i, ord.dataType, ord.nullable))
}
implicit val ordering = new LazilyGeneratedOrdering(orderingAttributes)
new RangePartitioner(
numPartitions,
rddForSampling,
ascending = true,
samplePointsPerPartitionHint = SQLConf.get.rangeExchangeSampleSizePerPartition)
case SinglePartition =>
new Partitioner {
override def numPartitions: Int = 1
override def getPartition(key: Any): Int = 0
}
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
// TODO: Handle BroadcastPartitioning.
}
def getPartitionKeyExtractor(): InternalRow => Any = newPartitioning match {
case RoundRobinPartitioning(numPartitions) =>
// Distributes elements evenly across output partitions, starting from a random partition.
var position = new Random(TaskContext.get().partitionId()).nextInt(numPartitions)
(row: InternalRow) => {
// The HashPartitioner will handle the `mod` by the number of partitions
position += 1
position
}
case h: HashPartitioning =>
val projection = UnsafeProjection.create(h.partitionIdExpression :: Nil, outputAttributes)
row => projection(row).getInt(0)
case RangePartitioning(sortingExpressions, _) =>
val projection = UnsafeProjection.create(sortingExpressions.map(_.child), outputAttributes)
row => projection(row)
case SinglePartition => identity
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
}
val isRoundRobin = newPartitioning.isInstanceOf[RoundRobinPartitioning] &&
newPartitioning.numPartitions > 1
val rddWithPartitionIds: RDD[Product2[Int, InternalRow]] = {
// [SPARK-23207] Have to make sure the generated RoundRobinPartitioning is deterministic,
// otherwise a retry task may output different rows and thus lead to data loss.
//
      // Currently we follow the most straightforward way and perform a local sort before
// partitioning.
//
// Note that we don't perform local sort if the new partitioning has only 1 partition, under
// that case all output rows go to the same partition.
val newRdd = if (isRoundRobin && SQLConf.get.sortBeforeRepartition) {
rdd.mapPartitionsInternal { iter =>
val recordComparatorSupplier = new Supplier[RecordComparator] {
override def get: RecordComparator = new RecordBinaryComparator()
}
// The comparator for comparing row hashcode, which should always be Integer.
val prefixComparator = PrefixComparators.LONG
// The prefix computer generates row hashcode as the prefix, so we may decrease the
// probability that the prefixes are equal when input rows choose column values from a
// limited range.
val prefixComputer = new UnsafeExternalRowSorter.PrefixComputer {
private val result = new UnsafeExternalRowSorter.PrefixComputer.Prefix
override def computePrefix(row: InternalRow):
UnsafeExternalRowSorter.PrefixComputer.Prefix = {
// The hashcode generated from the binary form of a [[UnsafeRow]] should not be null.
result.isNull = false
result.value = row.hashCode()
result
}
}
val pageSize = SparkEnv.get.memoryManager.pageSizeBytes
val sorter = UnsafeExternalRowSorter.createWithRecordComparator(
StructType.fromAttributes(outputAttributes),
recordComparatorSupplier,
prefixComparator,
prefixComputer,
pageSize,
// We are comparing binary here, which does not support radix sort.
// See more details in SPARK-28699.
false)
sorter.sort(iter.asInstanceOf[Iterator[UnsafeRow]])
}
} else {
rdd
}
// round-robin function is order sensitive if we don't sort the input.
val isOrderSensitive = isRoundRobin && !SQLConf.get.sortBeforeRepartition
if (needToCopyObjectsBeforeShuffle(part)) {
newRdd.mapPartitionsWithIndexInternal((_, iter) => {
val getPartitionKey = getPartitionKeyExtractor()
iter.map { row => (part.getPartition(getPartitionKey(row)), row.copy()) }
}, isOrderSensitive = isOrderSensitive)
} else {
newRdd.mapPartitionsWithIndexInternal((_, iter) => {
val getPartitionKey = getPartitionKeyExtractor()
val mutablePair = new MutablePair[Int, InternalRow]()
iter.map { row => mutablePair.update(part.getPartition(getPartitionKey(row)), row) }
}, isOrderSensitive = isOrderSensitive)
}
}
// Now, we manually create a ShuffleDependency. Because pairs in rddWithPartitionIds
// are in the form of (partitionId, row) and every partitionId is in the expected range
// [0, part.numPartitions - 1]. The partitioner of this is a PartitionIdPassthrough.
val dependency =
new ShuffleDependency[Int, InternalRow, InternalRow](
rddWithPartitionIds,
new PartitionIdPassthrough(part.numPartitions),
serializer,
shuffleWriterProcessor = createShuffleWriteProcessor(writeMetrics))
dependency
}
/**
* Create a customized [[ShuffleWriteProcessor]] for SQL which wrap the default metrics reporter
* with [[SQLShuffleWriteMetricsReporter]] as new reporter for [[ShuffleWriteProcessor]].
*/
def createShuffleWriteProcessor(metrics: Map[String, SQLMetric]): ShuffleWriteProcessor = {
new ShuffleWriteProcessor {
override protected def createMetricsReporter(
context: TaskContext): ShuffleWriteMetricsReporter = {
new SQLShuffleWriteMetricsReporter(context.taskMetrics().shuffleWriteMetrics, metrics)
}
}
}
}
|
ptkool/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchangeExec.scala
|
Scala
|
apache-2.0
| 16,057 |
package com.sksamuel.elastic4s.aws
import java.io.ByteArrayInputStream
import java.net.URLEncoder
import java.nio.charset.StandardCharsets
import org.apache.http.client.methods.{HttpGet, HttpPost}
import org.apache.http.entity.BasicHttpEntity
trait SharedTestData {
val region = "us-east-1"
val service = "es"
val dateTime = "20150830T123600Z"
val date = "20150830"
val host = "es.amazonaws.com"
val awsKey = "AKIDEXAMPLE"
val awsSecret = "YNexysRYkuJmLzyNKfotrkEEWWwTEiOgXPEHHGsp"
val awsSessionToken = "ThisIsASessionToken"
val forbiddenCharactersAndMore = URLEncoder.encode("!@#$%ˆ&*()/to[]{};'", "UTF-8")
val encodedForbiddenCharactersAndMore = "%2521%2540%2523%2524%2525%25CB%2586%2526%2A%2528%2529%252Fto%255B%255D%257B%257D%253B%2527"
def httpWithForbiddenCharacters = {
val request = new HttpGet(s"https://es.amazonaws.com/path/to/resource${forbiddenCharactersAndMore}?Action=ListUsers&Version=2010-05-08")
request.addHeader("x-amz-date", dateTime)
request.addHeader("Host", host)
request.addHeader("content-type", "application/x-www-form-urlencoded; charset=utf-8")
request
}
def httpGetRequest = {
val request = new HttpGet("https://es.amazonaws.com/path/to/resource?Action=ListUsers&Version=2010-05-08")
request.addHeader("x-amz-date", dateTime)
request.addHeader("Host", host)
request.addHeader("content-type", "application/x-www-form-urlencoded; charset=utf-8")
request
}
def httpGetRequestWithUnorderedQueryParams = {
val request = new HttpGet("https://es.amazonaws.com/path/to/resource?Version=2010-05-08&Action=ListUsers")
request.addHeader("x-amz-date", dateTime)
request.addHeader("Host", host)
request.addHeader("content-type", "application/x-www-form-urlencoded; charset=utf-8")
request
}
def httpPostRequest = {
val entity = new BasicHttpEntity()
entity.setContent(new ByteArrayInputStream("This is the content".getBytes(StandardCharsets.UTF_8.name())))
val request = new HttpPost("https://es.amazonaws.com/path/to/resource?Action=ListUsers&Version=2010-05-08")
request.setEntity(entity)
request.addHeader("x-amz-date", dateTime)
request.addHeader("Host", host)
request.addHeader("content-type", "application/x-www-form-urlencoded; charset=utf-8")
request
}
def httpPostRequestWithoutDate = {
val entity = new BasicHttpEntity()
entity.setContent(new ByteArrayInputStream("This is the content".getBytes(StandardCharsets.UTF_8.name())))
val request = new HttpPost("https://es.amazonaws.com/path/to/resource?Action=ListUsers&Version=2010-05-08")
request.setEntity(entity)
request.addHeader("Host", host)
request.addHeader("content-type", "application/x-www-form-urlencoded; charset=utf-8")
request
}
def httpPostRequestWithBadHost = {
val entity = new BasicHttpEntity()
entity.setContent(new ByteArrayInputStream("This is the content".getBytes(StandardCharsets.UTF_8.name())))
val request = new HttpPost("https://es.amazonaws.com:443/path/to/resource?Action=ListUsers&Version=2010-05-08")
request.setEntity(entity)
request.addHeader("Host", host)
request.addHeader("content-type", "application/x-www-form-urlencoded; charset=utf-8")
request
}
}
|
Tecsisa/elastic4s
|
elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/aws/SharedTestData.scala
|
Scala
|
apache-2.0
| 3,272 |
package kvstore
import akka.actor.Props
import akka.actor.Actor
import akka.actor.ActorRef
import scala.concurrent.duration._
object Replicator {
case class Replicate(key: String, valueOption: Option[String], id: Long)
case class Replicated(key: String, id: Long)
case class Snapshot(key: String, valueOption: Option[String], seq: Long)
case class SnapshotAck(key: String, seq: Long)
def props(replica: ActorRef): Props = Props(new Replicator(replica))
}
class Replicator(val replica: ActorRef) extends Actor {
import Replicator._
import Replica._
import context.dispatcher
/*
   * The contents of this actor are just a suggestion; you can implement it in any way you like.
*/
// map from sequence number to pair of sender and request
var acks = Map.empty[Long, (ActorRef, Replicate)]
// a sequence of not-yet-sent snapshots (you can disregard this if not implementing batching)
var pending = Vector.empty[Snapshot]
var _seqCounter = 0L
def nextSeq = {
val ret = _seqCounter
_seqCounter += 1
ret
}
/* TODO Behavior for the Replicator. */
def receive: Receive = {
case rep @ Replicate(key, valueOption, id) => {
val seq = nextSeq
acks += seq -> (sender, rep)
send(rep, seq)
}
case SnapshotAck(key, seq) => {
acks.get(seq).foreach { case(requester, r) =>
requester ! Replicated(key, r.id)
acks -= seq
}
}
}
def send(r: Replicate, seq: Long) = {
replica ! Snapshot(r.key, r.valueOption, seq)
}
def resend() = {
acks.foreach { case (seq, (_, r)) =>
send(r, seq)
}
}
override def preStart() = {
context.system.scheduler.schedule(0 milliseconds, 100 milliseconds)(resend)
}
}
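
// Hedged usage sketch, not part of the original assignment code: wires a Replicator to a
// hypothetical replica that immediately acknowledges every Snapshot, to illustrate the
// Replicate -> Snapshot -> SnapshotAck -> Replicated round trip implemented above.
object ReplicatorUsageSketch {
  import akka.actor.ActorSystem
  import Replicator._

  class AckingReplica extends Actor {
    def receive: Receive = {
      // Acknowledge every snapshot straight away, as a well-behaved secondary would
      // after applying the update.
      case Snapshot(key, _, seq) => sender ! SnapshotAck(key, seq)
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("replication-sketch")
    val replica = system.actorOf(Props(new AckingReplica), "replica")
    val replicator = system.actorOf(Replicator.props(replica), "replicator")
    // The Replicator resends the Snapshot every 100ms until the ack arrives, then sends
    // Replicated("k1", 0) back to the sender of this Replicate message.
    replicator ! Replicate("k1", Some("v1"), 0L)
  }
}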
|
wezil/principles-reactive-programming
|
kvstore/Replicator.scala
|
Scala
|
mit
| 1,752 |
/*
* Copyright 2014 – 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package rx.redis.serialization
import rx.redis.util._
import io.netty.buffer.ByteBuf
import annotation.implicitNotFound
import util.control.NonFatal
@implicitNotFound("Cannot find a ByteBufReader of ${A}. You have to implement an rx.redis.serialization.ByteBufReader[${A}] in order to read an ${A} as a custom value.")
trait ByteBufReader[@specialized(Boolean, Byte, Int, Long) A] {
def fromByteBuf(bb: ByteBuf): A
final def readAndRelease(bb: ByteBuf): A =
try fromByteBuf(bb) finally bb.release()
def map[B](f: A ⇒ B): ByteBufReader[B] =
ByteBufReader(fromByteBuf _ andThen f)
def flatMap[B](f: A ⇒ ByteBufReader[B]): ByteBufReader[B] =
ByteBufReader(bb ⇒ f(fromByteBuf(bb)).fromByteBuf(bb))
def andRead[B](f: ⇒ ByteBufReader[B]): ByteBufReader[(A, B)] =
ByteBufReader(bb ⇒ (fromByteBuf(bb), f.fromByteBuf(bb)))
def orRead[B](f: ⇒ ByteBufReader[B]): ByteBufReader[Either[A, B]] =
ByteBufReader(bb ⇒ try Left(fromByteBuf(bb)) catch { case NonFatal(_) ⇒ Right(f.fromByteBuf(bb)) })
}
object ByteBufReader {
def apply[A](f: ByteBuf ⇒ A): ByteBufReader[A] = new ByteBufReader[A] {
def fromByteBuf(bb: ByteBuf): A = f(bb)
}
def of[A](implicit A: ByteBufReader[A]): ByteBufReader[A] = A
implicit val readBoolean: ByteBufReader[Boolean] =
apply(_.readBoolean())
implicit val readByte: ByteBufReader[Byte] =
apply(_.readByte())
implicit val readChar: ByteBufReader[Char] =
apply(_.readChar())
implicit val readInt: ByteBufReader[Int] =
apply(_.readInt())
implicit val readLong: ByteBufReader[Long] =
apply(_.readLong())
implicit val readDouble: ByteBufReader[Double] =
apply(_.readDouble())
implicit val readFloat: ByteBufReader[Float] =
apply(_.readFloat())
implicit val readByteArray: ByteBufReader[Array[Byte]] =
apply(bb ⇒ {
val length = bb.readInt()
bb.ensuring(_.isReadable(length),
s"An array of $length bytes should be decoded, but there are only" +
s" ${bb.readableBytes()} more bytes left to read. This ByteBuf likely" +
s" represents different data or a different encoding")
val target = new Array[Byte](length)
bb.readBytes(target)
target
})
implicit val readString: ByteBufReader[String] =
readByteArray.map(bs ⇒ new String(bs, Utf8))
// Option
// List / etc...
val readFramelessByteArray: ByteBufReader[Array[Byte]] =
apply(bb ⇒ if (bb.hasArray) {
val backing = bb.array()
val offset = bb.arrayOffset() + bb.readerIndex()
val length = bb.readableBytes()
if (offset == 0 && length == backing.length) backing
else {
val array = new Array[Byte](length)
System.arraycopy(backing, offset, array, 0, length)
array
}
} else {
val length = bb.readableBytes()
val array = new Array[Byte](length)
bb.readBytes(array)
array
})
val readFramelessString: ByteBufReader[String] =
readFramelessByteArray.map(bs ⇒ new String(bs, Utf8))
val readLongAsString: ByteBufReader[Long] =
readFramelessString.map(_.toLong)
}
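
// Hedged sketch, not part of the original library: composes the primitive readers above into a
// reader for a hypothetical application type, to illustrate how andRead and map chain.
object ByteBufReaderExample {
  final case class Person(name: String, age: Int)

  // readString consumes a length-prefixed UTF-8 string, readInt the following 4 bytes;
  // andRead sequences the two reads and map converts the resulting tuple into a Person.
  val readPerson: ByteBufReader[Person] =
    ByteBufReader.readString.andRead(ByteBufReader.readInt).map {
      case (name, age) ⇒ Person(name, age)
    }
}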
|
knutwalker/rx-redis
|
modules/serialization/src/main/scala/rx/redis/serialization/ByteBufReader.scala
|
Scala
|
apache-2.0
| 3,734 |
package com.avsystem.commons
package redis.commands
import com.avsystem.commons.redis._
trait SetsApiSuite extends CommandsSuite {
import RedisApi.Batches.StringTyped._
apiTest("SADD") {
sadd("key", Nil).assertEquals(0)
sadd("key", "a", "b", "c").assertEquals(3)
sadd("key", "a", "b", "d").assertEquals(1)
smembers("key").assertEquals(Set("a", "b", "c", "d"))
}
apiTest("SCARD") {
setup(sadd("key", "a", "b", "c"))
scard("???").assertEquals(0)
scard("key").assertEquals(3)
}
apiTest("SDIFF") {
setup(
sadd("{key}1", "a", "b", "c"),
sadd("{key}2", "b", "d"),
sadd("{key}3", "c", "d")
)
sdiff("{key}1", "{key}2", "{key}3").assertEquals(Set("a"))
}
apiTest("SDIFFSTORE") {
setup(
sadd("{key}1", "a", "b", "c"),
sadd("{key}2", "b", "d"),
sadd("{key}3", "c", "d")
)
sdiffstore("{key}d", "{key}1", "{key}2", "{key}3").assertEquals(1)
smembers("{key}d").assertEquals(Set("a"))
}
apiTest("SINTER") {
setup(
sadd("{key}1", "a", "b", "c"),
sadd("{key}2", "b", "a"),
sadd("{key}3", "c", "a")
)
sinter("{key}1", "{key}2", "{key}3").assertEquals(Set("a"))
}
apiTest("SINTERSTORE") {
setup(
sadd("{key}1", "a", "b", "c"),
sadd("{key}2", "b", "a"),
sadd("{key}3", "c", "a")
)
sinterstore("{key}d", "{key}1", "{key}2", "{key}3").assertEquals(1)
smembers("{key}d").assertEquals(Set("a"))
}
apiTest("SISMEMBER") {
setup(sadd("key", "a", "b", "c"))
sismember("???", "a").assertEquals(false)
sismember("key", "a").assertEquals(true)
sismember("key", "d").assertEquals(false)
}
apiTest("SMEMBERS") {
setup(sadd("key", "a", "b", "c"))
smembers("???").assertEquals(Set.empty)
smembers("key").assertEquals(Set("a", "b", "c"))
}
apiTest("SMISMEMBER") {
setup(sadd("key", "a", "b", "c"))
smismember("???", "a").assertEquals(Seq(false))
smismember("key", "a").assertEquals(Seq(true))
smismember("key", "a", "c").assertEquals(Seq(true, true))
smismember("key", "a", "d").assertEquals(Seq(true, false))
}
apiTest("SMOVE") {
setup(
sadd("{key}1", "a", "b"),
sadd("{key}2", "c", "d")
)
smove("{key}1", "{key}2", "?").assertEquals(false)
smove("{key}1", "{key}2", "a").assertEquals(true)
smembers("{key}1").assertEquals(Set("b"))
smembers("{key}2").assertEquals(Set("a", "c", "d"))
}
apiTest("SPOP") {
setup(sadd("key", "a", "b", "c"))
spop("???").assertEquals(Opt.Empty)
spop("???", 2).assertEquals(Set.empty)
spop("key").assert(_.exists(Set("a", "b", "c").contains))
spop("key", 2).assert(s => s.size == 2 && s.forall(Set("a", "b", "c").contains))
scard("key").assertEquals(0)
}
apiTest("SRANDMEMBER") {
setup(sadd("key", "a", "b", "c"))
srandmember("???").assertEquals(Opt.Empty)
srandmemberDistinct("???", 2).assertEquals(Set.empty)
srandmember("key").assert(_.exists(Set("a", "b", "c").contains))
srandmemberDistinct("key", 2).assert(s => s.size == 2 && s.forall(Set("a", "b", "c").contains))
scard("key").assertEquals(3)
}
apiTest("SREM") {
setup(sadd("key", "a", "b", "c"))
srem("key", Nil).assertEquals(0)
srem("???", "a", "b").assertEquals(0)
srem("key", "a", "d").assertEquals(1)
scard("key").assertEquals(2)
}
apiTest("SSCAN") {
val scanMembers = (0 to 256).map(i => s"toscan$i").toSet
setup(sadd("key", scanMembers))
def sscanCollect(cursor: Cursor, acc: Set[String]): Future[Set[String]] =
sscan("key", cursor, "toscan*", 4).exec.flatMapNow {
case (Cursor.NoCursor, data) => Future.successful(acc ++ data)
case (nextCursor, data) => sscanCollect(nextCursor, acc ++ data)
}
assert(sscanCollect(Cursor.NoCursor, Set.empty).futureValue == scanMembers)
}
apiTest("SUNION") {
setup(
sadd("{key}1", "a", "b", "c"),
sadd("{key}2", "b", "d"),
sadd("{key}3", "c", "e")
)
sunion(Nil).assertEquals(Set.empty)
sunion("{key}1", "{key}2", "{key}3").assertEquals(Set("a", "b", "c", "d", "e"))
}
apiTest("SUNIONSTORE") {
setup(
sadd("{key}1", "a", "b", "c"),
sadd("{key}2", "b", "d"),
sadd("{key}3", "c", "e")
)
sunionstore("{key}d", "{key}1", "{key}2", "{key}3").assertEquals(5)
smembers("{key}d").assertEquals(Set("a", "b", "c", "d", "e"))
}
}
|
AVSystem/scala-commons
|
commons-redis/src/test/scala/com/avsystem/commons/redis/commands/SetsApiSuite.scala
|
Scala
|
mit
| 4,417 |
package edu.nus.maxsmtplay
import com.microsoft.z3._
import scala.util.control.Breaks._
/**
* Implementation of Fu-Malik algorithm
*
* For more information on the Fu & Malik procedure:
*
* Z. Fu and S. Malik, On solving the partial MAX-SAT problem, in International
* Conference on Theory and Applications of Satisfiability Testing, 2006.
*/
abstract class FuMalik(bound: Option[Int]) extends MaxSMT with Printer {
this: AtMostOne with Z3 =>
var lastCores: List[Int] = Nil
override def solveAndGetModel(soft: List[BoolExpr], hard: List[BoolExpr]): Option[(List[BoolExpr], Model)] = {
lastCores = Nil
//hard constraints
hard.map((c: BoolExpr) => solver.add(c))
// System.err.println("SOFT: " + soft.size)
//FIXME should I check formula before solving?
// val Some(sat) = solver.check()
// if (!sat) {
// throw new Exception("Hard constraints are not satisfiable")
// }
// saving (soft * aux) * (orig * blocks)
var assumptions = assertAssumptions(soft).map({
case (s, a) => ((s, a), (s, List[BoolExpr]()))
})
var count = 0
breakable {
while(true) {
var blockVars = List[BoolExpr]()
val assumptionsAndSwitches =
assumptions.map({case ((s, a), ob) => ((s, a), ob, z3.mkNot(a))})
assumptions.map({case ((s, a), _) => solver.add(z3.mkOr(s, a))})
val checkResult = solver.check(assumptionsAndSwitches.map(_._3):_*)
val sat = (checkResult == Status.SATISFIABLE)
if (sat) break()
count = count + 1
bound match {
case Some(v) if count > v => return None
case _ => ()
}
val core = solver.getUnsatCore().toList
lastCores = lastCores ++ List(core.size) //saving unsat-core size
var coreLog = List[BoolExpr]()
assumptions = assumptionsAndSwitches.map({
case ((soft, aux), (orig, oldBlocks), switch) => {
if (core.size == 0 || core.contains(switch)) {
coreLog = soft :: coreLog
val blockVar = z3.mkBoolConst(UniqueName.withPrefix("b"))
blockVars = blockVar :: blockVars
val newSoft = z3.mkOr(soft, blockVar)
val newAux = z3.mkBoolConst(UniqueName.withPrefix("a"))
val newBlocks = blockVar :: oldBlocks
((newSoft, newAux), (orig, newBlocks))
} else {
((soft, aux), (orig, oldBlocks))
}
}
})
//writeLog("fumalik-core", coreLog.map({c => c.toString + "\\n"}).reduceLeft(_ + _))
atMostOne(blockVars)
}
}
val model = solver.getModel()
val result = assumptions.filter({
case (_, (_, blocks)) => {
val evalList = blocks.map(b => {
          // The second argument enables Z3 model completion, so constants without an explicit
          // interpretation in the model are given an arbitrary value instead of being left symbolic.
val result = model.eval(b, true)
result.equals(z3.mkFalse)
})
evalList.foldLeft(true)(_ && _)
}
})
writeLog("fumalik-model", solver.getModel().toString())
Some((result.map({case (_, (orig, _)) => orig}) ++ hard, model))
}
}
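
// Worked micro-example of the procedure above (added for illustration, not part of the original
// repository). Suppose the soft clauses are {x}, {not x}, {y} and there are no hard clauses.
// Iteration 1: the check under the auxiliary switches fails and the unsat core covers the clauses
// {x} and {not x}; each of them is relaxed with a fresh blocking variable, giving (x or b1) and
// (not x or b2), and atMostOne(b1, b2) is asserted. Iteration 2: the formula is now satisfiable,
// e.g. x = true, b1 = false, b2 = true, y = true, so the loop breaks. The result keeps exactly the
// clauses whose blocking variables all evaluate to false in the model, here {x} and {y}, so 2 of
// the 3 soft clauses are reported as satisfied, which is the optimum for this input.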
|
mechtaev/maxsmt-playground
|
src/main/scala/edu/nus/maxsmtplay/FuMalik.scala
|
Scala
|
mit
| 3,099 |
package models
import java.util.UUID
import com.mohiva.play.silhouette.api.{ Identity, LoginInfo }
import play.api.libs.json.Json
import play.api.i18n.Lang
import play.api.libs.json.Writes
import play.api.libs.json.JsPath
import play.api.libs.json.Reads
import play.api.libs.json.Format
import play.api.libs.json.JsValue
import play.api.libs.json.JsSuccess
import plm.core.lang.ProgrammingLanguage
/**
* The user object.
*
 * @param userID The unique ID of the user.
 * @param gitID The git-related unique ID of the user.
 * @param loginInfo The linked login info.
 * @param firstName Maybe the first name of the authenticated user.
 * @param lastName Maybe the last name of the authenticated user.
 * @param fullName Maybe the full name of the authenticated user.
 * @param email Maybe the email of the authenticated provider.
 * @param preferredLang Maybe the preferred language of the user.
 * @param lastProgLang Maybe the last programming language used by the user.
 * @param avatarURL Maybe the avatar URL of the authenticated provider.
*/
case class User(
userID: UUID,
gitID: UUID,
loginInfo: LoginInfo,
firstName: Option[String],
lastName: Option[String],
fullName: Option[String],
email: Option[String],
preferredLang: Option[Lang],
lastProgLang: Option[String],
avatarURL: Option[String]) extends Identity
/**
* The companion object.
*/
object User {
implicit val langWrites = new Writes[Lang] {
def writes(lang: Lang) = Json.obj(
"code" -> lang.code
)
}
implicit val langRead = new Reads[Lang] {
def reads(json: JsValue) = {
      JsSuccess(Lang((json \ "code").as[String]))
}
}
/**
* Converts the [User] object to Json and vice versa.
*/
implicit val jsonFormat = Json.format[User]
}
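
// Hedged example, not part of the original model: exercises the implicit JSON format above by
// round-tripping a sample user. All field values below are made up for illustration.
object UserJsonExample {
  def sample: User = User(
    userID = UUID.randomUUID(),
    gitID = UUID.randomUUID(),
    loginInfo = LoginInfo("credentials", "[email protected]"),
    firstName = Some("Ada"),
    lastName = Some("Lovelace"),
    fullName = Some("Ada Lovelace"),
    email = Some("[email protected]"),
    preferredLang = Some(Lang("en")),
    lastProgLang = Some("scala"),
    avatarURL = None)

  // Serialises with the Writes side of jsonFormat and reads it back with the Reads side.
  def roundTrip(user: User): User =
    Json.toJson(user).as[User]
}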
|
BaptisteMounier/webPLM
|
app/models/User.scala
|
Scala
|
agpl-3.0
| 1,579 |
/*
* Copyright (C) 2010 Lalit Pant <[email protected]>
*
* The contents of this file are subject to the GNU General Public License
* Version 3 (the "License"); you may not use this file
* except in compliance with the License. You may obtain a copy of
* the License at http://www.gnu.org/copyleft/gpl.html
*
* Software distributed under the License is distributed on an "AS
* IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
* implied. See the License for the specific language governing
* rights and limitations under the License.
*
*/
package net.kogics.kojo.core
import org.jfugue.{Rhythm => JFRhythm, _}
trait Voice {
def pattern(n: Int): Pattern
}
case class Melody(instrument: String, music: String) extends Voice {
def pattern(n: Int) = {
val p = new Pattern("I[%s] %s" format(instrument, music))
val ret = new Pattern()
ret.add(p, n)
ret
}
}
case class Rhythm(instrument: String, duration: String, beat: String) extends Voice {
def pattern(n: Int) = {
val rhy = new JFRhythm()
rhy.setLayer(1, beat)
rhy.addSubstitution(beatChar(beat), "[%s]%s" format(instrument, duration))
rhy.addSubstitution('.', "R%s" format(duration))
val ret = new Pattern()
ret.add(rhy.getPattern, n)
ret
}
def beatChar(b: String): Char = {
b.find {c => c != '.'}.get
}
}
case class Score(voices: Voice *) extends Voice {
def pattern(n: Int): Pattern = {
val score = new Pattern()
var idx = 0
val rhy = new JFRhythm()
var rLayer = 1
voices.foreach { voice => voice match {
case Melody(i, m) =>
val p = new Pattern("V%d I[%s] %s" format(idx, i, m))
score.add(p, n)
case rv @ Rhythm(i, d, b) =>
rhy.setLayer(rLayer, b)
rhy.addSubstitution(rv.beatChar(b), "[%s]%s" format(i, d))
rhy.addSubstitution('.', "R%s" format(d))
rLayer += 1
}
idx += 1
}
score.add(rhy.getPattern, n)
score
}
}
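
// Hedged example, not from the original codebase: a two-voice score combining a melody with a
// percussion rhythm. The instrument names and MusicString note syntax follow JFugue conventions;
// the concrete values are illustrative only.
object MusicExample {
  val score = Score(
    Melody("FLUTE", "C5q D5q E5q F5q G5h"),
    Rhythm("BASS_DRUM", "q", "o...o...")
  )

  // Renders both voices, repeated twice, as a single JFugue Pattern.
  def twoBars: Pattern = score.pattern(2)
}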
|
vnkmr7620/kojo
|
KojoEnv/src/net/kogics/kojo/core/music.scala
|
Scala
|
gpl-3.0
| 1,996 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate
import scala.collection.generic.Growable
import scala.collection.mutable
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.GenericArrayData
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.types._
/**
* The Collect aggregate function collects all seen expression values into a list of values.
*
* The operator is bound to the slower sort based aggregation path because the number of
* elements (and their memory usage) can not be determined in advance. This also means that the
* collected elements are stored on heap, and that too many elements can cause GC pauses and
* eventually Out of Memory Errors.
*/
abstract class Collect extends ImperativeAggregate {
val child: Expression
override def children: Seq[Expression] = child :: Nil
override def nullable: Boolean = true
override def dataType: DataType = ArrayType(child.dataType)
override def inputTypes: Seq[AbstractDataType] = Seq(AnyDataType)
override def supportsPartial: Boolean = false
override def aggBufferAttributes: Seq[AttributeReference] = Nil
override def aggBufferSchema: StructType = StructType.fromAttributes(aggBufferAttributes)
override def inputAggBufferAttributes: Seq[AttributeReference] = Nil
// Both `CollectList` and `CollectSet` are non-deterministic since their results depend on the
// actual order of input rows.
override def deterministic: Boolean = false
protected[this] val buffer: Growable[Any] with Iterable[Any]
override def initialize(b: MutableRow): Unit = {
buffer.clear()
}
override def update(b: MutableRow, input: InternalRow): Unit = {
// Do not allow null values. We follow the semantics of Hive's collect_list/collect_set here.
// See: org.apache.hadoop.hive.ql.udf.generic.GenericUDAFMkCollectionEvaluator
val value = child.eval(input)
if (value != null) {
buffer += value
}
}
override def merge(buffer: MutableRow, input: InternalRow): Unit = {
sys.error("Collect cannot be used in partial aggregations.")
}
override def eval(input: InternalRow): Any = {
new GenericArrayData(buffer.toArray)
}
}
/**
* Collect a list of elements.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Collects and returns a list of non-unique elements.")
case class CollectList(
child: Expression,
mutableAggBufferOffset: Int = 0,
inputAggBufferOffset: Int = 0) extends Collect {
def this(child: Expression) = this(child, 0, 0)
override def withNewMutableAggBufferOffset(newMutableAggBufferOffset: Int): ImperativeAggregate =
copy(mutableAggBufferOffset = newMutableAggBufferOffset)
override def withNewInputAggBufferOffset(newInputAggBufferOffset: Int): ImperativeAggregate =
copy(inputAggBufferOffset = newInputAggBufferOffset)
override def prettyName: String = "collect_list"
override protected[this] val buffer: mutable.ArrayBuffer[Any] = mutable.ArrayBuffer.empty
}
/**
* Collect a list of unique elements.
*/
@ExpressionDescription(
usage = "_FUNC_(expr) - Collects and returns a set of unique elements.")
case class CollectSet(
child: Expression,
mutableAggBufferOffset: Int = 0,
inputAggBufferOffset: Int = 0) extends Collect {
def this(child: Expression) = this(child, 0, 0)
override def checkInputDataTypes(): TypeCheckResult = {
if (!child.dataType.existsRecursively(_.isInstanceOf[MapType])) {
TypeCheckResult.TypeCheckSuccess
} else {
TypeCheckResult.TypeCheckFailure("collect_set() cannot have map type data")
}
}
override def withNewMutableAggBufferOffset(newMutableAggBufferOffset: Int): ImperativeAggregate =
copy(mutableAggBufferOffset = newMutableAggBufferOffset)
override def withNewInputAggBufferOffset(newInputAggBufferOffset: Int): ImperativeAggregate =
copy(inputAggBufferOffset = newInputAggBufferOffset)
override def prettyName: String = "collect_set"
override protected[this] val buffer: mutable.HashSet[Any] = mutable.HashSet.empty
}
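
// Illustrative usage note (not part of the original file): both aggregates are exposed through
// SQL and the DataFrame API, e.g.
//   SELECT dept, collect_set(name) FROM employees GROUP BY dept
// collects the distinct names of each department into an array column, while collect_list keeps
// duplicates. Because the buffers live on the heap, very large groups can cause GC pressure or
// OutOfMemoryErrors, as noted in the class comment above.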
|
gioenn/xSpark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/collect.scala
|
Scala
|
apache-2.0
| 4,951 |
package models.tosca
import scalaz._
import Scalaz._
import scalaz.effect.IO
import scalaz.Validation
import scalaz.Validation.FlatMap._
import scalaz.NonEmptyList._
import scalaz.syntax.SemigroupOps
import cache._
import db._
import models.json.tosca._
import models.Constants._
import io.megam.auth.funnel.FunnelErrors._
import models.base._
import net.liftweb.json._
import net.liftweb.json.scalaz.JsonScalaz._
import java.nio.charset.Charset
class UnitsBreaker(input: String, profileName: String, authBag: Option[io.megam.auth.stack.AuthBag] ) {
implicit val formats = DefaultFormats
private lazy val toObject: ValidationNel[Throwable, AssembliesInput] = {
val aio: ValidationNel[Throwable, AssembliesInput] = (Validation.fromTryCatchThrowable[AssembliesInput, Throwable] {
parse(input).extract[AssembliesInput]
} leftMap { t: Throwable => new MalformedBodyError(input, t.getMessage) }).toValidationNel
for { //for comprehension is abused here. :)
aip <- aio
} yield {
aip
}
}
private def till = toObject.map(_.number_of_units).getOrElse(0)
private def uno = (till == 1)
  //if it's a uno (a single launch), then return and use the name as-is
private def nameOfUnit(i: Int, n: String): String = {
if (uno) n else toObject.map(_.nameOverriden(n + i)).getOrElse(n + i)
}
def break: ValidationNel[Throwable, AssembliesInputList] = {
if (till < 1) return (new MalformedBodyError(input, "Can't parse assemblies input.").failureNel[AssembliesInputList])
((1 to till).map(i => mkLaunchable(i, input)).some map {
_.foldRight((AssembliesInputList.empty).successNel[Throwable])(_ +++ _)
}).head
}
private def mkLaunchable(i: Int, input: String) = {
for {
too <- toObject
} yield {
val changed = too.assemblies.map { ai =>
val labelledInputs = PatternLabeler(ai.inputs, profileName, authBag).labeled
val decompkvs = FieldSplitter(till, KeyValueField("quota_ids", "quota_id"), labelledInputs).merged
Assembly(nameOfUnit(i, ai.name), ai.components, ai.tosca_type,
ai.policies, decompkvs.get(i).get, ai.outputs, ai.status, ai.state)
}
List(AssembliesInput(too.name, too.org_id, changed, too.inputs))
}
}
}
case class PatternLabeler(inputs: KeyValueList, profileName: String, authBag: Option[io.megam.auth.stack.AuthBag]) {
lazy val email = authBag.get.email
lazy val org_id = authBag.get.org_id
lazy val pi = PatternedLabel(inputs, profileName, email, org_id)
lazy val name = (new PatternedLabelReplacer(pi)).name
lazy val nameAsMap = name match {
case Some(succ) => Map(succ._1 -> succ._2.getOrElse("")).filter(x => x._2.length > 0)
case None => Map[String, String]()
}
def labeled = {
name match {
case Some(succ) => KeyValueList(KeyValueList.toMap(inputs) ++ nameAsMap)
case None => inputs
}
}
}
case class FieldSplitter(nos: Int, field: KeyValueField, kvs: KeyValueList) {
val VALUE_KEY = field.value
val FILTER_KEY = field.key
val COMMA = ","
val filter = KeyValueList.filter(kvs, FILTER_KEY)
val filterNot = KeyValueList.filterNot(kvs, FILTER_KEY)
val split = (filter.map { x =>
if(x.value.contains(COMMA)) {
x.value.split(COMMA).map(KeyValueField(VALUE_KEY, _)).toList
} else {
List(KeyValueField(VALUE_KEY, x.value)).toList
}}).flatten.zipWithIndex.map(x => List(x._1)
)
val merged: Map[Int, KeyValueList] = ({
if (split.isEmpty) {
((1 to nos).toList.map((_, filterNot)))
} else {
((1 to nos).toList.zip(split.map(_ ++ filterNot)))
}
}).toMap
}
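
// Worked example (illustrative, not from the original code): with nos = 3 and inputs containing
// ("quota_ids" -> "q1,q2,q3") plus ("domain" -> "example.com"), `split` becomes
// List(List(quota_id=q1), List(quota_id=q2), List(quota_id=q3)) and `merged` assigns
// unit 1 -> (quota_id=q1, domain=example.com), unit 2 -> (quota_id=q2, domain=example.com),
// unit 3 -> (quota_id=q3, domain=example.com). When no "quota_ids" key is present, every unit
// simply receives the remaining inputs unchanged.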
|
indykish/verticegateway
|
app/models/tosca/UnitsBreaker.scala
|
Scala
|
mit
| 3,691 |
package com.github.morikuni.locest.area.domain.model
import org.specs2.mutable.Specification
class CoordinateSpec extends Specification {
"create" should {
"return Coordinate successfully" in {
val lat = 90d
val lng = 180d
val coordinate = Coordinate.create(lat, lng).get
coordinate must be equalTo Coordinate.createUnsafe(lat, lng)
}
"fail with IllegalArgumentException" in {
def fail(lat: Double, lng: Double) = {
val coordinateTry = Coordinate.create(lat ,lng)
coordinateTry.get must throwA[IllegalArgumentException]
}
"when lat is too big" in {
fail(91d, 180d)
}
"when lat is too small" in {
fail(-91d, 180d)
}
"when lng is too big" in {
fail(90d, 181d)
}
"when lng is too small" in {
fail(90d, -181d)
}
}
}
"createUnsafe" should {
"return Coordinate successfully" in {
val lat = 90d
val lng = 180d
val coordinate = Coordinate.createUnsafe(lat, lng)
coordinate.lat must be equalTo lat
coordinate.lng must be equalTo lng
}
}
}
|
morikuni/locest
|
area/test/com/github/morikuni/locest/area/domain/model/CoordinateSpec.scala
|
Scala
|
mit
| 1,136 |
package com.jeff.chaser.models.components.motion
import com.badlogic.ashley.core.Component
class AccelerationComponent(val x: Float, val y: Float) extends Component
|
jregistr/Academia
|
CSC455-Game-Programming/Chaser/core/src/com/jeff/chaser/models/components/motion/AccelerationComponent.scala
|
Scala
|
mit
| 167 |
package maths
object `package` {
def factorial(x: Int): Int =
numbers.oneto(x).foldLeft(1)((x,y) => x * y)
}
|
melezov/xsbt-web-plugin
|
src/sbt-test/container/multi-module-single-webapp/maths/src/main/scala/maths.scala
|
Scala
|
bsd-3-clause
| 119 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2013-2014 Alexey Aksenov [email protected]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: [email protected]
*/
package org.digimead.tabuddy.desktop.core.definition.command
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap
import java.util.concurrent.atomic.AtomicReference
import org.digimead.digi.lib.aop.log
import org.digimead.digi.lib.api.XDependencyInjection
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.core.Core
import org.digimead.tabuddy.desktop.core.definition.Context
import org.digimead.tabuddy.desktop.core.definition.command.api.XCommand
import org.eclipse.e4.core.contexts.{ ContextFunction, IEclipseContext, RunAndTrack }
import org.eclipse.jface.fieldassist.{ ContentProposal, IContentProposal, IContentProposalProvider }
import scala.collection.JavaConverters.mapAsScalaMapConverter
import scala.collection.immutable
import scala.concurrent.Future
import scala.language.implicitConversions
import scala.util.DynamicVariable
import scala.util.parsing.input.CharSequenceReader
/**
* Command supervisor.
*/
class Command extends XLoggable {
/**
* The composite parser from all actual parser combinators over the application.
* It is based on the current active context branch.
*/
protected val actualParserCombinators = new AtomicReference[Command.parser.Parser[Any]](Command.parser.stubParser)
/** Registry with registered commands. Parser id -> command descriptor. */
protected val registry = new ConcurrentHashMap[UUID, Command.Descriptor].asScala
/** Registry with information about all active parsers within application contexts. Unique id of parser -> context information. */
protected val perContext = new ConcurrentHashMap[UUID, Command.ContextInformation].asScala
/** Run and track active branch context listener. */
protected lazy val listener = new Command.Listener(actualParserCombinators)
private val contextCommandsAccessLock = new Object
// Listen for actual commands based on active branch.
// We add this from the beginning till the end of the component life,
  // because of code monkeys that designed EclipseContext and forgot to add the ability to remove RATs
Core.context.runAndTrack(listener)
log.debug("Alive. Add global active context listener.")
/**
* Add command parser to context.
* Create command parser unique copy and bind it to the context.
* So lately we may retrieve result and additional information.
* @return unique id of the commandParserTemplate copy
*/
@log
def addToContext(context: Context, parser: Command.CmdParser): Option[UUID] = contextCommandsAccessLock.synchronized {
val commandDescriptor = registry.get(parser.parserId) match {
case Some(commandDescriptor) ⇒ commandDescriptor
case None ⇒ throw new IllegalArgumentException(s"Unable to add parser to context: parser id ${parser.parserId} not found.")
}
log.debug(s"""Add command "${commandDescriptor.name}"(${commandDescriptor.parserId}) to context '${context}'.""")
if (commandDescriptor.name.trim().isEmpty())
throw new IllegalArgumentException(s"Unable to add parser to context: command name is absent.")
val newParserUniqueId = Option(context.get(Command.contextKey)) match {
case Some(commandsGeneric: immutable.HashMap[_, _]) ⇒
// there is already registered at least one context parser
val contextParser = parser.copy(uniqueId = UUID.randomUUID()).named(s""""${commandDescriptor.name}"(${commandDescriptor.parserId})""")
perContext(contextParser.parserId) = Command.ContextInformation(parser.parserId, contextParser, context)
context.set(Command.contextKey, commandsGeneric.asInstanceOf[immutable.HashMap[UUID, Command.CmdParser]] + (contextParser.parserId -> contextParser))
Some(contextParser.parserId)
case Some(unknown) ⇒
log.fatal("Unknown context commands value: " + unknown.getClass())
None
case None ⇒
// there are no any registered parsers
val contextParser = parser.copy(uniqueId = UUID.randomUUID()).named(s""""${commandDescriptor.name}"(${commandDescriptor.parserId})""")
perContext(contextParser.parserId) = Command.ContextInformation(parser.parserId, contextParser, context)
context.set(Command.contextKey, immutable.HashMap[UUID, Command.CmdParser](contextParser.parserId -> contextParser))
Some(contextParser.parserId)
}
newParserUniqueId
}
/** Get command descriptor for UUID. */
def apply(key: UUID) = registry.apply(key: UUID)
/** List all commands that is binded to context(s). */
def binded = perContext.values
/** Get command descriptor for UUID. */
def get(key: UUID): Option[Command.Descriptor] = registry.get(key)
/** Create new proposal provider for the text field. */
def getProposalProvider(): Command.ProposalProvider = {
log.debug("Get proposal provider.")
new Command.ProposalProvider(actualParserCombinators)
}
/** Get descriptor for commandId. */
def getDescriptor(commandId: UUID) = registry.get(commandId)
/** Get information for uniqueId of a context parser. */
def getContextParserInfo(uniqueId: UUID) = perContext.get(uniqueId)
/** List all registered commands. */
def registered = registry.values
/** Parse input. */
def parse(input: String, parser: Command.parser.Parser[Any] = actualParserCombinators.get): Command.Result = {
val (parserId, proposals, result) = Command.triggeredCmdParserId.withValue(None) {
Command.completionProposal.withValue(Seq.empty) {
try {
val result = Command.parser.parse(parser, input)
(Command.triggeredCmdParserId.value, Command.completionProposal.value, result)
} catch {
case e: Command.ParseException ⇒
(Command.triggeredCmdParserId.value, Seq(), Command.parser.Error(e.getMessage(), new CharSequenceReader(input)))
}
}
}
result match {
case r @ Command.parser.Success(result, next) ⇒
parserId match {
case Some(parserId) ⇒
if (Command.parser.isCompletionRequest(input) && proposals.nonEmpty)
            // return the appended proposals, if any
proposals.foldLeft(Command.MissingCompletionOrFailure(Seq(), "empty append proposal"))((a, b) ⇒
Command.MissingCompletionOrFailure(a.completion ++ b.completions, "append proposals"))
else
Command.Success(parserId, result)
case None ⇒ Command.Error("Unable to find parser id for: " + r)
}
case Command.parser.MissingCompletionOrFailure(list, message, next) ⇒
if (proposals.nonEmpty)
          // return the appended proposals, if any
proposals.foldLeft(Command.MissingCompletionOrFailure(Seq(), "empty append proposal"))((a, b) ⇒
Command.MissingCompletionOrFailure(a.completion ++ b.completions, "append proposals"))
else
Command.MissingCompletionOrFailure(list, message)
case Command.parser.Failure(message, next) ⇒
if (proposals.nonEmpty)
          // return the appended proposals, if any
proposals.foldLeft(Command.MissingCompletionOrFailure(Seq(), "empty append proposal"))((a, b) ⇒
Command.MissingCompletionOrFailure(a.completion ++ b.completions, "append proposals"))
else
Command.Failure(message)
case Command.parser.Error(message, next) ⇒
Command.Error(message)
}
}
/** Register command. */
def register(commandDescriptor: Command.Descriptor): Unit = contextCommandsAccessLock.synchronized {
log.debug(s"""Register command "${commandDescriptor.name}" with id${commandDescriptor.parserId}.""")
registry += (commandDescriptor.parserId -> commandDescriptor)
}
  /** Remove all actual parsers that have the specified unique id from the context. */
def removeFromContext(context: Context, uniqueId: UUID) = contextCommandsAccessLock.synchronized {
log.debug(s"Remove parser ${uniqueId} from context ${context}.")
Option(context.get(Command.contextKey)) match {
case Some(commandsGeneric: immutable.HashMap[_, _]) ⇒
context.set(Command.contextKey, commandsGeneric.asInstanceOf[immutable.HashMap[UUID, Command.CmdParser]] - uniqueId)
case Some(unknown) ⇒
log.fatal("Unknown context commands keunknowny value: " + unknown.getClass())
case None ⇒
}
perContext.remove(uniqueId)
listener.changed(Core.context)
}
  /** Remove all actual parsers that have the specified command id from the context. */
def removeFromContext(context: Context, parser: Command.CmdParser) {
if (!registry.contains(parser.parserId))
throw new IllegalArgumentException(s"Unable to add parser to context: command id ${parser.parserId} not found")
perContext.filter { case (uniqueId, information) ⇒ information.parserId == parser.parserId }.foreach(kv ⇒ removeFromContext(context, kv._1))
}
/** Unregister command. */
def unregister(parserId: UUID): Unit = contextCommandsAccessLock.synchronized {
log.debug(s"Unregister command ${parserId}.")
val uniqueIdToRemove = perContext.filter { case (uniqueId, information) ⇒ information.parserId == parserId }.map(_._1)
uniqueIdToRemove.foreach { uniqueId ⇒
perContext.remove(uniqueId).foreach { information ⇒
Option(information.context.get(Command.contextKey)) match {
case Some(commandsGeneric: immutable.HashMap[_, _]) ⇒
information.context.set(Command.contextKey, commandsGeneric.asInstanceOf[immutable.HashMap[UUID, Command.CmdParser]] - parserId)
case Some(unknown) ⇒
log.fatal("Unknown context commands keunknowny value: " + unknown.getClass())
case None ⇒
}
}
}
registry -= parserId
listener.changed(Core.context)
}
/** Unregister command. */
def unregister(commandDescriptor: Command.Descriptor) {
log.debug(s"Unregister command ${commandDescriptor.parserId}: ${commandDescriptor.name}.")
val commandId = commandDescriptor.parserId
if (!registry.contains(commandId))
throw new IllegalArgumentException(s"Unable to add parser to context: command id ${commandId} not found")
unregister(commandId)
}
}
/**
 * Monitor all actual commands and provide them with IContentProposalProvider.
*/
object Command extends XLoggable {
implicit def cmdLine2implementation(c: Command.type): Command = c.inner
/** Last parsing process completion. */
val completionProposal = new DynamicVariable(Seq.empty[CommandParsers#MissingCompletionOrFailure])
/** Context commands map key. */
val contextKey = "Commands"
/** Context commands composite parser key. */
val contextParserKey = "CommandsParser"
  /** Singleton identifier. */
val id = getClass.getSimpleName().dropRight(1)
/** Command parser implementation. */
lazy val parser = DI.parser
/** Last successful parser id. */
val triggeredCmdParserId = new DynamicVariable[Option[UUID]](None)
/** Command implementation. */
def inner = DI.implementation
sealed trait Result
case class Success(val uniqueId: UUID, val result: Any) extends Result
case class MissingCompletionOrFailure(val completion: Seq[Hint], val message: String) extends Result
case class Failure(val message: String) extends Result
case class Error(val message: String) extends Result
/** Information about command parser that is added to specific context. */
case class ContextInformation private[Command] (val parserId: UUID, val contextParser: Command.parser.Parser[Any], val context: Context)
  /** Command descriptor where callback is (active context, parser context, parser result) => Future[Any]. */
case class Descriptor(val parserId: UUID)(val name: String, val shortDescription: String, val longDescription: String, val callback: (Context, Context, Any) ⇒ Future[Any])
extends XCommand.Descriptor {
override lazy val toString = s"Command.Descriptor(${name}, ${parserId})"
}
/** Command parser that wraps base parser combinator with 'phrase' sentence. */
class CmdParser(val parserId: UUID, base: parser.Parser[Any])
extends parser.CmdParser(parserId, base) {
/** Copy constructor. */
def copy(uniqueId: UUID = this.parserId, base: parser.Parser[Any] = this.base) =
new CmdParser(uniqueId, base)
/** Equals by uniqueId. */
override def equals(other: Any) = other match {
case that: CmdParser ⇒ (this eq that) || parserId == that.parserId
case _ ⇒ false
}
/** HashCode from parserId. */
override def hashCode = parserId.hashCode()
}
object CmdParser {
def apply(base: parser.Parser[Any])(implicit descriptor: Descriptor) =
new CmdParser(descriptor.parserId, base)
}
  /** Application-wide context listener that rebuilds commands. */
class Listener(val commandParserCombinators: AtomicReference[parser.Parser[Any]]) extends RunAndTrack() {
private val lock = new Object
override def changed(context: IEclipseContext): Boolean = lock.synchronized {
log.trace("Update command line parser combinators.")
val leaf = Core.context.getActiveLeaf()
def getCompositeParsers(context: IEclipseContext): Seq[Option[parser.Parser[Any]]] = {
val contextCompositeParser = Option(context.getLocal(Command.contextParserKey).asInstanceOf[Option[parser.Parser[Any]]]).getOrElse {
context.set(Command.contextParserKey, new CompositeParserComputation)
context.getLocal(Command.contextParserKey).asInstanceOf[Option[parser.Parser[Any]]]
}
Option(context.getParent()) match {
case Some(parent) ⇒ contextCompositeParser +: getCompositeParsers(parent)
case None ⇒ Seq(contextCompositeParser)
}
}
getCompositeParsers(leaf).flatten match {
case Nil ⇒ commandParserCombinators.set(Command.parser.stubParser)
case seq if seq.nonEmpty ⇒ commandParserCombinators.set(seq.reduceLeft[parser.Parser[Any]] { (acc, p) ⇒ acc | p })
}
true
}
}
  /** Parser exception that correctly terminates the parse sequence. */
case class ParseException(message: String) extends java.text.ParseException(message, -1)
/** ProposalProvider for a text field. */
class ProposalProvider(val actualParserCombinators: AtomicReference[parser.Parser[Any]])
extends IContentProposalProvider {
@volatile protected var input = ""
/** Set input for current proposal. */
def setInput(text: String) = input = text
/** Return an array of content proposals representing the valid proposals for a field. */
def getProposals(contents: String, position: Int): Array[IContentProposal] = {
Command.parse(input, actualParserCombinators.get) match {
case Command.Success(uniqueId, result) ⇒
Array()
case Command.MissingCompletionOrFailure(hints, message) ⇒
{
hints.map {
case Hint(Some(label), description, list) ⇒
val completionList = list.filter(_.nonEmpty)
if (completionList.size == 1)
completionList.map(completion ⇒ new ContentProposal(completion, label, description getOrElse null))
else
completionList.map(completion ⇒ new ContentProposal(completion, s"${label}(${completion})", description getOrElse null))
case Hint(None, description, list) ⇒
list.filter(_.nonEmpty).map(completion ⇒ new ContentProposal(completion, completion, description getOrElse null))
}
}.flatten.toArray
case Command.Failure(message) ⇒
log.fatal(message)
Array()
case Command.Error(message) ⇒
log.fatal(message)
Array()
}
}
}
/** Completion hint. */
abstract class Hint {
/** Completion label. */
def label: Option[String]
/** Completion description. */
def description: Option[String]
/** Get copy of this hint with updated completions field. */
def copyWithCompletion(completions: String*): this.type
/** Completion list. */
def completions: Seq[String]
def canEqual(other: Any) = other.isInstanceOf[Hint]
override def equals(other: Any) = other match {
case that: Hint ⇒ (this eq that) || {
that.canEqual(this) && label == that.label && description == that.description && completions == that.completions
}
case _ ⇒ false
}
override def hashCode() = lazyHashCode
protected lazy val lazyHashCode = java.util.Arrays.hashCode(Array[AnyRef](label, description, completions))
override def toString = "Command.Hint(" + s"$label, $description, $completions)"
}
object Hint {
/** Get static Hint instance. */
def apply(completionLabel: String, completionDescription: Option[String] = None, explicitCompletion: Seq[String] = Seq.empty): Hint =
new Static(Some(completionLabel), completionDescription, explicitCompletion)
/** Get static Hint instance. */
def apply(explicitCompletion: String*): Hint =
new Static(None, None, explicitCompletion)
/** Hint extractor implementation. */
def unapply(hint: Hint): Option[(Option[String], Option[String], Seq[String])] =
Some(hint.label, hint.description, hint.completions)
/** Simple Hint implementation with static fields. */
class Static(
/** Completion label. */
val label: Option[String],
/** Completion description. */
val description: Option[String],
/** Completion list. */
val completions: Seq[String]) extends Hint {
/** Get copy of this hint with updated completions field. */
def copyWithCompletion(completions: String*): this.type =
new Static(label, description, completions).asInstanceOf[this.type]
}
    /** Hint container that returns a list of hints to the parser, depending on the argument. */
trait Container {
def apply(arg: String): Seq[Hint]
}
object Container {
/** Get simple Hints container. */
def apply(hints: Hint*): Container = new Simple(hints)
/** Get simple Hints container. */
def apply(hints: Traversable[Hint]): Container = new Simple(hints.toSeq)
/** Simple Hints container that returns predefined sequence, regardless of argument. */
class Simple(val hints: Seq[Hint]) extends Container {
def apply(arg: String) = hints
}
}
}
/** Computation that calculates composite parser for current context. */
class CompositeParserComputation extends ContextFunction {
override def compute(context: IEclipseContext, fnKey: String): Option[parser.Parser[Any]] =
Option(context.getLocal(Command.contextKey)) match {
case Some(commands: immutable.HashMap[_, _]) if commands.nonEmpty ⇒
Some(commands.values.asInstanceOf[Iterable[CmdParser]].reduceLeft[parser.Parser[Any]] { (acc, p) ⇒ acc | p })
case _ ⇒
None
}
}
/**
* Dependency injection routines.
*/
private object DI extends XDependencyInjection.PersistentInjectable {
/** Command implementation. */
lazy val implementation = injectOptional[Command] getOrElse new Command
/** Parser implementation. */
lazy val parser = injectOptional[CommandParsers] getOrElse new CommandParsers
}
}
|
digimead/digi-TABuddy-desktop
|
part-core/src/main/scala/org/digimead/tabuddy/desktop/core/definition/command/Command.scala
|
Scala
|
agpl-3.0
| 21,471 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.samples.pubsub.actors
import org.squbs.samples.pubsub.messages.Publish
import scala.collection.mutable
import spray.http.{HttpData, MessageChunk}
import akka.util.ByteString
import UnsignedHexUtil._
object EventCache {
def apply(cacheSize: Int, lastEventOnConnect: Boolean): EventCache = {
val effectiveCacheSize = if (lastEventOnConnect && cacheSize < 1) 1 else cacheSize
if (effectiveCacheSize > 1) new Cache(effectiveCacheSize, lastEventOnConnect)
else if (effectiveCacheSize == 1) new OneCache(lastEventOnConnect)
else new NoCache
}
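  // Examples of the selection above (illustrative only): apply(10, lastEventOnConnect = false)
  // yields a Cache of size 10; apply(0, lastEventOnConnect = true) is promoted to a OneCache so
  // the last event can still be replayed on connect; apply(0, lastEventOnConnect = false) yields a NoCache.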
}
sealed trait EventCache {
val lastEventOnConnect: Boolean
def storeAndFormat(evt: Publish): MessageChunk
def eventsOnConnect(lastEvent: Option[String]): Iterator[MessageChunk]
}
class Cache(size: Int, val lastEventOnConnect: Boolean) extends EventCache {
private[actors] val cache = mutable.Queue.empty[(Long, MessageChunk)]
  private[actors] var eventId = 0L
private[actors] val overFlowThreshold = Long.MaxValue - size
def storeAndFormat(evt: Publish) = {
eventId += 1
val messageChunk = MessageChunk(HttpData(SSESupport.toSSE(evt.eventType, eventId, evt.content)))
while (cache.size >= size) cache.dequeue()
cache += ((eventId, messageChunk))
messageChunk
}
  // Consider the case where we have a number overflow. The new id may be far below the last.
  // We detect this condition by seeing that the head is much lower than the last. Also, the last should be close to
  // Long.MaxValue. For instance, it should be higher than Long.MaxValue - size.
private[actors] def findFromLastEvent(lastEventId: Long): Iterator[MessageChunk] = {
if (cache.isEmpty) return Iterator.empty
val firstCachedId = cache.head._1
if (firstCachedId < overFlowThreshold) { // Normal case, really most of the time.
if (lastEventId < firstCachedId) return cache.toIterator map (_._2)
// Example: size = 7
// EventIds 6, 7, 8, 9, 10, 11, 12
// lastEventId = 8
// offset = 8 - 6 + 1 = 3
// send events 9, 10, 11, 12 - don't send 8 itself
// This is the cache dropping the first n events where n = offset.
}
    else { // We're near the overflow zone. Tread carefully. (Hopefully this code will never be exercised.)
      // For easy comprehension, let's assume MaxValue = 100; after that the id will jump to -100.
      // (You may argue it should be 99 jumping to -100, but I just want to keep it simple.)
      // Let's try a cache size of 10.
      // overFlowThreshold would be 90.
      // Once event id 100 goes out, the cache will have 91, 92, ..., 100.
      // If a request comes in with a last event id less than 90, we just send the whole cache.
      // But remember, -100, -99, and so forth are in fact the overflowed ids and are logically greater numbers.
      // So they cannot be treated as lower numbers; they are actually higher. So we can just send them an empty iterator.
      // In this circumstance, we consider anything below 0 an event id from the future.
      // Next, event id -100 goes out. The cache will have 92, 93, ..., 100, -100.
      // Let's try another one: event id -99 goes out. The cache will have 93, 94, ..., 100, -100, -99.
      // Here, if lastEventId is -100, lastEventId - firstCachedId causes an underflow and thus results
      // in the correct offset. So we really don't have to handle it differently.
if (lastEventId > 0 && lastEventId < firstCachedId) return cache.toIterator map (_._2)
}
val offset = 1 + lastEventId - firstCachedId
if (offset < size) cache.toIterator drop offset.toInt map (_._2)
else Iterator.empty
}
def eventsOnConnect(lastEvent: Option[String]) = {
lastEvent match {
case Some(lastEventId) => findFromLastEvent(lastEventId.uHexToLong)
case None if lastEventOnConnect => (cache.lastOption map (_._2)).toIterator
case None => Iterator.empty
}
}
}
class OneCache(val lastEventOnConnect: Boolean) extends EventCache {
private[actors] var cache: Option[(Long, MessageChunk)] = None
private[actors] var eventId = 0l
def storeAndFormat(evt: Publish) = {
eventId += 1
val messageChunk = MessageChunk(HttpData(SSESupport.toSSE(evt.eventType, eventId, evt.content)))
cache = Some((eventId, messageChunk))
messageChunk
}
def eventsOnConnect(lastEvent: Option[String]) = {
lastEvent match { // If lastEvent is the one in cache, we don't send. Otherwise send.
case Some(lastEventId) if cache exists (_._1 == lastEventId.uHexToLong) => Iterator.empty
case Some(lastEventId) => cache.toIterator map (_._2)
case None =>
if (lastEventOnConnect) cache.toIterator map (_._2)
else Iterator.empty
}
}
}
class NoCache extends EventCache {
val lastEventOnConnect = false
def storeAndFormat(evt: Publish) = MessageChunk(HttpData(SSESupport.toSSE(evt.eventType, evt.content)))
def eventsOnConnect(lastEvent: Option[String]) = Iterator.empty
}
object SSESupport {
private val event = ByteString("event: ")
private val id = ByteString("\\nid: ")
private val data = ByteString("\\ndata: ")
private val endEvent = ByteString("\\n\\n")
def toSSE(evtType: String, content: ByteString): ByteString =
event ++ ByteString(evtType) ++ data ++ content ++ endEvent
def toSSE(evtType: String, evtId: Long, content: ByteString): ByteString =
event ++ ByteString(evtType) ++ id ++ evtId.toUHexByteString ++ data ++ content ++ endEvent
val streamEnd = ByteString("event: stream_end\\ndata: End of stream\\n\\n")
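  // Illustrative frame (assuming toUHexByteString renders 26 as "1a"):
  //   toSSE("quote", 26L, ByteString("hello")) == ByteString("event: quote\nid: 1a\ndata: hello\n\n")
  // i.e. an SSE event named "quote" with id "1a" and data "hello".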
}
|
SarathChandran/squbs
|
samples/pubsub/pubsubsvc/src/main/scala/org/squbs/samples/pubsub/actors/EventCache.scala
|
Scala
|
apache-2.0
| 6,139 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package helper
import connectors.HmrcTierConnectorWrapped
import controllers.utils.ControllerUtils
import javax.inject.Inject
import models.PbikCredentials
import play.api.Configuration
import play.api.libs.json
import play.api.mvc.{AnyContent, Request}
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.Future
// Stub this so we don't need to mock all the methods
class StubbedControllerUtils @Inject()(configuration: Configuration) extends ControllerUtils(configuration) {
override def retrieveNPSCredentials(tierConnector: HmrcTierConnectorWrapped, year: Int, empRef: String)(
implicit hc: HeaderCarrier,
formats: json.Format[PbikCredentials]): Future[PbikCredentials] =
Future.successful(new PbikCredentials(0, 0, 0, "", ""))
override def getNPSMutatorSessionHeader(implicit request: Request[AnyContent]): Future[Option[Map[String, String]]] =
Future.successful(Some(Map.empty[String, String]))
}
|
hmrc/pbik
|
test/helper/StubbedControllerUtils.scala
|
Scala
|
apache-2.0
| 1,543 |
//############################################################################
// Bitsets
//############################################################################
//############################################################################
import scala.language.postfixOps
object TestMutable {
import scala.collection.mutable.BitSet
val ms0 = new BitSet
val ms1 = new BitSet(8)
val ms2 = new BitSet(0)
ms0 += 2
ms1 ++= List(1, 2)
ms1 -= 1
ms1 --= List(1)
ms2(2) = true
ms2(3) = false
Console.println("ms0 = " + ms0)
Console.println("ms1 = " + ms1)
Console.println("ms2 = " + ms2)
Console.println("mb0 = " + ms0.contains(-1))
Console.println("mb1 = " + ms1.contains(2))
Console.println("mb2 = " + ms2.contains(3))
Console.println("xs0 = " + ms0.iterator.toList)
Console.println("xs1 = " + ms1.iterator.toList)
Console.println("xs2 = " + ms2.iterator.toList)
Console.println("ma0 = " + ms0.toList)
Console.println("ma1 = " + ms1.toList)
Console.println("ma2 = " + ms2.toList)
Console.println("mi0 = " + ms0.toImmutable)
Console.println("mi1 = " + ms1.toImmutable)
Console.println("mi2 = " + ms2.toImmutable)
Console.println()
val N = 257
val gen = 3
val bs = BitSet((1 until N): _*)
(1 until N).foldLeft(gen) {
case (acc, i) =>
assert(bs.size == N-i, s"Bad size for $bs, expected ${N-i} actual ${bs.size}")
assert(!bs.isEmpty, s"Unexpected isEmpty for $bs")
bs -= acc
acc*gen % N
}
assert(bs.size == 0, s"Expected size == 0 for $bs")
assert(bs.isEmpty, s"Expected isEmpty for $bs")
}
object TestMutable2 {
import scala.collection.mutable.BitSet
import scala.collection.immutable.TreeSet
val l0 = 0 to 24 by 2 toList
val l1 = (190 to 255 toList) reverse
val l2 = (0 to 256 toList)
val l3 = (1 to 200 by 2 toList) reverse
val t0 = TreeSet(l0: _*)
val t1 = TreeSet(l1: _*)
val t2 = TreeSet(l2: _*)
val t3 = TreeSet(l3: _*)
val b0 = BitSet(l0: _*)
val b1 = BitSet(l1: _*)
val b2 = BitSet(l2: _*)
val b3 = BitSet(l3: _*)
println("m2_m0 = " + b0.toBitMask.toList.map(_.toBinaryString))
println("m2_m2 = " + b2.toBitMask.toList.map(_.toHexString))
println("m2_m0c = " + (BitSet.fromBitMask(b0.toBitMask) == b0))
println("m2_m1c = " + (BitSet.fromBitMask(b1.toBitMask) == b1))
println("m2_m2c = " + (BitSet.fromBitMask(b2.toBitMask) == b2))
println("m2_m3c = " + (BitSet.fromBitMask(b3.toBitMask) == b3))
println("m2_i0 = " + (t0 == b0))
println("m2_i1 = " + (t1 == b1))
println("m2_i2 = " + (t2 == b2))
println("m2_i3 = " + (t3 == b3))
println("m2_f0 = " + (t0.rangeFrom(42) == b0.rangeFrom(42)))
println("m2_f1 = " + (t1.rangeFrom(42) == b1.rangeFrom(42)))
println("m2_f2 = " + (t2.rangeFrom(42) == b2.rangeFrom(42)))
println("m2_f3 = " + (t3.rangeFrom(42) == b3.rangeFrom(42)))
println("m2_t0 = " + (t0.rangeTo(195) == b0.rangeTo(195)))
println("m2_t1 = " + (t1.rangeTo(195) == b1.rangeTo(195)))
println("m2_t2 = " + (t2.rangeTo(195) == b2.rangeTo(195)))
println("m2_t3 = " + (t3.rangeTo(195) == b3.rangeTo(195)))
println("m2_r0 = " + (t0.range(43,194) == b0.range(43,194)))
println("m2_r1 = " + (t1.range(43,194) == b1.range(43,194)))
println("m2_r2 = " + (t2.range(43,194) == b2.range(43,194)))
println("m2_r3 = " + (t3.range(43,194) == b3.range(43,194)))
println()
}
object TestMutable3 {
import scala.collection.mutable.BitSet
val b0 = BitSet(5, 6)
val b1 = BitSet(7)
val b2 = BitSet(1, 5)
val b3 = BitSet(6, 7)
val b4 = BitSet(6, 7)
b1 |= b0
println(s"b1:$b1")
b2 &= b0
println(s"b2:$b2")
b3 ^= b0
println(s"b3:$b3")
b4 &~= b0
println(s"b4:$b4")
b0 ^= b0 |= b1
println(s"b0:$b0")
}
/***
The memory requirements here are way beyond
what a test should exercise.
object TestMutable4 {
import scala.collection.mutable.BitSet
val bMax = BitSet(Int.MaxValue)
println(s"bMax:$bMax")
bMax.foreach(println)
val bLarge = BitSet(2000000001)
println(s"bLarge:$bLarge")
println(bMax == bLarge)
}
***/
object TestImmutable {
import scala.collection.immutable.BitSet
val is0 = BitSet()
val is1 = BitSet.fromBitMask(Array())
val is2 = BitSet.fromBitMask(Array(4))
val is3 = BitSet.empty
Console.println("is0 = " + is0)
Console.println("is1 = " + is1)
Console.println("is2 = " + is2)
Console.println("is3 = " + is3)
Console.println("ib0 = " + is0.contains(-1))
Console.println("ib1 = " + is1.contains(0))
Console.println("ib2 = " + is2.contains(2))
Console.println("ib3 = " + is3.contains(2))
Console.println("ys0 = " + is0.iterator.toList)
Console.println("ys1 = " + is1.iterator.toList)
Console.println("ys2 = " + is2.iterator.toList)
Console.println("ys3 = " + is3.iterator.toList)
Console.println("ia0 = " + is0.toList)
Console.println("ia1 = " + is1.toList)
Console.println("ia2 = " + is2.toList)
Console.println("ia3 = " + is3.toList)
Console.println()
}
object TestImmutable2 {
import scala.collection.immutable.{BitSet, TreeSet}
val l0 = 0 to 24 by 2 toList
val l1 = (190 to 255 toList) reverse
val l2 = (0 to 256 toList)
val l3 = (1 to 200 by 2 toList) reverse
val t0 = TreeSet(l0: _*)
val t1 = TreeSet(l1: _*)
val t2 = TreeSet(l2: _*)
val t3 = TreeSet(l3: _*)
val b0 = BitSet(l0: _*)
val b1 = BitSet(l1: _*)
val b2 = BitSet(l2: _*)
val b3 = BitSet(l3: _*)
println("i2_m0 = " + b0.toBitMask.toList.map(_.toBinaryString))
println("i2_m2 = " + b2.toBitMask.toList.map(_.toHexString))
println("i2_m0c = " + (BitSet.fromBitMask(b0.toBitMask) == b0))
println("i2_m1c = " + (BitSet.fromBitMask(b1.toBitMask) == b1))
println("i2_m2c = " + (BitSet.fromBitMask(b2.toBitMask) == b2))
println("i2_m3c = " + (BitSet.fromBitMask(b3.toBitMask) == b3))
println("i2_i0 = " + (t0 == b0))
println("i2_i1 = " + (t1 == b1))
println("i2_i2 = " + (t2 == b2))
println("i2_i3 = " + (t3 == b3))
println("i2_f0 = " + (t0.rangeFrom(42) == b0.rangeFrom(42)))
println("i2_f1 = " + (t1.rangeFrom(42) == b1.rangeFrom(42)))
println("i2_f2 = " + (t2.rangeFrom(42) == b2.rangeFrom(42)))
println("i2_f3 = " + (t3.rangeFrom(42) == b3.rangeFrom(42)))
println("i2_t0 = " + (t0.rangeTo(195) == b0.rangeTo(195)))
println("i2_t1 = " + (t1.rangeTo(195) == b1.rangeTo(195)))
println("i2_t2 = " + (t2.rangeTo(195) == b2.rangeTo(195)))
println("i2_t3 = " + (t3.rangeTo(195) == b3.rangeTo(195)))
println("i2_r0 = " + (t0.range(77,194) == b0.range(77,194)))
println("i2_r1 = " + (t1.range(77,194) == b1.range(77,194)))
println("i2_r2 = " + (t2.range(77,194) == b2.range(77,194)))
println("i2_r3 = " + (t3.range(77,194) == b3.range(77,194)))
println()
}
object TestImmutable3 {
import scala.collection.immutable.BitSet
BitSet(125).filter{ xi => println(xi); true } // scala/bug#11380
println()
}
object Test extends App {
TestMutable
TestMutable2
TestMutable3
// TestMutable4
TestImmutable
TestImmutable2
TestImmutable3
}
//############################################################################
|
lrytz/scala
|
test/files/run/bitsets.scala
|
Scala
|
apache-2.0
| 7,034 |
/*
* Copyright (c) 2012-2014 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
// Iglu
import iglu.client.Resolver
// Scalaz
import scalaz._
import Scalaz._
// This project
import loaders.CollectorPayload
import registry.snowplow.{Tp1Adapter => SpTp1Adapter}
import registry.snowplow.{Tp2Adapter => SpTp2Adapter}
import registry.snowplow.{RedirectAdapter => SpRedirectAdapter}
import registry.{
CloudfrontAccessLogAdapter,
IgluAdapter,
CallrailAdapter,
MailchimpAdapter,
MandrillAdapter,
PagerdutyAdapter,
PingdomAdapter,
UrbanAirshipAdapter,
SendgridAdapter
}
/**
* The AdapterRegistry lets us convert a CollectorPayload
* into one or more RawEvents, using a given adapter.
*/
object AdapterRegistry {
private object Vendor {
val Snowplow = "com.snowplowanalytics.snowplow"
val Cumulo = "io.acuminous.cumulo"
val Redirect = "r"
val Iglu = "com.snowplowanalytics.iglu"
val Callrail = "com.callrail"
val Mailchimp = "com.mailchimp"
val Mandrill = "com.mandrill"
val Pagerduty = "com.pagerduty"
val Pingdom = "com.pingdom"
val Cloudfront = "com.amazon.aws.cloudfront"
val UrbanAirship = "com.urbanairship.connect"
val Sendgrid = "com.sendgrid"
}
/**
* Router to determine which adapter we use
* to convert the CollectorPayload into
* one or more RawEvents.
*
* @param payload The CollectorPayload we
* are transforming
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation
* @return a Validation boxing either a
* NEL of RawEvents on Success,
* or a NEL of Strings on Failure
*/
def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents = (payload.api.vendor, payload.api.version) match {
case (Vendor.Snowplow, "tp1") => SpTp1Adapter.toRawEvents(payload)
case (Vendor.Snowplow, "tp2") => SpTp2Adapter.toRawEvents(payload)
case (Vendor.Cumulo, "tp1") => SpTp1Adapter.toRawEvents(payload)
case (Vendor.Cumulo, "tp2") => SpTp2Adapter.toRawEvents(payload)
case (Vendor.Redirect, "tp2") => SpRedirectAdapter.toRawEvents(payload)
case (Vendor.Iglu, "v1") => IgluAdapter.toRawEvents(payload)
case (Vendor.Callrail, "v1") => CallrailAdapter.toRawEvents(payload)
case (Vendor.Mailchimp, "v1") => MailchimpAdapter.toRawEvents(payload)
case (Vendor.Mandrill, "v1") => MandrillAdapter.toRawEvents(payload)
case (Vendor.Pagerduty, "v1") => PagerdutyAdapter.toRawEvents(payload)
case (Vendor.Pingdom, "v1") => PingdomAdapter.toRawEvents(payload)
case (Vendor.Cloudfront, "wd_access_log") => CloudfrontAccessLogAdapter.WebDistribution.toRawEvents(payload)
case (Vendor.UrbanAirship, "v1") => UrbanAirshipAdapter.toRawEvents(payload)
case (Vendor.Sendgrid, "v3") => SendgridAdapter.toRawEvents(payload)
case _ => s"Payload with vendor ${payload.api.vendor} and version ${payload.api.version} not supported by this version of Scala Common Enrich".failNel
}
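  // For example, a payload with api.vendor == "com.mailchimp" and api.version == "v1" is routed to
  // MailchimpAdapter.toRawEvents, while an unsupported (vendor, version) pair fails with the
  // descriptive message above wrapped in a non-empty list.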
}
|
jramos/snowplow
|
3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/AdapterRegistry.scala
|
Scala
|
apache-2.0
| 3,831 |
/*
// Copyright 2012/2013 by Gustavo Steinberg, Flavio Soares, Pierre Andrews, Gustavo Salazar Torres, Thomaz Abramo
//
// This file is part of the Vigia Político program. The Vigia Político
// project is free software; you can redistribute it and/or modify it
// under the terms of the GNU Affero General Public License as published
// by the Free Software Foundation (FSF), version 3 of the License.
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even an implied warranty of FITNESS
// for any particular MARKET or APPLICATION. See the license for
// further details. You should have received a copy of the GNU Affero
// General Public License, under the title "LICENCA.txt", along with this
// program; if not, visit http://www.gnu.org/licenses/
*/
package models
import play.api.db._
import play.api.Play.current
import play.api.libs.json._
import play.api.libs.json.util._
import play.api.libs.json.Writes._
import play.api.libs.functional.syntax._
import java.util.Date
import anorm._
import anorm.SqlParser._
// import securesocial.core._
case class User(id: Pk[Long], firstName: String, lastName: String, email: Option[String], oauthId: String, oauthProvider: String,
countryName: Option[String], stateName: Option[String], cityName: Option[String], docId: Option[String], typeCode: Int, var configured: Boolean)
object User {
val NORMAL_TYPE: Int = 1
val CONGRESS_TYPE: Int = 2
implicit object PkFormat extends Format[Pk[Long]] {
def reads(json: JsValue): JsResult[Pk[Long]] = JsSuccess(
json.asOpt[Long].map(id => Id(id)).getOrElse(NotAssigned))
def writes(id: Pk[Long]): JsValue = id.map(JsNumber(_)).getOrElse(JsNull)
}
implicit val userWrites = Json.writes[User]
implicit val userReads = Json.reads[User]
val simple = {
(get[Pk[Long]]("id") ~
get[String]("first_name") ~
get[String]("last_name") ~
get[Option[String]]("email") ~
get[String]("oauth_id") ~
get[String]("oauth_provider") ~
get[Option[String]]("country_name") ~
get[Option[String]]("state_name") ~
get[Option[String]]("city_name") ~
get[Option[String]]("doc_id") ~
get[Int]("type_code") ~
get[Boolean]("configured")) map {
case id ~ first_name ~ last_name ~ email ~ oauth_id ~ oauth_provider ~ country_name ~ state_name ~ city_name ~ doc_id ~ type_code ~ configured =>
User(id, first_name, last_name, email, oauth_id, oauth_provider, country_name, state_name, city_name, doc_id, type_code, configured)
}
}
def all(): Seq[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users").as(User.simple *)
}
}
def findAllCongressman(): Seq[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users where type_code={type_code}").on('type_code -> CONGRESS_TYPE).as(User.simple *)
}
}
def findFirst100Congressman(): Seq[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users where type_code={type_code}").on('type_code -> CONGRESS_TYPE).as(User.simple *)
}
}
def findById(id: Long): Option[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users where id={id}").on(
'id -> id).as(User.simple singleOpt)
}
}
def findByEmail(email: String): Option[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users where email={email} LIMIT 1").on(
'email -> email).as(User.simple singleOpt)
}
}
def findByEmailAndOAuthId(email: String, oauth_id: String): Option[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users where oauth_id={oauth_id} and email={email}").on(
'email -> email,
'oauth_id -> oauth_id).as(User.simple singleOpt)
}
}
def findByOAuthId(oauth_id: String): Option[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users where oauth_id={oauth_id}").on(
'oauth_id -> oauth_id).as(User.simple singleOpt)
}
}
def save(firstName: String, lastName: String, email: Option[String], oauthId: String, oauthProvider: String,
countryName: Option[String], stateName: Option[String], cityName: Option[String], docId: Option[String], typeCode: Int): User = {
println("entrou no metodo")
DB.withConnection { implicit connection =>
val idOpt: Option[Long] = SQL("""
INSERT INTO users(first_name, last_name, email, oauth_id, oauth_provider, country_name,
state_name, city_name, doc_id, type_code)
VALUES({first_name}, {last_name}, {email}, {oauth_id}, {oauth_provider}, {country_name}, {state_name},
{city_name},{doc_id}, {type_code})
""")
.on(
'first_name -> firstName,
'last_name -> lastName,
'email -> email,
'oauth_id -> oauthId,
'oauth_provider -> oauthProvider,
'country_name -> countryName,
'state_name -> stateName,
'city_name -> cityName,
'doc_id -> docId,
'type_code -> typeCode).executeInsert()
println("saiu do metodo")
idOpt.map { id => User(Id(id), firstName, lastName, email, oauthId, oauthProvider, countryName, stateName, cityName, docId, typeCode, false) }.get
}
}
def updateConfigured(user: User): Option[User] ={
DB.withConnection { implicit connection =>
SQL("""
UPDATE users SET configured={configured} WHERE id={id}
""")
.on(
'id -> user.id,
'configured -> user.configured).executeUpdate()
Option(user)
}
}
def updateById(id: Long, firstName: String, lastName: String, email: Option[String], oauthId: String, oauthProvider: String,
countryName: Option[String], stateName: Option[String], cityName: Option[String], docId: Option[String], typeCode: Int): Option[User] = {
DB.withConnection { implicit connection =>
SQL("""
UPDATE users SET first_name={first_name}, last_name={last_name}, email={email},
oauth_id={oauth_id}, oauth_provider={oauth_provider}, country_name={country_name},
state_name={state_name}, city_name={city_name}, doc_id={doc_id}, type_code={type_code}
WHERE id={id}
""")
.on(
'id -> id,
'first_name -> firstName,
'last_name -> lastName,
'email -> email,
'oauth_id -> oauthId,
'oauth_provider -> oauthProvider,
'country_name -> countryName,
'state_name -> stateName,
'city_name -> cityName,
'doc_id -> docId,
'type_code -> typeCode).executeUpdate()
Option(User(Id(id), firstName, lastName, email, oauthId, oauthProvider, countryName, stateName, cityName, docId, typeCode, false))
}
}
def deleteById(id: Long) {
DB.withConnection { implicit connection =>
SQL("""
DELETE FROM users
WHERE id={id}
""")
.on(
        'id -> id).executeUpdate()
}
}
def findByEmail(email: Option[String]): Option[User] = {
DB.withConnection { implicit connection =>
SQL("select * from users where email={email} LIMIT 1").on(
'email -> email.get).as(User.simple singleOpt)
}
}
def updateModelPath(id: Long, modelPath: Option[String])
{
DB.withConnection { implicit connection =>
SQL("""
UPDATE users SET model_path={model_path} WHERE id={id}
""")
.on(
'id -> id,
'model_path -> modelPath).executeUpdate()
}
}
def findModelPath(id: Long): Option[String] ={
DB.withConnection { implicit connection =>
      val result = SQL("select model_path from users where id={id}").on(
'id -> id).apply().head
try{
return Option(result[String]("model_path"))
} catch{
case e: Exception => return None
}
}
}
}
|
cidadao-automatico/cidadao-server
|
app/models/User.scala
|
Scala
|
agpl-3.0
| 7,963 |
package org.powlab.jeye.decode.graph
import org.powlab.jeye.decode.graph.OpcodeNodes._
/**
 * A set of utility methods.
*/
object OpcodeTreeHelper {
def scanTree(tree: OpcodeTree, handler: NodeHandler) {
scanTree(tree, tree.head, handler)
}
/**
   * Tree traversal.
   * TODO here: optimization, replace the recursion with stack-based processing.
*/
def scanTree(tree: OpcodeTree, fromNode: OpcodeNode, handler: NodeHandler) {
val marker = tree.prepared
    def doScan(node: OpcodeNode) {
var preview: OpcodeNode = null
var current: OpcodeNode = node
while (current != null) {
if (marker.isMarked(current)) {
return
}
marker.mark(current);
handler(current)
if (current.branchy) {
          tree.nexts(current).foreach(doScan(_))
return
}
preview = current
current = tree.next(current)
}
}
    doScan(fromNode)
}
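  /* A possible stack-based variant of scanTree, sketching the TODO above (illustrative only;
   * it assumes tree.next may return null for terminal nodes, as the loop above implies):
   *
   *   def scanTreeIterative(tree: OpcodeTree, fromNode: OpcodeNode, handler: NodeHandler) {
   *     val marker = tree.prepared
   *     val stack = scala.collection.mutable.Stack(fromNode)
   *     while (stack.nonEmpty) {
   *       val current = stack.pop()
   *       if (!marker.isMarked(current)) {
   *         marker.mark(current)
   *         handler(current)
   *         if (current.branchy) tree.nexts(current).foreach(stack.push)
   *         else Option(tree.next(current)).foreach(stack.push)
   *       }
   *     }
   *   }
   */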
def copy(tree: OpcodeTree): OpcodeTree = {
val newTree = new OpcodeTree(tree.plainTree)
def registryNode(node: OpcodeNode) {
if (! newTree.has(node)) {
val newDetails = OpcodeDetails.copyDetails(tree.details(node))
newTree.registryNode(node, newDetails)
}
}
    // Get a reference to a previously created node, or create a new one
def get(node: OpcodeNode): OpcodeNode = {
scanOpcodeNodes(node, registryNode)
newTree.current(node.id)
}
    // Copy the links between the nodes
scanTree(tree, opcodeNode => {
val newOpcode = get(opcodeNode)
tree.nexts(opcodeNode).foreach(nextOpcodeNode => {
val nextNewNode = get(nextOpcodeNode)
newTree.link(nextNewNode, newOpcode)
})
})
newTree.link(newTree.current(0), newTree.head)
val newResources = newTree.resources
    // TODO here: since the instructions are immutable, we can simply move them over
tree.resources.scanAll(opcodeNode => {
val newResource = newTree.current(opcodeNode.id)
newResources += newResource
})
newResources ++= tree.resources.getPatternResults
newResources ++= tree.resources.getClassifiers
newTree
}
}
|
powlab/jeye
|
src/main/scala/org/powlab/jeye/decode/graph/OpcodeTreeHelper.scala
|
Scala
|
apache-2.0
| 2,338 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream.sql
import java.sql.Timestamp
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.timestamps.BoundedOutOfOrdernessTimestampExtractor
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.windowing.time.Time
import org.apache.flink.table.api.EnvironmentSettings
import org.apache.flink.table.api.scala._
import org.apache.flink.table.runtime.utils.{StreamITCase, StreamingWithStateTestBase}
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit._
import scala.collection.mutable
class TemporalJoinITCase extends StreamingWithStateTestBase {
@Before
def clear(): Unit = {
StreamITCase.clear
}
/**
* Because of nature of the processing time, we can not (or at least it is not that easy)
* validate the result here. Instead of that, here we are just testing whether there are no
* exceptions in a full blown ITCase. Actual correctness is tested in unit tests.
*/
@Test
def testProcessTimeInnerJoin(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val settings = EnvironmentSettings.newInstance().useOldPlanner().build
val tEnv = StreamTableEnvironment.create(env, settings)
env.setStateBackend(getStateBackend)
env.setParallelism(1)
env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime)
val sqlQuery =
"""
|SELECT
| o.amount * r.rate AS amount
|FROM
| Orders AS o,
| LATERAL TABLE (Rates(o.proctime)) AS r
|WHERE r.currency = o.currency
|""".stripMargin
val ordersData = new mutable.MutableList[(Long, String)]
ordersData.+=((2L, "Euro"))
ordersData.+=((1L, "US Dollar"))
ordersData.+=((50L, "Yen"))
ordersData.+=((3L, "Euro"))
ordersData.+=((5L, "US Dollar"))
val ratesHistoryData = new mutable.MutableList[(String, Long)]
ratesHistoryData.+=(("US Dollar", 102L))
ratesHistoryData.+=(("Euro", 114L))
ratesHistoryData.+=(("Yen", 1L))
ratesHistoryData.+=(("Euro", 116L))
ratesHistoryData.+=(("Euro", 119L))
val orders = env
.fromCollection(ordersData)
.toTable(tEnv, 'amount, 'currency, 'proctime.proctime)
val ratesHistory = env
.fromCollection(ratesHistoryData)
.toTable(tEnv, 'currency, 'rate, 'proctime.proctime)
tEnv.registerTable("Orders", orders)
tEnv.registerTable("RatesHistory", ratesHistory)
tEnv.registerFunction(
"Rates",
ratesHistory.createTemporalTableFunction('proctime, 'currency))
val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
result.addSink(new StreamITCase.StringSink[Row])
env.execute()
}
@Test
def testEventTimeInnerJoin(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val settings = EnvironmentSettings.newInstance().useOldPlanner().build
val tEnv = StreamTableEnvironment.create(env, settings)
env.setStateBackend(getStateBackend)
env.setParallelism(1)
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val sqlQuery =
"""
|SELECT
| o.amount * r.rate AS amount
|FROM
| Orders AS o,
| LATERAL TABLE (Rates(o.rowtime)) AS r
|WHERE r.currency = o.currency
|""".stripMargin
val ordersData = new mutable.MutableList[(Long, String, Timestamp)]
ordersData.+=((2L, "Euro", new Timestamp(2L)))
ordersData.+=((1L, "US Dollar", new Timestamp(3L)))
ordersData.+=((50L, "Yen", new Timestamp(4L)))
ordersData.+=((3L, "Euro", new Timestamp(5L)))
val ratesHistoryData = new mutable.MutableList[(String, Long, Timestamp)]
ratesHistoryData.+=(("US Dollar", 102L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 114L, new Timestamp(1L)))
ratesHistoryData.+=(("Yen", 1L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 116L, new Timestamp(5L)))
ratesHistoryData.+=(("Euro", 119L, new Timestamp(7L)))
var expectedOutput = new mutable.HashSet[String]()
expectedOutput += (2 * 114).toString
expectedOutput += (3 * 116).toString
val orders = env
.fromCollection(ordersData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(Long, String, Timestamp)]())
.toTable(tEnv, 'amount, 'currency, 'rowtime.rowtime)
val ratesHistory = env
.fromCollection(ratesHistoryData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(String, Long, Timestamp)]())
.toTable(tEnv, 'currency, 'rate, 'rowtime.rowtime)
tEnv.registerTable("Orders", orders)
tEnv.registerTable("RatesHistory", ratesHistory)
tEnv.registerTable("FilteredRatesHistory", tEnv.scan("RatesHistory").filter('rate > 110L))
tEnv.registerFunction(
"Rates",
tEnv.scan("FilteredRatesHistory").createTemporalTableFunction('rowtime, 'currency))
tEnv.registerTable("TemporalJoinResult", tEnv.sqlQuery(sqlQuery))
// Scan from registered table to test for interplay between
// LogicalCorrelateToTemporalTableJoinRule and TableScanRule
val result = tEnv.scan("TemporalJoinResult").toAppendStream[Row]
result.addSink(new StreamITCase.StringSink[Row])
env.execute()
assertEquals(expectedOutput, StreamITCase.testResults.toSet)
}
@Test
def testNestedTemporalJoin(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
val settings = EnvironmentSettings.newInstance().useOldPlanner().build
val tEnv = StreamTableEnvironment.create(env, settings)
env.setStateBackend(getStateBackend)
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val sqlQuery =
"""
|SELECT
| o.orderId,
| (o.amount * p.price * r.rate) as total_price
|FROM
| Orders AS o,
| LATERAL TABLE (Prices(o.rowtime)) AS p,
| LATERAL TABLE (Rates(o.rowtime)) AS r
|WHERE
| o.productId = p.productId AND
| r.currency = p.currency
|""".stripMargin
val ordersData = new mutable.MutableList[(Long, String, Long, Timestamp)]
ordersData.+=((1L, "A1", 2L, new Timestamp(2L)))
ordersData.+=((2L, "A2", 1L, new Timestamp(3L)))
ordersData.+=((3L, "A4", 50L, new Timestamp(4L)))
ordersData.+=((4L, "A1", 3L, new Timestamp(5L)))
val orders = env
.fromCollection(ordersData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(Long, String, Long, Timestamp)]())
.toTable(tEnv, 'orderId, 'productId, 'amount, 'rowtime.rowtime)
val ratesHistoryData = new mutable.MutableList[(String, Long, Timestamp)]
ratesHistoryData.+=(("US Dollar", 102L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 114L, new Timestamp(1L)))
ratesHistoryData.+=(("Yen", 1L, new Timestamp(1L)))
ratesHistoryData.+=(("Euro", 116L, new Timestamp(5L)))
ratesHistoryData.+=(("Euro", 119L, new Timestamp(7L)))
val ratesHistory = env
.fromCollection(ratesHistoryData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(String, Long, Timestamp)]())
.toTable(tEnv, 'currency, 'rate, 'rowtime.rowtime)
val pricesHistoryData = new mutable.MutableList[(String, String, Double, Timestamp)]
pricesHistoryData.+=(("A2", "US Dollar", 10.2D, new Timestamp(1L)))
pricesHistoryData.+=(("A1", "Euro", 11.4D, new Timestamp(1L)))
pricesHistoryData.+=(("A4", "Yen", 1D, new Timestamp(1L)))
pricesHistoryData.+=(("A1", "Euro", 11.6D, new Timestamp(5L)))
pricesHistoryData.+=(("A1", "Euro", 11.9D, new Timestamp(7L)))
val pricesHistory = env
.fromCollection(pricesHistoryData)
.assignTimestampsAndWatermarks(new TimestampExtractor[(String, String, Double, Timestamp)]())
.toTable(tEnv, 'productId, 'currency, 'price, 'rowtime.rowtime)
tEnv.createTemporaryView("Orders", orders)
tEnv.createTemporaryView("RatesHistory", ratesHistory)
tEnv.registerFunction(
"Rates",
ratesHistory.createTemporalTableFunction($"rowtime", $"currency"))
tEnv.registerFunction(
"Prices",
pricesHistory.createTemporalTableFunction($"rowtime", $"productId"))
tEnv.createTemporaryView("TemporalJoinResult", tEnv.sqlQuery(sqlQuery))
// Scan from registered table to test for interplay between
// LogicalCorrelateToTemporalTableJoinRule and TableScanRule
val result = tEnv.from("TemporalJoinResult").toAppendStream[Row]
result.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = List(
s"1,${2 * 114 * 11.4}",
s"2,${1 * 102 * 10.2}",
s"3,${50 * 1 * 1.0}",
s"4,${3 * 116 * 11.6}")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
}
class TimestampExtractor[T <: Product]
extends BoundedOutOfOrdernessTimestampExtractor[T](Time.seconds(10)) {
override def extractTimestamp(element: T): Long = element match {
case (_, _, ts: Timestamp) => ts.getTime
case (_, _, _, ts: Timestamp) => ts.getTime
case _ => throw new IllegalArgumentException(
"Expected the last element in a tuple to be of a Timestamp type.")
}
}
|
hequn8128/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/stream/sql/TemporalJoinITCase.scala
|
Scala
|
apache-2.0
| 10,017 |
//Part of Cosmos by OpenGenus Foundation
object PrintReverse {
case class Node[T](value: T, next: Option[Node[T]]) {
// append new node at the end
def :~>(tail: Node[T]): Node[T] = next match {
case None => new Node(value, Some(tail))
case Some(x) => new Node(value, Some(x :~> tail))
}
}
object Node {
def apply[T](value: T): Node[T] = new Node(value, None)
}
def printReverse[T](node: Node[T]): Unit = {
node.next.foreach(printReverse)
println(node.value)
}
def main(args: Array[String]): Unit = {
val integerLinkedList = Node(1) :~> Node(2) :~> Node(3)
val stringLinkedList = Node("hello") :~> Node("world") :~> Node("good") :~> Node("bye")
printReverse(integerLinkedList)
printReverse(stringLinkedList)
}
}
|
OpenGenus/cosmos
|
code/data_structures/src/list/singly_linked_list/operations/print_reverse/print_reverse.scala
|
Scala
|
gpl-3.0
| 784 |
package devsearch.concat.actors
import java.io._
import java.nio.file.{ Paths, Files, Path }
import akka.actor.{ Actor, ActorLogging, ActorRef, Props }
import devsearch.concat.Utils
import devsearch.concat.actors.Coordinator._
import devsearch.concat.actors.Worker._
import org.apache.commons.compress.archivers.{ ArchiveOutputStream, ArchiveStreamFactory }
import org.apache.commons.compress.archivers.tar.{ TarArchiveEntry, TarArchiveOutputStream }
import org.apache.commons.compress.compressors.{ CompressorOutputStream, CompressorStreamFactory }
import org.apache.commons.compress.utils.IOUtils
import org.apache.commons.io.FilenameUtils
/**
* The worker actor is the one in charge of creating the large
* files from smaller ones.
*
* @param master The actor to which it has to request new files
*/
class Worker(master: ActorRef) extends Actor with ActorLogging {
case class Stats(totalBytesSeen: Long, totalBytesProcessed: Long) {
def add(seen: Long, processed: Long) = Stats(totalBytesSeen + seen, totalBytesProcessed + processed)
}
object Stats {
def empty: Stats = Stats(0, 0)
}
/**
* Default worker behaviour at startup
*/
override def receive: PartialFunction[Any, Unit] = {
/* Start to work */
case Begin =>
master ! BlobRequest
context.become(awaitBlob(Stats.empty))
}
def awaitBlob(stats: Stats): PartialFunction[Any, Unit] = {
    /* New blob to be created; it will contain the concatenation of many files */
case BlobResponse(file) => {
val out = new BufferedOutputStream(Files.newOutputStream(file))
@SuppressWarnings(Array("org.brianmckenna.wartremover.warts.AsInstanceOf"))
val tarOut: TarArchiveOutputStream = new ArchiveStreamFactory().createArchiveOutputStream(ArchiveStreamFactory.TAR, out).asInstanceOf[TarArchiveOutputStream]
tarOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU)
master ! RepoRequest
context.become(fillBlob(file, tarOut, 0, stats))
}
}
def fillBlob(blob: Path, stream: TarArchiveOutputStream, bytesWritten: Long, stats: Stats): PartialFunction[Any, Unit] = {
/* One single file to append to the current blob */
case RepoResponse(file, relativePath) =>
val correctedPath = FilenameUtils.removeExtension(relativePath)
val sizes = Utils.walkFiles(file) { fileEntry =>
try {
val size = fileEntry.size
val isReasonableSize = size < Utils.MAX_FILE_SIZE
/** This lazy is important because is TextFile might read the whole file in memory */
lazy val isTextFile = Utils.isTextFile(fileEntry.inputStream)
if (isReasonableSize && isTextFile) {
val entry = new TarArchiveEntry(Paths.get(correctedPath, fileEntry.relativePath).toString)
entry.setSize(size)
stream.putArchiveEntry(entry)
val processed = IOUtils.copy(fileEntry.inputStream, stream)
stream.closeArchiveEntry()
(processed, size)
} else {
(0L, size)
}
} catch {
case e: IOException =>
log.error(e, s"Encountered error when processing ${fileEntry.relativePath} for repo ${relativePath}")
(0L, 0L)
}
}
val (processed, seen) = sizes.foldLeft((0L, 0L)) { (b, a) => (b._1 + a._1, b._2 + a._2) }
val updatedStats = stats.add(seen, processed)
val totalWritten = bytesWritten + processed
if (totalWritten >= Utils.BLOB_SIZE) {
stream.close()
log.info(s"Finished with blob : $blob")
master ! BlobRequest
context.become(awaitBlob(updatedStats))
} else {
master ! RepoRequest
context.become(fillBlob(blob, stream, totalWritten, updatedStats))
}
/* No more files, end what you are doing and send finished message */
case Shutdown =>
stream.close()
log.info(s"Closing last blob : $blob")
sender ! Finished(stats.totalBytesSeen, stats.totalBytesProcessed)
}
}
object Worker {
def props(reader: ActorRef): Props = Props(new Worker(reader))
case object RepoRequest
case object BlobRequest
case object Begin
case class Finished(bytesSeen: Long, bytesProcessed: Long)
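  /* Message-flow sketch (the Coordinator side is assumed; only messages visible in this file are shown):
   *   Begin         -> worker sends BlobRequest to the master
   *   BlobResponse  -> worker opens a tar stream over the blob and sends RepoRequest
   *   RepoResponse  -> worker appends text files; once Utils.BLOB_SIZE is reached it closes the
   *                    blob and sends BlobRequest again, otherwise it sends another RepoRequest
   *   Shutdown      -> worker closes the current blob and replies with Finished(bytesSeen, bytesProcessed)
   */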
}
|
devsearch-epfl/devsearch-concat
|
src/main/scala/devsearch/concat/actors/Worker.scala
|
Scala
|
agpl-3.0
| 4,236 |
/**
* Created by anicolaspp on 7/3/16.
*/
package com.nico.actors
import akka.actor.{Actor, ActorLogging, Props}
import com.nico.actors.TransactionManagerActor._
import com.nico.persistence.{Account, TransactionManager}
class TransactionManagerActor(transactionManager: TransactionManager) extends Actor with ActorLogging {
override def receive: Receive = {
case AccountInfo() => sender() ! AccountInfoResult(transactionManager.manager.accountInfo)
case Deposit(amount) => {
log.debug("deposit: " + transactionManager.manager.accountInfo.id)
sender() ! AccountInfoResult(transactionManager.manager.deposit(amount))
}
case Extract(amount) => {
log.debug("extract: " + transactionManager.manager.accountInfo.id)
val (result, acc) = transactionManager.manager.extract(amount)
sender() ! ExtractResult(result, acc)
}
}
}
object TransactionManagerActor {
def props(transactionManager: TransactionManager): Props = Props(new TransactionManagerActor(transactionManager))
trait Transaction
case class AccountInfo() extends Transaction
case class AccountInfoResult(account: Account)
case class Deposit(amount: Double) extends Transaction
case class Extract(amount: Double) extends Transaction
case class ExtractResult(result: Boolean, account: Account)
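  // Usage sketch (the actor system and the TransactionManager instance are assumptions):
  //   val ref = system.actorOf(TransactionManagerActor.props(transactionManager))
  //   ref ! Deposit(100.0)  // replies with AccountInfoResult(updatedAccount)
  //   ref ! Extract(40.0)   // replies with ExtractResult(success, account)
  //   ref ! AccountInfo()   // replies with AccountInfoResult(currentAccount)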
}
|
anicolaspp/distributd-transaction-processor
|
actors/src/main/scala/com/nico/actors/TransactionManagerActor.scala
|
Scala
|
mit
| 1,339 |
package jp.ne.opt.redshiftfake.read
import com.github.tototoshi.csv._
import jp.ne.opt.redshiftfake.{Column, Row}
case class InvalidCsvException(message: String) extends RuntimeException
class CsvReader(csvRow: String, delimiterChar: Char, nullAs: String) {
private[this] object csvFormat extends CSVFormat {
val delimiter: Char = delimiterChar
val quoteChar: Char = '"'
val treatEmptyLineAsNil: Boolean = false
val escapeChar: Char = '\\\\'
val lineTerminator: String = "\\r\\n"
val quoting: Quoting = QUOTE_ALL
}
private[this] val parser = new CSVParser(csvFormat)
private[this] val emptyField = s"${csvFormat.quoteChar}${csvFormat.quoteChar}"
val toRow: Row = {
// to recognize last field when the field is empty.
val parsed = parser.parseLine(csvRow + csvFormat.delimiter + emptyField)
parsed.map { xs => Row(xs.map {
column => Column(if (column.nonEmpty && column != nullAs) Some(column) else None)
})}.getOrElse(throw InvalidCsvException(s"invalid csv row : $csvRow"))
}
}
|
opt-tech/redshift-fake-driver
|
src/main/scala/jp/ne/opt/redshiftfake/read/CsvReader.scala
|
Scala
|
apache-2.0
| 1,038 |
package com.harborx.api.system
import org.scalatest._
import org.scalatestplus.play.{PlaySpec, _}
import play.api.mvc._
import play.api.test.FakeRequest
import play.api.test.Helpers._
import scala.concurrent.Future
class SystemSpec extends PlaySpec with OneServerPerSuite with MustMatchers with BeforeAndAfterAll {
"System controller" must {
"return OK when call GET /example" in {
val request = FakeRequest(GET, "/example")
val response = route(app, request)
response.isDefined mustEqual true
val result: Future[Result] = response.get
status(result) mustEqual OK
contentAsString(result) mustEqual "If you can see this, it means DI of Configuration is success!"
}
"return Test when call GET /env" in {
val request = FakeRequest(GET, "/env")
val response = route(app, request)
response.isDefined mustEqual true
val result: Future[Result] = response.get
status(result) mustEqual OK
contentAsString(result) mustEqual "current mode is:Test"
}
}
}
|
harborx/play-di-example
|
play-guice/test/com/harborx/api/system/SystemSpec.scala
|
Scala
|
mit
| 1,044 |
package ctlmc.bddgraph
import ctlmc._
import net.sf.javabdd._
class Domain(
val factory: GraphFactory,
val paramName: String,
val domainSize: Int
) {
val bddDomain = factory.bddFactory.extDomain(domainSize)
bddDomain.setName(paramName)
val bddSize = bddDomain.varNum()
def createBDD(value: Int): BDD = {
assert(value >= 0 && value < domainSize)
bddDomain.ithVar(value)
}
def createFullBDD(): BDD = {
bddDomain.domain()
}
}
|
fpoli/ctlmc
|
src/main/scala/bddgraph/Domain.scala
|
Scala
|
gpl-3.0
| 448 |
package generators
import java.io.IOException
import java.net.SocketException
import org.jsoup.nodes._
import org.jsoup.select._
import play.api.Logger
import services.SearchEngineService
import scala.collection.JavaConversions._
/**
* Created by franco on 9/12/16.
*/
trait BasicUrlGenerator {
final val ERROR_LOGGER: Logger = Logger("errorLogger")
  /**
    * Given a person's name (String) and a search String, returns a List with the obtained results
    * */
def getSearchedUrl(name : Option[String], query : Option[String]) : List[String]
  /**
    * Given the person's name, contained in an Array[String], and a search parameter
    * (query : String), returns a List with possible results
    **/
def getGoogleSearchRegisters(query: String, domain: String, cleanDomain: Boolean = false): List[String] = {
// Setup proxy
/* Proxy proxy = new Proxy( //
Proxy.Type.HTTP, //
InetSocketAddress.createUnresolved("127.0.0.1", 8080) //
);*/
var result: List[String] = List()
try {
val tuple = SearchEngineService.getQuery(query)
val doc: Document = tuple._1
val realQuery = tuple._2
val links: Elements = doc.select("a[href*='" + domain + "']")
      //It can also be a span: <span class="url">https://ar.linkedin.com/in/emiliolopezgabeiras</span>
for (link: Element <- links) {
result = cleanAndAdd(link.attr("href"), result, cleanDomain, domain)
}
var textLinks = doc.select(".fz-ms.fw-m.fc-12th.wr-bw,span.url,a.result__url") //Yahoo,IxQuick,Duck Duck Go
for (link: Element <- textLinks) {
result = cleanAndAdd(link.text(), result, cleanDomain, domain)
}
if (result.isEmpty) {
ERROR_LOGGER.error(this.getClass.getName + " :-: Links Not Found :-: " + realQuery)
println("Exited " + domain + " Generator without exception. Did not found links. Query: " + query)
}
} catch {
case e: SocketException => e.printStackTrace()
case e: IOException => e.printStackTrace()
if (e.getMessage == "HTTP error fetching URL") {
//Thread.sleep(10000)
}
case e: Exception => e.printStackTrace()
}
result
}
def cleanAndAdd(url: String, existentLinks: List[String], cleanDomain: Boolean, domain: String): List[String] = {
var temp = url
var result = existentLinks
if (temp.contains(domain) && !temp.startsWith("/search") && !temp.contains("translate")) {
if (temp.indexOf("https") > -1) temp = "https" + temp.split("https")(1)
else if (temp.indexOf("http") > -1) temp = "http" + temp.split("http")(1)
if (cleanDomain) temp = cleanUrlDomain(temp)
if (!"".equals(temp))
result = temp :: result
}
return result
}
  /**
    * Method in charge of cleaning a domain (url : String) to remove any excess characters
    * */
def cleanUrlDomain(url : String) : String
}
|
TVilaboa/Egresados
|
app/generators/BasicUrlGenerator.scala
|
Scala
|
gpl-3.0
| 3,058 |
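The trait above leaves getSearchedUrl and cleanUrlDomain abstract. The subclass below is a hypothetical sketch (the project's real generators are not shown here) illustrating one plausible implementation for a LinkedIn-style search; the class name and site filter are assumptions.

import generators.BasicUrlGenerator

class LinkedInUrlGeneratorSketch extends BasicUrlGenerator {

  // Builds a site-restricted query from the person's name and delegates to the
  // shared scraping helper defined in the trait.
  override def getSearchedUrl(name: Option[String], query: Option[String]): List[String] = {
    val fullQuery = (name.toList ++ query.toList).mkString(" ") + " site:linkedin.com/in"
    getGoogleSearchRegisters(fullQuery, "linkedin.com/in", cleanDomain = true)
  }

  // Strips query strings and trailing slashes so duplicate profiles collapse to one URL.
  override def cleanUrlDomain(url: String): String =
    url.split('?').head.stripSuffix("/")
}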
package libref.spec
import libref.collection._
import leon.lang._
import scala.language.postfixOps
import scala.language.implicitConversions
abstract class HeapADT {
def findMin: Option[BigInt]
def deleteMin: HeapADT
def insert (e: BigInt): HeapADT
def toSortedList: List[BigInt]
def content: Set[BigInt]
def isHeap: Boolean
def size: BigInt
}
|
fmlab-iis/LibRef
|
spec/heap.scala
|
Scala
|
gpl-3.0
| 367 |
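Illustrative only: the contract above is written against libref.collection.List and leon.lang, which are not shown here, so the sketch below mirrors the same interface shape with plain standard-library types instead of extending HeapADT. It is a naive sorted-list stand-in, not the project's verified heap.

final case class NaiveHeap(elems: List[BigInt]) {
  private val sorted = elems.sorted
  def findMin: Option[BigInt] = sorted.headOption
  def deleteMin: NaiveHeap = NaiveHeap(sorted.drop(1))
  def insert(e: BigInt): NaiveHeap = NaiveHeap(e :: sorted)
  def toSortedList: List[BigInt] = sorted
  def content: Set[BigInt] = sorted.toSet
  def isHeap: Boolean = true
  def size: BigInt = BigInt(sorted.length)
}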
import System.out._
import com.codahale.simplespec.Spec
object BackoffSpec extends Spec {
class `exponential backoff` {
// def `should take time` {
// val b = new Backoff()
// val start = System.currentTimeMillis
// for (i <- 1 to 12) b.spin
// val end = System.currentTimeMillis
// (end - start > 100) must be(true)
// }
}
}
|
aturon/ChemistrySet
|
src/test/scala/BackoffSpec.scala
|
Scala
|
bsd-2-clause
| 369 |
package com.rasterfoundry.backsplash.server
import com.rasterfoundry.backsplash.Parameters._
import com.rasterfoundry.datamodel._
import com.rasterfoundry.database.Implicits._
import com.rasterfoundry.database.{ProjectDao, MetricDao, ToolRunDao}
import cats.data._
import cats.effect._
import cats.implicits._
import doobie.{ConnectionIO, Transactor}
import doobie.implicits._
import org.http4s._
import org.http4s.headers._
import org.http4s.dsl.io._
import java.time.Instant
import java.util.UUID
class MetricMiddleware[F[_]](xa: Transactor[F])(implicit Conc: Concurrent[F]) {
def middleware(http: AuthedService[User, F]): AuthedService[User, F] =
Kleisli { withMetrics(http) }
def withMetrics(http: AuthedService[User, F])(
authedReq: AuthedRequest[F, User]): OptionT[F, Response[F]] =
authedReq match {
case _ if !Config.metrics.enableMetrics => http(authedReq)
case req @ GET -> Root / UUIDWrapper(projectId) / "layers" / UUIDWrapper(
layerId) / IntVar(_) / IntVar(_) / IntVar(_) as user =>
for {
_ <- OptionT.liftF {
Conc.start {
(ProjectDao.getProjectById(projectId) flatMap { projectO =>
(projectO map { project =>
{
val metric =
Metric(Instant.now,
ProjectLayerMosaicEvent(projectId,
layerId,
project.owner,
getReferer(req.req)),
user.id)
MetricDao.insert(metric)
}
}).getOrElse(0.pure[ConnectionIO])
}).transact(xa)
}
}
resp <- http(req)
} yield resp
case req @ GET -> Root / UUIDWrapper(projectId) / "analyses" / UUIDWrapper(
analysisId) / IntVar(_) / IntVar(_) / IntVar(_) :? NodeQueryParamMatcher(
node) as user =>
for {
_ <- OptionT.liftF {
Conc.start {
analysisToMetricFib(analysisId,
Some(projectId),
node,
user.id,
getReferer(req.req))
}
}
resp <- http(req)
} yield resp
case req @ GET -> Root / UUIDWrapper(analysisId) / IntVar(_) / IntVar(_) / IntVar(
_) :? NodeQueryParamMatcher(node) as user
if req.req.scriptName == "/tools" =>
for {
_ <- OptionT.liftF {
Conc.start {
analysisToMetricFib(analysisId,
None,
node,
user.id,
getReferer(req.req))
}
}
resp <- http(req)
} yield resp
case GET -> _ as _ => http(authedReq)
}
private def analysisToMetricFib(analysisId: UUID,
projectId: Option[UUID],
nodeId: Option[UUID],
requester: String,
referer: String) =
(ToolRunDao.query
.filter(analysisId)
.selectOption flatMap { toolRunO =>
toolRunO
.map({ toolRun =>
{
val metric =
Metric(
Instant.now,
AnalysisEvent(projectId orElse toolRun.projectId,
toolRun.projectLayerId,
toolRun.id,
nodeId,
toolRun.owner,
referer),
requester
)
MetricDao.insert(metric)
}
})
.getOrElse { 0.pure[ConnectionIO] }
}).transact(xa)
private def getReferer[T[_]](req: Request[T]): String =
req.headers.get(Referer) map { _.value } getOrElse ""
}
|
azavea/raster-foundry
|
app-backend/backsplash-server/src/main/scala/com/rasterfoundry/backsplash/middleware/MetricMiddleware.scala
|
Scala
|
apache-2.0
| 4,105 |
package com.eclipsesource.schema.internal
import com.eclipsesource.schema.internal.validation.Validated
import play.api.libs.json._
import scalaz.Failure
object SchemaUtil {
def dropSlashIfAny(path: String): String = if (path.startsWith("/#")) path.substring(1) else path
def failure(keyword: String,
msg: String,
schemaPath: Option[JsPath],
instancePath: JsPath,
instance: JsValue,
additionalInfo: JsObject = Json.obj()
): Validated[JsonValidationError, JsValue] =
Failure(
Seq(
JsonValidationError(
msg,
createErrorObject(keyword, schemaPath, instancePath, instance, additionalInfo)
)
)
)
def createErrorObject(keyword: String, schemaPath: Option[JsPath], instancePath: JsPath, instance: JsValue, additionalInfo: JsObject): JsObject = {
Json.obj(
"keyword" -> keyword
).deepMerge(schemaPath.fold(Json.obj("schemaPath" -> ""))(p => Json.obj("schemaPath" -> dropSlashIfAny(p.toString()))))
.deepMerge(
Json.obj(
"instancePath" -> instancePath.toString(),
"value" -> instance,
"errors" -> additionalInfo
)
)
}
def typeOfAsString(json: JsValue): String = {
json match {
case JsString(_) => "string"
case JsNumber(_) => "number"
case JsBoolean(_) => "boolean"
case JsObject(_) => "object"
case JsArray(_) => "array"
case JsNull => "null"
}
}
def toJson(errors: collection.Seq[(JsPath, collection.Seq[JsonValidationError])]): JsArray = {
val emptyErrors = Json.arr()
errors.foldLeft(emptyErrors) { case (accumulatedErrors, (_, validationErrors)) =>
val maybeError = validationErrors.foldLeft(None: Option[JsObject])((aggregatedError, err) => err.args.headOption match {
case Some(o@JsObject(_)) =>
Some(
aggregatedError.fold(
deepMerge(o, Json.obj("msgs" -> err.messages))
)(errObj => deepMerge(errObj, Json.obj("msgs" -> err.messages)))
)
case _ => aggregatedError
})
maybeError.fold(accumulatedErrors)(o => accumulatedErrors :+ o)
}
}
private def deepMerge(obj: JsObject, other: JsObject): JsObject = {
def merge(existingObject: JsObject, otherObject: JsObject): JsObject = {
val result = existingObject.fields.toMap ++ otherObject.fields.toMap.map {
case (otherKey, otherValue) =>
val maybeExistingValue = existingObject.fields.toMap.get(otherKey)
val newValue = (maybeExistingValue, otherValue) match {
case (Some(e: JsObject), o: JsObject) => merge(e, o)
case (Some(e: JsArray), o: JsArray) => e ++ o
case _ => otherValue
}
otherKey -> newValue
}
JsObject(result)
}
merge(obj, other)
}
}
|
eclipsesource/play-json-schema-validator
|
src/main/scala/com/eclipsesource/schema/internal/SchemaUtil.scala
|
Scala
|
apache-2.0
| 2,902 |
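A hypothetical sketch (not part of the library's sources) showing how the public helpers above could be combined to build a single type-mismatch failure; the paths and messages are invented.

import com.eclipsesource.schema.internal.SchemaUtil
import play.api.libs.json._

object SchemaUtilSketch extends App {
  val instance = JsNumber(42)
  // Produces a scalaz Failure wrapping a JsonValidationError whose args contain
  // the error JsObject assembled by createErrorObject.
  val failed = SchemaUtil.failure(
    keyword = "type",
    msg = s"expected string, found ${SchemaUtil.typeOfAsString(instance)}",
    schemaPath = Some(JsPath \ "properties" \ "name"),
    instancePath = JsPath \ "name",
    instance = instance
  )
  println(failed)
}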
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.File
import kafka.utils.TestUtils
import kafka.utils.TestUtils.checkEquals
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.record._
import org.apache.kafka.common.utils.{Time, Utils}
import org.junit.Assert._
import org.junit.{After, Before, Test}
import scala.collection.JavaConverters._
import scala.collection._
class LogSegmentTest {
val topicPartition = new TopicPartition("topic", 0)
val segments = mutable.ArrayBuffer[LogSegment]()
var logDir: File = _
/* create a segment with the given base offset */
def createSegment(offset: Long, indexIntervalBytes: Int = 10): LogSegment = {
val msFile = TestUtils.tempFile()
val ms = FileRecords.open(msFile)
val idxFile = TestUtils.tempFile()
val timeIdxFile = TestUtils.tempFile()
val txnIdxFile = TestUtils.tempFile()
idxFile.delete()
timeIdxFile.delete()
txnIdxFile.delete()
val idx = new OffsetIndex(idxFile, offset, 1000)
val timeIdx = new TimeIndex(timeIdxFile, offset, 1500)
val txnIndex = new TransactionIndex(offset, txnIdxFile)
val seg = new LogSegment(ms, idx, timeIdx, txnIndex, offset, indexIntervalBytes, 0, Time.SYSTEM)
segments += seg
seg
}
/* create a ByteBufferMessageSet for the given messages starting from the given offset */
def records(offset: Long, records: String*): MemoryRecords = {
MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V1, offset, CompressionType.NONE, TimestampType.CREATE_TIME,
records.map { s => new SimpleRecord(offset * 10, s.getBytes) }: _*)
}
@Before
def setup(): Unit = {
logDir = TestUtils.tempDir()
}
@After
def teardown() {
for(seg <- segments) {
seg.index.delete()
seg.timeIndex.delete()
seg.txnIndex.delete()
seg.log.delete()
}
Utils.delete(logDir)
}
/**
* A read on an empty log segment should return null
*/
@Test
def testReadOnEmptySegment() {
val seg = createSegment(40)
val read = seg.read(startOffset = 40, maxSize = 300, maxOffset = None)
assertNull("Read beyond the last offset in the segment should be null", read)
}
/**
* Reading from before the first offset in the segment should return messages
* beginning with the first message in the segment
*/
@Test
def testReadBeforeFirstOffset() {
val seg = createSegment(40)
val ms = records(50, "hello", "there", "little", "bee")
seg.append(50, 53, RecordBatch.NO_TIMESTAMP, -1L, ms)
val read = seg.read(startOffset = 41, maxSize = 300, maxOffset = None).records
checkEquals(ms.records.iterator, read.records.iterator)
}
/**
* If we set the startOffset and maxOffset for the read to be the same value
* we should get only the first message in the log
*/
@Test
def testMaxOffset() {
val baseOffset = 50
val seg = createSegment(baseOffset)
val ms = records(baseOffset, "hello", "there", "beautiful")
seg.append(baseOffset, 52, RecordBatch.NO_TIMESTAMP, -1L, ms)
def validate(offset: Long) =
assertEquals(ms.records.asScala.filter(_.offset == offset).toList,
seg.read(startOffset = offset, maxSize = 1024, maxOffset = Some(offset+1)).records.records.asScala.toList)
validate(50)
validate(51)
validate(52)
}
/**
* If we read from an offset beyond the last offset in the segment we should get null
*/
@Test
def testReadAfterLast() {
val seg = createSegment(40)
val ms = records(50, "hello", "there")
seg.append(50, 51, RecordBatch.NO_TIMESTAMP, -1L, ms)
val read = seg.read(startOffset = 52, maxSize = 200, maxOffset = None)
assertNull("Read beyond the last offset in the segment should give null", read)
}
/**
* If we read from an offset which doesn't exist we should get a message set beginning
* with the least offset greater than the given startOffset.
*/
@Test
def testReadFromGap() {
val seg = createSegment(40)
val ms = records(50, "hello", "there")
seg.append(50, 51, RecordBatch.NO_TIMESTAMP, -1L, ms)
val ms2 = records(60, "alpha", "beta")
seg.append(60, 61, RecordBatch.NO_TIMESTAMP, -1L, ms2)
val read = seg.read(startOffset = 55, maxSize = 200, maxOffset = None)
checkEquals(ms2.records.iterator, read.records.records.iterator)
}
/**
* In a loop append two messages then truncate off the second of those messages and check that we can read
* the first but not the second message.
*/
@Test
def testTruncate() {
val seg = createSegment(40)
var offset = 40
for (_ <- 0 until 30) {
val ms1 = records(offset, "hello")
seg.append(offset, offset, RecordBatch.NO_TIMESTAMP, -1L, ms1)
val ms2 = records(offset + 1, "hello")
seg.append(offset + 1, offset + 1, RecordBatch.NO_TIMESTAMP, -1L, ms2)
// check that we can read back both messages
val read = seg.read(offset, None, 10000)
assertEquals(List(ms1.records.iterator.next(), ms2.records.iterator.next()), read.records.records.asScala.toList)
// now truncate off the last message
seg.truncateTo(offset + 1)
val read2 = seg.read(offset, None, 10000)
assertEquals(1, read2.records.records.asScala.size)
checkEquals(ms1.records.iterator, read2.records.records.iterator)
offset += 1
}
}
@Test
def testReloadLargestTimestampAndNextOffsetAfterTruncation() {
val numMessages = 30
val seg = createSegment(40, 2 * records(0, "hello").sizeInBytes - 1)
var offset = 40
for (_ <- 0 until numMessages) {
seg.append(offset, offset, offset, offset, records(offset, "hello"))
offset += 1
}
assertEquals(offset, seg.nextOffset)
val expectedNumEntries = numMessages / 2 - 1
assertEquals(s"Should have $expectedNumEntries time indexes", expectedNumEntries, seg.timeIndex.entries)
seg.truncateTo(41)
assertEquals(s"Should have 0 time indexes", 0, seg.timeIndex.entries)
assertEquals(s"Largest timestamp should be 400", 400L, seg.largestTimestamp)
assertEquals(41, seg.nextOffset)
}
/**
* Test truncating the whole segment, and check that we can reappend with the original offset.
*/
@Test
def testTruncateFull() {
// test the case where we fully truncate the log
val seg = createSegment(40)
seg.append(40, 41, RecordBatch.NO_TIMESTAMP, -1L, records(40, "hello", "there"))
seg.truncateTo(0)
assertNull("Segment should be empty.", seg.read(0, None, 1024))
seg.append(40, 41, RecordBatch.NO_TIMESTAMP, -1L, records(40, "hello", "there"))
}
/**
* Append messages with timestamp and search message by timestamp.
*/
@Test
def testFindOffsetByTimestamp() {
val messageSize = records(0, s"msg00").sizeInBytes
val seg = createSegment(40, messageSize * 2 - 1)
// Produce some messages
for (i <- 40 until 50)
seg.append(i, i, i * 10, i, records(i, s"msg$i"))
assertEquals(490, seg.largestTimestamp)
// Search for an indexed timestamp
assertEquals(42, seg.findOffsetByTimestamp(420).get.offset)
assertEquals(43, seg.findOffsetByTimestamp(421).get.offset)
// Search for an un-indexed timestamp
assertEquals(43, seg.findOffsetByTimestamp(430).get.offset)
assertEquals(44, seg.findOffsetByTimestamp(431).get.offset)
// Search beyond the last timestamp
assertEquals(None, seg.findOffsetByTimestamp(491))
// Search before the first indexed timestamp
assertEquals(41, seg.findOffsetByTimestamp(401).get.offset)
// Search before the first timestamp
assertEquals(40, seg.findOffsetByTimestamp(399).get.offset)
}
/**
* Test that offsets are assigned sequentially and that the nextOffset variable is incremented
*/
@Test
def testNextOffsetCalculation() {
val seg = createSegment(40)
assertEquals(40, seg.nextOffset)
seg.append(50, 52, RecordBatch.NO_TIMESTAMP, -1L, records(50, "hello", "there", "you"))
assertEquals(53, seg.nextOffset)
}
/**
* Test that we can change the file suffixes for the log and index files
*/
@Test
def testChangeFileSuffixes() {
val seg = createSegment(40)
val logFile = seg.log.file
val indexFile = seg.index.file
seg.changeFileSuffixes("", ".deleted")
assertEquals(logFile.getAbsolutePath + ".deleted", seg.log.file.getAbsolutePath)
assertEquals(indexFile.getAbsolutePath + ".deleted", seg.index.file.getAbsolutePath)
assertTrue(seg.log.file.exists)
assertTrue(seg.index.file.exists)
}
/**
* Create a segment with some data and an index. Then corrupt the index,
* and recover the segment, the entries should all be readable.
*/
@Test
def testRecoveryFixesCorruptIndex() {
val seg = createSegment(0)
for(i <- 0 until 100)
seg.append(i, i, RecordBatch.NO_TIMESTAMP, -1L, records(i, i.toString))
val indexFile = seg.index.file
TestUtils.writeNonsenseToFile(indexFile, 5, indexFile.length.toInt)
seg.recover(new ProducerStateManager(topicPartition, logDir))
for(i <- 0 until 100)
assertEquals(i, seg.read(i, Some(i + 1), 1024).records.records.iterator.next().offset)
}
@Test
def testRecoverTransactionIndex(): Unit = {
val segment = createSegment(100)
val producerEpoch = 0.toShort
val partitionLeaderEpoch = 15
val sequence = 100
val pid1 = 5L
val pid2 = 10L
// append transactional records from pid1
segment.append(firstOffset = 100L, largestOffset = 101L, largestTimestamp = RecordBatch.NO_TIMESTAMP,
shallowOffsetOfMaxTimestamp = 100L, MemoryRecords.withTransactionalRecords(100L, CompressionType.NONE,
pid1, producerEpoch, sequence, partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)))
// append transactional records from pid2
segment.append(firstOffset = 102L, largestOffset = 103L, largestTimestamp = RecordBatch.NO_TIMESTAMP,
shallowOffsetOfMaxTimestamp = 102L, MemoryRecords.withTransactionalRecords(102L, CompressionType.NONE,
pid2, producerEpoch, sequence, partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)))
// append non-transactional records
segment.append(firstOffset = 104L, largestOffset = 105L, largestTimestamp = RecordBatch.NO_TIMESTAMP,
shallowOffsetOfMaxTimestamp = 104L, MemoryRecords.withRecords(104L, CompressionType.NONE,
partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)))
// abort the transaction from pid2 (note LSO should be 100L since the txn from pid1 has not completed)
segment.append(firstOffset = 106L, largestOffset = 106L, largestTimestamp = RecordBatch.NO_TIMESTAMP,
shallowOffsetOfMaxTimestamp = 106L, endTxnRecords(ControlRecordType.ABORT, pid2, producerEpoch, offset = 106L))
// commit the transaction from pid1
segment.append(firstOffset = 107L, largestOffset = 107L, largestTimestamp = RecordBatch.NO_TIMESTAMP,
shallowOffsetOfMaxTimestamp = 107L, endTxnRecords(ControlRecordType.COMMIT, pid1, producerEpoch, offset = 107L))
var stateManager = new ProducerStateManager(topicPartition, logDir)
segment.recover(stateManager)
assertEquals(108L, stateManager.mapEndOffset)
var abortedTxns = segment.txnIndex.allAbortedTxns
assertEquals(1, abortedTxns.size)
var abortedTxn = abortedTxns.head
assertEquals(pid2, abortedTxn.producerId)
assertEquals(102L, abortedTxn.firstOffset)
assertEquals(106L, abortedTxn.lastOffset)
assertEquals(100L, abortedTxn.lastStableOffset)
// recover again, but this time assuming the transaction from pid2 began on a previous segment
stateManager = new ProducerStateManager(topicPartition, logDir)
stateManager.loadProducerEntry(new ProducerIdEntry(pid2,
mutable.Queue[BatchMetadata](BatchMetadata(10, 10L, 5, RecordBatch.NO_TIMESTAMP)), producerEpoch, 0, Some(75L)))
segment.recover(stateManager)
assertEquals(108L, stateManager.mapEndOffset)
abortedTxns = segment.txnIndex.allAbortedTxns
assertEquals(1, abortedTxns.size)
abortedTxn = abortedTxns.head
assertEquals(pid2, abortedTxn.producerId)
assertEquals(75L, abortedTxn.firstOffset)
assertEquals(106L, abortedTxn.lastOffset)
assertEquals(100L, abortedTxn.lastStableOffset)
}
private def endTxnRecords(controlRecordType: ControlRecordType,
producerId: Long,
producerEpoch: Short,
offset: Long,
partitionLeaderEpoch: Int = 0,
coordinatorEpoch: Int = 0,
timestamp: Long = RecordBatch.NO_TIMESTAMP): MemoryRecords = {
val marker = new EndTransactionMarker(controlRecordType, coordinatorEpoch)
MemoryRecords.withEndTransactionMarker(offset, timestamp, partitionLeaderEpoch, producerId, producerEpoch, marker)
}
/**
* Create a segment with some data and an index. Then corrupt the index,
* and recover the segment, the entries should all be readable.
*/
@Test
def testRecoveryFixesCorruptTimeIndex() {
val seg = createSegment(0)
for(i <- 0 until 100)
seg.append(i, i, i * 10, i, records(i, i.toString))
val timeIndexFile = seg.timeIndex.file
TestUtils.writeNonsenseToFile(timeIndexFile, 5, timeIndexFile.length.toInt)
seg.recover(new ProducerStateManager(topicPartition, logDir))
for(i <- 0 until 100) {
assertEquals(i, seg.findOffsetByTimestamp(i * 10).get.offset)
if (i < 99)
assertEquals(i + 1, seg.findOffsetByTimestamp(i * 10 + 1).get.offset)
}
}
/**
* Randomly corrupt a log a number of times and attempt recovery.
*/
@Test
def testRecoveryWithCorruptMessage() {
val messagesAppended = 20
for (_ <- 0 until 10) {
val seg = createSegment(0)
for(i <- 0 until messagesAppended)
seg.append(i, i, RecordBatch.NO_TIMESTAMP, -1L, records(i, i.toString))
val offsetToBeginCorruption = TestUtils.random.nextInt(messagesAppended)
// start corrupting somewhere in the middle of the chosen record all the way to the end
val recordPosition = seg.log.searchForOffsetWithSize(offsetToBeginCorruption, 0)
val position = recordPosition.position + TestUtils.random.nextInt(15)
TestUtils.writeNonsenseToFile(seg.log.file, position, (seg.log.file.length - position).toInt)
seg.recover(new ProducerStateManager(topicPartition, logDir))
assertEquals("Should have truncated off bad messages.", (0 until offsetToBeginCorruption).toList,
seg.log.batches.asScala.map(_.lastOffset).toList)
seg.delete()
}
}
/* create a segment with pre allocate */
def createSegment(offset: Long, fileAlreadyExists: Boolean, initFileSize: Int, preallocate: Boolean): LogSegment = {
val tempDir = TestUtils.tempDir()
val seg = new LogSegment(tempDir, offset, 10, 1000, 0, Time.SYSTEM, fileAlreadyExists = fileAlreadyExists,
initFileSize = initFileSize, preallocate = preallocate)
segments += seg
seg
}
/* create a segment with pre allocate, put message to it and verify */
@Test
def testCreateWithInitFileSizeAppendMessage() {
val seg = createSegment(40, false, 512*1024*1024, true)
val ms = records(50, "hello", "there")
seg.append(50, 51, RecordBatch.NO_TIMESTAMP, -1L, ms)
val ms2 = records(60, "alpha", "beta")
seg.append(60, 61, RecordBatch.NO_TIMESTAMP, -1L, ms2)
val read = seg.read(startOffset = 55, maxSize = 200, maxOffset = None)
checkEquals(ms2.records.iterator, read.records.records.iterator)
}
/* create a segment with pre allocate and clearly shut down*/
@Test
def testCreateWithInitFileSizeClearShutdown() {
val tempDir = TestUtils.tempDir()
val seg = new LogSegment(tempDir, 40, 10, 1000, 0, Time.SYSTEM, false, 512*1024*1024, true)
val ms = records(50, "hello", "there")
seg.append(50, 51, RecordBatch.NO_TIMESTAMP, -1L, ms)
val ms2 = records(60, "alpha", "beta")
seg.append(60, 61, RecordBatch.NO_TIMESTAMP, -1L, ms2)
val read = seg.read(startOffset = 55, maxSize = 200, maxOffset = None)
checkEquals(ms2.records.iterator, read.records.records.iterator)
val oldSize = seg.log.sizeInBytes()
val oldPosition = seg.log.channel.position
val oldFileSize = seg.log.file.length
assertEquals(512*1024*1024, oldFileSize)
seg.close()
//After close, file should be trimmed
assertEquals(oldSize, seg.log.file.length)
val segReopen = new LogSegment(tempDir, 40, 10, 1000, 0, Time.SYSTEM, true, 512*1024*1024, true)
segments += segReopen
val readAgain = segReopen.read(startOffset = 55, maxSize = 200, maxOffset = None)
checkEquals(ms2.records.iterator, readAgain.records.records.iterator)
val size = segReopen.log.sizeInBytes()
val position = segReopen.log.channel.position
val fileSize = segReopen.log.file.length
assertEquals(oldPosition, position)
assertEquals(oldSize, size)
assertEquals(size, fileSize)
}
@Test
def shouldTruncateEvenIfOffsetPointsToAGapInTheLog() {
val seg = createSegment(40)
val offset = 40
def records(offset: Long, record: String): MemoryRecords =
MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, offset, CompressionType.NONE, TimestampType.CREATE_TIME,
new SimpleRecord(offset * 1000, record.getBytes))
//Given two messages with a gap between them (e.g. mid offset compacted away)
val ms1 = records(offset, "first message")
seg.append(offset, offset, RecordBatch.NO_TIMESTAMP, -1L, ms1)
val ms2 = records(offset + 3, "message after gap")
seg.append(offset + 3, offset + 3, RecordBatch.NO_TIMESTAMP, -1L, ms2)
// When we truncate to an offset without a corresponding log entry
seg.truncateTo(offset + 1)
//Then we should still truncate the record that was present (i.e. offset + 3 is gone)
val log = seg.read(offset, None, 10000)
assertEquals(offset, log.records.batches.iterator.next().baseOffset())
assertEquals(1, log.records.batches.asScala.size)
}
}
|
themarkypantz/kafka
|
core/src/test/scala/unit/kafka/log/LogSegmentTest.scala
|
Scala
|
apache-2.0
| 18,889 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.operators
import java.lang.Iterable
import org.apache.flink.api.common.functions._
import org.apache.flink.api.common.operators.Order
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala.util.CollectionDataSets
import org.apache.flink.api.scala.util.CollectionDataSets.{CrazyNested, POJO, MutableTuple3,
CustomType}
import org.apache.flink.compiler.PactCompiler
import org.apache.flink.configuration.Configuration
import org.apache.flink.test.util.JavaProgramTestBase
import org.apache.flink.util.Collector
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import scala.collection.JavaConverters._
import scala.collection.mutable
import org.apache.flink.api.scala._
object GroupReduceProgs {
var NUM_PROGRAMS: Int = 26
def runProgram(progId: Int, resultPath: String, onCollection: Boolean): String = {
progId match {
case 1 =>
/*
* check correctness of groupReduce on tuples with key field selector
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.get3TupleDataSet(env)
val reduceDs = ds.groupBy(1).reduceGroup {
in =>
in.map(t => (t._1, t._2)).reduce((l, r) => (l._1 + r._1, l._2))
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1\\n" + "5,2\\n" + "15,3\\n" + "34,4\\n" + "65,5\\n" + "111,6\\n"
case 2 =>
/*
* check correctness of groupReduce on tuples with multiple key field selector
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets
.get5TupleDataSet(env)
val reduceDs = ds.groupBy(4, 0).reduceGroup {
in =>
val (i, l, l2) = in
.map( t => (t._1, t._2, t._5))
.reduce((l, r) => (l._1, l._2 + r._2, l._3))
(i, l, 0, "P-)", l2)
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1,0,P-),1\\n" + "2,3,0,P-),1\\n" + "2,2,0,P-),2\\n" + "3,9,0,P-),2\\n" + "3,6,0," +
"P-),3\\n" + "4,17,0,P-),1\\n" + "4,17,0,P-),2\\n" + "5,11,0,P-),1\\n" + "5,29,0,P-)," +
"2\\n" + "5,25,0,P-),3\\n"
case 3 =>
/*
* check correctness of groupReduce on tuples with key field selector and group sorting
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.get3TupleDataSet(env)
val reduceDs = ds.groupBy(1).sortGroup(2, Order.ASCENDING).reduceGroup {
in =>
in.reduce((l, r) => (l._1 + r._1, l._2, l._3 + "-" + r._3))
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1,Hi\\n" +
"5,2,Hello-Hello world\\n" +
"15,3,Hello world, how are you?-I am fine.-Luke Skywalker\\n" +
"34,4,Comment#1-Comment#2-Comment#3-Comment#4\\n" +
"65,5,Comment#5-Comment#6-Comment#7-Comment#8-Comment#9\\n" +
"111,6,Comment#10-Comment#11-Comment#12-Comment#13-Comment#14-Comment#15\\n"
case 4 =>
/*
* check correctness of groupReduce on tuples with key extractor
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.get3TupleDataSet(env)
val reduceDs = ds.groupBy(_._2).reduceGroup {
in =>
in.map(t => (t._1, t._2)).reduce((l, r) => (l._1 + r._1, l._2))
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1\\n" + "5,2\\n" + "15,3\\n" + "34,4\\n" + "65,5\\n" + "111,6\\n"
case 5 =>
/*
* check correctness of groupReduce on custom type with type extractor
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.getCustomTypeDataSet(env)
val reduceDs = ds.groupBy(_.myInt).reduceGroup {
in =>
val iter = in.toIterator
val o = new CustomType
val c = iter.next()
o.myString = "Hello!"
o.myInt = c.myInt
o.myLong = c.myLong
while (iter.hasNext) {
val next = iter.next()
o.myLong += next.myLong
}
o
}
reduceDs.writeAsText(resultPath)
env.execute()
"1,0,Hello!\\n" + "2,3,Hello!\\n" + "3,12,Hello!\\n" + "4,30,Hello!\\n" + "5,60," +
"Hello!\\n" + "6,105,Hello!\\n"
case 6 =>
/*
* check correctness of all-groupreduce for tuples
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.get3TupleDataSet(env)
val reduceDs = ds.reduceGroup {
in =>
var i = 0
var l = 0L
for (t <- in) {
i += t._1
l += t._2
}
(i, l, "Hello World")
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"231,91,Hello World\\n"
case 7 =>
/*
* check correctness of all-groupreduce for custom types
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.getCustomTypeDataSet(env)
val reduceDs = ds.reduceGroup {
in =>
val o = new CustomType(0, 0, "Hello!")
for (t <- in) {
o.myInt += t.myInt
o.myLong += t.myLong
}
o
}
reduceDs.writeAsText(resultPath)
env.execute()
"91,210,Hello!"
case 8 =>
/*
* check correctness of groupReduce with broadcast set
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val intDs = CollectionDataSets.getIntDataSet(env)
val ds = CollectionDataSets.get3TupleDataSet(env)
val reduceDs = ds.groupBy(1).reduceGroup(
new RichGroupReduceFunction[(Int, Long, String), (Int, Long, String)] {
private var f2Replace = ""
override def open(config: Configuration) {
val ints = this.getRuntimeContext.getBroadcastVariable[Int]("ints").asScala
f2Replace = ints.sum + ""
}
override def reduce(
values: Iterable[(Int, Long, String)],
out: Collector[(Int, Long, String)]): Unit = {
var i: Int = 0
var l: Long = 0L
for (t <- values.asScala) {
i += t._1
l = t._2
}
out.collect((i, l, f2Replace))
}
}).withBroadcastSet(intDs, "ints")
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1,55\\n" + "5,2,55\\n" + "15,3,55\\n" + "34,4,55\\n" + "65,5,55\\n" + "111,6,55\\n"
case 9 =>
/*
* check correctness of groupReduce if UDF returns input objects multiple times and
* changes it in between
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.get3TupleDataSet(env)
.map( t => MutableTuple3(t._1, t._2, t._3) )
val reduceDs = ds.groupBy(1).reduceGroup {
(in, out: Collector[MutableTuple3[Int, Long, String]]) =>
for (t <- in) {
if (t._1 < 4) {
t._3 = "Hi!"
t._1 += 10
out.collect(t)
t._1 += 10
t._3 = "Hi again!"
out.collect(t)
}
}
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"11,1,Hi!\\n" + "21,1,Hi again!\\n" + "12,2,Hi!\\n" + "22,2,Hi again!\\n" + "13,2," +
"Hi!\\n" + "23,2,Hi again!\\n"
case 10 =>
/*
* check correctness of groupReduce on custom type with key extractor and combine
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.getCustomTypeDataSet(env)
@RichGroupReduceFunction.Combinable
class CustomTypeGroupReduceWithCombine
extends RichGroupReduceFunction[CustomType, CustomType] {
override def combine(values: Iterable[CustomType], out: Collector[CustomType]): Unit = {
val o = new CustomType()
for (c <- values.asScala) {
o.myInt = c.myInt
o.myLong += c.myLong
o.myString = "test" + c.myInt
}
out.collect(o)
}
override def reduce(values: Iterable[CustomType], out: Collector[CustomType]): Unit = {
val o = new CustomType(0, 0, "")
for (c <- values.asScala) {
o.myInt = c.myInt
o.myLong += c.myLong
o.myString = c.myString
}
out.collect(o)
}
}
val reduceDs = ds.groupBy(_.myInt).reduceGroup(new CustomTypeGroupReduceWithCombine)
reduceDs.writeAsText(resultPath)
env.execute()
if (onCollection) {
null
}
else {
"1,0,test1\\n" + "2,3,test2\\n" + "3,12,test3\\n" + "4,30,test4\\n" + "5,60," +
"test5\\n" + "6,105,test6\\n"
}
case 11 =>
/*
* check correctness of groupReduce on tuples with combine
*/
val env = ExecutionEnvironment.getExecutionEnvironment
// important because it determines how often the combiner is called
env.setDegreeOfParallelism(2)
val ds = CollectionDataSets.get3TupleDataSet(env)
@RichGroupReduceFunction.Combinable
class Tuple3GroupReduceWithCombine
extends RichGroupReduceFunction[(Int, Long, String), (Int, String)] {
override def combine(
values: Iterable[(Int, Long, String)],
out: Collector[(Int, Long, String)]): Unit = {
var i = 0
var l = 0L
var s = ""
for (t <- values.asScala) {
i += t._1
l = t._2
s = "test" + t._2
}
out.collect((i, l, s))
}
override def reduce(
values: Iterable[(Int, Long, String)],
out: Collector[(Int, String)]): Unit = {
var i = 0
var s = ""
for (t <- values.asScala) {
i += t._1
s = t._3
}
out.collect((i, s))
}
}
val reduceDs = ds.groupBy(1).reduceGroup(new Tuple3GroupReduceWithCombine)
reduceDs.writeAsCsv(resultPath)
env.execute()
if (onCollection) {
null
}
else {
"1,test1\\n" + "5,test2\\n" + "15,test3\\n" + "34,test4\\n" + "65,test5\\n" + "111," +
"test6\\n"
}
// all-groupreduce with combine
case 12 =>
/*
* check correctness of all-groupreduce for tuples with combine
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.get3TupleDataSet(env).map(t => t).setParallelism(4)
val cfg: Configuration = new Configuration
cfg.setString(PactCompiler.HINT_SHIP_STRATEGY, PactCompiler.HINT_SHIP_STRATEGY_REPARTITION)
@RichGroupReduceFunction.Combinable
class Tuple3AllGroupReduceWithCombine
extends RichGroupReduceFunction[(Int, Long, String), (Int, String)] {
override def combine(
values: Iterable[(Int, Long, String)],
out: Collector[(Int, Long, String)]): Unit = {
var i = 0
var l = 0L
var s = ""
for (t <- values.asScala) {
i += t._1
l += t._2
s += "test"
}
out.collect((i, l, s))
}
override def reduce(
values: Iterable[(Int, Long, String)],
out: Collector[(Int, String)]): Unit = {
var i = 0
var s = ""
for (t <- values.asScala) {
i += t._1 + t._2.toInt
s += t._3
}
out.collect((i, s))
}
}
val reduceDs = ds.reduceGroup(new Tuple3AllGroupReduceWithCombine).withParameters(cfg)
reduceDs.writeAsCsv(resultPath)
env.execute()
if (onCollection) {
null
}
else {
"322," +
"testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest\\n"
}
case 13 =>
/*
* check correctness of groupReduce with descending group sort
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.get3TupleDataSet(env)
val reduceDs = ds.groupBy(1).sortGroup(2, Order.DESCENDING).reduceGroup {
in =>
in.reduce((l, r) => (l._1 + r._1, l._2, l._3 + "-" + r._3))
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1,Hi\\n" + "5,2,Hello world-Hello\\n" + "15,3,Luke Skywalker-I am fine.-Hello " +
"world, how are you?\\n" + "34,4,Comment#4-Comment#3-Comment#2-Comment#1\\n" + "65,5," +
"Comment#9-Comment#8-Comment#7-Comment#6-Comment#5\\n" + "111,6," +
"Comment#15-Comment#14-Comment#13-Comment#12-Comment#11-Comment#10\\n"
case 14 =>
/*
* check correctness of groupReduce on tuples with tuple-returning key selector
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets
.get5TupleDataSet(env)
val reduceDs = ds.groupBy( t => (t._1, t._5)).reduceGroup {
in =>
val (i, l, l2) = in
.map( t => (t._1, t._2, t._5))
.reduce((l, r) => (l._1, l._2 + r._2, l._3))
(i, l, 0, "P-)", l2)
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1,0,P-),1\\n" + "2,3,0,P-),1\\n" + "2,2,0,P-),2\\n" + "3,9,0,P-),2\\n" + "3,6,0," +
"P-),3\\n" + "4,17,0,P-),1\\n" + "4,17,0,P-),2\\n" + "5,11,0,P-),1\\n" + "5,29,0,P-)," +
"2\\n" + "5,25,0,P-),3\\n"
case 15 =>
/*
* check that input of combiner is also sorted for combinable groupReduce with group
* sorting
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.get3TupleDataSet(env).map { t =>
MutableTuple3(t._1, t._2, t._3)
}
@RichGroupReduceFunction.Combinable
class OrderCheckingCombinableReduce
extends RichGroupReduceFunction[MutableTuple3[Int, Long, String],
MutableTuple3[Int, Long, String]] {
def reduce(
values: Iterable[MutableTuple3[Int, Long, String]],
out: Collector[MutableTuple3[Int, Long, String]]) {
val it = values.iterator()
var t = it.next()
val i = t._1
out.collect(t)
while (it.hasNext) {
t = it.next()
if (i > t._1 || (t._3 == "INVALID-ORDER!")) {
t._3 = "INVALID-ORDER!"
out.collect(t)
}
}
}
override def combine(
values: Iterable[MutableTuple3[Int, Long, String]],
out: Collector[MutableTuple3[Int, Long, String]]) {
val it = values.iterator()
var t = it.next
val i: Int = t._1
out.collect(t)
while (it.hasNext) {
t = it.next
if (i > t._1) {
t._3 = "INVALID-ORDER!"
out.collect(t)
}
}
}
}
val reduceDs = ds.groupBy(1)
.sortGroup(0, Order.ASCENDING).reduceGroup(new OrderCheckingCombinableReduce)
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1,Hi\\n" + "2,2,Hello\\n" + "4,3,Hello world, how are you?\\n" + "7,4," +
"Comment#1\\n" + "11,5,Comment#5\\n" + "16,6,Comment#10\\n"
case 16 =>
/*
* Deep nesting test
* + null value in pojo
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets
.getCrazyNestedDataSet(env)
val reduceDs = ds.groupBy("nest_Lvl1.nest_Lvl2.nest_Lvl3.nest_Lvl4.f1nal")
.reduceGroup {
in =>
var c = 0
var n: String = null
for (v <- in) {
c += 1
n = v.nest_Lvl1.nest_Lvl2.nest_Lvl3.nest_Lvl4.f1nal
}
(n, c)
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"aa,1\\nbb,2\\ncc,3\\n"
case 17 =>
        // We don't have that test but keep the numbering compatible with the Java GroupReduceITCase
val env = ExecutionEnvironment.getExecutionEnvironment
env.fromElements("Hello world").writeAsText(resultPath)
env.execute()
"Hello world"
case 18 =>
/*
* Test Pojo containing a Writable and Tuples
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets
.getPojoContainingTupleAndWritable(env)
val reduceDs = ds.groupBy("hadoopFan", "theTuple.*").reduceGroup(new
GroupReduceFunction[CollectionDataSets.PojoContainingTupleAndWritable, Integer] {
def reduce(
values: Iterable[CollectionDataSets.PojoContainingTupleAndWritable],
out: Collector[Integer]) {
var c: Int = 0
for (v <- values.asScala) {
c += 1
}
out.collect(c)
}
})
reduceDs.writeAsText(resultPath)
env.execute()
"1\\n5\\n"
case 19 =>
/*
* Test Tuple containing pojos and regular fields
*/
val env = ExecutionEnvironment.getExecutionEnvironment
val ds = CollectionDataSets.getTupleContainingPojos(env)
val reduceDs = ds.groupBy("_1", "_2.*").reduceGroup(
new GroupReduceFunction[(Int, CrazyNested, POJO), Int] {
def reduce(values: Iterable[(Int, CrazyNested, POJO)], out: Collector[Int]) {
var c: Int = 0
for (v <- values.asScala) {
c += 1
}
out.collect(c)
}
})
reduceDs.writeAsText(resultPath)
env.execute()
"3\\n1\\n"
case 20 =>
/*
* Test string-based definition on group sort, based on test:
* check correctness of groupReduce with descending group sort
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.get3TupleDataSet(env)
val reduceDs = ds.groupBy(1)
.sortGroup("_3", Order.DESCENDING)
.reduceGroup {
in =>
in.reduce((l, r) => (l._1 + r._1, l._2, l._3 + "-" + r._3))
}
reduceDs.writeAsCsv(resultPath)
env.execute()
"1,1,Hi\\n" + "5,2,Hello world-Hello\\n" + "15,3,Luke Skywalker-I am fine.-Hello " +
"world, how are you?\\n" + "34,4,Comment#4-Comment#3-Comment#2-Comment#1\\n" + "65,5," +
"Comment#9-Comment#8-Comment#7-Comment#6-Comment#5\\n" + "111,6," +
"Comment#15-Comment#14-Comment#13-Comment#12-Comment#11-Comment#10\\n"
case 21 =>
/*
* Test int-based definition on group sort, for (full) nested Tuple
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.getGroupSortedNestedTupleDataSet(env)
val reduceDs = ds.groupBy("_2").sortGroup(0, Order.DESCENDING)
.reduceGroup(new NestedTupleReducer)
reduceDs.writeAsText(resultPath)
env.execute()
"a--(2,1)-(1,3)-(1,2)-\\n" + "b--(2,2)-\\n" + "c--(4,9)-(3,6)-(3,3)-\\n"
case 22 =>
/*
* Test int-based definition on group sort, for (partial) nested Tuple ASC
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.getGroupSortedNestedTupleDataSet(env)
val reduceDs = ds.groupBy("_2")
.sortGroup("_1._1", Order.ASCENDING)
.sortGroup("_1._2", Order.ASCENDING)
.reduceGroup(new NestedTupleReducer)
reduceDs.writeAsText(resultPath)
env.execute()
"a--(1,2)-(1,3)-(2,1)-\\n" + "b--(2,2)-\\n" + "c--(3,3)-(3,6)-(4,9)-\\n"
case 23 =>
/*
* Test string-based definition on group sort, for (partial) nested Tuple DESC
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.getGroupSortedNestedTupleDataSet(env)
val reduceDs = ds.groupBy("_2")
.sortGroup("_1._1", Order.DESCENDING)
.sortGroup("_1._2", Order.ASCENDING)
.reduceGroup(new NestedTupleReducer)
reduceDs.writeAsText(resultPath)
env.execute()
"a--(2,1)-(1,2)-(1,3)-\\n" + "b--(2,2)-\\n" + "c--(4,9)-(3,3)-(3,6)-\\n"
case 24 =>
/*
* Test string-based definition on group sort, for two grouping keys
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.getGroupSortedNestedTupleDataSet(env)
val reduceDs = ds.groupBy("_2")
.sortGroup("_1._1", Order.DESCENDING)
.sortGroup("_1._2", Order.DESCENDING)
.reduceGroup(new NestedTupleReducer)
reduceDs.writeAsText(resultPath)
env.execute()
"a--(2,1)-(1,3)-(1,2)-\\n" + "b--(2,2)-\\n" + "c--(4,9)-(3,6)-(3,3)-\\n"
case 25 =>
/*
* Test string-based definition on group sort, for two grouping keys with Pojos
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.getGroupSortedPojoContainingTupleAndWritable(env)
val reduceDs = ds.groupBy("hadoopFan")
.sortGroup("theTuple._1", Order.DESCENDING)
.sortGroup("theTuple._2", Order.DESCENDING)
.reduceGroup(
new GroupReduceFunction[CollectionDataSets.PojoContainingTupleAndWritable, String] {
def reduce(
values: Iterable[CollectionDataSets.PojoContainingTupleAndWritable],
out: Collector[String]) {
var once: Boolean = false
val concat: StringBuilder = new StringBuilder
for (value <- values.asScala) {
if (!once) {
concat.append(value.hadoopFan.get)
concat.append("---")
once = true
}
concat.append(value.theTuple)
concat.append("-")
}
out.collect(concat.toString())
}
})
reduceDs.writeAsText(resultPath)
env.execute()
"1---(10,100)-\\n" + "2---(30,600)-(30,400)-(30,200)-(20,201)-(20,200)-\\n"
case 26 =>
/*
* Test grouping with pojo containing multiple pojos (was a bug)
*/
val env = ExecutionEnvironment.getExecutionEnvironment
env.setDegreeOfParallelism(1)
val ds = CollectionDataSets.getPojoWithMultiplePojos(env)
val reduceDs = ds.groupBy("p2.a2")
.reduceGroup(
new GroupReduceFunction[CollectionDataSets.PojoWithMultiplePojos, String] {
def reduce(
values: Iterable[CollectionDataSets.PojoWithMultiplePojos],
out: Collector[String]) {
val concat: StringBuilder = new StringBuilder
for (value <- values.asScala) {
concat.append(value.p2.a2)
}
out.collect(concat.toString())
}
})
reduceDs.writeAsText(resultPath)
env.execute()
"b\\nccc\\nee\\n"
case _ =>
throw new IllegalArgumentException("Invalid program id")
}
}
}
@RunWith(classOf[Parameterized])
class GroupReduceITCase(config: Configuration) extends JavaProgramTestBase(config) {
private val curProgId: Int = config.getInteger("ProgramId", -1)
private var resultPath: String = null
private var expectedResult: String = null
protected override def preSubmit(): Unit = {
resultPath = getTempDirPath("result")
}
protected def testProgram(): Unit = {
expectedResult = GroupReduceProgs.runProgram(curProgId, resultPath, isCollectionExecution)
}
protected override def postSubmit(): Unit = {
if (expectedResult != null) compareResultsByLinesInMemory(expectedResult, resultPath)
}
}
object GroupReduceITCase {
@Parameters
def getConfigurations: java.util.Collection[Array[AnyRef]] = {
val configs = mutable.MutableList[Array[AnyRef]]()
for (i <- 1 to GroupReduceProgs.NUM_PROGRAMS) {
val config = new Configuration()
config.setInteger("ProgramId", i)
configs += Array(config)
}
configs.asJavaCollection
}
}
class NestedTupleReducer extends GroupReduceFunction[((Int, Int), String), String] {
def reduce(values: Iterable[((Int, Int), String)], out: Collector[String]) {
var once: Boolean = false
val concat: StringBuilder = new StringBuilder
for (value <- values.asScala) {
if (!once) {
concat.append(value._2).append("--")
once = true
}
concat.append(value._1)
concat.append("-")
}
out.collect(concat.toString())
}
}
|
citlab/vs.msc.ws14
|
flink-0-7-custom/flink-tests/src/test/scala/org/apache/flink/api/scala/operators/GroupReduceITCase.scala
|
Scala
|
apache-2.0
| 26,761 |
import java.util.List
trait Foo {
val x: List[String] = null
}
abstract class Bar extends Foo
|
som-snytt/dotty
|
tests/pos-java-interop-separate/forwarder/Foo_1.scala
|
Scala
|
apache-2.0
| 98 |