code | repo_name | path | language | license | size
---|---|---|---|---|---|
stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M |
package org.adridadou.ethereum.propeller.converters.e2e
import java.math.BigInteger
import org.scalacheck.Arbitrary._
import org.scalacheck.Prop._
import org.scalatest.check.Checkers
import org.scalatest.{FlatSpec, Matchers}
import scala.util.Try
/**
* Created by davidroon on 26.03.17.
* This code is released under Apache 2 license
*/
class NumberTest extends FlatSpec with Matchers with Checkers with SolidityConversionHelper {
private val contract = contractObject[NumberContract]
"Number type" should "convert big integer from and to the same value" in {
check(forAll(arbitrary[BigInt])(checkEncode(contract, _)))
}
it should "convert Integer from and to the same value" in {
check(forAll(arbitrary[Int])(checkEncode(contract, _)))
}
it should "convert Long from and to the same value" in {
check(forAll(arbitrary[Long])(checkEncode(contract, _)))
}
it should "convert byte from and to the same value" in {
check(forAll(arbitrary[Byte])(checkEncodeSmall(contract, _)))
}
private def checkEncode(contract: NumberContract, seed: Long) = {
if (seed < 0) {
Try(contract.uintFunc(seed)).isFailure shouldEqual true
} else {
contract.uintFunc(seed) shouldEqual seed
}
contract.intFunc(seed) shouldEqual seed
true
}
private def checkEncode(contract: NumberContract, seed: Int) = {
if (seed < 0) {
Try(contract.uintFunc(seed.asInstanceOf[Integer])).isFailure shouldEqual true
} else {
contract.uintFunc(seed.asInstanceOf[Integer]) shouldEqual seed
}
contract.intFunc(seed.asInstanceOf[Integer]) shouldEqual seed
true
}
private def checkEncodeSmall(contract: NumberContract, seed: Byte) = {
if (seed < 0) {
Try(contract.smallUintFunc(seed.asInstanceOf[java.lang.Byte])).isFailure shouldEqual true
} else {
contract.smallUintFunc(seed.asInstanceOf[java.lang.Byte]) shouldEqual seed
}
true
}
private def checkEncode(contract: NumberContract, seed: BigInt) = {
val biValue = seed.bigInteger
if (biValue.signum() == -1) {
Try(contract.uintFunc(biValue)).isFailure shouldEqual true
} else {
contract.uintFunc(biValue) shouldEqual biValue
}
contract.intFunc(biValue) shouldEqual biValue
true
}
}
trait NumberContract {
def intFunc(intValue: BigInteger): BigInteger
def intFunc(intValue: Integer): Integer
def intFunc(intValue: java.lang.Long): java.lang.Long
def uintFunc(intValue: BigInteger): BigInteger
def uintFunc(intValue: Integer): Integer
def uintFunc(intValue: java.lang.Long): java.lang.Long
def smallUintFunc(byteValue: java.lang.Byte): java.lang.Byte
}
|
adridadou/eth-propeller-core
|
src/test/scala/org/adridadou/ethereum/propeller/converters/e2e/NumberTest.scala
|
Scala
|
apache-2.0
| 2,680 |
package breeze.linalg.operators
/*
Copyright 2012 Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/**
* Marker sealed trait for some operation, be it UnaryOp, BinaryOp, or
* BinaryUpdateOp.
*
* @author dramage
*/
sealed trait OpType
/**
* Type marker for BinaryOp A :+ B and BinaryUpdateOp A :+= B.
*
* @author dramage
*/
sealed trait OpAdd extends OpType
object OpAdd extends OpAdd
/**
* Type marker for BinaryOp A :- B and BinaryUpdateOp A :-= B.
*
* @author dramage
*/
sealed trait OpSub extends OpType
object OpSub extends OpSub
/**
* Type marker for BinaryOp A :* B and BinaryUpdateOp A :*= B.
*
* @author dramage
*/
sealed trait OpMulScalar extends OpType
object OpMulScalar extends OpMulScalar
/**
* Type marker for BinaryOp A :/ B and BinaryUpdateOp A:/= B.
*
* @author dramage
*/
sealed trait OpDiv extends OpType
object OpDiv extends OpDiv
/**
* Type marker for BinaryOp A :% B and BinaryUpdateOp A:%= B.
*
* @author dramage
*/
sealed trait OpMod extends OpType
object OpMod extends OpMod
/**
* Type marker for BinaryOp A :^ B and BinaryUpdateOp A:^= B.
*
* @author dramage
*/
sealed trait OpPow extends OpType
object OpPow extends OpPow
/**
* Type marker for BinaryOp A :< B.
*
* @author dramage
*/
sealed trait OpLT extends OpType
object OpLT extends OpLT
/**
* Type marker for BinaryOp A :<= B.
*
* @author dramage
*/
sealed trait OpLTE extends OpType
object OpLTE extends OpLTE
/**
* Type marker for BinaryOp A :> B.
*
* @author dramage
*/
sealed trait OpGT extends OpType
object OpGT extends OpGT
/**
* Type marker for BinaryOp A :>= B.
*
* @author dramage
*/
sealed trait OpGTE extends OpType
object OpGTE extends OpGTE
/**
* Type marker for BinaryOp A :== B.
*
* @author dramage
*/
sealed trait OpEq extends OpType
object OpEq extends OpEq
/**
* Type marker for BinaryOp A :!= B.
*
* @author dramage
*/
sealed trait OpNe extends OpType
object OpNe extends OpNe
/**
* Type marker for BinaryUpdateOp A := B.
*
* @author dramage
*/
sealed trait OpSet extends OpType
object OpSet extends OpSet
/**
* Type marker for BinaryOp A :&& B
*
* @author dramage
*/
sealed trait OpAnd extends OpType
object OpAnd extends OpAnd
/**
* Type marker for BinaryOp A :|| B
*
* @author dramage
*/
sealed trait OpOr extends OpType
object OpOr extends OpOr
/**
* Type marker for BinaryOp A :^^ B
*
* @author dramage
*/
sealed trait OpXor extends OpType
object OpXor extends OpXor
/**
* Type marker for UnaryOp -A.
*
* @author dramage
*/
sealed trait OpNeg extends OpType
object OpNeg extends OpNeg
/**
* Type marker for UnaryOp !A.
*
* @author dramage
*/
sealed trait OpNot extends OpType
object OpNot extends OpNot
/**
* Type marker for inner (dot) product of A and B.
*
* @author dramage
*/
sealed trait OpMulInner extends OpType
object OpMulInner extends OpMulInner
/**
* Type marker for BinaryOp A \\ B when A is a matrix.
*
* @author dramage
*/
sealed trait OpSolveMatrixBy extends OpType
object OpSolveMatrixBy extends OpSolveMatrixBy
/**
* Type marker for matrix multiplication of A and B.
*
* @author dramage
*/
sealed trait OpMulMatrix extends OpType
object OpMulMatrix extends OpMulMatrix
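// Illustrative sketch (not part of Breeze's sources): these sealed markers are typically used
// as phantom type parameters so that operator implementations can be selected via implicit
// resolution. The `BinaryOpLike` type class below is hypothetical; Breeze's real type class
// names and signatures differ.
trait BinaryOpLike[Op <: OpType, A, B, R] {
  def apply(a: A, b: B): R
}
object BinaryOpLike {
  // An addition instance for Ints, keyed by the OpAdd marker.
  implicit val intAdd: BinaryOpLike[OpAdd, Int, Int, Int] =
    new BinaryOpLike[OpAdd, Int, Int, Int] { def apply(a: Int, b: Int): Int = a + b }
  // Callers pick the operation they need by naming its marker trait.
  def add[A, B, R](a: A, b: B)(implicit op: BinaryOpLike[OpAdd, A, B, R]): R = op(a, b)
}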
|
ktakagaki/breeze
|
src/main/scala/breeze/linalg/operators/OpType.scala
|
Scala
|
apache-2.0
| 3,720 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.catalog
import org.apache.spark.scheduler.SparkListenerEvent
/**
* Event emitted by the external catalog when it is modified. Events are either fired before or
* after the modification (the event should document this).
*/
trait ExternalCatalogEvent extends SparkListenerEvent
/**
* Listener interface for external catalog modification events.
*/
trait ExternalCatalogEventListener {
def onEvent(event: ExternalCatalogEvent): Unit
}
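// Illustrative sketch (not part of Spark's sources): a listener implementation that reacts to
// the concrete catalog events defined below, e.g. to log table lifecycle changes.
class LoggingCatalogListener extends ExternalCatalogEventListener {
  override def onEvent(event: ExternalCatalogEvent): Unit = event match {
    case CreateTableEvent(db, table) => println(s"table created: $db.$table")
    case DropTableEvent(db, table)   => println(s"table dropped: $db.$table")
    case _                           => () // ignore other catalog events
  }
}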
/**
* Event fired when a database is created or dropped.
*/
trait DatabaseEvent extends ExternalCatalogEvent {
/**
* Database of the object that was touched.
*/
val database: String
}
/**
* Event fired before a database is created.
*/
case class CreateDatabasePreEvent(database: String) extends DatabaseEvent
/**
* Event fired after a database has been created.
*/
case class CreateDatabaseEvent(database: String) extends DatabaseEvent
/**
* Event fired before a database is dropped.
*/
case class DropDatabasePreEvent(database: String) extends DatabaseEvent
/**
* Event fired after a database has been dropped.
*/
case class DropDatabaseEvent(database: String) extends DatabaseEvent
/**
* Event fired when a table is created, dropped or renamed.
*/
trait TableEvent extends DatabaseEvent {
/**
* Name of the table that was touched.
*/
val name: String
}
/**
* Event fired before a table is created.
*/
case class CreateTablePreEvent(database: String, name: String) extends TableEvent
/**
* Event fired after a table has been created.
*/
case class CreateTableEvent(database: String, name: String) extends TableEvent
/**
* Event fired before a table is dropped.
*/
case class DropTablePreEvent(database: String, name: String) extends TableEvent
/**
* Event fired after a table has been dropped.
*/
case class DropTableEvent(database: String, name: String) extends TableEvent
/**
* Event fired before a table is renamed.
*/
case class RenameTablePreEvent(
database: String,
name: String,
newName: String)
extends TableEvent
/**
* Event fired after a table has been renamed.
*/
case class RenameTableEvent(
database: String,
name: String,
newName: String)
extends TableEvent
/**
* Event fired when a function is created, dropped or renamed.
*/
trait FunctionEvent extends DatabaseEvent {
/**
* Name of the function that was touched.
*/
val name: String
}
/**
* Event fired before a function is created.
*/
case class CreateFunctionPreEvent(database: String, name: String) extends FunctionEvent
/**
* Event fired after a function has been created.
*/
case class CreateFunctionEvent(database: String, name: String) extends FunctionEvent
/**
* Event fired before a function is dropped.
*/
case class DropFunctionPreEvent(database: String, name: String) extends FunctionEvent
/**
* Event fired after a function has been dropped.
*/
case class DropFunctionEvent(database: String, name: String) extends FunctionEvent
/**
* Event fired before a function is altered.
*/
case class AlterFunctionPreEvent(database: String, name: String) extends FunctionEvent
/**
* Event fired after a function has been altered.
*/
case class AlterFunctionEvent(database: String, name: String) extends FunctionEvent
/**
* Event fired before a function is renamed.
*/
case class RenameFunctionPreEvent(
database: String,
name: String,
newName: String)
extends FunctionEvent
/**
* Event fired after a function has been renamed.
*/
case class RenameFunctionEvent(
database: String,
name: String,
newName: String)
extends FunctionEvent
|
minixalpha/spark
|
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/events.scala
|
Scala
|
apache-2.0
| 4,432 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.fnothaft.gnocchi.cli
import java.io.File
import net.fnothaft.gnocchi.association._
import net.fnothaft.gnocchi.models._
import net.fnothaft.gnocchi.gnocchiModel._
import net.fnothaft.gnocchi.sql.GnocchiContext._
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
import org.bdgenomics.utils.cli._
import org.kohsuke.args4j.{ Argument, Option => Args4jOption }
import org.bdgenomics.adam.cli.Vcf2ADAM
import org.apache.commons.io.FileUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.functions._
//import net.fnothaft.gnocchi.association.Ensembler TODO: pull in the ensembler code or predict won't work.
import net.fnothaft.gnocchi.gnocchiModel.BuildAdditiveLogisticGnocchiModel
import scala.collection.mutable.ListBuffer
object ConstructGnocchiModel extends BDGCommandCompanion {
val commandName = "ConstructGnocchiModel"
val commandDescription = "Fill this out later!!"
def apply(cmdLine: Array[String]) = {
new ConstructGnocchiModel(Args4j[ConstructGnocchiModelArgs](cmdLine))
}
}
class ConstructGnocchiModelArgs extends RegressPhenotypesArgs {
@Args4jOption(required = true, name = "-saveModelTo", usage = "The location to save model to.")
var saveTo: String = _
@Args4jOption(required = false, name = "SNPS", usage = "The IDs of the SNPs to include in the model, if not all.")
var snps: String = _
}
class ConstructGnocchiModel(protected val args: ConstructGnocchiModelArgs) extends BDGSparkCommand[ConstructGnocchiModelArgs] {
override val companion = ConstructGnocchiModel
override def run(sc: SparkContext) {
// Load in genotype data filtering out any SNPs not provided in command line
val genotypeStates = loadGenotypes(sc)
// instantiate regressPhenotypes obj
val regPheno = new RegressPhenotypes(args)
// Load in phenotype data
val phenotypes = regPheno.loadPhenotypes(sc)
// build model
val (model, assocs): (GnocchiModel, RDD[Association]) = buildModel[Array[Double]](genotypeStates.rdd, phenotypes, sc)
// save the associations
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
regPheno.logResults(assocs.toDS, sc)
// save the model
SaveGnocchiModel(model, args.saveTo)
}
def buildModel[T](genotypeStates: RDD[GenotypeState],
phenotypes: RDD[Phenotype[T]],
sc: SparkContext): (GnocchiModel, RDD[Association]) = {
val (model, assocs) = args.associationType match {
// case "ADDITIVE_LINEAR" => BuildAdditiveLinearGnocchiModel(genotypeStates, phenotypes, sc)
case "ADDITIVE_LOGISTIC" => BuildAdditiveLogisticGnocchiModel(genotypeStates, phenotypes, sc)
// case "DOMINANT_LINEAR" => BuildDominantLinearGnocchiModel(genotypeStates, phenotypes, sc)
// case "DOMINANT_LOGISTIC" => BuildDominantLogisticGnocchiModel
}
(model, assocs)
}
def loadGenotypes(sc: SparkContext): Dataset[GenotypeState] = {
// set up sqlContext
val sqlContext = SQLContext.getOrCreate(sc)
import sqlContext.implicits._
val absAssociationPath = new File(args.associations).getAbsolutePath
var parquetInputDestination = absAssociationPath.split("/").reverse.drop(1).reverse.mkString("/")
parquetInputDestination = parquetInputDestination + "/parquetInputFiles/"
val parquetFiles = new File(parquetInputDestination)
val vcfPath = args.genotypes
val posAndIds = GetVariantIds(sc, vcfPath)
// check for ADAM formatted version of the file specified in genotypes. If it doesn't exist, convert vcf to parquet using vcf2adam.
if (!parquetFiles.getAbsoluteFile.exists) {
val cmdLine: Array[String] = Array[String](vcfPath, parquetInputDestination)
Vcf2ADAM(cmdLine).run(sc)
} else if (args.overwrite) {
FileUtils.deleteDirectory(parquetFiles)
val cmdLine: Array[String] = Array[String](vcfPath, parquetInputDestination)
Vcf2ADAM(cmdLine).run(sc)
}
val genotypes = sqlContext.read.format("parquet").load(parquetInputDestination)
// transform the parquet-formatted genotypes into a dataFrame of GenotypeStates and convert to Dataset.
val genotypeStates = sqlContext
.toGenotypeStateDataFrame(genotypes, args.ploidy, sparse = false)
val genoStatesWithNames = genotypeStates.select(concat($"contig", lit("_"), $"end", lit("_"), $"alt") as "contig",
genotypeStates("start"),
genotypeStates("end"),
genotypeStates("ref"),
genotypeStates("alt"),
genotypeStates("sampleId"),
genotypeStates("genotypeState"),
genotypeStates("missingGenotypes"))
println(genoStatesWithNames.take(10).toList)
// mind filter
genoStatesWithNames.registerTempTable("genotypeStates")
val mindDF = sqlContext.sql("SELECT sampleId FROM genotypeStates GROUP BY sampleId HAVING SUM(missingGenotypes)/(COUNT(sampleId)*2) <= %s".format(args.mind))
var filteredGenotypeStates = genoStatesWithNames.filter($"sampleId".isin(mindDF.collect().map(r => r(0)): _*))
println("Pre-filtered GenotypeStates: " + filteredGenotypeStates.take(5).toList)
if (args.snps != null) {
// Filter out only specified snps
// TODO: Clean this
val snps = args.snps.split(',')
filteredGenotypeStates = filteredGenotypeStates.filter(filteredGenotypeStates("contig").isin(snps: _*))
}
println("Post-filtered GenotypeStates: " + filteredGenotypeStates.take(5).toList)
filteredGenotypeStates.as[GenotypeState]
}
}
|
bigdatagenomics/gnocchi
|
gnocchi-cli/src/main/scala/net/fnothaft/gnocchi/cli/ConstructGnocchiModel.scala
|
Scala
|
apache-2.0
| 6,337 |
package colossus
package core
import java.nio.ByteBuffer
import java.nio.channels.{CancelledKeyException, ClosedChannelException, SelectionKey, SocketChannel}
sealed trait WriteStatus
object WriteStatus {
//connection is busted
case object Failed extends WriteStatus
//data was partially written and the rest is buffered
case object Partial extends WriteStatus
//buffered data is still being written, requested write did not occur
case object Zero extends WriteStatus
//all the data was written
case object Complete extends WriteStatus
}
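// Illustrative sketch (not part of the original file): how a caller of WriteBuffer.write (below)
// is expected to react to each WriteStatus. After a Partial result the caller should hold further
// writes until onBufferClear() fires; Zero is only seen if it keeps writing anyway.
object WriteStatusUsageExample {
  import WriteStatus._
  def react(status: WriteStatus): Unit = status match {
    case Complete => () // everything was accepted, more data may be written immediately
    case Partial  => () // stop writing and wait for onBufferClear()
    case Zero     => () // a previous partial write is still buffered; this write was not accepted
    case Failed   => () // the connection is unusable, clean up
  }
}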
trait KeyInterestManager {
private var _readsEnabled = true
private var _writeReadyEnabled = false
def readsEnabled = _readsEnabled
def writeReadyEnabled = _writeReadyEnabled
protected def setKeyInterest()
def enableReads() {
_readsEnabled = true
setKeyInterest()
}
def disableReads() {
_readsEnabled = false
setKeyInterest()
}
def enableWriteReady() {
_writeReadyEnabled = true
setKeyInterest()
}
def disableWriteReady() {
_writeReadyEnabled = false
setKeyInterest()
}
}
/**
* The WriteBuffer handles everything involved in properly writing raw data to the
* SocketChannel and dealing with backpressure.
*
* When `write` is called on a SocketChannel in nonblocking mode, the channel
* may not accept all of the given data, returning how many bytes it actually
* accepted. Therefore it is up to the WriteBuffer to handle these situations,
* in which case it buffers the unwritten data and waits for the OP_WRITE signal
* from the event loop to continue writing.
*
* WriteBuffer does not handle all aspects of write backpressure, only the most
* immediate scenario of a partially written DataBuffer. Every call to `write`
* returns a WriteStatus, indicating the WriteBuffer's status in relation to the
* given data. In every case, a Partial status will always be returned before a
* Zero status, so a caller that is properly reacting to the Partial status
* should never in practice actually receive a Zero status.
*
* The WriteBuffer works by writing all data into an internal buffer, which is
* then drained into the SocketChannel once per event loop iteration. This is
* done to minimize the number of calls to SocketChannel.write, a fairly
* expensive operation.
*/
private[colossus] trait WriteBuffer extends KeyInterestManager {
import WriteStatus._
def internalBufferSize: Int
//this will be called from handleWrite whenever a previously partial buffer has been fully written out
def onBufferClear(): Unit
//mostly for DI for testing
def channelWrite(data: DataBuffer): Int
private var _bytesSent = 0L
def bytesSent = _bytesSent
//this is only filled when we only partially wrote data
private var partialBuffer: Option[DataBuffer] = None
//technically this value is wrong when first constructed, but since this is
//only used in determining idle time, initializing it to the current time
//simplifies the calculations
private var _lastTimeDataWritten: Long = System.currentTimeMillis
def lastTimeDataWritten = _lastTimeDataWritten
//this is set to true when we are in the process of writing the internal
//buffer to the channel. Normally this is only true outside of handleWrite
//when we fail to write the whole internal buffer to the socket
private var drainingInternal = false
//this is only used when the connection is about to disconnect. We allow the
//write buffer to drain, then perform the actual disconnect. Once this is
//set, no more writes are allowed and the connection is considered severed
//from the user's point of view
private var disconnectCallback: Option[() => Unit] = None
def disconnectBuffer(cb: () => Unit) {
if (partialBuffer.isEmpty && drainingInternal == false && internal.data.position == 0) {
cb()
//we set this to prevent any further writes (see write())
disconnectCallback = Some(() => ())
} else {
disconnectCallback = Some(cb)
}
}
def isDataBuffered: Boolean = partialBuffer.isDefined
//all writes are initially written to this internal buffer. The buffer is
//then drained at most once per event loop. This ends up being much faster
//than attempting to directly write to the socket each time
private val internal = DataBuffer(ByteBuffer.allocateDirect(internalBufferSize))
/**
* copy as much data as possible from src into the internal buffer. If
* drainingInternal is true it means we're currently in the process of writing
* the internal buffer to the socket so we cannot write data into it.
*/
private def copyInternal(src: ByteBuffer) {
if (!drainingInternal) {
val oldLimit = src.limit()
val newLimit = if (src.remaining > internal.remaining) {
oldLimit - (src.remaining - internal.remaining)
} else {
oldLimit
}
src.limit(newLimit)
internal.data.put(src)
src.limit(oldLimit)
}
}
/**
* Attempt to copy data into the internal buffer. Notice that this method is
* separated from write because:
*
* 1. this is called from handleWrite when the
* partialBuffer is set, though I think it could possibly be refactored
*
* 2. We want to keep draining the buffers even when disconnectCallback is
* defined (which prevents additional writes from the user)
*/
private def writeRaw(raw: DataBuffer): WriteStatus = {
try {
enableWriteReady()
_lastTimeDataWritten = System.currentTimeMillis
copyInternal(raw.data)
if (raw.hasUnreadData) {
//we must take a copy of the buffer since it will be repurposed
partialBuffer = Some(raw.takeCopy)
Partial
} else {
partialBuffer = None
Complete
}
} catch {
case t: CancelledKeyException => {
//no cleanup is required since the connection is closed for good,
Failed
}
}
}
//this method is designed such that the caller can safely call it once and not
//have to worry about having its data rejected. This way the caller doesn't
//need to do any buffering of its own, though it does need to be aware that
//any subsequent calls will return a Zero write status
def write(raw: DataBuffer): WriteStatus = {
if (disconnectCallback.isDefined) {
//the user has called disconnect, so we reject new writes. At this point
//the connection would only be open to drain the internal/partial buffers
Failed
} else if (partialBuffer.isDefined) {
Zero
} else {
writeRaw(raw)
}
}
/**
* Drain the internal buffer and perform the actual write to the socket. This
* is called by the event loop whenever the buffer is subscribed to the
* OP_WRITE key interest.
*/
def handleWrite() {
if (internal.data.position > 0) {
if (!drainingInternal) {
drainingInternal = true
internal.data.flip //prepare for reading
}
//notice that this method may throw a ClosedChannelException, however the
//worker is correctly handling the exception and doing the necessary cleanup
_bytesSent += channelWrite(internal)
if (internal.remaining == 0) {
//hooray! we wrote all the data, now we can accept more
internal.data.clear()
disableWriteReady()
drainingInternal = false
partialBuffer.map{raw =>
if (writeRaw(raw) == Complete) {
//notice that onBufferClear is only called if the user had previously
//called write and we returned a Partial status (which would result in
//partialBuffer being set)
onBufferClear()
}
}.getOrElse{
disconnectCallback.foreach{cb => cb()}
}
}
}
}
}
private[core] trait LiveWriteBuffer extends WriteBuffer {
//DO NOT MAKE THIS A VAL, screws up initialization order
def internalBufferSize = 1024 * 64
protected def channel: SocketChannel
def channelWrite(raw: DataBuffer): Int = raw.writeTo(channel)
def key: SelectionKey
def setKeyInterest() {
val ops = (if (readsEnabled) SelectionKey.OP_READ else 0) | (if (writeReadyEnabled) SelectionKey.OP_WRITE else 0)
key.interestOps(ops)
}
}
|
zgagnon/colossus
|
colossus/src/main/scala/colossus/core/WriteBuffer.scala
|
Scala
|
apache-2.0
| 8,235 |
package com.getbootstrap.savage.util
import scala.util.Try
object IntFromStr {
def unapply(str: String): Option[Int] = Try{ Integer.parseInt(str) }.toOption
}
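// Illustrative usage sketch (not part of the original file): the extractor lets strings be
// pattern-matched as integers directly.
object IntFromStrExample {
  def describe(s: String): String = s match {
    case IntFromStr(n) => s"parsed integer: $n"
    case other         => s"not an integer: $other"
  }
}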
|
twbs/savage
|
src/main/scala/com/getbootstrap/savage/util/IntFromStr.scala
|
Scala
|
mit
| 163 |
package net.scalax.hf.test
import slick.collection.heterogeneous.HNil
import slick.jdbc.H2Profile.api._
/**
* Created by djx314 on 15-6-22.
*/
case class SmallModel(
id: Option[Long],
a1: Int,
a2: Option[Int],
a3: String,
a4: Int,
a5: Int
)
class SmallTable(tag: Tag) extends Table[SmallModel](tag, "aabbbbbbb") {
def id = column[Option[Long]]("id", O.PrimaryKey)
def a1 = column[Int]("a1")
def a2 = column[Option[Int]]("a2")
def a3 = column[String]("a3")
def a4 = column[Int]("a4")
def a5 = column[Int]("a5")
def * =
(id ::
a1 ::
a2 ::
a3 ::
a4 ::
a5 ::
HNil
).mapTo[SmallModel]
}
case class LargeModel(
id: Option[Long],
a1: Option[Int],
a2: Option[Int],
a3: Option[Int],
a4: Option[Int],
a5: Int,
a6: Int,
a7: Int,
a8: Int,
a9: Int,
a10: Option[Int],
a11: Option[Int],
a12: Option[Int],
a13: Option[Int],
a14: Option[Int],
a15: Option[Int],
a16: Option[Int],
a17: Option[Int],
a18: Option[Int],
a19: Option[Int],
a20: Option[Int],
a21: Option[Int],
a22: Option[Int],
a23: Option[String],
a24: Option[String]
)
class LargeTable(tag: Tag) extends Table[LargeModel](tag, "test_aabb") {
def id = column[Option[Long]]("id", O.PrimaryKey, O.AutoInc)
def a1 = column[Option[Int]]("a1")
def a2 = column[Option[Int]]("a2")
def a3 = column[Option[Int]]("a3")
def a4 = column[Option[Int]]("a4")
def a5 = column[Int]("a5")
def a6 = column[Int]("a6")
def a7 = column[Int]("a7")
def a8 = column[Int]("a8")
def a9 = column[Int]("a9")
def a10 = column[Option[Int]]("a10")
def a11 = column[Option[Int]]("a11")
def a12 = column[Option[Int]]("a12")
def a13 = column[Option[Int]]("a13")
def a14 = column[Option[Int]]("a14")
def a15 = column[Option[Int]]("a15")
def a16 = column[Option[Int]]("a16")
def a17 = column[Option[Int]]("a17")
def a18 = column[Option[Int]]("a18")
def a19 = column[Option[Int]]("a19")
def a20 = column[Option[Int]]("a20")
def a21 = column[Option[Int]]("a21")
def a22 = column[Option[Int]]("a22")
def a23 = column[Option[String]]("a23")
def a24 = column[Option[String]]("a24")
def * =
(
id ::
a1 ::
a2 ::
a3 ::
a4 ::
a5 ::
a6 ::
a7 ::
a8 ::
a9 ::
a10 ::
a11 ::
a12 ::
a13 ::
a14 ::
a15 ::
a16 ::
a17 ::
a18 ::
a19 ::
a20 ::
a21 ::
a22 ::
a23 ::
a24 ::
HNil
).mapTo[LargeModel]
}
|
scalax/slick-summer
|
src/test/scala/net/scalax/hf/Models.scala
|
Scala
|
mit
| 2,584 |
import com.hypertino.binders.json.JsonBinders
import com.hypertino.binders.value._
import org.scalatest.{FlatSpec, Matchers}
case class Mixed(a: Int, b: String, extra: Value)
class TestMixJsonSerializer extends FlatSpec with Matchers {
import JsonBinders._
"Json " should " serialize Mixed" in {
val t = Mixed(1, "ha", Obj.from(
"f" -> 555
))
val str = t.toJson
assert (str === """{"a":1,"b":"ha","extra":{"f":555}}""")
}
"Json " should " deserialize Mixed" in {
val o = """{"a":1,"b":"ha","extra":{"f":555}}""".parseJson[Mixed]
val t = Mixed(1, "ha", Obj.from(
"f" -> 555
))
assert (o === t)
}
"Json " should " serialize Mixed (Null)" in {
val t = Mixed(1, "ha", Null)
val str = t.toJson
assert (str === """{"a":1,"b":"ha","extra":null}""")
}
"Json " should " deserialize Mixed (Null)" in {
val o = """{"a":1,"b":"ha"}""".parseJson[Mixed]
val t = Mixed(1, "ha", Null)
assert (o === t)
}
}
|
hypertino/json-binders
|
jsonBinders/shared/src/test/scala/TestMixJsonSerializer.scala
|
Scala
|
bsd-3-clause
| 985 |
package lila.slack
import org.joda.time.DateTime
import lila.common.LightUser
import lila.hub.actorApi.slack._
import lila.user.User
final class SlackApi(
client: SlackClient,
isProd: Boolean,
implicit val lightUser: LightUser.Getter) {
import SlackApi._
object charge {
import lila.hub.actorApi.plan.ChargeEvent
private var buffer: Vector[ChargeEvent] = Vector.empty
def apply(event: ChargeEvent): Funit =
if (event.amount <= 2000) addToBuffer(event)
else displayMessage {
s"${link(event)} donated ${amount(event.amount)}. Monthly progress: ${event.percent}%"
}
private def addToBuffer(event: ChargeEvent): Funit = {
buffer = buffer :+ event
(buffer.head.date isBefore DateTime.now.minusHours(3)) ?? {
val links = buffer map link mkString ", "
val amountSum = buffer.map(_.amount).sum
displayMessage {
s"$links donated ${amount(amountSum)}. Monthly progress: ${buffer.last.percent}%"
} >>- {
buffer = Vector.empty
}
}
}
private def displayMessage(text: String) = client(SlackMessage(
username = "Patron",
icon = "four_leaf_clover",
text = text,
channel = "general"
))
private def link(event: ChargeEvent) =
if (event.username == "Anonymous") "Anonymous"
else s"lichess.org/@/${event.username}"
private def amount(cents: Int) = s"$$${lila.common.Maths.truncateAt(cents / 100d, 2)}"
}
def publishEvent(event: Event): Funit = event match {
case Error(msg) => publishError(msg)
case Warning(msg) => publishWarning(msg)
case Info(msg) => publishInfo(msg)
case Victory(msg) => publishVictory(msg)
}
def publishError(msg: String): Funit = client(SlackMessage(
username = "lichess error",
icon = "lightning",
text = msg,
channel = "general"))
def publishWarning(msg: String): Funit = client(SlackMessage(
username = "lichess warning",
icon = "thinking_face",
text = msg,
channel = "general"))
def publishVictory(msg: String): Funit = client(SlackMessage(
username = "lichess victory",
icon = "tada",
text = msg,
channel = "general"))
def publishInfo(msg: String): Funit = client(SlackMessage(
username = "lichess info",
icon = "monkey",
text = msg,
channel = "general"))
def publishRestart =
if (isProd) publishInfo("Lichess has restarted!")
else client(SlackMessage(
username = stage.name,
icon = stage.icon,
text = "stage has restarted.",
channel = "general"))
def userMod(user: User, mod: User): Funit = client(SlackMessage(
username = mod.username,
icon = "oncoming_police_car",
text = s"Let's have a look at https://lichess.org/@/${user.username}?mod",
channel = "tavern"))
def userModNote(modName: String, username: String, note: String): Funit = client(SlackMessage(
username = modName,
icon = "spiral_note_pad",
text = s"left a mod note on https://lichess.org/@/$username\\n${note.take(140)}",
channel = "tavern"))
def deployPre: Funit =
if (isProd) client(SlackMessage(
username = "deployment",
icon = "rocket",
text = "Lichess will be updated in a minute! Fasten your seatbelts.",
channel = "general"))
else client(SlackMessage(
username = stage.name,
icon = stage.icon,
text = "stage will be updated in a minute.",
channel = "general"))
def deployPost: Funit =
if (isProd) client(SlackMessage(
username = "deployment",
icon = "rocket",
text = "Lichess is being updated! Brace for impact.",
channel = "general"))
else client(SlackMessage(
username = "stage.lichess.org",
icon = "volcano",
text = "stage has been updated!",
channel = "general"))
}
private object SlackApi {
object stage {
val name = "stage.lichess.org"
val icon = "volcano"
}
}
|
clarkerubber/lila
|
modules/slack/src/main/SlackApi.scala
|
Scala
|
agpl-3.0
| 3,959 |
package shield.config
import scala.concurrent.duration.FiniteDuration
sealed trait KVStoreConfig
case class BreakerStoreConfig(backer: String, maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration) extends KVStoreConfig
case class MemoryStoreConfig(hashCapacity: Int, keyCapacity: Int, limitCapacity: Int) extends KVStoreConfig
case class RedisStoreConfig(url: String) extends KVStoreConfig
|
RetailMeNot/shield
|
src/main/scala/shield/config/KVStoreConfig.scala
|
Scala
|
mit
| 416 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import java.util.Map.Entry
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.client.{IteratorSetting, ScannerBase}
import org.apache.accumulo.core.data.{Key, Value, Range => aRange}
import org.apache.accumulo.core.security.Authorizations
import org.apache.hadoop.io.Text
import org.locationtech.geomesa.accumulo.AccumuloProperties.AccumuloQueryProperties
import org.locationtech.geomesa.accumulo.data.AccumuloDataStore
import org.locationtech.geomesa.accumulo.index.AccumuloQueryPlan.JoinFunction
import org.locationtech.geomesa.accumulo.util.BatchMultiScanner
import org.locationtech.geomesa.accumulo.{AccumuloFilterStrategyType, AccumuloQueryPlanType}
import org.locationtech.geomesa.index.utils.Explainer
import org.locationtech.geomesa.utils.collection.{CloseableIterator, SelfClosingIterator}
import org.opengis.feature.simple.SimpleFeature
sealed trait AccumuloQueryPlan extends AccumuloQueryPlanType {
def table: String
def columnFamilies: Seq[Text]
def ranges: Seq[aRange]
def iterators: Seq[IteratorSetting]
def numThreads: Int
def join: Option[(JoinFunction, AccumuloQueryPlan)] = None
override def explain(explainer: Explainer, prefix: String = ""): Unit =
AccumuloQueryPlan.explain(this, explainer, prefix)
protected def configure(scanner: ScannerBase): Unit = {
iterators.foreach(scanner.addScanIterator)
columnFamilies.foreach(scanner.fetchColumnFamily)
}
}
object AccumuloQueryPlan extends LazyLogging {
type JoinFunction = (Entry[Key, Value]) => aRange
def explain(plan: AccumuloQueryPlan, explainer: Explainer, prefix: String): Unit = {
explainer.pushLevel(s"${prefix}Plan: ${plan.getClass.getName}")
explainer(s"Table: ${plan.table}")
explainer(s"Deduplicate: ${plan.hasDuplicates}")
explainer(s"Column Families${if (plan.columnFamilies.isEmpty) ": all"
else s" (${plan.columnFamilies.size}): ${plan.columnFamilies.take(20)}"}")
explainer(s"Ranges (${plan.ranges.size}): ${plan.ranges.take(5).map(rangeToString).mkString(", ")}")
explainer(s"Iterators (${plan.iterators.size}):", plan.iterators.map(i => () => i.toString))
plan.join.foreach { j => explain(j._2, explainer, "Join ") }
explainer.popLevel()
}
// converts a range to a printable string - only includes the row
private def rangeToString(r: aRange): String = {
val a = if (r.isStartKeyInclusive) "[" else "("
val z = if (r.isEndKeyInclusive) "]" else ")"
val start = if (r.isInfiniteStartKey) "-inf" else keyToString(r.getStartKey)
val stop = if (r.isInfiniteStopKey) "+inf" else keyToString(r.getEndKey)
s"$a$start::$stop$z"
}
// converts a key to a printable string - only includes the row
private def keyToString(k: Key): String =
Key.toPrintableString(k.getRow.getBytes, 0, k.getRow.getLength, k.getRow.getLength)
}
// plan that will not actually scan anything
case class EmptyPlan(filter: AccumuloFilterStrategyType) extends AccumuloQueryPlan {
override val table: String = ""
override val iterators: Seq[IteratorSetting] = Seq.empty
override val ranges: Seq[aRange] = Seq.empty
override val columnFamilies: Seq[Text] = Seq.empty
override val hasDuplicates: Boolean = false
override val numThreads: Int = 0
override def scan(ds: AccumuloDataStore): CloseableIterator[SimpleFeature] = CloseableIterator.empty
}
// single scan plan
case class ScanPlan(filter: AccumuloFilterStrategyType,
table: String,
range: aRange,
iterators: Seq[IteratorSetting],
columnFamilies: Seq[Text],
entriesToFeatures: (Entry[Key, Value]) => SimpleFeature,
override val reduce: Option[(CloseableIterator[SimpleFeature]) => CloseableIterator[SimpleFeature]],
override val hasDuplicates: Boolean) extends AccumuloQueryPlan {
import scala.collection.JavaConversions._
override val numThreads = 1
override val ranges = Seq(range)
override def scan(ds: AccumuloDataStore): CloseableIterator[SimpleFeature] = {
val scanner = ds.connector.createScanner(table, ds.auths)
scanner.setRange(range)
configure(scanner)
SelfClosingIterator(scanner.iterator.map(entriesToFeatures), scanner.close())
}
}
// batch scan plan
case class BatchScanPlan(filter: AccumuloFilterStrategyType,
table: String,
ranges: Seq[aRange],
iterators: Seq[IteratorSetting],
columnFamilies: Seq[Text],
entriesToFeatures: (Entry[Key, Value]) => SimpleFeature,
override val reduce: Option[(CloseableIterator[SimpleFeature]) => CloseableIterator[SimpleFeature]],
numThreads: Int,
override val hasDuplicates: Boolean) extends AccumuloQueryPlan {
import scala.collection.JavaConversions._
override def scan(ds: AccumuloDataStore): CloseableIterator[SimpleFeature] =
scanEntries(ds).map(entriesToFeatures)
def scanEntries(ds: AccumuloDataStore, auths: Option[Authorizations] = None): CloseableIterator[Entry[Key, Value]] = {
if (ranges.isEmpty) { CloseableIterator.empty } else {
val batchRanges = AccumuloQueryProperties.SCAN_BATCH_RANGES.option.map(_.toInt).getOrElse(Int.MaxValue)
val batched = ranges.grouped(batchRanges)
SelfClosingIterator(batched).flatMap { ranges =>
val scanner = ds.connector.createBatchScanner(table, auths.getOrElse(ds.auths), numThreads)
scanner.setRanges(ranges)
configure(scanner)
SelfClosingIterator(scanner.iterator, scanner.close())
}
}
}
}
// join on multiple tables - requires multiple scans
case class JoinPlan(filter: AccumuloFilterStrategyType,
table: String,
ranges: Seq[aRange],
iterators: Seq[IteratorSetting],
columnFamilies: Seq[Text],
numThreads: Int,
override val hasDuplicates: Boolean,
joinFunction: JoinFunction,
joinQuery: BatchScanPlan) extends AccumuloQueryPlan {
override val join = Some((joinFunction, joinQuery))
override def reduce: Option[(CloseableIterator[SimpleFeature]) => CloseableIterator[SimpleFeature]] = joinQuery.reduce
override def scan(ds: AccumuloDataStore): CloseableIterator[SimpleFeature] = {
import scala.collection.JavaConversions._
val primary = if (ranges.length == 1) {
val scanner = ds.connector.createScanner(table, ds.auths)
scanner.setRange(ranges.head)
scanner
} else {
val scanner = ds.connector.createBatchScanner(table, ds.auths, numThreads)
scanner.setRanges(ranges)
scanner
}
configure(primary)
val bms = new BatchMultiScanner(ds, primary, joinQuery, joinFunction)
SelfClosingIterator(bms.iterator.map(joinQuery.entriesToFeatures), bms.close())
}
}
|
ddseapy/geomesa
|
geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/index/AccumuloQueryPlan.scala
|
Scala
|
apache-2.0
| 7,513 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package impl
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IStubElementType, StubElement}
import com.intellij.util.io.StringRef
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScReferencePattern
/**
* User: Alexander Podkhalyuzin
* Date: 17.07.2009
*/
class ScReferencePatternStubImpl(parent: StubElement[_ <: PsiElement],
elementType: IStubElementType[_ <: StubElement[_ <: PsiElement], _ <: PsiElement],
nameRef: StringRef)
extends ScNamedStubBase[ScReferencePattern](parent, elementType, nameRef) with ScReferencePatternStub
|
loskutov/intellij-scala
|
src/org/jetbrains/plugins/scala/lang/psi/stubs/impl/ScReferencePatternStubImpl.scala
|
Scala
|
apache-2.0
| 712 |
package dc.json
import spray.json.{JsonFormat, DefaultJsonProtocol}
case class CouchViewResult[T](total_rows: Int, offset: Int, rows: List[T])
object CouchViewResult extends DefaultJsonProtocol {
implicit def couchViewResultFormat[T: JsonFormat] = jsonFormat3(CouchViewResult.apply[T])
}
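// Illustrative usage sketch (not part of the original file): parsing a CouchDB view result whose
// rows are plain integers, relying on the implicit format defined in the companion object above.
object CouchViewResultExample {
  import spray.json._
  import DefaultJsonProtocol._
  val parsed: CouchViewResult[Int] =
    """{"total_rows":2,"offset":0,"rows":[1,2]}""".parseJson.convertTo[CouchViewResult[Int]]
}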
|
MagnusAk78/dynamic-checklist-server
|
tools/src/main/scala/dc/json/CouchViewResult.scala
|
Scala
|
gpl-3.0
| 292 |
package com.seadowg.milo.test.events
import org.specs2.mutable._
import org.specs2.mock._
import com.seadowg.milo.events._
class EventStreamSpec extends Specification with Mockito {
"EventStream".title
"when created" should {
"always have an event" in {
val stream1 = new EventStream(new Event[Int])
stream1.event must_!= null
val stream2 = new EventStream()
stream2.event must_!= null
}
}
"#bind(func)" should {
"pass the function to the the stream's event.bind" in {
val mockEvent = mock[Event[Int]]
val stream = new EventStream(mockEvent)
val func: Int => Unit = { i => i }
stream.bind(func)
there was one(mockEvent).bind(func)
}
}
"#map(func)" should {
"return a stream with the function applied to every element of the original" in {
var sent = 0
val stream = new EventStream[Int]
stream.map(value => value * 2).event.bind(value => sent = value)
stream.event.trigger(1)
sent mustEqual 2
}
}
"#filter(func)" should {
"return a stream containing only the elements that pass the filter" in {
var sent = 0
val stream = new EventStream[Int]
stream.filter(value => value < 2).event.bind(value => sent = value)
stream.event.trigger(1)
stream.event.trigger(2)
sent mustEqual 1
}
}
"#merge(event)" should {
"return a stream that contains the elements of both streams time interleaved" in {
var sent = 0
val stream1 = new EventStream[Int]
val stream2 = new EventStream[Int]
stream1.merge(stream2).event.bind(value => sent = value)
stream1.event.trigger(1)
sent mustEqual 1
stream2.event.trigger(2)
sent mustEqual 2
}
}
}
|
seadowg/milo
|
src/test/scala/com/seadowg/milo/test/events/EventStreamSpec.scala
|
Scala
|
mit
| 1,778 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.logical
import java.util.Collections
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptRule._
import org.apache.calcite.plan.volcano.RelSubset
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelOptRuleOperand}
import org.apache.calcite.rel.`type`.{RelDataTypeFieldImpl, RelRecordType, StructKind}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.core.Uncollect
import org.apache.calcite.rel.logical._
import org.apache.calcite.sql.`type`.AbstractSqlType
import org.apache.flink.table.api.TableException
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.functions.utils.UserDefinedFunctionUtils
import org.apache.flink.table.plan.schema.{ArrayRelDataType, MultisetRelDataType}
import org.apache.flink.table.plan.util.ExplodeFunctionUtil
class LogicalUnnestRule(
operand: RelOptRuleOperand,
description: String)
extends RelOptRule(operand, description) {
override def matches(call: RelOptRuleCall): Boolean = {
val join: LogicalCorrelate = call.rel(0).asInstanceOf[LogicalCorrelate]
val right = join.getRight.asInstanceOf[RelSubset].getOriginal
right match {
// a filter is pushed above the table function
case filter: LogicalFilter =>
filter.getInput.asInstanceOf[RelSubset].getOriginal match {
case u: Uncollect => !u.withOrdinality
case _ => false
}
case u: Uncollect => !u.withOrdinality
case _ => false
}
}
override def onMatch(call: RelOptRuleCall): Unit = {
val correlate = call.rel(0).asInstanceOf[LogicalCorrelate]
val outer = correlate.getLeft.asInstanceOf[RelSubset].getOriginal
val array = correlate.getRight.asInstanceOf[RelSubset].getOriginal
def convert(relNode: RelNode): RelNode = {
relNode match {
case rs: RelSubset =>
convert(rs.getRelList.get(0))
case f: LogicalFilter =>
f.copy(
f.getTraitSet,
ImmutableList.of(convert(f.getInput.asInstanceOf[RelSubset].getOriginal)))
case uc: Uncollect =>
// convert Uncollect into TableFunctionScan
val cluster = correlate.getCluster
val dataType = uc.getInput.getRowType.getFieldList.get(0).getValue
val (componentType, explodeTableFunc) = dataType match {
case arrayType: ArrayRelDataType =>
(arrayType.getComponentType,
ExplodeFunctionUtil.explodeTableFuncFromType(arrayType.typeInfo))
case mt: MultisetRelDataType =>
(mt.getComponentType, ExplodeFunctionUtil.explodeTableFuncFromType(mt.typeInfo))
case _ => throw TableException(s"Unsupported UNNEST on type: ${dataType.toString}")
}
// create sql function
val explodeSqlFunc = UserDefinedFunctionUtils.createTableSqlFunction(
"explode",
"explode",
explodeTableFunc,
FlinkTypeFactory.toTypeInfo(componentType),
cluster.getTypeFactory.asInstanceOf[FlinkTypeFactory])
// create table function call
val rexCall = cluster.getRexBuilder.makeCall(
explodeSqlFunc,
uc.getInput.asInstanceOf[RelSubset]
.getOriginal.asInstanceOf[LogicalProject].getChildExps
)
// determine rel data type of unnest
val rowType = componentType match {
case _: AbstractSqlType =>
new RelRecordType(
StructKind.FULLY_QUALIFIED,
ImmutableList.of(new RelDataTypeFieldImpl("f0", 0, componentType)))
case _: RelRecordType => componentType
case _ => throw TableException(
s"Unsupported component type in UNNEST: ${componentType.toString}")
}
// create table function scan
new LogicalTableFunctionScan(
cluster,
correlate.getTraitSet,
Collections.emptyList(),
rexCall,
classOf[Array[Object]],
rowType,
null)
}
}
// convert unnest into table function scan
val tableFunctionScan = convert(array)
// create correlate with table function scan as input
val newCorrelate =
correlate.copy(correlate.getTraitSet, ImmutableList.of(outer, tableFunctionScan))
call.transformTo(newCorrelate)
}
}
object LogicalUnnestRule {
val INSTANCE = new LogicalUnnestRule(
operand(classOf[LogicalCorrelate], any),
"LogicalUnnestRule")
}
|
zhangminglei/flink
|
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/rules/logical/LogicalUnnestRule.scala
|
Scala
|
apache-2.0
| 5,391 |
package com.twitter.finagle.mysql.protocol
import com.twitter.logging.Logger
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
object Command {
val COM_SLEEP = 0x00.toByte // internal thread state
val COM_QUIT = 0x01.toByte // mysql_close
val COM_INIT_DB = 0x02.toByte // mysql_select_db
val COM_QUERY = 0x03.toByte // mysql_real_query
val COM_FIELD_LIST = 0x04.toByte // mysql_list_fields
val COM_CREATE_DB = 0x05.toByte // mysql_create_db (deprecated)
val COM_DROP_DB = 0x06.toByte // mysql_drop_db (deprecated)
val COM_REFRESH = 0x07.toByte // mysql_refresh
val COM_SHUTDOWN = 0x08.toByte // mysql_shutdown
val COM_STATISTICS = 0x09.toByte // mysql_stat
val COM_PROCESS_INFO = 0x0A.toByte // mysql_list_processes
val COM_CONNECT = 0x0B.toByte // internal thread state
val COM_PROCESS_KILL = 0x0C.toByte // mysql_kill
val COM_DEBUG = 0x0D.toByte // mysql_dump_debug_info
val COM_PING = 0x0E.toByte // mysql_ping
val COM_TIME = 0x0F.toByte // internal thread state
val COM_DELAYED_INSERT = 0x10.toByte // internal thread state
val COM_CHANGE_USER = 0x11.toByte // mysql_change_user
val COM_BINLOG_DUMP = 0x12.toByte // sent by slave IO thread to req a binlog
val COM_TABLE_DUMP = 0x13.toByte // deprecated
val COM_CONNECT_OUT = 0x14.toByte // internal thread state
val COM_REGISTER_SLAVE = 0x15.toByte // sent by the slave to register with the master (optional)
val COM_STMT_PREPARE = 0x16.toByte // mysql_stmt_prepare
val COM_STMT_EXECUTE = 0x17.toByte // mysql_stmt_execute
val COM_STMT_SEND_LONG_DATA = 0x18.toByte // mysql_stmt_send_long_data
val COM_STMT_CLOSE = 0x19.toByte // mysql_stmt_close
val COM_STMT_RESET = 0x1A.toByte // mysql_stmt_reset
val COM_SET_OPTION = 0x1B.toByte // mysql_set_server_option
val COM_STMT_FETCH = 0x1C.toByte // mysql_stmt_fetch
}
abstract class Request(seq: Short) {
/**
* Request data translates to the body of the MySQL
* Packet sent to the server. This field becomes
* part of a composition of ChannelBuffers. To ensure
* that it has the correct byte order use Buffer.toChannelBuffer(...)
* to create the ChannelBuffer.
*/
val data: ChannelBuffer
def toChannelBuffer: ChannelBuffer =
Packet.toChannelBuffer(data.capacity, seq, data)
}
abstract class CommandRequest(val cmd: Byte) extends Request(0)
class SimpleCommandRequest(command: Byte, buffer: Array[Byte])
extends CommandRequest(command) {
override val data = Buffer.toChannelBuffer(Array(cmd), buffer)
}
/**
* NOOP Request used internally by this client.
*/
case object ClientInternalGreet extends Request(0) {
override val data = ChannelBuffers.EMPTY_BUFFER
override def toChannelBuffer = ChannelBuffers.EMPTY_BUFFER
}
case object PingRequest
extends SimpleCommandRequest(Command.COM_PING, Buffer.EMPTY_BYTE_ARRAY)
case class UseRequest(dbName: String)
extends SimpleCommandRequest(Command.COM_INIT_DB, dbName.getBytes)
case class QueryRequest(sqlStatement: String)
extends SimpleCommandRequest(Command.COM_QUERY, sqlStatement.getBytes)
case class PrepareRequest(sqlStatement: String)
extends SimpleCommandRequest(Command.COM_STMT_PREPARE, sqlStatement.getBytes)
/**
* An Execute Request.
* Uses the binary protocol to build an execute request for
* a prepared statement.
*/
case class ExecuteRequest(ps: PreparedStatement, flags: Byte = 0, iterationCount: Int = 1)
extends CommandRequest(Command.COM_STMT_EXECUTE) {
private[this] val log = Logger("finagle-mysql")
private[this] def isNull(param: Any): Boolean = param match {
case null => true
case _ => false
}
private[this] def makeNullBitmap(parameters: List[Any], bit: Int = 0, result: BigInt = BigInt(0)): Array[Byte] =
parameters match {
case Nil => result.toByteArray.reverse // As little-endian byte array
case param :: rest =>
val bits = if (isNull(param)) result.setBit(bit) else result
makeNullBitmap(rest, bit+1, bits)
}
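// Worked example (illustrative, not in the original source): for parameters List("a", null, 3)
// only index 1 is null, so the accumulator becomes BigInt(0).setBit(1) == 2 and the bitmap
// serializes to the single byte 0x02 (least-significant bit corresponds to the first parameter).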
private[this] def writeTypeCode(param: Any, writer: BufferWriter): Unit = {
val typeCode = Type.getCode(param)
if (typeCode != -1)
writer.writeShort(typeCode)
else {
// Unsupported type. Write the error to log, and write the type as null.
// This allows us to safely skip writing the parameter without corrupting the buffer.
log.error("ExecuteRequest: Unknown parameter %s will be treated as SQL NULL.".format(param.getClass.getName))
writer.writeShort(Type.NULL)
}
}
/**
* Returns sizeof all the parameters in the List.
*/
private[this] def sizeOfParameters(parameters: List[Any], size: Int = 0): Int = parameters match {
case Nil => size
case p :: rest =>
val typeSize = Type.sizeOf(p)
// We can safely convert unknown sizes to 0 because
// any unknown type is being sent as NULL.
val sizeOfParam = if (typeSize == -1) 0 else typeSize
sizeOfParameters(rest, size + sizeOfParam)
}
/**
* Writes the parameter into its MySQL binary representation.
*/
private[this] def writeParam(param: Any, writer: BufferWriter) = param match {
case s: String => writer.writeLengthCodedString(s)
case b: Boolean => writer.writeBoolean(b)
case b: Byte => writer.writeByte(b)
case s: Short => writer.writeShort(s)
case i: Int => writer.writeInt(i)
case l: Long => writer.writeLong(l)
case f: Float => writer.writeFloat(f)
case d: Double => writer.writeDouble(d)
case b: Array[Byte] => writer.writeBytes(b)
// Dates
case t: java.sql.Timestamp => TimestampValue.write(t, writer)
case d: java.sql.Date => DateValue.write(d, writer)
case d: java.util.Date => TimestampValue.write(new java.sql.Timestamp(d.getTime), writer)
case _ => writer // skip null and unknown values
}
override val data = {
val bw = BufferWriter(new Array[Byte](10))
bw.writeByte(cmd)
bw.writeInt(ps.statementId)
bw.writeByte(flags)
bw.writeInt(iterationCount)
val paramsList = ps.parameters.toList
val nullBytes = makeNullBitmap(paramsList)
val newParamsBound: Byte = if (ps.hasNewParameters) 1 else 0
val initialBuffer = Buffer.toChannelBuffer(bw.array, nullBytes, Array(newParamsBound))
// convert parameters to binary representation.
val sizeOfParams = sizeOfParameters(paramsList)
val values = BufferWriter(new Array[Byte](sizeOfParams))
paramsList foreach { writeParam(_, values) }
// parameters are tagged on to the end of the buffer
// after types or initialBuffer depending if the prepared statement
// has new parameters.
if (ps.hasNewParameters) {
// only add type data if the prepared statement has new parameters.
val types = BufferWriter(new Array[Byte](ps.numberOfParams * 2))
paramsList foreach { writeTypeCode(_, types) }
ChannelBuffers.wrappedBuffer(initialBuffer, types.toChannelBuffer, values.toChannelBuffer)
} else
ChannelBuffers.wrappedBuffer(initialBuffer, values.toChannelBuffer)
}
}
case class CloseRequest(ps: PreparedStatement) extends CommandRequest(Command.COM_STMT_CLOSE) {
override val data = {
val bw = BufferWriter(new Array[Byte](5))
bw.writeByte(cmd).writeInt(ps.statementId)
bw.toChannelBuffer
}
}
|
foursquare/finagle
|
finagle-mysql/src/main/scala/com/twitter/finagle/mysql/protocol/Request.scala
|
Scala
|
apache-2.0
| 7,764 |
package spark
import java.io.EOFException
import java.util.NoSuchElementException
import org.apache.hadoop.io.LongWritable
import org.apache.hadoop.io.NullWritable
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapred.FileInputFormat
import org.apache.hadoop.mapred.InputFormat
import org.apache.hadoop.mapred.InputSplit
import org.apache.hadoop.mapred.JobConf
import org.apache.hadoop.mapred.TextInputFormat
import org.apache.hadoop.mapred.RecordReader
import org.apache.hadoop.mapred.Reporter
import org.apache.hadoop.util.ReflectionUtils
/**
* A Spark split class that wraps around a Hadoop InputSplit.
*/
class HadoopSplit(rddId: Int, idx: Int, @transient s: InputSplit)
extends Split
with Serializable {
val inputSplit = new SerializableWritable[InputSplit](s)
override def hashCode(): Int = (41 * (41 + rddId) + idx).toInt
override val index: Int = idx
}
/**
* An RDD that reads a Hadoop dataset as specified by a JobConf (e.g. files in HDFS, the local file
* system, or S3, tables in HBase, etc).
*/
class HadoopRDD[K, V](
sc: SparkContext,
@transient conf: JobConf,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minSplits: Int)
extends RDD[(K, V)](sc) {
val serializableConf = new SerializableWritable(conf)
@transient @debugger.EventLogSerializable
val splits_ : Array[Split] = {
val inputFormat = createInputFormat(conf)
val inputSplits = inputFormat.getSplits(conf, minSplits)
val array = new Array[Split](inputSplits.size)
for (i <- 0 until inputSplits.size) {
array(i) = new HadoopSplit(id, i, inputSplits(i))
}
array
}
def createInputFormat(conf: JobConf): InputFormat[K, V] = {
ReflectionUtils.newInstance(inputFormatClass.asInstanceOf[Class[_]], conf)
.asInstanceOf[InputFormat[K, V]]
}
override def splits = splits_
override def compute(theSplit: Split) = new Iterator[(K, V)] {
val split = theSplit.asInstanceOf[HadoopSplit]
var reader: RecordReader[K, V] = null
val conf = serializableConf.value
val fmt = createInputFormat(conf)
reader = fmt.getRecordReader(split.inputSplit.value, conf, Reporter.NULL)
val key: K = reader.createKey()
val value: V = reader.createValue()
var gotNext = false
var finished = false
override def hasNext: Boolean = {
if (!gotNext) {
try {
finished = !reader.next(key, value)
} catch {
case eof: EOFException =>
finished = true
}
gotNext = true
}
if (finished) {
reader.close()
}
!finished
}
override def next: (K, V) = {
if (!gotNext) {
finished = !reader.next(key, value)
}
if (finished) {
throw new NoSuchElementException("End of stream")
}
gotNext = false
(key, value)
}
}
override def preferredLocations(split: Split) = {
// TODO: Filtering out "localhost" in case of file:// URLs
val hadoopSplit = split.asInstanceOf[HadoopSplit]
hadoopSplit.inputSplit.value.getLocations.filter(_ != "localhost")
}
override val dependencies: List[Dependency[_]] = Nil
}
|
ankurdave/arthur
|
core/src/main/scala/spark/HadoopRDD.scala
|
Scala
|
bsd-3-clause
| 3,214 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.{File, IOException}
import java.nio.file.Files
import java.nio.file.attribute.FileTime
import java.util.concurrent.TimeUnit
import kafka.common._
import kafka.metrics.{KafkaMetricsGroup, KafkaTimer}
import kafka.server.epoch.LeaderEpochCache
import kafka.server.{FetchDataInfo, LogOffsetMetadata}
import kafka.utils._
import org.apache.kafka.common.errors.CorruptRecordException
import org.apache.kafka.common.record.FileRecords.LogOffsetPosition
import org.apache.kafka.common.record._
import org.apache.kafka.common.utils.Time
import scala.collection.JavaConverters._
import scala.math._
/**
 * A segment of the log. Each segment has two components: a log and an index. The log is a FileRecords containing
* the actual messages. The index is an OffsetIndex that maps from logical offsets to physical file positions. Each
* segment has a base offset which is an offset <= the least offset of any message in this segment and > any offset in
* any previous segment.
*
* A segment with a base offset of [base_offset] would be stored in two files, a [base_offset].index and a [base_offset].log file.
*
* @param log The message set containing log entries
* @param index The offset index
* @param timeIndex The timestamp index
* @param baseOffset A lower bound on the offsets in this segment
* @param indexIntervalBytes The approximate number of bytes between entries in the index
* @param time The time instance
*/
@nonthreadsafe
//Splits the log into multiple log files to keep any single log from growing too large; each log file corresponds to one LogSegment
class LogSegment(val log: FileRecords,//replaces the former FileMessageSet
val index: OffsetIndex,//the offset index file for this log file
val timeIndex: TimeIndex,
val txnIndex: TransactionIndex,
val baseOffset: Long,//offset of the first message in this segment
val indexIntervalBytes: Int,//minimum number of bytes between index entries
val rollJitterMs: Long,
time: Time) extends Logging {
//creation time of this segment
private var created = time.milliseconds
//bytes written to the log since the last index entry was added, used to decide when the next index entry should be added
/* the number of bytes since we last added an entry in the offset index */
private var bytesSinceLastIndexEntry = 0
/* The timestamp we used for time based log rolling */
private var rollingBasedTimestamp: Option[Long] = None
/* The maximum timestamp we see so far */
@volatile private var maxTimestampSoFar: Long = timeIndex.lastEntry.timestamp
@volatile private var offsetOfMaxTimestamp: Long = timeIndex.lastEntry.offset
def this(dir: File, startOffset: Long, indexIntervalBytes: Int, maxIndexSize: Int, rollJitterMs: Long, time: Time,
fileAlreadyExists: Boolean = false, initFileSize: Int = 0, preallocate: Boolean = false) =
this(FileRecords.open(Log.logFile(dir, startOffset), fileAlreadyExists, initFileSize, preallocate),
new OffsetIndex(Log.offsetIndexFile(dir, startOffset), baseOffset = startOffset, maxIndexSize = maxIndexSize),
new TimeIndex(Log.timeIndexFile(dir, startOffset), baseOffset = startOffset, maxIndexSize = maxIndexSize),
new TransactionIndex(startOffset, Log.transactionIndexFile(dir, startOffset)),
startOffset,
indexIntervalBytes,
rollJitterMs,
time)
/* Return the size in bytes of this log segment */
def size: Int = log.sizeInBytes()
/**
* checks that the argument offset can be represented as an integer offset relative to the baseOffset.
*/
def canConvertToRelativeOffset(offset: Long): Boolean = {
(offset - baseOffset) <= Integer.MAX_VALUE
}
/**
* Append the given messages starting with the given offset. Add
* an entry to the index if needed.
*
* It is assumed this method is being called from within a lock.
*
* @param firstOffset The first offset in the message set.
* @param largestOffset The last offset in the message set
* @param largestTimestamp The largest timestamp in the message set.
* @param shallowOffsetOfMaxTimestamp The offset of the message that has the largest timestamp in the messages to append.
* @param records The log entries to append.
* @return the physical position in the file of the appended records
*/
@nonthreadsafe
def append(firstOffset: Long,
largestOffset: Long,
largestTimestamp: Long,
shallowOffsetOfMaxTimestamp: Long,
records: MemoryRecords): Unit = {
if (records.sizeInBytes > 0) {
trace("Inserting %d bytes at offset %d at position %d with largest timestamp %d at shallow offset %d"
.format(records.sizeInBytes, firstOffset, log.sizeInBytes(), largestTimestamp, shallowOffsetOfMaxTimestamp))
//physical position in the log file
val physicalPosition = log.sizeInBytes()
if (physicalPosition == 0)
rollingBasedTimestamp = Some(largestTimestamp)
// append the messages; boundary check on the largest offset first
require(canConvertToRelativeOffset(largestOffset), "largest offset in message set can not be safely converted to relative offset.")
//append the messages
val appendedBytes = log.append(records)
trace(s"Appended $appendedBytes to ${log.file()} at offset $firstOffset")
// Update the in memory max timestamp and corresponding offset.
if (largestTimestamp > maxTimestampSoFar) {
maxTimestampSoFar = largestTimestamp
offsetOfMaxTimestamp = shallowOffsetOfMaxTimestamp
}
// append an entry to the index (if needed)
if(bytesSinceLastIndexEntry > indexIntervalBytes) {
index.append(firstOffset, physicalPosition)
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp)
bytesSinceLastIndexEntry = 0
}
//update bytesSinceLastIndexEntry
bytesSinceLastIndexEntry += records.sizeInBytes
}
}
@nonthreadsafe
def updateTxnIndex(completedTxn: CompletedTxn, lastStableOffset: Long) {
if (completedTxn.isAborted) {
trace(s"Writing aborted transaction $completedTxn to transaction index, last stable offset is $lastStableOffset")
txnIndex.append(new AbortedTxn(completedTxn, lastStableOffset))
}
}
private def updateProducerState(producerStateManager: ProducerStateManager, batch: RecordBatch): Unit = {
if (batch.hasProducerId) {
val producerId = batch.producerId
val appendInfo = producerStateManager.prepareUpdate(producerId, loadingFromLog = true)
val maybeCompletedTxn = appendInfo.append(batch)
producerStateManager.update(appendInfo)
maybeCompletedTxn.foreach { completedTxn =>
val lastStableOffset = producerStateManager.completeTxn(completedTxn)
updateTxnIndex(completedTxn, lastStableOffset)
}
}
producerStateManager.updateMapEndOffset(batch.lastOffset + 1)
}
/**
* Find the physical file position for the first message with offset >= the requested offset.
*
* The startingFilePosition argument is an optimization that can be used if we already know a valid starting position
* in the file higher than the greatest-lower-bound from the index.
*
* @param offset The offset we want to translate
* @param startingFilePosition A lower bound on the file position from which to begin the search. This is purely an optimization and
* when omitted, the search will begin at the position in the offset index.
* @return The position in the log storing the message with the least offset >= the requested offset and the size of the
* message or null if no message meets this criteria.
*/
@threadsafe
private[log] def translateOffset(offset: Long, startingFilePosition: Int = 0): LogOffsetPosition = {
//look up the nearest index entry
val mapping = index.lookup(offset)
//scan the log file from the indexed position to find the target offset
log.searchForOffsetWithSize(offset, max(mapping.position, startingFilePosition))
}
/**
* Read a message set from this segment beginning with the first offset >= startOffset. The message set will include
* no more than maxSize bytes and will end before maxOffset if a maxOffset is specified.
*
* @param startOffset A lower bound on the first offset to include in the message set we read
* @param maxSize The maximum number of bytes to include in the message set we read
* @param maxOffset An optional maximum offset for the message set we read
* @param maxPosition The maximum position in the log segment that should be exposed for read
* @param minOneMessage If this is true, the first message will be returned even if it exceeds `maxSize` (if one exists)
*
* @return The fetched data and the offset metadata of the first message whose offset is >= startOffset,
* or null if the startOffset is larger than the largest offset in this log
*/
@threadsafe
//reads messages from this segment
//startOffset: the offset of the first message to read
//maxOffset: the upper bound on offsets to read
//maxSize: the maximum number of bytes to read
//maxPosition: the maximum physical position in the log to read up to
def read(startOffset: Long, maxOffset: Option[Long], maxSize: Int, maxPosition: Long = size,
minOneMessage: Boolean = false): FetchDataInfo = {
if (maxSize < 0)
throw new IllegalArgumentException("Invalid max size for log read (%d)".format(maxSize))
val logSize = log.sizeInBytes // this may change, need to save a consistent copy
//translate startOffset into a physical file position
val startOffsetAndSize = translateOffset(startOffset)
// if the start position is already off the end of the log, return null
if (startOffsetAndSize == null)
return null
val startPosition = startOffsetAndSize.position
val offsetMetadata = new LogOffsetMetadata(startOffset, this.baseOffset, startPosition)
val adjustedMaxSize =
if (minOneMessage) math.max(maxSize, startOffsetAndSize.size)
else maxSize
// return a log segment but with zero size in the case below
if (adjustedMaxSize == 0)
return FetchDataInfo(offsetMetadata, MemoryRecords.EMPTY)
// calculate the length of the message set to read based on whether or not they gave us a maxOffset
val fetchSize: Int = maxOffset match {
case None =>
// no max offset, just read until the max position
//maxOffset is empty, so maxPosition and adjustedMaxSize determine how much to read
min((maxPosition - startPosition).toInt, adjustedMaxSize)
case Some(offset) =>
// there is a max offset, translate it to a file position and use that to calculate the max read size;
// when the leader of a partition changes, it's possible for the new leader's high watermark to be less than the
// true high watermark in the previous leader for a short window. In this window, if a consumer fetches on an
// offset between new leader's high watermark and the log end offset, we want to return an empty response.
if (offset < startOffset)
return FetchDataInfo(offsetMetadata, MemoryRecords.EMPTY, firstEntryIncomplete = false)
val mapping = translateOffset(offset, startPosition)
val endPosition =
if (mapping == null)
logSize // the max offset is off the end of the log, use the end of the file
else
mapping.position
//number of bytes to read from the file
min(min(maxPosition, endPosition) - startPosition, adjustedMaxSize).toInt
}
//build a file slice from the start position and the computed length
FetchDataInfo(offsetMetadata, log.read(startPosition, fetchSize),
firstEntryIncomplete = adjustedMaxSize < startOffsetAndSize.size)
}
def fetchUpperBoundOffset(startOffsetPosition: OffsetPosition, fetchSize: Int): Option[Long] =
index.fetchUpperBoundOffset(startOffsetPosition, fetchSize).map(_.offset)
/**
* Run recovery on the given segment. This will rebuild the index from the log file and lop off any invalid bytes
* from the end of the log and index.
*
* @param producerStateManager Producer state corresponding to the segment's base offset. This is needed to recover
* the transaction index.
* @param leaderEpochCache Optionally a cache for updating the leader epoch during recovery.
* @return The number of bytes truncated from the log
*/
@nonthreadsafe
//Rebuilds the index files; compressed messages must be decompressed, and each index entry records the relative offset of the first inner message,
//while the outer wrapper message carries the offset of the last message in the compressed set
def recover(producerStateManager: ProducerStateManager, leaderEpochCache: Option[LeaderEpochCache] = None): Int = {
//clear the offset index
index.truncate()
//resize the index file
index.resize(index.maxIndexSize)
timeIndex.truncate()
timeIndex.resize(timeIndex.maxIndexSize)
txnIndex.truncate()
var validBytes = 0
var lastIndexEntry = 0
maxTimestampSoFar = RecordBatch.NO_TIMESTAMP
try {
for (batch <- log.batches.asScala) {
//verify that the batch is valid
batch.ensureValid()
// The max timestamp is exposed at the batch level, so no need to iterate the records
if (batch.maxTimestamp > maxTimestampSoFar) {
maxTimestampSoFar = batch.maxTimestamp
offsetOfMaxTimestamp = batch.lastOffset
}
// Build offset index: append an entry once enough valid bytes have accumulated
if(validBytes - lastIndexEntry > indexIntervalBytes) {
val startOffset = batch.baseOffset
//append an index entry
index.append(startOffset, validBytes)
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp)
//update lastIndexEntry
lastIndexEntry = validBytes
}
validBytes += batch.sizeInBytes()
if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) {
leaderEpochCache.foreach { cache =>
//avoids an unnecessary warning when cache.assign() is executed
if (batch.partitionLeaderEpoch > cache.latestEpoch()) // this is to avoid unnecessary warning in cache.assign()
cache.assign(batch.partitionLeaderEpoch, batch.baseOffset)
}
updateProducerState(producerStateManager, batch)
}
}
} catch {
case e: CorruptRecordException =>
logger.warn("Found invalid messages in log segment %s at byte offset %d: %s."
.format(log.file.getAbsolutePath, validBytes, e.getMessage))
}
val truncated = log.sizeInBytes - validBytes
if (truncated > 0)
logger.debug(s"Truncated $truncated invalid bytes at the end of segment ${log.file.getAbsoluteFile} during recovery")
//truncate the log file to the valid bytes
log.truncateTo(validBytes)
//trim the index file to its valid size
index.trimToValidSize()
// A normally closed segment always appends the biggest timestamp ever seen into log segment, we do this as well.
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp, skipFullCheck = true)
timeIndex.trimToValidSize()
//return the number of bytes truncated
truncated
}
private def loadLargestTimestamp() {
// Get the last time index entry. If the time index is empty, it will return (-1, baseOffset)
val lastTimeIndexEntry = timeIndex.lastEntry
maxTimestampSoFar = lastTimeIndexEntry.timestamp
offsetOfMaxTimestamp = lastTimeIndexEntry.offset
val offsetPosition = index.lookup(lastTimeIndexEntry.offset)
// Scan the rest of the messages to see if there is a larger timestamp after the last time index entry.
val maxTimestampOffsetAfterLastEntry = log.largestTimestampAfter(offsetPosition.position)
if (maxTimestampOffsetAfterLastEntry.timestamp > lastTimeIndexEntry.timestamp) {
maxTimestampSoFar = maxTimestampOffsetAfterLastEntry.timestamp
offsetOfMaxTimestamp = maxTimestampOffsetAfterLastEntry.offset
}
}
def collectAbortedTxns(fetchOffset: Long, upperBoundOffset: Long): TxnIndexSearchResult =
txnIndex.collectAbortedTxns(fetchOffset, upperBoundOffset)
override def toString = "LogSegment(baseOffset=" + baseOffset + ", size=" + size + ")"
/**
* Truncate off all index and log entries with offsets >= the given offset.
* If the given offset is larger than the largest message in this segment, do nothing.
*
* @param offset The offset to truncate to
* @return The number of log bytes truncated
*/
@nonthreadsafe
def truncateTo(offset: Long): Int = {
val mapping = translateOffset(offset)
if (mapping == null)
return 0
index.truncateTo(offset)
timeIndex.truncateTo(offset)
txnIndex.truncateTo(offset)
// after truncation, reset and allocate more space for the (new currently active) index
index.resize(index.maxIndexSize)
timeIndex.resize(timeIndex.maxIndexSize)
val bytesTruncated = log.truncateTo(mapping.position)
if(log.sizeInBytes == 0) {
created = time.milliseconds
rollingBasedTimestamp = None
}
bytesSinceLastIndexEntry = 0
if (maxTimestampSoFar >= 0)
loadLargestTimestamp()
bytesTruncated
}
/**
 * Calculate the offset that would be used for the next message to be appended to this segment.
* Note that this is expensive.
*/
@threadsafe
def nextOffset(): Long = {
val ms = read(index.lastOffset, None, log.sizeInBytes)
if (ms == null)
baseOffset
else
ms.records.batches.asScala.lastOption
.map(_.nextOffset)
.getOrElse(baseOffset)
}
/**
* Flush this log segment to disk
*/
@threadsafe
def flush() {
LogFlushStats.logFlushTimer.time {
log.flush()
index.flush()
timeIndex.flush()
txnIndex.flush()
}
}
/**
* Change the suffix for the index and log file for this log segment
*/
def changeFileSuffixes(oldSuffix: String, newSuffix: String) {
def kafkaStorageException(fileType: String, e: IOException) =
new KafkaStorageException(s"Failed to change the $fileType file suffix from $oldSuffix to $newSuffix for log segment $baseOffset", e)
try log.renameTo(new File(CoreUtils.replaceSuffix(log.file.getPath, oldSuffix, newSuffix)))
catch {
case e: IOException => throw kafkaStorageException("log", e)
}
try index.renameTo(new File(CoreUtils.replaceSuffix(index.file.getPath, oldSuffix, newSuffix)))
catch {
case e: IOException => throw kafkaStorageException("index", e)
}
try timeIndex.renameTo(new File(CoreUtils.replaceSuffix(timeIndex.file.getPath, oldSuffix, newSuffix)))
catch {
case e: IOException => throw kafkaStorageException("timeindex", e)
}
try txnIndex.renameTo(new File(CoreUtils.replaceSuffix(txnIndex.file.getPath, oldSuffix, newSuffix)))
catch {
case e: IOException => throw kafkaStorageException("txnindex", e)
}
}
/**
 * Append the largest time index entry to the time index when this log segment becomes the inactive segment.
* This entry will be used to decide when to delete the segment.
*/
def onBecomeInactiveSegment() {
timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp, skipFullCheck = true)
}
/**
* The time this segment has waited to be rolled.
* If the first message batch has a timestamp we use its timestamp to determine when to roll a segment. A segment
* is rolled if the difference between the new batch's timestamp and the first batch's timestamp exceeds the
* segment rolling time.
* If the first batch does not have a timestamp, we use the wall clock time to determine when to roll a segment. A
* segment is rolled if the difference between the current wall clock time and the segment create time exceeds the
* segment rolling time.
*/
def timeWaitedForRoll(now: Long, messageTimestamp: Long) : Long = {
// Load the timestamp of the first message into memory
if (rollingBasedTimestamp.isEmpty) {
val iter = log.batches.iterator()
if (iter.hasNext)
rollingBasedTimestamp = Some(iter.next().maxTimestamp)
}
rollingBasedTimestamp match {
case Some(t) if t >= 0 => messageTimestamp - t
case _ => now - created
}
}
/**
* Search the message offset based on timestamp and offset.
*
* This method returns an option of TimestampOffset. The returned value is determined using the following ordered list of rules:
*
* - If all the messages in the segment have smaller offsets, return None
* - If all the messages in the segment have smaller timestamps, return None
* - If all the messages in the segment have larger timestamps, or no message in the segment has a timestamp
 * the returned offset will be max(the base offset of the segment, startingOffset) and the timestamp will be Message.NoTimestamp.
* - Otherwise, return an option of TimestampOffset. The offset is the offset of the first message whose timestamp
 * is greater than or equal to the target timestamp and whose offset is greater than or equal to the startingOffset.
*
 * This method only returns None when 1) all messages' offset < startingOffset or 2) the log is not empty but we did not
* see any message when scanning the log from the indexed position. The latter could happen if the log is truncated
* after we get the indexed position but before we scan the log from there. In this case we simply return None and the
* caller will need to check on the truncated log and maybe retry or even do the search on another log segment.
*
* @param timestamp The timestamp to search for.
* @param startingOffset The starting offset to search.
* @return the timestamp and offset of the first message that meets the requirements. None will be returned if there is no such message.
*/
def findOffsetByTimestamp(timestamp: Long, startingOffset: Long = baseOffset): Option[TimestampOffset] = {
// Get the index entry with a timestamp less than or equal to the target timestamp
val timestampOffset = timeIndex.lookup(timestamp)
val position = index.lookup(math.max(timestampOffset.offset, startingOffset)).position
// Search the timestamp
Option(log.searchForTimestamp(timestamp, position, startingOffset)).map { timestampAndOffset =>
TimestampOffset(timestampAndOffset.timestamp, timestampAndOffset.offset)
}
}
/**
* Close this log segment
*/
def close() {
CoreUtils.swallow(timeIndex.maybeAppend(maxTimestampSoFar, offsetOfMaxTimestamp, skipFullCheck = true))
CoreUtils.swallow(index.close())
CoreUtils.swallow(timeIndex.close())
CoreUtils.swallow(log.close())
CoreUtils.swallow(txnIndex.close())
}
/**
* Delete this log segment from the filesystem.
*
* @throws KafkaStorageException if the delete fails.
*/
def delete() {
val deletedLog = log.delete()
val deletedIndex = index.delete()
val deletedTimeIndex = timeIndex.delete()
val deletedTxnIndex = txnIndex.delete()
if (!deletedLog && log.file.exists)
throw new KafkaStorageException("Delete of log " + log.file.getName + " failed.")
if (!deletedIndex && index.file.exists)
throw new KafkaStorageException("Delete of index " + index.file.getName + " failed.")
if (!deletedTimeIndex && timeIndex.file.exists)
throw new KafkaStorageException("Delete of time index " + timeIndex.file.getName + " failed.")
if (!deletedTxnIndex && txnIndex.file.exists)
throw new KafkaStorageException("Delete of transaction index " + txnIndex.file.getName + " failed.")
}
/**
* The last modified time of this log segment as a unix time stamp
*/
def lastModified = log.file.lastModified
/**
* The largest timestamp this segment contains.
*/
def largestTimestamp = if (maxTimestampSoFar >= 0) maxTimestampSoFar else lastModified
/**
* Change the last modified time for this log segment
*/
def lastModified_=(ms: Long) = {
val fileTime = FileTime.fromMillis(ms)
Files.setLastModifiedTime(log.file.toPath, fileTime)
Files.setLastModifiedTime(index.file.toPath, fileTime)
Files.setLastModifiedTime(timeIndex.file.toPath, fileTime)
}
}
object LogFlushStats extends KafkaMetricsGroup {
val logFlushTimer = new KafkaTimer(newTimer("LogFlushRateAndTimeMs", TimeUnit.MILLISECONDS, TimeUnit.SECONDS))
}
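/**
 * Illustrative sketch only, not part of Kafka: the LogSegment scaladoc above describes a segment
 * being stored as a [base_offset].log / [base_offset].index file pair. The helper below renders
 * that naming convention with the base offset zero-padded; the 20-digit width is an assumption for
 * illustration, and the real file names come from Log.logFile / Log.offsetIndexFile.
 */
object LogSegmentNamingExample {
  // e.g. segmentFileName(368769, "log") == "00000000000000368769.log"
  def segmentFileName(baseOffset: Long, suffix: String): String =
    f"$baseOffset%020d.$suffix"
}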
|
YMCoding/kafka-0.11.0.0-src-with-comment
|
core/src/main/scala/kafka/log/LogSegment.scala
|
Scala
|
apache-2.0
| 25,057 |
package mesosphere
import com.wix.accord.Descriptions.{Generic, Path}
import com.wix.accord._
import mesosphere.marathon.Normalization
import mesosphere.marathon.ValidationFailedException
import mesosphere.marathon.api.v2.Validation
import mesosphere.marathon.api.v2.Validation.ConstraintViolation
import org.scalatest._
import org.scalatest.matchers.{BePropertyMatchResult, BePropertyMatcher, MatchResult, Matcher}
import play.api.libs.json.{Format, JsError, Json}
import scala.collection.breakOut
/**
* Provides a set of scalatest matchers for use when testing validation.
*
* Wix Accord does provide matchers in import com.wix.accord.scalatest.ResultMatchers; however, the interface and the
* output of these matchers is not as friendly as we would prefer.
*/
trait ValidationTestLike extends Validation {
this: Assertions =>
private def jsErrorToFailure(error: JsError): Failure = Failure(
error.errors.flatMap {
case (path, validationErrors) =>
validationErrors.map { validationError =>
RuleViolation(
validationError.args.mkString(", "),
validationError.message,
path = Path(path.toString.split("/").filter(_ != "").map(Generic(_)): _*))
}
}(breakOut)
)
/**
* Validator which takes an object, serializes it to JSON, and then parses it back, allowing it to test validations
* specified in our RAML layer
*/
def roundTripValidator[T](underlyingValidator: Option[Validator[T]])(implicit format: Format[T]) = new Validator[T] {
override def apply(obj: T) = {
Json.fromJson[T](Json.toJson(obj)) match {
case err: JsError =>
jsErrorToFailure(err)
case obj => underlyingValidator.map { _(obj.get) } getOrElse Success
}
}
}
protected implicit val normalizeResult: Normalization[Result] = Normalization {
// normalize failures => human readable error messages
case f: Failure => f
case x => x
}
def withValidationClue[T](f: => T): T = scala.util.Try { f }.recover {
// handle RAML validation errors
case vfe: ValidationFailedException => fail(vfe.failure.violations.toString())
case th => throw th
}.get
private def describeViolation(c: ConstraintViolation) =
s"""- "${c.path}" -> "${c.constraint}""""
case class haveViolations(expectedViolations: (String, String)*) extends Matcher[Result] {
val expectedConstraintViolations = expectedViolations.map(ConstraintViolation.tupled)
override def apply(result: Result): MatchResult = {
result match {
case Success =>
MatchResult(
matches = false,
"Validation succeeded, had no violations",
"" /* This MatchResult is explicitly false; negated failure does not apply */ )
case f: Failure =>
val violations = Validation.allViolations(f)
val matches = expectedConstraintViolations.forall { e => violations contains e }
MatchResult(
matches,
s"""Validation failed, but expected violation not in actual violation set
| Expected:
| ${expectedConstraintViolations.map(describeViolation).mkString("\\n ")}
| All violations:
| ${violations.map(describeViolation).mkString("\\n ")}
|""".stripMargin.trim,
s"""Validation failed, but expected violations were in actual violation set
| Expected:
| ${expectedConstraintViolations.map(describeViolation).mkString("\\n ")}
| All violations:
| ${violations.map(describeViolation).mkString("\\n ")}
|""".stripMargin.trim)
}
}
}
object aSuccess extends BePropertyMatcher[Result] {
override def apply(result: Result): BePropertyMatchResult = {
result match {
case Success =>
BePropertyMatchResult(true, "Expected a failure, got success")
case f: Failure =>
val violations = Validation.allViolations(f)
BePropertyMatchResult(
false,
s"""Validation failed, but expected success
| All violations:
| ${violations.map(describeViolation).mkString("\\n ")}
|""".stripMargin.trim)
}
}
}
}
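// Hedged usage sketch, not part of the original file: a suite mixing in ValidationTestLike would
// typically combine roundTripValidator with the matchers defined above, along these lines (the
// validator, the validated objects and the violation path/constraint strings are illustrative
// assumptions only):
//
//   val validator = roundTripValidator(Some(appValidator))
//   validator(appWithEmptyId) should haveViolations("/id" -> "must not be empty")
//   validator(validApp) shouldBe aSuccess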
|
gsantovena/marathon
|
src/test/scala/mesosphere/ValidationTestLike.scala
|
Scala
|
apache-2.0
| 4,310 |
package uber.nosurge.constants
import shared.Location
object Endpoints {
def estimate(startLocation: Location, endLocation: Location): String = {
s"/estimates?startLatitude=${startLocation.latitude}&startLongitude=${startLocation.longitude}&endLatitude=${endLocation.latitude}&endLongitude=${endLocation.longitude}"
}
def subscription: String = {
s"/subscription"
}
def subscription(token: String) = {
s"/subscription?token=$token"
}
}
|
allantl/uber-nosurge-notifications
|
frontend/src/main/scala/uber/nosurge/constants/Endpoints.scala
|
Scala
|
apache-2.0
| 465 |
package uk.ac.ncl.openlab.intake24.foodsql.admin
import javax.sql.DataSource
import anorm.{AnormUtil, BatchSql, Macro, NamedParameter, SQL, SqlParser, sqlToSimple}
import com.google.inject.name.Named
import com.google.inject.{Inject, Singleton}
import org.apache.commons.lang3.StringUtils
import uk.ac.ncl.openlab.intake24.errors.{LookupError, RecordNotFound, UnexpectedDatabaseError}
import uk.ac.ncl.openlab.intake24.services.fooddb.admin.{NutrientTablesAdminService, SingleNutrientTypeUpdate}
import uk.ac.ncl.openlab.intake24.sql.SqlDataService
import uk.ac.ncl.openlab.intake24.{NewNutrientTableRecord, NutrientTable, NutrientTableRecord}
@Singleton
class NutrientTablesAdminImpl @Inject()(@Named("intake24_foods") val dataSource: DataSource) extends NutrientTablesAdminService with SqlDataService {
private case class NutrientTableDescRow(id: String, description: String) {
def asNutrientTable = NutrientTable(id, description)
}
private case class NutrientTableRecordRow(id: String, nutrient_table_id: String, english_description: String, local_description: Option[String]) {
def toNutrientTableRecord = NutrientTableRecord(id, nutrient_table_id, english_description, local_description)
}
def listNutrientTables(): Either[UnexpectedDatabaseError, Map[String, NutrientTable]] = tryWithConnection {
implicit conn =>
val query = "SELECT id, description FROM nutrient_tables"
Right(SQL(query).executeQuery().as(Macro.namedParser[NutrientTableDescRow].*).foldLeft(Map[String, NutrientTable]()) {
(result, row) => result + (row.id -> row.asNutrientTable)
})
}
def searchNutrientTableRecords(nutrientTableId: String, query: Option[String]): Either[UnexpectedDatabaseError, Seq[NutrientTableRecord]] = tryWithConnection {
implicit conn =>
val sqlQuery =
"""
|SELECT id, nutrient_table_id, english_description, local_description
|FROM nutrient_table_records
|WHERE nutrient_table_id = {nutrient_table_id} AND
|(id ILIKE {query} OR english_description ILIKE {query} OR local_description ILIKE {query})
|ORDER BY id LIMIT {limit};
""".stripMargin
val result = SQL(sqlQuery).on(
'nutrient_table_id -> nutrientTableId,
'query -> s"%${AnormUtil.escapeLike(StringUtils.stripAccents(query.getOrElse("")))}%",
'limit -> 200
).executeQuery().as(Macro.namedParser[NutrientTableRecordRow].*).map(_.toNutrientTableRecord)
Right(result)
}
def getNutrientTable(id: String): Either[LookupError, NutrientTable] = tryWithConnection {
implicit conn =>
var query = """SELECT id, description FROM nutrient_tables WHERE id = {id} ORDER BY english_name"""
SQL(query).on('id -> id).executeQuery().as(Macro.namedParser[NutrientTable].singleOpt) match {
case Some(table) => Right(table)
case None => Left(RecordNotFound(new RuntimeException(id)))
}
}
def createNutrientTable(data: NutrientTable): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
var query = """INSERT INTO nutrient_tables VALUES({id}, {description})"""
SQL(query).on('id -> data.id, 'description -> data.description).execute()
Right(())
}
def createOrUpdateNutrientTable(data: NutrientTable): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
var query = """INSERT INTO nutrient_tables VALUES({id}, {description}) ON CONFLICT ON CONSTRAINT nutrient_tables_pk DO UPDATE SET description=EXCLUDED.description"""
SQL(query).on('id -> data.id, 'description -> data.description).execute()
Right(())
}
def updateNutrientTable(id: String, data: NutrientTable): Either[LookupError, Unit] = tryWithConnection {
implicit conn =>
var query = """UPDATE nutrient_tables SET id={new_id}, description={description} WHERE id = {id}"""
val affectedRows = SQL(query).on('id -> id, 'new_id -> data.id, 'description -> data.description).executeUpdate()
if (affectedRows == 0)
Left(RecordNotFound(new RuntimeException(id)))
else
Right(())
}
def deleteNutrientTable(id: String): Either[LookupError, Unit] = tryWithConnection {
implicit conn =>
val query = """DELETE FROM nutrient_tables WHERE id={id}"""
val affectedRows = SQL(query).on('id -> id).executeUpdate()
if (affectedRows == 0)
Left(RecordNotFound(new RuntimeException(id)))
else
Right(())
}
def deleteAllNutrientTables(): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
SQL("DELETE FROM nutrient_tables").execute()
Right(())
}
private val nutrientsInsertQuery = "INSERT INTO nutrient_table_records_nutrients VALUES({record_id},{nutrient_table_id},{nutrient_type_id},{units_per_100g})"
def createNutrientTableRecords(records: Seq[NewNutrientTableRecord]): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
withTransaction {
val recordQuery = """INSERT INTO nutrient_table_records VALUES({id},{nutrient_table_id},{english_description},{local_description})"""
val recordParams =
records.map(r => Seq[NamedParameter]('id -> r.id, 'nutrient_table_id -> r.nutrientTableId, 'english_description -> r.description, 'local_description -> r.localDescription))
val nutrientParams =
records.flatMap {
record =>
record.nutrients.map {
case (nutrientType, unitsPer100g) =>
Seq[NamedParameter]('record_id -> record.id, 'nutrient_table_id -> record.nutrientTableId, 'nutrient_type_id -> nutrientType, 'units_per_100g -> unitsPer100g)
}
}
batchSql(recordQuery, recordParams).execute()
batchSql(nutrientsInsertQuery, nutrientParams).execute()
Right(())
}
}
def createOrUpdateNutrientTableRecords(records: Seq[NewNutrientTableRecord]): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
if (records.nonEmpty) {
withTransaction {
val recordUpsertQuery =
"""INSERT INTO nutrient_table_records VALUES({id},{nutrient_table_id},{english_description},{local_description})
|ON CONFLICT ON CONSTRAINT nutrient_table_records_pk DO UPDATE SET english_description=EXCLUDED.english_description, local_description=EXCLUDED.local_description""".stripMargin
val recordParams =
records.map(r => Seq[NamedParameter]('id -> r.id, 'nutrient_table_id -> r.nutrientTableId, 'english_description -> r.description, 'local_description -> r.localDescription))
BatchSql(recordUpsertQuery, recordParams.head, recordParams.tail: _*).execute()
val nutrientDeleteParams = records.map {
record =>
Seq[NamedParameter]('table_id -> record.nutrientTableId, 'record_id -> record.id)
}
BatchSql("DELETE FROM nutrient_table_records_nutrients WHERE nutrient_table_id={table_id} AND nutrient_table_record_id={record_id}", nutrientDeleteParams.head, nutrientDeleteParams.tail: _*).execute()
val nutrientUpdateParams =
records.flatMap {
record =>
record.nutrients.map {
case (nutrientType, unitsPer100g) =>
Seq[NamedParameter]('record_id -> record.id, 'nutrient_table_id -> record.nutrientTableId, 'nutrient_type_id -> nutrientType, 'units_per_100g -> unitsPer100g)
}
}
BatchSql(nutrientsInsertQuery, nutrientUpdateParams.head, nutrientUpdateParams.tail: _*).execute()
Right(())
}
} else Right(())
}
def updateSingleNutrientType(nutrientTableId: String, nutrientTypeId: Long, updates: Seq[SingleNutrientTypeUpdate]): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
withTransaction {
val (update, delete) = updates.partition(_.newValue.isDefined)
val deleteParams = delete.map {
record =>
Seq[NamedParameter]('table_id -> nutrientTableId, 'nutrient_type_id -> nutrientTypeId, 'record_id -> record.nutrientTableRecordId)
}
if (deleteParams.nonEmpty) {
val deleteQuery = "DELETE FROM nutrient_table_records_nutrients WHERE nutrient_table_id={table_id} AND nutrient_table_record_id={record_id} AND nutrient_type_id={nutrient_type_id}"
BatchSql(deleteQuery, deleteParams.head, deleteParams.tail: _*).execute()
}
val updateParams = update.map {
record =>
Seq[NamedParameter]('table_id -> nutrientTableId, 'nutrient_type_id -> nutrientTypeId, 'record_id -> record.nutrientTableRecordId, 'units_per_100g -> record.newValue.get)
}
if (updateParams.nonEmpty) {
val nutrientUpsertQuery =
"""INSERT INTO nutrient_table_records_nutrients VALUES({record_id}, {table_id}, {nutrient_type_id}, {units_per_100g})
|ON CONFLICT ON CONSTRAINT nutrient_table_records_nutrients_pk DO UPDATE SET units_per_100g=EXCLUDED.units_per_100g""".stripMargin
BatchSql(nutrientUpsertQuery, updateParams.head, updateParams.tail: _*).execute()
}
Right(())
}
}
def updateNutrientTableRecords(records: Seq[NewNutrientTableRecord]): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
withTransaction {
if (records.nonEmpty) {
val deleteParams = records.map {
record =>
Seq[NamedParameter]('table_id -> record.nutrientTableId, 'record_id -> record.id)
}
BatchSql("DELETE FROM nutrient_table_records_nutrients WHERE nutrient_table_id={table_id} AND nutrient_table_record_id={record_id}", deleteParams.head, deleteParams.tail: _*).execute()
val insertParams = records.flatMap {
record =>
record.nutrients.map {
case (nutrientType, unitsPer100g) =>
Seq[NamedParameter]('record_id -> record.id, 'nutrient_table_id -> record.nutrientTableId, 'nutrient_type_id -> nutrientType, 'units_per_100g -> unitsPer100g)
}
}
BatchSql(nutrientsInsertQuery, insertParams.head, insertParams.tail: _*).execute()
}
Right(())
}
}
def updateNutrientTableRecordDescriptions(nutrients: Seq[NutrientTableRecord]): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
val namedParameters = nutrients.map(n =>
Seq[NamedParameter]('id -> n.id, 'nutrient_table_id -> n.nutrientTableId,
'english_description -> n.description, 'local_description -> n.localDescription))
val sqlQuery =
"""
|UPDATE nutrient_table_records
|SET english_description = {english_description}, local_description = {local_description}
|WHERE id = {id} AND nutrient_table_id = {nutrient_table_id};
""".stripMargin
BatchSql(sqlQuery, namedParameters.head, namedParameters.tail: _*).execute()
Right(())
}
def getNutrientTableRecordIds(nutrientTableId: String): Either[UnexpectedDatabaseError, Seq[String]] = tryWithConnection {
implicit conn =>
Right(SQL("SELECT id FROM nutrient_table_records WHERE nutrient_table_id={nutrient_table_id}")
.on('nutrient_table_id -> nutrientTableId)
.as(SqlParser.str("id").*))
}
}
|
digitalinteraction/intake24
|
FoodDataSQL/src/main/scala/uk/ac/ncl/openlab/intake24/foodsql/admin/NutrientTablesAdminImpl.scala
|
Scala
|
apache-2.0
| 11,503 |
package com.rasterfoundry.datamodel
import cats.implicits._
import io.circe._
sealed abstract class TaskType(val repr: String) {
override def toString = repr
}
object TaskType {
case object Label extends TaskType("LABEL")
case object Review extends TaskType("REVIEW")
def fromString(s: String): TaskType = s.toUpperCase match {
case "LABEL" => Label
case "REVIEW" => Review
}
implicit val taskTypeEncoder: Encoder[TaskType] =
Encoder.encodeString.contramap[TaskType](_.toString)
implicit val taskTypeDecoder: Decoder[TaskType] =
Decoder.decodeString.emap { str =>
Either.catchNonFatal(fromString(str)).leftMap(_ => "TaskType")
}
implicit val taskTypeKeyEncoder: KeyEncoder[TaskType] =
new KeyEncoder[TaskType] {
override def apply(taskType: TaskType): String = taskType.toString
}
}
|
raster-foundry/raster-foundry
|
app-backend/datamodel/src/main/scala/TaskType.scala
|
Scala
|
apache-2.0
| 847 |
package sbt.testing
/**
* A way to identify test classes and/or modules that should
* be discovered when the client performs discovery.
*
* Scala.js: Implementations may not rely on the identity of Fingerprints,
* since they are serialized between JS / JVM.
*/
trait Fingerprint
/**
* Indicates that classes or modules with a specific annotation, either on at least one top level
* method or on the class or module itself, should be discovered as test classes.
*/
trait AnnotatedFingerprint extends Fingerprint {
/**
* Indicates whether modules with the annotation should
* be considered during discovery, or just classes.
*
* <p>
* If a test framework allows both classes and modules, they should return two different
* fingerprints from <code>Framework.fingerprints</code>, one that returns <code>false</code> for
* <code>isModule</code> and another that returns <code>true</code>.
* </p>
*/
def isModule(): Boolean
/**
* The fully qualified name of the annotation that identifies classes or modules as test
* classes or modules to be discovered.
*/
def annotationName(): String
}
/**
* Indicates that classes (and possibly modules) that extend a particular superclass,
* or mix in a particular supertrait, should be discovered as test classes.
*/
trait SubclassFingerprint extends Fingerprint {
/**
* Indicates whether modules (singleton objects) that extend the superclass or
* supertrait should be considered during discovery, or just classes.
*
* <p>
* If modules are not allowed by the test framework, they should return <code>false</code> for
* <code>isModule</code>. Returning <code>false</code> will speed up discovery because
* classes for modules can be quickly bypassed.
* </p>
*/
def isModule(): Boolean
/**
* The name of the superclass or supertrait that identifies classes (and possibly modules) as test
* classes to be discovered.
*/
def superclassName(): String
/**
* Indicates whether discovered classes must have a no-arg constructor.
*
* <p>
* If this method returns <code>true</code>, the client should not discover any subclass of
* the given <code>superClassName</code> that does not declare a no-arg constructor, <em>i.e.</em>,
* a constructor that takes no arguments.
* </p>
*/
def requireNoArgConstructor(): Boolean
}
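/**
 * Illustrative sketch, not part of the sbt test-interface API: a hypothetical framework whose test
 * suites extend a base class named "mytest.Suite" could advertise them for discovery with a
 * fingerprint like the one below. All names here are assumptions for illustration only.
 */
object ExampleSuiteFingerprint extends SubclassFingerprint {
  // discover plain classes only, not singleton objects
  def isModule(): Boolean = false
  // classes extending this base class are picked up as test suites
  def superclassName(): String = "mytest.Suite"
  // the runner instantiates suites reflectively, so a no-arg constructor is required
  def requireNoArgConstructor(): Boolean = true
}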
|
colinrgodsey/scala-js
|
test-interface/src/main/scala/sbt/testing/Fingerprints.scala
|
Scala
|
bsd-3-clause
| 2,380 |
def fmap[A, B](f: A => B)(ca: Const[C, A]): Const[C, B]
|
hmemcpy/milewski-ctfp-pdf
|
src/content/1.7/code/scala/snippet27.scala
|
Scala
|
gpl-3.0
| 55 |
/*
active-learning-scala: Active Learning library for Scala
Copyright (c) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package al.strategies
import ml.Pattern
import ml.classifiers.Learner
import ml.models.Model
case class Margin(learner: Learner, pool: Seq[Pattern], debug: Boolean = false)
extends StrategyWithLearner with MarginMeasure {
override val toString = "Margin"
val abr = "Mar"
val id = 3
def next(current_model: Model, unlabeled: Seq[Pattern], labeled: Seq[Pattern]) = {
unlabeled minBy margin(current_model)
}
}
|
active-learning/active-learning-scala
|
src/main/scala/al/strategies/Margin.scala
|
Scala
|
gpl-2.0
| 1,179 |
import sbt._
import Keys._
object BuildSettings {
// Basic settings for our app
lazy val basicSettings = Seq[Setting[_]](
//format: OFF
organization := "org.scalario",
mainClass := Some("org.scalario.Boot"),
version := "1.1",
description := "a server for scalariform",
scalaVersion := "2.10.5",
scalacOptions := Seq("-deprecation", "-encoding", "utf8",
"-unchecked", "-feature", "-target:jvm-1.7"),
scalacOptions in Test := Seq("-Yrangepos"),
maxErrors := 5,
// http://www.scala-sbt.org/0.13.0/docs/Detailed-Topics/Forking.html
fork in run := true,
resolvers ++= Dependencies.resolutionRepos
)
//format: ON
import com.typesafe.sbt.SbtScalariform
import com.typesafe.sbt.SbtScalariform.ScalariformKeys
lazy val formatSettings = SbtScalariform.scalariformSettings ++ Seq(
ScalariformKeys.preferences in Compile := formattingPreferences,
ScalariformKeys.preferences in Test := formattingPreferences)
import scalariform.formatter.preferences._
def formattingPreferences =
FormattingPreferences()
.setPreference(RewriteArrowSymbols, true)
.setPreference(AlignParameters, true)
.setPreference(AlignSingleLineCaseStatements, true)
.setPreference(DoubleIndentClassDeclaration, true)
lazy val buildSettings = basicSettings ++ formatSettings
}
|
ferrlin/scalario
|
project/BuildSettings.scala
|
Scala
|
mit
| 1,482 |
package com.rarebooks.library
class CatalogSpec extends BaseSpec {
import Catalog._
import RareBooksProtocol._
"books" should {
"contain theEpicOfGilgamesh, phaedrus and theHistories" in {
books.values.toSet should === (Set[BookCard](theEpicOfGilgamesh, phaedrus, theHistories))
}
}
"findBookByIsbn" should {
"return theEpicOfGilgamesh" in {
findBookByIsbn(theEpicOfGilgamesh.isbn) should === (Some[List[BookCard]](List(theEpicOfGilgamesh)))
}
"return phaedrus" in {
findBookByIsbn(phaedrus.isbn) should === (Some[List[BookCard]](List(phaedrus)))
}
"return theHistories" in {
findBookByIsbn(theHistories.isbn) should === (Some[List[BookCard]](List(theHistories)))
}
"return None when isbn not found" in {
findBookByIsbn("1234567890") should === (None)
}
}
"findBookByAuthor" should {
"return theEpicOfGilgamesh" in {
findBookByAuthor(theEpicOfGilgamesh.author) should === (Some[List[BookCard]](List(theEpicOfGilgamesh)))
}
"return phaedrus" in {
findBookByAuthor(phaedrus.author) should === (Some[List[BookCard]](List(phaedrus)))
}
"return theHistories" in {
findBookByAuthor(theHistories.author) should === (Some[List[BookCard]](List(theHistories)))
}
"return None when author not found" in {
findBookByAuthor("Michael Crichton") should === (None)
}
}
"findBookByTitle" should {
"return theEpicOfGilgamesh" in {
findBookByTitle(theEpicOfGilgamesh.title) should === (Some[List[BookCard]](List(theEpicOfGilgamesh)))
}
"return phaedrus" in {
findBookByTitle(phaedrus.title) should === (Some[List[BookCard]](List(phaedrus)))
}
"return theHistories" in {
findBookByTitle(theHistories.title) should === (Some[List[BookCard]](List(theHistories)))
}
"return None when title not found" in {
findBookByTitle("Swiss Family Robinson") should === (None)
}
}
"findBookByTopic" should {
"for Gilgamesh, Persia and Royalty, return theEpicOfGilgamesh" in {
findBookByTopic(Set(Gilgamesh, Persia, Royalty)) should === (Some[List[BookCard]](List(theEpicOfGilgamesh)))
}
"for Philosophy, return phaedrus" in {
findBookByTopic(Set(Philosophy)) should === (Some[List[BookCard]](List(phaedrus)))
}
"for Greece and Philosophy, return phaedrus and theHistories" in {
findBookByTopic(Set(Greece, Philosophy)) should === (Some[List[BookCard]](List(phaedrus, theHistories)))
}
"for Africa, Asia, Greece and Tradition, return phaedrus and theHistories" in {
findBookByTopic(Set(Africa, Asia, Greece, Tradition)) should === (Some[List[BookCard]](List(phaedrus, theHistories)))
}
"for Tradition, return theHistories" in {
findBookByTopic(Set(Tradition)) should === (Some[List[BookCard]](List(theHistories)))
}
"return None when tag not found" in {
findBookByTopic(Set(Unknown)) should === (None)
}
}
}
|
ironfish/reactive-application-development-scala
|
chapter4_004_resilience/src/test/scala/com/rarebooks/library/CatalogSpec.scala
|
Scala
|
apache-2.0
| 2,969 |
object Test {
class A {
object foo {
}
def foo(x: String) = 1
}
val a = new A
a./* line: 7 */foo("")
}
|
ilinum/intellij-scala
|
testdata/resolve2/overloading/hardOverloadings/FunctionObject.scala
|
Scala
|
apache-2.0
| 126 |
package mr.merc.image
import scalafx.scene.image.Image
class LazyMImage private [image] (path:String, xOffset:Int, yOffset:Int, alpha:Float) extends MImage(xOffset, yOffset, alpha){
@volatile
lazy val image: Image = MImageCache.get(path)
def imagePath: Some[String] = Some(path)
override def changeAlpha(newAlpha:Float) = new LazyMImage(path, xOffset, yOffset, newAlpha)
}
|
RenualdMarch/merc
|
src/main/scala/mr/merc/image/LazyMImage.scala
|
Scala
|
gpl-3.0
| 386 |
/** Copyright 2009 Steve Jenson under the Apache 2.0 License */
package com.saladwithsteve.mailslot
import com.twitter.commons.Stats.Counter
import com.twitter.commons.Stats.Timing
import java.util.concurrent.atomic.AtomicInteger
object MailStats {
val bytesWritten = new Counter
val totalSessions = new AtomicInteger(0)
val closedSessions = new Counter
val sessionErrors = new Counter
val mailRouterLatency = new Timing
}
|
stevej/mailslot
|
src/main/scala/com/saladwithsteve/mailslot/MailStats.scala
|
Scala
|
apache-2.0
| 435 |
package de.sciss.fscape
package tests
import de.sciss.file._
import de.sciss.filecache.Limit
import de.sciss.fscape.Ops._
import de.sciss.fscape.lucre.Cache
import de.sciss.lucre.DoubleObj
import de.sciss.lucre.synth.InMemory
import de.sciss.proc.FScape.Output
import de.sciss.proc.{FScape, GenView, Universe}
import scala.concurrent.stm.Ref
import scala.util.{Failure, Success}
object DoubleOutputTest extends App {
type S = InMemory
type T = InMemory.Txn
implicit val cursor: S = InMemory()
FScape.init()
GenView.addFactory(FScape.genViewFactory())
val folder = userHome / "Documents" / "temp" / "fscape_test" // File.createTemp(directory = true)
folder.mkdir()
Cache.init(folder = folder, capacity = Limit())
cursor.step { implicit tx =>
val f = FScape[T]()
val g = Graph {
import graph._
import lucre.graph._
1.poll(0, label = "rendering")
val value = WhiteNoise(100).take(100000000).last
MkDouble("out-1", value)
MkDouble("out-2", value + 1)
}
val out1 = f.outputs.add("out-1", DoubleObj)
val out2 = f.outputs.add("out-2", DoubleObj)
f.graph() = g
val count = Ref(0)
implicit val universe: Universe[T] = Universe.dummy
def mkView(out: Output[T], idx: Int): GenView[T] = {
val view = GenView(out)
import de.sciss.lucre.Txn.peer
view.reactNow { implicit tx => upd =>
if (upd.isComplete) {
view.value.foreach { value =>
value match {
case Success(v) =>
println(s"Value ${idx + 1} is now $v")
case Failure(ex) =>
println(s"Value ${idx + 1} failed:")
ex.printStackTrace()
}
if (count.transformAndGet(_ + 1) == 2) tx.afterCommit(sys.exit())
}
}
}
view
}
/* val view1 = */ mkView(out1, idx = 0)
/* val view2 = */ mkView(out2, idx = 1)
new Thread {
override def run(): Unit = Thread.sleep(Long.MaxValue)
start()
}
}
}
|
Sciss/FScape-next
|
lucre/jvm/src/test/scala/de/sciss/fscape/tests/DoubleOutputTest.scala
|
Scala
|
agpl-3.0
| 2,063 |
package models
import org.opencompare.api.java.PCMContainer
/**
* Created by gbecan on 12/12/14.
*/
class DatabasePCM(val id : Option[String], var pcmContainer : Option[PCMContainer]) {
}
|
OpenCompare/OpenCompare
|
org.opencompare/play-app/app/models/DatabasePCM.scala
|
Scala
|
apache-2.0
| 193 |
/*
* Copyright 2015 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.pattern.timeoutpolicy
import org.scalatest.{FlatSpecLike, Matchers}
class TimeoutRuleConversionsSpec extends FlatSpecLike with Matchers{
"Implicit Conversions" should "work" in {
(1 sigma).asInstanceOf[SigmaTimeoutRule].unit should be(1.0)
(1 σ).asInstanceOf[SigmaTimeoutRule].unit should be(1.0)
// around 2.99
((99.7 percent).asInstanceOf[SigmaTimeoutRule].unit * 10).round should be(30)
((99.7 `%ile`).asInstanceOf[SigmaTimeoutRule].unit * 10).round should be(30)
((99.7 percentile).asInstanceOf[SigmaTimeoutRule].unit * 10).round should be(30)
fixedRule should be(FixedTimeoutRule)
}
}
|
keshin/squbs
|
squbs-pattern/src/test/scala/org/squbs/pattern/timeoutpolicy/TimeoutRuleConversionsSpec.scala
|
Scala
|
apache-2.0
| 1,244 |
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.pid
import java.io.ByteArrayOutputStream
import better.files.File
import nl.knaw.dans.easy.pid.fixture.{ ConfigurationSupportFixture, CustomMatchers, TestSupportFixture }
class ReadmeSpec extends TestSupportFixture with CustomMatchers with ConfigurationSupportFixture {
// private val configuration = Configuration(Paths.get("src/main/assembly/dist"))
private val clo = new CommandLineOptions(Array[String](), configuration) {
// avoids System.exit() in case of invalid arguments or "--help"
override def verify(): Unit = {
verified = true
}
}
private val helpInfo = {
val mockedStdOut = new ByteArrayOutputStream()
Console.withOut(mockedStdOut) {
clo.printHelp()
}
mockedStdOut.toString
}
"options in help info" should "be part of README.md" in {
val lineSeparators = s"(${ System.lineSeparator() })+"
val options = helpInfo.split(s"${ lineSeparators }Options:$lineSeparators")(1)
options.trim.length shouldNot be(0)
File("docs/index.md") should containTrimmed(options)
}
"synopsis in help info" should "be part of README.md" in {
File("docs/index.md") should containTrimmed(clo.synopsis)
}
"description line(s) in help info" should "be part of README.md and pom.xml" in {
File("docs/index.md") should containTrimmed(clo.description)
File("README.md") should containTrimmed(clo.description)
File("pom.xml") should containTrimmed(clo.description)
}
"synopsis" should "list all subcommands" in {
clo.subcommands.map(_.printedName)
.foreach(clo.synopsis should include(_))
}
}
|
DANS-KNAW/easy-pid-generator
|
src/test/scala/nl/knaw/dans/easy/pid/ReadmeSpec.scala
|
Scala
|
apache-2.0
| 2,268 |
package service
import api.{LoginRequest, LoginResponse}
import org.slf4j.LoggerFactory
import play.api.libs.json.{JsError, JsSuccess, Json}
import utils.TokenManager
import scala.concurrent.Future
/**
* Created by rois on 10/02/2017.
*/
object LoginService extends Service[LoginRequest, LoginResponse] {
val logger = LoggerFactory.getLogger(getClass)
override def handle(request: LoginRequest): Future[LoginResponse] = {
isNickNameAlreadyExists(request.nickName) match {
case true => Future.successful(LoginResponse(false,None))
case false =>
val token = TokenManager.createToken(request.nickName)
//TODO: store in redis (key = nickName, value = token). add nickName to list of users
Future.successful(LoginResponse(true,Some(token)))
}
}
override def fromJson(request: String): Option[LoginRequest] =
LoginRequest.fmtJson.reads(Json.parse(request)) match {
case JsSuccess(loginRequest, _) =>
Some(loginRequest)
case JsError(e) =>
logger.info(s"error parsing LoginRequest : $e")
None
}
override def toJson(response: LoginResponse): String =
LoginResponse.fmtJson.writes(response).toString()
def isNickNameAlreadyExists(nickName: String) : Boolean = {
//TODO: check in redis
false
}
}
|
roischmidt/OOOServer
|
src/main/scala-2.12/service/LoginService.scala
|
Scala
|
gpl-3.0
| 1,464 |
/* SCALA Implementation of Shell Sort.
 The corner cases are covered, but if you find any additions to it,
 please do add a test for it.
 Any improvements/tests in the code are highly appreciated.
*/
class ShellSort {
def incSeq(len: Int) = new Iterator[Int] {
private[this] var x: Int = len / 2
def hasNext = x > 0
def next() = {
x =
if (x == 2) 1
else x * 5 / 11
x
}
}
def insertionSort(a: Array[Int], inc: Int): Unit = {
for (i <- inc until a.length; temp = a(i)) {
var j = i
while (j >= inc && a(j - inc) > temp) {
a(j) = a(j - inc)
j = j - inc
}
a(j) = temp
}
}
def shellSort(arrayToBeSorted:Array[Int]): Array[Int] = {
for (inc <- incSeq(arrayToBeSorted.length)) insertionSort(arrayToBeSorted, inc)
arrayToBeSorted
}
}
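/* Minimal usage sketch, added for illustration and not part of the original file. */
object ShellSortExample extends App {
  val sorted = new ShellSort().shellSort(Array(9, 4, 7, 1, 3))
  println(sorted.mkString(", ")) // prints: 1, 3, 4, 7, 9
}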
|
aayushKumarJarvis/Code-Snippets-JAVA-SCALA
|
src/main/scala/ShellSort.scala
|
Scala
|
mit
| 862 |
/**
* Copyright 2014 Marco Vermeulen
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.sdkman.release
import javax.validation.ValidationException
import org.springframework.dao.DataAccessException
import org.springframework.web.bind.annotation._
@ControllerAdvice
class ExceptionHandlerAdvice {
@ExceptionHandler
def handle(e: VersionNotFoundException) = BadRequest(e.getMessage)
@ExceptionHandler
def handle(e: CandidateNotFoundException) = BadRequest(e.getMessage)
@ExceptionHandler
def handle(e: DataAccessException) = ServiceUnavailable(e.getMessage)
@ExceptionHandler
def handle(e: ValidationException) = BadRequest(e.getMessage)
@ExceptionHandler
def handle(e: DuplicateVersionException) = Conflict(e.getMessage)
@ExceptionHandler
def handle(e: AuthorisationDeniedException) = Forbidden(e.getMessage)
}
|
sdkman/sdkman-release-api
|
src/main/scala/io/sdkman/release/ExceptionHandlerAdvice.scala
|
Scala
|
apache-2.0
| 1,362 |
package com.fsist.stream
import com.fsist.util.concurrent.{Func, AsyncFunc}
import org.scalatest.FunSuite
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import scala.collection.immutable
import scala.collection.immutable.BitSet
import scala.concurrent.Promise
import scala.util.Random
import scala.concurrent.duration._
class ConnectorTest extends FunSuite with StreamTester {
test("split, constant choice of single output") {
val range = 1 to 100
val count = 3
val splitter = Source.from(range).split(count, (i: Int) => BitSet(0))
val sinks = for (output <- splitter.outputs) yield {
output.collect[List]().single
}
val stream = sinks(0).build()
stream.completion.futureValue
assert(stream(sinks(0)).result.futureValue == range, "All data went to sink 0")
assert(stream(sinks(1)).result.futureValue == List(), "All data went to sink 0")
assert(stream(sinks(2)).result.futureValue == List(), "All data went to sink 0")
}
test("split, constant choice of two outputs") {
val range = 1 to 100
val count = 3
val splitter = Source.from(range).split(count, (i: Int) => BitSet(0, 2))
val sinks = for (output <- splitter.outputs) yield {
output.collect[List]().single
}
val stream = sinks(0).build()
stream.completion.futureValue
assert(stream(sinks(0)).result.futureValue == range, "All data went to sinks 0 and 2")
assert(stream(sinks(1)).result.futureValue == List(), "All data went to sinks 0 and 2")
assert(stream(sinks(2)).result.futureValue == range, "All data went to sinks 0 and 2")
}
test("split, random choice of single output") {
val range = 1 to 100
val count = 3
val random = new Random()
val splitter = Source.from(range).split(count, (i: Int) => BitSet(random.nextInt(3)))
val sinks = for (output <- splitter.outputs) yield {
output.collect[List]().single
}
val stream = sinks(0).build()
stream.completion.futureValue
val allResults =
(for (sink <- sinks)
yield stream(sink).result.futureValue).flatten
assert(allResults.sorted == range, "Each data element went to a single sink")
}
test("round robin") {
val range = 1 to 100
val count = 3
val splitter = Source.from(range).roundRobin(count)
val sinks = for (output <- splitter.outputs) yield {
output.collect[List]().single
}
val stream = sinks(0).build()
stream.completion.futureValue
assert(stream(sinks(0)).result.futureValue == range.by(3))
assert(stream(sinks(1)).result.futureValue == range.drop(1).by(3))
assert(stream(sinks(2)).result.futureValue == range.drop(2).by(3))
}
test("tee") {
val range = 1 to 100
val count = 3
val splitter = Source.from(range).tee(3)
val sinks = for (output <- splitter.outputs) yield {
output.collect[List]().single
}
val stream = sinks(0).build()
stream.completion.futureValue
assert(stream(sinks(0)).result.futureValue == range, "All items were passed to sink 0")
assert(stream(sinks(1)).result.futureValue == range, "All items were passed to sink 1")
assert(stream(sinks(2)).result.futureValue == range, "All items were passed to sink 2")
}
test("merge") {
val range = 1 to 100
val count = 3
val sources = Vector.fill(count)(Source.from(range))
val merger = Connector.merge[Int](count)
merger.connectInputs(sources)
val result = merger.output.toList().singleResult().futureValue
val expected = Vector.fill(count)(range).flatten
assert(result.sorted == expected.sorted, "All elements were merged")
}
test("scatter") {
val range = 1 to 100
val count = 3
val scatterer = Source.from(range).scatter(3)
val sinks = for (output <- scatterer.outputs) yield {
output.toList.single
}
val stream = sinks(0).build()
stream.completion.futureValue
val allResults =
(for (sink <- sinks)
yield stream(sink).result.futureValue).flatten
assert(allResults.sorted == range, "Each data element went to a single sink")
}
test("scatter: ensure parallelism") {
// Need quick timeouts
implicit def patienceConfig = PatienceConfig(250.millis)
val range = 1 to 100
val count = 3
val scatterer = Source.from(range).scatter(3)
val promises = Vector.fill(count)(Promise[Unit]())
val sinks = for (promise <- promises) yield Sink.foreachAsync[Int, Unit](x => promise.future)
scatterer.connectOutputs(sinks)
val stream = sinks(0).build()
awaitTimeout(stream.completion, "All sinks are blocked; stream should not complete")
promises(0).success(())
for (sink <- sinks) {
awaitTimeout(stream(sink).completion, "Sink will not complete as long as the stream doesn't")
}
awaitTimeout(stream.completion, "All sinks are blocked; stream should not complete")
promises(1).success(())
promises(2).success(())
stream.completion.futureValue // Entire stream should now complete
}
test("Scatter-gather") {
val range = 1 to 100
val count = 3
val scatterer = Source.from(range).scatter(3)
val gatherer = Connector.merge[Int](count)
gatherer.connectInputs(scatterer.outputs)
val result = gatherer.output.toList().singleResult().futureValue
assert(result.sorted == range, "Scatter-gather")
}
test("Split-gather") {
val range = 1 to 100
val count = 3
val splitter = Source.from(range).roundRobin(3)
val gatherer = Connector.merge[Int](count)
gatherer.connectInputs(splitter.outputs)
val result = gatherer.output.toList().singleResult().futureValue
assert(result.sorted == range, "Scatter-gather")
}
test("Splitter completion promise is fulfilled") {
val source = Source.of(1, 2, 3)
val connector = source.roundRobin(2)
for (output <- connector.outputs)
output.foreach(Func.nopLiteral)
val stream = source.builder.run()
stream(connector).completion.futureValue(Timeout(1.second))
}
test("Scatterer completion promise is fulfilled") {
val source = Source.of(1, 2, 3)
val connector = source.scatter(2)
for (output <- connector.outputs)
output.foreach(Func.nopLiteral)
val stream = source.builder.run()
stream(connector).completion.futureValue(Timeout(1.second))
}
test("Merger completion promise is fulfilled") {
val merger = Merger[Int](2)
for (input <- merger.inputs)
Source.of(1, 2, 3).to(input)
val stream = merger.output.foreach(Func.nopLiteral).build()
stream(merger).completion.futureValue(Timeout(1.second))
}
test("Concatenator completion promise is fulfilled") {
val concat = Connector.concatenate[Int](2)
for (input <- concat.inputs) {
Source.from(1 to 10).to(input)
}
val stream = concat.output.discard().build()
stream.completion.futureValue
assert(stream(concat).completion.isCompleted)
}
}
|
fsist/future-streams
|
src/test/scala/com/fsist/stream/ConnectorTest.scala
|
Scala
|
apache-2.0
| 6,951 |
package org.dsa.iot.rx.core
import org.dsa.iot.rx.RxMergerN
import rx.lang.scala.Observable
/**
* Combines multiple Observables into a single Observable of lists, emitting a new list each time any
* of the sources emits a new item.
*
* @see <a href="http://reactivex.io/documentation/operators/combinelatest.html">ReactiveX operators documentation: CombineLatest</a>
*/
class CombineLatest[T] extends RxMergerN[T, Seq[T]] {
protected def compute = sources.combinedIns
}
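// Hedged illustration (added; not part of the original source) of the combineLatest
// semantics described above. Assuming two sources feed a CombineLatest[Int], source A
// emits 1 then 2, and source B emits 10 then 20 after A's first item, the merged output
// of "latest values" lists would be, in order:
//   Seq(1, 10)  // B emits 10 while A's latest is 1
//   Seq(2, 10)  // A emits 2 while B's latest is 10
//   Seq(2, 20)  // B emits 20 while A's latest is 2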
/**
* Factory for [[CombineLatest]] instances.
*/
object CombineLatest {
/**
* Creates a new CombineLatest instance.
*/
def apply[T]: CombineLatest[T] = new CombineLatest[T]
}
|
IOT-DSA/dslink-scala-ignition
|
src/main/scala/org/dsa/iot/rx/core/CombineLatest.scala
|
Scala
|
apache-2.0
| 670 |
/* Copyright 2017-18, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.learn
import org.platanios.tensorflow.api.core.Graph
import org.platanios.tensorflow.api.learn.layers.{Input, Layer}
import org.platanios.tensorflow.api.ops.{Math, Op, Output, OutputLike}
import org.platanios.tensorflow.api.ops.training.optimizers.Optimizer
import org.platanios.tensorflow.api.ops.io.data.Iterator
import org.platanios.tensorflow.api.ops.metrics.Metric
import org.platanios.tensorflow.api.ops.variables.Variable
import org.platanios.tensorflow.api.types.FLOAT32
/**
* @author Emmanouil Antonios Platanios
*/
trait Model {
protected val colocateGradientsWithOps: Boolean = false
}
trait InferenceModel[IT, IO, ID, IS, I] extends Model {
def buildInferOps(): Model.InferOps[IT, IO, ID, IS, I]
}
trait TrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, EI] extends InferenceModel[IT, IO, ID, IS, I] {
def buildTrainOps(): Model.TrainOps[IT, IO, ID, IS, I, TT, TO, TD, TS]
def buildEvaluateOps(metrics: Seq[Metric[EI, Output]]): Model.EvaluateOps[TT, TO, TD, TS, I]
}
trait SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T]
extends TrainableModel[IT, IO, ID, IS, I, (IT, TT), (IO, TO), (ID, TD), (IS, TS), (I, T)] {
def buildTrainOps(): Model.SupervisedTrainOps[IT, IO, ID, IS, I, TT, TO, TD, TS, T]
def buildEvaluateOps(
metrics: Seq[Metric[(I, T), Output]]
): Model.EvaluateOps[(IT, TT), (IO, TO), (ID, TD), (IS, TS), I]
}
trait UnsupervisedTrainableModel[IT, IO, ID, IS, I]
extends TrainableModel[IT, IO, ID, IS, I, IT, IO, ID, IS, I] {
def buildTrainOps(): Model.UnsupervisedTrainOps[IT, IO, ID, IS, I]
def buildEvaluateOps(metrics: Seq[Metric[I, Output]]): Model.EvaluateOps[IT, IO, ID, IS, I]
}
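/** Factory methods for assembling models. `unsupervised` wires an input, layer, loss, and
  * optimizer into an [[UnsupervisedTrainableModel]], while the `supervised` overloads
  * additionally take a train input (and optionally a separate train layer, a train-input
  * layer, gradient clipping, and gradient colocation) and build a
  * [[SupervisedTrainableModel]] or a [[SupervisedConditionalTrainableModel]]. */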
object Model {
def unsupervised[IT, IO, IDA, ID, IS, I](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
loss: Layer[(IO, I), Output],
optimizer: Optimizer,
clipGradients: ClipGradients = NoClipGradients,
colocateGradientsWithOps: Boolean = false
): UnsupervisedTrainableModel[IT, IO, ID, IS, I] = {
new SimpleUnsupervisedTrainableModel(input, layer, loss, optimizer, clipGradients, colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SimpleSupervisedTrainableModel(input, layer, trainInput, trainInputLayer, loss, optimizer)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer,
clipGradients: ClipGradients
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SimpleSupervisedTrainableModel(input, layer, trainInput, trainInputLayer, loss, optimizer, clipGradients)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SimpleSupervisedTrainableModel(
input, layer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer,
clipGradients: ClipGradients
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SimpleSupervisedTrainableModel(
input, layer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer, clipGradients)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, trainInputLayer, loss, optimizer)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer,
clipGradients: ClipGradients
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, trainInputLayer, loss, optimizer, clipGradients)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer,
clipGradients: ClipGradients
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer,
clipGradients)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer,
colocateGradientsWithOps: Boolean
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SimpleSupervisedTrainableModel(
input, layer, trainInput, trainInputLayer, loss, optimizer, colocateGradientsWithOps = colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer,
clipGradients: ClipGradients,
colocateGradientsWithOps: Boolean
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SimpleSupervisedTrainableModel(
input, layer, trainInput, trainInputLayer, loss, optimizer, clipGradients,
colocateGradientsWithOps = colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer,
colocateGradientsWithOps: Boolean
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SimpleSupervisedTrainableModel(
input, layer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer,
colocateGradientsWithOps = colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer,
clipGradients: ClipGradients,
colocateGradientsWithOps: Boolean
): SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SimpleSupervisedTrainableModel(
input, layer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer, clipGradients,
colocateGradientsWithOps = colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer,
colocateGradientsWithOps: Boolean
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, trainInputLayer, loss, optimizer,
colocateGradientsWithOps = colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS, T](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
trainInputLayer: Layer[TO, T],
loss: Layer[(I, T), Output],
optimizer: Optimizer,
clipGradients: ClipGradients,
colocateGradientsWithOps: Boolean
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, trainInputLayer, loss, optimizer, clipGradients,
colocateGradientsWithOps = colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer,
colocateGradientsWithOps: Boolean
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer,
colocateGradientsWithOps = colocateGradientsWithOps)
}
def supervised[IT, IO, IDA, ID, IS, I, TT, TO, TDA, TD, TS](
input: Input[IT, IO, IDA, ID, IS],
layer: Layer[IO, I],
trainLayer: Layer[(IO, TO), I],
trainInput: Input[TT, TO, TDA, TD, TS],
loss: Layer[(I, TO), Output],
optimizer: Optimizer,
clipGradients: ClipGradients,
colocateGradientsWithOps: Boolean
): SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, TO] = {
new SupervisedConditionalTrainableModel(
input, layer, trainLayer, trainInput, layers.Identity[TO]("TrainInputLayer"), loss, optimizer,
clipGradients, colocateGradientsWithOps)
}
case class InferOps[IT, IO, ID, IS, I](inputIterator: Iterator[IT, IO, ID, IS], input: IO, output: I)
private[learn] class TrainOps[IT, IO, ID, IS, I, TT, TO, TD, TS](
val inputIterator: Iterator[TT, TO, TD, TS],
val input: TO,
val output: I,
val loss: Output,
val gradientsAndVariables: Seq[(OutputLike, Variable)],
val trainOp: Op)
case class UnsupervisedTrainOps[IT, IO, ID, IS, I](
override val inputIterator: Iterator[IT, IO, ID, IS],
override val input: IO,
override val output: I,
override val loss: Output,
override val gradientsAndVariables: Seq[(OutputLike, Variable)],
override val trainOp: Op
) extends TrainOps[IT, IO, ID, IS, I, IT, IO, ID, IS](
inputIterator, input, output, loss, gradientsAndVariables, trainOp)
case class SupervisedTrainOps[IT, IO, ID, IS, I, TT, TO, TD, TS, T](
override val inputIterator: Iterator[(IT, TT), (IO, TO), (ID, TD), (IS, TS)],
override val input: (IO, TO),
override val output: I,
trainOutput: T,
override val loss: Output,
override val gradientsAndVariables: Seq[(OutputLike, Variable)],
override val trainOp: Op
) extends TrainOps[IT, IO, ID, IS, I, (IT, TT), (IO, TO), (ID, TD), (IS, TS)](
inputIterator, input, output, loss, gradientsAndVariables, trainOp)
object SupervisedTrainOps {
def apply[IT, IO, ID, IS, I](
inputIterator: Iterator[(IT, IT), (IO, IO), (ID, ID), (IS, IS)],
input: (IO, IO),
output: I,
loss: Output,
gradientsAndVariables: Seq[(OutputLike, Variable)],
trainOp: Op
): SupervisedTrainOps[IT, IO, ID, IS, I, IT, IO, ID, IS, I] = {
SupervisedTrainOps(inputIterator, input, output, output, loss, gradientsAndVariables, trainOp)
}
}
case class EvaluateOps[IT, IO, ID, IS, I](
inputIterator: Iterator[IT, IO, ID, IS],
input: IO,
output: I,
metricValues: Seq[Output],
metricUpdates: Seq[Output],
metricResets: Seq[Op])
}
private[learn] class SimpleInferenceModel[IT, IO, ID, IS, I] private[learn](
val input: Input[IT, IO, _, ID, IS],
val layer: Layer[IO, I]
) extends InferenceModel[IT, IO, ID, IS, I] {
override def buildInferOps(): Model.InferOps[IT, IO, ID, IS, I] = {
implicit val mode: Mode = INFERENCE
val inputIterator = input()
val inputIteratorNext = inputIterator.next()
val layerOutput = layer(inputIteratorNext)
Model.InferOps(inputIterator, inputIteratorNext, layerOutput)
}
}
private[learn] class SimpleUnsupervisedTrainableModel[IT, IO, ID, IS, I] private[learn](
override val input: Input[IT, IO, _, ID, IS],
override val layer: Layer[IO, I],
val loss: Layer[(IO, I), Output],
val optimizer: Optimizer,
val clipGradients: ClipGradients = NoClipGradients,
override protected val colocateGradientsWithOps: Boolean = false
) extends SimpleInferenceModel[IT, IO, ID, IS, I](input, layer)
with UnsupervisedTrainableModel[IT, IO, ID, IS, I] {
// TODO: [LEARN] Add support for trainable models with only the loss function gradient available.
override def buildTrainOps(): Model.UnsupervisedTrainOps[IT, IO, ID, IS, I] = {
implicit val mode: Mode = TRAINING
val inputIterator = input()
val inputIteratorNext = inputIterator.next()
val layerOutput = layer(inputIteratorNext)
// TODO: [LEARN] Remove this cast.
val lossOutput = Math.cast(loss((inputIteratorNext, layerOutput)), FLOAT32, name = "LossCast")
val iteration = Counter.getOrCreate(Graph.Keys.GLOBAL_STEP, local = false)
val gradientsAndVariables = optimizer.computeGradients(
lossOutput, colocateGradientsWithOps = colocateGradientsWithOps)
val clippedGradientsAndVariables = clipGradients(gradientsAndVariables)
val trainOp = optimizer.applyGradients(clippedGradientsAndVariables, Some(iteration))
Model.UnsupervisedTrainOps(
inputIterator, inputIteratorNext, layerOutput, lossOutput, gradientsAndVariables, trainOp)
}
override def buildEvaluateOps(metrics: Seq[Metric[I, Output]]): Model.EvaluateOps[IT, IO, ID, IS, I] = {
implicit val mode: Mode = EVALUATION
val inputIterator = input()
val inputIteratorNext = inputIterator.next()
val layerOutput = layer(inputIteratorNext)
val streamingInstances = metrics.map(_.streaming(layerOutput))
Model.EvaluateOps(
inputIterator, inputIteratorNext, layerOutput,
streamingInstances.map(_.value), streamingInstances.map(_.update), streamingInstances.map(_.reset))
}
}
private[learn] class SimpleSupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] private[learn](
override val input: Input[IT, IO, _, ID, IS],
override val layer: Layer[IO, I],
val trainInput: Input[TT, TO, _, TD, TS],
val trainInputLayer: Layer[TO, T],
val loss: Layer[(I, T), Output],
val optimizer: Optimizer,
val clipGradients: ClipGradients = NoClipGradients,
override protected val colocateGradientsWithOps: Boolean = false
) extends SimpleInferenceModel[IT, IO, ID, IS, I](input, layer)
with SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] {
// TODO: [LEARN] Add support for trainable models with only the loss function gradient available.
override def buildTrainOps(): Model.SupervisedTrainOps[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
implicit val mode: Mode = TRAINING
val inputIterator = input.zip(trainInput).apply()
val inputIteratorNext = inputIterator.next()
val layerOutput = layer(inputIteratorNext._1)
val trainLayerOutput = trainInputLayer(inputIteratorNext._2)
// TODO: [LEARN] Remove this cast.
val lossOutput = Math.cast(
loss((layerOutput, trainLayerOutput)), FLOAT32, name = "LossCast")
val iteration = Counter.getOrCreate(Graph.Keys.GLOBAL_STEP, local = false)
val gradientsAndVariables = optimizer.computeGradients(
lossOutput, colocateGradientsWithOps = colocateGradientsWithOps)
val clippedGradientsAndVariables = clipGradients(gradientsAndVariables)
val trainOp = optimizer.applyGradients(clippedGradientsAndVariables, Some(iteration))
Model.SupervisedTrainOps(
inputIterator, inputIteratorNext, layerOutput, trainLayerOutput, lossOutput, gradientsAndVariables, trainOp)
}
override def buildEvaluateOps(
metrics: Seq[Metric[(I, T), Output]]
): Model.EvaluateOps[(IT, TT), (IO, TO), (ID, TD), (IS, TS), I] = {
implicit val mode: Mode = EVALUATION
val inputIterator = input.zip(trainInput).apply()
val inputIteratorNext = inputIterator.next()
val layerOutput = layer(inputIteratorNext._1)
val trainLayerOutput = trainInputLayer(inputIteratorNext._2)
val streamingInstances = metrics.map(_.streaming((layerOutput, trainLayerOutput)))
Model.EvaluateOps(
inputIterator, inputIteratorNext, layerOutput,
streamingInstances.map(_.value), streamingInstances.map(_.update), streamingInstances.map(_.reset))
}
}
private[learn] class SupervisedConditionalTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] private[learn](
override val input: Input[IT, IO, _, ID, IS],
override val layer: Layer[IO, I],
val trainLayer: Layer[(IO, TO), I],
val trainInput: Input[TT, TO, _, TD, TS],
val trainInputLayer: Layer[TO, T],
val loss: Layer[(I, T), Output],
val optimizer: Optimizer,
val clipGradients: ClipGradients = NoClipGradients,
override protected val colocateGradientsWithOps: Boolean = false
) extends SimpleInferenceModel[IT, IO, ID, IS, I](input, layer)
with SupervisedTrainableModel[IT, IO, ID, IS, I, TT, TO, TD, TS, T] {
// TODO: [LEARN] Add support for trainable models with only the loss function gradient available.
override def buildTrainOps(): Model.SupervisedTrainOps[IT, IO, ID, IS, I, TT, TO, TD, TS, T] = {
implicit val mode: Mode = TRAINING
val inputIterator = input.zip(trainInput).apply()
val inputIteratorNext = inputIterator.next()
val layerOutput = trainLayer(inputIteratorNext)
val trainLayerOutput = trainInputLayer(inputIteratorNext._2)
// TODO: [LEARN] Remove this cast.
val lossOutput = Math.cast(
loss((layerOutput, trainLayerOutput)), FLOAT32, name = "LossCast")
val iteration = Counter.getOrCreate(Graph.Keys.GLOBAL_STEP, local = false)
val gradientsAndVariables = optimizer.computeGradients(
lossOutput, colocateGradientsWithOps = colocateGradientsWithOps)
val clippedGradientsAndVariables = clipGradients(gradientsAndVariables)
val trainOp = optimizer.applyGradients(clippedGradientsAndVariables, Some(iteration))
Model.SupervisedTrainOps(
inputIterator, inputIteratorNext, layerOutput, trainLayerOutput, lossOutput, gradientsAndVariables, trainOp)
}
override def buildEvaluateOps(
metrics: Seq[Metric[(I, T), Output]]
): Model.EvaluateOps[(IT, TT), (IO, TO), (ID, TD), (IS, TS), I] = {
implicit val mode: Mode = EVALUATION
val inputIterator = input.zip(trainInput).apply()
val inputIteratorNext = inputIterator.next()
val layerOutput = layer(inputIteratorNext._1)
val trainLayerOutput = trainInputLayer(inputIteratorNext._2)
val streamingInstances = metrics.map(_.streaming((layerOutput, trainLayerOutput)))
Model.EvaluateOps(
inputIterator, inputIteratorNext, layerOutput,
streamingInstances.map(_.value), streamingInstances.map(_.update), streamingInstances.map(_.reset))
}
}
|
eaplatanios/tensorflow
|
tensorflow/scala/api/src/main/scala/org/platanios/tensorflow/api/learn/Model.scala
|
Scala
|
apache-2.0
| 21,221 |
package net.shiroka.tools.ofx.conversions
import java.io._
import scala.io.Source
import scala.util.control.Exception.allCatch
import scala.collection._
import scala.collection.mutable.ArrayBuffer
import com.github.tototoshi.csv._
import org.joda.time._
import org.joda.time.format._
import net.ceedubs.ficus.Ficus._
import com.typesafe.config.Config
import net.shiroka.tools.ofx._
import Statement._
import Transaction._
import implicits.Tapper
// A conversion for transfers.csv of freee https://www.freee.co.jp/
case class FreeeTransfers(config: Config) extends Conversion {
import FreeeTransfers._
lazy val accounts = config.as[Config]("accounts")
val currencyCode = config.as[String]("currency-code")
def apply(
source: InputStream,
sink: PrintStream): Result = {
val csv = CSVReader.open(Source.fromInputStream(source, "Shift_JIS"))
val statements = read(csv.iterator.drop(1))
closing(csv)(_ => Message(statements).writeOfx(sink))
sink :: source :: Nil
}
private def read(rows: Iterator[Seq[String]]): Iterable[Statement] = {
var lastTxn: Option[Transaction] = None
val transactionGroups = mutable.Map.empty[String, ArrayBuffer[Transaction]]
val accTypes = mutable.Map.empty[String, AccountType]
for (row <- rows) row.toList match {
case row @ date :: from :: to :: desc :: amountStr :: Nil =>
allCatch.either {
accTypes.getOrElseUpdate(from, findOrGuess(from))
transactionGroups
.getOrElseUpdate(from, ArrayBuffer.empty[Transaction])
.append {
val (_type, amount) = typeAndAmount(accTypes(from), amountStr)
Transaction(
dateTime = DateTime.parse(s"$date +09:00", dateFormat),
`type` = _type,
description = noneIfEmpty(desc).getOrElse(s"$from → $to"),
amount = amount,
balance = 0).uniquifyTime(lastTxn.map(_.dateTime), ascending = false)
.tap(txn => lastTxn = Some(txn))
}
        }.fold(rethrow(_, s"Failed to process row $row"), identity)
case row => sys.error(s"Malformed row $row")
}
transactionGroups.map {
case (name, transactions) =>
Statement(
accounts.as[Option[String]](s"$name.account-id").getOrElse(name),
accTypes(name),
currencyCode,
transactions.iterator)
}
}
private def findOrGuess(name: String): AccountType =
AccountType.find(accounts, name)
.orElse(if (name.endsWith("カード")) Some(CreditLine) else None)
.getOrElse(Savings)
private def typeAndAmount(accType: AccountType, amountStr: String) =
(accType, money(amountStr)) match {
case (`CreditLine`, amount) => (Credit, -amount)
case (_, amount) => (Debit, -amount)
}
}
object FreeeTransfers {
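  // Column headers of the freee transfers CSV, kept in the original Japanese:
  // transfer date, source account, destination account, note, amount.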
val header = "振替日, 振替元口座, 振替先口座, 備考, 金額".split(", ").toList
val dateFormat = DateTimeFormat.forPattern("yyyy/MM/dd Z")
}
|
ikuo/ofx-tools
|
src/main/scala/net/shiroka/tools/ofx/conversions/FreeeTransfers.scala
|
Scala
|
mit
| 3,003 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import cats.effect.IO
import minitest.TestSuite
import monix.eval.Task
import monix.execution.Ack
import monix.execution.Ack.{Continue, Stop}
import monix.execution.schedulers.TestScheduler
import monix.reactive.Observable
import monix.execution.exceptions.DummyException
import monix.reactive.observers.Subscriber
import scala.concurrent.Future
object DoOnEarlyStopSuite extends TestSuite[TestScheduler] {
def setup(): TestScheduler = TestScheduler()
def tearDown(s: TestScheduler): Unit = {
assert(s.state.tasks.isEmpty, "TestScheduler should have no pending tasks")
}
test("should execute for cats.effect.IO") { implicit s =>
var wasCanceled = 0
var wasCompleted = 0
Observable
.now(1)
.doOnEarlyStopF(IO { wasCanceled += 1 })
.unsafeSubscribeFn(new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int) = Stop
def onError(ex: Throwable): Unit = ()
def onComplete(): Unit = wasCompleted += 1
})
assertEquals(wasCanceled, 1)
assertEquals(wasCompleted, 1)
}
test("should execute for synchronous subscribers") { implicit s =>
var wasCanceled = 0
var wasCompleted = 0
Observable
.now(1)
.doOnEarlyStop(Task.eval { wasCanceled += 1 })
.unsafeSubscribeFn(new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int) = Stop
def onError(ex: Throwable): Unit = ()
def onComplete(): Unit = wasCompleted += 1
})
assertEquals(wasCanceled, 1)
assertEquals(wasCompleted, 1)
}
test("should execute for asynchronous subscribers") { implicit s =>
var wasCanceled = 0
var wasCompleted = 0
Observable
.now(1)
.doOnEarlyStop(Task.evalAsync { wasCanceled += 1 })
.unsafeSubscribeFn(new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int) = Future(Stop)
def onError(ex: Throwable): Unit = ()
def onComplete(): Unit = wasCompleted += 1
})
s.tick()
assertEquals(wasCanceled, 1)
assertEquals(wasCompleted, 1)
}
test("should not execute if cancel does not happen") { implicit s =>
var wasCanceled = 0
var wasCompleted = 0
Observable
.range(0, 10)
.doOnEarlyStop(Task.eval { wasCanceled += 1 })
.unsafeSubscribeFn(new Subscriber[Long] {
val scheduler = s
def onNext(elem: Long): Future[Ack] =
if (elem % 2 == 0) Continue else Future(Continue)
def onError(ex: Throwable): Unit = ()
def onComplete(): Unit = wasCompleted += 1
})
s.tick()
assertEquals(wasCanceled, 0)
assertEquals(wasCompleted, 1)
}
test("should stream onError") { implicit s =>
val dummy = DummyException("ex")
var wasCanceled = 0
var wasCompleted = 0
var errorThrown: Throwable = null
Observable
.raiseError(dummy)
.doOnEarlyStop(Task.eval { wasCanceled += 1 })
.unsafeSubscribeFn(new Subscriber[Long] {
val scheduler = s
def onNext(elem: Long): Future[Ack] =
if (elem % 2 == 0) Continue else Future(Continue)
def onError(ex: Throwable): Unit =
errorThrown = ex
def onComplete(): Unit =
wasCompleted += 1
})
s.tick()
assertEquals(wasCanceled, 0)
assertEquals(wasCompleted, 0)
assertEquals(errorThrown, dummy)
}
test("should protect against user code") { implicit s =>
val dummy = DummyException("dummy")
var hasError = false
Observable
.repeat(1)
.doOnEarlyStop(Task.eval { throw dummy })
.unsafeSubscribeFn(new Subscriber[Int] {
val scheduler = s
def onNext(elem: Int) = Stop
def onError(ex: Throwable) = hasError = true
def onComplete() = ()
})
s.tick()
assertEquals(s.state.lastReportedError, dummy)
assertEquals(hasError, false)
}
}
|
monixio/monix
|
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/DoOnEarlyStopSuite.scala
|
Scala
|
apache-2.0
| 4,588 |
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
/*
Copyright (c) 2007-2016, Rickard Nilsson
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the EPFL nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.scalacheck
import org.scalacheck.Test._
private[scalacheck] object Platform {
import util.FreqMap
def runWorkers(
params: Parameters,
workerFun: Int => Result,
stop: () => Unit
): Result = {
import params._
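    // Folds per-worker results together: the first non-Passed/non-Exhausted status wins;
    // otherwise the combined run passes if enough successes accumulated within the
    // allowed discard ratio, and is Exhausted otherwise.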
def mergeResults(r1: Result, r2: Result): Result = {
val Result(st1, s1, d1, fm1, _) = r1
val Result(st2, s2, d2, fm2, _) = r2
if (st1 != Passed && st1 != Exhausted)
Result(st1, s1+s2, d1+d2, fm1++fm2, 0)
else if (st2 != Passed && st2 != Exhausted)
Result(st2, s1+s2, d1+d2, fm1++fm2, 0)
else {
if (s1+s2 >= minSuccessfulTests && maxDiscardRatio*(s1+s2) >= (d1+d2))
Result(Passed, s1+s2, d1+d2, fm1++fm2, 0)
else
Result(Exhausted, s1+s2, d1+d2, fm1++fm2, 0)
}
}
if(workers < 2) workerFun(0)
else {
import concurrent._
val tp = java.util.concurrent.Executors.newFixedThreadPool(workers)
implicit val ec = ExecutionContext.fromExecutor(tp)
try {
val fs = List.range(0,workers) map (idx => Future {
params.customClassLoader.map(
Thread.currentThread.setContextClassLoader(_)
)
blocking { workerFun(idx) }
})
val zeroRes = Result(Passed,0,0,FreqMap.empty[Set[Any]],0)
val res =
if (fs.isEmpty) Future.successful(zeroRes)
else Future.sequence(fs).map(_.foldLeft(zeroRes)(mergeResults))
Await.result(res, concurrent.duration.Duration.Inf)
} finally {
stop()
tp.shutdown()
}
}
}
def newInstance(name: String, loader: ClassLoader)(args: Seq[AnyRef]): AnyRef =
if(!args.isEmpty) ???
else Class.forName(name, true, loader).newInstance.asInstanceOf[AnyRef]
def loadModule(name: String, loader: ClassLoader): AnyRef =
Class.forName(name + "$", true, loader).getField("MODULE$").get(null)
class JSExportDescendentObjects(ignoreInvalidDescendants: Boolean)
extends scala.annotation.Annotation {
def this() = this(false)
}
class JSExportDescendentClasses(ignoreInvalidDescendants: Boolean)
extends scala.annotation.Annotation {
def this() = this(false)
}
}
|
sirthias/swave
|
core/src/test/scala/org/scalacheck/Platform.scala
|
Scala
|
mpl-2.0
| 3,936 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.java.typeutils.MapTypeInfo
import org.apache.flink.table.api.TableException
import org.apache.flink.table.functions.FunctionIdentifier
import org.apache.flink.table.planner.calcite.FlinkTypeFactory
import org.apache.flink.table.planner.calcite.FlinkTypeFactory.toLogicalType
import org.apache.flink.table.planner.functions.utils.UserDefinedFunctionUtils
import org.apache.flink.table.planner.plan.utils.ExplodeFunctionUtil
import org.apache.flink.table.runtime.types.LogicalTypeDataTypeConverter.fromLogicalTypeToDataType
import org.apache.flink.table.runtime.types.TypeInfoLogicalTypeConverter.fromLogicalTypeToTypeInfo
import org.apache.flink.table.types.logical.RowType
import com.google.common.collect.ImmutableList
import org.apache.calcite.plan.RelOptRule._
import org.apache.calcite.plan.hep.HepRelVertex
import org.apache.calcite.plan.{RelOptRule, RelOptRuleCall, RelOptRuleOperand}
import org.apache.calcite.rel.RelNode
import org.apache.calcite.rel.`type`.{RelDataTypeFieldImpl, RelRecordType, StructKind}
import org.apache.calcite.rel.core.Uncollect
import org.apache.calcite.rel.logical._
import org.apache.calcite.sql.`type`.{AbstractSqlType, ArraySqlType, MapSqlType, MultisetSqlType}
import java.util.Collections
import org.apache.flink.table.planner.utils.ShortcutUtils.unwrapTypeFactory
/**
 * Planner rule that rewrites UNNEST into a call to the explode table function.
*
* Note: This class can only be used in HepPlanner.
*/
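// Hedged illustration (added; not part of the original source): the shape of query this
// rule targets. A statement along the lines of
//   SELECT o.id, tag FROM Orders o CROSS JOIN UNNEST(o.tags) AS t (tag)
// reaches the planner as a LogicalCorrelate whose right input contains an Uncollect;
// onMatch below replaces that Uncollect with a LogicalTableFunctionScan over a generated
// "explode" table function and copies the correlate on top of it.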
class LogicalUnnestRule(
operand: RelOptRuleOperand,
description: String)
extends RelOptRule(operand, description) {
override def matches(call: RelOptRuleCall): Boolean = {
val join: LogicalCorrelate = call.rel(0)
val right = getRel(join.getRight)
right match {
// a filter is pushed above the table function
case filter: LogicalFilter =>
getRel(filter.getInput) match {
case u: Uncollect => !u.withOrdinality
case p: LogicalProject => getRel(p.getInput) match {
case u: Uncollect => !u.withOrdinality
case _ => false
}
case _ => false
}
case project: LogicalProject =>
getRel(project.getInput) match {
case u: Uncollect => !u.withOrdinality
case _ => false
}
case u: Uncollect => !u.withOrdinality
case _ => false
}
}
override def onMatch(call: RelOptRuleCall): Unit = {
val correlate: LogicalCorrelate = call.rel(0)
val outer = getRel(correlate.getLeft)
val array = getRel(correlate.getRight)
def convert(relNode: RelNode): RelNode = {
relNode match {
case rs: HepRelVertex =>
convert(getRel(rs))
case f: LogicalProject =>
f.copy(f.getTraitSet, ImmutableList.of(convert(getRel(f.getInput))))
case f: LogicalFilter =>
f.copy(f.getTraitSet, ImmutableList.of(convert(getRel(f.getInput))))
case uc: Uncollect =>
// convert Uncollect into TableFunctionScan
val cluster = correlate.getCluster
val dataType = uc.getInput.getRowType.getFieldList.get(0).getValue
val (componentType, explodeTableFunc) = dataType match {
case arrayType: ArraySqlType =>
(arrayType.getComponentType,
ExplodeFunctionUtil.explodeTableFuncFromType(
fromLogicalTypeToTypeInfo(toLogicalType(arrayType))))
case map: MapSqlType =>
val keyType = toLogicalType(map.getKeyType)
val valueType = toLogicalType(map.getValueType)
val rowInternalType = RowType.of(keyType, valueType)
val componentType = cluster.getTypeFactory.asInstanceOf[FlinkTypeFactory]
.createFieldTypeFromLogicalType(rowInternalType)
val mapTypeInfo = new MapTypeInfo(
fromLogicalTypeToTypeInfo(keyType),
fromLogicalTypeToTypeInfo(valueType)
)
val explodeFunction = ExplodeFunctionUtil.explodeTableFuncFromType(mapTypeInfo)
(componentType, explodeFunction)
case mt: MultisetSqlType =>
(mt.getComponentType,
ExplodeFunctionUtil.explodeTableFuncFromType(
fromLogicalTypeToTypeInfo(toLogicalType(mt))))
case _ => throw new TableException(s"Unsupported UNNEST on type: ${dataType.toString}")
}
// create sql function
val explodeSqlFunc = UserDefinedFunctionUtils.createTableSqlFunction(
FunctionIdentifier.of("explode"),
"explode",
explodeTableFunc,
fromLogicalTypeToDataType(toLogicalType(componentType)),
cluster.getTypeFactory.asInstanceOf[FlinkTypeFactory])
// create table function call
// TODO use BridgingSqlFunction once we remove TableSqlFunction
val rexCall = cluster.getRexBuilder.makeCall(
explodeSqlFunc.getRowType(unwrapTypeFactory(cluster), Collections.emptyList()),
explodeSqlFunc,
getRel(uc.getInput).asInstanceOf[LogicalProject].getChildExps
)
// determine rel data type of unnest
val rowType = componentType match {
case _: AbstractSqlType =>
new RelRecordType(
StructKind.FULLY_QUALIFIED,
ImmutableList.of(new RelDataTypeFieldImpl("f0", 0, componentType)))
case _: RelRecordType => componentType
case _ => throw new TableException(
s"Unsupported multiset component type in UNNEST: ${componentType.toString}")
}
// create table function scan
new LogicalTableFunctionScan(
cluster,
correlate.getTraitSet,
Collections.emptyList(),
rexCall,
classOf[Array[Object]],
rowType,
null)
}
}
// convert unnest into table function scan
val tableFunctionScan = convert(array)
// create correlate with table function scan as input
val newCorrelate =
correlate.copy(correlate.getTraitSet, ImmutableList.of(outer, tableFunctionScan))
call.transformTo(newCorrelate)
}
private def getRel(rel: RelNode): RelNode = {
rel match {
case vertex: HepRelVertex => vertex.getCurrentRel
case _ => rel
}
}
}
object LogicalUnnestRule {
val INSTANCE = new LogicalUnnestRule(
operand(classOf[LogicalCorrelate], any),
"LogicalUnnestRule")
}
|
hequn8128/flink
|
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/logical/LogicalUnnestRule.scala
|
Scala
|
apache-2.0
| 7,372 |
package ioinformatics.neo4j.rdf.plugin
import java.io.InputStream
import java.net.URLEncoder
import javax.ws.rs.core.{HttpHeaders, MediaType}
import org.apache.commons.io.IOUtils
import org.codehaus.jackson.node.ArrayNode
import org.neo4j.harness.{ServerControls, TestServerBuilders}
import org.neo4j.test.server.HTTP
import org.scalatest.time._
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
/**
* @author Alexander De Leon <[email protected]>
*/
class RdfPluginTest extends FlatSpec with Matchers with BeforeAndAfter {
var server: ServerControls = null
before {
server = TestServerBuilders.newInProcessBuilder.withExtension("/test", classOf[RdfResource])
.withExtension("/test", classOf[SparqlResource]).newServer
}
after {
server.close()
}
"insertRdf" should
"store the RDF triples on the Neo4J graph" in {
val payload: String = testTriples
// Given
val response: HTTP.Response = HTTP.withHeaders(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_PLAIN).
PUT(server.httpURI.resolve("test").toString, HTTP.RawPayload.rawPayload(payload))
response.status should be(200)
val query = "select * where { ?s ?p ?o }"
val queryResponse: HTTP.Response = HTTP.withHeaders(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED)
.POST(server.httpURI.resolve("test/sparql").toString, HTTP.RawPayload.rawPayload(s"query=${urlEncode(query)}"))
println(queryResponse)
queryResponse.status should be(200)
queryResponse.get("results").get("bindings").asInstanceOf[ArrayNode].size() should be(2)
val firstBinding = queryResponse.get("results").get("bindings").get(0)
firstBinding.get("s").get("value").asText() should be("http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType1")
val secondBinding = queryResponse.get("results").get("bindings").get(1)
secondBinding.get("o").get("type").asText() should be("literal")
secondBinding.get("o").get("value").asText() should be("Thing")
val vars = queryResponse.get("head").get("vars").asInstanceOf[ArrayNode]
vars.get(0).asText() should be("s")
vars.get(1).asText() should be("p")
vars.get(2).asText() should be("o")
}
it should "insert only one node per URI" in {
val payload: String = _10000Triples
// Given
val response1: HTTP.Response = HTTP.withHeaders(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_PLAIN).
PUT(server.httpURI.resolve("test").toString, HTTP.RawPayload.rawPayload(payload))
response1.status should be(200)
val response2: HTTP.Response = HTTP.withHeaders(HttpHeaders.CONTENT_TYPE, MediaType.TEXT_PLAIN).
PUT(server.httpURI.resolve("test").toString, HTTP.RawPayload.rawPayload(payload))
response2.status should be(200)
val query = "select * where { ?s <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://rdf.ebi.ac.uk/terms/chembl#Activity> }"
val queryResponse: HTTP.Response = HTTP.withHeaders(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED)
.POST(server.httpURI.resolve("test/sparql").toString, HTTP.RawPayload.rawPayload(s"query=${urlEncode(query)}"))
queryResponse.status should be(200)
queryResponse.get("results").get("bindings").asInstanceOf[ArrayNode].size() should be(19998)
}
"executeSparql" should
"return empty results" in {
val query = "select * where { ?s ?p ?o }"
val response: HTTP.Response = HTTP.withHeaders(HttpHeaders.CONTENT_TYPE, MediaType.APPLICATION_FORM_URLENCODED)
.POST(server.httpURI.resolve("test/sparql").toString, HTTP.RawPayload.rawPayload(s"query=${urlEncode(query)}"))
response.status should be(200)
response.get("results").get("bindings").asInstanceOf[ArrayNode].size() should be(0)
}
private def _10000Triples: String = {
val input: InputStream = getClass.getResourceAsStream("/test.nt")
val buffer: StringBuffer = new StringBuffer
import scala.collection.JavaConversions._
for (line <- IOUtils.readLines(input)) {
buffer.append(line).append("\\n")
}
buffer.toString
}
private def testTriples: String = "<http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/ProductType> .\\n"+
"<http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType1> <http://www.w3.org/2000/01/rdf-schema#label> \\"Thing\\" ."
  private def repeatedTriple: String = "<http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/ProductType> .\\n"+
"<http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/instances/ProductType1> <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <http://www4.wiwiss.fu-berlin.de/bizer/bsbm/v01/vocabulary/ProductType> ."
private def berlin100: String = {
val input: InputStream = getClass.getResourceAsStream("/berlin_nt_100.nt")
val buffer: StringBuffer = new StringBuffer
import scala.collection.JavaConversions._
for (line <- IOUtils.readLines(input)) {
buffer.append(line).append("\\n")
}
buffer.toString
}
private def urlEncode(text: String) = URLEncoder.encode(text, "utf8")
}
|
io-informatics/neo4j-rdf-plugin
|
src/test/scala/ioinformatics/neo4j/rdf/plugin/RdfPluginTest.scala
|
Scala
|
gpl-3.0
| 5,279 |
package jp.co.bizreach.elasticsearch4s.retry
import scala.concurrent.duration.FiniteDuration
case class RetryConfig(
maxAttempts: Int,
retryDuration: FiniteDuration,
backOff: BackOff
)
|
bizreach/elastic-scala-httpclient
|
elastic-scala-httpclient/src/main/scala/jp/co/bizreach/elasticsearch4s/retry/RetryConfig.scala
|
Scala
|
apache-2.0
| 192 |
/***
* Excerpted from "Seven Concurrency Models in Seven Weeks",
* published by The Pragmatic Bookshelf.
* Copyrights apply to this code. It may not be used to create training material,
* courses, books, articles, and the like. Contact us if you are in doubt.
* We make no guarantees that this code is fit for any purpose.
* Visit http://www.pragmaticprogrammer.com/titles/pb7con for more book information.
***/
package com.paulbutcher
import akka.actor._
case object Processed
class Parser(counter: ActorRef) extends Actor {
val pages = Pages(100000, "enwiki.xml")
override def preStart {
for (page <- pages.take(10))
counter ! page
}
def receive = {
case Processed if pages.hasNext => counter ! pages.next
case _ => context.stop(self)
}
}
|
XBOOS/concurrency
|
code/ActorsScala/WordCount/src/main/scala/com/paulbutcher/Parser.scala
|
Scala
|
gpl-2.0
| 787 |
/*
* Copyright 2009 Mark Tye
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
*/
package net.liftweb.ext_api.appengine
import com.google.appengine.api.{datastore => gae}
import net.liftweb.common.{Box, Empty, Failure, Full}
import net.liftweb.util.Log
import scala.collection.jcl._
trait EntityCreator {
private[appengine] var createEntity: () => gae.Entity
}
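// Base trait wrapping a Google App Engine datastore entity of this class's kind: property
// access goes through the has/get/set helpers, and put()/delete() persist or remove the
// underlying gae.Entity.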
trait Entity[T <: Entity[T]] {
val kind = getClass.getCanonicalName
private[appengine] var entity = new gae.Entity(kind)
private[appengine] def has(name: String): Boolean = entity.hasProperty(name)
private[appengine] def get(name: String): Option[Any] = if (has(name)) Some(entity.getProperty(name)) else None
private[appengine] def set(name: String, maybe: Option[Any]): T = { maybe match {
case Some(value) => entity.setProperty(name, value)
case None => entity.removeProperty(name)
}
this.asInstanceOf[T]
}
private[appengine] def entityKey = entity.getKey
private[appengine] def keyIsComplete = entity.getKey.isComplete
def id: Long = entity.getKey.getId
def key: String = gae.KeyFactory.keyToString(entity.getKey)
def delete() = Entity.datastore.delete(entity.getKey)
def put(): T = {
Entity.datastore.put(entity)
this.asInstanceOf[T]
}
def create(entity: gae.Entity): T = {
require(entity.getKind == this.kind, "Entities must be of same kind")
val created = getClass.newInstance.asInstanceOf[T]
created.entity = entity
created
}
private def query: gae.Query = {
val query = new gae.Query(kind)
import Conversions.convertMap
for ((name, value) <- entity.getProperties) {
query.addFilter(name, gae.Query.FilterOperator.EQUAL, value)
}
query
}
def findAllLike: List[T] = {
val preparedQuery = Entity.datastore.prepare(query)
val it = new MutableIterator.Wrapper(preparedQuery.asIterator)
it.map(create(_)).toList
}
}
object Entity {
lazy val datastore = gae.DatastoreServiceFactory.getDatastoreService
}
trait Parent {
self: Entity[_] =>
type Parent <: Entity[Parent]
private var isParentSet: Boolean = false
private[appengine] def getParent: gae.Entity = Entity.datastore.get(entity.getParent)
def parent(parent: Parent) {
require( (parent != null), "Parent can not be null")
require( !isParentSet, "Parent has already been set")
// require( parent.keyIsComplete, "Parent key is incomplete" )
isParentSet = true
self.entity = new gae.Entity(kind, parent.entityKey)
}
}
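// Intended to be mixed into the companion of an Entity subclass: provides lookups by key
// or id, prepared-query helpers (list, first, findAll, findAllWithParent) and counting
// for that entity kind.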
trait MetaEntity[T <: Entity[T]] {
val entity: T
val canonicalName = getClass getCanonicalName
val companionName = canonicalName take (canonicalName.length - 1)
def kind = entity.kind
def apply(): T = Class.forName(companionName).newInstance.asInstanceOf[T]
def create(entity: gae.Entity) = this.entity create entity
def delete(entities: List[T]) {
entities foreach (_.delete())
}
private def get(key: gae.Key): Box[T] = {
try {
Full(create(Entity.datastore get key))
} catch {
case e: Exception => Failure(e.getMessage, Full(e), Empty)
}
}
def get(key: String): Box[T] = {
this get (gae.KeyFactory stringToKey key)
}
def getWithId(id: Long): Box[T] = {
this get gae.KeyFactory.createKey(kind, id)
}
def getWithId(id: String): Box[T] = {
try {
this getWithId id.toLong
} catch {
case nfe: NumberFormatException => Failure("Id must be numeric", Full(nfe), Empty)
}
}
def getWithParentAndId(parent: Entity[_], id: String): Box[T] = {
this get gae.KeyFactory.createKey(parent.entityKey, kind, id.toLong)
}
def getParentOf(child: Parent): T = create (child getParent)
def all: gae.PreparedQuery = Entity.datastore prepare (new gae.Query(kind))
def list(query: gae.Query): List[T] = {
val preparedQuery = Entity.datastore.prepare(query)
val it = new MutableIterator.Wrapper(preparedQuery.asIterator)
it.map(create(_)).toList
}
def first(query: gae.Query): Option[T] = {
val preparedQuery = Entity.datastore.prepare(query)
val it = preparedQuery.asIterator
if (it.hasNext) Some(create(it.next)) else None
}
def find(key: gae.Key): T = {
this create Entity.datastore.get(key)
}
def findAll: List[T] = {
val query = new gae.Query(kind)
list(query)
}
def findAllWithParent(parent: Entity[_]): List[T] = {
val key: gae.Key = gae.KeyFactory.stringToKey(parent.key)
val query = new gae.Query(kind, key)
list(query)
}
def findOneWithParent(parent: Entity[_], sortedBy: Property[_, T], direction: gae.Query.SortDirection): Option[T] = {
val key: gae.Key = gae.KeyFactory.stringToKey(parent.key)
val query = new gae.Query(kind, key)
query.addSort(sortedBy.kind, direction)
first(query)
}
def findFirstWithParent(parent: Entity[_], sortedBy: Property[_, T]): Option[T] =
findOneWithParent(parent, sortedBy, gae.Query.SortDirection.ASCENDING)
def findLastWithParent(parent: Entity[_], sortedBy: Property[_, T]): Option[T] =
findOneWithParent(parent, sortedBy, gae.Query.SortDirection.DESCENDING)
def countAll: Int = all.countEntities
}
|
mtye/lift-appengine
|
lift-appengine/src/main/scala/net/liftweb/ext_api/appengine/Entity.scala
|
Scala
|
apache-2.0
| 5,732 |
package com.twitter.server.view
import com.twitter.finagle.{Service, SimpleFilter}
import com.twitter.finagle.http.{Request, Response}
import com.twitter.io.Buf
import com.twitter.server.util.HttpUtils.{expectsHtml, newResponse}
import com.twitter.util.Future
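// Wraps the downstream response body in a <pre> block (served as text/html) when the
// client expects HTML; plain-text clients get the response unchanged.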
class TextBlockView extends SimpleFilter[Request, Response] {
def apply(req: Request, svc: Service[Request, Response]): Future[Response] = {
val serviced = svc(req)
if (!expectsHtml(req)) {
serviced
} else {
serviced.flatMap { res =>
val html = s"<pre>${res.contentString}</pre>"
newResponse(
contentType = "text/html;charset=UTF-8",
content = Buf.Utf8(html)
)
}
}
}
}
|
twitter/twitter-server
|
server/src/main/scala/com/twitter/server/view/TextBlockView.scala
|
Scala
|
apache-2.0
| 713 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.schema
import java.util.{Date, UUID}
import org.locationtech.jts.geom.{Geometry, Point}
import org.apache.kudu.Schema
import org.apache.kudu.client.RowResult
import org.geotools.util.Converters
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.locationtech.geomesa.utils.text.WKTUtils
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class KuduColumnAdapterTest extends Specification with Mockito {
import scala.collection.JavaConverters._
val sft = SimpleFeatureTypes.createType("test",
"string:String,int:Int,long:Long,float:Float,double:Double,boolean:Boolean,date:Date,uuid:UUID," +
"bytes:Bytes,list:List[Float],map:Map[String,Int],*point:Point:srid=4326,line:LineString:srid=4326")
"KuduColumnAdapter" should {
"adapt string columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("string")).asInstanceOf[KuduColumnAdapter[String]]
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, "test")
write.getString(adapter.columns.head.getName) mustEqual "test"
val read = mock[RowResult]
read.getString(adapter.columns.head.getName) returns "test"
adapter.readFromRow(read) mustEqual "test"
}
"adapt int columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("int")).asInstanceOf[KuduColumnAdapter[Int]]
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, 2)
write.getInt(adapter.columns.head.getName) mustEqual 2
val read = mock[RowResult]
read.getInt(adapter.columns.head.getName) returns 2
adapter.readFromRow(read) mustEqual 2
}
"adapt long columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("long")).asInstanceOf[KuduColumnAdapter[Long]]
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, 3L)
write.getLong(adapter.columns.head.getName) mustEqual 3L
val read = mock[RowResult]
read.getLong(adapter.columns.head.getName) returns 3L
adapter.readFromRow(read) mustEqual 3L
}
"adapt float columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("float")).asInstanceOf[KuduColumnAdapter[Float]]
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, 4.1f)
write.getFloat(adapter.columns.head.getName) mustEqual 4.1f
val read = mock[RowResult]
read.getFloat(adapter.columns.head.getName) returns 4.1f
adapter.readFromRow(read) mustEqual 4.1f
}
"adapt double columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("double")).asInstanceOf[KuduColumnAdapter[Double]]
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, 5.2d)
write.getDouble(adapter.columns.head.getName) mustEqual 5.2d
val read = mock[RowResult]
read.getDouble(adapter.columns.head.getName) returns 5.2d
adapter.readFromRow(read) mustEqual 5.2d
}
"adapt boolean columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("boolean")).asInstanceOf[KuduColumnAdapter[Boolean]]
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, true)
write.getBoolean(adapter.columns.head.getName) mustEqual true
val read = mock[RowResult]
read.getBoolean(adapter.columns.head.getName) returns true
adapter.readFromRow(read) mustEqual true
}
"adapt date columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("date")).asInstanceOf[KuduColumnAdapter[Date]]
val date = Converters.convert("2018-01-02T00:01:30.000Z", classOf[Date])
date must not(beNull)
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, date)
write.getLong(adapter.columns.head.getName) mustEqual date.getTime * 1000 // micros
val read = mock[RowResult]
read.getLong(adapter.columns.head.getName) returns date.getTime * 1000
adapter.readFromRow(read) mustEqual date
}
"adapt uuid columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("uuid")).asInstanceOf[KuduColumnAdapter[UUID]]
val uuid = UUID.fromString("e8c274ef-7d7c-4556-af68-1fc4b7f26a36")
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, uuid)
val bytes = write.getBinary(adapter.columns.head.getName)
bytes.remaining() mustEqual 16
bytes.getLong mustEqual uuid.getMostSignificantBits
bytes.getLong mustEqual uuid.getLeastSignificantBits
bytes.rewind()
val read = mock[RowResult]
read.getBinary(adapter.columns.head.getName) returns bytes
adapter.readFromRow(read) mustEqual uuid
}
"adapt bytes columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("bytes")).asInstanceOf[KuduColumnAdapter[Array[Byte]]]
val bytes = Array.tabulate[Byte](7)(i => i.toByte)
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, bytes)
write.getBinaryCopy(adapter.columns.head.getName) mustEqual bytes
val read = mock[RowResult]
read.getBinaryCopy(adapter.columns.head.getName) returns bytes
adapter.readFromRow(read) mustEqual bytes
}
"adapt list columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("list")).asInstanceOf[KuduColumnAdapter[java.util.List[Float]]]
val list = Seq(0f, 1f, 2f).asJava
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, list)
val read = mock[RowResult]
read.getBinary(adapter.columns.head.getName) returns write.getBinary(adapter.columns.head.getName)
adapter.readFromRow(read) mustEqual list
}
"adapt map columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("map")).asInstanceOf[KuduColumnAdapter[java.util.Map[String, Int]]]
val map = Map("zero" -> 0, "one" -> 1, "two" -> 2).asJava
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, map)
val read = mock[RowResult]
read.getBinary(adapter.columns.head.getName) returns write.getBinary(adapter.columns.head.getName)
adapter.readFromRow(read) mustEqual map
}
"adapt point columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("point")).asInstanceOf[KuduColumnAdapter[Point]]
val pt = WKTUtils.read("POINT(45 55)").asInstanceOf[Point]
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, pt)
write.getDouble(adapter.columns.head.getName) mustEqual 45d
write.getDouble(adapter.columns.last.getName) mustEqual 55d
val read = mock[RowResult]
read.getDouble(adapter.columns.head.getName) returns 45d
read.getDouble(adapter.columns.last.getName) returns 55d
adapter.readFromRow(read) mustEqual pt
}
"adapt non-point columns" in {
val adapter = KuduColumnAdapter(sft, sft.getDescriptor("line")).asInstanceOf[KuduColumnAdapter[Geometry]]
val line = WKTUtils.read("LINESTRING(45 55, 46 56, 47 57)")
val write = new Schema(adapter.writeColumns.asJava).newPartialRow()
adapter.writeToRow(write, line)
val read = mock[RowResult]
read.getBinary(adapter.columns.head.getName) returns write.getBinary(adapter.columns.head.getName)
adapter.readFromRow(read) mustEqual line
}
}
}
|
elahrvivaz/geomesa
|
geomesa-kudu/geomesa-kudu-datastore/src/test/scala/org/locationtech/geomesa/kudu/schema/KuduColumnAdapterTest.scala
|
Scala
|
apache-2.0
| 8,309 |
/* Title: Pure/General/file.scala
Author: Makarius
File-system operations.
*/
package isabelle
import java.io.{BufferedWriter, OutputStreamWriter, FileOutputStream, BufferedOutputStream,
OutputStream, InputStream, FileInputStream, BufferedInputStream, BufferedReader,
InputStreamReader, File => JFile, IOException}
import java.nio.file.{StandardOpenOption, StandardCopyOption, Path => JPath,
Files, SimpleFileVisitor, FileVisitOption, FileVisitResult, FileSystemException}
import java.nio.file.attribute.BasicFileAttributes
import java.net.{URL, MalformedURLException}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import java.util.regex.Pattern
import java.util.EnumSet
import org.tukaani.xz.{XZInputStream, XZOutputStream}
import scala.collection.mutable
import scala.util.matching.Regex
object File
{
/* standard path (Cygwin or Posix) */
def standard_path(path: Path): String = path.expand.implode
def standard_path(platform_path: String): String =
if (Platform.is_windows) {
val Platform_Root = new Regex("(?i)" +
        Pattern.quote(Isabelle_System.cygwin_root()) + """(?:\\+|\z)(.*)""")
      val Drive = new Regex("""([a-zA-Z]):\\*(.*)""")
      platform_path.replace('/', '\\') match {
        case Platform_Root(rest) => "/" + rest.replace('\\', '/')
        case Drive(letter, rest) =>
          "/cygdrive/" + Word.lowercase(letter) +
            (if (rest == "") "" else "/" + rest.replace('\\', '/'))
        case path => path.replace('\\', '/')
}
}
else platform_path
def standard_path(file: JFile): String = standard_path(file.getPath)
def standard_url(name: String): String =
try {
val url = new URL(name)
if (url.getProtocol == "file" && Url.is_wellformed_file(name))
standard_path(Url.parse_file(name))
else name
}
catch { case _: MalformedURLException => standard_path(name) }
/* platform path (Windows or Posix) */
private val Cygdrive = new Regex("/cygdrive/([a-zA-Z])($|/.*)")
private val Named_Root = new Regex("//+([^/]*)(.*)")
def platform_path(standard_path: String): String =
if (Platform.is_windows) {
val result_path = new StringBuilder
val rest =
standard_path match {
case Cygdrive(drive, rest) =>
result_path ++= (Word.uppercase(drive) + ":" + JFile.separator)
rest
case Named_Root(root, rest) =>
result_path ++= JFile.separator
result_path ++= JFile.separator
result_path ++= root
rest
case path if path.startsWith("/") =>
result_path ++= Isabelle_System.cygwin_root()
path
case path => path
}
for (p <- space_explode('/', rest) if p != "") {
val len = result_path.length
if (len > 0 && result_path(len - 1) != JFile.separatorChar)
result_path += JFile.separatorChar
result_path ++= p
}
result_path.toString
}
else standard_path
def platform_path(path: Path): String = platform_path(standard_path(path))
def platform_file(path: Path): JFile = new JFile(platform_path(path))
/* platform files */
def absolute(file: JFile): JFile = file.toPath.toAbsolutePath.normalize.toFile
def absolute_name(file: JFile): String = absolute(file).getPath
def canonical(file: JFile): JFile = file.getCanonicalFile
def canonical_name(file: JFile): String = canonical(file).getPath
def path(file: JFile): Path = Path.explode(standard_path(file))
def pwd(): Path = path(Path.current.absolute_file)
/* relative paths */
def relative_path(base: Path, other: Path): Option[Path] =
{
val base_path = base.file.toPath
val other_path = other.file.toPath
if (other_path.startsWith(base_path))
Some(path(base_path.relativize(other_path).toFile))
else None
}
def rebase_path(base: Path, other: Path): Option[Path] =
relative_path(base, other).map(base + _)
/* bash path */
def bash_path(path: Path): String = Bash.string(standard_path(path))
def bash_path(file: JFile): String = Bash.string(standard_path(file))
/* directory entries */
def check_dir(path: Path): Path =
if (path.is_dir) path else error("No such directory: " + path)
def check_file(path: Path): Path =
if (path.is_file) path else error("No such file: " + path)
/* directory content */
def read_dir(dir: Path): List[String] =
{
if (!dir.is_dir) error("No such directory: " + dir.toString)
val files = dir.file.listFiles
if (files == null) Nil
else files.toList.map(_.getName).sorted
}
def find_files(
start: JFile,
pred: JFile => Boolean = _ => true,
include_dirs: Boolean = false,
follow_links: Boolean = false): List[JFile] =
{
val result = new mutable.ListBuffer[JFile]
def check(file: JFile) { if (pred(file)) result += file }
if (start.isFile) check(start)
else if (start.isDirectory) {
val options =
if (follow_links) EnumSet.of(FileVisitOption.FOLLOW_LINKS)
else EnumSet.noneOf(classOf[FileVisitOption])
Files.walkFileTree(start.toPath, options, Integer.MAX_VALUE,
new SimpleFileVisitor[JPath] {
override def preVisitDirectory(path: JPath, attrs: BasicFileAttributes): FileVisitResult =
{
if (include_dirs) check(path.toFile)
FileVisitResult.CONTINUE
}
override def visitFile(path: JPath, attrs: BasicFileAttributes): FileVisitResult =
{
val file = path.toFile
if (include_dirs || !file.isDirectory) check(file)
FileVisitResult.CONTINUE
}
}
)
}
result.toList
}
/* read */
def read(file: JFile): String = Bytes.read(file).text
def read(path: Path): String = read(path.file)
def read_stream(reader: BufferedReader): String =
{
val output = new StringBuilder(100)
var c = -1
while ({ c = reader.read; c != -1 }) output += c.toChar
reader.close
output.toString
}
def read_stream(stream: InputStream): String =
read_stream(new BufferedReader(new InputStreamReader(stream, UTF8.charset)))
def read_gzip(file: JFile): String =
read_stream(new GZIPInputStream(new BufferedInputStream(new FileInputStream(file))))
def read_gzip(path: Path): String = read_gzip(path.file)
def read_xz(file: JFile): String =
read_stream(new XZInputStream(new BufferedInputStream(new FileInputStream(file))))
def read_xz(path: Path): String = read_xz(path.file)
/* read lines */
def read_line(reader: BufferedReader): Option[String] =
{
val line =
      try { reader.readLine }
catch { case _: IOException => null }
if (line == null) None else Some(line)
}
def read_lines(reader: BufferedReader, progress: String => Unit): List[String] =
{
val result = new mutable.ListBuffer[String]
var line: Option[String] = None
while ({ line = read_line(reader); line.isDefined }) {
progress(line.get)
result += line.get
}
reader.close
result.toList
}
/* write */
def write_file(file: JFile, text: CharSequence, make_stream: OutputStream => OutputStream)
{
val stream = make_stream(new FileOutputStream(file))
using(new BufferedWriter(new OutputStreamWriter(stream, UTF8.charset)))(_.append(text))
}
def write(file: JFile, text: CharSequence): Unit = write_file(file, text, s => s)
def write(path: Path, text: CharSequence): Unit = write(path.file, text)
def write_gzip(file: JFile, text: CharSequence): Unit =
write_file(file, text, (s: OutputStream) => new GZIPOutputStream(new BufferedOutputStream(s)))
def write_gzip(path: Path, text: CharSequence): Unit = write_gzip(path.file, text)
def write_xz(file: JFile, text: CharSequence, options: XZ.Options): Unit =
File.write_file(file, text, s => new XZOutputStream(new BufferedOutputStream(s), options))
def write_xz(file: JFile, text: CharSequence): Unit = write_xz(file, text, XZ.options())
def write_xz(path: Path, text: CharSequence, options: XZ.Options): Unit =
write_xz(path.file, text, options)
def write_xz(path: Path, text: CharSequence): Unit = write_xz(path, text, XZ.options())
def write_backup(path: Path, text: CharSequence)
{
if (path.is_file) move(path, path.backup)
write(path, text)
}
def write_backup2(path: Path, text: CharSequence)
{
if (path.is_file) move(path, path.backup2)
write(path, text)
}
/* append */
def append(file: JFile, text: CharSequence): Unit =
Files.write(file.toPath, UTF8.bytes(text.toString),
StandardOpenOption.APPEND, StandardOpenOption.CREATE)
def append(path: Path, text: CharSequence): Unit = append(path.file, text)
/* eq */
def eq(file1: JFile, file2: JFile): Boolean =
try { java.nio.file.Files.isSameFile(file1.toPath, file2.toPath) }
catch { case ERROR(_) => false }
def eq(path1: Path, path2: Path): Boolean = eq(path1.file, path2.file)
/* eq_content */
def eq_content(file1: JFile, file2: JFile): Boolean =
if (eq(file1, file2)) true
else if (file1.length != file2.length) false
else Bytes.read(file1) == Bytes.read(file2)
def eq_content(path1: Path, path2: Path): Boolean = eq_content(path1.file, path2.file)
/* copy */
def copy(src: JFile, dst: JFile)
{
val target = if (dst.isDirectory) new JFile(dst, src.getName) else dst
if (!eq(src, target))
Files.copy(src.toPath, target.toPath,
StandardCopyOption.COPY_ATTRIBUTES,
StandardCopyOption.REPLACE_EXISTING)
}
def copy(path1: Path, path2: Path): Unit = copy(path1.file, path2.file)
/* move */
def move(src: JFile, dst: JFile)
{
val target = if (dst.isDirectory) new JFile(dst, src.getName) else dst
if (!eq(src, target))
Files.move(src.toPath, target.toPath, StandardCopyOption.REPLACE_EXISTING)
}
def move(path1: Path, path2: Path): Unit = move(path1.file, path2.file)
/* symbolic link */
def link(src: Path, dst: Path, force: Boolean = false)
{
val src_file = src.file
val dst_file = dst.file
val target = if (dst_file.isDirectory) new JFile(dst_file, src_file.getName) else dst_file
if (force) target.delete
try { Files.createSymbolicLink(target.toPath, src_file.toPath) }
catch {
case _: UnsupportedOperationException if Platform.is_windows =>
Cygwin.link(standard_path(src), target)
case _: FileSystemException if Platform.is_windows =>
Cygwin.link(standard_path(src), target)
}
}
/* permissions */
def is_executable(path: Path): Boolean =
{
if (Platform.is_windows) Isabelle_System.bash("test -x " + bash_path(path)).check.ok
else path.file.canExecute
}
def set_executable(path: Path, flag: Boolean)
{
if (Platform.is_windows && flag) Isabelle_System.bash("chmod a+x " + bash_path(path)).check
else if (Platform.is_windows) Isabelle_System.bash("chmod a-x " + bash_path(path)).check
else path.file.setExecutable(flag, false)
}
}
|
larsrh/libisabelle
|
modules/pide/2019-RC4/src/main/scala/General/file.scala
|
Scala
|
apache-2.0
| 11,070 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.hadoop.fs.Path
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.sql.{SPARK_LEGACY_DATETIME, SPARK_VERSION_METADATA_KEY}
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
object DataSourceUtils {
/**
* The key to use for storing partitionBy columns as options.
*/
val PARTITIONING_COLUMNS_KEY = "__partition_columns"
/**
* Utility methods for converting partitionBy columns to options and back.
*/
private implicit val formats = Serialization.formats(NoTypeHints)
def encodePartitioningColumns(columns: Seq[String]): String = {
Serialization.write(columns)
}
def decodePartitioningColumns(str: String): Seq[String] = {
Serialization.read[Seq[String]](str)
}
/**
* Verify if the schema is supported in datasource. This verification should be done
* in a driver side.
*/
def verifySchema(format: FileFormat, schema: StructType): Unit = {
schema.foreach { field =>
if (!format.supportDataType(field.dataType)) {
throw new AnalysisException(
s"$format data source does not support ${field.dataType.catalogString} data type.")
}
}
}
// SPARK-24626: Metadata files and temporary files should not be
// counted as data files, so that they shouldn't participate in tasks like
// location size calculation.
private[sql] def isDataPath(path: Path): Boolean = isDataFile(path.getName)
private[sql] def isDataFile(fileName: String) =
!(fileName.startsWith("_") || fileName.startsWith("."))
def needRebaseDateTime(lookupFileMeta: String => String): Option[Boolean] = {
if (Utils.isTesting && SQLConf.get.getConfString("spark.test.forceNoRebase", "") == "true") {
return Some(false)
}
// If there is no version, we return None and let the caller side to decide.
Option(lookupFileMeta(SPARK_VERSION_METADATA_KEY)).map { version =>
// Files written by Spark 2.4 and earlier follow the legacy hybrid calendar and we need to
// rebase the datetime values.
// Files written by Spark 3.0 and latter may also need the rebase if they were written with
// the "rebaseInWrite" config enabled.
version < "3.0.0" || lookupFileMeta(SPARK_LEGACY_DATETIME) != null
}
}
}
|
kevinyu98/spark
|
sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSourceUtils.scala
|
Scala
|
apache-2.0
| 3,243 |
/**
* Generated by Scrooge
* version: 4.7.0
* rev: d9d56174937f524a1981b38ebd6280eef7eeda4a
* built at: 20160427-121531
*/
package com.komanov.serialization.domain.thriftscala
import com.twitter.scrooge.{
LazyTProtocol,
TFieldBlob, ThriftException, ThriftStruct, ThriftStructCodec3, ThriftStructFieldInfo,
ThriftStructMetaData, ThriftUtil}
import org.apache.thrift.protocol._
import org.apache.thrift.transport.{TMemoryBuffer, TTransport}
import java.nio.ByteBuffer
import java.util.Arrays
import scala.collection.immutable.{Map => immutable$Map}
import scala.collection.mutable.Builder
import scala.collection.mutable.{
ArrayBuffer => mutable$ArrayBuffer, Buffer => mutable$Buffer,
HashMap => mutable$HashMap, HashSet => mutable$HashSet}
import scala.collection.{Map, Set}
object DomainRemovedPb extends ThriftStructCodec3[DomainRemovedPb] {
private val NoPassthroughFields = immutable$Map.empty[Short, TFieldBlob]
val Struct = new TStruct("DomainRemovedPb")
val NameField = new TField("name", TType.STRING, 1)
val NameFieldManifest = implicitly[Manifest[String]]
/**
* Field information in declaration order.
*/
lazy val fieldInfos: scala.List[ThriftStructFieldInfo] = scala.List[ThriftStructFieldInfo](
new ThriftStructFieldInfo(
NameField,
true,
false,
NameFieldManifest,
_root_.scala.None,
_root_.scala.None,
immutable$Map.empty[String, String],
immutable$Map.empty[String, String]
)
)
lazy val structAnnotations: immutable$Map[String, String] =
immutable$Map.empty[String, String]
/**
* Checks that all required fields are non-null.
*/
def validate(_item: DomainRemovedPb): Unit = {
}
def withoutPassthroughFields(original: DomainRemovedPb): DomainRemovedPb =
new Immutable(
name =
{
val field = original.name
field.map { field =>
field
}
}
)
override def encode(_item: DomainRemovedPb, _oproto: TProtocol): Unit = {
_item.write(_oproto)
}
private[this] def lazyDecode(_iprot: LazyTProtocol): DomainRemovedPb = {
var nameOffset: Int = -1
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
val _start_offset = _iprot.offset
_iprot.readStructBegin()
while (!_done) {
val _field = _iprot.readFieldBegin()
if (_field.`type` == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 =>
_field.`type` match {
case TType.STRING =>
nameOffset = _iprot.offsetSkipString
case _actualType =>
val _expectedType = TType.STRING
throw new TProtocolException(
"Received wrong type for field 'name' (expected=%s, actual=%s).".format(
ttypeToString(_expectedType),
ttypeToString(_actualType)
)
)
}
case _ =>
if (_passthroughFields == null)
_passthroughFields = immutable$Map.newBuilder[Short, TFieldBlob]
_passthroughFields += (_field.id -> TFieldBlob.read(_field, _iprot))
}
_iprot.readFieldEnd()
}
}
_iprot.readStructEnd()
new LazyImmutable(
_iprot,
_iprot.buffer,
_start_offset,
_iprot.offset,
nameOffset,
if (_passthroughFields == null)
NoPassthroughFields
else
_passthroughFields.result()
)
}
override def decode(_iprot: TProtocol): DomainRemovedPb =
_iprot match {
case i: LazyTProtocol => lazyDecode(i)
case i => eagerDecode(i)
}
private[this] def eagerDecode(_iprot: TProtocol): DomainRemovedPb = {
var name: _root_.scala.Option[String] = _root_.scala.None
var _passthroughFields: Builder[(Short, TFieldBlob), immutable$Map[Short, TFieldBlob]] = null
var _done = false
_iprot.readStructBegin()
while (!_done) {
val _field = _iprot.readFieldBegin()
if (_field.`type` == TType.STOP) {
_done = true
} else {
_field.id match {
case 1 =>
_field.`type` match {
case TType.STRING =>
name = _root_.scala.Some(readNameValue(_iprot))
case _actualType =>
val _expectedType = TType.STRING
throw new TProtocolException(
"Received wrong type for field 'name' (expected=%s, actual=%s).".format(
ttypeToString(_expectedType),
ttypeToString(_actualType)
)
)
}
case _ =>
if (_passthroughFields == null)
_passthroughFields = immutable$Map.newBuilder[Short, TFieldBlob]
_passthroughFields += (_field.id -> TFieldBlob.read(_field, _iprot))
}
_iprot.readFieldEnd()
}
}
_iprot.readStructEnd()
new Immutable(
name,
if (_passthroughFields == null)
NoPassthroughFields
else
_passthroughFields.result()
)
}
def apply(
name: _root_.scala.Option[String] = _root_.scala.None
): DomainRemovedPb =
new Immutable(
name
)
def unapply(_item: DomainRemovedPb): _root_.scala.Option[_root_.scala.Option[String]] = _root_.scala.Some(_item.name)
@inline private def readNameValue(_iprot: TProtocol): String = {
_iprot.readString()
}
@inline private def writeNameField(name_item: String, _oprot: TProtocol): Unit = {
_oprot.writeFieldBegin(NameField)
writeNameValue(name_item, _oprot)
_oprot.writeFieldEnd()
}
@inline private def writeNameValue(name_item: String, _oprot: TProtocol): Unit = {
_oprot.writeString(name_item)
}
object Immutable extends ThriftStructCodec3[DomainRemovedPb] {
override def encode(_item: DomainRemovedPb, _oproto: TProtocol): Unit = { _item.write(_oproto) }
override def decode(_iprot: TProtocol): DomainRemovedPb = DomainRemovedPb.decode(_iprot)
override lazy val metaData: ThriftStructMetaData[DomainRemovedPb] = DomainRemovedPb.metaData
}
/**
* The default read-only implementation of DomainRemovedPb. You typically should not need to
* directly reference this class; instead, use the DomainRemovedPb.apply method to construct
* new instances.
*/
class Immutable(
val name: _root_.scala.Option[String],
override val _passthroughFields: immutable$Map[Short, TFieldBlob])
extends DomainRemovedPb {
def this(
name: _root_.scala.Option[String] = _root_.scala.None
) = this(
name,
Map.empty
)
}
/**
* This is another Immutable, this however keeps strings as lazy values that are lazily decoded from the backing
* array byte on read.
*/
private[this] class LazyImmutable(
_proto: LazyTProtocol,
_buf: Array[Byte],
_start_offset: Int,
_end_offset: Int,
nameOffset: Int,
override val _passthroughFields: immutable$Map[Short, TFieldBlob])
extends DomainRemovedPb {
override def write(_oprot: TProtocol): Unit = {
_oprot match {
case i: LazyTProtocol => i.writeRaw(_buf, _start_offset, _end_offset - _start_offset)
case _ => super.write(_oprot)
}
}
lazy val name: _root_.scala.Option[String] =
if (nameOffset == -1)
None
else {
Some(_proto.decodeString(_buf, nameOffset))
}
/**
* Override the super hash code to make it a lazy val rather than def.
*
* Calculating the hash code can be expensive, caching it where possible
* can provide significant performance wins. (Key in a hash map for instance)
* Usually not safe since the normal constructor will accept a mutable map or
* set as an arg
* Here however we control how the class is generated from serialized data.
* With the class private and the contract that we throw away our mutable references
* having the hash code lazy here is safe.
*/
override lazy val hashCode = super.hashCode
}
/**
* This Proxy trait allows you to extend the DomainRemovedPb trait with additional state or
* behavior and implement the read-only methods from DomainRemovedPb using an underlying
* instance.
*/
trait Proxy extends DomainRemovedPb {
protected def _underlying_DomainRemovedPb: DomainRemovedPb
override def name: _root_.scala.Option[String] = _underlying_DomainRemovedPb.name
override def _passthroughFields = _underlying_DomainRemovedPb._passthroughFields
}
}
trait DomainRemovedPb
extends ThriftStruct
with scala.Product1[Option[String]]
with java.io.Serializable
{
import DomainRemovedPb._
def name: _root_.scala.Option[String]
def _passthroughFields: immutable$Map[Short, TFieldBlob] = immutable$Map.empty
def _1 = name
/**
* Gets a field value encoded as a binary blob using TCompactProtocol. If the specified field
* is present in the passthrough map, that value is returned. Otherwise, if the specified field
* is known and not optional and set to None, then the field is serialized and returned.
*/
def getFieldBlob(_fieldId: Short): _root_.scala.Option[TFieldBlob] = {
lazy val _buff = new TMemoryBuffer(32)
lazy val _oprot = new TCompactProtocol(_buff)
_passthroughFields.get(_fieldId) match {
case blob: _root_.scala.Some[TFieldBlob] => blob
case _root_.scala.None => {
val _fieldOpt: _root_.scala.Option[TField] =
_fieldId match {
case 1 =>
if (name.isDefined) {
writeNameValue(name.get, _oprot)
_root_.scala.Some(DomainRemovedPb.NameField)
} else {
_root_.scala.None
}
case _ => _root_.scala.None
}
_fieldOpt match {
case _root_.scala.Some(_field) =>
val _data = Arrays.copyOfRange(_buff.getArray, 0, _buff.length)
_root_.scala.Some(TFieldBlob(_field, _data))
case _root_.scala.None =>
_root_.scala.None
}
}
}
}
/**
* Collects TCompactProtocol-encoded field values according to `getFieldBlob` into a map.
*/
def getFieldBlobs(ids: TraversableOnce[Short]): immutable$Map[Short, TFieldBlob] =
(ids flatMap { id => getFieldBlob(id) map { id -> _ } }).toMap
/**
* Sets a field using a TCompactProtocol-encoded binary blob. If the field is a known
* field, the blob is decoded and the field is set to the decoded value. If the field
* is unknown and passthrough fields are enabled, then the blob will be stored in
* _passthroughFields.
*/
def setField(_blob: TFieldBlob): DomainRemovedPb = {
var name: _root_.scala.Option[String] = this.name
var _passthroughFields = this._passthroughFields
_blob.id match {
case 1 =>
name = _root_.scala.Some(readNameValue(_blob.read))
case _ => _passthroughFields += (_blob.id -> _blob)
}
new Immutable(
name,
_passthroughFields
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetField(_fieldId: Short): DomainRemovedPb = {
var name: _root_.scala.Option[String] = this.name
_fieldId match {
case 1 =>
name = _root_.scala.None
case _ =>
}
new Immutable(
name,
_passthroughFields - _fieldId
)
}
/**
* If the specified field is optional, it is set to None. Otherwise, if the field is
* known, it is reverted to its default value; if the field is unknown, it is removed
* from the passthroughFields map, if present.
*/
def unsetName: DomainRemovedPb = unsetField(1)
override def write(_oprot: TProtocol): Unit = {
DomainRemovedPb.validate(this)
_oprot.writeStructBegin(Struct)
if (name.isDefined) writeNameField(name.get, _oprot)
if (_passthroughFields.nonEmpty) {
_passthroughFields.values.foreach { _.write(_oprot) }
}
_oprot.writeFieldStop()
_oprot.writeStructEnd()
}
def copy(
name: _root_.scala.Option[String] = this.name,
_passthroughFields: immutable$Map[Short, TFieldBlob] = this._passthroughFields
): DomainRemovedPb =
new Immutable(
name,
_passthroughFields
)
override def canEqual(other: Any): Boolean = other.isInstanceOf[DomainRemovedPb]
override def equals(other: Any): Boolean =
canEqual(other) &&
_root_.scala.runtime.ScalaRunTime._equals(this, other) &&
_passthroughFields == other.asInstanceOf[DomainRemovedPb]._passthroughFields
override def hashCode: Int = _root_.scala.runtime.ScalaRunTime._hashCode(this)
override def toString: String = _root_.scala.runtime.ScalaRunTime._toString(this)
override def productArity: Int = 1
override def productElement(n: Int): Any = n match {
case 0 => this.name
case _ => throw new IndexOutOfBoundsException(n.toString)
}
override def productPrefix: String = "DomainRemovedPb"
}
|
dkomanov/scala-serialization
|
scala-serialization/src/main/scala/com/komanov/serialization/domain/thriftscala/DomainRemovedPb.scala
|
Scala
|
mit
| 13,233 |
object Fibonacci {
def fib(n: Int): Int = {
@annotation.tailrec
def calc(n: Int, pr: Int, nx: Int): Int = {
if (0 == n) pr
else calc(n-1, nx, pr+nx)
}
// using tail-recursive local fn:
// pr=prior, nx=next fibonacci numbers
calc(n, 0, 1)
}
def main(args: Array[String]): Unit = {
println(fib(args(0).toInt))
}
}
|
dpapathanasiou/fp-in-scala
|
2-Getting-Started/Fibonacci.scala
|
Scala
|
mit
| 362 |
package hoecoga.actor.scheduler
import akka.actor.Props
import akka.persistence.{PersistentActor, RecoveryCompleted, RecoveryFailure, SnapshotOffer}
import hoecoga.actor.scheduler.JobPersistentActor._
import hoecoga.scheduler.JobData
import hoecoga.slack.SlackChannel
/**
* A persistent actor for [[JobData]].
*/
class JobPersistentActor(settings: JobPersistentActorSettings) extends PersistentActor {
import settings._
private[this] var state: State = State(List.empty)
private[this] def insert(event: InsertEvent) = state = state.copy(jobs = event.data :: state.jobs)
private[this] def delete(event: DeleteEvent) = state = state.copy(
jobs = state.jobs.filterNot(job => job.id == event.id && job.channel == event.channel))
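  // Recovery: replay persisted insert/delete events and the latest snapshot to rebuild the job list.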
override def receiveRecover: Receive = {
case RecoveryCompleted => onRecoveryCompleted(state.jobs)
case RecoveryFailure => onRecoveryFailure()
case e: InsertEvent => insert(e)
case e: DeleteEvent => delete(e)
case SnapshotOffer(meta, snapshot: State) => state = snapshot
}
override def receiveCommand: Receive = {
case Insert(job) =>
persist(InsertEvent(job))(insert)
case Delete(channel, jobId) =>
persist(DeleteEvent(channel, jobId))(delete)
case SaveSnapshot => saveSnapshot(state)
case Query(channel) =>
sender() ! QueryResult(channel, state.jobs.filter(_.channel == channel))
}
override def persistenceId: String = id
}
object JobPersistentActor {
private case class State(jobs: List[JobData])
/**
* An incoming order to insert `data` into the state of [[JobPersistentActor]].
*/
case class Insert(data: JobData)
private case class InsertEvent(data: JobData)
/**
* An incoming order to delete [[JobData]] from the state of [[JobPersistentActor]].
*/
case class Delete(channel: SlackChannel, id: String)
private case class DeleteEvent(channel: SlackChannel, id: String)
/**
* An incoming order to save a snapshot.
*/
case object SaveSnapshot
/**
* An incoming order to retrieve all [[JobData]] on `channel`.
*/
case class Query(channel: SlackChannel)
/**
* A result of [[Query]].
*/
case class QueryResult(channel: SlackChannel, jobs: List[JobData])
/**
* @param id the persistence id.
*/
case class JobPersistentActorSettings(id: String, onRecoveryCompleted: List[JobData] => Unit, onRecoveryFailure: () => Unit)
def props(settings: JobPersistentActorSettings): Props = Props(new JobPersistentActor(settings))
}
|
hoecoga/hoecoga-bot
|
src/main/scala/hoecoga/actor/scheduler/JobPersistentActor.scala
|
Scala
|
mit
| 2,513 |
package com.heromq.play
import grizzled.slf4j.Logging
import java.nio.charset.Charset
import play.api.libs.iteratee._
import play.api.mvc._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.language.reflectiveCalls
case class Chunk(topic: String, payload: String)
object API extends Controller with Logging {
private val utf8 = Charset.forName("UTF-8")
private def str(bytes: Array[Byte]) = new String(bytes, utf8)
val (enumerator, channel) = Concurrent.broadcast[Chunk]
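  // Publishing: consume the raw request body and push each chunk onto the broadcast channel under the given topic.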
def pub(topic: String) = EssentialAction {rh =>
Iteratee.foreach[Array[Byte]] {bytes => channel.push(Chunk(topic, str(bytes)))} map {_ => Ok}
}
def filter(topic: String) = Enumeratee.filter[Chunk] {_.topic == topic}
def payload = Enumeratee.map[Chunk] {c => debug(s"${c.topic} : ${c.payload}"); c.payload}
def sub(topic: String) = Action {req =>
debug(req.remoteAddress + " - client connected, topic: " + topic)
val enStream: Enumerator[String] = enumerator &> filter(topic) &> payload &> Concurrent.buffer(50)
Ok.chunked(enStream)
}
}
|
imikushin/heromq-play
|
app/com/heromq/play/API.scala
|
Scala
|
mit
| 1,072 |
package repositories.loan.dao
import com.google.inject.{Inject, Singleton}
import models.loan.LoanEventTypes.{ObjectLentType, ObjectReturnedType}
import models.loan.LoanType
import models.loan.event.{LoanEvent, ObjectsLent, ObjectsReturned}
import no.uio.musit.MusitResults.{MusitResult, MusitSuccess}
import no.uio.musit.models.{EventId, MuseumId, ObjectUUID}
import no.uio.musit.repositories.DbErrorHandlers
import no.uio.musit.time.dateTimeNow
import org.joda.time.DateTime
import play.api.Logger
import play.api.db.slick.DatabaseConfigProvider
import scala.concurrent.{ExecutionContext, Future}
@Singleton
class LoanDao @Inject()(
implicit
val dbConfigProvider: DatabaseConfigProvider,
val ec: ExecutionContext
) extends LoanTables
with DbErrorHandlers {
val logger = Logger(classOf[LoanDao])
import profile.api._
private def insertEventRow(event: LoanEventRow): DBIO[EventId] = {
loanTable returning loanTable.map(_.id) += event
}
private def insertActiveLoanRow(activeLoanRow: ActiveLoanRow): DBIO[Long] = {
activeLoanTable returning activeLoanTable.map(_.id) += activeLoanRow
}
private def insertActiveLoanRows(
mid: MuseumId,
eid: EventId,
returnDate: DateTime,
objs: Seq[ObjectUUID]
) = {
val actions = objs.map(o => insertActiveLoanRow((None, mid, o, eid, returnDate)))
DBIO.sequence(actions)
}
private def deleteActiveLoanRow(objectId: ObjectUUID): DBIO[Int] = {
activeLoanTable.filter(_.objectUuid === objectId).delete
}
private def deleteActiveLoanRows(objectIds: Seq[ObjectUUID]): DBIO[Int] = {
DBIO.sequence(objectIds.map(deleteActiveLoanRow)).map(_.sum)
}
private def insertLentObject(eId: EventId, obj: ObjectUUID): DBIO[Long] = {
lentObjectTable returning lentObjectTable.map(_.id) += ((None, eId, obj))
}
private def insertLentObjects(eid: EventId, objs: Seq[ObjectUUID]) = {
val actions = objs.map(o => insertLentObject(eid, o))
DBIO.sequence(actions)
}
def insertReturnedObjectEvent(
mid: MuseumId,
retEvt: ObjectsReturned
): Future[MusitResult[EventId]] = {
val actions = for {
_ <- deleteActiveLoanRows(retEvt.objects)
id <- insertEventRow(asEventRowTuple(mid, retEvt))
} yield id
db.run(actions.transactionally)
.map(MusitSuccess.apply)
.recover(nonFatal("Unable to insert returned loans"))
}
def insertLentObjectEvent(
mid: MuseumId,
objectsLent: ObjectsLent
): Future[MusitResult[EventId]] = {
val actions = for {
id <- insertEventRow(asEventRowTuple(mid, objectsLent))
_ <- insertLentObjects(id, objectsLent.objects)
_ <- insertActiveLoanRows(mid, id, objectsLent.returnDate, objectsLent.objects)
} yield id
db.run(actions.transactionally)
.map(MusitSuccess.apply)
.recover(nonFatal("Unable to insert loan"))
}
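  /** Objects on active loans whose expected return date has passed, ordered by return date. */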
def findExpectedReturnedObjects(
mid: MuseumId
): Future[MusitResult[Seq[(ObjectUUID, DateTime)]]] = {
val action = activeLoanTable.filter { r =>
r.museumId === mid &&
r.returnDate < dateTimeNow
}.sortBy(_.returnDate).map(r => r.objectUuid -> r.returnDate)
db.run(action.result)
.map(MusitSuccess.apply)
.recover(nonFatal("Unable to fetch active loans"))
}
def findEventForObject(objectUUID: ObjectUUID): Future[MusitResult[Seq[LoanEvent]]] = {
val query = loanTable
.join(lentObjectTable)
.filter {
case (lt, lo) => lt.objectUuid === objectUUID || lo.objectUuid === objectUUID
}
.sortBy(_._1.eventDate)
.map(res => res._1.typeId -> res._1.eventJson)
db.run(query.result)
.map { res =>
res.map {
case (typ, json) =>
typ match {
case ObjectLentType => json.as[ObjectsLent]
case ObjectReturnedType => json.as[ObjectsReturned]
}
}
}
.map(MusitSuccess.apply)
}
private def activeLoan(mid: MuseumId) =
activeLoanTable.filter(_.museumId === mid).map(_.eventId).distinct
def findActiveLoanEvents(mid: MuseumId): Future[MusitResult[Seq[LoanEvent]]] = {
val query = loanTable
.filter(_.id in activeLoan(mid))
.filter(_.typeId === ObjectLentType.asInstanceOf[LoanType])
.sortBy(_.registeredDate.asc)
db.run(query.result).map(r => r.map(fromLoanEventRow)).map(MusitSuccess.apply)
}
}
|
MUSIT-Norway/musit
|
service_backend/app/repositories/loan/dao/LoanDao.scala
|
Scala
|
gpl-2.0
| 4,394 |
package scala.reflect.internal
import org.openjdk.jmh.annotations._
import org.openjdk.jmh.infra._
import org.openjdk.jmh.runner.IterationType
import benchmark._
import java.util.concurrent.TimeUnit
import scala.reflect.internal.util.BatchSourceFile
@BenchmarkMode(Array(org.openjdk.jmh.annotations.Mode.SampleTime))
@Fork(4)
@Threads(1)
@Warmup(iterations = 10)
@Measurement(iterations = 10)
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Benchmark)
class LubBenchmark {
import scala.tools.nsc._
var g: Global = _
var ts: List[Global#Type] = _
trait A1 ; trait A2 ; trait A3 extends A1 ; trait A4 extends A2 ; trait A5 ; trait A6 ; trait A7 ; trait A8 extends A7
trait Odd extends A1 with A3 with A5 with A7
trait Even extends A2 with A3 with A6 with A8
trait Low extends A1 with A2 with A3 with A4
trait High extends A5 with A6 with A7 with A8
trait All extends A1 with A2 with A3 with A4 with A5 with A6 with A7 with A8
class B1 extends A1 with A2
class B2 extends A7 with A8
class B3 extends B2 with Low
class B4 extends B1 with High
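  // Setup boots a Global compiler on the java classpath and uses the tuple's type arguments as the lub inputs.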
@Setup(Level.Trial)
def setup(): Unit = {
val settings = new Settings()
settings.usejavacp.value = true
val global = new Global(settings)
g = global
val run = new global.Run()
import language.existentials
val tp = global.typeOf[((A1, A2, A3, A4), (Odd, Even, High, Low), (B1, B2, B3, B4) )]
ts = tp.typeArgs
}
@Benchmark def measure(bh: Blackhole): Any = {
val global = g
import global._
lub(ts.asInstanceOf[List[Type]])
}
}
|
scala/scala
|
test/benchmarks/src/main/scala/scala/reflect/internal/LubBenchmark.scala
|
Scala
|
apache-2.0
| 1,554 |
package com.blinkbox.books.catalogue.searchv1
import com.blinkbox.books.catalogue.common.{ BookFixtures, Contributor, OtherText, Subject, Events }
import com.blinkbox.books.catalogue.searchv1.V1SearchService.{BookSimilarResponse, Book}
import org.scalatest.{ FlatSpec, Matchers }
import spray.http.StatusCodes
import scala.concurrent.duration._
import scala.util.Random
class SimilarBooksSpecs extends FlatSpec with Matchers with ApiSpecBase {
val baseBook = BookFixtures.simpleBook
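  // Builds a variant of the fixture book with the given isbn, title, authors, description and subject codes.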
def book(
isbn: String,
title: String,
authors: List[String] = baseBook.contributors.map(_.displayName),
description: String = baseBook.descriptions.headOption.map(_.content).getOrElse(""),
subjects: List[String] = baseBook.subjects.map(_.code)
) =
baseBook.copy(
isbn = isbn,
title = title,
contributors = authors.map(n => Contributor(Random.nextString(10), "author", n, n)),
descriptions = OtherText(Nil, description, "description", None) :: Nil,
subjects = subjects.map(c => Subject("bisac", c, Some(true)))
)
val alpha = book("0000000000001", "Alpha", "Kilgore Trout" :: Nil, "Book about something", "abc123" :: "def456" :: Nil)
val beta = book("0000000000002", "Beta", "Luther Blissett" :: Nil, "Anything really does", "ghi789" :: Nil)
val gamma = book("0000000000003", "Gamma", "Kilgore Trout" :: Nil, "Foobar bar baz", "jkl000" :: Nil)
val delta = book("0000000000004", "Delta", "Bilbo Baggins" :: Nil, "Anything", "zzz999" :: Nil)
val aNewAlpha = book("0000000000005", "A new Alpha", "Anonymous" :: Nil, "Running out of stuffs to write", "yyy888" :: Nil)
val epsilon = book("0000000000006", "Epsilon", "Soumynona" :: Nil, "Blablabla", "xxx777" :: "zzz999" :: Nil)
val dataSet = alpha :: beta :: gamma :: delta :: aNewAlpha :: epsilon :: Nil
override def beforeAll(): Unit = {
super.beforeAll()
catalogueIndex indexAndCheck (dataSet.toSeq: _*) andAwaitFor (10.seconds)
}
"The similar-books endpoint" should "return books that are similar to the provided one for title, description, author or subject" in {
def testSimilar(isbn: String)(f: Seq[String] => Unit) = {
Get(s"/catalogue/search/books/$isbn/similar") ~> routes ~> check {
status should equal(StatusCodes.OK)
f(responseAs[BookSimilarResponse].books.getOrElse(Seq.empty).map(_.id))
}
}
def isbns(books: Events.Book*) = books.map(_.isbn)
testSimilar(alpha.isbn) { _ should contain theSameElementsAs isbns(gamma, aNewAlpha) }
testSimilar(beta.isbn) { _ should contain theSameElementsAs isbns(delta) }
testSimilar(gamma.isbn) { _ should contain theSameElementsAs isbns(alpha) }
testSimilar(delta.isbn) { _ should contain theSameElementsAs isbns(beta, epsilon) }
testSimilar(aNewAlpha.isbn) { _ should contain theSameElementsAs isbns(alpha) }
testSimilar(epsilon.isbn) { _ should contain theSameElementsAs isbns(delta) }
}
it should "return a 400 signaling an invalid ID if the isbn is not numeric" in {
Get("/catalogue/search/books/abcdefghijklm/similar") ~> routes ~> check {
status should equal(StatusCodes.BadRequest)
checkInvalidResponse("Invalid ID: abcdefghijklm")
}
}
it should "return a 400 signaling an invalid ID if the isbn is shorter than 13 digits" in {
Get("/catalogue/search/books/123456789012/similar") ~> routes ~> check {
status should equal(StatusCodes.BadRequest)
checkInvalidResponse("Invalid ID: 123456789012")
}
}
it should "return a 400 signaling an invalid ID if the isbn is longer than 13 digits" in {
Get("/catalogue/search/books/12345678901234/similar") ~> routes ~> check {
status should equal(StatusCodes.BadRequest)
checkInvalidResponse("Invalid ID: 12345678901234")
}
}
}
|
blinkboxbooks/catalogue-v2.scala
|
catalogue2-search-public/src/test/scala/com/blinkbox/books/catalogue/searchv1/SimilarBooksSpecs.scala
|
Scala
|
mit
| 3,791 |
package org.lolhens.renderengine.vector
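/** Immutable 2D vector of doubles; operations return `this` unchanged whenever the result would be identical. */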
final case class Vector2d private(override val x: Double,
override val y: Double) extends Vector2[Double](x, y) {
override type Self = Vector2d
override def Vector2(x: Double, y: Double): Vector2d =
if (x == this.x && y == this.y) this
else Vector2d(x, y)
override def isZero: Boolean = x == 0 && y == 0
override def isOne: Boolean = x == 1 && y == 1
override def unary_- : Vector2d = Vector2d(-x, -y)
override def +(x: Double, y: Double): Vector2d = if (x == 0 && y == 0) this else Vector2d(this.x + x, this.y + y)
override def -(x: Double, y: Double): Vector2d = if (x == 0 && y == 0) this else Vector2d(this.x - x, this.y - y)
override def *(x: Double, y: Double): Vector2d = if (x == 1 && y == 1) this else Vector2d(this.x * x, this.y * y)
override def /(x: Double, y: Double): Vector2d = if (x == 1 && y == 1) this else Vector2d(this.x / x, this.y / y)
override def `length²`: Double = x * x + y * y
override def length: Double = Math.sqrt(`length²`)
}
object Vector2d {
val Zero = new Vector2d(0, 0)
val One = new Vector2d(1, 1)
val X = new Vector2d(1, 0)
val Y = new Vector2d(0, 1)
def apply(x: Double, y: Double): Vector2d =
if (x == 0 && y == 0) Zero
else if (x == 1 && y == 1) One
else new Vector2d(x, y)
}
|
LolHens/LibRenderEngine
|
src/main/scala/org/lolhens/renderengine/vector/Vector2d.scala
|
Scala
|
gpl-2.0
| 1,354 |
/*
* Copyright (C) 2010 Mikhail Vorozhtsov
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.mvv.layson.json
import com.github.mvv.layson.bson._
sealed trait JsonValue extends NotNull {
def serialize: Iterator[Char]
def toBson: BsonValue
}
sealed trait OptJsonValue extends JsonValue
sealed trait MandatoryJsonValue extends JsonValue
sealed trait OptJsonBool extends OptJsonValue {
def toBson: OptBsonBool
}
object OptJsonBool {
implicit def optJsonBoolToBoolean(x: OptJsonBool) = x match {
case JsonNull => null
case JsonBool(x) => java.lang.Boolean.valueOf(x)
}
}
sealed trait JsonBool extends OptJsonBool with MandatoryJsonValue {
def value: Boolean
def toBson: BsonBool
}
object JsonBool {
object True extends JsonBool {
val value = true
def serialize = "true".toIterator
def toBson = BsonBool.True
}
object False extends JsonBool {
val value = false
def serialize = "false".toIterator
def toBson = BsonBool.False
}
def apply(value: Boolean) = if (value) True else False
def unapply(x: JsonBool): Option[Boolean] = Some(x.value)
implicit def jsonBoolToBoolean(x: JsonBool) = x.value
}
sealed trait OptJsonNum extends OptJsonValue {
def toBson: OptNumericBsonValue
}
final class JsonNum(val value: BigDecimal) extends OptJsonNum
with MandatoryJsonValue {
def serialize = value.toString.toIterator
final def toBson: NumericBsonValue =
if (value.scale == 0) {
val l = value.longValue
if (l < Int.MinValue || l > Int.MaxValue)
BsonLong(l)
else
BsonInt(l.intValue)
} else
BsonDouble(value.doubleValue)
}
object JsonNum {
def apply(value: Int) = new JsonNum(BigDecimal(value))
def apply(value: Long) = new JsonNum(BigDecimal(value))
def apply(value: Double) = new JsonNum(BigDecimal(value.toString))
def apply(value: BigInt) = new JsonNum(BigDecimal(value))
def apply(value: BigDecimal) = new JsonNum(value)
def unapply(x: JsonNum): Option[BigDecimal] = Some(x.value)
}
sealed trait OptJsonStr extends OptJsonValue {
def toBson: OptBsonStr
}
object OptJsonStr {
implicit def optJsonStrToString(x: OptJsonStr) = x match {
case JsonNull => null
case JsonStr(x) => x
}
}
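/** A JSON string; serialization quotes the value and escapes quotes, backslashes and control characters as unicode escape sequences. */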
trait JsonStr extends OptJsonStr with MandatoryJsonValue {
def iterator: Iterator[Char]
def value: String
final def serialize = {
val it = iterator
Iterator.single('"') ++
(it.map {
case c if c == '"' || c == '\\' || c <= 0x1F =>
("\\u%04X" format c.intValue).toIterator
case c => Iterator.single(c)
} flatten) ++ Iterator.single('"')
}
def toBson: BsonStr = new JsonBsonStr(this)
}
class StrictJsonStr(val value: String) extends JsonStr {
require(value != null)
def iterator = value.iterator
}
class LazyJsonStr(it: Iterator[Char]) extends JsonStr {
private lazy val chars = it.toStream
def iterator = chars.iterator
def value = chars.mkString
}
object JsonStr {
def apply(value: String): JsonStr = new StrictJsonStr(value)
def apply(it: Iterator[Char]): JsonStr = new LazyJsonStr(it)
def unapply(x: JsonStr): Option[String] = Some(x.value)
implicit def jsonStrToString(x: JsonStr) = x.value
}
trait OptCompoundJsonValue extends OptJsonValue
trait CompoundJsonValue extends OptCompoundJsonValue with MandatoryJsonValue
sealed trait OptJsonArray extends OptCompoundJsonValue {
def toBson: OptBsonArray
}
trait JsonArray extends OptJsonArray with CompoundJsonValue {
def iterator: Iterator[JsonValue]
def elements: Seq[JsonValue]
final def isEmpty = iterator.isEmpty
final def serialize = {
val it = iterator
Iterator.single('[') ++
(it.zipWithIndex.map { case (element, i) =>
(if (i == 0) Iterator.empty else Iterator.single(',')) ++
element.serialize
} flatten) ++ Iterator.single(']')
}
def toBson: BsonArray = new JsonBsonArray(this)
}
class StrictJsonArray(val elements: Seq[JsonValue]) extends JsonArray {
def iterator = elements.iterator
}
class LazyJsonArray(it: Iterator[JsonValue]) extends JsonArray {
lazy val elements = it.toStream
def iterator = elements.iterator
}
object JsonArray {
def apply(elems: JsonValue*) = new StrictJsonArray(elems)
def apply(it: Iterator[JsonValue]) = new LazyJsonArray(it)
def unapply(x: JsonArray): Option[Seq[JsonValue]] = Some(x.elements)
}
sealed trait OptJsonObject extends OptCompoundJsonValue {
def toBson: OptBsonObject
}
trait JsonObject extends OptJsonObject with CompoundJsonValue {
def iterator: Iterator[(String, JsonValue)]
def members: Seq[(String, JsonValue)]
def membersMap: Map[String, JsonValue]
def get(key: String): Option[JsonValue]
final def isEmpty = iterator.isEmpty
final def serialize = {
val it = iterator
Iterator.single('{') ++
(it.zipWithIndex.map { case ((name, value), i) =>
(if (i == 0) Iterator.empty else Iterator.single(',')) ++
JsonStr(name).serialize ++ Iterator.single(':') ++ value.serialize
} flatten) ++ Iterator.single('}')
}
def toBson: BsonObject = new JsonBsonObject(this)
}
class SeqJsonObject(val members: Seq[(String, JsonValue)]) extends JsonObject {
def this() = this(Vector())
def iterator = members.iterator
def membersMap = members.toMap
def get(key: String) = members.find(_._1 == key).map(_._2)
}
class MapJsonObject(val membersMap: Map[String, JsonValue]) extends JsonObject {
def this() = this(Map())
def iterator = membersMap.iterator
def members = membersMap.toSeq
def get(key: String) = membersMap.get(key)
}
class LazyJsonObject(it: Iterator[(String, JsonValue)]) extends JsonObject {
private lazy val mems = it.toStream
def iterator = mems.iterator
def members = mems
def membersMap = mems.toMap
override def get(key: String) = mems.find(_._1 == key).map(_._2)
}
object JsonObject {
def apply() = new SeqJsonObject()
def apply[T <% JsonValue](
member: (String, T), members: (String, JsonValue)*) =
new SeqJsonObject(
(member._1 -> implicitly[T => JsonValue].apply(member._2)) +: members)
def apply(members: Seq[(String, JsonValue)]) = new SeqJsonObject(members)
def apply(map: Map[String, JsonValue]) = new MapJsonObject(map)
def apply(it: Iterator[(String, JsonValue)]) = new LazyJsonObject(it)
def unapply(x: JsonObject): Option[Map[String, JsonValue]] =
Option(x.membersMap)
}
object JsonNull extends OptJsonValue
with OptJsonBool
with OptJsonNum
with OptJsonStr
with OptJsonArray
with OptJsonObject {
def serialize = "null".toIterator
def toBson: BsonNull.type = BsonNull
}
object JsonValue {
implicit def booleanToJsonBool(x: Boolean) = JsonBool(x)
implicit def booleanToOptJsonBool(x: java.lang.Boolean) =
if (x == null) JsonNull else JsonBool(x.booleanValue)
implicit def intToJsonNum(x: Int) = JsonNum(x)
implicit def longToJsonNum(x: Long) = JsonNum(x)
implicit def floatToJsonNum(x: Float) = JsonNum(x)
implicit def doubleToJsonNum(x: Double) = JsonNum(x)
implicit def bigIntToOptJsonNum(x: BigInt) =
if (x == null) JsonNull else JsonNum(x)
implicit def bigDecToOptJsonNum(x: BigDecimal) =
if (x == null) JsonNull else JsonNum(x)
implicit def stringToOptJsonStr(x: String) =
if (x == null) JsonNull else JsonStr(x)
implicit def nullToJsonNull(x: Null) = JsonNull
}
final class JsonBsonStr(underlying: JsonStr) extends BsonStr {
def iterator = underlying.iterator
def value = underlying.value
}
final class JsonBsonArray(underlying: JsonArray) extends BsonArray {
def iterator = underlying.iterator.map(_.toBson)
def elements = underlying.elements.view.map(_.toBson)
}
final class JsonBsonObject(underlying: JsonObject) extends BsonObject {
def iterator = underlying.iterator.map { case (k, v) => k -> v.toBson }
def members = underlying.members.view.map { case (k, v) => k -> v.toBson }
def membersMap = underlying.membersMap.mapValues(_.toBson)
def get(key: String) = underlying.get(key).map(_.toBson)
}
|
mvv/layson
|
src/main/scala/com/github/mvv/layson/json/Values.scala
|
Scala
|
apache-2.0
| 8,608 |
package space.spacelift.mq.proxy.impl.amqp
import java.util.concurrent.TimeUnit
import akka.actor.{Actor, ActorSystem, Props}
import akka.pattern.ask
import akka.testkit.{ImplicitSender, TestKit}
import com.rabbitmq.client.ConnectionFactory
import org.junit.runner.RunWith
import org.scalatest.{Matchers, WordSpecLike}
import org.scalatest.junit.JUnitRunner
import space.spacelift.amqp.Amqp.{Binding, ChannelParameters, ExchangeParameters, QueueParameters, _}
import space.spacelift.amqp.{Amqp, ConnectionOwner}
import space.spacelift.mq.proxy.Proxy
import space.spacelift.mq.proxy.calculator.Calculator
import space.spacelift.mq.proxy.calculator.Calculator.{AddRequest, AddResponse}
import space.spacelift.mq.proxy.serializers.ProtobufSerializer
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
@RunWith(classOf[JUnitRunner])
class RemoteGpbCallTest extends TestKit(ActorSystem("TestSystem")) with ImplicitSender with WordSpecLike with Matchers {
"AMQP Proxy" should {
"handle GPB calls" in {
import ExecutionContext.Implicits.global
val connFactory = new ConnectionFactory()
val conn = system.actorOf(Props(new ConnectionOwner(connFactory)), name = "conn")
val exchange = ExchangeParameters(name = "amq.direct", exchangeType = "", passive = true)
val queue = QueueParameters(name = "calculator-gpb", passive = false, autodelete = true)
// create a simple calculator actor
val calc = system.actorOf(Props(new Actor() {
def receive = {
case request: AddRequest => sender ! AddResponse.newBuilder().setX(request.getX).setY(request.getY).setSum(request.getX + request.getY).build()
}
}))
// create an AMQP proxy server which consumes messages from the "calculator" queue and passes
// them to our Calculator actor
val server = ConnectionOwner.createChildActor(conn, AmqpRpcServer.props(new Proxy.ProxyServer(calc), channelParams = Some(ChannelParameters(qos = 1))))
Amqp.waitForConnection(system, server).await(5, TimeUnit.SECONDS)
server ! AddBinding(Binding(exchange, queue, "calculator-gpb"))
expectMsgPF() {
case Amqp.Ok(AddBinding(_), _) => true
}
// create an AMQP proxy client in front of the "calculator queue"
val client = ConnectionOwner.createChildActor(conn, AmqpRpcClient.props(ExchangeParameters("amq.direct", true, "direct"), "calculator-gpb"))
val proxy = system.actorOf(Props(new Proxy.ProxyClient(client, ProtobufSerializer)), name = "proxy")
Amqp.waitForConnection(system, client).await(5, TimeUnit.SECONDS)
implicit val timeout: akka.util.Timeout = 5 seconds
val futures = for (x <- 0 until 5; y <- 0 until 5) yield (proxy ? AddRequest.newBuilder.setX(x).setY(y).build()).mapTo[AddResponse]
val result = Await.result(Future.sequence(futures), 5 seconds)
assert(result.length === 25)
assert(result.filter(r => r.getSum != r.getX + r.getY).isEmpty)
}
}
}
|
Spacelift/akka-mq-proxies
|
akka-mq-proxies-amqp/src/test/scala/space/spacelift/mq/proxy/impl/amqp/RemoteGpbCallTest.scala
|
Scala
|
mit
| 3,013 |
/*
* IndexTest.scala
 * Decision index tests.
*
* Created By: Brian Ruttenberg ([email protected])
* Creation Date: Jan 1, 2009
*
* Copyright 2013 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.test.algorithm.decision.index
import org.scalatest.Matchers
import org.scalatest.{ WordSpec, PrivateMethodTester }
import math.log
import com.cra.figaro.algorithm._
import com.cra.figaro.algorithm.factored._
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.algorithm.decision._
import com.cra.figaro.algorithm.decision.index._
import com.cra.figaro.algorithm.sampling._
import com.cra.figaro.language._
import com.cra.figaro.library.decision._
import com.cra.figaro.library.compound._
import com.cra.figaro.library.atomic.discrete.Uniform
import com.cra.figaro.library.atomic.continuous.Normal
import com.cra.figaro.util._
import com.cra.figaro.test._
import scala.collection.immutable.Map
class IndexTest extends WordSpec with Matchers {
"An Index" when {
"Nearest Neighbors retrieved" should {
"return the correct neighbors with a VPIndex" in {
Universe.createNew()
val (d, strat_map) = DecisionContinuous(false)
val index1 = FlatIndex[d.PValue, d.DValue](strat_map)
val index2 = VPIndex[d.PValue, d.DValue](strat_map, 50)
val nn1 = index1.getNN(0.0, 100)
val nn2 = index2.getNN(0.0, 100)
val diff = nn1.diff(nn2)
diff should be('empty)
}
}
}
def DecisionContinuous(sim: Boolean): (Decision[Double, Boolean], Map[(Double, Boolean), DecisionSample]) = {
// Closely replicates the discrete test. Should be pretty close to the same decisions,
// but some tests may fail (inconsistently)
Universe.createNew()
val p = Select[Int](0.25 -> 0, 0.75 -> 2)
val pp = Uniform[Int](-2, 0)
val ppp = Chain(p, pp, (a: Int, b: Int) => Normal((a + b).toDouble, 0.1))
val d = NonCachingDecision[Double, Boolean](ppp, List(true, false))
def u_fcn(b: Boolean, i: Int): Double = {
i match {
case 0 => if (b) -7.0 else 6.0
case 2 => if (b) 1.0 else -2.0
}
}
val u = Apply[Boolean, Int, Double](d, p, u_fcn)
val ve = DecisionImportance(10000, List(u), d)
ve.start()
(d, ve.getUtility().asInstanceOf[Map[(Double, Boolean), DecisionSample]])
}
}
|
agarbuno/figaro
|
Figaro/src/test/scala/com/cra/figaro/test/algorithm/decision/index/IndexTest.scala
|
Scala
|
bsd-3-clause
| 2,533 |
package com.faacets.qalg
package algebra
package converted
import scala.{specialized => sp}
trait ConvertedVec[V, @sp(Double, Long) A, J] extends Any
with Converted[A, J]
with Vec[V, A] {
def source: Vec[V, J]
override def sameShape(x: V, y: V): Boolean = source.sameShape(x, y)
override def linearLength(v: V): Int = length(v)
override def linearApply(v: V, k: Int): A = apply(v, k)
def length(v: V): Int = source.length(v)
def apply(v: V, k: Int): A = jToA(source(v, k))
override def toIndexedSeq(v: V): IndexedSeq[A] = new IndexedSeq[A] {
def length: Int = source.length(v)
def apply(k: Int): A = jToA(source(v, k))
}
}
|
denisrosset/qalg
|
core/src/main/scala/qalg/algebra/converted/ConvertedVec.scala
|
Scala
|
mit
| 659 |
package ru.pavlenov.handler
/**
 * Created by Pavlenov Semen on 24.07.14.
 */
import java.util.Date
import akka.actor.Actor
import org.mashupbots.socko.events.HttpRequestEvent
/**
* Hello processor writes a greeting and stops.
*/
class HelloHandler extends Actor {
def receive = {
case event: HttpRequestEvent =>
event.response.write("Hello from Socko (" + new Date().toString + ")")
context.stop(self)
}
}
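// Editor's note: a hedged sketch of how this handler is typically wired into a Socko web server.
// The names `Routes`, `GET`, `WebServer` and `WebServerConfig` follow Socko's quick-start API and
// are assumptions here, not part of this file:
//
//   val routes = Routes({ case GET(request) => system.actorOf(Props[HelloHandler]) ! request })
//   new WebServer(WebServerConfig(), routes, system).start()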
|
laser13/Akka-plus
|
src/main/scala/ru/pavlenov/handler/HelloHandler.scala
|
Scala
|
mit
| 503 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package play.it.test
import okhttp3.Protocol
import okhttp3.Response
import play.api.mvc._
import play.api.mvc.request.RequestAttrKey
import play.api.test.PlaySpecification
/**
* Tests that the [[EndpointIntegrationSpecification]] works properly.
*/
class EndpointIntegrationSpecificationSpec
extends PlaySpecification
with EndpointIntegrationSpecification
with OkHttpEndpointSupport {
"Endpoints" should {
"respond with the highest supported HTTP protocol" in {
withResult(Results.Ok("Hello")).withAllOkHttpEndpoints { okEndpoint: OkHttpEndpoint =>
val response: Response = okEndpoint.call("/")
val protocol = response.protocol
if (okEndpoint.endpoint.protocols.contains(HTTP_2_0)) {
protocol must_== Protocol.HTTP_2
} else if (okEndpoint.endpoint.protocols.contains(HTTP_1_1)) {
protocol must_== Protocol.HTTP_1_1
} else {
ko("All endpoints should support at least HTTP/1.1")
}
response.body.string must_== "Hello"
}
}
"respond with the correct server attribute" in withAction { Action: DefaultActionBuilder =>
Action { request: Request[_] =>
Results.Ok(request.attrs.get(RequestAttrKey.Server).toString)
}
}.withAllOkHttpEndpoints { okHttpEndpoint: OkHttpEndpoint =>
val response: Response = okHttpEndpoint.call("/")
response.body.string must_== okHttpEndpoint.endpoint.serverAttribute.toString
}
}
}
|
wegtam/playframework
|
core/play-integration-test/src/it/scala/play/it/test/EndpointIntegrationSpecificationSpec.scala
|
Scala
|
apache-2.0
| 1,552 |
/*
* Copyright ActionML, LLC under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* ActionML licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.actionml.authserver.services
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.HttpEntity.Strict
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.headers.{Authorization, BasicHttpCredentials}
import akka.stream.Materializer
import akka.util.ByteString
import com.actionml.authserver.service.AuthorizationService
import com.actionml.authserver.{AccessToken, AuthorizationCheckRequest, ResourceId, RoleId}
import com.actionml.circe.CirceSupport
import com.actionml.router.config.AppConfig
import com.typesafe.scalalogging.LazyLogging
import io.circe.generic.auto._
import io.circe.syntax._
import scaldi.{Injectable, Injector}
import scala.collection.{immutable, mutable}
import scala.concurrent.{ExecutionContext, Future}
class ClientAuthorizationService(implicit inj: Injector) extends AuthorizationService with CirceSupport
with Injectable with LazyLogging {
private val config = inject[AppConfig]
private implicit val ec = inject[ExecutionContext]
private implicit val actorSystem = inject[ActorSystem]
private implicit val materializer = inject[Materializer]
override def authorize(accessToken: AccessToken, role: RoleId, resourceId: ResourceId): Future[Boolean] = {
val request = mkAuthorizeRequest(accessToken, role, resourceId)
Http().singleRequest(request)
.collect {
case HttpResponse(StatusCodes.OK, _, _, _) => true
case _ => false
}.recoverWith {
case ex =>
logger.error("AuthServer response error", ex)
Future.successful(false)
}
}
private def mkAuthorizeRequest(accessToken: AccessToken, role: RoleId, resourceId: ResourceId) = {
val body = Strict(ContentTypes.`application/json`, ByteString(AuthorizationCheckRequest(accessToken, role, resourceId).asJson.noSpaces))
HttpRequest(method = HttpMethods.POST,
uri = authServerRoot.copy(path = authServerRoot.path + "/auth/authorize"),
entity = body,
headers = immutable.Seq(authorizationHeader)
)
}
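  // The request built above is a POST to <auth.serverUrl>/auth/authorize, authenticated with the
  // configured client id/secret via HTTP Basic auth, whose JSON body is the serialized
  // AuthorizationCheckRequest(accessToken, role, resourceId).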
private val authServerRoot = Uri(config.auth.serverUrl)
private val authorizationHeader: HttpHeader = Authorization(BasicHttpCredentials.apply(config.auth.clientId, config.auth.clientSecret))
}
|
actionml/harness
|
rest-server/server/src/main/scala/com/actionml/authserver/services/ClientAuthorizationService.scala
|
Scala
|
apache-2.0
| 3,032 |
/*
* Copyright 2012 OneCalendar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package api.eventbrite
import java.lang.IllegalArgumentException
import play.api.libs.json._
object EventBriteParser extends EventbriteJsonReader {
def parseEvents(json: String): Seq[EventbriteEvent] = {
eventbriteResponseReader.reads(Json.parse(json)) match {
case JsSuccess(events, _) =>
events.asOpt.getOrElse(Nil)
case JsError(errors) => throw new IllegalArgumentException("unknown response from eventbrite : " + errors)
case _ => throw new IllegalStateException("unknown response from eventbrite : " + json)
}
}
}
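// Editor's note: a hedged usage sketch. `parseEvents` expects the JSON body returned by the
// Eventbrite search API; the exact envelope is defined by `eventbriteResponseReader` (not shown
// here), so `responseBody` below is a hypothetical value:
//
//   val events: Seq[EventbriteEvent] = EventBriteParser.parseEvents(responseBody)
//   val titles: Seq[String] = events.flatMap(_.title)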
case class EventbriteEvent(id: Option[String] = None,
title: Option[String] = None,
start_date: Option[String] = None,
description: Option[String] = None,
end_date: Option[String] = None,
tags: Option[String] = None,
timezone_offset: Option[String] = None,
url: Option[String] = None,
venue: Option[Venue] = None)
case class Venue(address: Option[String] = None,
address_2: Option[String] = None,
city: Option[String] = None,
region: Option[String] = None,
country: Option[String] = None,
postal_code: Option[String] = None)
|
OneCalendar/OneCalendar
|
app/api/eventbrite/EventBriteParser.scala
|
Scala
|
apache-2.0
| 2,021 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.hibench.sparkbench.graph.nweight
import it.unimi.dsi.fastutil.objects.ObjectHeaps
class SizedPriorityQueue(
val capacity:Int) extends Traversable[(Long, Double)] with Serializable {
protected val buf = new Array[(Long, Double)](capacity)
protected val comparator = new java.util.Comparator[(Long, Double)] with Serializable {
override def compare(m1: (Long, Double), m2: (Long, Double)) : Int = {
if (m1._2 < m2._2) {
-1
} else if (m1._2 > m2._2) {
1
} else if (m1._1 < m2._1) {
-1
} else if (m1._1 > m2._1) {
1
} else {
0
}
}
}
protected var size_ = 0
override def size() = size_
def clear() {
size_ = 0
}
def fullySorted(): Array[(Long, Double)] = {
  // slice's upper bound is exclusive, so use size_ to keep every stored element
  val slicedBuf = buf.slice(0, size_)
  java.util.Arrays.sort(slicedBuf, comparator)
  slicedBuf
}
}
def foreach[U](f: ((Long, Double)) => U): Unit = {
for (i <- 0 until size_) f(buf(i))
}
def enqueue(value: (Long, Double)) {
if (size_ < capacity) {
buf(size_) = value
size_ = size_ + 1
ObjectHeaps.upHeap(buf, size_, size_ - 1, comparator)
} else if (comparator.compare(value, buf(0)) > 0) {
buf(0) = value
ObjectHeaps.downHeap(buf, size_, 0, comparator)
}
}
}
object SizedPriorityQueue {
def apply(capacity: Int)(elems: (Long, Double)*) = {
  val q = new SizedPriorityQueue(capacity)
  for ((i, v) <- elems)
    q.enqueue((i, v))
  q
}
}
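// Editor's note: a minimal usage sketch (not part of the original benchmark code); it only
// exercises the API defined above. The queue keeps the `capacity` largest (id, weight) pairs.
object SizedPriorityQueueExample {
  def main(args: Array[String]): Unit = {
    val q = new SizedPriorityQueue(3)
    Seq((1L, 0.5), (2L, 2.0), (3L, 1.5), (4L, 0.1), (5L, 3.0)).foreach(q.enqueue)
    // Only the three largest weights survive (1.5, 2.0, 3.0), returned in ascending order.
    println(q.fullySorted().mkString(", "))
  }
}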
|
kimihe/Swallow
|
swallow-benchmark/HiBench-master/sparkbench/graph/src/main/scala/com/intel/hibench/sparkbench/graph/nweight/Utils.scala
|
Scala
|
apache-2.0
| 2,304 |
package com.datawizards.splot.mapper
import com.datawizards.splot.model.PlotAxisValues._
import com.datawizards.splot.model.{Plot, PlotType, PlotsGrid}
import org.knowm.xchart.XYSeries.XYSeriesRenderStyle
import org.knowm.xchart.internal.Series
import org.knowm.xchart._
import org.knowm.xchart.internal.chartpart.Chart
import org.knowm.xchart.style._
import scala.collection.JavaConversions._
object SPlotToXChartMapper {
def mapPlotToXChart(plot: Plot): Chart[_ <: Styler, _ <: Series] = {
plot.plotType match {
case PlotType.Bar => mapCategoryChart(plot)
case PlotType.Scatter => mapScatterChart(plot)
case PlotType.Line => mapLineChart(plot)
case PlotType.Histogram => mapCategoryChart(plot)
case PlotType.Bubble => mapBubbleChart(plot)
case PlotType.Pie => mapPieChart(plot)
case PlotType.Area => mapAreaChart(plot)
case _ => throw new Exception("Unknown plot type")
}
}
def mapPlotsGridToXChart(plotsGrid: PlotsGrid): List[Chart[_ <: Styler, _ <: Series]] = {
plotsGrid.plotType match {
case PlotType.Bar => mapCategoryChartsGrid(plotsGrid)
case PlotType.Scatter => mapScatterChartsGrid(plotsGrid)
case PlotType.Line => mapLineChartsGrid(plotsGrid)
case PlotType.Histogram => mapCategoryChartsGrid(plotsGrid)
case PlotType.Bubble => mapBubbleChartsGrid(plotsGrid)
case PlotType.Pie => mapPieChartsGrid(plotsGrid)
case PlotType.Area => mapAreaChartsGrid(plotsGrid)
case _ => throw new Exception("Unknown plot type")
}
}
private def mapCategoryChartsGrid(plotsGrid: PlotsGrid): List[Chart[_ <: Styler, _ <: Series]] = {
val charts = plotsGrid.plots.map(mapCategoryChart).toList
charts.foreach(ch => ch.getStyler.setLegendVisible(false))
charts
}
private def mapScatterChartsGrid(plotsGrid: PlotsGrid): List[Chart[_ <: Styler, _ <: Series]] = {
val charts = plotsGrid.plots.map(mapScatterChart).toList
charts.foreach(ch => ch.getStyler.setLegendVisible(false))
charts
}
private def mapLineChartsGrid(plotsGrid: PlotsGrid): List[Chart[_ <: Styler, _ <: Series]] = {
val charts = plotsGrid.plots.map(mapLineChart).toList
charts.foreach(ch => ch.getStyler.setLegendVisible(false))
charts
}
private def mapAreaChartsGrid(plotsGrid: PlotsGrid): List[Chart[_ <: Styler, _ <: Series]] = {
val charts = plotsGrid.plots.map(mapAreaChart).toList
charts.foreach(ch => ch.getStyler.setLegendVisible(false))
charts
}
private def mapBubbleChartsGrid(plotsGrid: PlotsGrid): List[Chart[_ <: Styler, _ <: Series]] = {
val charts = plotsGrid.plots.map(mapBubbleChart).toList
charts.foreach(ch => ch.getStyler.setLegendVisible(false))
charts
}
private def mapPieChartsGrid(plotsGrid: PlotsGrid): List[Chart[_ <: Styler, _ <: Series]] = {
val charts = plotsGrid.plots.map(mapPieChart).toList
charts.foreach(ch => ch.getStyler.setLegendVisible(false))
charts
}
private def mapCategoryChart(plot: Plot): CategoryChart = {
val chart = new CategoryChartBuilder().build()
for(series <- plot.series)
chart.addSeries(series.name, mapXAxisValues(series.xValues), mapYAxisValues(series.yValues))
mapPlotToChart(plot, chart)
chart
}
private def mapBubbleChart(plot: Plot): BubbleChart = {
val chart = new BubbleChartBuilder().build()
for(series <- plot.series)
chart.addSeries(series.name, mapXAxisValues(series.xValues), mapYAxisValues(series.yValues), mapYAxisValues(series.zValues))
mapPlotToChart(plot, chart)
chart
}
private def mapPieChart(plot: Plot): PieChart = {
val chart = new PieChartBuilder().build()
val seriesCount = plot.series.size
for(series <- plot.series) {
for((x,y) <- series.xValues.values zip series.yValues.values) {
val name = if(seriesCount == 1) x.toString else series.name + " " + x.toString
chart.addSeries(name, mapYAxisValueType(y))
}
}
mapPlotToChart(plot, chart)
chart
}
private def mapLineChart(plot: Plot): XYChart = mapXYChart(plot)
private def mapScatterChart(plot: Plot): XYChart = {
val chart = mapXYChart(plot)
chart.getStyler.setDefaultSeriesRenderStyle(XYSeriesRenderStyle.Scatter)
chart
}
private def mapAreaChart(plot: Plot): XYChart = {
val chart = mapXYChart(plot)
chart.getStyler.setDefaultSeriesRenderStyle(XYSeriesRenderStyle.Area)
chart
}
private def mapXYChart(plot: Plot): XYChart = {
val chart = new XYChartBuilder().build()
mapPlotToChart(plot, chart)
for(series <- plot.series)
chart.addSeries(series.name, mapXAxisValues(series.xValues), mapYAxisValues(series.yValues))
chart
}
private def mapPlotToChart(plot: Plot, chart: Chart[_, _]): Unit = {
chart.setWidth(plot.width)
chart.setHeight(plot.height)
chart.setTitle(plot.title)
chart.setXAxisTitle(plot.xTitle)
chart.setYAxisTitle(plot.yTitle)
plot.theme(chart)
val styler: Styler = chart match {
case c:XYChart => c.getStyler
case c:CategoryChart => c.getStyler
case c:BubbleChart => c.getStyler
case c:PieChart => c.getStyler
}
plot.annotations match {
case Some(b:Boolean) => styler.setHasAnnotations(b)
case None => ()
}
plot.legendVisible match {
case Some(b:Boolean) => styler.setLegendVisible(b)
case None => ()
}
}
private def mapXAxisValues(plotAxisValues: XAxisValues): java.util.List[_] =
plotAxisValues.values.map(x => x.value).toList
private def mapYAxisValues(plotAxisValues: YAxisValues): java.util.List[_ <: Number] = {
plotAxisValues
.values
.map(mapYAxisValueType)
.toList
}
private def mapYAxisValueType(y: YAxisValueType): Number = y.value match {
case i:Int => new java.lang.Integer(i)
case d:Double => new java.lang.Double(d)
case _ => throw new Exception("Not supported type.")
}
}
|
piotr-kalanski/SPlot
|
src/main/scala/com/datawizards/splot/mapper/SPlotToXChartMapper.scala
|
Scala
|
apache-2.0
| 5,958 |
package mesosphere.marathon
package api.v2
import java.net.URI
import javax.inject.Inject
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.core.{ Context, Response }
import akka.stream.Materializer
import mesosphere.marathon.api.v2.InfoEmbedResolver._
import mesosphere.marathon.api.v2.Validation._
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.api.{ AuthResource, MarathonMediaType }
import mesosphere.marathon.core.appinfo.{ GroupInfo, GroupInfoService, Selector }
import mesosphere.marathon.core.deployment.DeploymentPlan
import mesosphere.marathon.core.group.GroupManager
import mesosphere.marathon.plugin.auth._
import mesosphere.marathon.raml.{ GroupConversion, Raml }
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.state._
import mesosphere.marathon.stream.Implicits._
import mesosphere.marathon.stream.Sink
import play.api.libs.json.Json
import scala.concurrent.Future
import scala.util.matching.Regex
@Path("v2/groups")
@Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
class GroupsResource @Inject() (
groupManager: GroupManager,
infoService: GroupInfoService,
val config: MarathonConf)(implicit
val authenticator: Authenticator,
val authorizer: Authorizer,
mat: Materializer) extends AuthResource {
import GroupsResource._
import Normalization._
/** convert app to canonical form */
private implicit val appNormalization: Normalization[raml.App] = {
val appNormalizationConfig = AppNormalization.Configuration(
config.defaultNetworkName.get,
config.mesosBridgeName())
AppsResource.appNormalization(AppsResource.NormalizationConfig(config.availableFeatures, appNormalizationConfig))
}
/**
* For backward compatibility, we embed always apps, pods, and groups if nothing is specified.
*/
val defaultEmbeds = Set(EmbedApps, EmbedPods, EmbedGroups)
/**
* Path matchers. Needed since Jersey is not able to handle parameters with slashes.
*/
val ListApps: Regex = """^((?:.+/)|)apps$""".r
val ListRootApps: Regex = """^apps$""".r
val ListVersionsRE: Regex = """^(.+)/versions$""".r
val ListRootVersionRE: Regex = """^versions$""".r
val GetVersionRE: Regex = """^(.+)/versions/(.+)$""".r
val GetRootVersionRE: Regex = """^versions/(.+)$""".r
/**
* Get root group.
*/
@GET
def root(@Context req: HttpServletRequest, @QueryParam("embed") embed: java.util.Set[String]): Response =
group("/", embed, req)
/**
* Get a specific group, optionally with specific version
* @param id the identifier of the group encoded as path
* @return the group or the group versions.
*/
@GET
@Path("""{id:.+}""")
def group(
@PathParam("id") id: String,
@QueryParam("embed") embed: java.util.Set[String],
@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
import mesosphere.marathon.core.async.ExecutionContexts.global
val embeds: Set[String] = if (embed.isEmpty) defaultEmbeds else embed
val (appEmbed, groupEmbed) = resolveAppGroup(embeds)
//format:off
def appsResponse(id: PathId) =
infoService.selectAppsInGroup(id, authorizationSelectors.appSelector, appEmbed).map(info => ok(info))
def groupResponse(id: PathId) =
infoService.selectGroup(id, authorizationSelectors, appEmbed, groupEmbed).map {
case Some(info) => ok(info)
case None if id.isRoot => ok(GroupInfo.empty)
case None => unknownGroup(id)
}
def groupVersionResponse(id: PathId, version: Timestamp) =
infoService.selectGroupVersion(id, version, authorizationSelectors, groupEmbed).map {
case Some(info) => ok(info)
case None => unknownGroup(id)
}
def versionsResponse(groupId: PathId) = {
withAuthorization(ViewGroup, groupManager.group(groupId), unknownGroup(groupId)) { _ =>
result(groupManager.versions(groupId).runWith(Sink.seq).map(versions => ok(versions)))
}
}
val response: Future[Response] = id match {
case ListApps(gid) => appsResponse(gid.toRootPath)
case ListRootApps() => appsResponse(PathId.empty)
case ListVersionsRE(gid) => Future.successful(versionsResponse(gid.toRootPath))
case ListRootVersionRE() => Future.successful(versionsResponse(PathId.empty))
case GetVersionRE(gid, version) => groupVersionResponse(gid.toRootPath, Timestamp(version))
case GetRootVersionRE(version) => groupVersionResponse(PathId.empty, Timestamp(version))
case _ => groupResponse(id.toRootPath)
}
result(response)
}
/**
* Create a new group.
* @param force if the change has to be forced. A running upgrade process will be halted and the new one is started.
* @param body the request body as array byte buffer
*/
@POST
def create(
@DefaultValue("false")@QueryParam("force") force: Boolean,
body: Array[Byte],
@Context req: HttpServletRequest): Response = createWithPath("", force, body, req)
/**
* performs basic app validation and normalization for all apps (transitively) for the given group-update.
*/
def normalizeApps(basePath: PathId, update: raml.GroupUpdate): raml.GroupUpdate = {
// note: we take special care to:
// (a) canonize and rewrite the app ID before normalization, and;
// (b) canonize BUT NOT REWRITE the group ID while iterating (validation has special rules re: number of set fields)
// convert apps to canonical form here
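// e.g. (hypothetical values) an app declared with id "api" inside a group update applied at
// base /prod canonicalizes to the absolute id /prod/api before normalization.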
val apps = update.apps.map(_.map { a =>
a.copy(id = a.id.toPath.canonicalPath(basePath).toString).normalize
})
val groups = update.groups.map(_.map { g =>
// TODO: this "getOrElse" logic seems funny, but it's what nested group validation does;
// funny because all child groups that contain apps should probably specify an ID -- default to the parent's
// base seems wrong.
val groupBase = g.id.map(_.toPath.canonicalPath(basePath)).getOrElse(basePath)
// TODO: recursion without tailrec
normalizeApps(groupBase, g)
})
update.copy(apps = apps, groups = groups)
}
/**
* Create a group.
* If the path to the group does not exist, it gets created.
* @param id is the identifier of the group to update.
* @param force if the change has to be forced. A running upgrade process will be halted and the new one is started.
* @param body the request body as array byte buffer
*/
@POST
@Path("""{id:.+}""")
def createWithPath(
@PathParam("id") id: String,
@DefaultValue("false")@QueryParam("force") force: Boolean,
body: Array[Byte],
@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
assumeValid {
val validatedId = validateOrThrow(id.toRootPath)
val raw = Json.parse(body).as[raml.GroupUpdate]
val effectivePath = raw.id.map(id => validateOrThrow(PathId(id)).canonicalPath(validatedId)).getOrElse(validatedId)
val groupValidator = Group.validNestedGroupUpdateWithBase(effectivePath)
val groupUpdate = validateOrThrow(
normalizeApps(
effectivePath,
raw
))(groupValidator)
val rootGroup = groupManager.rootGroup()
def throwIfConflicting[A](conflict: Option[Any], msg: String) = {
conflict.map(_ => throw ConflictingChangeException(msg))
}
throwIfConflicting(
rootGroup.group(effectivePath),
s"Group $effectivePath is already created. Use PUT to change this group.")
throwIfConflicting(
rootGroup.transitiveAppsById.get(effectivePath),
s"An app with the path $effectivePath already exists.")
val (deployment, path) = updateOrCreate(effectivePath, groupUpdate, force)
deploymentResult(deployment, Response.created(new URI(path.toString)))
}
}
@PUT
def updateRoot(
@DefaultValue("false")@QueryParam("force") force: Boolean,
@DefaultValue("false")@QueryParam("dryRun") dryRun: Boolean,
body: Array[Byte],
@Context req: HttpServletRequest): Response = {
update("", force, dryRun, body, req)
}
/**
* Create or update a group.
* If the path to the group does not exist, it gets created.
* @param id is the identifier of the group to update.
* @param force if the change has to be forced. A running upgrade process will be halted and the new one is started.
* @param dryRun only create the deployment without executing it.
*/
@PUT
@Path("""{id:.+}""")
def update(
@PathParam("id") id: String,
@DefaultValue("false")@QueryParam("force") force: Boolean,
@DefaultValue("false")@QueryParam("dryRun") dryRun: Boolean,
body: Array[Byte],
@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
assumeValid {
val validatedId = validateOrThrow(id.toRootPath)
val raw = Json.parse(body).as[raml.GroupUpdate]
val effectivePath = raw.id.map(id => validateOrThrow(PathId(id)).canonicalPath(validatedId)).getOrElse(validatedId)
val groupValidator = Group.validNestedGroupUpdateWithBase(effectivePath)
val groupUpdate = validateOrThrow(
normalizeApps(
effectivePath,
raw
))(groupValidator)
if (dryRun) {
val newVersion = Timestamp.now()
val originalGroup = groupManager.rootGroup()
val updatedGroup = applyGroupUpdate(originalGroup, effectivePath, groupUpdate, newVersion)
ok(
Json.obj(
"steps". ->(DeploymentPlan(originalGroup, updatedGroup).steps)
).toString()
)
} else {
val (deployment, _) = updateOrCreate(effectivePath, groupUpdate, force)
deploymentResult(deployment)
}
}
}
@DELETE
def delete(
@DefaultValue("false")@QueryParam("force") force: Boolean,
@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
val version = Timestamp.now()
def clearRootGroup(rootGroup: RootGroup): RootGroup = {
checkAuthorization(DeleteGroup, rootGroup)
RootGroup(version = version)
}
val deployment = result(groupManager.updateRoot(PathId.empty, clearRootGroup, version, force))
deploymentResult(deployment)
}
/**
* Delete a specific subtree or a complete tree.
* @param id the identifier of the group to delete encoded as path
* @param force if the change has to be forced. A running upgrade process will be halted and the new one is started.
* @return A version response, which defines the resulting change.
*/
@DELETE
@Path("""{id:.+}""")
def delete(
@PathParam("id") id: String,
@DefaultValue("false")@QueryParam("force") force: Boolean,
@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
val groupId = id.toRootPath
val version = Timestamp.now()
def deleteGroup(rootGroup: RootGroup) = {
rootGroup.group(groupId) match {
case Some(group) => checkAuthorization(DeleteGroup, group)
case None => throw UnknownGroupException(groupId)
}
rootGroup.removeGroup(groupId, version)
}
val deployment = result(groupManager.updateRoot(groupId.parent, deleteGroup, version, force))
deploymentResult(deployment)
}
private def applyGroupUpdate(
rootGroup: RootGroup,
groupId: PathId,
groupUpdate: raml.GroupUpdate,
newVersion: Timestamp)(implicit identity: Identity): RootGroup = {
val group = rootGroup.group(groupId).getOrElse(Group.empty(groupId))
/**
* roll back to a previous group version
*/
def versionChange: Option[RootGroup] = groupUpdate.version.map { version =>
val targetVersion = Timestamp(version)
checkAuthorization(UpdateGroup, group)
val versionedGroup = result(groupManager.group(group.id, targetVersion))
.map(checkAuthorization(ViewGroup, _))
rootGroup.putGroup(versionedGroup.getOrElse(
throw new IllegalArgumentException(s"Group $group.id not available in version $targetVersion")
), newVersion)
}
def scaleChange: Option[RootGroup] = groupUpdate.scaleBy.map { scale =>
checkAuthorization(UpdateGroup, group)
rootGroup.updateTransitiveApps(group.id, app => app.copy(instances = (app.instances * scale).ceil.toInt), newVersion)
}
def createOrUpdateChange: RootGroup = {
// groupManager.update always passes a group, even if it doesn't exist
val maybeExistingGroup = groupManager.group(group.id)
val appConversionFunc: (raml.App => AppDefinition) = Raml.fromRaml[raml.App, AppDefinition]
val updatedGroup: Group = Raml.fromRaml(
GroupConversion(groupUpdate, group, newVersion) -> appConversionFunc)
maybeExistingGroup.fold(checkAuthorization(CreateRunSpec, updatedGroup))(checkAuthorization(UpdateGroup, _))
rootGroup.putGroup(updatedGroup, newVersion)
}
versionChange.orElse(scaleChange).getOrElse(createOrUpdateChange)
}
private def updateOrCreate(
id: PathId,
update: raml.GroupUpdate,
force: Boolean)(implicit identity: Identity): (DeploymentPlan, PathId) = {
val version = Timestamp.now()
val effectivePath = update.id.map(PathId(_).canonicalPath(id)).getOrElse(id)
val deployment = result(groupManager.updateRoot(
id.parent, applyGroupUpdate(_, effectivePath, update, version), version, force))
(deployment, effectivePath)
}
def authorizationSelectors(implicit identity: Identity): GroupInfoService.Selectors = {
GroupInfoService.Selectors(
AppsResource.authzSelector,
PodsResource.authzSelector,
authzSelector)
}
}
object GroupsResource {
private def authzSelector(implicit authz: Authorizer, identity: Identity) = Selector[Group] { g =>
authz.isAuthorized(identity, ViewGroup, g)
}
}
|
Caerostris/marathon
|
src/main/scala/mesosphere/marathon/api/v2/GroupsResource.scala
|
Scala
|
apache-2.0
| 13,888 |
package spatial.lang
import control._
import FringeTransfers._
import argon.core._
import forge._
import virtualized._
import spatial.metadata._
import spatial.utils._
// object ShiftInternal {
// target = spatialConfig.target
// @internal def expandLsh
// }
object DRAMTransfersInternal {
@stateful def target = spatialConfig.target
/** Internals **/
// Expansion rule for CoarseBurst - Use coarse_burst(tile,onchip,isLoad) for anything in the frontend
@internal def copy_dense[T:Type:Bits,C[T]](
offchip: Exp[DRAM[T]],
onchip: Exp[C[T]],
ofs: Seq[Exp[Index]],
lens: Seq[Exp[Index]],
strides: Seq[Exp[Index]],
units: Seq[Boolean],
par: Const[Index],
isLoad: Boolean,
isAlign: Boolean,
og: Seq[Exp[Index]]
)(implicit mem: Mem[T,C], mC: Type[C[T]], mD: Type[DRAM[T]], ctx: SrcCtx): MUnit = {
val unitDims = units
val offchipOffsets = wrap(ofs)
val tileDims = wrap(lens)
val strideNums = wrap(strides)
val local = wrap(onchip)
val dram = wrap(offchip)
// Last counter is used as counter for load/store
// Other counters (if any) are used to iterate over higher dimensions
val counters = tileDims.zip(strideNums).map{case(d,st) => () => Counter(start = 0, end = d, step = 1, par = 1) }
val requestLength = tileDims.last
val p = wrap(par)
val bytesPerWord = bits[T].length / 8 + (if (bits[T].length % 8 != 0) 1 else 0)
// Metaprogrammed (unstaged) if-then-else
if (counters.length > 1) {
Stream.Foreach(counters.dropRight(1).map{ctr => ctr()}){ is =>
val indices = is :+ 0.to[Index]
val offchipAddr = () => flatIndex( offchipOffsets.zip(indices.zip(strideNums)).map{case (a,(b,st)) => a + b*st}, wrap(stagedDimsOf(offchip)))
val onchipOfs = indices.zip(unitDims).collect{case (i,isUnitDim) if !isUnitDim => i }
val onchipAddr = {i: Index => onchipOfs.take(onchipOfs.length - 1).zip(og.take(onchipOfs.length-1)).map{case(of,o)=> of + wrap(o)} :+ (onchipOfs.last + i + wrap(og.last))}
if (isLoad) load(offchipAddr(), onchipAddr)
else store(offchipAddr(), onchipAddr)
}
}
else {
Stream {
def offchipAddr = () => flatIndex(offchipOffsets, wrap(stagedDimsOf(offchip)))
if (isLoad) load(offchipAddr(), {i => List(i+wrap(og.head)) })
else store(offchipAddr(), {i => List(i+wrap(og.head))})
}
}
// NOTE: Results of register reads are allowed to be used to specialize for aligned load/stores,
// as long as the value of the register read is known to be exactly some value.
// FIXME: We should also be checking if the start address is aligned...
def store(offchipAddr: => Index, onchipAddr: Index => Seq[Index]): MUnit = requestLength.s match {
case Exact(c: BigInt) if (c.toInt*bits[T].length) % target.burstSize == 0 | spatialConfig.enablePIR | isAlign => //TODO: Hack for pir
dbg(u"$onchip => $offchip: Using aligned store ($c * ${bits[T].length} % ${target.burstSize} = ${c*bits[T].length % target.burstSize})")
alignedStore(offchipAddr, onchipAddr)
case Exact(c: BigInt) =>
dbg(u"$onchip => $offchip: Using unaligned store ($c * ${bits[T].length} % ${target.burstSize} = ${c*bits[T].length % target.burstSize})")
unalignedStore(offchipAddr, onchipAddr)
case _ =>
dbg(u"$onchip => $offchip: Using unaligned store (request length is statically unknown)")
unalignedStore(offchipAddr, onchipAddr)
}
def load(offchipAddr: => Index, onchipAddr: Index => Seq[Index]): MUnit = requestLength.s match {
case Exact(c: BigInt) if (c.toInt*bits[T].length) % target.burstSize == 0 | spatialConfig.enablePIR | isAlign => //TODO: Hack for pir
dbg(u"$offchip => $onchip: Using aligned load ($c * ${bits[T].length} % ${target.burstSize} = ${c*bits[T].length % target.burstSize})")
alignedLoad(offchipAddr, onchipAddr)
case Exact(c: BigInt) =>
dbg(u"$offchip => $onchip: Using unaligned load ($c * ${bits[T].length} % ${target.burstSize}* ${c*bits[T].length % target.burstSize})")
unalignedLoad(offchipAddr, onchipAddr)
case _ =>
dbg(u"$offchip => $onchip: Using unaligned load (request length is statically unknown)")
unalignedLoad(offchipAddr, onchipAddr)
}
def alignedStore(offchipAddr: => Index, onchipAddr: Index => Seq[Index]): MUnit = {
val cmdStream = StreamOut[BurstCmd](BurstCmdBus)
isAligned(cmdStream.s) = true
// val issueQueue = FIFO[Index](16) // TODO: Size of issued queue?
val dataStream = StreamOut[MTuple2[T,Bit]](BurstFullDataBus[T]())
val ackStream = StreamIn[Bit](BurstAckBus)
// Command generator
// PIR different because FPGA VCS crashes if data gets sent before command
// if (spatialConfig.enablePIR) { // On plasticine the sequential around data and address generation is inefficient
Pipe {
val addr_bytes = (offchipAddr * bytesPerWord).to[Int64] + dram.address
val size = requestLength
val size_bytes = size * bytesPerWord
cmdStream := BurstCmd(addr_bytes.to[Int64], size_bytes, false)
// issueQueue.enq(size)
}
// Data loading
Foreach(requestLength par p){i =>
val data = mem.load(local, onchipAddr(i), true)
dataStream := pack(data,true)
}
// } else {
// Pipe {
// Pipe {
// val addr_bytes = (offchipAddr * bytesPerWord).to[Int64] + dram.address
// val size = requestLength
// val size_bytes = size * bytesPerWord
// cmdStream := BurstCmd(addr_bytes.to[Int64], size_bytes, false)
// // issueQueue.enq(size)
// }
// // Data loading
// Foreach(requestLength par p){i =>
// val data = mem.load(local, onchipAddr(i), true)
// dataStream := pack(data,true)
// }
// }
// }
// Fringe
fringe_dense_store(offchip, cmdStream.s, dataStream.s, ackStream.s)
// Ack receiver
// TODO: Assumes one ack per command
Pipe {
// val size = Reg[Index]
// Pipe{size := issueQueue.deq()}
val ack = ackStream.value()
()
// Foreach(requestLength by target.burstSize/bits[T].length) {i =>
// val ack = ackStream.value()
// }
}
}
case class AlignmentData(start: Index, end: Index, size: Index, addr_bytes: Int64, size_bytes: Index)
@virtualize
def alignmentCalc(offchipAddr: => Index) = {
/*
←--------------------------- size ----------------→
← (one burst) →
_______________________________
|-----:_________|________________|____:------------|
0
start ----⬏
end -----------------------------------⬏
extra --------⬏
*/
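      // Worked example (editor's sketch, assuming 32-bit elements and a 512-bit burst, i.e.
      // bytesPerWord = 4 and bytesPerBurst = 64): offchipAddr = 5 elements, requestLength = 10.
      // Then maddr_bytes = 20, start_bytes = 20, length_bytes = 40, offset_bytes = 0,
      // raw_end = 60, end_bytes = 4, so start = 5, extra = 1, end = 15, size = 16
      // (one full burst of elements) and size_bytes = 64.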
val elementsPerBurst = (target.burstSize/bits[T].length).to[Index]
val bytesPerBurst = target.burstSize/8
val maddr_bytes = offchipAddr * bytesPerWord // Raw address in bytes
val start_bytes = maddr_bytes % bytesPerBurst // Number of bytes offset from previous burst aligned address
val length_bytes = requestLength * bytesPerWord // Raw length in bytes
val offset_bytes = maddr_bytes - start_bytes // Burst-aligned start address, in bytes
val raw_end = maddr_bytes + length_bytes // Raw end, in bytes, with burst-aligned start
val end_bytes = Math.mux(raw_end % bytesPerBurst === 0.to[Index], 0.to[Index], bytesPerBurst - raw_end % bytesPerBurst) // Extra useless bytes at end
// FIXME: What to do for bursts which split individual words?
val start = start_bytes / bytesPerWord // Number of WHOLE elements to ignore at start
val extra = end_bytes / bytesPerWord // Number of WHOLE elements that will be ignored at end
val end = start + requestLength // Index of WHOLE elements to start ignoring at again
val size = requestLength + start + extra // Total number of WHOLE elements to expect
val size_bytes = length_bytes + start_bytes + end_bytes // Burst aligned length
val addr_bytes = offset_bytes.to[Int64] + dram.address // Burst-aligned offchip byte address
AlignmentData(start, end, size, addr_bytes, size_bytes)
}
@virtualize
def unalignedStore(offchipAddr: => Index, onchipAddr: Index => Seq[Index]): MUnit = {
val cmdStream = StreamOut[BurstCmd](BurstCmdBus)
isAligned(cmdStream.s) = false
// val issueQueue = FIFO[Index](16) // TODO: Size of issued queue?
val dataStream = StreamOut[MTuple2[T,Bit]](BurstFullDataBus[T]())
val ackStream = StreamIn[Bit](BurstAckBus)
// Command generator
Pipe{ // Outer pipe necessary or else acks may come back after extra write commands
Pipe {
val startBound = Reg[Index]
val endBound = Reg[Index]
val length = Reg[Index]
Pipe {
val aligned = alignmentCalc(offchipAddr)
cmdStream := BurstCmd(aligned.addr_bytes.to[Int64], aligned.size_bytes, false)
// issueQueue.enq(aligned.size)
startBound := aligned.start
endBound := aligned.end
length := aligned.size
}
Foreach(length par p){i =>
val en = i >= startBound && i < endBound
val data = Math.mux(en, mem.load(local,onchipAddr(i - startBound), en), implicitly[Bits[T]].zero)
dataStream := pack(data,en)
}
}
// Fringe
fringe_dense_store(offchip, cmdStream.s, dataStream.s, ackStream.s)
// Ack receive
// TODO: Assumes one ack per command
Pipe {
// val size = Reg[Index]
// Pipe{size := issueQueue.deq()}
val ack = ackStream.value()
()
// Foreach(size.value by size.value) {i => // TODO: Can we use by instead of par?
// val ack = ackStream.value()
// }
}
}
}
def alignedLoad(offchipAddr: => Index, onchipAddr: Index => Seq[Index]): MUnit = {
val cmdStream = StreamOut[BurstCmd](BurstCmdBus)
isAligned(cmdStream.s) = true
// val issueQueue = FIFO[Index](16) // TODO: Size of issued queue?
val dataStream = StreamIn[T](BurstDataBus[T]())
// Command generator
Pipe {
val addr = (offchipAddr * bytesPerWord).to[Int64] + dram.address
val size = requestLength
val addr_bytes = addr
val size_bytes = size * bytesPerWord
cmdStream := BurstCmd(addr_bytes.to[Int64], size_bytes, true)
// issueQueue.enq( size )
}
// Fringe
fringe_dense_load(offchip, cmdStream.s, dataStream.s)
// Data receiver
// Pipe {
// Pipe { val size = issueQueue.deq() }
Foreach(requestLength par p){i =>
val data = dataStream.value()
val addr = onchipAddr(i)
mem.store(local, addr, data, true)
}
// }
}
@virtualize
def unalignedLoad(offchipAddr: => Index, onchipAddr: Index => Seq[Index]): MUnit = {
val cmdStream = StreamOut[BurstCmd](BurstCmdBus)
isAligned(cmdStream.s) = false
val issueQueue = FIFO[IssuedCmd](16) // TODO: Size of issued queue?
val dataStream = StreamIn[T](BurstDataBus[T]())
// Command
Pipe {
val aligned = alignmentCalc(offchipAddr)
cmdStream := BurstCmd(aligned.addr_bytes.to[Int64], aligned.size_bytes, true)
issueQueue.enq( IssuedCmd(aligned.size, aligned.start, aligned.end) )
}
// Fringe
fringe_dense_load(offchip, cmdStream.s, dataStream.s)
// Receive
Pipe {
// TODO: Should also try Reg[IssuedCmd] here
val start = Reg[Index]
val end = Reg[Index]
val size = Reg[Index]
Pipe {
val cmd = issueQueue.deq()
start := cmd.start
end := cmd.end
size := cmd.size
}
Foreach(size par p){i =>
val en = i >= start && i < end
val addr = onchipAddr(i - start)
val data = dataStream.value()
mem.store(local, addr, data, en)
}
}
}
}
@internal def copy_sparse[T:Type:Bits](
offchip: Exp[DRAM[T]],
onchip: Exp[SRAM1[T]],
addresses: Exp[SRAM1[Index]],
size: Exp[Index],
par: Const[Index],
isLoad: Boolean
)(implicit mD: Type[DRAM[T]], ctx: SrcCtx): MUnit = {
val local = new SRAM1(onchip)
val addrs = new SRAM1(addresses)
val dram = wrap(offchip)
val requestLength = wrap(size)
val p = wrap(par)
val bytesPerWord = bits[T].length / 8 + (if (bits[T].length % 8 != 0) 1 else 0)
// FIXME: Bump up request to nearest multiple of 16 because of fringe
val iters = Reg[Index](0)
Pipe{
iters := Math.mux(requestLength < 16.to[Index], 16.to[Index],
Math.mux(requestLength % 16.to[Index] === 0.to[Index], requestLength, requestLength + 16.to[Index] - (requestLength % 16.to[Index]) ))
// (requestLength + math_mux((requestLength % 16.to[Index] === 0.to[Index]).s, (0.to[Index]).s, (16.to[Index] - (requestLength % 16.to[Index])).s )).s
}
Stream {
// Gather
if (isLoad) {
val addrBus = StreamOut[Int64](GatherAddrBus)
val dataBus = StreamIn[T](GatherDataBus[T]())
// Send
Foreach(iters par p){i =>
val addr = mux(i >= requestLength, dram.address.to[Int64], (addrs(i) * bytesPerWord).to[Int64] + dram.address)
val addr_bytes = addr
addrBus := addr_bytes
// addrBus := addr_bytes
}
// Fringe
fringe_sparse_load(offchip, addrBus.s, dataBus.s)
// Receive
Foreach(iters par p){i =>
val data = dataBus.value()
SRAM.store(local.s, stagedDimsOf(local.s), Seq(i.s), i.s/*notused*/, unwrap(data), (i < requestLength).s)
()
// local(i) = data
}
}
// Scatter
else {
val cmdBus = StreamOut[MTuple2[T,Int64]](ScatterCmdBus[T]())
val ackBus = StreamIn[Bit](ScatterAckBus)
// Send
Foreach(iters par p){i =>
val pad_addr = Math.max(requestLength - 1, 0.to[Index])
val unique_addr = addrs(pad_addr)
val addr = Math.mux(i >= requestLength,
(unique_addr * bytesPerWord).to[Int64] + dram.address,
(addrs(i) * bytesPerWord).to[Int64] + dram.address
)
val data = Math.mux(i >= requestLength, local(pad_addr), local(i))
val addr_bytes = addr
cmdBus := pack(data, addr_bytes)
}
// Fringe
fringe_sparse_store(offchip, cmdBus.s, ackBus.s)
// Receive
// TODO: Assumes one ack per address
Foreach(iters by target.burstSize/bits[T].length){i =>
val ack = ackBus.value()
}
}
}
}
@internal def copy_sparse_mem[T,C[T],A[_]](
offchip: Exp[DRAM[T]],
onchip: Exp[C[T]],
addresses: Exp[A[Index]],
size: Exp[Index],
par: Const[Index],
isLoad: Boolean
)(implicit mT: Type[T],
bT: Bits[T],
memC: Mem[T,C],
mC: Type[C[T]],
memA: Mem[Index,A],
mA: Type[A[Index]],
mD: Type[DRAM[T]],
ctx: SrcCtx
): MUnit = {
val local = wrap(onchip)
val addrs = wrap(addresses)
val dram = wrap(offchip)
val requestLength = wrap(size)
val p = wrap(par)
val bytesPerWord = bits[T].length / 8 + (if (bits[T].length % 8 != 0) 1 else 0)
// FIXME: Bump up request to nearest multiple of 16 because of fringe
val iters = Reg[Index](0)
Pipe{
iters := Math.mux(requestLength < 16.to[Index], 16.to[Index],
Math.mux(requestLength % 16.to[Index] === 0.to[Index], requestLength, requestLength + 16.to[Index] - (requestLength % 16.to[Index]) ))
// (requestLength + math_mux((requestLength % 16.to[Index] === 0.to[Index]).s, (0.to[Index]).s, (16.to[Index] - (requestLength % 16.to[Index])).s )).s
}
Stream {
// Gather
if (isLoad) {
val addrBus = StreamOut[Int64](GatherAddrBus)
val dataBus = StreamIn[T](GatherDataBus[T]())
// Send
Foreach(iters par p){i =>
val addr = ifThenElse(i >= requestLength, dram.address.to[Int64], (memA.load(addrs,Seq(i),true) * bytesPerWord).to[Int64] + dram.address )
val addr_bytes = addr
addrBus := addr_bytes
// addrBus := addr_bytes
}
// Fringe
fringe_sparse_load(offchip, addrBus.s, dataBus.s)
// Receive
Foreach(iters par p){i =>
val data = dataBus.value()
memC.store(local, Seq(i), data, i < requestLength)
}
}
// Scatter
else {
val cmdBus = StreamOut[MTuple2[T,Int64]](ScatterCmdBus[T]())
val ackBus = StreamIn[Bit](ScatterAckBus)
// Send
Foreach(iters par p){i =>
val pad_addr = Math.max(requestLength - 1, 0.to[Index])
// Using ifThenElse instead of syntax sugar out of convenience (sugar needs T <: MetaAny[T] evidence...)
//val curAddr = if (i >= requestLength) memA.load(addrs, Seq(pad_addr), true) else memA.load(addrs, Seq(i), true)
//val data = if (i >= requestLength) memC.load(local, Seq(pad_addr), true) else memC.load(local, Seq(i), true)
val curAddr = ifThenElse(i >= requestLength, memA.load(addrs, Seq(pad_addr), true), memA.load(addrs, Seq(i), true))
val data = ifThenElse(i >= requestLength, memC.load(local, Seq(pad_addr), true), memC.load(local, Seq(i), true))
val addr = (curAddr * bytesPerWord).to[Int64] + dram.address
val addr_bytes = addr
cmdBus := pack(data, addr_bytes)
}
// Fringe
fringe_sparse_store(offchip, cmdBus.s, ackBus.s)
// Receive
// TODO: Assumes one ack per address
Foreach(iters by target.burstSize/bits[T].length){i =>
val ack = ackBus.value()
}
}
}
}
}
|
stanford-ppl/spatial-lang
|
spatial/core/src/spatial/lang/DRAMTransfersInternal.scala
|
Scala
|
mit
| 18,439 |
package models.join
import models.db._
import scalikejdbc._
import scala.collection.breakOut
/**
*
* @author ponkotuy
* Date: 15/02/14.
*/
case class MasterRemodelWithName(
master: MasterRemodel,
slotitem: MasterSlotItem,
use: Option[MasterSlotItem],
to: Option[RemodelWithName],
secondShip: MasterShipBase)
object MasterRemodelWithName {
val MaxLevel = 10
def apply(
master: MasterRemodel,
slotitem: MasterSlotItem,
use: Option[MasterSlotItem],
secondShip: MasterShipBase): MasterRemodelWithName = {
val to = {
if(master.slotitemLevel == MaxLevel) {
val where = sqls"r.before_item_level = ${MaxLevel}"
.and append sqls"r.before_item_id = ${master.slotitemId}"
.and append sqls"ras.id is not null"
Remodel.findAllByWithName(where, limit = 1).headOption
} else None
}
MasterRemodelWithName(master, slotitem, use, to, secondShip)
}
}
case class MasterRemodelJson(
master: MasterRemodel,
slotitem: MasterSlotItem,
use: Option[MasterSlotItem],
to: Option[RemodelWithName],
secondShip: String)
object MasterRemodelJson {
def fromWithName(xs: Iterable[MasterRemodelWithName]): Vector[MasterRemodelJson] = {
val result: Vector[MasterRemodelJson] = xs.groupBy { x => (x.master.slotitemId, x.master.slotitemLevel) }.values.flatMap { ys =>
val groups = ys.groupBy(_.to.map(_.after.id)).values
groups.map { zs =>
val seconds = if(groups.size <= 1) "全て" else zs.map(_.secondShip.name).mkString(", ")
val z = zs.head
MasterRemodelJson(z.master, z.slotitem, z.use, z.to, seconds)
}
}(breakOut)
result.sortBy(_.master.slotitemLevel)
}
}
|
ttdoda/MyFleetGirls
|
server/app/models/join/MasterRemodelWithName.scala
|
Scala
|
mit
| 1,724 |
import scala.quoted.*
def f(sc: quoted.Expr[StringContext]): Unit = {
sc match {
case '{ StringContext(${Varargs(parts)}*) } => // error
val ps: Seq[Expr[String]] = parts // error
}
}
|
dotty-staging/dotty
|
tests/neg-macros/i6436.scala
|
Scala
|
apache-2.0
| 198 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.codec
import wvlet.airframe.msgpack.spi.{Packer, Unpacker, ValueType}
import wvlet.airframe.ulid.ULID
/**
*/
object ULIDCodec extends MessageCodec[ULID] {
override def pack(p: Packer, v: ULID): Unit = {
p.packString(v.toString)
}
override def unpack(
u: Unpacker,
v: MessageContext
): Unit = {
u.getNextValueType match {
case ValueType.STRING =>
val s = u.unpackString
try {
v.setObject(ULID.fromString(s))
} catch {
case e: IllegalArgumentException =>
v.setError(e)
}
case _ =>
u.skipValue
v.setNull
}
}
}
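// Editor's note: a hedged round-trip sketch. `toMsgPack`/`fromMsgPack` are the generic helpers
// provided by airframe-codec's MessageCodec; treat the exact method names as an assumption here.
//
//   val ulid = ULID.newULID
//   val packed: Array[Byte] = ULIDCodec.toMsgPack(ulid)
//   assert(ULIDCodec.fromMsgPack(packed) == ulid)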
|
wvlet/airframe
|
airframe-codec/src/main/scala/wvlet/airframe/codec/ULIDCodec.scala
|
Scala
|
apache-2.0
| 1,221 |
package com.twitter.finagle.server
import com.twitter.finagle.filter.{MaskCancelFilter, RequestSemaphoreFilter}
import com.twitter.finagle.service.TimeoutFilter
import com.twitter.finagle.stats.{StatsReceiver, ServerStatsReceiver}
import com.twitter.finagle.tracing._
import com.twitter.finagle.transport.Transport
import com.twitter.finagle.util.{DefaultMonitor, DefaultTimer, DefaultLogger, ReporterFactory, LoadedReporterFactory}
import com.twitter.finagle.{param, Stack}
import com.twitter.finagle.{Server, Service, ServiceFactory, ListeningServer}
import com.twitter.util.{Closable, Duration, Future, Monitor, Timer}
import java.net.SocketAddress
/**
* The default Server implementation. It is given a Listener (eg.
* [[com.twitter.finagle.netty3.Netty3Listener]]) and a function,
* serveTransport, that binds a transport and a service. It will then
* dispatch requests onto a standard service stack parameterized as
* described below.
*
* @param listener The Listener from which to accept new typed
* Transports.
*
* @param serviceTransport The function used to bind an accepted
* Transport with a Service. Requests read from the transport are
* dispatched onto the Service, with replies written back.
*
* @param requestTimeout The maximum amount of time the server is
* allowed to handle a request. If the timeout expires, the server
* will cancel the future and terminate the client connection.
*
* @param maxConcurrentRequests The maximum number of concurrent
* requests the server is willing to handle.
*
* @param cancelOnHangup Enabled by default. If disabled, a client hangup on the
* transport does not interrupt (cancel) outstanding requests; such cancellations are masked.
*
* @param prepare Prepare the given `ServiceFactory` before use.
*/
case class DefaultServer[Req, Rep, In, Out](
name: String,
listener: Listener[In, Out],
serviceTransport: (Transport[In, Out], Service[Req, Rep]) => Closable,
requestTimeout: Duration = Duration.Top,
maxConcurrentRequests: Int = Int.MaxValue,
cancelOnHangup: Boolean = true,
prepare: ServiceFactory[Req, Rep] => ServiceFactory[Req, Rep] =
(sf: ServiceFactory[Req, Rep]) => sf,
timer: Timer = DefaultTimer.twitter,
monitor: Monitor = DefaultMonitor,
logger: java.util.logging.Logger = DefaultLogger,
statsReceiver: StatsReceiver = ServerStatsReceiver,
tracer: Tracer = DefaultTracer,
reporter: ReporterFactory = LoadedReporterFactory,
newTraceInitializer: Stack.Simple[ServiceFactory[Req, Rep]] = TraceInitializerFilter.serverModule[Req, Rep]
) extends Server[Req, Rep] {
val stack = StackServer.newStack[Req, Rep]
.replace(StackServer.Role.preparer, prepare)
.replace(TraceInitializerFilter.role, newTraceInitializer)
private type _In = In
private type _Out = Out
private case class Server(
stack: Stack[ServiceFactory[Req, Rep]] = stack,
params: Stack.Params = Stack.Params.empty
) extends StdStackServer[Req, Rep, Server] {
protected def copy1(
stack: Stack[ServiceFactory[Req, Rep]] = this.stack,
params: Stack.Params = this.params
) = copy(stack, params)
protected type In = _In
protected type Out = _Out
protected def newListener() = listener
protected def newDispatcher(transport: Transport[In, Out], service: Service[Req, Rep]) =
serviceTransport(transport, service)
}
val underlying: StackServer[Req, Rep] = Server()
val configured = underlying
.configured(param.Label(name))
.configured(param.Timer(timer))
.configured(param.Monitor(monitor))
.configured(param.Logger(logger))
.configured(param.Stats(statsReceiver))
.configured(param.Tracer(tracer))
.configured(param.Reporter(reporter))
.configured(MaskCancelFilter.Param(!cancelOnHangup))
.configured(TimeoutFilter.Param(requestTimeout))
.configured(RequestSemaphoreFilter.Param(maxConcurrentRequests))
def serve(addr: SocketAddress, factory: ServiceFactory[Req, Rep]): ListeningServer =
configured.serve(addr, factory)
}
|
yancl/finagle-6.22.0
|
finagle-core/src/main/scala/com/twitter/finagle/server/DefaultServer.scala
|
Scala
|
apache-2.0
| 3,974 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.connector.catalog
import java.time.{Instant, ZoneId}
import java.time.temporal.ChronoUnit
import java.util
import java.util.OptionalLong
import scala.collection.mutable
import org.scalatest.Assertions._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.{GenericInternalRow, JoinedRow}
import org.apache.spark.sql.catalyst.util.{CharVarcharUtils, DateTimeUtils}
import org.apache.spark.sql.connector.distributions.{Distribution, Distributions}
import org.apache.spark.sql.connector.expressions._
import org.apache.spark.sql.connector.metric.{CustomMetric, CustomTaskMetric}
import org.apache.spark.sql.connector.read._
import org.apache.spark.sql.connector.write._
import org.apache.spark.sql.connector.write.streaming.{StreamingDataWriterFactory, StreamingWrite}
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.unsafe.types.UTF8String
/**
* A simple in-memory table. Rows are stored as a buffered group produced by each output task.
*/
class InMemoryTable(
val name: String,
val schema: StructType,
override val partitioning: Array[Transform],
override val properties: util.Map[String, String],
val distribution: Distribution = Distributions.unspecified(),
val ordering: Array[SortOrder] = Array.empty,
val numPartitions: Option[Int] = None)
extends Table with SupportsRead with SupportsWrite with SupportsDelete
with SupportsMetadataColumns {
private object PartitionKeyColumn extends MetadataColumn {
override def name: String = "_partition"
override def dataType: DataType = StringType
override def comment: String = "Partition key used to store the row"
}
private object IndexColumn extends MetadataColumn {
override def name: String = "index"
override def dataType: DataType = IntegerType
override def comment: String = "Metadata column used to conflict with a data column"
}
// purposely exposes a metadata column that conflicts with a data column in some tests
override val metadataColumns: Array[MetadataColumn] = Array(IndexColumn, PartitionKeyColumn)
private val metadataColumnNames = metadataColumns.map(_.name).toSet -- schema.map(_.name)
private val allowUnsupportedTransforms =
properties.getOrDefault("allow-unsupported-transforms", "false").toBoolean
partitioning.foreach {
case _: IdentityTransform =>
case _: YearsTransform =>
case _: MonthsTransform =>
case _: DaysTransform =>
case _: HoursTransform =>
case _: BucketTransform =>
case t if !allowUnsupportedTransforms =>
throw new IllegalArgumentException(s"Transform $t is not a supported transform")
}
// The key `Seq[Any]` is the partition values.
val dataMap: mutable.Map[Seq[Any], BufferedRows] = mutable.Map.empty
def data: Array[BufferedRows] = dataMap.values.toArray
def rows: Seq[InternalRow] = dataMap.values.flatMap(_.rows).toSeq
private val partCols: Array[Array[String]] = partitioning.flatMap(_.references).map { ref =>
schema.findNestedField(ref.fieldNames(), includeCollections = false) match {
case Some(_) => ref.fieldNames()
case None => throw new IllegalArgumentException(s"${ref.describe()} does not exist.")
}
}
private val UTC = ZoneId.of("UTC")
private val EPOCH_LOCAL_DATE = Instant.EPOCH.atZone(UTC).toLocalDate
private def getKey(row: InternalRow): Seq[Any] = {
def extractor(
fieldNames: Array[String],
schema: StructType,
row: InternalRow): (Any, DataType) = {
val index = schema.fieldIndex(fieldNames(0))
val value = row.toSeq(schema).apply(index)
if (fieldNames.length > 1) {
(value, schema(index).dataType) match {
case (row: InternalRow, nestedSchema: StructType) =>
extractor(fieldNames.drop(1), nestedSchema, row)
case (_, dataType) =>
throw new IllegalArgumentException(s"Unsupported type, ${dataType.simpleString}")
}
} else {
(value, schema(index).dataType)
}
}
val cleanedSchema = CharVarcharUtils.replaceCharVarcharWithStringInSchema(schema)
partitioning.map {
case IdentityTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row)._1
case YearsTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (days: Int, DateType) =>
ChronoUnit.YEARS.between(EPOCH_LOCAL_DATE, DateTimeUtils.daysToLocalDate(days))
case (micros: Long, TimestampType) =>
val localDate = DateTimeUtils.microsToInstant(micros).atZone(UTC).toLocalDate
ChronoUnit.YEARS.between(EPOCH_LOCAL_DATE, localDate)
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case MonthsTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (days: Int, DateType) =>
ChronoUnit.MONTHS.between(EPOCH_LOCAL_DATE, DateTimeUtils.daysToLocalDate(days))
case (micros: Long, TimestampType) =>
val localDate = DateTimeUtils.microsToInstant(micros).atZone(UTC).toLocalDate
ChronoUnit.MONTHS.between(EPOCH_LOCAL_DATE, localDate)
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case DaysTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (days, DateType) =>
days
case (micros: Long, TimestampType) =>
ChronoUnit.DAYS.between(Instant.EPOCH, DateTimeUtils.microsToInstant(micros))
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case HoursTransform(ref) =>
extractor(ref.fieldNames, cleanedSchema, row) match {
case (micros: Long, TimestampType) =>
ChronoUnit.HOURS.between(Instant.EPOCH, DateTimeUtils.microsToInstant(micros))
case (v, t) =>
throw new IllegalArgumentException(s"Match: unsupported argument(s) type - ($v, $t)")
}
case BucketTransform(numBuckets, ref) =>
val (value, dataType) = extractor(ref.fieldNames, cleanedSchema, row)
val valueHashCode = if (value == null) 0 else value.hashCode
((valueHashCode + 31 * dataType.hashCode()) & Integer.MAX_VALUE) % numBuckets
}
}
protected def addPartitionKey(key: Seq[Any]): Unit = {}
protected def renamePartitionKey(
partitionSchema: StructType,
from: Seq[Any],
to: Seq[Any]): Boolean = {
val rows = dataMap.remove(from).getOrElse(new BufferedRows(from.mkString("/")))
val newRows = new BufferedRows(to.mkString("/"))
rows.rows.foreach { r =>
val newRow = new GenericInternalRow(r.numFields)
for (i <- 0 until r.numFields) newRow.update(i, r.get(i, schema(i).dataType))
for (i <- 0 until partitionSchema.length) {
val j = schema.fieldIndex(partitionSchema(i).name)
newRow.update(j, to(i))
}
newRows.withRow(newRow)
}
dataMap.put(to, newRows).foreach { _ =>
throw new IllegalStateException(
s"The ${to.mkString("[", ", ", "]")} partition exists already")
}
true
}
protected def removePartitionKey(key: Seq[Any]): Unit = dataMap.synchronized {
dataMap.remove(key)
}
protected def createPartitionKey(key: Seq[Any]): Unit = dataMap.synchronized {
if (!dataMap.contains(key)) {
val emptyRows = new BufferedRows(key.toArray.mkString("/"))
val rows = if (key.length == schema.length) {
emptyRows.withRow(InternalRow.fromSeq(key))
} else emptyRows
dataMap.put(key, rows)
}
}
protected def clearPartition(key: Seq[Any]): Unit = dataMap.synchronized {
assert(dataMap.contains(key))
dataMap(key).clear()
}
def withData(data: Array[BufferedRows]): InMemoryTable = dataMap.synchronized {
data.foreach(_.rows.foreach { row =>
val key = getKey(row)
dataMap += dataMap.get(key)
.map(key -> _.withRow(row))
.getOrElse(key -> new BufferedRows(key.toArray.mkString("/")).withRow(row))
addPartitionKey(key)
})
this
}
override def capabilities: util.Set[TableCapability] = util.EnumSet.of(
TableCapability.BATCH_READ,
TableCapability.BATCH_WRITE,
TableCapability.STREAMING_WRITE,
TableCapability.OVERWRITE_BY_FILTER,
TableCapability.OVERWRITE_DYNAMIC,
TableCapability.TRUNCATE)
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder = {
new InMemoryScanBuilder(schema)
}
class InMemoryScanBuilder(tableSchema: StructType) extends ScanBuilder
with SupportsPushDownRequiredColumns {
private var schema: StructType = tableSchema
override def build: Scan =
new InMemoryBatchScan(data.map(_.asInstanceOf[InputPartition]), schema, tableSchema)
override def pruneColumns(requiredSchema: StructType): Unit = {
val schemaNames = metadataColumnNames ++ tableSchema.map(_.name)
schema = StructType(requiredSchema.filter(f => schemaNames.contains(f.name)))
}
}
case class InMemoryStats(sizeInBytes: OptionalLong, numRows: OptionalLong) extends Statistics
case class InMemoryBatchScan(
var data: Seq[InputPartition],
readSchema: StructType,
tableSchema: StructType)
extends Scan with Batch with SupportsRuntimeFiltering with SupportsReportStatistics {
override def toBatch: Batch = this
override def estimateStatistics(): Statistics = {
if (data.isEmpty) {
return InMemoryStats(OptionalLong.of(0L), OptionalLong.of(0L))
}
val inputPartitions = data.map(_.asInstanceOf[BufferedRows])
val numRows = inputPartitions.map(_.rows.size).sum
// we assume an average object header is 12 bytes
val objectHeaderSizeInBytes = 12L
val rowSizeInBytes = objectHeaderSizeInBytes + schema.defaultSize
val sizeInBytes = numRows * rowSizeInBytes
InMemoryStats(OptionalLong.of(sizeInBytes), OptionalLong.of(numRows))
}
override def planInputPartitions(): Array[InputPartition] = data.toArray
override def createReaderFactory(): PartitionReaderFactory = {
val metadataColumns = readSchema.map(_.name).filter(metadataColumnNames.contains)
val nonMetadataColumns = readSchema.filterNot(f => metadataColumns.contains(f.name))
new BufferedRowsReaderFactory(metadataColumns, nonMetadataColumns, tableSchema)
}
override def filterAttributes(): Array[NamedReference] = {
val scanFields = readSchema.fields.map(_.name).toSet
partitioning.flatMap(_.references)
.filter(ref => scanFields.contains(ref.fieldNames.mkString(".")))
}
override def filter(filters: Array[Filter]): Unit = {
if (partitioning.length == 1) {
filters.foreach {
case In(attrName, values) if attrName == partitioning.head.name =>
val matchingKeys = values.map(_.toString).toSet
data = data.filter(partition => {
val key = partition.asInstanceOf[BufferedRows].key
matchingKeys.contains(key)
})
case _ => // skip
}
}
}
}
override def newWriteBuilder(info: LogicalWriteInfo): WriteBuilder = {
InMemoryTable.maybeSimulateFailedTableWrite(new CaseInsensitiveStringMap(properties))
InMemoryTable.maybeSimulateFailedTableWrite(info.options)
new WriteBuilder with SupportsTruncate with SupportsOverwrite with SupportsDynamicOverwrite {
private var writer: BatchWrite = Append
private var streamingWriter: StreamingWrite = StreamingAppend
override def truncate(): WriteBuilder = {
assert(writer == Append)
writer = TruncateAndAppend
streamingWriter = StreamingTruncateAndAppend
this
}
override def overwrite(filters: Array[Filter]): WriteBuilder = {
assert(writer == Append)
writer = new Overwrite(filters)
streamingWriter = new StreamingNotSupportedOperation(s"overwrite ($filters)")
this
}
override def overwriteDynamicPartitions(): WriteBuilder = {
assert(writer == Append)
writer = DynamicOverwrite
streamingWriter = new StreamingNotSupportedOperation("overwriteDynamicPartitions")
this
}
override def build(): Write = new Write with RequiresDistributionAndOrdering {
override def requiredDistribution: Distribution = distribution
override def requiredOrdering: Array[SortOrder] = ordering
override def requiredNumPartitions(): Int = {
numPartitions.getOrElse(0)
}
override def toBatch: BatchWrite = writer
override def toStreaming: StreamingWrite = streamingWriter match {
case exc: StreamingNotSupportedOperation => exc.throwsException()
case s => s
}
override def supportedCustomMetrics(): Array[CustomMetric] = {
Array(new InMemorySimpleCustomMetric)
}
}
}
}
private abstract class TestBatchWrite extends BatchWrite {
override def createBatchWriterFactory(info: PhysicalWriteInfo): DataWriterFactory = {
BufferedRowsWriterFactory
}
override def abort(messages: Array[WriterCommitMessage]): Unit = {}
}
private object Append extends TestBatchWrite {
override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
withData(messages.map(_.asInstanceOf[BufferedRows]))
}
}
private object DynamicOverwrite extends TestBatchWrite {
override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
val newData = messages.map(_.asInstanceOf[BufferedRows])
dataMap --= newData.flatMap(_.rows.map(getKey))
withData(newData)
}
}
private class Overwrite(filters: Array[Filter]) extends TestBatchWrite {
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper
override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
val deleteKeys = InMemoryTable.filtersToKeys(
dataMap.keys, partCols.map(_.toSeq.quoted), filters)
dataMap --= deleteKeys
withData(messages.map(_.asInstanceOf[BufferedRows]))
}
}
private object TruncateAndAppend extends TestBatchWrite {
override def commit(messages: Array[WriterCommitMessage]): Unit = dataMap.synchronized {
dataMap.clear
withData(messages.map(_.asInstanceOf[BufferedRows]))
}
}
private abstract class TestStreamingWrite extends StreamingWrite {
def createStreamingWriterFactory(info: PhysicalWriteInfo): StreamingDataWriterFactory = {
BufferedRowsWriterFactory
}
def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {}
}
private class StreamingNotSupportedOperation(operation: String) extends TestStreamingWrite {
override def createStreamingWriterFactory(info: PhysicalWriteInfo): StreamingDataWriterFactory =
throwsException()
override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit =
throwsException()
override def abort(epochId: Long, messages: Array[WriterCommitMessage]): Unit =
throwsException()
def throwsException[T](): T = throw new IllegalStateException("The operation " +
s"${operation} isn't supported for streaming query.")
}
private object StreamingAppend extends TestStreamingWrite {
override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
dataMap.synchronized {
withData(messages.map(_.asInstanceOf[BufferedRows]))
}
}
}
private object StreamingTruncateAndAppend extends TestStreamingWrite {
override def commit(epochId: Long, messages: Array[WriterCommitMessage]): Unit = {
dataMap.synchronized {
dataMap.clear
withData(messages.map(_.asInstanceOf[BufferedRows]))
}
}
}
override def canDeleteWhere(filters: Array[Filter]): Boolean = {
InMemoryTable.supportsFilters(filters)
}
override def deleteWhere(filters: Array[Filter]): Unit = dataMap.synchronized {
import org.apache.spark.sql.connector.catalog.CatalogV2Implicits.MultipartIdentifierHelper
dataMap --= InMemoryTable.filtersToKeys(dataMap.keys, partCols.map(_.toSeq.quoted), filters)
}
}
object InMemoryTable {
val SIMULATE_FAILED_WRITE_OPTION = "spark.sql.test.simulateFailedWrite"
def filtersToKeys(
keys: Iterable[Seq[Any]],
partitionNames: Seq[String],
filters: Array[Filter]): Iterable[Seq[Any]] = {
keys.filter { partValues =>
filters.flatMap(splitAnd).forall {
case EqualTo(attr, value) =>
value == extractValue(attr, partitionNames, partValues)
case EqualNullSafe(attr, value) =>
val attrVal = extractValue(attr, partitionNames, partValues)
          if (attrVal == null && value == null) {
            true
          } else if (attrVal == null || value == null) {
false
} else {
value == attrVal
}
case IsNull(attr) =>
null == extractValue(attr, partitionNames, partValues)
case IsNotNull(attr) =>
null != extractValue(attr, partitionNames, partValues)
case AlwaysTrue() => true
case f =>
throw new IllegalArgumentException(s"Unsupported filter type: $f")
}
}
}
def supportsFilters(filters: Array[Filter]): Boolean = {
filters.flatMap(splitAnd).forall {
case _: EqualTo => true
case _: EqualNullSafe => true
case _: IsNull => true
case _: IsNotNull => true
case _: AlwaysTrue => true
case _ => false
}
}
private def extractValue(
attr: String,
partFieldNames: Seq[String],
partValues: Seq[Any]): Any = {
partFieldNames.zipWithIndex.find(_._1 == attr) match {
case Some((_, partIndex)) =>
partValues(partIndex)
case _ =>
throw new IllegalArgumentException(s"Unknown filter attribute: $attr")
}
}
private def splitAnd(filter: Filter): Seq[Filter] = {
filter match {
case And(left, right) => splitAnd(left) ++ splitAnd(right)
case _ => filter :: Nil
}
}
def maybeSimulateFailedTableWrite(tableOptions: CaseInsensitiveStringMap): Unit = {
if (tableOptions.getBoolean(SIMULATE_FAILED_WRITE_OPTION, false)) {
throw new IllegalStateException("Manual write to table failure.")
}
}
}
class BufferedRows(
val key: String = "") extends WriterCommitMessage with InputPartition with Serializable {
val rows = new mutable.ArrayBuffer[InternalRow]()
def withRow(row: InternalRow): BufferedRows = {
rows.append(row)
this
}
def clear(): Unit = rows.clear()
}
private class BufferedRowsReaderFactory(
metadataColumnNames: Seq[String],
nonMetaDataColumns: Seq[StructField],
tableSchema: StructType) extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
new BufferedRowsReader(partition.asInstanceOf[BufferedRows], metadataColumnNames,
nonMetaDataColumns, tableSchema)
}
}
private class BufferedRowsReader(
partition: BufferedRows,
metadataColumnNames: Seq[String],
nonMetadataColumns: Seq[StructField],
tableSchema: StructType) extends PartitionReader[InternalRow] {
private def addMetadata(row: InternalRow): InternalRow = {
val metadataRow = new GenericInternalRow(metadataColumnNames.map {
case "index" => index
case "_partition" => UTF8String.fromString(partition.key)
}.toArray)
new JoinedRow(row, metadataRow)
}
private var index: Int = -1
override def next(): Boolean = {
index += 1
index < partition.rows.length
}
override def get(): InternalRow = {
val originalRow = partition.rows(index)
val values = new Array[Any](nonMetadataColumns.length)
nonMetadataColumns.zipWithIndex.foreach { case (col, idx) =>
values(idx) = extractFieldValue(col, tableSchema, originalRow)
}
addMetadata(new GenericInternalRow(values))
}
override def close(): Unit = {}
private def extractFieldValue(
field: StructField,
schema: StructType,
row: InternalRow): Any = {
val index = schema.fieldIndex(field.name)
field.dataType match {
case StructType(fields) =>
if (row.isNullAt(index)) {
return null
}
val childRow = row.toSeq(schema)(index).asInstanceOf[InternalRow]
val childSchema = schema(index).dataType.asInstanceOf[StructType]
val resultValue = new Array[Any](fields.length)
fields.zipWithIndex.foreach { case (childField, idx) =>
val childValue = extractFieldValue(childField, childSchema, childRow)
resultValue(idx) = childValue
}
new GenericInternalRow(resultValue)
case dt =>
row.get(index, dt)
}
}
}
private object BufferedRowsWriterFactory extends DataWriterFactory with StreamingDataWriterFactory {
override def createWriter(partitionId: Int, taskId: Long): DataWriter[InternalRow] = {
new BufferWriter
}
override def createWriter(
partitionId: Int,
taskId: Long,
epochId: Long): DataWriter[InternalRow] = {
new BufferWriter
}
}
private class BufferWriter extends DataWriter[InternalRow] {
private val buffer = new BufferedRows
override def write(row: InternalRow): Unit = buffer.rows.append(row.copy())
override def commit(): WriterCommitMessage = buffer
override def abort(): Unit = {}
override def close(): Unit = {}
override def currentMetricsValues(): Array[CustomTaskMetric] = {
val metric = new CustomTaskMetric {
override def name(): String = "in_memory_buffer_rows"
override def value(): Long = buffer.rows.size
}
Array(metric)
}
}
class InMemorySimpleCustomMetric extends CustomMetric {
override def name(): String = "in_memory_buffer_rows"
override def description(): String = "number of rows in buffer"
override def aggregateTaskMetrics(taskMetrics: Array[Long]): String = {
s"in-memory rows: ${taskMetrics.sum}"
}
}
|
chuckchen/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryTable.scala
|
Scala
|
apache-2.0
| 23,184 |
package org.aprsdroid.app
import _root_.android.content.Context
import _root_.android.location._
import _root_.android.util.Log
import _root_.android.os.{Build, Bundle, Handler, Looper}
import _root_.java.io.{BufferedReader, InputStream, InputStreamReader, OutputStream, OutputStreamWriter}
import _root_.net.ab0oo.aprs.parser._
class KenwoodProto(service : AprsService, is : InputStream, os : OutputStream) extends TncProto(is, null) {
val TAG = "APRSdroid.KenwoodProto"
val br = new BufferedReader(new InputStreamReader(is))
val sinkhole = new LocationSinkhole()
val locMan = service.getSystemService(Context.LOCATION_SERVICE).asInstanceOf[LocationManager]
val output = new OutputStreamWriter(os)
var listenerR5 : NmeaListenerR5 = null
var listenerR24 : NmeaListenerR24 = null
if (service.prefs.getBoolean("kenwood.gps", false)) {
new Handler(Looper.getMainLooper()).post(new Runnable() { override def run() {
locMan.requestLocationUpdates(LocationManager.GPS_PROVIDER,
0, 0, sinkhole)
if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
listenerR5 = new NmeaListenerR5()
locMan.addNmeaListener(listenerR5)
} else {
// TODO: if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
listenerR24 = new NmeaListenerR24()
locMan.addNmeaListener(listenerR24)
}
}})
}
def wpl2aprs(line : String) = {
val s = line.split("[,*]") // get and split nmea
s(0) match {
case "$PKWDWPL" =>
val lat = "%s%s".format(s(3), s(4))
val lon = "%s%s".format(s(5), s(6))
val call = s(11).trim()
val sym = s(12)
"%s>APRS:!%s%s%s%s".format(call, lat, sym(0), lon, sym(1))
case "$GPWPL" =>
val lat = "%s%s".format(s(1), s(2))
val lon = "%s%s".format(s(3), s(4))
val call = s(5).trim()
"%s>APRS:!%s/%s/".format(call, lat, lon)
case _ => line.replaceFirst("^(cmd:)+", "") // workaround for Kenwood APRS mode
}
}
// Solution for #141 - yaesu FTM-400XDR packet monitor
def yaesu2aprs(line1 : String, line2 : String) = {
Log.d(TAG, "line1: " + line1)
Log.d(TAG, "line2: " + line2)
// remove the timestamp and UI meta data from first line, concatenate with second line using ":"
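		// hypothetical example (not from the original source): a monitor line such as
		//   "N0CALL>APRS,WIDE1-1 [12/31 23:59:59] <UI R>:" followed by "!4903.50N/07201.75W-"
		// would be rewritten to "N0CALL>APRS,WIDE1-1:!4903.50N/07201.75W-"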
		line1.replaceAll(" \\[[0-9/: ]+\\] <UI ?[A-Z]?>:$", ":") + line2
}
def readPacket() : String = {
var line = br.readLine()
// loop: read a non-empty line
while (line == null || line.length() == 0)
line = br.readLine()
if (line.contains("] <UI") && line.endsWith(">:"))
return yaesu2aprs(line, br.readLine())
Log.d(TAG, "got " + line)
return wpl2aprs(line)
}
def writePacket(p : APRSPacket) {
// don't do anything. yet.
}
def onNmeaReceived(timestamp : Long, nmea : String) {
if (output != null && (nmea.startsWith("$GPGGA") || nmea.startsWith("$GPRMC"))) {
Log.d(TAG, "NMEA >>> " + nmea)
try {
implicit val ec = scala.concurrent.ExecutionContext.global
scala.concurrent.Future {
output.write(nmea)
output.flush()
}
if (service.prefs.getBoolean("kenwood.gps_debug", false))
service.postAddPost(StorageDatabase.Post.TYPE_TX,
R.string.p_conn_kwd, nmea.trim())
} catch {
case e : Exception =>
Log.e(TAG, "error sending NMEA to Kenwood: " + e)
e.printStackTrace()
}
} else
Log.d(TAG, "NMEA --- " + nmea)
}
class NmeaListenerR5 extends GpsStatus.NmeaListener() {
def onNmeaReceived(timestamp : Long, nmea : String) = KenwoodProto.this.onNmeaReceived(timestamp, nmea)
}
class NmeaListenerR24 extends OnNmeaMessageListener() {
def onNmeaMessage(nmea : String, timestamp : Long) = KenwoodProto.this.onNmeaReceived(timestamp, nmea)
}
class LocationSinkhole extends LocationListener {
override def onLocationChanged(location : Location) {
}
override def onProviderDisabled(provider : String) {
}
override def onProviderEnabled(provider : String) {
}
override def onStatusChanged(provider : String, st: Int, extras : Bundle) {
}
}
override def stop() {
locMan.removeUpdates(sinkhole)
if (listenerR5 != null)
locMan.removeNmeaListener(listenerR5)
if (listenerR24 != null)
locMan.removeNmeaListener(listenerR24)
super.stop()
}
}
|
ge0rg/aprsdroid
|
src/tncproto/KenwoodProto.scala
|
Scala
|
gpl-2.0
| 4,523 |
/* Copyright 2009-2016 EPFL, Lausanne */
object LiteralMaps {
def test(): Map[Int, Int] = {
Map(1 -> 2, 3 -> 4, (5, 6))
}
def test2(): (Int, Int) = {
1 -> 2
}
def test3(): Map[Int, Int] = {
Map[Int, Int]()
}
def test4(): Map[Int, Int] = {
Map.empty[Int, Int]
}
def test5(): Map[Int, Int] = {
Map.empty[Int, Int]
}
}
|
regb/leon
|
src/test/resources/regression/verification/newsolvers/valid/LiteralMaps.scala
|
Scala
|
gpl-3.0
| 362 |
/**
* The MIT License (MIT)
*
* Copyright (c) 2018 Israel Freitas([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
package ifreitas.scalaaiml.elements
case class IntervalFrom(expression: TemplateExpression*) extends TemplateExpression {
def toXml = <from>{ expression.toXml }</from>
}
|
ifreitas/AimlToXml
|
src/main/scala/ifreitas/scalaaiml/elements/IntervalFrom.scala
|
Scala
|
mit
| 1,358 |
package com.sksamuel.elastic4s.requests.searches.queries.matches
import com.sksamuel.elastic4s.requests.analyzers.Analyzer
import com.sksamuel.elastic4s.requests.common.Operator
import com.sksamuel.elastic4s.requests.searches.queries.Query
import com.sksamuel.exts.OptionImplicits._
case class MatchBoolPrefix(field: String,
value: Any,
analyzer: Option[String] = None,
queryName: Option[String] = None,
boost: Option[Double] = None,
minimumShouldMatch: Option[String] = None,
operator: Option[Operator] = None,
fuzziness: Option[String] = None,
prefixLength: Option[Int] = None,
maxExpansions: Option[Int] = None,
fuzzyTranspositions: Option[Boolean] = None,
fuzzyRewrite: Option[String] = None)
extends Query {
def analyzer(a: Analyzer): MatchBoolPrefix = analyzer(a.name)
def analyzer(name: String): MatchBoolPrefix = copy(analyzer = name.some)
def queryName(queryName: String): MatchBoolPrefix = copy(queryName = queryName.some)
def boost(boost: Double): MatchBoolPrefix = copy(boost = boost.some)
def minimumShouldMatch(minimum: String): MatchBoolPrefix = copy(minimumShouldMatch = minimum.some)
def operator(operator: Operator): MatchBoolPrefix = copy(operator = operator.some)
def fuzziness(fuzziness: String): MatchBoolPrefix = copy(fuzziness = fuzziness.some)
def prefixLength(prefix: Int): MatchBoolPrefix = copy(prefixLength = prefix.some)
def maxExpansions(max: Int): MatchBoolPrefix = copy(maxExpansions = max.some)
def fuzzyTranspositions(fuzzy: Boolean): MatchBoolPrefix = copy(fuzzyTranspositions = fuzzy.some)
def fuzzyRewrite(fuzzy: String): MatchBoolPrefix = copy(fuzzyRewrite = fuzzy.some)
}
|
stringbean/elastic4s
|
elastic4s-core/src/main/scala/com/sksamuel/elastic4s/requests/searches/queries/matches/MatchBoolPrefix.scala
|
Scala
|
apache-2.0
| 2,031 |
/*
* Copyright 2017 Mediative
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mediative.sangria.codegen
package starwars
import org.scalatest.{WordSpec, Matchers}
import com.mediative.sangria.codegen.GraphQLDomain
@GraphQLDomain(
"samples/starwars/schema.graphql",
"samples/starwars/MultiQuery.graphql"
)
object StarWarsDomain
class StarWarsAnnotationSpec extends WordSpec with Matchers {
"GraphQLDomain" should {
"support the Star Wars schema and operations" in {
import StarWarsDomain.HeroAndFriends
val heroAndFriends = HeroAndFriends(
hero = HeroAndFriends.Hero(
name = Some("Luke"),
friends = Some(List(Some(HeroAndFriends.Hero.Friends(Some("R2D2")))))
)
)
assert(heroAndFriends.hero.friends.get.head.get.name.get == "R2D2")
}
}
}
|
mediative/sangria-codegen
|
sangria-codegen/src/test/scala/com.mediative.sangria.codegen.starwars/StarWarsAnnotationSpec.scala
|
Scala
|
apache-2.0
| 1,345 |
package com.edinhodzic.client.domain
class Resource(data: String)
|
edinhodzic/jersey-rest-client
|
src/test/scala/com/edinhodzic/client/domain/Resource.scala
|
Scala
|
apache-2.0
| 67 |
package net.categoricaldata.category.functor.withSmallSource
import net.categoricaldata.category._
trait withFinitelyGeneratedTarget extends functor.withSmallSource.withLocallyFinitelyGeneratedTarget with Functor.withFinitelyGeneratedTarget
|
JasonGross/categoricaldata
|
src/main/scala/net/categoricaldata/category/functor/withSmallSource/withFinitelyGeneratedTarget.scala
|
Scala
|
mit
| 241 |
package edu.usc.irds.sparkler.util
import edu.usc.irds.sparkler.model.SparklerJob
import collection.JavaConverters._
object HealthChecks {
def checkFailureRate(job: SparklerJob): Boolean ={
if(job.getConfiguration.containsKey("fetcher.kill.failure.percent")) {
import org.apache.solr.common.params.MapSolrParams
val solrClient = job.getStorageFactory.getProxy
val q = "crawl_id:" + job.getId
val queryParamMap : Map[String,String] = Map("q" -> q,
"facet.field" -> "status", "facet" -> "on", "rows"->"0")
val queryParams = new MapSolrParams(queryParamMap.asJava)
/*val response = solrClient.getClient().query(queryParams)
val documents = response.getFacetField("status")
val values = documents.getValues.asScala
var total : Long = 0
var err : Long = 0
for(v <- values){
if(v.getName == "ERROR"){
err = v.getCount
}
total = total + v.getCount
}
val currenterrrate = err/total*100
if(currenterrrate > job.getConfiguration.get("fetcher.kill.failure.percent").asInstanceOf[Double]){
true
} else{
false
}*/
false
} else{
false
}
}
}
|
USCDataScience/sparkler
|
sparkler-app/src/main/scala/edu/usc/irds/sparkler/util/HealthChecks.scala
|
Scala
|
apache-2.0
| 1,216 |
import org.scalatestplus.play._
import play.api.test._
import play.api.test.Helpers._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
/**
* Add your integration spec here.
* An integration test will fire up a whole play application in a real (or headless) browser.
*/
@RunWith(classOf[JUnitRunner])
class IntegrationSpec extends PlaySpec with OneServerPerTest with OneBrowserPerTest with HtmlUnitFactory {
"Application" should {
"work from within a browser" in {
go to ("http://localhost:" + port)
pageSource must include ("Your new application is ready.")
}
}
}
|
play2-maven-plugin/play2-maven-test-projects
|
play25/scala/starter-example/test/IntegrationSpec.scala
|
Scala
|
apache-2.0
| 619 |
package mr.merc.music
import java.io.File
import scala.util.Random
import mr.merc.sound.Sound
import mr.merc.conf.Conf
import scalafx.scene.media.MediaPlayer.Status
object MusicPlayer {
private val pathPrefix = "/music/"
val trackList: List[String] = List("Spring_Mvt_1_Allegro.mp3", "Summer_Mvt_3_Presto.mp3",
"Autumn_Mvt_1_Allegro.mp3", "Winter_Mvt_1_Allegro_non_molto.mp3")
private def randomSong: String = pathPrefix + trackList(Random.nextInt(trackList.size))
def playMusic(): Unit = {
if (Conf.bool("Music")) {
new Sound(randomSong, playMusic).play()
}
}
}
|
RenualdMarch/merc
|
src/main/scala/mr/merc/music/MusicPlayer.scala
|
Scala
|
gpl-3.0
| 596 |
package com.gettyimages.akka.swagger
import scala.reflect.runtime.universe._
import org.json4s._
import org.json4s.jackson.JsonMethods._
import org.scalatest.Matchers
import org.scalatest.WordSpec
import com.gettyimages.akka.swagger.samples._
import akka.actor.ActorSystem
import akka.http._
import akka.http.scaladsl._
import akka.http.scaladsl.client._
import akka.http.scaladsl.client.RequestBuilding._
import akka.http.scaladsl.marshalling._
import akka.http.scaladsl.model._
import akka.http.scaladsl.model.ContentTypes
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.testkit._
import akka.http.scaladsl.unmarshalling._
import akka.stream.ActorMaterializer
import spray.json._
import spray.json.DefaultJsonProtocol._
import spray.json.pimpString
class SwaggerHttpServiceSpec
extends WordSpec
with Matchers
with ScalatestRouteTest {
val myMaterializer = materializer
val swaggerService = new SwaggerHttpService with HasActorSystem {
override implicit val actorSystem: ActorSystem = system
override implicit val materializer: ActorMaterializer = myMaterializer
override val apiTypes = Seq(typeOf[PetHttpService], typeOf[UserHttpService])
override val basePath = "/api"
override val host = "http://some.domain.com"
// override def apiVersion = "2.0"
// override def baseUrl = "http://some.domain.com/api"
// override def docsPath = "docs-are-here"
//apiInfo, not used
//authorizations, not used
}
implicit val formats = org.json4s.DefaultFormats
"The SwaggerHttpService" when {
"accessing the root doc path" should {
"return the basic set of api info" in {
Get("/swagger.json") ~> swaggerService.routes ~> check {
handled shouldBe true
contentType shouldBe ContentTypes.`application/json`
val str = responseAs[String]
val response = parse(str)
        (response \ "swagger").extract[String] shouldEqual "2.0"
        val paths = (response \ "paths").extract[JObject]
        paths.values.size shouldEqual 2
        val petPath = (paths \ "/pet")
        (petPath \ "post" \ "summary").extract[String] shouldEqual "Add a new pet to the store"
}
}
}
}
}
|
rleibman/akka-http-swagger
|
src/test/scala/com/gettyimages/akka/swagger/SwaggerHttpServiceSpec.scala
|
Scala
|
apache-2.0
| 2,244 |
package mesosphere.marathon.core.appinfo.impl
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.appinfo.{ TaskStatsByVersion, TaskStats, AppInfo, EnrichedTask, TaskCounts }
import mesosphere.marathon.core.base.ConstantClock
import mesosphere.marathon.health.{ Health, HealthCheckManager }
import mesosphere.marathon.state._
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.marathon.upgrade.DeploymentManager.DeploymentStepInfo
import mesosphere.marathon.upgrade.{ DeploymentPlan, DeploymentStep }
import mesosphere.marathon.{ MarathonSchedulerService, MarathonSpec }
import mesosphere.util.Mockito
import org.apache.mesos.Protos
import org.scalatest.{ GivenWhenThen, Matchers }
import play.api.libs.json.Json
import scala.collection.immutable.Seq
import scala.concurrent.Future
import scala.concurrent.duration._
class AppInfoBaseDataTest extends MarathonSpec with GivenWhenThen with Mockito with Matchers {
import org.scalatest.concurrent.ScalaFutures._
class Fixture {
lazy val clock = ConstantClock()
lazy val taskTracker = mock[TaskTracker]
lazy val healthCheckManager = mock[HealthCheckManager]
lazy val marathonSchedulerService = mock[MarathonSchedulerService]
lazy val taskFailureRepository = mock[TaskFailureRepository]
lazy val baseData = new AppInfoBaseData(
clock,
taskTracker,
healthCheckManager,
marathonSchedulerService,
taskFailureRepository
)
def verifyNoMoreInteractions(): Unit = {
noMoreInteractions(taskTracker)
noMoreInteractions(healthCheckManager)
noMoreInteractions(marathonSchedulerService)
noMoreInteractions(taskFailureRepository)
}
}
val app = AppDefinition(PathId("/test"))
val other = AppDefinition(PathId("/other"))
test("not embedding anything results in no calls") {
val f = new Fixture
When("getting AppInfos without embeds")
val appInfo = f.baseData.appInfoFuture(app, Set.empty).futureValue
Then("we get an empty appInfo")
appInfo should be(AppInfo(app))
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("requesting tasks retrieves tasks from taskTracker and health infos") {
val f = new Fixture
Given("three tasks in the task tracker")
val running1 = MarathonTask
.newBuilder()
.setId("task1")
.setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_RUNNING).buildPartial())
.buildPartial()
val running2 = running1.toBuilder.setId("task2").buildPartial()
val running3 = running1.toBuilder.setId("task3").buildPartial()
f.taskTracker.getTasks(app.id) returns Set(running1, running2, running3)
val alive = Health("task2", lastSuccess = Some(Timestamp(1)))
val unhealthy = Health("task3", lastFailure = Some(Timestamp(1)))
f.healthCheckManager.statuses(app.id) returns Future.successful(
Map(
running1.getId -> Seq.empty,
running2.getId -> Seq(alive),
running3.getId -> Seq(unhealthy)
)
)
When("requesting AppInfos with tasks")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Tasks)).futureValue
Then("we get a tasks object in the appInfo")
appInfo.maybeTasks should not be empty
appInfo.maybeTasks.get.map(_.appId.toString) should have size (3)
appInfo.maybeTasks.get.map(_.task.getId).toSet should be (Set("task1", "task2", "task3"))
appInfo should be(AppInfo(app, maybeTasks = Some(
Seq(
EnrichedTask(app.id, running1, Seq.empty),
EnrichedTask(app.id, running2, Seq(alive)),
EnrichedTask(app.id, running3, Seq(unhealthy))
)
)))
And("the taskTracker should have been called")
verify(f.taskTracker, times(1)).getTasks(app.id)
And("the healthCheckManager as well")
verify(f.healthCheckManager, times(1)).statuses(app.id)
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("requesting task counts only retrieves tasks from taskTracker and health stats") {
val f = new Fixture
Given("one staged and two running tasks in the taskTracker")
val staged = MarathonTask
.newBuilder()
.setId("task1")
.setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_STAGING).buildPartial())
.buildPartial()
val running = MarathonTask
.newBuilder()
.setId("task2")
.setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_RUNNING).buildPartial())
.buildPartial()
val running2 = running.toBuilder.setId("task3").buildPartial()
f.taskTracker.getTasks(app.id) returns Set(staged, running, running2)
f.healthCheckManager.statuses(app.id) returns Future.successful(
Map(
"task1" -> Seq(),
"task2" -> Seq(Health("task2", lastFailure = Some(Timestamp(1)))),
"task3" -> Seq(Health("task3", lastSuccess = Some(Timestamp(2))))
)
)
When("requesting AppInfos with counts")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Counts)).futureValue
Then("we get counts object in the appInfo")
appInfo should be(AppInfo(app, maybeCounts = Some(
TaskCounts(tasksStaged = 1, tasksRunning = 2, tasksHealthy = 1, tasksUnhealthy = 1)
)))
And("the taskTracker should have been called")
verify(f.taskTracker, times(1)).getTasks(app.id)
And("the healthCheckManager as well")
verify(f.healthCheckManager, times(1)).statuses(app.id)
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("requesting deployments does not request anything else") {
val f = new Fixture
Given("One related and one unrelated deployment")
val emptyGroup = Group.empty
val relatedDeployment = DeploymentPlan(emptyGroup, emptyGroup.copy(apps = Set(app)))
val unrelatedDeployment = DeploymentPlan(emptyGroup, emptyGroup.copy(apps = Set(other)))
f.marathonSchedulerService.listRunningDeployments() returns Future.successful(Seq[DeploymentStepInfo](
DeploymentStepInfo(relatedDeployment, DeploymentStep(Seq.empty), 1),
DeploymentStepInfo(unrelatedDeployment, DeploymentStep(Seq.empty), 1)
))
When("Getting AppInfos without counts")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Deployments)).futureValue
Then("we get an counts in the appInfo")
appInfo should be(AppInfo(app, maybeDeployments = Some(
Seq(Identifiable(relatedDeployment.id))
)))
And("the marathonSchedulerService should have been called to retrieve the deployments")
verify(f.marathonSchedulerService, times(1)).listRunningDeployments()
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("requesting deployments does work if no deployments are running") {
val f = new Fixture
Given("No deployments")
f.marathonSchedulerService.listRunningDeployments() returns Future.successful(
Seq.empty[DeploymentStepInfo]
)
When("Getting AppInfos with deployments")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.Deployments)).futureValue
Then("we get an empty list of deployments")
appInfo should be(AppInfo(app, maybeDeployments = Some(
Seq.empty
)))
And("the marathonSchedulerService should have been called to retrieve the deployments")
verify(f.marathonSchedulerService, times(1)).listRunningDeployments()
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("requesting lastTaskFailure when one exists") {
val f = new Fixture
Given("One last taskFailure")
f.taskFailureRepository.current(app.id) returns Future.successful(Some(TaskFailureTestHelper.taskFailure))
When("Getting AppInfos with last task failures")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.LastTaskFailure)).futureValue
Then("we get the failure in the app info")
appInfo should be(AppInfo(app, maybeLastTaskFailure = Some(
TaskFailureTestHelper.taskFailure
)))
And("the taskFailureRepository should have been called to retrieve the failure")
verify(f.taskFailureRepository, times(1)).current(app.id)
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("requesting lastTaskFailure when None exist") {
val f = new Fixture
Given("no taskFailure")
f.taskFailureRepository.current(app.id) returns Future.successful(None)
When("Getting AppInfos with last task failures")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.LastTaskFailure)).futureValue
Then("we get no failure in the app info")
appInfo should be(AppInfo(app))
And("the taskFailureRepository should have been called to retrieve the failure")
verify(f.taskFailureRepository, times(1)).current(app.id)
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("requesting taskStats") {
val f = new Fixture
Given("one staged and two running tasks in the taskTracker")
val staged = MarathonTask
.newBuilder()
.setId("task1")
.setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_STAGING).buildPartial())
.setStagedAt((f.clock.now() - 10.seconds).toDateTime.getMillis)
.buildPartial()
val running = MarathonTask
.newBuilder()
.setId("task2")
.setStatus(Protos.TaskStatus.newBuilder().setState(Protos.TaskState.TASK_RUNNING).buildPartial())
.setStagedAt((f.clock.now() - 11.seconds).toDateTime.getMillis)
.buildPartial()
val running2 = running.toBuilder.setId("task3").buildPartial()
val tasks: Set[MarathonTask] = Set(staged, running, running2)
f.taskTracker.getTasks(app.id) returns tasks
val statuses: Map[String, Seq[Health]] = Map(
"task1" -> Seq(),
"task2" -> Seq(Health("task2", lastFailure = Some(Timestamp(1)))),
"task3" -> Seq(Health("task3", lastSuccess = Some(Timestamp(2))))
)
f.healthCheckManager.statuses(app.id) returns Future.successful(statuses)
When("requesting AppInfos with taskStats")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.TaskStats)).futureValue
Then("we get taskStats object in the appInfo")
// we check the calculation of the stats in TaskStatsByVersionTest, so we only check some basic stats
import mesosphere.marathon.api.v2.json.Formats._
withClue(Json.prettyPrint(Json.toJson(appInfo))) {
appInfo.maybeTaskStats should not be empty
appInfo.maybeTaskStats.get.maybeTotalSummary should not be empty
appInfo.maybeTaskStats.get.maybeTotalSummary.get.counts.tasksStaged should be (1)
appInfo.maybeTaskStats.get.maybeTotalSummary.get.counts.tasksRunning should be (2)
appInfo should be(AppInfo(
app,
maybeTaskStats = Some(TaskStatsByVersion(f.clock.now(), app.versionInfo, tasks, statuses))
))
}
And("the taskTracker should have been called")
verify(f.taskTracker, times(1)).getTasks(app.id)
And("the healthCheckManager as well")
verify(f.healthCheckManager, times(1)).statuses(app.id)
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
test("Combining embed options work") {
val f = new Fixture
Given("One last taskFailure and no deployments")
f.taskFailureRepository.current(app.id) returns Future.successful(Some(TaskFailureTestHelper.taskFailure))
f.marathonSchedulerService.listRunningDeployments() returns Future.successful(
Seq.empty[DeploymentStepInfo]
)
When("Getting AppInfos with last task failures and deployments")
val appInfo = f.baseData.appInfoFuture(app, Set(AppInfo.Embed.LastTaskFailure, AppInfo.Embed.Deployments)).futureValue
Then("we get the failure in the app info")
appInfo should be(AppInfo(
app,
maybeLastTaskFailure = Some(TaskFailureTestHelper.taskFailure),
maybeDeployments = Some(Seq.empty)
))
And("the taskFailureRepository should have been called to retrieve the failure")
verify(f.taskFailureRepository, times(1)).current(app.id)
And("the marathonSchedulerService should have been called to retrieve the deployments")
verify(f.marathonSchedulerService, times(1)).listRunningDeployments()
And("we have no more interactions")
f.verifyNoMoreInteractions()
}
}
|
Yhgenomics/marathon
|
src/test/scala/mesosphere/marathon/core/appinfo/impl/AppInfoBaseDataTest.scala
|
Scala
|
apache-2.0
| 12,365 |
package com.github.yuiskw
import org.scalatest.FunSuite
class EventLogSuite extends FunSuite {
test("generate dummy") {
val eventLog = EventLog.getDummy()
assert(eventLog.userId.isInstanceOf[Int])
assert(EventLog.EVENT_TYPES.contains(eventLog.eventId))
assert(eventLog.timestamp.isInstanceOf[Long])
}
test("convert to JSON string") {
val eventLog = new EventLog(1, "view", 1490159332314L)
val json = eventLog.toJson
    assert(json === "{\"userId\":1,\"eventId\":\"view\",\"timestamp\":1490159332314}")
}
}
|
yu-iskw/spark-streaming-with-google-cloud-example
|
src/test/scala/com/github/yuiskw/EventLogSuite.scala
|
Scala
|
apache-2.0
| 543 |
package scalautils
/**
* Document me!
*
* @author Holger Brandl
*/
/** implicit string utilities. To use them import de.mpicbg.rink.plantx.StringUtils._ */
//https://www.safaribooksonline.com/library/view/scala-cookbook/9781449340292/ch01s11.html
object StringUtils {
implicit class ImplStringUtils(s: String) {
///http://stackoverflow.com/a/6061104/590437
    def stripLeadingWS = s.split("\n").map(_.trim).mkString("\n").trim
    def println() = Console.println(s)
    def alignLeft = {
      val split = s.split("\n")
      val minWS = split.filter(_.trim.length > 0).map(line => {
        line.split("\\S").headOption.getOrElse("").length
        // or use lift(3) see http://stackoverflow.com/questions/4981689/get-item-in-the-list-in-scala
      }).min
      // http://stackoverflow.com/questions/10922237/scala-min-max-with-optiont-for-possibly-empty-seq
      // sorted.headOption.getOrElse(0)
      split.map(line => if (line.trim.isEmpty) "" else line.substring(minWS)).mkString("\n")
    }
}
}
}
//object Test extends App{
// import StringUtils._
// Seq("", " test").mkString("\\n").alignLeft.mkString.println
//
//}
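
// A minimal usage sketch (not part of the original file): it relies only on the
// implicits defined above; the sample string is made up for illustration.
object StringUtilsUsageSketch extends App {
  import StringUtils._

  val block = "  first line\n    second line\n  third line"
  // trims every line, dropping all indentation
  block.stripLeadingWS.println()
  // removes only the common leading whitespace, keeping relative indentation
  block.alignLeft.println()
}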
|
holgerbrandl/scalautils
|
src/main/scala/scalautils/StringUtils.scala
|
Scala
|
bsd-2-clause
| 1,158 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2015 Lookout, Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.lookout.borderpatrol.example
object model {
}
|
jamescway/borderpatrol
|
example/src/main/scala/com/lookout/borderpatrol/example/model.scala
|
Scala
|
mit
| 1,205 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml
import scala.reflect.ClassTag
import com.intel.analytics.bigdl.dataset.{DataSet, MiniBatch}
import com.intel.analytics.bigdl.{Criterion, Module}
import com.intel.analytics.bigdl.optim.{Adam, Optimizer, Trigger}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import org.apache.spark.ml.param.{IntParam, ParamMap, ParamValidators}
import org.apache.spark.sql.DataFrame
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
/**
* [[DLEstimator]] helps to train a BigDL Model with the Spark ML Estimator/Transfomer pattern,
* thus Spark users can conveniently fit BigDL into Spark ML pipeline.
*
* The feature column holds the storage (Spark Vectors or array of Floats or Doubles) of
* the feature data, and user should specify the tensor size(dimensions) via param featureSize.
* The label column holds the storage (Spark Vectors, array of Floats or Doubles, or Double) of
* the label data, and user should specify the tensor size(dimensions) via param labelSize.
* Internally the feature and label data are converted to BigDL tensors, to further train a
* BigDL model efficiently.
*
* For details usage, please refer to example :
* [[com.intel.analytics.bigdl.example.MLPipeline.DLEstimatorLeNet]]
*
* @param model module to be optimized
* @param criterion criterion method
* @param featureSize The size (Tensor dimensions) of the feature data.
* @param labelSize The size (Tensor dimensions) of the label data.
*/
class DLEstimator[@specialized(Float, Double) T: ClassTag](
val model: Module[T],
val criterion : Criterion[T],
val featureSize : Array[Int],
val labelSize : Array[Int],
override val uid: String = "DLEstimator"
)(implicit ev: TensorNumeric[T]) extends DLEstimatorBase with DLParams with HasBatchSize {
def setFeaturesCol(featuresColName: String): this.type = set(featuresCol, featuresColName)
def setLabelCol(labelColName : String) : this.type = set(labelCol, labelColName)
def setPredictionCol(value: String): this.type = set(predictionCol, value)
def setBatchSize(value: Int): this.type = set(batchSize, value)
val maxEpoch = new IntParam(this, "maxEpoch", "number of max Epoch", ParamValidators.gt(0))
setDefault(maxEpoch -> 20)
def getMaxEpoch: Int = $(maxEpoch)
def setMaxEpoch(value: Int): this.type = set(maxEpoch, value)
override def transformSchema(schema : StructType): StructType = {
validateAndTransformSchema(schema)
}
protected override def internalFit(dataFrame: DataFrame): DLTransformerBase = {
val batches = toMiniBatch(dataFrame)
val dataset = DataSet.rdd(batches)
val optimizer = Optimizer(model, dataset, criterion)
.setOptimMethod(new Adam[T]())
.setEndWhen(Trigger.maxEpoch($(maxEpoch)))
val optimizedModel = optimizer.optimize()
val dlModel = new DLModel[T](optimizedModel, featureSize)
copyValues(dlModel.setParent(this))
}
/**
* Extract and reassemble data according to batchSize
*/
private def toMiniBatch(dataFrame: DataFrame) : RDD[MiniBatch[T]] = {
val featureArrayCol = if (dataFrame.schema($(featuresCol)).dataType.isInstanceOf[ArrayType]) {
$(featuresCol)
} else {
getFeatureArrayCol
}
val featureColIndex = dataFrame.schema.fieldIndex(featureArrayCol)
val labelArrayCol = if (dataFrame.schema($(labelCol)).dataType.isInstanceOf[ArrayType]) {
$(labelCol)
} else {
getLabelArrayCol
}
val labelColIndex = dataFrame.schema.fieldIndex(labelArrayCol)
val featureType = dataFrame.schema(featureArrayCol).dataType.asInstanceOf[ArrayType].elementType
val labelType = dataFrame.schema(labelArrayCol).dataType.asInstanceOf[ArrayType].elementType
/**
* since model data type (float or double) and feature data element type does not necessarily
* comply, we need to extract data from feature column and convert according to model type.
*/
val featureAndLabelData = dataFrame.rdd.map { row =>
val featureData = featureType match {
case DoubleType =>
row.getSeq[Double](featureColIndex).toArray.map(ev.fromType(_))
case FloatType =>
row.getSeq[Float](featureColIndex).toArray.map(ev.fromType(_))
}
require(featureData.length == featureSize.product, s"Data length mismatch:" +
s" feature data length ${featureData.length}, featureSize: ${featureSize.mkString(", ")}")
val labelData = labelType match {
case DoubleType =>
row.getSeq[Double](labelColIndex).toArray.map(ev.fromType(_))
case FloatType =>
row.getSeq[Float](labelColIndex).toArray.map(ev.fromType(_))
}
      require(labelData.length == labelSize.product, s"Data length mismatch:" +
        s" label data length ${labelData.length}, labelSize: ${labelSize.mkString(", ")}")
(featureData, labelData)
}
featureAndLabelData.mapPartitions { rows =>
val batches = rows.grouped($(batchSize)).map { batch =>
val featureData = batch.flatMap(_._1).toArray
val labelData = batch.flatMap(_._2).toArray
MiniBatch[T](
Tensor(featureData, Array(batch.length) ++ featureSize),
Tensor(labelData, Array(batch.length) ++ labelSize))
}
batches
}
}
override def copy(extra: ParamMap): DLEstimator[T] = {
copyValues(new DLEstimator(model, criterion, featureSize, labelSize), extra)
}
}
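
// A minimal usage sketch (not part of the original file): it assumes a toy
// two-feature regression model built from BigDL's Sequential/Linear layers with an
// MSECriterion, and a caller-supplied DataFrame whose hypothetical "features"
// column holds arrays of length 2 and whose "label" column holds arrays of
// length 1, matching the featureSize/labelSize passed to the estimator below.
object DLEstimatorUsageSketch {
  import com.intel.analytics.bigdl.nn.{Linear, MSECriterion, Sequential}
  import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric.NumericFloat

  def fitToyModel(trainingData: DataFrame) = {
    // two input features mapped onto a single predicted value
    val model = Sequential[Float]().add(Linear[Float](2, 1))
    val estimator = new DLEstimator[Float](model, MSECriterion[Float](), Array(2), Array(1))
      .setFeaturesCol("features")
      .setLabelCol("label")
      .setBatchSize(4)
      .setMaxEpoch(5)
    // returns a fitted model usable as a Spark ML Transformer
    estimator.fit(trainingData)
  }
}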
|
122689305/BigDL
|
spark/dl/src/main/scala/org/apache/spark/ml/DLEstimator.scala
|
Scala
|
apache-2.0
| 6,103 |
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperables.spark.wrappers.estimators
import org.apache.spark.ml.feature.{Word2Vec => SparkWord2Vec, Word2VecModel => SparkWord2VecModel}
import io.deepsense.deeplang.doperables.SparkSingleColumnEstimatorWrapper
import io.deepsense.deeplang.doperables.spark.wrappers.models.Word2VecModel
import io.deepsense.deeplang.doperables.spark.wrappers.params.Word2VecParams
import io.deepsense.deeplang.params.Param
class Word2VecEstimator
extends SparkSingleColumnEstimatorWrapper[SparkWord2VecModel, SparkWord2Vec, Word2VecModel]
with Word2VecParams {
override lazy val stepSizeDefault = 0.025
override lazy val maxIterationsDefault = 1.0
override protected def getSpecificParams: Array[Param[_]] = Array(
maxIterations,
stepSize,
seed,
vectorSize,
numPartitions,
minCount)
}
|
deepsense-io/seahorse-workflow-executor
|
deeplang/src/main/scala/io/deepsense/deeplang/doperables/spark/wrappers/estimators/Word2VecEstimator.scala
|
Scala
|
apache-2.0
| 1,436 |
package mesosphere.marathon
package core.launcher.impl
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.instance.update.InstanceUpdateOperation
import mesosphere.marathon.core.launcher.{ InstanceOp, InstanceOpFactory }
import mesosphere.marathon.core.matcher.base.util.OfferOperationFactory
import mesosphere.marathon.core.task.Task
import org.apache.mesos.{ Protos => Mesos }
class InstanceOpFactoryHelper(
private val principalOpt: Option[String],
private val roleOpt: Option[String]) {
private[this] val offerOperationFactory = new OfferOperationFactory(principalOpt, roleOpt)
def launchEphemeral(
taskInfo: Mesos.TaskInfo,
newTask: Task,
instance: Instance): InstanceOp.LaunchTask = {
assume(newTask.taskId.mesosTaskId == taskInfo.getTaskId, "marathon task id and mesos task id must be equal")
def createOperations = Seq(offerOperationFactory.launch(taskInfo))
val stateOp = InstanceUpdateOperation.LaunchEphemeral(instance)
InstanceOp.LaunchTask(taskInfo, stateOp, oldInstance = None, createOperations)
}
def launchEphemeral(
executorInfo: Mesos.ExecutorInfo,
groupInfo: Mesos.TaskGroupInfo,
launched: Instance.LaunchRequest): InstanceOp.LaunchTaskGroup = {
assume(
executorInfo.getExecutorId.getValue == launched.instance.instanceId.executorIdString,
"marathon pod instance id and mesos executor id must be equal")
def createOperations = Seq(offerOperationFactory.launch(executorInfo, groupInfo))
val stateOp = InstanceUpdateOperation.LaunchEphemeral(launched.instance)
InstanceOp.LaunchTaskGroup(executorInfo, groupInfo, stateOp, oldInstance = None, createOperations)
}
def launchOnReservation(
taskInfo: Mesos.TaskInfo,
newState: InstanceUpdateOperation.LaunchOnReservation,
oldState: Instance): InstanceOp.LaunchTask = {
assume(
oldState.isReserved,
"only a reserved instance can be re-launched")
def createOperations = Seq(offerOperationFactory.launch(taskInfo))
InstanceOp.LaunchTask(taskInfo, newState, Some(oldState), createOperations)
}
def launchOnReservation(
executorInfo: Mesos.ExecutorInfo,
groupInfo: Mesos.TaskGroupInfo,
newState: InstanceUpdateOperation.LaunchOnReservation,
oldState: Instance): InstanceOp.LaunchTaskGroup = {
def createOperations = Seq(offerOperationFactory.launch(executorInfo, groupInfo))
InstanceOp.LaunchTaskGroup(executorInfo, groupInfo, newState, Some(oldState), createOperations)
}
/**
* Returns a set of operations to reserve ALL resources (cpu, mem, ports, disk, etc.) and then create persistent
* volumes against them as needed
*/
@SuppressWarnings(Array("TraversableHead"))
def reserveAndCreateVolumes(
reservationLabels: ReservationLabels,
newState: InstanceUpdateOperation.Reserve,
resources: Seq[Mesos.Resource],
localVolumes: Seq[InstanceOpFactory.OfferedVolume]): InstanceOp.ReserveAndCreateVolumes = {
def createOperations =
offerOperationFactory.reserve(reservationLabels, resources) ++
offerOperationFactory.createVolumes(reservationLabels, localVolumes)
InstanceOp.ReserveAndCreateVolumes(newState, resources, createOperations)
}
}
|
janisz/marathon
|
src/main/scala/mesosphere/marathon/core/launcher/impl/InstanceOpFactoryHelper.scala
|
Scala
|
apache-2.0
| 3,258 |
package reactivemongo.api.commands
import reactivemongo.api.{
PackSupport,
SerializationPack,
Session,
WriteConcern
}
/**
* Implements the [[https://docs.mongodb.com/manual/reference/command/insert/ insert]] command.
*/
private[reactivemongo] trait InsertCommand[P <: SerializationPack] { self: PackSupport[P] =>
/**
* @param head the first mandatory document
* @param tail maybe other documents
*/
private[reactivemongo] final class Insert(
val head: pack.Document,
val tail: Seq[pack.Document],
val ordered: Boolean,
val writeConcern: WriteConcern,
val bypassDocumentValidation: Boolean) extends CollectionCommand with CommandWithResult[InsertResult] {
val commandKind = CommandKind.Insert
private[commands] lazy val tupled =
Tuple5(head, tail, ordered, writeConcern, bypassDocumentValidation)
override def equals(that: Any): Boolean = that match {
case other: this.type =>
other.tupled == this.tupled
case _ => false
}
override def hashCode: Int = tupled.hashCode
@inline override lazy val toString: String = {
val docs = (head +: tail).map(pack.pretty)
s"""Insert(${docs.mkString("[", ", ", "]")}, ${ordered.toString}, ${writeConcern.toString}, ${bypassDocumentValidation.toString})"""
}
}
private[reactivemongo] type InsertResult = DefaultWriteResult // for simplified imports
private[reactivemongo] final type InsertCmd = ResolvedCollectionCommand[Insert]
private[reactivemongo] def session(): Option[Session]
implicit private[reactivemongo] final lazy val insertWriter: pack.Writer[InsertCmd] = insertWriter(self.session())
private[reactivemongo] final def insertWriter(
session: Option[Session]): pack.Writer[InsertCmd] = {
val builder = pack.newBuilder
val writeWriteConcern = CommandCodecs.writeWriteConcern(pack)
val writeSession = CommandCodecs.writeSession(builder)
import builder.{ elementProducer => element }
pack.writer[InsertCmd] { insert =>
import insert.command
val documents = builder.array(command.head +: command.tail)
val ordered = builder.boolean(command.ordered)
val elements = Seq.newBuilder[pack.ElementProducer]
elements ++= Seq[pack.ElementProducer](
element("insert", builder.string(insert.collection)),
element("ordered", ordered),
element("documents", documents),
element(
"bypassDocumentValidation",
builder.boolean(command.bypassDocumentValidation)))
session.foreach { s =>
elements ++= writeSession(s)
}
if (!session.exists(_.transaction.isSuccess)) {
// writeConcern is not allowed within a multi-statement transaction
// code=72
elements += element(
"writeConcern", writeWriteConcern(command.writeConcern))
}
builder.document(elements.result())
}
}
}
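// A minimal sketch (not part of the original file) of the command document the
// writer above assembles for an ordered two-document insert outside a transaction;
// key order and the placeholder values are assumptions:
//
//   {
//     "insert": "someCollection",
//     "ordered": true,
//     "documents": [ { ... }, { ... } ],
//     "bypassDocumentValidation": false,
//     // session fields are appended here when a session is present
//     "writeConcern": { ... }  // skipped once a multi-statement transaction has started
//   }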
|
ReactiveMongo/ReactiveMongo
|
driver/src/main/scala/api/commands/InsertCommand.scala
|
Scala
|
apache-2.0
| 2,918 |
package sexybash
case class StringBlockAstFunctionBuilder(indentation: String, content: String, partialIndentation: String = "") extends AstFunctionBuilder {
import AstFunctionBuilder._
// TODO: no parser state is available here, so build relies on the accumulated content only
override def build = StringBlockAstFunction(content)
def process(c: Char, state: State): Either[Throwable, (State, AstFunctionBuilder)] = (state, c) match {
case (s, CarriageReturn) ⇒ Right((s, this))
case (NOOP, '[') ⇒ Right((STRINGBLOCKSTARTED, this))
case (NOOP, c: Char) ⇒ Left(new RuntimeException(s"Expecting [ in NOOP state but got [$c]."))
case (STRINGBLOCKSTARTED, Space | Tab) ⇒ Right((STRINGBLOCKSTARTED, this))
case (STRINGBLOCKSTARTED, NewLine) ⇒ Right((FIRSTINDENTATION, this))
case (STRINGBLOCKSTARTED, c: Char) ⇒ Left(new RuntimeException(s"Expecting space, tab or newline in STRINGBLOCKSTARTED state but got [$c]."))
case (FIRSTINDENTATION, Space | Tab) ⇒ Right((FIRSTINDENTATION, this.copy(indentation + c)))
case (FIRSTINDENTATION, NewLine) if indentation.isEmpty ⇒ Right((FIRSTINDENTATION, this.copy("")))
case (FIRSTINDENTATION, NewLine) ⇒ Right((INDENTATION, this))
case (FIRSTINDENTATION, c: Char) ⇒ Right((CONTENT, this.copy(content = content + c)))
case (CONTENT, NewLine) ⇒ Right((INDENTATION, this.copy(content = content + NewLine)))
case (CONTENT, c: Char) ⇒ Right((CONTENT, this.copy(content = content + c)))
case (INDENTATION, ']') ⇒ Right((END, this))
case (INDENTATION, c: Char) if matchesIndentation(c) ⇒ Right((CONTENT, this.copy(partialIndentation = "")))
case (INDENTATION, c: Char) if matchesPartialIndentation(c) ⇒
Right((INDENTATION, this.copy(partialIndentation = partialIndentation + c)))
case (INDENTATION, c: Char) ⇒ Left(new RuntimeException(s"Expecting correct indentation in INDENTATION state but got [$c], content is [$content]."))
}
private def isIndentation(c: Char) = Set(Space, Tab).contains(c)
private def matchesIndentation(c: Char) = isIndentation(c) && indentation == partialIndentation + c
private def matchesPartialIndentation(c: Char) = {
isIndentation(c) && partialIndentation + c == indentation.substring(0, (partialIndentation + c).size)
}
}
object StringBlockAstFunctionBuilder {
def apply: StringBlockAstFunctionBuilder = new StringBlockAstFunctionBuilder("", "", "")
}
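// A rough usage sketch (not part of the original file): thread each character of a
// string block through `process`, carrying the state and the immutable builder, then
// call `build` once the closing `]` has been consumed. It assumes the AstFunctionBuilder
// trait exposes the same `process`/`build` members and that `State` and `NOOP` live in
// the AstFunctionBuilder companion, as suggested by the import above.
//
//   val input = "[\n  hello\n  world\n]"
//   val start = (AstFunctionBuilder.NOOP, StringBlockAstFunctionBuilder.apply: AstFunctionBuilder)
//   val parsed = input.foldLeft[Either[Throwable, (AstFunctionBuilder.State, AstFunctionBuilder)]](Right(start)) {
//     case (acc, c) => acc.flatMap { case (state, builder) => builder.process(c, state) }
//   }
//   parsed.map { case (_, builder) => builder.build }
//   // => Right(StringBlockAstFunction("hello\nworld\n"))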
|
MarianoGappa/sexybash
|
src/main/scala/sexybash/StringBlockAstFunctionBuilder.scala
|
Scala
|
mit
| 2,716 |
package io.udash.web.guide.markdown
import com.avsystem.commons.misc.{AbstractValueEnum, AbstractValueEnumCompanion, EnumCtx}
final class MarkdownPage(val file: String)(implicit val ctx: EnumCtx) extends AbstractValueEnum
object MarkdownPage extends AbstractValueEnumCompanion[MarkdownPage] {
final val Intro: Value = new MarkdownPage("assets/pages/intro.md")
final val I18n: Value = new MarkdownPage("assets/pages/ext/i18n.md")
final val License: Value = new MarkdownPage("assets/pages/license.md")
}
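// A small usage sketch (not part of the original file), assuming AbstractValueEnumCompanion
// from AVSystem commons exposes the registered values as a `values` collection:
//
//   MarkdownPage.values.map(_.file)
//   // => Seq("assets/pages/intro.md", "assets/pages/ext/i18n.md", "assets/pages/license.md")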
|
UdashFramework/udash-guide
|
shared/src/main/scala/io/udash/web/guide/markdown/MarkdownPage.scala
|
Scala
|
gpl-3.0
| 510 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.table.stringexpr
import org.apache.flink.api.scala._
import org.apache.flink.table.api.Tumble
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.plan.utils.JavaUserDefinedAggFunctions.{WeightedAvg, WeightedAvgWithMergeAndReset}
import org.apache.flink.table.planner.utils.{CountAggFunction, CountMinMax, TableTestBase}
import org.junit.Test
class AggregateStringExpressionTest extends TableTestBase {
@Test
def testDistinctNonGroupedAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]("Table3")
val t1 = t.select('_1.sum.distinct, '_1.count.distinct, '_1.avg.distinct)
val t2 = t.select("_1.sum.distinct, _1.count.distinct, _1.avg.distinct")
val t3 = t.select("sum.distinct(_1), count.distinct(_1), avg.distinct(_1)")
verifyTableEquals(t1, t2)
verifyTableEquals(t1, t3)
}
@Test
def testDistinctGroupedAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]("Table3", 'a, 'b, 'c)
val t1 = t.groupBy('b).select('b, 'a.sum.distinct, 'a.sum)
val t2 = t.groupBy("b").select("b, a.sum.distinct, a.sum")
val t3 = t.groupBy("b").select("b, sum.distinct(a), sum(a)")
verifyTableEquals(t1, t2)
verifyTableEquals(t1, t3)
}
@Test
def testDistinctNonGroupAggregateWithUDAGG(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]("Table3", 'a, 'b, 'c)
val myCnt = new CountAggFunction
util.addFunction("myCnt", myCnt)
val myWeightedAvg = new WeightedAvgWithMergeAndReset
util.addFunction("myWeightedAvg", myWeightedAvg)
val t1 = t.select(myCnt.distinct('a) as 'aCnt, myWeightedAvg.distinct('b, 'a) as 'wAvg)
val t2 = t.select("myCnt.distinct(a) as aCnt, myWeightedAvg.distinct(b, a) as wAvg")
verifyTableEquals(t1, t2)
}
@Test
def testDistinctGroupedAggregateWithUDAGG(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]("Table3", 'a, 'b, 'c)
val myCnt = new CountAggFunction
util.addFunction("myCnt", myCnt)
val myWeightedAvg = new WeightedAvgWithMergeAndReset
util.addFunction("myWeightedAvg", myWeightedAvg)
val t1 = t.groupBy('b)
.select('b,
myCnt.distinct('a) + 9 as 'aCnt,
myWeightedAvg.distinct('b, 'a) * 2 as 'wAvg,
myWeightedAvg.distinct('a, 'a) as 'distAgg,
myWeightedAvg('a, 'a) as 'agg)
val t2 = t.groupBy("b")
.select("b, myCnt.distinct(a) + 9 as aCnt, myWeightedAvg.distinct(b, a) * 2 as wAvg, " +
"myWeightedAvg.distinct(a, a) as distAgg, myWeightedAvg(a, a) as agg")
verifyTableEquals(t1, t2)
}
@Test
def testGroupedAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]('int, 'long, 'string)
val weightAvgFun = new WeightedAvg
util.addFunction("weightAvgFun", weightAvgFun)
// Expression / Scala API
val resScala = t
.groupBy('string)
.select('int.count as 'cnt, weightAvgFun('long, 'int))
// String / Java API
val resJava = t
.groupBy("string")
.select("int.count as cnt, weightAvgFun(long, int)")
verifyTableEquals(resJava, resScala)
}
@Test
def testNonGroupedAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]('int, 'long, 'string)
// Expression / Scala API
val resScala = t.select('int.count as 'cnt, 'long.sum)
// String / Java API
val resJava = t.select("int.count as cnt, long.sum")
verifyTableEquals(resJava, resScala)
}
@Test
def testProctimeRename(): Unit = {
val util = streamTestUtil()
val t = util.addDataStream[(Int, Long, String)](
"T1", 'int, 'long, 'string, 'proctime.proctime)
// Expression / Scala API
val resScala = t
.window(Tumble over 50.milli on 'proctime as 'w1)
.groupBy('w1, 'string)
.select('w1.proctime as 'proctime, 'w1.start as 'start, 'w1.end as 'end, 'string, 'int.count)
// String / Java API
val resJava = t
.window(Tumble.over("50.milli").on("proctime").as("w1"))
.groupBy("w1, string")
.select("w1.proctime as proctime, w1.start as start, w1.end as end, string, int.count")
verifyTableEquals(resJava, resScala)
}
@Test
def testRowtimeRename(): Unit = {
val util = streamTestUtil()
val t = util.addDataStream[TestPojo](
"T1",'int, 'rowtime.rowtime, 'string)
// Expression / Scala API
val resScala = t
.window(Tumble over 50.milli on 'rowtime as 'w1)
.groupBy('w1, 'string)
.select('w1.rowtime as 'rowtime, 'string, 'int.count)
// String / Java API
val resJava = t
.window(Tumble.over("50.milli").on("rowtime").as("w1"))
.groupBy("w1, string")
.select("w1.rowtime as rowtime, string, int.count")
verifyTableEquals(resJava, resScala)
}
@Test
def testNonGroupedRowBasedAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]('a, 'b, 'c)
val testAgg = new CountMinMax
util.addFunction("testAgg", testAgg)
// Expression / Scala API
val resScala = t
.aggregate(testAgg('a))
.select('f0, 'f1)
// String / Java API
val resJava = t
.aggregate("testAgg(a)")
.select("f0, f1")
verifyTableEquals(resScala, resJava)
}
@Test
def testGroupedRowBasedAggregate(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]('a, 'b, 'c)
val testAgg = new CountMinMax
util.addFunction("testAgg", testAgg)
// Expression / Scala API
val resScala = t
.groupBy('b)
.aggregate(testAgg('a))
.select('b, 'f0, 'f1)
// String / Java API
val resJava = t
.groupBy("b")
.aggregate("testAgg(a)")
.select("b, f0, f1")
verifyTableEquals(resScala, resJava)
}
@Test
def testAggregateWithAlias(): Unit = {
val util = streamTestUtil()
val t = util.addTableSource[(Int, Long, String)]('a, 'b, 'c)
val testAgg = new CountMinMax
util.addFunction("testAgg", testAgg)
// Expression / Scala API
val resScala = t
.groupBy('b)
.aggregate(testAgg('a) as ('x, 'y, 'z))
.select('b, 'x, 'y)
// String / Java API
val resJava = t
.groupBy("b")
.aggregate("testAgg(a) as (x, y, z)")
.select("b, x, y")
verifyTableEquals(resScala, resJava)
}
}
class TestPojo() {
var int: Int = _
var long: Long = _
var string: String = _
}
|
hequn8128/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/table/stringexpr/AggregateStringExpressionTest.scala
|
Scala
|
apache-2.0
| 7,437 |
package benchmarks
import java.io.FileWriter
import scala.io.Source
object JMHtoCSV {
val BENCHMARK = "^# Benchmark: (.*)$".r
val THREADS = "^# Threads: (\\d+) thread.*$".r
val PARAMETERS = "^# Parameters: \\((.*)\\)$".r
val MODE = "^# Benchmark mode: (.*)$".r
val MEASUREMENT = "^Iteration .*: (\\d+)[.,](\\d+) .*?$".r
var outfiles = Map[String, FileWriter]()
var benchmark: String = _
var threads: String = _
var parameters: String = _
var mode: String = _
def main(args: Array[String]): Unit = {
try {
for (fileName <- args) {
println("processing " + fileName)
for ((line, lineNo) <- Source.fromFile(fileName).getLines().zipWithIndex) {
line match {
case BENCHMARK(benchmarkName) =>
benchmark = benchmarkName
case THREADS(threadCount) =>
threads = threadCount
case MODE(modeText) =>
mode = modeText
case PARAMETERS(allParams) =>
parameters = allParams.substring(allParams.indexOf('=') + 2).replaceAll(", [^=]+ = ", "\t")
if (!outfiles.contains(benchmark)) {
outfiles += benchmark -> new FileWriter(benchmark + ".txt")
outfiles(benchmark).write("srcfile\tthreads\t" + allParams.substring(
0,
allParams.lastIndexOf('=')
).replaceAll(" = [^=]+, ", "\t") + "\tmode\tmeasurement\n")
}
case MEASUREMENT(integer, decimal) =>
outfiles(benchmark).write(
fileName + "\t" + threads + "\t" + parameters + "\t" + mode + "\t" + integer + "," + decimal + "\n"
)
case _ => // ignore
}
}
}
} finally {
for (writer <- outfiles.values) writer.close()
}
println("done, written files:\n" + outfiles.keySet.mkString(".txt\n") + ".txt")
}
}
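// A hedged example (not part of the original file) of the JMH console output the
// regexes above are written against, and the TSV row it yields; the benchmark name
// and parameter names below are made up for illustration:
//
//   # Benchmark: benchmarks.simple.Mapping.run
//   # Threads: 4 threads, will synchronize iterations
//   # Benchmark mode: Throughput, ops/time
//   # Parameters: (engineName = parrp, size = 64)
//   Iteration   1: 12345,678 ops/s
//
// would append to benchmarks.simple.Mapping.run.txt, under a generated header with the
// columns srcfile, threads, engineName, size, mode, measurement, the row:
//
//   <inputFile>\t4\tparrp\t64\tThroughput, ops/time\t12345,678
//
// Invocation is assumed to be e.g. `runMain benchmarks.JMHtoCSV out1.log out2.log`,
// passing one or more captured JMH log files as arguments.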
|
guidosalva/REScala
|
Code/Microbenchmarks/src/main/scala/benchmarks/JMHtoCSV.scala
|
Scala
|
apache-2.0
| 1,946 |