code (stringlengths 5 to 1M) | repo_name (stringlengths 5 to 109) | path (stringlengths 6 to 208) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5 to 1M) |
---|---|---|---|---|---|
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.support.requisite.RequiresCharacterLevel
/**
* @todo
* add entry levels to allow for 24 or 27 vs minimum.
*/
protected[feats] trait MasterOfKnowledge extends Passive with RequiresCharacterLevel {
self: EpicFeat =>
override val requireCharacterLevel: Int = 24
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/MasterOfKnowledge.scala | Scala | apache-2.0 | 1,010 |
package io.buoyant.router.thrift
import com.twitter.finagle.buoyant.Dst
import com.twitter.finagle.thrift.{Protocols, ThriftClientRequest}
import com.twitter.finagle.{Dtab, Path}
import com.twitter.util.Future
import io.buoyant.router.RoutingFactory
import io.buoyant.router.RoutingFactory.{IdentifiedRequest, RequestIdentification}
import org.apache.thrift.protocol.TProtocolFactory
import org.apache.thrift.transport.TMemoryInputTransport
case class Identifier(
name: Path = Path.empty,
methodInDst: Boolean = false,
dtab: () => Dtab = () => Dtab.base,
protocol: TProtocolFactory = Protocols.binaryFactory()
) extends RoutingFactory.Identifier[ThriftClientRequest] {
private[this] def suffix(req: ThriftClientRequest): Path = {
if (methodInDst) {
val messageName = protocol.getProtocol(
new TMemoryInputTransport(req.message)
).readMessageBegin().name
Path.read(s"/$messageName")
} else {
Path.empty
}
}
def apply(req: ThriftClientRequest): Future[RequestIdentification[ThriftClientRequest]] = {
val dst = Dst.Path(name ++ Dest.local ++ suffix(req), dtab(), Dtab.local)
Future.value(new IdentifiedRequest[ThriftClientRequest](dst, req))
}
}
| denverwilliams/linkerd | router/thrift/src/main/scala/io/buoyant/router/thrift/Identifier.scala | Scala | apache-2.0 | 1,215 |
/*
* @author Philip Stutz
*
* Copyright 2011 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.features
import org.specs2.mutable._
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import com.signalcollect._
import com.signalcollect.interfaces._
import com.signalcollect.graphproviders._
import com.signalcollect.examples._
import com.signalcollect.configuration._
trait SpecConfigurations {
def computeGraphBuilders = List(GraphBuilder)
def numberOfWorkers = List(1, 2, 4, 8, 16, 32, 64, 128)
def executionModes = List(ExecutionMode.OptimizedAsynchronous, ExecutionMode.Synchronous)
def computeGraphs: Seq[Graph] = {
var computeGraphs = Seq[Graph]()
for (workers <- numberOfWorkers) {
for (computeGraphBuilder <- computeGraphBuilders) {
computeGraphs = computeGraphBuilder.build +: computeGraphs
}
}
computeGraphs
}
def executionConfigurations: Seq[ExecutionConfiguration] = {
var executionConfigurations = Seq[ExecutionConfiguration]()
for (executionMode <- executionModes) {
executionConfigurations = ExecutionConfiguration(executionMode = executionMode) +: executionConfigurations
}
executionConfigurations
}
}
| Tjoene/thesis | Case_Programs/signal-collect/src/test/scala/com/signalcollect/features/SpecConfigurations.scala | Scala | gpl-2.0 | 1,878 |
package models.daos
import com.mohiva.play.silhouette.api.LoginInfo
import com.mohiva.play.silhouette.impl.daos.DelegableAuthInfoDAO
import com.mohiva.play.silhouette.impl.providers.OAuth2Info
import models.daos.DBTableDefinitions._
import play.api.db.slick.Config.driver.simple._
import play.api.db.slick._
import scala.concurrent.Future
/**
* The DAO to store the OAuth2 information.
*/
class OAuth2InfoDAOSlick extends DelegableAuthInfoDAO[OAuth2Info] {
import play.api.Play.current
/**
* Saves the OAuth2 info.
*
* @param loginInfo The login info for which the auth info should be saved.
* @param authInfo The OAuth2 info to save.
* @return The saved OAuth2 info or None if the OAuth2 info couldn't be saved.
*/
def save(loginInfo: LoginInfo, authInfo: OAuth2Info): Future[OAuth2Info] = {
Future.successful(
DB withSession { implicit session =>
val infoId = slickLoginInfos.filter(
x => x.providerID === loginInfo.providerID && x.providerKey === loginInfo.providerKey
).first.id.get
slickOAuth2Infos.filter(_.loginInfoId === infoId).firstOption match {
case Some(info) =>
slickOAuth2Infos update DBOAuth2Info(info.id, authInfo.accessToken, authInfo.tokenType, authInfo.expiresIn, authInfo.refreshToken, infoId)
case None => slickOAuth2Infos insert DBOAuth2Info(1111, authInfo.accessToken, authInfo.tokenType, authInfo.expiresIn, authInfo.refreshToken, infoId)
}
authInfo
}
)
}
/**
* Finds the OAuth2 info which is linked with the specified login info.
*
* @param loginInfo The linked login info.
* @return The retrieved OAuth2 info or None if no OAuth2 info could be retrieved for the given login info.
*/
def find(loginInfo: LoginInfo): Future[Option[OAuth2Info]] = {
Future.successful(
DB withSession { implicit session =>
slickLoginInfos.filter(info => info.providerID === loginInfo.providerID && info.providerKey === loginInfo.providerKey).firstOption match {
case Some(info) =>
val oAuth2Info = slickOAuth2Infos.filter(_.loginInfoId === info.id).first
Some(OAuth2Info(oAuth2Info.accessToken, oAuth2Info.tokenType, oAuth2Info.expiresIn, oAuth2Info.refreshToken))
case None => None
}
}
)
}
}
| ibnuda/hasembuh | app/models/daos/OAuth2InfoDAOSlick.scala | Scala | apache-2.0 | 2,229 |
package utils.auth
import javax.inject.Inject
import com.mohiva.play.silhouette.api.actions.SecuredErrorHandler
import play.api.i18n.{ I18nSupport, Messages, MessagesApi }
import play.api.libs.json.Json
import play.api.mvc.{ RequestHeader, Result }
import play.api.mvc.Results._
import scala.concurrent.Future
/**
* Custom secured error handler.
*
* @param messagesApi The Play messages API.
*/
class CustomSecuredErrorHandler @Inject() (val messagesApi: MessagesApi) extends SecuredErrorHandler with I18nSupport {
/**
* Called when a user is not authenticated.
*
* As defined by RFC 2616, the status code of the response should be 401 Unauthorized.
*
* @param request The request header.
* @return The result to send to the client.
*/
override def onNotAuthenticated(implicit request: RequestHeader): Future[Result] = {
Future.successful(Unauthorized)
}
/**
* Called when a user is authenticated but not authorized.
*
* As defined by RFC 2616, the status code of the response should be 403 Forbidden.
*
* @param request The request header.
* @return The result to send to the client.
*/
override def onNotAuthorized(implicit request: RequestHeader): Future[Result] = {
Future.successful(Forbidden(Json.obj("error" -> "You are not allowed to access this resource")))
}
}
| SwaggerTagger/octo-tagger-backend | app/utils/auth/CustomSecuredErrorHandler.scala | Scala | mit | 1,343 |
package benchmarks.lattices.delta.crdt
import org.openjdk.jmh.annotations._
import rescala.extra.lattices.delta.crdt.reactive.PNCounter
import java.util.concurrent.TimeUnit
@BenchmarkMode(Array(Mode.Throughput))
@OutputTimeUnit(TimeUnit.MILLISECONDS)
@Warmup(iterations = 3, time = 1000, timeUnit = TimeUnit.MILLISECONDS)
@Measurement(iterations = 3, time = 1000, timeUnit = TimeUnit.MILLISECONDS)
@Fork(3)
@Threads(1)
@State(Scope.Thread)
class PNCounterBench {
@Param(Array("1", "10", "100", "1000"))
var numReplicas: Int = _
var counter: PNCounter = _
@Setup
def setup(): Unit = {
counter = (1 until numReplicas).foldLeft(PNCounter("0").inc()) {
case (c, n) =>
val delta = PNCounter(n.toString).inc().deltaBuffer.head
c.applyDelta(delta)
}
}
@Benchmark
def value(): Int = counter.value
@Benchmark
def inc(): PNCounter = counter.inc()
@Benchmark
def dec(): PNCounter = counter.dec()
}
| guidosalva/REScala | Code/Microbenchmarks/src/main/scala/benchmarks/lattices/delta/crdt/PNCounterBench.scala | Scala | apache-2.0 | 950 |
package pl.abankowski.musicbrainz.client.service
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import com.google.inject.Inject
import pl.abankowski.musicbrainz.client.config.Config
import pl.abankowski.musicbrainz.client.dto.RecordingId
import pl.abankowski.musicbrainz.client.dto.RecordingInfo
import pl.abankowski.musicbrainz.client.dto.ResourceResult
import pl.abankowski.musicbrainz.client.query._
import pl.abankowski.ws.WS
import play.api.http.HeaderNames._
import play.api.http.Status._
private[service] class RecordingServiceImpl @Inject() (config: Config, ws: WS)
extends MusicBrainzClient(config, ws) with RecordingService {
override def get(id: RecordingId): Future[Option[RecordingInfo]] =
url("/recording/" + id.value).withQueryString("inc" -> "aliases").get()
.map(response =>
response.status match {
case OK => Some(response.json.as[RecordingInfo])
case NOT_FOUND => None
case other if Range(500, 599).contains(other) =>
throw MusicBrainzServerError(other)
})
override def search(query: Query) =
url("/recording").withQueryString("query" -> query, "inc" -> "aliases").get()
.map { response =>
response.status match {
case OK => ResourceResult.valueOf[RecordingInfo](response.json, "recordings")
case other if Range(500, 599).contains(other) =>
throw MusicBrainzServerError(other)
}
}
}
| abankowski/musicbrainz-scala-client | src/main/scala/pl/abankowski/musicbrainz/client/service/RecordingServiceImpl.scala | Scala | mit | 1,486 |
package scl
class ChannelSocketUDP extends Channel {
var socket:java.net.DatagramSocket = null
def open(name:String) = {
try {
val ns = name.split(":")
socket = new java.net.DatagramSocket(if (ns.length == 2) ns(1).toInt else 8080, java.net.InetAddress.getByName(ns(0)))
_name = name
} catch { case _:Exception => socket = null }
opened
}
def opened = socket != null
def close = { if (opened) { socket.close; socket = null } }
def avail = if (opened) 0 else 0
def read = {
if (opened){
Nil
} else Nil
}
def write(bytes:Array[Byte]) = if (opened) {}
}
| tardigrade888/scsvlog | repo/src/scl/ChannelSocketUDP.scala | Scala | mit | 691 |
//package dhg.ccg.parse.pcfg
//
//import scala.collection.mutable.{ Map => MMap }
//import scala.collection.mutable.{ Set => MSet }
//import dhg.util._
//import scalaz._
//import scalaz.Scalaz._
//import dhg.ccg.prob._
//import dhg.ccg.cat._
//import dhg.ccg.parse._
//import dhg.ccg.util._
//import dhg.ccg.tagdict.TagDictionary
//import scala.collection.immutable.BitSet
//
///**
// * Take a sentence as a sequence of words associated with sets of tags.
// * Each AdditionalTagAdder will output a transformed tagset for each word.
// */
//trait AdditionalTagAdderI {
// type Word = Int
// type Tag = Int
// type CatSet = BitSet
//
// def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]]
//}
//
//class SequentialAdditionalTagAdderI(delegates: Vector[AdditionalTagAdderI]) extends AdditionalTagAdderI {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = delegates.foldLeft(tags) { (z, ata) => ata(sentence, z, tagdict, allCats) }
// override def toString = f"SequentialAdditionalTagAdderI(${delegates.mkString(", ")})"
//}
//
//class ParallelAdditionalTagAdderI(delegates: Vector[AdditionalTagAdderI]) extends AdditionalTagAdderI {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = delegates.map(ata => ata(sentence, tags, tagdict, allCats)).transpose.map(_.flatten.toSet)
// override def toString = f"ParallelAdditionalTagAdderI(${delegates.mkString(", ")})"
//}
//
//class NoOpAdditionalTagAdderI extends AdditionalTagAdderI {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = tags
// override def toString = f"NoOpAdditionalTagAdderI()"
//}
//
///**
// * For each word in the sentence, return its TD entry set if the word appears in the TD (empty set otherwise)
// */
//class PresentTagdictAdditionalTagAdderI() extends AdditionalTagAdderI {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = (sentence zipSafe tags).mapt((w, _) => tagdict.getOrElse(w, BitSet.empty))
// override def toString = f"PresentTagdictAdditionalTagAdderI()"
//}
//
///**
// * For each word in the sentence, if there are no tags associated with the word, then return a set containing all tags known to the TD (empty set otherwise)
// */
//class TagdictEntryForMissingAdditionalTagAdderI() extends AdditionalTagAdderI {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = (sentence zipSafe tags).mapt((w, ts) => if (ts.nonEmpty) Set.empty[Tag] else tagdict.getOrElse(w, BitSet.empty))
// override def toString = f"FullTagdictTagsetForMissingAdditionalTagAdderI()"
//}
//
///**
// * For each word in the sentence, if there are no tags associated with the word, then return a set containing all tags known to the TD (empty set otherwise)
// */
//class FullTagdictTagsetForMissingAdditionalTagAdderI() extends AdditionalTagAdderI {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = tags.map(ts => if (ts.nonEmpty) Set.empty[Tag] else allCats)
// override def toString = f"FullTagdictTagsetForMissingAdditionalTagAdderI()"
//}
//
//class DefaultTagsetForMissingAdditionalTagAdderI(defaultTagset: Set[Tag]) extends AdditionalTagAdderI {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = tags.map(ts => if (ts.nonEmpty) Set.empty[Tag] else defaultTagset)
// override def toString = f"DefaultTagsetForMissingAdditionalTagAdderI($defaultTagset)"
//}
//
///*
// * CCG-specific adders
// */
//
//class FwdBkdModAdditionalTagAdderI() extends AdditionalTagAdderI[Cat] {
// override def apply(sentence: Vector[Word], tags: Vector[Set[Cat]], tagdict: TagDictionary[Cat]): Vector[Set[Cat]] = {
// val n = sentence.length
// val endedTags = None +: tags.map(Option(_)) :+ None
// for (((word, Seq(prevTags, Some(tags), nextTags)), i) <- (sentence zipSafe endedTags.sliding(3)).zipWithIndex) yield {
// val prevs: Iterable[Cat] = for (prev <- prevTags.toSet; prevSupertag <- prev) yield (prevSupertag \\ prevSupertag)
// val nexts: Iterable[Cat] = for (next <- nextTags.toSet; nextSupertag <- next) yield (nextSupertag / nextSupertag)
// (prevs ++ nexts).toSet
// }
// }
//}
//
////class FwdBkdConsumerAdditionalTagAdder extends AdditionalTagAdderI[Cat] {
//// override def apply(sentence: Vector[Word], tags: Vector[Set[Cat]]): Vector[Set[Cat]] = {
//// val n = sentences.length
//// val endedTags = None +: tags.map(Option(_)) :+ None
//// for (((word, Seq(prevTags, Some(tags), nextTags)), i) <- (sentence zipSafe endedTags.sliding(3)).zipWithIndex) yield {
//// val prevs: Iterable[Cat] = for (prev<- prevTags.toSet; prevSupertag <- prev) yield (prevSupertag \\ prevSupertag)
//// val nexts: Iterable[Cat] = for (next<- nextTags.toSet; nextSupertag <- next) yield (nextSupertag / nextSupertag)
//// (tags ++ prevs ++ nexts).toSet
//// }
////
//// for (((word, tags), i) <- sentenceWithTags.zipWithIndex) yield {
//// val prevs: Iterable[Cat] = for (prevSupertag <- sentenceWithTags.map(_._2).apply(i); curSupertag <- tags) yield (curSupertag \\ prevSupertag) else Set.empty
//// val nexts: Iterable[Cat] = for (nextSupertag <- sentenceWithTags.map(_._2).apply(i + 1); curSupertag <- tags) yield (curSupertag / nextSupertag) else Set.empty
//// (prevs ++ nexts).toSet
//// }
//// }
////}
////
////class AllFwdBkdAdditionalTagAdder extends AdditionalTagAdderI[Cat] {
//// override def apply(sentence: Vector[Word], tags: Vector[Set[Cat]]): Vector[Set[Cat]] = {
//// val n = sentenceWithTags.length
//// for (((word, tags), i) <- sentenceWithTags.zipWithIndex) yield {
//// val prevs: Seq[Cat] = for (pTags <- sentenceWithTags.map(_._2).take(i); prevSupertag <- pTags) yield (prevSupertag \\ prevSupertag)
//// val nexts: Seq[Cat] = for (nTags <- sentenceWithTags.map(_._2).drop(i + 1); nextSupertag <- nTags) yield (nextSupertag / nextSupertag)
//// // TODO: Maybe this hsould be `curSupertag / nextSupertag` ??
//// (prevs ++ nexts).toSet
//// }
//// }
////}
//
////
//
//class StandardTagDictAdditionalTagAdderI() extends AdditionalTagAdderI {
// private[this] val delegate: AdditionalTagAdderI =
// new SequentialAdditionalTagAdderI(Vector(
// new ParallelAdditionalTagAdderI(Vector(
// new NoOpAdditionalTagAdderI(),
// new TagdictEntryForMissingAdditionalTagAdderI())),
// new ParallelAdditionalTagAdderI(Vector(
// new NoOpAdditionalTagAdderI(),
// new FullTagdictTagsetForMissingAdditionalTagAdderI()))))
// override def apply(sentence: Vector[Word], tags: Vector[Set[Tag]], tagdict: IndirectSparseVec[CatSet], allCats: CatSet): Vector[Set[Tag]] = delegate(sentence, tags, tagdict)
// override def toString = f"StandardTagDictAdditionalTagAdderI()"
//}
//
//class StandardTagDictAndFwdBkdAdditionalTagAdderI() extends AdditionalTagAdderI[Cat] {
// private[this] val delegate: AdditionalTagAdderI[Cat] =
// new SequentialAdditionalTagAdderI[Cat](Vector(
// new StandardTagDictAdditionalTagAdderI(),
// new ParallelAdditionalTagAdderI[Cat](Vector(
// new NoOpAdditionalTagAdderI(),
// new FwdBkdModAdditionalTagAdderI()))))
// override def apply(sentence: Vector[Word], tags: Vector[Set[Cat]], tagdict: TagDictionary[Cat]): Vector[Set[Cat]] = delegate(sentence, tags, tagdict)
// override def toString = f"StandardTagDictAndFwdBkdAdditionalTagAdderI()"
//}
| dhgarrette/2015-ccg-parsing | src/main/scala/dhg/ccg/parse/pcfg/AdditionalTagAdderI.scala | Scala | apache-2.0 | 7,946 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.driver.test.cube
import java.io.{Serializable => JSerializable}
import com.stratio.sparta.driver.cube.{CubeWriterOptions, CubeWriter, Cube}
import com.stratio.sparta.driver.trigger.Trigger
import com.stratio.sparta.sdk._
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row}
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class CubeWriterTest extends FlatSpec with ShouldMatchers {
"CubeWriterTest" should "return a row with values and timeDimension" in
new CommonValues {
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema,
Option(ExpiringDataConfig("minute", checkpointGranularity, 100000)), Seq.empty[Trigger])
val tableSchema = TableSchema(
Seq("outputName"),
"cubeTest",
StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField(checkpointGranularity, TimestampType, false),
StructField("op1", LongType, true))),
Option("minute")
)
val writerOptions = CubeWriterOptions(Seq("outputName"))
val output = new OutputMock("outputName",
None,
Map(),
Seq(tableSchema)
)
val cubeWriter =
CubeWriter(cube, tableSchema, writerOptions, Seq(output), Seq.empty[Output], Seq.empty[TableSchema])
val res = cubeWriter.toRow(dimensionValuesT, measures)
res should be(Row.fromSeq(Seq("value1", "value2", 1L, "value")))
}
"CubeWriterTest" should "return a row with values without timeDimension" in
new CommonValues {
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, None, Seq.empty[Trigger])
val tableSchema = TableSchema(
Seq("outputName"),
"cubeTest",
StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField("op1", LongType, true))),
None
)
val writerOptions = CubeWriterOptions(Seq("outputName"))
val output = new OutputMock("outputName",
None,
Map(),
Seq(tableSchema)
)
val cubeWriter =
CubeWriter(cube, tableSchema, writerOptions, Seq(output), Seq.empty[Output], Seq.empty[TableSchema])
val res = cubeWriter.toRow(dimensionValuesNoTime, measures)
res should be(Row.fromSeq(Seq("value1", "value2", "value")))
}
"CubeWriterTest" should "return a row with values with noTime and idAutoCalculated" in
new CommonValues {
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, None, Seq.empty[Trigger])
val tableSchema = TableSchema(
Seq("outputName"),
"cubeTest",
StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField("op1", LongType, true))),
None
)
val writerOptions = CubeWriterOptions(Seq("outputName"), TypeOp.Timestamp, MeasuresValues(Map.empty), true)
val output = new OutputMock("outputName",
None,
Map(),
Seq(tableSchema)
)
val cubeWriter =
CubeWriter(cube, tableSchema, writerOptions, Seq(output), Seq.empty[Output], Seq.empty[TableSchema])
val res = cubeWriter.toRow(dimensionValuesNoTime, measures)
res should be(Row.fromSeq(Seq("value1_value2", "value1", "value2", "value")))
}
"CubeWriterTest" should "return a row with values with time and idAutoCalculated" in
new CommonValues {
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, None, Seq.empty[Trigger])
val tableSchema = TableSchema(
Seq("outputName"),
"cubeTest",
StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField("op1", LongType, true))),
None
)
val writerOptions = CubeWriterOptions(Seq("outputName"), TypeOp.Timestamp, MeasuresValues(Map.empty), true)
val output = new OutputMock("outputName",
None,
Map(),
Seq(tableSchema)
)
val cubeWriter =
CubeWriter(cube, tableSchema, writerOptions, Seq(output), Seq.empty[Output], Seq.empty[TableSchema])
val res = cubeWriter.toRow(dimensionValuesT, measures)
res should be(Row.fromSeq(Seq("value1_value2_1", "value1", "value2", 1L, "value")))
}
"CubeWriterTest" should "return a row with values with time, idAutoCalculated and fixedMeasure" in
new CommonValues {
val cube = Cube(cubeName, Seq(dim1, dim2), Seq(op1), initSchema, None, Seq.empty[Trigger])
val tableSchema = TableSchema(
Seq("outputName"),
"cubeTest",
StructType(Array(
StructField("dim1", StringType, false),
StructField("dim2", StringType, false),
StructField("op1", LongType, true))),
None
)
val writerOptions = CubeWriterOptions(Seq("outputName"), TypeOp.Timestamp, fixedMeasure, true)
val output = new OutputMock("outputName",
None,
Map(),
Seq(tableSchema)
)
val cubeWriter =
CubeWriter(cube, tableSchema, writerOptions, Seq(output), Seq.empty[Output], Seq.empty[TableSchema])
val res = cubeWriter.toRow(dimensionValuesT, measures)
res should be(Row.fromSeq(Seq("value1_value2_1", "value1", "value2", 1L, "2", "value")))
}
class OperatorTest(name: String, schema: StructType, properties: Map[String, JSerializable])
extends Operator(name, schema, properties) {
override val defaultTypeOperation = TypeOp.Long
override val writeOperation = WriteOp.Inc
override val defaultCastingFilterType = TypeOp.Number
override def processMap(inputFields: Row): Option[Any] = {
None
}
override def processReduce(values: Iterable[Option[Any]]): Option[Long] = {
None
}
}
class OutputMock(keyName: String,
version: Option[Int],
properties: Map[String, JSerializable],
schemas: Seq[TableSchema])
extends Output(keyName, version, properties, schemas) {
override def upsert(dataFrame: DataFrame, options: Map[String, String]): Unit = {}
}
class DimensionTypeTest extends DimensionType {
override val operationProps: Map[String, JSerializable] = Map()
override val properties: Map[String, JSerializable] = Map()
override val defaultTypeOperation = TypeOp.String
override def precisionValue(keyName: String, value: Any): (Precision, Any) = {
val precision = DimensionType.getIdentity(getTypeOperation, defaultTypeOperation)
(precision, TypeOp.transformValueByTypeOp(precision.typeOp, value))
}
override def precision(keyName: String): Precision =
DimensionType.getIdentity(getTypeOperation, defaultTypeOperation)
}
trait CommonValues {
val dim1: Dimension = Dimension("dim1", "field1", "", new DimensionTypeTest)
val dim2: Dimension = Dimension("dim2", "field2", "", new DimensionTypeTest)
val dimId: Dimension = Dimension("id", "field2", "", new DimensionTypeTest)
val op1: Operator = new OperatorTest("op1", StructType(Seq(StructField("n", LongType, false))), Map())
val checkpointAvailable = 60000
val checkpointGranularity = "minute"
val cubeName = "cubeTest"
val defaultDimension = new DimensionTypeTest
val dimensionValuesT = DimensionValuesTime("testCube", Seq(DimensionValue(
Dimension("dim1", "eventKey", "identity", defaultDimension), "value1"),
DimensionValue(
Dimension("dim2", "eventKey", "identity", defaultDimension), "value2"),
DimensionValue(
Dimension("minute", "eventKey", "identity", defaultDimension), 1L)))
val dimensionValuesNoTime = DimensionValuesTime("testCube", Seq(DimensionValue(
Dimension("dim1", "eventKey", "identity", defaultDimension), "value1"),
DimensionValue(
Dimension("dim2", "eventKey", "identity", defaultDimension), "value2")))
val fixedMeasure = MeasuresValues(Map("agg2" -> Option("2")))
val measures = MeasuresValues(Map("field" -> Option("value")))
val initSchema = StructType(Seq(StructField("n", StringType, false)))
}
}
| danielcsant/sparta | driver/src/test/scala/com/stratio/sparta/driver/test/cube/CubeWriterTest.scala | Scala | apache-2.0 | 8,958 |
//https://www.hackerrank.com/challenges/fp-update-list
object UpdateList extends App {
def f(arr:List[Int]):List[Int] = for (i <- arr) yield math.abs(i)
def parseLine(line:String):List[Int] = line.trim().split(" ").toList.map(_.toInt)
val line = io.Source.stdin.getLines().next
println(f(parseLine(line)))
}
| flopezlasanta/hackerrank | src/functional_programming/introduction/UpdateList.scala | Scala | mit | 320 |
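A minimal check of the f function from UpdateList.scala above, using made-up input values (the HackerRank problem supplies its own input via stdin), just to show the absolute-value mapping:

object UpdateListCheck extends App {
  // Same transformation as UpdateList.f: replace each element by its absolute value.
  def f(arr: List[Int]): List[Int] = for (i <- arr) yield math.abs(i)

  // Hypothetical sample input, not taken from the original problem statement.
  val sample = List(2, -4, 3, -1, 23, -4, 0)
  assert(f(sample) == List(2, 4, 3, 1, 23, 4, 0))
  println(f(sample)) // List(2, 4, 3, 1, 23, 4, 0)
}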
/*
* Copyright (C) 2016 University of Basel, Graphics and Vision Research Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package scalismo.ui.rendering.actor.mixin
import scalismo.ui.model.properties.{ColorProperty, NodeProperty}
import scalismo.ui.rendering.actor.{ActorEvents, SingleActor}
import scalismo.ui.rendering.util.VtkUtil
trait ActorColor extends SingleActor with ActorEvents {
def color: ColorProperty
listenTo(color)
reactions += {
case NodeProperty.event.PropertyChanged(p) if p eq color => setAppearance()
}
private def setAppearance(): Unit = {
GetProperty().SetColor(VtkUtil.colorToArray(color.value))
actorChanged()
}
setAppearance()
}
| unibas-gravis/scalismo-ui | src/main/scala/scalismo/ui/rendering/actor/mixin/ActorColor.scala | Scala | gpl-3.0 | 1,302 |
package com.yuvimasory.scallect
import com.google.caliper.Runner
import org.clapper.classutil.ClassFinder
object Main extends App {
// val classes = ClassFinder.concreteSubclasses(
// classOf[com.google.caliper.SimpleBenchmark].getName,
// ClassFinder() getClasses()
// )
// classes.foreach { info =>
// val name = info.name
// println("running " + name)
// trapExits {
// Runner main ((name :: args.toList): _*)
// }
// }
val marks = List(
classOf[PrependBenchmark],
classOf[AppendBenchmark]
).map { _.getName }
for (mark <- marks) Runner main ((mark :: args.toList): _*)
def trapExits(thunk: => Unit): Unit = {
val originalSecManager = System.getSecurityManager
case class NoExitsException() extends SecurityException
System setSecurityManager new SecurityManager() {
import java.security.Permission
override def checkPermission(perm: Permission) {
if (perm.getName startsWith "exitVM") throw NoExitsException()
}
}
try {
thunk
} catch {
case _: NoExitsException =>
} finally {
System setSecurityManager originalSecManager
}
}
}
| ymasory/scallect | src/main/scala/main.scala | Scala | gpl-3.0 | 1,169 |
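A standalone sketch of the trapExits pattern used in main.scala above: a temporary SecurityManager turns System.exit into a catchable exception so the surrounding code keeps running. This assumes a JVM where installing a SecurityManager is still permitted (it is deprecated and disabled by default on recent JDKs); names here are illustrative only.

object TrapExitsDemo extends App {
  case class NoExitException() extends SecurityException

  // Run a thunk that may call System.exit without letting it terminate the JVM.
  def trapExits(thunk: => Unit): Unit = {
    val original = System.getSecurityManager
    System.setSecurityManager(new SecurityManager {
      override def checkPermission(perm: java.security.Permission): Unit =
        if (perm.getName.startsWith("exitVM")) throw NoExitException()
    })
    try thunk
    catch { case _: NoExitException => () }
    finally System.setSecurityManager(original)
  }

  trapExits { System.exit(1) } // would normally kill the process
  println("still running")     // reached because the exit was trapped
}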
object Holder {
private object O
}
import Holder./* */O
println(/* accessible: false */ O)
println(classOf[/* resolved: false */ O])
| LPTK/intellij-scala | testdata/resolve2/import/access/PrivateObject.scala | Scala | apache-2.0 | 137 |
trait Resilience
object Bounciness extends Enumeration {
case class _Val() extends Val with Resilience
type Bounciness = _Val
val level1, level2, level3 = _Val()
}
import Bounciness._
object Flexibility extends Enumeration {
case class _Val() extends Val with Resilience
type Flexibility = _Val
val type1, type2, type3 = _Val()
}
import Flexibility._
trait Spring[R <: Resilience] {
val res: R
}
case class BouncingBall(res: Bounciness) extends Spring[Bounciness]
println(BouncingBall(level2))
case class FlexingWall(res: Flexibility) extends Spring[Flexibility]
println(FlexingWall(type3))
//------------------------------------------------------------
class WithF {
def f(n: Int) = n * 11
}
class CallF[T <: WithF](t: T) {
def g(n: Int) = t.f(n)
}
println(new CallF(new WithF).g(2))
println(new CallF(new WithF {
override def f(n: Int): Int = n * 7
}).g(2))
| mkoltsov/AtomicScala | Advanced/TypeParamsConstraints.scala | Scala | mit | 943 |
package com.example.cloud.jackson
import com.fasterxml.jackson.databind.{Module, ObjectMapper}
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.springframework.context.annotation.{Bean, Configuration}
import org.springframework.http.converter.json.MappingJackson2HttpMessageConverter
@Configuration
class JacksonConfig {
@Bean
def mappingJackson2HttpMessageConverter(objectMapper: ObjectMapper, defaultScalaModule: DefaultScalaModule) = {
objectMapper.registerModule(defaultScalaModule)
new MappingJackson2HttpMessageConverter(objectMapper)
}
@Bean
def defaultScalaModule: Module = new DefaultScalaModule
}
| okharkovskyi/scala-cloud-skeleton | core/src/main/scala/com/example/cloud/jackson/JacksonConfig.scala | Scala | apache-2.0 | 652 |
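For context on why JacksonConfig.scala above registers DefaultScalaModule: without it, Jackson does not know how to serialize Scala case classes and collections. A small sketch (the payload type and values are made up for illustration):

import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule

// Hypothetical payload type used only for this illustration.
case class Greeting(message: String, tags: Seq[String])

object JacksonScalaDemo extends App {
  val mapper = new ObjectMapper()
  mapper.registerModule(DefaultScalaModule)
  // With the Scala module registered, case classes and Scala collections serialize as plain JSON.
  println(mapper.writeValueAsString(Greeting("hello", Seq("a", "b"))))
  // expected: {"message":"hello","tags":["a","b"]}
}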
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.config
import javax.inject.Inject
import com.typesafe.config.Config
import org.junit.runner.RunWith
import org.specs2.runner.JUnitRunner
import play.api.{ConfigLoader, Configuration}
import play.api.mvc._
import play.api.test.PlaySpecification
import java.net.URI
@RunWith(classOf[JUnitRunner])
class ScalaConfigSpec extends PlaySpecification with Controller {
val config: Configuration = Configuration.from(Map(
"foo" -> "bar",
"bar" -> "1.25",
"baz" -> "true",
"listOfFoos" -> Seq("bar", "baz"),
"app.config" -> Map(
"title" -> "Foo",
"baseUri" -> "https://example.com"
)
))
"Scala Configuration" should {
"be injectable" in {
running() { app =>
val controller = app.injector.instanceOf[MyController]
ok
}
}
"get different types of keys" in {
//#config-get
// foo = bar
config.get[String]("foo")
// bar = 8
config.get[Int]("bar")
// baz = true
config.get[Boolean]("baz")
// listOfFoos = ["bar", "baz"]
config.get[Seq[String]]("listOfFoos")
//#config-get
//#config-validate
config.getAndValidate[String]("foo", Set("bar", "baz"))
//#config-validate
// check that a bad key doesn't work
config.get[String]("bogus") must throwAn[Exception]
ok
}
"allow defining custom config loaders" in {
//#config-loader-get
// app.config = {
// title = "My App"
// baseUri = "https://example.com/"
// }
config.get[AppConfig]("app.config")
//#config-loader-get
ok
}
}
}
//#config-loader-example
case class AppConfig(title: String, baseUri: URI)
object AppConfig {
implicit val configLoader: ConfigLoader[AppConfig] = new ConfigLoader[AppConfig] {
def load(rootConfig: Config, path: String): AppConfig = {
val config = rootConfig.getConfig(path)
AppConfig(
title = config.getString("title"),
baseUri = new URI(config.getString("baseUri"))
)
}
}
}
//#config-loader-example
//#inject-config
class MyController @Inject() (config: Configuration, c: ControllerComponents) extends AbstractController(c) {
def getFoo = Action {
Ok(config.get[String]("foo"))
}
}
//#inject-config
| wsargent/playframework | documentation/manual/working/scalaGuide/main/config/code/ScalaConfig.scala | Scala | apache-2.0 | 2,371 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.trees.TreeNode
import org.apache.spark.sql.types.{DataType, StructType}
abstract class QueryPlan[PlanType <: QueryPlan[PlanType]] extends TreeNode[PlanType] {
self: PlanType =>
def output: Seq[Attribute]
/**
* Extracts the relevant constraints from a given set of constraints based on the attributes that
* appear in the [[outputSet]].
*/
protected def getRelevantConstraints(constraints: Set[Expression]): Set[Expression] = {
constraints
.union(inferAdditionalConstraints(constraints))
.union(constructIsNotNullConstraints(constraints))
.filter(constraint =>
constraint.references.nonEmpty && constraint.references.subsetOf(outputSet) &&
constraint.deterministic)
}
/**
* Infers a set of `isNotNull` constraints from a given set of equality/comparison expressions as
* well as non-nullable attributes. For e.g., if an expression is of the form (`a > 5`), this
* returns a constraint of the form `isNotNull(a)`
*/
private def constructIsNotNullConstraints(constraints: Set[Expression]): Set[Expression] = {
// First, we propagate constraints from the null intolerant expressions.
var isNotNullConstraints: Set[Expression] =
constraints.flatMap(scanNullIntolerantExpr).map(IsNotNull(_))
// Second, we infer additional constraints from non-nullable attributes that are part of the
// operator's output
val nonNullableAttributes = output.filterNot(_.nullable)
isNotNullConstraints ++= nonNullableAttributes.map(IsNotNull).toSet
isNotNullConstraints -- constraints
}
/**
* Recursively explores the expressions which are null intolerant and returns all attributes
* in these expressions.
*/
private def scanNullIntolerantExpr(expr: Expression): Seq[Attribute] = expr match {
case a: Attribute => Seq(a)
case _: NullIntolerant | IsNotNull(_: NullIntolerant) =>
expr.children.flatMap(scanNullIntolerantExpr)
case _ => Seq.empty[Attribute]
}
// Collect aliases from expressions, so we may avoid producing recursive constraints.
private lazy val aliasMap = AttributeMap(
(expressions ++ children.flatMap(_.expressions)).collect {
case a: Alias => (a.toAttribute, a.child)
})
/**
* Infers an additional set of constraints from a given set of equality constraints.
* For e.g., if an operator has constraints of the form (`a = 5`, `a = b`), this returns an
* additional constraint of the form `b = 5`.
*
* [SPARK-17733] We explicitly prevent producing recursive constraints of the form `a = f(a, b)`
* as they are often useless and can lead to a non-converging set of constraints.
*/
private def inferAdditionalConstraints(constraints: Set[Expression]): Set[Expression] = {
val constraintClasses = generateEquivalentConstraintClasses(constraints)
var inferredConstraints = Set.empty[Expression]
constraints.foreach {
case eq @ EqualTo(l: Attribute, r: Attribute) =>
val candidateConstraints = constraints - eq
inferredConstraints ++= candidateConstraints.map(_ transform {
case a: Attribute if a.semanticEquals(l) &&
!isRecursiveDeduction(r, constraintClasses) => r
})
inferredConstraints ++= candidateConstraints.map(_ transform {
case a: Attribute if a.semanticEquals(r) &&
!isRecursiveDeduction(l, constraintClasses) => l
})
case _ => // No inference
}
inferredConstraints -- constraints
}
/*
* Generate a sequence of expression sets from constraints, where each set stores an equivalence
* class of expressions. For example, Set(`a = b`, `b = c`, `e = f`) will generate the following
* expression sets: (Set(a, b, c), Set(e, f)). This will be used to search all expressions equal
* to a selected attribute.
*/
private def generateEquivalentConstraintClasses(
constraints: Set[Expression]): Seq[Set[Expression]] = {
var constraintClasses = Seq.empty[Set[Expression]]
constraints.foreach {
case eq @ EqualTo(l: Attribute, r: Attribute) =>
// Transform [[Alias]] to its child.
val left = aliasMap.getOrElse(l, l)
val right = aliasMap.getOrElse(r, r)
// Get the expression set for an equivalence constraint class.
val leftConstraintClass = getConstraintClass(left, constraintClasses)
val rightConstraintClass = getConstraintClass(right, constraintClasses)
if (leftConstraintClass.nonEmpty && rightConstraintClass.nonEmpty) {
// Combine the two sets.
constraintClasses = constraintClasses
.diff(leftConstraintClass :: rightConstraintClass :: Nil) :+
(leftConstraintClass ++ rightConstraintClass)
} else if (leftConstraintClass.nonEmpty) { // && rightConstraintClass.isEmpty
// Update equivalence class of `left` expression.
constraintClasses = constraintClasses
.diff(leftConstraintClass :: Nil) :+ (leftConstraintClass + right)
} else if (rightConstraintClass.nonEmpty) { // && leftConstraintClass.isEmpty
// Update equivalence class of `right` expression.
constraintClasses = constraintClasses
.diff(rightConstraintClass :: Nil) :+ (rightConstraintClass + left)
} else { // leftConstraintClass.isEmpty && rightConstraintClass.isEmpty
// Create new equivalence constraint class since neither expression presents
// in any classes.
constraintClasses = constraintClasses :+ Set(left, right)
}
case _ => // Skip
}
constraintClasses
}
/*
* Get all expressions equivalent to the selected expression.
*/
private def getConstraintClass(
expr: Expression,
constraintClasses: Seq[Set[Expression]]): Set[Expression] =
constraintClasses.find(_.contains(expr)).getOrElse(Set.empty[Expression])
/*
* Check whether replace by an [[Attribute]] will cause a recursive deduction. Generally it
* has the form like: `a -> f(a, b)`, where `a` and `b` are expressions and `f` is a function.
* Here we first get all expressions equal to `attr` and then check whether at least one of them
* is a child of the referenced expression.
*/
private def isRecursiveDeduction(
attr: Attribute,
constraintClasses: Seq[Set[Expression]]): Boolean = {
val expr = aliasMap.getOrElse(attr, attr)
getConstraintClass(expr, constraintClasses).exists { e =>
expr.children.exists(_.semanticEquals(e))
}
}
/**
* An [[ExpressionSet]] that contains invariants about the rows output by this operator. For
* example, if this set contains the expression `a = 2` then that expression is guaranteed to
* evaluate to `true` for all rows produced.
*/
lazy val constraints: ExpressionSet = ExpressionSet(getRelevantConstraints(validConstraints))
/**
* This method can be overridden by any child class of QueryPlan to specify a set of constraints
* based on the given operator's constraint propagation logic. These constraints are then
* canonicalized and filtered automatically to contain only those attributes that appear in the
* [[outputSet]].
*
* See [[Canonicalize]] for more details.
*/
protected def validConstraints: Set[Expression] = Set.empty
/**
* Returns the set of attributes that are output by this node.
*/
def outputSet: AttributeSet = AttributeSet(output)
/**
* All Attributes that appear in expressions from this operator. Note that this set does not
* include attributes that are implicitly referenced by being passed through to the output tuple.
*/
def references: AttributeSet = AttributeSet(expressions.flatMap(_.references))
/**
* The set of all attributes that are input to this operator by its children.
*/
def inputSet: AttributeSet =
AttributeSet(children.flatMap(_.asInstanceOf[QueryPlan[PlanType]].output))
/**
* The set of all attributes that are produced by this node.
*/
def producedAttributes: AttributeSet = AttributeSet.empty
/**
* Attributes that are referenced by expressions but not provided by this nodes children.
* Subclasses should override this method if they produce attributes internally as it is used by
* assertions designed to prevent the construction of invalid plans.
*/
def missingInput: AttributeSet = references -- inputSet -- producedAttributes
/**
* Runs [[transform]] with `rule` on all expressions present in this query operator.
* Users should not expect a specific directionality. If a specific directionality is needed,
* transformExpressionsDown or transformExpressionsUp should be used.
*
* @param rule the rule to be applied to every expression in this operator.
*/
def transformExpressions(rule: PartialFunction[Expression, Expression]): this.type = {
transformExpressionsDown(rule)
}
/**
* Runs [[transformDown]] with `rule` on all expressions present in this query operator.
*
* @param rule the rule to be applied to every expression in this operator.
*/
def transformExpressionsDown(rule: PartialFunction[Expression, Expression]): this.type = {
var changed = false
@inline def transformExpressionDown(e: Expression): Expression = {
val newE = e.transformDown(rule)
if (newE.fastEquals(e)) {
e
} else {
changed = true
newE
}
}
def recursiveTransform(arg: Any): AnyRef = arg match {
case e: Expression => transformExpressionDown(e)
case Some(e: Expression) => Some(transformExpressionDown(e))
case m: Map[_, _] => m
case d: DataType => d // Avoid unpacking Structs
case seq: Traversable[_] => seq.map(recursiveTransform)
case other: AnyRef => other
case null => null
}
val newArgs = mapProductIterator(recursiveTransform)
if (changed) makeCopy(newArgs).asInstanceOf[this.type] else this
}
/**
* Runs [[transformUp]] with `rule` on all expressions present in this query operator.
*
* @param rule the rule to be applied to every expression in this operator.
* @return
*/
def transformExpressionsUp(rule: PartialFunction[Expression, Expression]): this.type = {
var changed = false
@inline def transformExpressionUp(e: Expression): Expression = {
val newE = e.transformUp(rule)
if (newE.fastEquals(e)) {
e
} else {
changed = true
newE
}
}
def recursiveTransform(arg: Any): AnyRef = arg match {
case e: Expression => transformExpressionUp(e)
case Some(e: Expression) => Some(transformExpressionUp(e))
case m: Map[_, _] => m
case d: DataType => d // Avoid unpacking Structs
case seq: Traversable[_] => seq.map(recursiveTransform)
case other: AnyRef => other
case null => null
}
val newArgs = mapProductIterator(recursiveTransform)
if (changed) makeCopy(newArgs).asInstanceOf[this.type] else this
}
/**
* Returns the result of running [[transformExpressions]] on this node
* and all its children.
*/
def transformAllExpressions(rule: PartialFunction[Expression, Expression]): this.type = {
transform {
case q: QueryPlan[_] => q.transformExpressions(rule).asInstanceOf[PlanType]
}.asInstanceOf[this.type]
}
/** Returns all of the expressions present in this query plan operator. */
final def expressions: Seq[Expression] = {
// Recursively find all expressions from a traversable.
def seqToExpressions(seq: Traversable[Any]): Traversable[Expression] = seq.flatMap {
case e: Expression => e :: Nil
case s: Traversable[_] => seqToExpressions(s)
case other => Nil
}
productIterator.flatMap {
case e: Expression => e :: Nil
case Some(e: Expression) => e :: Nil
case seq: Traversable[_] => seqToExpressions(seq)
case other => Nil
}.toSeq
}
lazy val schema: StructType = StructType.fromAttributes(output)
/** Returns the output schema in the tree format. */
def schemaString: String = schema.treeString
/** Prints out the schema in the tree format */
// scalastyle:off println
def printSchema(): Unit = println(schemaString)
// scalastyle:on println
/**
* A prefix string used when printing the plan.
*
* We use "!" to indicate an invalid plan, and "'" to indicate an unresolved plan.
*/
protected def statePrefix = if (missingInput.nonEmpty && children.nonEmpty) "!" else ""
override def simpleString: String = statePrefix + super.simpleString
override def verboseString: String = simpleString
/**
* All the subqueries of current plan.
*/
def subqueries: Seq[PlanType] = {
expressions.flatMap(_.collect {case e: SubqueryExpression => e.plan.asInstanceOf[PlanType]})
}
override protected def innerChildren: Seq[QueryPlan[_]] = subqueries
/**
* Canonicalized copy of this query plan.
*/
protected lazy val canonicalized: PlanType = this
/**
* Returns true when the given query plan will return the same results as this query plan.
*
* Since it's likely undecidable to generally determine if two given plans will produce the same
* results, it is okay for this function to return false, even if the results are actually
* the same. Such behavior will not affect correctness, only the application of performance
* enhancements like caching. However, it is not acceptable to return true if the results could
* possibly be different.
*
* By default this function performs a modified version of equality that is tolerant of cosmetic
* differences like attribute naming and or expression id differences. Operators that
* can do better should override this function.
*/
def sameResult(plan: PlanType): Boolean = {
val left = this.canonicalized
val right = plan.canonicalized
left.getClass == right.getClass &&
left.children.size == right.children.size &&
left.cleanArgs == right.cleanArgs &&
(left.children, right.children).zipped.forall(_ sameResult _)
}
/**
* All the attributes that are used for this plan.
*/
lazy val allAttributes: AttributeSeq = children.flatMap(_.output)
private def cleanExpression(e: Expression): Expression = e match {
case a: Alias =>
// As the root of the expression, Alias will always take an arbitrary exprId, we need
// to erase that for equality testing.
val cleanedExprId =
Alias(a.child, a.name)(ExprId(-1), a.qualifier, isGenerated = a.isGenerated)
BindReferences.bindReference(cleanedExprId, allAttributes, allowFailures = true)
case other =>
BindReferences.bindReference(other, allAttributes, allowFailures = true)
}
/** Args that have cleaned such that differences in expression id should not affect equality */
protected lazy val cleanArgs: Seq[Any] = {
def cleanArg(arg: Any): Any = arg match {
// Children are checked using sameResult above.
case tn: TreeNode[_] if containsChild(tn) => null
case e: Expression => cleanExpression(e).canonicalized
case other => other
}
mapProductIterator {
case s: Option[_] => s.map(cleanArg)
case s: Seq[_] => s.map(cleanArg)
case m: Map[_, _] => m.mapValues(cleanArg)
case other => cleanArg(other)
}.toSeq
}
}
| gioenn/xSpark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/QueryPlan.scala | Scala | apache-2.0 | 16,288 |
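The inferAdditionalConstraints method in QueryPlan.scala above substitutes attributes through equality constraints (from a = 5 and a = b it derives b = 5). Below is a toy, non-Spark sketch of that substitution idea, using made-up expression types rather than Catalyst's classes:

object ConstraintInferenceSketch extends App {
  sealed trait Expr
  case class Attr(name: String) extends Expr
  case class Lit(value: Int) extends Expr
  case class EqualTo(left: Expr, right: Expr) extends Expr

  // Replace every occurrence of `from` with `to` inside an expression.
  def substitute(e: Expr, from: Attr, to: Attr): Expr = e match {
    case `from`        => to
    case EqualTo(l, r) => EqualTo(substitute(l, from, to), substitute(r, from, to))
    case other         => other
  }

  // For each attribute equality l = r, rewrite the remaining constraints in both directions
  // and keep only the constraints that are genuinely new.
  def infer(constraints: Set[Expr]): Set[Expr] = {
    val inferred = constraints.flatMap {
      case eq @ EqualTo(l: Attr, r: Attr) =>
        val others = constraints - eq
        others.map(substitute(_, l, r)) ++ others.map(substitute(_, r, l))
      case _ => Set.empty[Expr]
    }
    inferred -- constraints
  }

  val a = Attr("a"); val b = Attr("b")
  println(infer(Set(EqualTo(a, Lit(5)), EqualTo(a, b))))
  // contains EqualTo(Attr(b), Lit(5))
}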
/**
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//Commons
import org.apache.spark.sql.crossdata._
import org.apache.spark.sql.crossdata.XDContext
import org.apache.spark.{SparkContext, SparkConf}
import org.apache.spark.SPARK_VERSION
//Scala
import scala.collection.JavaConversions._
println("""Welcome to Stratio
__________ ____ __________ ____ ___ _________
/ ____/ __ \\/ __ \\/ ___/ ___// __ \\/ |/_ __/ |
/ / / /_/ / / / /\\__ \\\\__ \\/ / / / /| | / / / /| |
/ /___/ _, _/ /_/ /___/ /__/ / /_/ / ___ |/ / / ___ |
\\____/_/ |_|\\____//____/____/_____/_/ |_/_/ /_/ |_| version %s
Powered By Apache:
____ __
/ __/__ ___ _____/ /__
_\\ \\/ _ \\/ _ `/ __/ '_/
/___/ .__/\\_,_/_/ /_/\\_\\ version %s
/_/
""".format(CrossdataVersion, SPARK_VERSION))
val xdContext = new XDContext(sc)
| luismcl/crossdata | scripts/stratio-xd-init.scala | Scala | apache-2.0 | 1,469 |
package com.sksamuel.elastic4s.admin
import com.sksamuel.elastic4s.testkit.ElasticSugar
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.SpanSugar._
import org.scalatest.{Matchers, WordSpec}
class IndexStatsTest extends WordSpec with Matchers with ElasticSugar with ScalaFutures {
client.execute {
bulk(
indexInto("segments_movies" / "character") fields("name" -> "star trek", "show" -> "kirk"),
indexInto("segments_tv" / "character") fields("name" -> "michael", "show" -> "knightrider"),
indexInto("segments_theatre" / "character") fields("name" -> "glinda", "show" -> "wicked")
)
}
blockUntilCount(1, "segments_movies")
blockUntilCount(1, "segments_tv")
blockUntilCount(1, "segments_theatre")
override implicit def patienceConfig = PatienceConfig(timeout = 10.seconds, interval = 1.seconds)
"indexStats(*)" should {
"return all indexes" in {
val f = client.execute {
indexStats("*")
}
whenReady(f) { resp =>
Set("segments_movies", "segments_tv", "segments_theatre").foreach { index =>
resp.indexNames should contain(index)
}
}
}
}
"indexStats(indexName)" should {
"return stats for specified indexes" in {
val f = client.execute {
indexStats("segments_movies", "segments_tv")
}
whenReady(f) { resp =>
resp.indexNames shouldBe Set("segments_movies", "segments_tv")
}
}
}
}
| FabienPennequin/elastic4s | elastic4s-tests/src/test/scala/com/sksamuel/elastic4s/admin/IndexStatsTest.scala | Scala | apache-2.0 | 1,463 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.typedmap
/**
* An entry that binds a typed key and a value. These entries can be
* placed into a [[TypedMap]] or any other type of object with typed
* values.
*
* @param key The key for this entry.
* @param value The value for this entry.
* @tparam A The type of the value.
*/
final case class TypedEntry[A](key: TypedKey[A], value: A) {
/**
* Convert the entry into a standard pair.
*/
def toPair: (TypedKey[A], A) = (key, value)
}
| wsargent/playframework | framework/src/play/src/main/scala/play/api/libs/typedmap/TypedEntry.scala | Scala | apache-2.0 | 551 |
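A small usage sketch for TypedEntry.scala above. TypedKey("...") and TypedMap.empty/updated/get come from the same play.api.libs.typedmap package but are not shown in this file, so treat their exact signatures as assumptions of this sketch:

import play.api.libs.typedmap.{TypedEntry, TypedKey, TypedMap}

object TypedEntryDemo extends App {
  // Assumed TypedKey constructor from the same package (not part of the file above).
  val UserName: TypedKey[String] = TypedKey("userName")

  val entry: TypedEntry[String] = TypedEntry(UserName, "alice")
  val (key, value) = entry.toPair                 // standard pair form, as documented above
  val attrs: TypedMap = TypedMap.empty.updated(key, value)

  println(attrs.get(UserName))                    // Some(alice)
}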
package org.broadinstitute.dsde.firecloud.service
import java.lang.reflect.Field
import org.broadinstitute.dsde.firecloud.FireCloudConfig
import org.broadinstitute.dsde.firecloud.dataaccess.MockOntologyDAO
import org.broadinstitute.dsde.firecloud.model.DUOS.DuosDataUse
import org.broadinstitute.dsde.firecloud.model.DataUse.StructuredDataRequest
import org.broadinstitute.dsde.firecloud.service.DataUseRestrictionTestFixtures._
import org.broadinstitute.dsde.rawls.model._
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.matchers.should.Matchers
import spray.json._
import spray.json.DefaultJsonProtocol._
import scala.language.postfixOps
class DataUseRestrictionSupportSpec extends AnyFreeSpec with Matchers with DataUseRestrictionSupport {
"DataUseRestrictionSupport" - {
"Structured Use Restriction" - {
"when questionnaire answers are used to populate restriction fields" - {
"and all consent codes are true or filled in" in {
val ontologyDAO = new MockOntologyDAO
val request = StructuredDataRequest(generalResearchUse = true,
healthMedicalBiomedicalUseRequired = true,
diseaseUseRequired = Array("http://purl.obolibrary.org/obo/DOID_4325","http://purl.obolibrary.org/obo/DOID_2531"),
commercialUseProhibited = true,
forProfitUseProhibited = true,
methodsResearchProhibited = true,
aggregateLevelDataProhibited = true,
controlsUseProhibited = true,
genderUseRequired = "female",
pediatricResearchRequired = true,
irbRequired = true,
prefix = Some("blah"))
val expected = Map("blahconsentCodes" -> Array("NPU","RS-G","NCU","HMB","RS-FM","NCTRL","RS-PD","IRB","NAGR","GRU","NMDS","DS:Ebola hemorrhagic fever","DS:hematologic cancer").toJson,
"blahdulvn" -> FireCloudConfig.Duos.dulvn.toJson,
"blahstructuredUseRestriction" -> Map(
"NPU" -> true.toJson,
"RS-PD" -> true.toJson,
"NCU" -> true.toJson,
"RS-G" -> true.toJson,
"IRB" -> true.toJson,
"NAGR" -> true.toJson,
"RS-FM" -> true.toJson,
"RS-M" -> false.toJson,
"NMDS"-> true.toJson,
"NCTRL" -> true.toJson,
"GRU" ->true.toJson,
"HMB" -> true.toJson,
"DS" -> Array(4325,2531).toJson).toJson)
val result = generateStructuredUseRestrictionAttribute(request, ontologyDAO)
result should be (expected)
}
"and all consent codes are false or empty" in {
val ontologyDAO = new MockOntologyDAO
val request = StructuredDataRequest(generalResearchUse = false,
healthMedicalBiomedicalUseRequired = false,
diseaseUseRequired = Array(),
commercialUseProhibited = false,
forProfitUseProhibited = false,
methodsResearchProhibited = false,
aggregateLevelDataProhibited = false,
controlsUseProhibited = false,
genderUseRequired = "",
pediatricResearchRequired = false,
irbRequired = false,
prefix = None)
val expected = Map("consentCodes" -> Array.empty[String].toJson,
"dulvn" -> FireCloudConfig.Duos.dulvn.toJson,
"structuredUseRestriction" -> Map(
"NPU" -> false.toJson,
"RS-PD" -> false.toJson,
"NCU" -> false.toJson,
"RS-G" -> false.toJson,
"IRB" -> false.toJson,
"NAGR" -> false.toJson,
"RS-FM" -> false.toJson,
"RS-M" -> false.toJson,
"NMDS"-> false.toJson,
"NCTRL" -> false.toJson,
"GRU" -> false.toJson,
"HMB" -> false.toJson,
"DS" -> Array.empty[String].toJson).toJson)
val result = generateStructuredUseRestrictionAttribute(request, ontologyDAO)
result should be (expected)
}
"and consent codes are a mixture of true and false" in {
val ontologyDAO = new MockOntologyDAO
val request = StructuredDataRequest(generalResearchUse = false,
healthMedicalBiomedicalUseRequired = true,
diseaseUseRequired = Array("http://purl.obolibrary.org/obo/DOID_1240"),
commercialUseProhibited = false,
forProfitUseProhibited = true,
methodsResearchProhibited = false,
aggregateLevelDataProhibited = false,
controlsUseProhibited = true,
genderUseRequired = "Male",
pediatricResearchRequired = false,
irbRequired = true,
prefix = Some("library"))
val expected = Map("libraryconsentCodes" -> Array("NPU","RS-G","RS-M","HMB","NCTRL","IRB","DS:leukemia").toJson,
"librarydulvn" -> FireCloudConfig.Duos.dulvn.toJson,
"librarystructuredUseRestriction" -> Map(
"NPU" -> true.toJson,
"RS-PD" -> false.toJson,
"NCU" -> false.toJson,
"RS-G" -> true.toJson,
"IRB" -> true.toJson,
"NAGR" -> false.toJson,
"RS-FM" -> false.toJson,
"RS-M" -> true.toJson,
"NMDS"-> false.toJson,
"NCTRL" -> true.toJson,
"GRU" -> false.toJson,
"HMB" -> true.toJson,
"DS" -> Array(1240).toJson).toJson)
val result = generateStructuredUseRestrictionAttribute(request, ontologyDAO)
result should be (expected)
}
}
"when there are library data use restriction fields" - {
"dataset should have a fully populated data use restriction attribute" in {
allDatasets.map { ds =>
val ontologyDAO = new MockOntologyDAO
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(ds, ontologyDAO).structured
val durAtt: Attribute = attrs.getOrElse(structuredUseRestrictionAttributeName, AttributeNull)
durAtt shouldNot be(AttributeNull)
val dur = makeDurFromWorkspace(ds, ontologyDAO)
dur shouldNot be(null)
}
}
"dur should have appropriate gender codes populated" in {
genderDatasets.map { ds =>
val ontologyDAO = new MockOntologyDAO
val dur: DataUseRestriction = makeDurFromWorkspace(ds, ontologyDAO)
if (ds.name.equalsIgnoreCase("Female")) {
dur.`RS-G` should be(true)
dur.`RS-FM` should be(true)
dur.`RS-M` should be(false)
} else if (ds.name.equalsIgnoreCase("Male")) {
dur.`RS-G` should be(true)
dur.`RS-FM` should be(false)
dur.`RS-M` should be(true)
} else {
dur.`RS-G` should be(false)
dur.`RS-FM` should be(false)
dur.`RS-M` should be(false)
}
}
}
"dur should have appropriate NAGR code populated" in {
nagrDatasets.map { ds =>
val ontologyDAO = new MockOntologyDAO
val dur: DataUseRestriction = makeDurFromWorkspace(ds, ontologyDAO)
if (ds.name.equalsIgnoreCase("Yes")) {
dur.NAGR should be(true)
} else {
dur.NAGR should be(false)
}
}
}
"dataset should have a true value for the consent code for which it was specified" in {
val durs: Map[String, DataUseRestriction] = booleanDatasets.flatMap { ds =>
val ontologyDAO = new MockOntologyDAO
Map(ds.name -> makeDurFromWorkspace(ds, ontologyDAO))
}.toMap
booleanCodes.map { code =>
val dur: DataUseRestriction = durs(code)
checkBooleanTrue(dur, code) should be(true)
}
}
"dataset should have the correct disease values for the consent code for which it was specified" in {
val durs: Map[String, DataUseRestriction] = diseaseDatasets.flatMap { ds =>
val ontologyDAO = new MockOntologyDAO
Map(ds.name -> makeDurFromWorkspace(ds, ontologyDAO))
}.toMap
Seq("DS").foreach { code =>
val dur: DataUseRestriction = durs(code)
checkDiseaseValues(dur, code)
}
}
}
"when there are no library data use restriction fields" - {
"dataset should not have any data use restriction for empty attributes" in {
val workspace: WorkspaceDetails = mkWorkspace(Map.empty[AttributeName, Attribute], "empty", "empty")
val ontologyDAO = new MockOntologyDAO
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(workspace, ontologyDAO).structured
attrs should be(empty)
}
"dataset should not have any data use restriction for non-library attributes" in {
val ontologyDAO = new MockOntologyDAO
val nonLibraryAttributes = Map(
AttributeName.withDefaultNS("name") -> AttributeString("one"),
AttributeName.withDefaultNS("namespace") -> AttributeString("two"),
AttributeName.withDefaultNS("workspaceId") -> AttributeString("three"),
AttributeName.withDefaultNS("authorizationDomain") -> AttributeValueList(Seq(AttributeString("one"), AttributeString("two"), AttributeString("three")))
)
val workspace: WorkspaceDetails = mkWorkspace(nonLibraryAttributes, "non-library", "non-library")
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(workspace, ontologyDAO).structured
attrs should be(empty)
}
}
}
"Display Use Restriction" - {
"when there are library data use restriction fields" - {
"valid datasets should have some form of data use display attribute" in {
val ontologyDAO = new MockOntologyDAO
validDisplayDatasets.map { ds =>
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(ds, ontologyDAO).display
val codes: Seq[String] = getValuesFromAttributeValueListAsAttribute(attrs.get(consentCodesAttributeName))
codes shouldNot be(empty)
}
}
"datasets with single boolean code should have that single display code" in {
val ontologyDAO = new MockOntologyDAO
booleanDatasets.map { ds =>
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(ds, ontologyDAO).display
val codes: Seq[String] = getValuesFromAttributeValueListAsAttribute(attrs.get(consentCodesAttributeName))
// Boolean datasets are named with the same code value
codes should contain theSameElementsAs Seq(ds.name)
}
}
"'EVERYTHING' dataset should have the right codes" in {
val ontologyDAO = new MockOntologyDAO
val attrs = generateStructuredAndDisplayAttributes(everythingDataset.head, ontologyDAO).display
val codes: Seq[String] = getValuesFromAttributeValueListAsAttribute(attrs.get(consentCodesAttributeName))
val expected = booleanCodes ++ Seq("RS-G", "RS-FM", "NAGR") ++ diseaseValuesLabels.map(s => s"DS:$s")
codes should contain theSameElementsAs expected
}
"'TOP_THREE' dataset should have the right codes" in {
val ontologyDAO = new MockOntologyDAO
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(topThreeDataset.head, ontologyDAO).display
val codes: Seq[String] = getValuesFromAttributeValueListAsAttribute(attrs.get(consentCodesAttributeName))
val expected = Seq("GRU", "HMB") ++ diseaseValuesLabels.map(s => s"DS:$s")
codes should contain theSameElementsAs expected
}
}
"when there are missing/invalid library data use restriction terms" - {
"dataset should not have any data use display codes for empty attributes" in {
val ontologyDAO = new MockOntologyDAO
val workspace: WorkspaceDetails = mkWorkspace(Map.empty[AttributeName, Attribute], "empty", "empty")
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(workspace, ontologyDAO).display
attrs should be(empty)
}
"dataset should not have any data use restriction for non-library attributes" in {
val ontologyDAO = new MockOntologyDAO
val nonLibraryAttributes: Map[AttributeName, Attribute] = Map(
AttributeName.withDefaultNS("name") -> AttributeString("one"),
AttributeName.withDefaultNS("namespace") -> AttributeString("two"),
AttributeName.withDefaultNS("workspaceId") -> AttributeString("three"),
AttributeName.withDefaultNS("authorizationDomain") -> AttributeValueList(Seq(AttributeString("one"), AttributeString("two"), AttributeString("three")))
)
val workspace: WorkspaceDetails = mkWorkspace(nonLibraryAttributes, "non-library", "non-library")
val attrs: Map[AttributeName, Attribute] = generateStructuredAndDisplayAttributes(workspace, ontologyDAO).display
attrs should be(empty)
}
}
}
"ORSP-based restrictions" - {
val ontologyDAO = new MockOntologyDAO
"when translating DUOS restrictions to FireCloud restrictions" - {
"should return no attributes if DUOS is empty" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse, ontologyDAO)
assert(attrs.isEmpty)
}
"should translate boolean TRUEs" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
generalUse = Some(true),
hmbResearch = Some(true),
commercialUse = Some(true),
pediatric = Some(true),
methodsResearch = Some(true)
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(true),
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NCU") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-PD") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NMDS") -> AttributeBoolean(true)
)
assertResult(expected) {attrs}
}
"should translate boolean FALSEs" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
generalUse = Some(false),
hmbResearch = Some(false),
commercialUse = Some(false),
pediatric = Some(false),
methodsResearch = Some(false)
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(false),
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(false),
AttributeName.withLibraryNS("NCU") -> AttributeBoolean(false),
AttributeName.withLibraryNS("RS-PD") -> AttributeBoolean(false),
AttributeName.withLibraryNS("NMDS") -> AttributeBoolean(false)
)
assertResult(expected) {attrs}
}
"should translate mixed boolean TRUE/FALSEs" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
generalUse = Some(true),
hmbResearch = Some(false),
commercialUse = Some(true),
pediatric = Some(false),
methodsResearch = Some(true)
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(true),
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(false),
AttributeName.withLibraryNS("NCU") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-PD") -> AttributeBoolean(false),
AttributeName.withLibraryNS("NMDS") -> AttributeBoolean(true)
)
assertResult(expected) {attrs}
}
"should handle 'yes' string values from DUOS as boolean TRUEs" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
aggregateResearch = Some("Yes"),
controlSetOption = Some("yeS")
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("NAGR") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NCTRL") -> AttributeBoolean(true)
)
assertResult(expected) {attrs}
}
"should handle 'no' string values from DUOS as boolean FALSEs" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
aggregateResearch = Some("No"),
controlSetOption = Some("NO")
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("NAGR") -> AttributeBoolean(false),
AttributeName.withLibraryNS("NCTRL") -> AttributeBoolean(false)
)
assertResult(expected) {attrs}
}
"should handle non-'yes' or 'no' string values from DUOS as invalid (Nones)" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
aggregateResearch = Some("yessir"),
controlSetOption = Some("nopers")
), ontologyDAO)
assert(attrs.isEmpty)
}
"should translate disease ontology nodes, including labels" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
diseaseRestrictions = Some(Seq(
"http://purl.obolibrary.org/obo/DOID_2531", // hematologic cancer
"http://purl.obolibrary.org/obo/DOID_1240", // leukemia 1510
"http://purl.obolibrary.org/obo/DOID_4325" // Ebola hemorrhagic fever
))
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("DS_URL") -> AttributeValueList(Seq(
AttributeString("http://purl.obolibrary.org/obo/DOID_2531"),
AttributeString("http://purl.obolibrary.org/obo/DOID_1240"),
AttributeString("http://purl.obolibrary.org/obo/DOID_4325")
)),
AttributeName.withLibraryNS("DS") -> AttributeValueList(Seq(
AttributeString("hematologic cancer"),
AttributeString("leukemia"),
AttributeString("Ebola hemorrhagic fever")
))
)
assertResult(expected) {attrs}
}
"should translate disease ontology nodes and use node id when label not found" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
diseaseRestrictions = Some(Seq(
"http://purl.obolibrary.org/obo/DOID_2531", // hematologic cancer
"http://purl.obolibrary.org/obo/DOID_1510", // -- not found in mock dao --
"http://purl.obolibrary.org/obo/DOID_4325" // Ebola hemorrhagic fever
))
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("DS_URL") -> AttributeValueList(Seq(
AttributeString("http://purl.obolibrary.org/obo/DOID_2531"),
AttributeString("http://purl.obolibrary.org/obo/DOID_1510"),
AttributeString("http://purl.obolibrary.org/obo/DOID_4325")
)),
AttributeName.withLibraryNS("DS") -> AttributeValueList(Seq(
AttributeString("hematologic cancer"),
AttributeString("http://purl.obolibrary.org/obo/DOID_1510"),
AttributeString("Ebola hemorrhagic fever")
))
)
assertResult(expected) {attrs}
}
"should handle empty seq of disease ontology nodes" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
diseaseRestrictions = Some(Seq.empty[String])
), ontologyDAO)
assert(attrs.isEmpty)
}
"should handle populationRestrictions string lists" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
pediatric = Some(true)
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("RS-PD") -> AttributeBoolean(true)
)
assertResult(expected) {attrs}
}
"should handle empty populationRestrictions" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
populationRestrictions = Some(Seq.empty[String])
), ontologyDAO)
assert(attrs.isEmpty)
}
"should handle gender = Male" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
gender = Some("Male")
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("RS-G") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-M") -> AttributeBoolean(true)
)
assertResult(expected) {attrs}
}
"should handle gender = Female" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
gender = Some("Female")
), ontologyDAO)
val expected = Map(
AttributeName.withLibraryNS("RS-G") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-FM") -> AttributeBoolean(true)
)
assertResult(expected) {attrs}
}
"should ignore invalid gender values" in {
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
gender = Some("invalid")
), ontologyDAO)
assert(attrs.isEmpty)
}
"should ignore the DUOS keys that FireCloud doesn't implement" in {
// The properties that are commented out are the ones that FireCloud implements.
val attrs = generateStructuredUseRestrictionAttribute(new DuosDataUse(
// generalUse = None,
// hmbResearch = None,
// diseaseRestrictions = None,
// commercialUse = None,
// methodsResearch = None,
// aggregateResearch = None,
// controlSetOption = None,
// pediatric = None,
// populationRestrictions = None,
// gender = None,
populationOriginsAncestry = Some(true),
populationStructure = Some(true),
vulnerablePopulations = Some(true),
dateRestriction = Some("today"),
recontactingDataSubjects = Some(true),
recontactMay = Some("sure"),
recontactMust = Some("yes"),
genomicPhenotypicData = Some("pheno"),
otherRestrictions = Some(true),
cloudStorage = Some("cloud"),
ethicsApprovalRequired = Some(true),
geographicalRestrictions = Some("nowhere"),
other = Some("other"),
illegalBehavior = Some(true),
addiction = Some(true),
sexualDiseases = Some(true),
stigmatizeDiseases = Some(true),
psychologicalTraits = Some(true),
nonBiomedical = Some(true)
), ontologyDAO)
assert(attrs.isEmpty)
}
}
}
"when annotating a workspace with ORSP-based data use" - {
val mockDUCodes = Map(
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(true),
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NCU") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NPU") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NMDS") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NAGR") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NCTRL") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-PD") -> AttributeBoolean(true),
AttributeName.withLibraryNS("IRB") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-G") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-FM") -> AttributeBoolean(true),
AttributeName.withLibraryNS("RS-M") -> AttributeBoolean(true),
AttributeName.withLibraryNS("DS_URL") -> AttributeValueList(Seq(AttributeString("one"),AttributeString("two"))),
AttributeName.withLibraryNS("DS_URL") -> AttributeValueList(Seq(AttributeString("five"),AttributeString("six"))),
AttributeName.withLibraryNS("consentCodes") -> AttributeValueList(Seq(AttributeString("seven"),AttributeString("eight"))),
AttributeName.withLibraryNS("structuredUseRestriction") -> AttributeValueRawJson("""{"foo":"bar"}""")
)
"should add attributes when no previous attributes exist" in {
val existing = Map.empty[AttributeName,Attribute]
val preferred = Map(
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(true)
)
val actual = replaceDataUseAttributes(existing, preferred)
val expected = Map(
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(true)
)
assertResult(expected) {actual}
}
"should not add attributes when no new attributes exist" in {
val existing = Map.empty[AttributeName,Attribute]
val preferred = Map.empty[AttributeName,Attribute]
val actual = replaceDataUseAttributes(existing, preferred)
val expected = Map.empty[AttributeName,Attribute]
assertResult(expected) {actual}
}
"should preserve pre-existing non-DU attributes" in {
val existing = Map(
AttributeName.withDefaultNS("description") -> AttributeString("my description"),
AttributeName.withLibraryNS("datasetName") -> AttributeString("my dataset")
)
val preferred = Map(
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(true)
)
val actual = replaceDataUseAttributes(existing, preferred)
val expected = Map(
AttributeName.withDefaultNS("description") -> AttributeString("my description"),
AttributeName.withLibraryNS("datasetName") -> AttributeString("my dataset"),
AttributeName.withLibraryNS("GRU") -> AttributeBoolean(true)
)
assertResult(expected) {actual}
}
"should overwrite pre-existing DU attributes" in {
val existing = Map(
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(true)
)
val preferred = Map(
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(false)
)
val actual = replaceDataUseAttributes(existing, preferred)
val expected = Map(
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(false)
)
assertResult(expected) {actual}
}
"should remove pre-existing DU attributes when adding new" in {
val existing = Map(
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(true),
AttributeName.withLibraryNS("NAGR") -> AttributeBoolean(true)
)
val preferred = Map(
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(false)
)
val actual = replaceDataUseAttributes(existing, preferred)
val expected = Map(
AttributeName.withLibraryNS("HMB") -> AttributeBoolean(false)
)
assertResult(expected) {actual}
}
"should remove pre-existing DU attributes even when adding nothing" in {
val existing = mockDUCodes
val preferred = Map.empty[AttributeName,Attribute]
val actual = replaceDataUseAttributes(existing, preferred)
val expected = Map.empty[AttributeName,Attribute]
assertResult(expected) {actual}
}
}
}
//////////////////
// Utility methods
//////////////////
private def getValuesFromAttributeValueListAsAttribute(attrs: Option[Attribute]): Seq[String] = {
(attrs collect {
case x: AttributeValueList => x.list.collect {
case a: AttributeString => a.value
}
}).getOrElse(Seq.empty[String])
}
private def makeDurFromWorkspace(ds: WorkspaceDetails, ontologyDAO: MockOntologyDAO): DataUseRestriction = {
val attrs = generateStructuredAndDisplayAttributes(ds, ontologyDAO).structured
val durAtt: Attribute = attrs.getOrElse(structuredUseRestrictionAttributeName, AttributeNull)
durAtt.toJson.convertTo[DataUseRestriction]
}
private def checkBooleanTrue(dur: DataUseRestriction, fieldName: String): Boolean = {
getFieldMap(dur).getOrElse(fieldName, false).asInstanceOf[Boolean]
}
private def checkListValues(dur: DataUseRestriction, fieldName: String): Unit = {
val fieldValue: Seq[String] = getFieldMap(dur).getOrElse(fieldName, Seq.empty[String]).asInstanceOf[Seq[String]]
listValues should contain theSameElementsAs fieldValue
}
private def checkDiseaseValues(dur: DataUseRestriction, fieldName: String): Unit = {
val fieldValue: Seq[Int] = getFieldMap(dur).getOrElse(fieldName, Seq.empty[Int]).asInstanceOf[Seq[Int]]
diseaseValuesInts should contain theSameElementsAs fieldValue
}
private def getFieldMap(dur: DataUseRestriction): Map[String, Object] = {
dur.getClass.getDeclaredFields map { f =>
f.setAccessible(true)
getFieldName(f) -> f.get(dur)
} toMap
}
// Since we have dashes in DUR field names, the value that comes back from Field.getName
// looks like "RS$minusPOP" instead of "RS-POP"
private def getFieldName(f: Field): String = {
f.getName.replace("$minus", "-")
}
}
|
broadinstitute/firecloud-orchestration
|
src/test/scala/org/broadinstitute/dsde/firecloud/service/DataUseRestrictionSupportSpec.scala
|
Scala
|
bsd-3-clause
| 29,779 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.transform.vision.image.augmentation
import com.intel.analytics.bigdl.transform.vision.image.{ImageFrame, LocalImageFrame}
import org.opencv.imgcodecs.Imgcodecs
import org.scalatest.{FlatSpec, Matchers}
class ChannelOrderSpec extends FlatSpec with Matchers {
val resource = getClass.getClassLoader.getResource("pascal/")
"ChannelOrder" should "work properly" in {
val data = ImageFrame.read(resource.getFile)
val transformer = ChannelOrder()
val transformed = transformer(data)
val imf = transformed.asInstanceOf[LocalImageFrame].array(0)
imf.getHeight() should be (imf.getOriginalHeight)
imf.getWidth() should be (imf.getOriginalWidth)
val tmpFile = java.io.File.createTempFile("module", ".jpg")
Imgcodecs.imwrite(tmpFile.toString, imf.opencvMat())
println(tmpFile)
}
}
|
yiheng/BigDL
|
spark/dl/src/test/scala/com/intel/analytics/bigdl/transform/vision/image/augmentation/ChannelOrderSpec.scala
|
Scala
|
apache-2.0
| 1,451 |
/*
* Copyright 2015 Foundational Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package pro.foundev
package object commons {
}
|
rssvihla/datastax_work
|
spark_commons/commons/src/test/scala/pro/foundev/commons/package.scala
|
Scala
|
apache-2.0
| 680 |
/*
* Copyright 2015 org.NLP4L
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.nlp4l.framework.processors
import com.typesafe.config.{Config, ConfigFactory, ConfigParseOptions, ConfigSyntax}
import org.joda.time.DateTime
import org.nlp4l.framework.builtin.{Job, JobStatus, MergeProcessor, ReplayProcessor, SortProcessor}
import org.nlp4l.framework.dao.{JobDAO, RunDAO}
import org.nlp4l.framework.models.{Dictionary, DictionaryAttribute, Record}
import play.api.Logger
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import scala.collection.JavaConversions.{asScalaBuffer, asScalaSet}
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.util.{Failure, Success}
import collection.JavaConversions._
object Constants {
val WRAPPROCESSOR_CLASS = "org.nlp4l.framework.processors.WrapProcessor"
val SORTPROCESSOR_CLASS = "org.nlp4l.framework.builtin.SortProcessor"
val MERGEPROCESSOR_CLASS = "org.nlp4l.framework.builtin.MergeProcessor"
val REPLAYPROCESSOR_CLASS = "org.nlp4l.framework.builtin.ReplayProcessor"
}
class WrapProcessor(val childList: Seq[RecordProcessor]) extends Processor {
override def execute(data: Option[Dictionary]): Option[Dictionary] = {
val reclist = ListBuffer.empty[Record]
data map { dic =>
dic.recordList foreach { rec: Record =>
var rec2:Option[Record] = Some(rec)
childList foreach { recProc: RecordProcessor =>
if(rec2 != None)
rec2 = recProc.execute(rec2)
}
if(rec2 != None) reclist += rec2.get
}
}
Some(Dictionary(reclist))
}
}
class ProcessorChain (val chain: List[Processor]) {
private val logger = Logger(this.getClass)
def process(jobDAO: JobDAO, runDAO: RunDAO, jobId: Int, dicAttr: DictionaryAttribute) = {
val job = Await.result(jobDAO.get(jobId), scala.concurrent.duration.Duration.Inf)
val runId = job.lastRunId + 1
jobDAO.update(Job(job.jobId, job.name, job.config, runId, Some(new DateTime()), job.lastDeployAt))
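    // Clarifying note (added; not in the original NLP4L source): the nested loop below
    // walks the processor list, applies the special post-processing for SortProcessor,
    // ReplayProcessor and MergeProcessor, updates the JobStatus after each stage, and
    // persists the final Dictionary via ProcessorChain.outputResult at the last stage.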
def loop(li: List[Processor], js: JobStatus, data:Option[Dictionary] = None): Unit = {
val indata = data match {
case Some(d) => Some(d.setUserDefinedHashCode(dicAttr))
case None => None
}
try {
li match {
case Nil => ()
case head :: Nil =>
var out: Option[Dictionary] = head.execute(indata)
val cname = head.asInstanceOf[AnyRef].getClass.getName
if(cname == Constants.SORTPROCESSOR_CLASS) {
out = head.asInstanceOf[SortProcessor].sort(jobDAO, runDAO, jobId, runId, dicAttr, out)
} else if(cname == Constants.REPLAYPROCESSOR_CLASS) {
out = head.asInstanceOf[ReplayProcessor].replay(jobDAO, runDAO, jobId, dicAttr, out)
} else if(cname == Constants.MERGEPROCESSOR_CLASS) {
out = head.asInstanceOf[MergeProcessor].merge(dicAttr, out)
}
runDAO.updateJobStatus(JobStatus(js.id, js.jobId, js.runId, js.total, js.total-li.size+1))
ProcessorChain.outputResult(jobDAO, runDAO, jobId, runId, dicAttr, out)
case head :: tail =>
var out:Option[Dictionary] = head.execute(indata)
val cname = head.asInstanceOf[AnyRef].getClass.getName
if(cname == Constants.SORTPROCESSOR_CLASS) {
out = head.asInstanceOf[SortProcessor].sort(jobDAO, runDAO, jobId, runId, dicAttr, out)
} else if(cname == Constants.REPLAYPROCESSOR_CLASS) {
out = head.asInstanceOf[ReplayProcessor].replay(jobDAO, runDAO, jobId, dicAttr, out)
} else if(cname == Constants.MERGEPROCESSOR_CLASS) {
out = head.asInstanceOf[MergeProcessor].merge(dicAttr, out)
}
val newjs = JobStatus(js.id, js.jobId, js.runId, js.total, js.total-li.size+1)
runDAO.updateJobStatus(newjs)
loop(tail, newjs, out)
}
} catch {
case e: Exception => {
val errjs = JobStatus(js.id, js.jobId, js.runId, js.total, js.done, e.getMessage)
runDAO.updateJobStatus(errjs)
logger.error(e.getMessage, e)
}
}
}
val js = JobStatus(None, jobId, runId, chain.size, 0)
runDAO.insertJobStatus(js) map {newjs =>
loop(chain, newjs)
}
}
}
object ProcessorChain {
private val logger = Logger(this.getClass)
// Processor
private var mapP: Map[Int, ProcessorChain] = Map()
def chainMap: Map[Int, ProcessorChain] = mapP
// DictionaryAttribute
private var mapD: Map[Int, DictionaryAttribute] = Map()
def dicMap: Map[Int, DictionaryAttribute] = mapD
def loadChain(jobDAO: JobDAO, jobId: Int): Unit = {
jobDAO.get(jobId).map(
job => {
val pcb = new ProcessorChainBuilder()
mapP += (jobId -> pcb.procBuild(jobId, job.config).result())
val dicAttr = pcb.dicBuild(job.config)
// Replay data
var addedRecordList: Map[Int, Record] = Map()
var modifiedRecordList: Map[Int, Record] = Map()
val aa = jobDAO.fetchReplayOfAdd(jobId)
jobDAO.fetchReplayOfAdd(jobId) foreach { hd: (Int, Int) =>
val runId: Int = hd._1
val hashcode: Int = hd._2
jobDAO.fetchRecordByHashcode(jobId, runId, hashcode) map { rec: Record =>
addedRecordList += (hashcode -> rec.setUserDefinedHashCode(dicAttr))
}
}
val modifiedList: List[(Int, Int, Int)] = jobDAO.fetchReplayOfMod(jobId)
jobDAO.fetchReplayOfMod(jobId) foreach { hd: (Int, Int, Int) =>
val runId: Int = hd._1
val hashcode: Int = hd._2
val modToHashcode: Int = hd._3
jobDAO.fetchRecordByHashcode(jobId, runId, modToHashcode) map { rec: Record =>
modifiedRecordList += (hashcode -> rec.setUserDefinedHashCode(dicAttr))
}
}
dicAttr.addedRecordList = addedRecordList
dicAttr.modifiedRecordList = modifiedRecordList
dicAttr.deletedRecordList = jobDAO.fetchReplayOfDel(jobId)
mapD += (jobId -> dicAttr)
}
)
}
def getChain(jobDAO: JobDAO, runDAO: RunDAO, jobId: Int): ProcessorChain = {
val job = Await.result(jobDAO.get(jobId), scala.concurrent.duration.Duration.Inf)
try {
val pc = new ProcessorChainBuilder().procBuild(jobId, job.config).result()
pc
} catch {
case e: Exception => {
val runId = job.lastRunId + 1
val errjs = JobStatus(None, jobId, runId, 0, 0, e.getMessage)
runDAO.insertJobStatus(errjs)
logger.error(e.getMessage, e)
throw e
}
}
}
def getDictionaryAttribute(jobDAO: JobDAO, jobId: Int): DictionaryAttribute = {
val job = Await.result(jobDAO.get(jobId), scala.concurrent.duration.Duration.Inf)
val pcb = new ProcessorChainBuilder()
val dicAttr = pcb.dicBuild(job.config)
// Replay data
var addedRecordList: Map[Int, Record] = Map()
var modifiedRecordList: Map[Int, Record] = Map()
jobDAO.fetchReplayOfAdd(jobId) foreach { hd: (Int, Int) =>
val runId: Int = hd._1
val hashcode: Int = hd._2
jobDAO.fetchRecordByHashcode(jobId, runId, hashcode) map { rec: Record =>
addedRecordList += (hashcode -> rec.setUserDefinedHashCode(dicAttr))
}
}
val modifiedList: List[(Int, Int, Int)] = jobDAO.fetchReplayOfMod(jobId)
jobDAO.fetchReplayOfMod(jobId) foreach { hd: (Int, Int, Int) =>
val runId: Int = hd._1
val hashcode: Int = hd._2
val modToHashcode: Int = hd._3
jobDAO.fetchRecordByHashcode(jobId, runId, modToHashcode) map { rec: Record =>
modifiedRecordList += (hashcode -> rec.setUserDefinedHashCode(dicAttr))
}
}
dicAttr.addedRecordList = addedRecordList
dicAttr.modifiedRecordList = modifiedRecordList
dicAttr.deletedRecordList = jobDAO.fetchReplayOfDel(jobId)
dicAttr
}
/**
* Save the Dictionary to database
*/
def outputResult(jobDAO: JobDAO, runDAO: RunDAO, jobId: Int, runId: Int, dicAttr: DictionaryAttribute, dic: Option[Dictionary]): Unit = {
jobDAO.get(jobId) map {job: Job =>
dic map { d =>
val d2 = d.setUserDefinedHashCode(dicAttr)
val f1 = runDAO.dropTable(jobId, runId)
Await.ready(f1, scala.concurrent.duration.Duration.Inf)
f1.value.get match {
case Success(n) => n
case Failure(ex) => logger.debug(ex.getMessage, ex)
}
val f2 = runDAO.createTable(jobId, runId, dicAttr)
Await.ready(f2, scala.concurrent.duration.Duration.Inf)
f2.value.get match {
case Success(n) => runDAO.insertData(jobId, runId, dicAttr, d2)
case Failure(ex) => throw(ex)
}
}
}
}
/**
* Validate the uploaded job config file
*/
def validateConf(confStr: String): Boolean = {
try {
val config = ConfigFactory.parseString(confStr, ConfigParseOptions.defaults().setSyntax(ConfigSyntax.CONF))
if (!config.hasPath("dictionary") || !config.hasPath("processors")) false
else {
val b1 = config.getConfigList("dictionary").toList.forall {
pConf => pConf.hasPath("class")
}
val b2 = config.getConfigList("processors").toList.forall {
pConf => pConf.hasPath("class")
}
val b3 =
if (!config.hasPath("writer")) true
else {
config.getConfig("writer").hasPath("class")
}
val b4 =
if (!config.hasPath("validators")) true
else {
config.getConfigList("validators").toList.forall {
pConf => pConf.hasPath("class")
}
}
b1 && b2 && b3 && b4
}
} catch {
case e: Exception => {
logger.error(e.getMessage, e)
false
}
}
}
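  // Illustrative note (added; not part of the original source): judging from the checks
  // in validateConf above, a minimal acceptable config declares a "dictionary" list and
  // a "processors" list whose entries each provide a "class"; "writer" and "validators"
  // are optional. A hedged sketch with hypothetical class names:
  //
  //   dictionary : [ { class : org.example.MyDictionaryAttributeFactory } ]
  //   processors : [ { class : org.example.MyProcessorFactory, settings : { key : value } } ]
  //
  // The per-processor "settings" block is optional and, when the chain is built, is merged
  // with the global "settings" via withFallback in ProcessorChainBuilder.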
}
class ProcessorChainBuilder() {
val logger = Logger(this.getClass)
val buf = mutable.ArrayBuffer[Processor]()
def procBuild(jobId: Int, confStr: String): ProcessorChainBuilder = {
val config = ConfigFactory.parseString(confStr)
val gSettings = getConfig(config, "settings")
config.getConfigList("processors").foreach {
pConf =>
val className = pConf.getString("class")
if(className == Constants.WRAPPROCESSOR_CLASS) {
buf += wrapBuild(pConf)
} else {
val constructor = Class.forName(className).getConstructor(classOf[Config])
val lSettings = getConfig(pConf, "settings")
val settings = lSettings.withFallback(gSettings)
val facP = constructor.newInstance(settings).asInstanceOf[ProcessorFactory]
val p = facP.getInstance()
buf += p
}
}
this
}
def dicBuild(confStr: String): DictionaryAttribute = {
val config = ConfigFactory.parseString(confStr)
val gSettings = getConfig(config, "settings")
val pConf = config.getConfigList("dictionary").get(0)
val className = pConf.getString("class")
val constructor = Class.forName(className).getConstructor(classOf[Config])
val lSettings = getConfig(pConf, "settings")
val settings = lSettings.withFallback(gSettings)
val facP = constructor.newInstance(settings).asInstanceOf[DictionaryAttributeFactory]
facP.getInstance()
}
def getConfig(src: Config, key: String): Config = {
if(src.hasPath(key)) src.getConfig(key) else ConfigFactory.empty()
}
def wrapBuild(wrapConf: Config): Processor = {
var buf: Seq[RecordProcessor] = Seq()
val pConf = wrapConf.getConfigList("recordProcessors").get(0)
try {
val className = pConf.getString("class")
val constructor = Class.forName(className).getConstructor(classOf[Config])
val settings = getConfig(pConf, "settings")
val facP = constructor.newInstance(settings).asInstanceOf[RecordProcessorFactory]
val p = facP.getInstance()
buf = buf :+ p
} catch {
case e: Exception => {
logger.error(e.getMessage, e)
throw e
}
}
val className = Constants.WRAPPROCESSOR_CLASS
val constructor = Class.forName(className).getConstructor(classOf[Seq[RecordProcessor]])
constructor.newInstance(buf).asInstanceOf[WrapProcessor]
}
def result() = new ProcessorChain(buf.toList)
}
|
NLP4L/framework
|
app/org/nlp4l/framework/processors/ProcessorChain.scala
|
Scala
|
apache-2.0
| 12,963 |
package com.twitter.finagle.netty4.http.handler
import io.netty.buffer.Unpooled
import io.netty.channel.embedded.EmbeddedChannel
import io.netty.handler.codec.DecoderResult
import io.netty.handler.codec.http.{DefaultHttpContent, HttpContent}
import org.scalatest.{FunSuite, OneInstancePerTest}
class ClientExceptionMapperTest extends FunSuite with OneInstancePerTest {
val embed: EmbeddedChannel = new EmbeddedChannel(ClientExceptionMapper)
test("convert failed decoder result") {
val boom = new Exception("boom")
val o = new DefaultHttpContent(Unpooled.wrappedBuffer("zoom".getBytes("UTF-8")))
o.setDecoderResult(DecoderResult.failure(boom))
assert(o.refCnt() == 1)
assert(intercept[Exception](embed.writeInbound(o)) == boom)
assert(o.refCnt() == 0)
assert(!embed.finish())
}
test("bypass successful decoder result") {
val o = new DefaultHttpContent(Unpooled.EMPTY_BUFFER)
assert(embed.writeInbound(o))
assert(o eq embed.readInbound[HttpContent]())
assert(!embed.finish())
}
}
|
luciferous/finagle
|
finagle-netty4-http/src/test/scala/com/twitter/finagle/netty4/http/handler/ClientExceptionMapperTest.scala
|
Scala
|
apache-2.0
| 1,039 |
package org.jetbrains.sbt.shell
import com.intellij.notification.{NotificationGroup, NotificationType}
object SbtShellNotifications {
val notificationGroup: NotificationGroup = NotificationGroup.balloonGroup("Sbt Shell")
}
|
jastice/intellij-scala
|
scala/scala-impl/src/org/jetbrains/sbt/shell/SbtShellNotifications.scala
|
Scala
|
apache-2.0
| 227 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.osgi
import javax.inject.Inject
import scala.collection.JavaConverters._
import org.junit.Test
import org.junit.runner.RunWith
import org.ops4j.pax.exam.CoreOptions._
import org.ops4j.pax.exam.junit.{JUnit4TestRunner, Configuration}
import org.osgi.framework.{Bundle, BundleContext}
import org.osgi.framework.wiring._
import org.scalatest.junit._
@RunWith(classOf[JUnit4TestRunner])
class OsgiSuite extends JUnitSuite with ShouldMatchersForJUnit {
@Inject var context: BundleContext = _
@Configuration def config = options(
junitBundles,
bundle("file:target/dist/lib/scalatest.jar"),
bundle("file:target/dist/lib/scalactic.jar"),
scalaBundles
)
private def scalaBundles = composite(
mavenBundle.groupId("org.scala-lang").artifactId("scala-library").version(compiledAgainstScalaVersionString),
mavenBundle.groupId("org.scala-lang").artifactId("scala-reflect").version(compiledAgainstScalaVersionString),
mavenBundle.groupId("org.scala-lang").artifactId("scala-compiler").version(compiledAgainstScalaVersionString)
)
@Test def verifyScalaTestBundlesResolve {
bundleNamed("org.scalatest") should be ('defined)
bundleNamed("org.scalactic") should be ('defined)
}
@Test def scalaPackageImportsUseVersionRangeForCurrentMinorUpToNextMinor {
checkScalaPackage(bundleNamed("org.scalatest").get)
checkScalaPackage(bundleNamed("org.scalactic").get)
}
private def bundleNamed(symbolicName: String): Option[Bundle] =
context.getBundles.find { _.getSymbolicName == symbolicName }
private def checkScalaPackage(bundle: Bundle) = {
def packageName(wire: BundleWire) = wire.getCapability.getAttributes.get(BundleRevision.PACKAGE_NAMESPACE)
def scalaWire(wire: BundleWire) = packageName(wire) == "scala"
val scalaPackageImportFilter = bundle.adapt(classOf[BundleWiring]).
getRequiredWires(BundleRevision.PACKAGE_NAMESPACE).asScala.
find(scalaWire).
getOrElse(throw new IllegalStateException("Bundle %s does not import scala package".format(bundle.getSymbolicName))).
getRequirement.
getDirectives.asScala.
get("filter").
getOrElse(throw new IllegalStateException("Bundle %s imports scala package but does not specify a version range".format(bundle.getSymbolicName)))
val version = compiledAgainstScalaVersion
val lowerBound = "%s.%s.0".format(version.major, version.minor)
val upperBound = "%s.%s.0".format(version.major, version.minor + 1)
val expectedImportFilter = "(&(osgi.wiring.package=scala)(version>=%s)(!(version>=%s)))".format(lowerBound, upperBound)
scalaPackageImportFilter should be (expectedImportFilter)
}
private def compiledAgainstScalaVersionString: String = {
scala.util.Properties.
propOrNone("scala.version").
getOrElse(throw new IllegalStateException("scala.version system property is not set"))
}
private def compiledAgainstScalaVersion: Version = {
val ScalaVersion = """(\\d+)\\.(\\d+)\\.(\\d+).*""".r
val ScalaVersion(major, minor, micro) = compiledAgainstScalaVersionString
Version(major.toInt, minor.toInt, micro.toInt)
}
case class Version(major: Int, minor: Int, micro: Int)
}
|
travisbrown/scalatest
|
src/test-osgi/scala/org/scalatest/osgi/OsgiSuite.scala
|
Scala
|
apache-2.0
| 3,806 |
/******************************************************************************
* Copyright (c) 2014, Equal Experts Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of the Midas Project.
******************************************************************************/
package com.ee.midas.pipes
import org.junit.runner.RunWith
import org.specs2.mock.Mockito
import java.io.{OutputStream, InputStream, ByteArrayOutputStream, ByteArrayInputStream}
import org.mockito.runners.MockitoJUnitRunner
import org.specs2.matcher.JUnitMustMatchers
import org.junit.Test
@RunWith(classOf[MockitoJUnitRunner])
class SimplexPipeSpecs extends JUnitMustMatchers with Mockito {
val pipeName = "test-pipe"
@Test
def transferDataFromSourceToDestination() {
//given
val data = "Hello World".getBytes()
val source = new ByteArrayInputStream(data)
val destination = new ByteArrayOutputStream()
val simplexPipe = new SimplexPipe(pipeName, source, destination)
//when
simplexPipe.run()
source.close()
destination.close()
//then
destination.toByteArray() must beEqualTo(data)
}
@Test
def closeOnForceStop() {
//given
val mockInputStream = mock[InputStream]
val mockOutputStream = mock[OutputStream]
val pipe = new SimplexPipe(pipeName, mockInputStream, mockOutputStream)
//when
pipe.forceStop
//then
there was one(mockInputStream).close()
there was one(mockOutputStream).close()
pipe.isActive must beFalse
}
@Test
def stopGracefully() {
//given
val mockInputStream = mock[InputStream]
val mockOutputStream = mock[OutputStream]
val pipe = new SimplexPipe(pipeName, mockInputStream, mockOutputStream)
scheduleStopToRunAfter(pipe, 550)
//when
pipe.run
//then
pipe.isActive must beFalse
}
  def scheduleStopToRunAfter(pipe: SimplexPipe, time: Int): Unit = {
    new Thread {
      override def run(): Unit = {
        Thread.sleep(time)
        pipe.stop
      }
    }.start()
  }
}
|
EqualExperts/Midas
|
src/test/scala/com/ee/midas/pipes/SimplexPipeSpecs.scala
|
Scala
|
bsd-2-clause
| 3,412 |
package com.wavesplatform.database
import com.wavesplatform.metrics.LevelDBStats
import com.wavesplatform.metrics.LevelDBStats.DbHistogramExt
import org.iq80.leveldb.{DB, DBIterator, ReadOptions}
import scala.annotation.tailrec
class ReadOnlyDB(db: DB, readOptions: ReadOptions) {
def get[V](key: Key[V]): V = {
val bytes = db.get(key.keyBytes, readOptions)
LevelDBStats.read.recordTagged(key, bytes)
key.parse(bytes)
}
def has[V](key: Key[V]): Boolean = {
val bytes = db.get(key.keyBytes, readOptions)
LevelDBStats.read.recordTagged(key, bytes)
bytes != null
}
def newIterator: DBIterator = db.iterator(readOptions)
def iterateOver(tag: KeyTags.KeyTag)(f: DBEntry => Unit): Unit = iterateOver(tag.prefixBytes)(f)
def iterateOver(prefix: Array[Byte])(f: DBEntry => Unit): Unit = {
val iterator = db.iterator(readOptions)
try {
iterator.seek(prefix)
while (iterator.hasNext && iterator.peekNext().getKey.startsWith(prefix)) f(iterator.next())
} finally iterator.close()
}
def prefixExists(prefix: Array[Byte]): Boolean = {
val iterator = db.iterator(readOptions)
iterator.seek(prefix)
val res = iterator.hasNext && iterator.peekNext().getKey.startsWith(prefix)
iterator.close()
res
}
def read[T](keyName: String, prefix: Array[Byte], seek: Array[Byte], n: Int)(deserialize: DBEntry => T): Vector[T] = {
val iter = newIterator
@tailrec def loop(aux: Vector[T], restN: Int, totalBytesRead: Long): (Vector[T], Long) =
if (restN > 0 && iter.hasNext) {
val elem = iter.next()
if (elem.getKey.startsWith(prefix)) loop(aux :+ deserialize(elem), restN - 1, totalBytesRead + elem.getValue.length)
else (aux, totalBytesRead)
} else (aux, totalBytesRead)
try {
iter.seek(seek)
val (r, totalBytesRead) = loop(Vector.empty, n, 0)
LevelDBStats.read.recordTagged(keyName, totalBytesRead)
r
} finally iter.close()
}
}
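// Illustrative usage sketch (added; not part of the original Waves source). Assuming a
// LevelDB handle `db`, a `readOptions` instance and key/prefix byte arrays are in scope,
// typical calls might look like:
//
//   val ro = new ReadOnlyDB(db, readOptions)
//   ro.iterateOver(prefix) { entry => /* consume entry.getKey / entry.getValue */ }
//   val page = ro.read("some-tag", prefix, seek = prefix, n = 100)(_.getValue)
//
// The "some-tag" metric name and the prefix values are placeholders, not values taken
// from the original code.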
|
wavesplatform/Waves
|
node/src/main/scala/com/wavesplatform/database/ReadOnlyDB.scala
|
Scala
|
mit
| 1,979 |
package spire
package math
import org.scalatest.FunSuite
import spire.std.int._
class LiteralsTest extends FunSuite {
test("byte literals") {
import spire.syntax.literals._
assert(b"-128" === (-128:Byte))
assert(b"-100" === (-100:Byte))
assert(b"0" === (0:Byte))
assert(b"100" === (100:Byte))
assert(b"127" === (127:Byte))
assert(b"128" === (-128:Byte))
assert(b"255" === (-1:Byte))
}
test("illegal byte literals") {
import spire.macros._
def tryit(s: String) = Macros.parseNumber(s, BigInt(-128), BigInt(255))
assert(tryit("-129") === Left("illegal constant: -129"))
assert(tryit("256") === Left("illegal constant: 256"))
assert(tryit("10000") === Left("illegal constant: 10000"))
assert(tryit("abc") === Left("illegal constant: abc"))
}
test("short literals") {
import spire.syntax.literals._
assert(h"-32768" === (-32768:Short))
assert(h"-10000" === (-10000:Short))
assert(h"0" === (0:Short))
assert(h"10012" === (10012:Short))
assert(h"32767" === (32767:Short))
assert(h"32768" === (-32768:Short))
assert(h"65535" === (-1:Short))
}
test("int operators") {
import spire.syntax.std.int._
import spire.syntax.nroot._
assert((5 ** 2) === 25)
assert((5 /~ 2) === 2)
assert((5 /% 2) === ((2, 1)))
assert(25.sqrt === 5)
}
test("inter-type operators") {
import spire.std.double._
val c = Complex(2.0, 3.0)
val q = Rational(4, 5)
val r = Algebraic(3.0)
assert(c + 1 === Complex(3.0, 3.0))
assert(1 + c === Complex(3.0, 3.0))
assert(q + 1 === Rational(9, 5))
assert(1 + q === Rational(9, 5))
assert(r + 1 === Algebraic(4.0))
assert(1 + r === Algebraic(4.0))
}
}
|
tixxit/spire
|
tests/src/test/scala/spire/math/LiteralsTest.scala
|
Scala
|
mit
| 1,738 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze
import edu.latrobe._
import scala.collection._
/**
* Unlike modules, regularizers operate directly on the weights/gradients
* level. They can - for example - be used to constraint weights from growing
* too large or too small.
*
* Note that regularizers may or may not change the cost of the model.
*/
abstract class Regularizer
extends InstanceEx[RegularizerBuilder]
with ParameterizedInstance {
final val baseScope
: Option[NullBuffer] = builder.baseScope
/**
* Must implement as constructor argument!
*/
def platformHint
: Option[Platform]
// ---------------------------------------------------------------------------
// Forward propagation related.
// ---------------------------------------------------------------------------
def evaluate(phaseNo: Long,
weights: ValueTensorBuffer,
input: Tensor,
reference: Tensor,
output: Tensor)
: Real
final def evaluate(phaseNo: Long,
weights: ValueTensorBuffer,
batch: Batch)
: Real = evaluate(
phaseNo,
weights,
batch.input,
batch.output,
null
)
final def evaluate(phaseNo: Long,
weights: ValueTensorBuffer,
context: BackpropagationContext)
: Real = evaluate(
phaseNo,
weights,
context.input,
context.reference,
context.output
)
// ---------------------------------------------------------------------------
// Back propagation related.
// ---------------------------------------------------------------------------
def deriveGradients(phaseNo: Long,
weights: ValueTensorBuffer,
input: Tensor,
reference: Tensor,
output: Tensor,
sink: ValueTensorBuffer)
: Unit
final def deriveGradients(phaseNo: Long,
weights: ValueTensorBuffer,
context: BackpropagationContext,
sink: ValueTensorBuffer)
: Unit = deriveGradients(
phaseNo,
weights,
context.input,
context.reference,
context.output,
sink
)
// ---------------------------------------------------------------------------
// State management.
// ---------------------------------------------------------------------------
override def state
: RegularizerState = RegularizerStateEx(super.state)
override def restoreState(state: InstanceState)
: Unit = {
super.restoreState(state.parent)
state match {
case state: RegularizerStateEx =>
case _ =>
throw new MatchError(state)
}
}
}
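// Illustrative note (added; not part of the original source): a concrete weight-decay
// style regularizer would extend Regularizer (or RegularizerEx), return the penalty for
// the current `weights` from evaluate(), and accumulate the matching gradient
// contribution into `sink` in deriveGradients(). The exact tensor arithmetic depends on
// the ValueTensorBuffer API and is deliberately left out of this sketch.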
abstract class RegularizerBuilder
extends InstanceExBuilder1[RegularizerBuilder, Regularizer, Option[Platform]]
with VariantBuilder {
final var _baseScope
: Option[NullBuffer] = None
final def baseScope
: Option[NullBuffer] = _baseScope
final def baseScope_=(value: Option[NullBuffer])
: Unit = {
require(value != null)
_baseScope = value
}
def setBaseScope(value: Option[NullBuffer])
: RegularizerBuilder
def setBaseScope(value: NullBuffer)
: RegularizerBuilder
// ---------------------------------------------------------------------------
// Object building related.
// ---------------------------------------------------------------------------
override def build(platformHint: Option[Platform],
seed: InstanceSeed)
: Regularizer
final def build(platformHint: Platform)
: Regularizer = build(platformHint, InstanceSeed.default)
final def build(platformHint: Platform,
seed: InstanceSeed)
: Regularizer = build(Option(platformHint), seed)
}
abstract class RegularizerEx[TBuilder <: RegularizerExBuilder[_]]
extends Regularizer {
override def builder
: TBuilder
}
abstract class RegularizerExBuilder[TThis <: RegularizerExBuilder[_]]
extends RegularizerBuilder
with VariantBuilderEx[TThis] {
override def repr
: TThis
override protected def doCopy()
: TThis
final override def setBaseScope(value: Option[NullBuffer])
: TThis = {
baseScope_=(value)
repr
}
final override def setBaseScope(value: NullBuffer)
: TThis = setBaseScope(Option(value))
}
abstract class RegularizerVariantDescription[TBuilder <: RegularizerExBuilder[TBuilder]]
extends VariantDescription[TBuilder] {
final def score(builder: TBuilder,
platformHint: Option[Platform],
priority: Byte)
: (Int, Array[String]) = {
val reasons = Array.newBuilder[String]
var result = baseScore(builder, priority, reasons)
// Platform
if (platformHint.exists(_ == platform)) {
result |= 1 << 24
reasons += "build level platform preference"
}
// Score overrides.
result = doScore(builder, platformHint, result, reasons)
(result, reasons.result())
}
protected def doScore(builder: TBuilder,
platformHint: Option[Platform],
scorePrev: Int,
reasons: mutable.ArrayBuilder[String])
: Int = scorePrev
def build(builder: TBuilder,
platformHint: Option[Platform],
seed: InstanceSeed)
: Regularizer
}
class RegularizerVariantTable[TBuilder <: RegularizerExBuilder[TBuilder]]
extends VariantTable[TBuilder, RegularizerVariantDescription[TBuilder]] {
final def lookup(builder: TBuilder,
platformHint: Option[Platform])
: RegularizerVariantDescription[TBuilder] = {
// Score the variants and select variant with highest score.
var highestScore: Int = 0
var highestDesc: RegularizerVariantDescription[TBuilder] = null
MapEx.foreach(variants)((desc, priority) => {
val (score, reasons) = desc.score(builder, platformHint, priority)
if (logger.isDebugEnabled) {
val sb = StringBuilder.newBuilder
ArrayEx.foreach(reasons)(reason => {
sb ++= reason
sb ++= ", "
})
sb.length = Math.max(sb.length - 2, 0)
logger.debug(f"$builder%s: $score%08x => $desc%s, $sb%s")
}
if (score > highestScore) {
highestScore = score
highestDesc = desc
}
})
if (highestDesc == null) {
throw new UnsupportedOperationException("Unable to determine a compatible variant!")
}
if (logger.isInfoEnabled) {
logger.info(f"$builder%s: $highestDesc%s selected!")
}
highestDesc
}
final def lookupAndBuild(builder: TBuilder,
platformHint: Option[Platform],
seed: InstanceSeed)
: Regularizer = {
// Score the the variants.
val desc = lookup(builder, platformHint)
// Instantiate highest and return.
desc.build(builder, platformHint, seed)
}
}
abstract class RegularizerState
extends InstanceState
final case class RegularizerStateEx(override val parent: InstanceState)
extends RegularizerState
|
bashimao/ltudl
|
blaze/src/main/scala/edu/latrobe/blaze/Regularizer.scala
|
Scala
|
apache-2.0
| 7,847 |
package dpla.ingestion3.mappers.providers
import dpla.ingestion3.mappers.utils.Document
import dpla.ingestion3.messages.{IngestMessage, MessageCollector}
import dpla.ingestion3.model._
import dpla.ingestion3.utils.FlatFileIO
import org.scalatest.{BeforeAndAfter, FlatSpec}
import scala.xml.{NodeSeq, XML}
class HathiMappingTest extends FlatSpec with BeforeAndAfter {
implicit val msgCollector: MessageCollector[IngestMessage] = new MessageCollector[IngestMessage]
val shortName = "hathi"
val xmlString: String = new FlatFileIO().readFileAsString("/hathi.xml")
val xml: Document[NodeSeq] = Document(XML.loadString(xmlString))
val extractor = new HathiMapping
it should "use the provider shortname in minting IDs " in
assert(extractor.useProviderName)
it should "extract the correct original identifier " in {
val expected = Some("009420214")
assert(extractor.originalId(xml) === expected)
}
it should "create the correct DPLA URI" in {
val expected = Some(URI("http://dp.la/api/items/0000090ef074502284f365460fc14c42"))
assert(extractor.dplaUri(xml) === expected)
}
it should "extract the correct contributor" in {
val expected = Seq("Everhart, Benjamin Matlock, 1818-1904.", "Academy of Natural Sciences of Philadelphia.")
.map(nameOnlyAgent)
assert(extractor.contributor(xml) == expected)
}
it should "extract the correct creator" in {
val expected = Seq("Ellis, Job Bicknell, 1829-1905.").map(nameOnlyAgent)
assert(extractor.creator(xml) == expected)
}
it should "extract the correct date" in {
val expected = Seq("[1893]").map(stringOnlyTimeSpan)
assert(extractor.date(xml) === expected)
}
it should "extract year-year date"in {
val xml =
<record>
<controlfield tag="008">881109m18831887iau 000 0 eng d</controlfield>
</record>
val expected = Seq("1883-1887").map(stringOnlyTimeSpan)
assert(extractor.date(Document(xml)) === expected)
}
it should "remove 9999 from end of date range" in {
val xml =
<record>
<controlfield tag="008">750822c18539999nikfrzp 0 a0eng</controlfield>
</record>
val expected = Seq("1853").map(stringOnlyTimeSpan)
assert(extractor.date(Document(xml)) === expected)
}
it should "extract the correct description" in {
val expected =
Seq("From the Proceedings of The academy of natural science of Philadelphia, 1893.", "A second description.")
assert(extractor.description(xml) == expected)
}
it should "extract the correct extent" in {
val expected = Seq("1, 128-172 p.")
assert(extractor.extent(xml) == expected)
}
it should "extract the correct format" in {
val expected = Seq("Language material", "Electronic resource", "unmediated", "volume", "Periodicals.")
assert(extractor.format(xml) === expected)
}
it should "handle multiple formats from control field" in {
val xml =
<record>
<controlfield tag="007">cr bn ---auaua</controlfield>
<controlfield tag="007">he bmb020baca</controlfield>
</record>
val expected = Seq("Electronic resource", "Microform")
assert(extractor.format(Document(xml)) === expected)
}
it should "fail gracefully if leader format key is unexpected" in {
val xml =
<record>
<leader>00737nxm a22002051 4500</leader>
</record>
val expected = Seq()
assert(extractor.format(Document(xml)) === expected)
}
it should "fail gracefully if control format key is unexpected" in {
val xml =
<record>
<controlfield tag="007">xr bn ---auaua</controlfield>
</record>
val expected = Seq()
assert(extractor.format(Document(xml)) === expected)
}
it should "extract the correct identifier" in {
val expected = Seq(
"sdr-pst.a164965",
"(OCoLC)316829673",
"LIAS256769",
"Hathi: 009420214")
assert(extractor.identifier(xml) == expected)
}
it should "extract the correct ISBN identifier" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="020">
<subfield code="a">8436305477 (set)</subfield>
</datafield>
</record>
val expected = Seq("ISBN: 8436305477 (set)")
assert(extractor.identifier(Document(xml)) == expected)
}
it should "extract the correct language" in {
val expected = Seq("ger", "eng").map(nameOnlyConcept)
assert(extractor.language(xml) == expected)
}
it should "extract the correct place" in {
val expected = Seq("United States", "North America").map(nameOnlyPlace)
assert(extractor.place(xml) === expected)
}
it should "extract the correct publisher" in {
val expected = Seq("n.p. : n. pub.,").map(nameOnlyAgent)
assert(extractor.publisher(xml) === expected)
}
it should "extract the correct relation" in {
val expected =
Seq("Online version:. Howard, Clifford, 1868-1942. What happened at Olenberg. Chicago : The Reilly & Britton Co., 1911. (OCoLC)656701318").map(eitherStringOrUri)
assert(extractor.relation(xml) === expected)
}
it should "extract the correct rights" in {
val expected = Seq("Public domain only when viewed in the US. Learn more at http://www.hathitrust.org/access_use")
assert(extractor.rights(xml) === expected)
}
it should "fail gracefully if rights key is unexpected" in {
val xml =
<record>
<datafield>
<subfield code="r">xxx</subfield>
</datafield>
</record>
val expected = Seq()
assert(extractor.rights(Document(xml)) === expected)
}
it should "extract the correct subjects" in {
val expected = Seq(
"Fungi--North America",
"Antiquities",
"United States",
"United States--History--Periodicals",
"United States--Antiquities--Periodicals",
"Stuarts, 1603-1714",
"Periodicals",
"History",
"History Serials"
).map(nameOnlyConcept)
assert(extractor.subject(xml) === expected)
}
it should "extract the correct temporal" in {
val expected = Seq("1603 - 1714", "fast", "Stuarts, 1603-1714").map(stringOnlyTimeSpan)
assert(extractor.temporal(xml) === expected)
}
it should "extract the correct titles" in {
val expected = Seq("New species of North American fungi from various localities /")
assert(extractor.title(xml) === expected)
}
it should "extract the correct types from leader" in {
val expected = Seq("Text")
assert(extractor.`type`(xml) === expected)
}
it should "extract the correct types from datafield" in {
val xml =
<record>
<leader></leader>
<datafield ind2=" " ind1=" " tag="970">
<subfield code="a">MN</subfield>
</datafield>
</record>
val expected = Seq("Image")
assert(extractor.`type`(Document(xml)) === expected)
}
it should "fail gracefully if leader type key is unexpected" in {
val xml =
<record>
<leader>00737nxx a22002051 4500</leader>
</record>
val expected = Seq()
assert(extractor.`type`(Document(xml)) === expected)
}
it should "fail gracefully if datafield type key is unexpected" in {
val xml =
<record>
<leader></leader>
<datafield ind2=" " ind1=" " tag="970">
<subfield code="a">xx</subfield>
</datafield>
</record>
val expected = Seq()
assert(extractor.`type`(Document(xml)) === expected)
}
it should "extract the correct dataProvider" in {
val expected = Seq(nameOnlyAgent("Penn State University"))
assert(extractor.dataProvider(xml) === expected)
}
it should "fail gracefully if dataProvider key is unexpected" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="974">
<subfield code="u">xxx.000061785779</subfield>
</datafield>
</record>
val expected = Seq()
assert(extractor.dataProvider(Document(xml)) === expected)
}
it should "extract the correct isShownAt" in {
val expected = Seq(uriOnlyWebResource(URI("http://catalog.hathitrust.org/Record/009420214")))
assert(extractor.isShownAt(xml) === expected)
}
it should "extract the correct OCLC id" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="035">
<subfield code="a">(OCoLC)39816489</subfield>
</datafield>
</record>
val expected = Some("39816489")
assert(extractor.oclcId(Document(xml)) === expected)
}
it should "extract the correct OCLC id (with letter prefix)" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="035">
<subfield code="a">(OCoLC)ocm13230493</subfield>
</datafield>
</record>
val expected = Some("13230493")
assert(extractor.oclcId(Document(xml)) === expected)
}
it should "extract the correct ISBN id" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="020">
<subfield code="a">8436305477 (set)</subfield>
</datafield>
</record>
val expected = Some("8436305477")
assert(extractor.isbnId(Document(xml)) === expected)
}
it should "extract the correct google prefix" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="974">
<subfield code="u">chi.72963127</subfield>
</datafield>
<datafield ind2=" " ind1=" " tag="974">
<subfield code="u">chi.72963110</subfield>
</datafield>
</record>
val expected = Some("CHI")
assert(extractor.googlePrefix(Document(xml)) === expected)
}
it should "extract the correct google prefix (UCAL)" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="974">
<subfield code="u">uc1.b268676</subfield>
</datafield>
<datafield ind2=" " ind1=" " tag="974">
<subfield code="u">uc2.ark:/13960/t78s4nt84</subfield>
</datafield>
</record>
val expected = Some("UCAL")
assert(extractor.googlePrefix(Document(xml)) === expected)
}
it should "extract the correct google prefix (UCLA)" in {
val xml =
<record>
<datafield ind2=" " ind1=" " tag="974">
<subfield code="u">uc1.l0064507957</subfield>
</datafield>
</record>
val expected = Some("UCLA")
assert(extractor.googlePrefix(Document(xml)) === expected)
}
}
|
dpla/ingestion3
|
src/test/scala/dpla/ingestion3/mappers/providers/HathiMappingTest.scala
|
Scala
|
mit
| 10,331 |
package io.github.binaryfoo.lagotto
import org.joda.time.DateTime
/**
* Calculates the delay between two log entries.
*/
case class DelayTimer(current: LogEntry, previous: Option[LogEntry]) extends LogEntry {
override def timestamp: DateTime = current.timestamp
override def source: SourceRef = current.source
override def lines: String = current.lines
override def apply(id: String): String = id match {
case "delay" => delay.getOrElse(0).toString
case _ => current(id)
}
def delay: Option[Long] = previous.map { p =>
current.timestamp.getMillis - p.timestamp.getMillis
}
override def exportAsSeq: Seq[(String, String)] = current.exportAsSeq :+ ("delay", delay.map(_.toString).getOrElse(""))
}
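// Illustrative sketch (not part of the original file): wrapping the later of two
// parsed entries to expose the gap to its predecessor. `first` and `second` are
// hypothetical LogEntry values produced by an existing parser.
object DelayTimerSketch {
  def delayBetween(first: LogEntry, second: LogEntry): String =
    DelayTimer(second, Some(first))("delay") // milliseconds between the two timestamps, as a String
}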
|
binaryfoo/lagotto
|
src/main/scala/io/github/binaryfoo/lagotto/DelayTimer.scala
|
Scala
|
mit
| 731 |
package berlin.jentsch.modelchecker.akka
import akka.actor.ActorPath
import akka.actor.typed.Behavior
import akka.actor.typed.mc.BehaviorsEquals
import scala.language.implicitConversions
sealed trait Property {
def unary_! : Property = Not(this)
def &(that: Property): Property = And(this, that)
  def |(that: Property): Property = Or(this, that)
def ->(that: Property): Property = !this | that
/** The property will become true */
def isInevitable: Property = AlwaysEventually(this)
def show: String = toString
}
private sealed trait Atomic extends Property
/**
* An atomic property
*/
private case class ActorIs(path: ActorPath, behavior: Behavior[_])
extends Atomic {
/**
*
* @example considers behaviors equals
* {{{
* import akka.actor.typed.scaladsl.Behaviors._
* def behavior = setup[Unit] { _ => same }
*
* assert(behavior != behavior)
* assert(ActorIs(root, behavior) == ActorIs(root, behavior))
* }}}
*/
override def equals(o: Any): Boolean = {
o match {
case other: ActorIs =>
this.path == other.path && BehaviorsEquals.areEquivalent(
this.behavior,
other.behavior
)
case _ => false
}
}
override def hashCode: Int =
path.hashCode() ^ behavior.getClass.hashCode()
}
private case object ProgressIsPossible extends Atomic
private case class AlwaysEventually(property: Property) extends Property
private case class AlwaysGlobally(property: Property) extends Property
private case class AlwaysUntil(during: Property, until: Property)
extends Property
private case class ExistsEventually(property: Property) extends Property
private case class ExistsGlobally(property: Property) extends Property
private case class ExistsUntil(during: Property, until: Property)
extends Property
/** @group Bool */
private case object True extends Property
/** @group Bool */
private case class Not(property: Property) extends Property {
override def show: String = "!" ++ property.show
}
/** @group Bool */
private case class And(property1: Property, property2: Property)
extends Property
/** @group Bool */
private case class Or(property1: Property, property2: Property) extends Property
private case class Show(property: Property) extends Property {
override def show: String = property.show
}
trait PropertySyntax {
implicit class PathSyntax(path: ActorPath) {
/**
* An atomic property of a single actor.
     * Use Behavior.stopped to test if an actor is stopped or not yet created
*/
def is(behavior: Behavior[_]): Property = ActorIs(path, behavior)
}
def root: ActorPath = berlin.jentsch.modelchecker.akka.root
def existsUntil(during: Property, until: Property): Property =
ExistsUntil(during, until)
def potentially(property: Property): Property =
ExistsEventually(property)
def invariantly(property: Property): Property =
AlwaysGlobally(property)
def alwaysEventually(property: Property): Property =
AlwaysEventually(property)
def progressIsPossible: Property =
ProgressIsPossible
implicit def boolToProperty(boolean: Boolean): Property =
if (boolean) True else Not(True)
/** Prints the states which fulfill the property during the evaluation */
def show(property: Property): Property =
Show(property)
}
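// Illustrative sketch (not part of the original file): combining the syntax above
// into a checkable property. `someBehavior` is a hypothetical Behavior[Unit] that a
// concrete specification would supply.
object PropertySyntaxSketch extends PropertySyntax {
  // "whenever the actor at the root path runs someBehavior, progress stays possible"
  def spec(someBehavior: Behavior[Unit]): Property =
    (root is someBehavior) -> invariantly(progressIsPossible)
}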
|
Jentsch/modelchecker
|
akka/src/main/scala/berlin/jentsch/modelchecker/akka/Property.scala
|
Scala
|
mit
| 3,349 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.table
import org.apache.flink.api.scala._
import org.apache.flink.table.api._
import org.apache.flink.table.planner.plan.optimize.program.FlinkBatchProgram
import org.apache.flink.table.planner.utils.{MockPythonTableFunction, TableFunc0, TableFunc1, TableTestBase}
import org.apache.calcite.rel.rules.{CalcMergeRule, FilterCalcMergeRule, ProjectCalcMergeRule}
import org.apache.calcite.tools.RuleSets
import org.junit.Test
class CorrelateTest extends TableTestBase {
@Test
def testCrossJoin(): Unit = {
val util = batchTestUtil()
val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
val func = new TableFunc1
util.addFunction("func1", func)
val result1 = table.joinLateral(func('c) as 's).select('c, 's)
util.verifyPlan(result1)
}
@Test
def testCrossJoin2(): Unit = {
val util = batchTestUtil()
val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
val func = new TableFunc1
util.addFunction("func1", func)
val result2 = table.joinLateral(func('c, "$") as 's).select('c, 's)
util.verifyPlan(result2)
}
@Test
def testLeftOuterJoinWithoutJoinPredicates(): Unit = {
val util = batchTestUtil()
val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
val func = new TableFunc1
util.addFunction("func1", func)
val result = table.leftOuterJoinLateral(func('c) as 's).select('c, 's).where('s > "")
util.verifyPlan(result)
}
@Test
def testLeftOuterJoinWithLiteralTrue(): Unit = {
val util = batchTestUtil()
val table = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
val func = new TableFunc1
util.addFunction("func1", func)
val result = table.leftOuterJoinLateral(func('c) as 's, true).select('c, 's)
util.verifyPlan(result)
}
@Test
def testCorrelateWithMultiFilter(): Unit = {
val util = batchTestUtil()
val sourceTable = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
val func = new TableFunc0
util.addFunction("func1", func)
val result = sourceTable.select('a, 'b, 'c)
.joinLateral(func('c) as('d, 'e))
.select('c, 'd, 'e)
.where('e > 10)
.where('e > 20)
.select('c, 'd)
util.verifyPlan(result)
}
@Test
def testCorrelateWithMultiFilterAndWithoutCalcMergeRules(): Unit = {
val util = batchTestUtil()
val programs = util.getBatchProgram()
programs.getFlinkRuleSetProgram(FlinkBatchProgram.LOGICAL)
.get.remove(
RuleSets.ofList(
CalcMergeRule.INSTANCE,
FilterCalcMergeRule.INSTANCE,
ProjectCalcMergeRule.INSTANCE))
    // run the test with the calc merge rules removed from the logical program
util.replaceBatchProgram(programs)
val sourceTable = util.addTableSource[(Int, Long, String)]("MyTable", 'a, 'b, 'c)
val func = new TableFunc0
util.addFunction("func1", func)
val result = sourceTable.select('a, 'b, 'c)
.joinLateral(func('c) as('d, 'e))
.select('c, 'd, 'e)
.where('e > 10)
.where('e > 20)
.select('c, 'd)
util.verifyPlan(result)
}
@Test
def testCorrelatePythonTableFunction(): Unit = {
val util = batchTestUtil()
val sourceTable = util.addTableSource[(Int, Int, String)]("MyTable", 'a, 'b, 'c)
val func = new MockPythonTableFunction
val result = sourceTable.joinLateral(func('a, 'b) as('x, 'y))
util.verifyPlan(result)
}
}
|
tzulitai/flink
|
flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/table/CorrelateTest.scala
|
Scala
|
apache-2.0
| 4,252 |
package pl.writeonly.son2.path.notations
import com.fasterxml.jackson.databind.{ObjectMapper, ObjectReader}
import com.jayway.jsonpath.spi.json.{
JacksonJsonNodeJsonProvider,
JacksonJsonProvider
}
import com.jayway.jsonpath.spi.mapper.JacksonMappingProvider
import pl.writeonly.son2.apis.config.{Meta, RConfig, WConfig}
import pl.writeonly.son2.apis.core.Formats
import pl.writeonly.son2.jack.core.{Jack, JackImpl, JackObject}
import pl.writeonly.son2.jack.notation.NotationWriterJack
import pl.writeonly.son2.path.core.ProvidersPath
import pl.writeonly.son2.path.notation.{CreatorNotation, NotationReaderPath}
trait CreatorNotationJacksonLike extends CreatorNotation {
override def reader(c: RConfig) =
new NotationReaderPath(meta, defaultsPath(c))
override def writer(c: WConfig) = new NotationWriterJack(jack, c)
def jack: Jack
def mapper: ObjectMapper = jack.mapper
def meta: Meta = jack.meta
}
object CreatorNotationJackson extends CreatorNotationJacksonLike {
val jack = JackObject()
override def jsonProvider = new JacksonJsonNodeJsonProvider(mapper)
override def mappingProvider = new JacksonMappingProvider(mapper)
}
object CreatorNotationJacksonTyped extends CreatorNotationJacksonLike {
// val jack = JackObject().copy(meta = Meta(ProvidersPath.JACKSON_TYPED, Formats.OBJECT))
val jack = JackImpl(
Meta(ProvidersPath.JACKSON_TYPED, Formats.OBJECT),
new ObjectMapper,
"",
""
)
val reader: ObjectReader = mapper.reader.withType(classOf[Any])
override def jsonProvider = new JacksonJsonProvider(mapper, reader)
override def mappingProvider = new JacksonMappingProvider(mapper)
}
|
writeonly/son2
|
scallions-impl/scallions-path/src/main/scala/pl/writeonly/son2/path/notations/NotationCaseJackson.scala
|
Scala
|
apache-2.0
| 1,657 |
/*
* Copyright (c) 2013 Habla Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hablapps.dofollow.portal
import org.hablapps.updatable._
import org.hablapps.speech
import org.hablapps.dofollow._
import org.hablapps.dofollow.portal._
import org.hablapps.dofollow.portal.administration._
import org.hablapps.dofollow.portal.department._
import org.hablapps.dofollow.portal.project._
import org.hablapps.dofollow.portal.project.task._
trait Rules{ this: speech.Program
with State
with discussion.State
with meeting.State
with projectModel.State
with department.State
with administration.State
with project.State
with task.State
with Actions =>
  /** Deploy Portal: initiate Administration, play Admin, and load the task and project models.
*
* @initiate Administration Administration is initiated when the portal is created.
* @play Admin Admin is played when the portal is created.
*
*/
when {
case New(portal1: $[Portal]@unchecked, _: Portal) if portal1.isA[Portal]=>
Sequence(
for {
admin1 <- Initiate2(Administration(), portal1)
_ <- Play2(Admin(_name = Some(adminCod), _email = adminEmail, _forename = forename,
_surname = surname), admin1)
_ <- LoadDepartments(portal1)
_ <- LoadProjectModels(admin1)
} yield()
)
}
/*
*
   * When a new project is created, its attributes must be initialized.
* Later, if the adminCode doesn't exist, a new admin will be created.
* Finally, tasks will be initiated.
*
*/
when {
case Performed(setUpProject1: SetUpProject) => implicit state =>
Sequence(
Let(setUpProject1._new_entity.get.name += setUpProject1.codProject),
Let(setUpProject1._new_entity.get.projectAdmin := setUpProject1.numProjectAdmin),
implicit state => if (!setUpProject1.operator.isDefined)
PlayProjectAdmin(setUpProject1.numProjectAdmin, setUpProject1.getAdminDepartment)
else
ActionId()
,
InitiateTasks(setUpProject1)
)
}
/** Updates the end date when a project is closed */
when {
case _Set(project: $[Project], Project._status, CLOSED, true) if project.isA[Project] => implicit state =>
LetWholeExtension(project, "endDate", now)
}
/* Rules for persistent entities */
when {
case New(i: $[Interaction]@unchecked, _: Interaction) =>
LetWholeExtension(i, "persistent", true)
}
when {
case New(a: $[Agent]@unchecked, _: Agent) =>
LetWholeExtension(a, "persistent", true)
}
when {
case New(r: $[Resource]@unchecked, _: Resource) =>
LetWholeExtension(r, "persistent", true)
}
when {
case New(r: $[Reply]@unchecked, _: Reply) =>
LetWholeExtension(r, "persistent", true)
}
when {
case New(c: $[Comment]@unchecked, _: Comment) =>
LetWholeExtension(c, "persistent", true)
}
when {
case New(p: $[ChangeDateMeeting]@unchecked, _: ChangeDateMeeting) =>
LetWholeExtension(p, "persistent", true)
}
when {
case New(p: $[ScheduleTask]@unchecked, _: ScheduleTask) =>
LetWholeExtension(p, "persistent", true)
}
}
|
hablapps/app-dofollow
|
src/main/scala/org/hablapps/dofollow/portal/Rules.scala
|
Scala
|
apache-2.0
| 3,831 |
package im.actor.server
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.time.{ Seconds, Span }
import org.scalatest.{ Inside, FlatSpecLike, Matchers }
import slick.driver.PostgresDriver
import scala.concurrent.ExecutionContext
import im.actor.server.db.DbExtension
abstract class BaseAppSuite(_system: ActorSystem = {
ActorSpecification.createSystem()
})
extends ActorSuite(_system)
with FlatSpecLike
with ScalaFutures
with MessagingSpecHelpers
with Matchers
with Inside
with ServiceSpecMatchers
with ServiceSpecHelpers
with ActorSerializerPrepare {
protected implicit val materializer: ActorMaterializer = ActorMaterializer()
implicit lazy val ec: ExecutionContext = _system.dispatcher
protected implicit lazy val db: PostgresDriver.api.Database = {
DbExtension(_system).clean()
DbExtension(_system).migrate()
DbExtension(_system).db
}
override implicit def patienceConfig: PatienceConfig =
new PatienceConfig(timeout = Span(15, Seconds))
override protected def beforeAll(): Unit = {
super.beforeAll()
db
}
}
|
ljshj/actor-platform
|
actor-server/actor-testkit/src/main/scala/im/actor/server/BaseAppSuite.scala
|
Scala
|
mit
| 1,217 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.app.bib.parser
import java.io.File
import cc.factorie.app.bib.parser.Dom.Name
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import scala.collection.mutable.ArrayBuffer
class TestBibtexParser extends JUnitSuite with cc.factorie.util.FastLogging {
def testMichaelsStuff(): Unit = {
val path = """C:\\Users\\Luke\\Downloads\\failed\\failed\\failed"""
val fileTexts = new File(path).listFiles().toList
.filter(_.isFile)
.map(f => (f.getName, scala.io.Source.fromFile(f.getPath, "ISO-8859-1").toArray.mkString))
val results = fileTexts map {
case (name, fileText) =>
Dom.stringToDom(fileText).fold(err =>
Left("""
Error on file: "%s"
Error text: "%s" """ format (name, err)),
_ =>
Right("""
Success on file: "%s" """ format name))
}
val (failures, successes) = (new ArrayBuffer[String], new ArrayBuffer[String])
results.foreach(_.fold(failures.+=, successes.+=))
val failuresCauseNotBibtex = failures.filter(_.contains("`@' expected"))
val failuresCauseMismatchedQuote = failures.filter(_.contains("`\\"' expected but \\u001A found"))
val failuresCauseBadNames = failures.filter(_.contains("fragment between commas"))
failuresCauseNotBibtex.foreach(failures.-=)
failuresCauseMismatchedQuote.foreach(failures.-=)
failuresCauseBadNames.foreach(failures.-=)
successes.foreach(logger.debug(_))
failures.foreach(logger.debug(_))
logger.debug("Failures cause bad names:")
failuresCauseBadNames.foreach(logger.debug(_))
if (!failures.isEmpty)
sys.error(
"Failed! Successes: %d Failures %d FailuresCauseNotBibtex: %d FailuresCauseMismatchedQuote: %d FailuresCauseBadNames: %d" format
(successes.length, failures.length, failuresCauseNotBibtex.length,
failuresCauseMismatchedQuote.length, failuresCauseBadNames.length))
}
@Test def allTests(): Unit = {
def assertParse[T](parser: DocumentParser.Impl.Parser[T], str: String): DocumentParser.Impl.ParseResult[T] = {
val result = DocumentParser.Impl.parseAll(parser, str)
assert(result.successful, result.toString + " " + result.getClass.getName)
result
}
def assertParseAndDocify(parser: DocumentParser.Impl.Parser[List[AST.Entry]], str: String, print: Boolean = false): Unit = {
val parseResult = assertParse(parser, str)
assert(parseResult.successful, parseResult)
val res = Dom.astToDom(AST.Document(parseResult.get))
if (print) logger.debug(res)
}
assertParse(DocumentParser.Impl.braceDelimitedNoOuterLiteral, "{Something Great}")
assertParse(DocumentParser.Impl.literal, "{Something Great}")
assertParse(DocumentParser.Impl.literalOrSymbol, "{Something Great}")
assertParse(DocumentParser.Impl.value, "{Something Great}")
assertParse(DocumentParser.Impl.quoteDelimitedLiteral, "\\"Something Great\\"")
assertParse(DocumentParser.Impl.literal, "\\"Something Great\\"")
assertParse(DocumentParser.Impl.literalOrSymbol, "\\"Something Great\\"")
assertParse(DocumentParser.Impl.value, "\\"Something Great\\"")
assertParse(DocumentParser.Impl.numericLiteral, "123")
assertParse(DocumentParser.Impl.literal, "123")
assertParse(DocumentParser.Impl.literalOrSymbol, "123")
assertParse(DocumentParser.Impl.value, "123")
assertParse(DocumentParser.Impl.SYMBOL, "asda5")
assertParse(DocumentParser.Impl.literalOrSymbol, "asda5")
assertParse(DocumentParser.Impl.value, "asda5")
assertParse(DocumentParser.Impl.tag, "asda5 = { 132 as qwe asd }")
assertParse(DocumentParser.Impl.value, "asda5 # asda5")
assertParse(DocumentParser.Impl.commentEntry, "comment{wooooo!}")
assertParse(DocumentParser.Impl.preambleEntry, "preamble{wooooo}")
assertParse(DocumentParser.Impl.stringEntry, "string{wooooo = 1231}")
assertParse(DocumentParser.Impl.anyEntry, "@string{wooooo = 1231}")
assertParse(DocumentParser.Impl.anyEntry, "@string{ wooooo = {asd} }")
assertParse(DocumentParser.Impl.anyEntry, "@string{ wooooo = {asd} }")
assertParse(DocumentParser.Impl.anyEntry, "@preamble{ wooooo}")
assertParse(DocumentParser.Impl.anyEntry, "@comment{ wooooo }")
assertParse(DocumentParser.Impl.anyEntry, "@florb{ wooooo }")
assertParse(DocumentParser.Impl.anyEntry, "@florb{ wooooo, x = {y}, fg = sdf13, z = 123 }")
assertParse(DocumentParser.Impl.anyEntry, "@florb{ wooooo, x = {y}, fg = sdf13, z = 123, }")
assertParse(DocumentParser.Impl.anyEntry, "@florb{ wooooo, x = {y}, fg =\\"sdf13\\", z = 123, }")
assertParse(DocumentParser.Impl.anyEntry,
"""@florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}""")
assertParse(DocumentParser.Impl.freeComment, "i am the king of the owrld!!")
assertParse(DocumentParser.Impl.freeComment, """i am the king of the
owrld!!""")
assertParse(DocumentParser.Impl.WS ~> DocumentParser.Impl.anyEntry,
""" @florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}""")
assertParse((DocumentParser.Impl.WS ~> DocumentParser.Impl.anyEntry).+,
""" @florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}""")
assertParse(DocumentParser.Impl.bibTex,
""" @florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}""")
assertParse(DocumentParser.Impl.bibTex,
""" @florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}
"""
)
assertParse(DocumentParser.Impl.bibTex,
"""
Hi, everybody!
@florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}
@florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}
@florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}
free comments are coool
@florb{ wooooo,
x = {y},
fg ="sdf13",
z = 123 # asd,
}
""")
assertParse(DocumentParser.Impl.bibTex,
"""
@article {mrx05,
auTHor = "Mr. X",
Title = {Something Great},
publisher = "nob" # "ody",
YEAR = 2005
}
""")
assertParse(
DocumentParser.Impl.braceDelimitedNoOuterLiteral,
"{Interannual Variability of planet-encircling dust activity on {M}ars}")
// this sample is from: http://amath.colorado.edu/documentation/LaTeX/reference/faq/bibstyles.html
val coloradoSample = assertParse(DocumentParser.Impl.bibTex,
"""
@string{jgr = "J.~Geophys.~Res."}
@MISC{primes,
author = "Charles Louis Xavier Joseph de la Vall{\\'e}e Poussin",
note = "A strong form of the prime number theorem, 19th century",
year = 1879
}
@INBOOK{chicago,
title = "The Chicago Manual of Style",
publisher = "University of Chicago Press",
edition = "Thirteenth",
year = 1982,
pages = "400--401",
key = "Chicago"
}
@BOOK{texbook,
author = "Donald E. Knuth",
title= "The {{\\TeX}book}",
publisher = "Addison-Wesley",
year = 1984
}
@BOOK{latexbook,
author = "Leslie Lamport",
title = "{\\LaTeX \\rm:} {A} Document Preparation System",
publisher = "Addison-Wesley",
year = 1986
}
@UNPUBLISHED{btxdoc,
author = "Oren Patashnik",
title = "{Using BibTeX}",
note = "Documentation for general BibTeX users",
month = jan,
year = 1988
}
@UNPUBLISHED{btxhak,
author = "Oren Patashnik",
title = "Designing BibTeX Styles",
note = "The part of BibTeX's documentation
that's not meant for general users",
month = jan,
year = 1988
}
@BOOK{strunk,
author = "Strunk, Jr., William and E. B. White",
title = "The Elements of Style",
publisher = "Macmillan",
edition = "Third",
year = 1979
}
@book{vanleunen,
title = "A Handbook for Scholars",
author = "Mary-Claire van Leunen",
publisher = "Knopf",
year = "1979"
}
@ARTICLE{Zurek:1993,
AUTHOR = {Zurek, R. W. and Martin, L. J.},
TITLE = {Interannual Variability of planet-encircling dust activity on {M}ars},
YEAR = {1993},
JOURNAL = jgr,
VOLUME = {98},
NUMBER = {E2},
PAGES = {3247--3259}
}
@Article{Narendra_1990,
author = {K.S.Narendra and K.S.Parthsarathy},
title = {Identification and Control of Dynamical System
using Neural Networks},
journal = "IEENN",
year = {1990},
volume = {1},
number = {1},
month = {},
pages = {4-27},
note = {},
annote = {}
}
""")
assert(coloradoSample.successful, coloradoSample)
Dom.astToDom(AST.Document(coloradoSample.get))
assertParseAndDocify(DocumentParser.Impl.bibTex, """
@InProceedings{dredze-EtAl:2007:EMNLP-CoNLL2007,
author = {Dredze, Mark and Blitzer, John and Pratim Talukdar, Partha and Ganchev, Kuzman and Graca, Jo\\~ao and Pereira, Fernando},
title = {Frustratingly Hard Domain Adaptation for Dependency Parsing},
booktitle = {Proceedings of the CoNLL Shared Task Session of EMNLP-CoNLL 2007}
pages = {1051--1055},
url = {http://www.aclweb.org/anthology/D/D07/D07-1112}
}
""")
assertParseAndDocify(DocumentParser.Impl.bibTex, """
@InProceedings{BanikACL09-shortpaper,
author = {Eva Banik},
title = {Extending a Surface Realizer to Generate Coherent Discourse},
booktitle = {Proceedings of the Short Papers of the Joint conference of the Association for Computational Linguistics and the Asian Federation of Natural Language Processing (ACL-IJCNLP-09), Singapore},
year = 2009
}
@inproceedings{webdb03-smwea,
title={{ODISSEA: A Peer-to-Peer Architecture for Scalable Web Search and Information Retrieval}},
author={T. Suel and C. Mathur and J. Wu and J. Zhang and A. Delis
and M. Kharrazi and X. Long and K. Shanmugasunderam},
booktitle={{6th International Workshop on the Web and Databases (WebDB)}},
month={June},
year={2003},
address={San Diego, CA}
}
@inproceedings{1333582,
author = {Donglai Zhang and Paul Coddington and Andrew Wendelborn},
title = {Binary Data Transfer Performance over High-Latency Networks Using Web Service Attachments},
booktitle = {E-SCIENCE '07: Proceedings of the Third IEEE International Conference on e-Science and Grid Computing},
year = {2007},
isbn = {0-7695-3064-8},
pages = {261--269},
doi = {http://dx.doi.org/10.1109/E-SCIENCE.2007.16},
publisher = {IEEE Computer Society}
}
""")
assertParseAndDocify(DocumentParser.Impl.bibTex, """
@inproceedings{nahm:icml-wkshp02,
author = {Un Yong Nahm and Mikhail Bilenko and Raymond J.
Mooney},
title = {Two Approaches to Handling Noisy Variation in Text
Mining},
booktitle = {Proceedings of the ICML-2002 Workshop on
Text Learning},
pages = {18--27},
year = 2002,
}
}
""")
assertParseAndDocify(DocumentParser.Impl.bibTex, """
@article{1814808,
author = {Kauppinen, Tomi and Mantegari, Glauco and Paakkarinen, Panu and Kuittinen, Heini and Hyv\\"{o}nen, Eero and Bandini, Stefania},
title = {Determining relevance of imprecise temporal intervals for cultural heritage information retrieval},
journal = {Int. J. Hum.-Comput. Stud.},
volume = {68},
number = {9},
year = {2010},
issn = {1071-5819},
pages = {549--560},
doi = {http://dx.doi.org/10.1016/j.ijhcs.2010.03.002},
publisher = {Academic Press, Inc.},
address = {Duluth, MN, USA},
}""")
assertParseAndDocify(DocumentParser.Impl.bibTex, """
@inproceedings{sen07:coordinating,
Author = {Sen, Rohan and Hackmann, Gregory and Haitjema, Mart and Roman, Gruia-Catalin and Gill, Christopher},
Booktitle = {Lecture Notes in Computer Science},
Pages = {249--267},
Title = {Coordinating Workflow Allocation and Execution in Mobile Environments},
Url = {http://dx.doi.org/10.1007/978-3-540-72794-1_14},
Volume = {4467}
Year = {2007}
}""")
assertParseAndDocify(DocumentParser.Impl.bibTex, """
@COMMENT This file was generated by bib2html.pl <https://sourceforge.net/projects/bib2html/> version 0.94
@COMMENT written by Patrick Riley <http://sourceforge.net/users/patstg/>
@COMMENT This file came from Evgeniy Gabrilovich's publication pages at
@COMMENT http://www.gabrilovich.com/pubs.html
@Proceedings{Bunescu:2008:WikiAI,
title = "Proceedings of the AAAI Workshop on Wikipedia and Artificial Intelligence: An Evolving Synergy (WikiAI)",
year = 2008,
editor = "Razvan Bunescu and Evgeniy Gabrilovich and Rada Mihalcea",
month = "July",
organization = "Association for the Advancement of Artificial Intelligence",
publisher = "{AAAI} Press",
note = "AAAI Technical Report WS-08-15"
}
""", print = true)
assertParseAndDocify(DocumentParser.Impl.bibTex, """
@article{acmalg295,
author="H. Sp\\"{a}th",
title="Exponential Curve Fit",
volume=10,
number=2,
pages="87",
year=1967,
month="February",
journal=cacm
}
""", print = true)
expectResult(NameParser.stringToNames("Graca, Jo\\\\~ao"))(List(Name("Jo\\\\~ao", "", "Graca", "")))
expectResult(NameParser.stringToNames("Ludwig von Beethoven"))(List(Name("Ludwig", "von", "Beethoven", "")))
expectResult(NameParser.stringToNames("von Beethoven, Ludwig"))(List(Name("Ludwig", "von", "Beethoven", "")))
expectResult(NameParser.stringToNames("Jones, Jr., John-Paul"))(List(Name("John Paul", "", "Jones", "Jr.")))
expectResult(NameParser.stringToNames("John Paul Jones"))(List(Name("John Paul", "", "Jones", "")))
expectResult(NameParser.stringToNames("John Paul Jones and Jones, John Paul"))(
List(Name("John Paul", "", "Jones", ""), Name("John Paul", "", "Jones", "")))
expectResult(NameParser.stringToNames("John Paul Jones and Ludwig von Beethoven"))(
List(Name("John Paul", "", "Jones", ""), Name("Ludwig", "von", "Beethoven", "")))
expectResult(NameParser.stringToNames("Charles Louis Xavier Joseph de la Vallee Poussin"))(
List(Name("Charles Louis Xavier Joseph", "de la", "Vallee Poussin", "")))
expectResult(NameParser.stringToNames("{Barnes} {and} {Noble} {Inc.}"))(List(Name("Barnes", "and", "Noble Inc.", "")))
expectResult(NameParser.stringToNames("Ralph Alpher and Bethe, Hans and George Gamow"))(
List(Name("Ralph", "", "Alpher", ""), Name("Hans", "", "Bethe", ""), Name("George", "", "Gamow", "")))
expectResult(NameParser.stringToNames("K.S.Narendra"))(List(Name("K. S.", "", "Narendra", "")))
expectResult(NameParser.stringToNames("{\\\\e'}cole"))(List(Name("", "", "{\\\\e'}cole", "")))
expectResult(NameParser.stringToNames("John-Paul Jones and Bill Thompson"))(
List(Name("John Paul", "", "Jones", ""), Name("Bill", "", "Thompson", "")))
expectResult(NameParser.stringToNames("{\\\\e'}col{\\\\e'}"))(List(Name("", "", "{\\\\e'}col{\\\\e'}", "")))
expectResult(NameParser.stringToNames("{hey ho lotsa stu\\\\}ff}"))(List(Name("", "", "hey ho lotsa stu\\\\}ff", "")))
expectResult(NameParser.stringToNames("{Jean} {de la Fontaine du} {Bois Joli}"))(List(Name("Jean", "de la Fontaine du", "Bois Joli", "")))
expectResult(NameParser.stringToNames("Jean de la Fontaine du Bois Joli"))(List(Name("Jean", "de la Fontaine du", "Bois Joli", "")))
val clx1 = NameParser.stringToNames("Charles Louis Xavier Joseph de la Vall{\\\\'e}e Poussin").head
expectResult(clx1)(Name("Charles Louis Xavier Joseph", "de la", "Vall{\\\\'e}e Poussin", ""))
val clx2 = Dom.stringToDom("@thing{asdf, author = \\"Charles Louis Xavier Joseph de la Vall{\\\\'e}e Poussin\\"}")
.right.get.entries.head._2.authors.get.head
expectResult(clx2)(Name("Charles Louis Xavier Joseph", "de la", "Vall{\\\\'e}e Poussin", ""))
val clx3 = Dom.stringToDom("@thing{asdf, author = {Charles Louis Xavier Joseph de la Vall{\\\\'e}e Poussin}}")
.right.get.entries.head._2.authors.get.head
expectResult(clx3)(Name("Charles Louis Xavier Joseph", "de la", "Vall{\\\\'e}e Poussin", ""))
assert(clx1 == clx2 && clx2 == clx3, (clx1, clx2, clx3))
val ksn1 = NameParser.stringToNames("K.S.Narendra").head
expectResult(ksn1)(Name("K. S.", "", "Narendra", ""))
val ksn2 = Dom.stringToDom("@thing{asdf, author = \\"K.S.Narendra\\"}")
.right.get.entries.head._2.authors.get.head
expectResult(ksn2)(Name("K. S.", "", "Narendra", ""))
val ksn3 = Dom.stringToDom("@thing{asdf, author = {K.S.Narendra}}")
.right.get.entries.head._2.authors.get.head
expectResult(ksn3)(Name("K. S.", "", "Narendra", ""))
val ksn4 = Dom.stringToDom("@thing{asdf, author = {K.S.Narendra and Hugh Jass}}")
.right.get.entries.head._2.authors.get.head
expectResult(ksn4)(Name("K. S.", "", "Narendra", ""))
assert(ksn1 == ksn2 && ksn2 == ksn3 && ksn3 == ksn4, (ksn1, ksn2, ksn3, ksn4))
if (false) {
// didn't check in files for testing since they're pretty big - if interested, go to BibNet or I can provide
val fileText = scala.io.Source.fromFile("inputs/case-based-reasoning.bib.txt").mkString
val res = Dom.stringToDom(fileText, false)
//println(res)
def timed[T](showTime: Long => String)(body: => T) = {
val start = System.currentTimeMillis
val result = body
val time = showTime(System.currentTimeMillis - start)
logger.debug(time)
(result, time)
}
val filePath2 = "inputs/domain-decomp.bib.txt"
val file2 = scala.io.Source.fromFile(filePath2).toArray
val fileText2 = file2.mkString
val numLines = file2.length
val numMb = new java.io.File(filePath2).length / 1024.0 / 1024.0
val (result, time) =
timed(t =>
"domain-decomp.bib (%f MB, %d lines) parsed and dom-ified in %d ms (%f MB/sec, %f lines/sec)" format
(numMb, numLines, t, (1000.0 * numMb) / t, (1000.0 * numLines) / t)) {
Dom.stringToDom(fileText2, false)
}
// println(result)
logger.debug(time)
val sizeMult = 10
val bigtext = List.range(0, sizeMult).map(_ => fileText2).mkString
val (bigresult, bigtime) =
timed(t =>
"%d times domain-decomp.bib (%f MB, %d lines) parsed and dom-ified in %d ms (%f MB/sec, %f lines/sec)" format
(sizeMult, numMb * sizeMult, numLines * sizeMult, t, (1000.0 * numMb * sizeMult) / t, (1000.0 * numLines * sizeMult) / t)) {
Dom.stringToDom(bigtext, false)
}
}
}
}
|
patverga/factorie
|
src/test/scala/cc/factorie/app/bib/parser/TestBibtexParser.scala
|
Scala
|
apache-2.0
| 20,131 |
package edu.cmu.lti.nlp.amr
import scala.util.matching.Regex
import scala.collection.mutable.{Map, Set, ArrayBuffer}
import scala.io.Source
/****************************** Driver Program *****************************/
object TestAMRDisplayCode {
val usage = """Usage: scala -classpath . edu.cmu.lti.nlp.amr.TestAMRDisplayCode < amr_file > outfile"""
type OptionMap = Map[Symbol, Any]
def parseOptions(map : OptionMap, list: List[String]) : OptionMap = {
def isSwitch(s : String) = (s(0) == '-')
list match {
case Nil => map
case "-h" :: value :: tail =>
parseOptions(map ++ Map('help -> value.toInt), tail)
case "-v" :: value :: tail =>
parseOptions(map ++ Map('verbosity -> value.toInt), tail)
case option :: tail => println("Error: Unknown option "+option)
sys.exit(1)
}
}
def main(args: Array[String]) {
val options = parseOptions(Map(),args.toList)
if (options.contains('help)) { println(usage); sys.exit(1) }
if (options.contains('verbosity)) {
verbosity = options('verbosity).asInstanceOf[Int]
}
val Block = """((?:\n|.)*)\n(\((?:\n|.)*)""".r // (?: ) is non-capturing group
// and . does not match \n
for { block <- Corpus.getAmrBlocks(Source.stdin.getLines) } {
logger(0,"**** Processsing Block *****")
logger(0,block)
logger(0,"****************************")
val Block(extrastr, amrstr) = block
val graph = Graph.parse(amrstr)
logger(1,graph.prettyString(detail = 2, pretty = true))
graph.normalizeInverseRelations
graph.makeTopologicalOrdering
println(graph.prettyString(detail = 1, pretty = true))
println()
}
}
}
|
hopshackle/wordAlignment
|
src/edu/cmu/lti/nlp/amr/TestAMRDisplayCode.scala
|
Scala
|
bsd-2-clause
| 1,983 |
/*******************************************************************************
* Copyright 2013 Kevin Hester
*
* See LICENSE.txt for license details.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
******************************************************************************/
package com.geeksville.util
import com.google.common.cache.CacheBuilder
import com.google.common.cache.CacheLoader
import com.google.common.cache.LoadingCache
import com.google.common.cache.Cache
object CacheUtil {
implicit def functionToCacheLoader[F, T](f: F => T) = {
new CacheLoader[F, T] {
def load(key: F) = f(key)
}
}
implicit def pimpCache[F, T](cache: Cache[F, T]) = {
new PimpedCache(cache)
}
class PimpedCache[F, T](cache: Cache[F, T]) {
def getOption(key: F) = {
val value = cache.getIfPresent(key)
if (value == null) None else Some(value)
}
}
}
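// Illustrative sketch (not part of the original file): building a loading cache from
// a plain Scala function via the implicit CacheLoader conversion above, and reading
// an absent key as an Option via getOption. The cache contents are hypothetical.
object CacheUtilSketch {
  import CacheUtil._

  val upperCase: LoadingCache[String, String] =
    CacheBuilder.newBuilder()
      .maximumSize(100)
      .build[String, String]((s: String) => s.toUpperCase)

  // getIfPresent does not trigger loading, so an unseen key yields None rather than null
  val missing: Option[String] = upperCase.getOption("never-loaded")
}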
|
dronekit/dronekit-server
|
src/main/scala/com/geeksville/util/CacheUtil.scala
|
Scala
|
gpl-3.0
| 1,174 |
package cromwell.api.model
import spray.json.DefaultJsonProtocol
object CromwellFailedResponseExceptionJsonSupport extends DefaultJsonProtocol {
implicit val CromwellFailedResponseExceptionFormat = jsonFormat2(CromwellFailedResponseException)
}
case class CromwellFailedResponseException(status: String, message: String) extends Exception(message)
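// Illustrative sketch (not part of the original file): decoding a failed-response
// payload with the JSON format above. The JSON literal is hypothetical.
object CromwellFailedResponseExceptionSketch {
  import spray.json._
  import CromwellFailedResponseExceptionJsonSupport._

  val parsed: CromwellFailedResponseException =
    """{"status": "fail", "message": "Workflow not found"}""".parseJson
      .convertTo[CromwellFailedResponseException]
}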
|
ohsu-comp-bio/cromwell
|
cromwellApiClient/src/main/scala/cromwell/api/model/CromwellFailedResponseException.scala
|
Scala
|
bsd-3-clause
| 353 |
package org.jetbrains.plugins.scala.lang.parser.parsing.xml.pattern
import com.intellij.psi.xml.XmlTokenType
import org.jetbrains.plugins.scala.lang.lexer.ScalaXmlTokenTypes
import org.jetbrains.plugins.scala.lang.parser.{ErrMsg, ScalaElementTypes}
import org.jetbrains.plugins.scala.lang.parser.parsing.builder.ScalaPsiBuilder
/**
* @author Alexander Podkhalyuzin
* Date: 18.04.2008
*/
/*
* ETagP ::= </ Name [S] >
*/
object ETagP {
def parse(builder: ScalaPsiBuilder): Boolean = {
val tagMarker = builder.mark()
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_END_TAG_START =>
builder.advanceLexer()
case _ =>
tagMarker.drop()
return false
}
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_NAME =>
builder.advanceLexer()
case _ => builder error ErrMsg("xml.name.expected")
}
builder.getTokenType match {
case XmlTokenType.XML_WHITE_SPACE => builder.advanceLexer()
case _ =>
}
builder.getTokenType match {
case ScalaXmlTokenTypes.XML_TAG_END =>
builder.advanceLexer()
tagMarker.done(ScalaElementTypes.XML_END_TAG)
true
case _ =>
builder error ErrMsg("xml.tag.end.expected")
tagMarker.done(ScalaElementTypes.XML_END_TAG)
true
}
}
}
|
gtache/intellij-lsp
|
intellij-lsp-dotty/src/org/jetbrains/plugins/scala/lang/parser/parsing/xml/pattern/ETagP.scala
|
Scala
|
apache-2.0
| 1,323 |
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.storehaus.algebra
import com.twitter.util.Await
import com.twitter.storehaus.ReadableStore
import org.scalacheck.{Prop, Arbitrary, Properties}
import org.scalacheck.Prop._
object ReadableStoreAlgebraProperties extends Properties("ReadableStoreAlgebra") {
/**
   * get returns None when the key is present in neither store
*/
def getLaws[K: Arbitrary, V1: Arbitrary, V2: Arbitrary](
fnA: Map[K, V1] => ReadableStore[K, V1], fnB: Map[K, V2] => ReadableStore[K, V2]): Prop =
forAll { (mA: Map[K, V1], mB: Map[K, V2], others: Set[K]) =>
val keysA = mA.keySet
val keysB = mB.keySet
val expanded: Set[K] = keysA ++ keysB ++ others
val storeA = fnA(mA)
val storeB = fnB(mB)
val combinedStore = ReadableStoreAlgebra.both(storeA, storeB)
expanded.forall {
k: K =>
val combinedRes = Await.result(combinedStore.get(k))
(mA.get(k), mB.get(k)) match {
case (Some(l), Some(r)) => combinedRes == Some(Left((l, r)))
case (Some(l), None) => combinedRes == Some(Right(Left(l)))
case (None, Some(r)) => combinedRes == Some(Right(Right(r)))
case (None, None) => combinedRes.isEmpty
}
}
}
property("Parallel store matches normal queries") =
getLaws[Int, String, Long](ReadableStore.fromMap, ReadableStore.fromMap)
}
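// Illustrative sketch (not part of the original file): the Either encoding exercised
// by getLaws above, shown on two tiny in-memory stores. The maps and keys are
// hypothetical.
object ReadableStoreAlgebraSketch {
  val left = ReadableStore.fromMap(Map(1 -> "a"))
  val right = ReadableStore.fromMap(Map(1 -> 10L, 2 -> 20L))
  val combined = ReadableStoreAlgebra.both(left, right)

  // key present in both stores => Some(Left(("a", 10L)))
  val inBoth = Await.result(combined.get(1))
  // key present only in the right store => Some(Right(Right(20L)))
  val rightOnly = Await.result(combined.get(2))
}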
|
twitter/storehaus
|
storehaus-algebra/src/test/scala/com/twitter/storehaus/algebra/ReadableStoreAlgebraProperties.scala
|
Scala
|
apache-2.0
| 1,951 |
package com.mesosphere.cosmos
import java.util.UUID
import cats.data.Xor
import com.mesosphere.cosmos.http.MediaTypes
import com.mesosphere.cosmos.label.v1.model.PackageMetadata
import com.mesosphere.cosmos.repository.DefaultRepositories
import com.mesosphere.cosmos.rpc.v1.circe.Decoders._
import com.mesosphere.cosmos.rpc.v1.circe.Encoders._
import com.mesosphere.cosmos.rpc.v1.model._
import com.mesosphere.cosmos.test.CosmosIntegrationTestClient
import com.mesosphere.cosmos.thirdparty.marathon.model.AppId
import com.mesosphere.universe.v2.model.{PackageDetailsVersion, PackagingVersion}
import org.scalatest.concurrent.Eventually
import org.scalatest.{AppendedClues, FreeSpec, Inside}
final class PackageListIntegrationSpec
extends FreeSpec with Inside with AppendedClues with Eventually {
import PackageListIntegrationSpec._
val apiClient = CosmosIntegrationTestClient.CosmosClient
// These tests may be better implemented as focused unit tests
// There's a bunch of setup and teardown infrastructure here which complicates the control flow
// Until then, if you need to understand the code, ask @cruhland (says @BenWhitehead)
"The package list endpoint" - {
"responds with repo and package data for packages whose repositories are in the repo list" in {
withInstalledPackage("helloworld") { installResponse =>
withInstalledPackageInListResponse(installResponse) { case Some(Installation(_, _)) =>
// Success
}
}
}
}
"Issue #251: Package list should include packages whose repositories have been removed" in {
val expectedPackageInformation = InstalledPackageInformation(
InstalledPackageInformationPackageDetails(
packagingVersion = PackagingVersion("2.0"),
name = "helloworld",
version = PackageDetailsVersion("0.1.0"),
website = Some("https://github.com/mesosphere/dcos-helloworld"),
maintainer = "[email protected]",
description = "Example DCOS application package",
preInstallNotes = Some("A sample pre-installation message"),
postInstallNotes = Some("A sample post-installation message"),
tags = List("mesosphere", "example", "subcommand"),
selected = Some(false),
framework = Some(false)
)
)
withInstalledPackage("helloworld") { installResponse =>
withDeletedRepository(helloWorldRepository) {
withInstalledPackageInListResponse(installResponse) { case Some(Installation(_, pkg)) =>
assertResult(expectedPackageInformation)(pkg)
// Success
}
}
}
}
private[this] def withInstalledPackage(packageName: String)(f: InstallResponse => Unit): Unit = {
val Xor.Right(installResponse) = apiClient.callEndpoint[InstallRequest, InstallResponse](
"package/install",
InstallRequest(packageName, appId = Some(AppId(UUID.randomUUID().toString))),
MediaTypes.InstallRequest,
MediaTypes.V1InstallResponse
) withClue "when installing package"
try {
assertResult(packageName)(installResponse.packageName)
f(installResponse)
} finally {
val actualUninstall = apiClient.callEndpoint[UninstallRequest, UninstallResponse](
"package/uninstall",
UninstallRequest(installResponse.packageName, appId = Some(installResponse.appId), all = None),
MediaTypes.UninstallRequest,
MediaTypes.UninstallResponse
) withClue "when uninstalling package"
inside (actualUninstall) {
case Xor.Right(UninstallResponse(List(UninstallResult(uninstalledPackageName, appId, Some(packageVersion), _)))) =>
assertResult(installResponse.appId)(appId)
assertResult(installResponse.packageName)(uninstalledPackageName)
assertResult(installResponse.packageVersion)(packageVersion)
}
}
}
private[this] def withDeletedRepository(repository: PackageRepository)(action: => Unit): Unit = {
val actualDelete = apiClient.callEndpoint[PackageRepositoryDeleteRequest, PackageRepositoryDeleteResponse](
"package/repository/delete",
PackageRepositoryDeleteRequest(name = Some(repository.name)),
MediaTypes.PackageRepositoryDeleteRequest,
MediaTypes.PackageRepositoryDeleteResponse
) withClue "when deleting repo"
try {
assertResult(Xor.Right(None)) {
actualDelete.map(_.repositories.find(_.name == repository.name))
}
action
} finally {
val actualAdd = apiClient.callEndpoint[PackageRepositoryAddRequest, PackageRepositoryAddResponse](
"package/repository/add",
PackageRepositoryAddRequest(repository.name, repository.uri),
MediaTypes.PackageRepositoryAddRequest,
MediaTypes.PackageRepositoryAddResponse
) withClue "when restoring deleted repo"
inside(actualAdd) { case Xor.Right(PackageRepositoryAddResponse(repositories)) =>
inside(repositories.find(_.name == repository.name)) { case Some(addedRepository) =>
assertResult(repository)(addedRepository)
}
}
}
}
private[this] def withInstalledPackageInListResponse(installResponse: InstallResponse)(
pf: PartialFunction[Option[Installation], Unit]
): Unit = {
val actualList = apiClient.callEndpoint[ListRequest, ListResponse](
"package/list",
ListRequest(),
MediaTypes.ListRequest,
MediaTypes.ListResponse
) withClue "when listing installed packages"
inside (actualList) { case Xor.Right(ListResponse(packages)) =>
inside (packages.find(_.appId == installResponse.appId)) { pf }
}
}
"Issue #124: Package list endpoint responds with packages sorted " in {
val (c, d, b, a) = ("linkerd", "zeppelin", "jenkins", "cassandra")
val installResponses = List(c, d, b, a).map(packageInstall)
val packages = packageList().packages
assert(packages.size === 4)
assert(packages(0).packageInformation.packageDefinition.name === a)
assert(packages(1).packageInformation.packageDefinition.name === b)
assert(packages(2).packageInformation.packageDefinition.name === c)
assert(packages(3).packageInformation.packageDefinition.name === d)
installResponses foreach packageUninstall
}
private[this] def packageList(): ListResponse = {
val Xor.Right(listResponse) = apiClient.callEndpoint[ListRequest, ListResponse](
"package/list",
ListRequest(),
MediaTypes.ListRequest,
MediaTypes.ListResponse
) withClue "when listing installed packages"
listResponse
}
private[this] def packageInstall(packageName: String): InstallResponse = {
val Xor.Right(installResponse: InstallResponse) = apiClient.callEndpoint[InstallRequest, InstallResponse](
"package/install",
InstallRequest(packageName, appId = Some(AppId(UUID.randomUUID().toString))),
MediaTypes.InstallRequest,
MediaTypes.V1InstallResponse
) withClue "when installing package"
assertResult(packageName)(installResponse.packageName)
installResponse
}
private[this] def packageUninstall(installResponse: InstallResponse): Unit = {
val Xor.Right(uninstallResponse: UninstallResponse) = apiClient.callEndpoint[UninstallRequest, UninstallResponse](
"package/uninstall",
UninstallRequest(installResponse.packageName, appId = Some(installResponse.appId), all = None),
MediaTypes.UninstallRequest,
MediaTypes.UninstallResponse
) withClue "when uninstalling package"
val UninstallResponse(List(UninstallResult(uninstalledPackageName, appId, Some(packageVersion), _))) =
uninstallResponse
assertResult(installResponse.appId)(appId)
assertResult(installResponse.packageName)(uninstalledPackageName)
assertResult(installResponse.packageVersion)(packageVersion)
}
}
object PackageListIntegrationSpec {
private val Some(helloWorldRepository) = DefaultRepositories().getOrThrow.find(_.name == "Hello World")
}
|
movicha/cosmos
|
cosmos-server/src/it/scala/com/mesosphere/cosmos/PackageListIntegrationSpec.scala
|
Scala
|
apache-2.0
| 7,944 |
package controllers
import dao._
import database.{CourseDb, CourseTable}
import models.{Course, CourseLike, CourseProtocol}
import org.joda.time.DateTime
import play.api.libs.json.{Reads, Writes}
import play.api.mvc.ControllerComponents
import security.LWMRole._
import security.SecurityActionChain
import java.util.UUID
import javax.inject.{Inject, Singleton}
import scala.util.{Failure, Try}
object CourseController {
lazy val labelAttribute = "label"
lazy val abbreviationAttribute = "abbreviation"
lazy val lecturerAttribute = "lecturer"
lazy val semesterIndexAttribute = "semesterIndex"
}
@Singleton
final class CourseController @Inject()(cc: ControllerComponents, val abstractDao: CourseDao, val authorityDao: AuthorityDao, val securedAction: SecurityActionChain)
extends AbstractCRUDController[CourseProtocol, CourseTable, CourseDb, CourseLike](cc) {
import CourseController._
import dao.CourseDao._
import utils.date.DateTimeOps.DateTimeConverter
import scala.concurrent.ExecutionContext.Implicits.global
override protected implicit val writes: Writes[CourseLike] = CourseLike.writes
override protected implicit val reads: Reads[CourseProtocol] = CourseProtocol.reads
override def create(secureContext: SecureContext = contextFrom(Create)) = secureContext asyncAction { implicit request =>
parsed(
None,
course => abstractDao.zip(
abstractDao.createQuery(course),
authorityDao.createAssociatedAuthorities(course)
).map(_._1)
).created
}
def updateFrom(id: String) = restrictedContext(id)(Update) asyncAction { implicit request =>
update(id, NonSecureBlock)(request)
}
override protected def toDbModel(protocol: CourseProtocol, existingId: Option[UUID]): CourseDb =
CourseDb(
protocol.label,
protocol.description,
protocol.abbreviation,
protocol.lecturer,
protocol.semesterIndex,
DateTime.now.timestamp,
None,
existingId.getOrElse(UUID.randomUUID)
)
override protected def makeTableFilter(attribute: String, value: String): Try[TableFilterPredicate] =
(attribute, value) match {
case (`labelAttribute`, l) => l.makeLabelEqualsFilter
case (`abbreviationAttribute`, a) => a.makeAbbrevFilter
case (`semesterIndexAttribute`, s) => s.int map semesterIndexFilter
case (`lecturerAttribute`, l) => l.makeUserFilter
case _ => Failure(new Throwable(s"Unknown attribute $attribute"))
}
private def toCourseDb(c: Course) =
CourseDb(c.label, c.description, c.abbreviation, c.lecturer, c.semesterIndex, id = c.id)
override protected def contextFrom: PartialFunction[Rule, SecureContext] = {
case Get => PartialSecureBlock(List(EmployeeRole, StudentRole))
case GetAll => PartialSecureBlock(List(EmployeeRole))
case Create => PartialSecureBlock(List(Admin))
case Delete => PartialSecureBlock(List(God))
}
override protected def restrictedContext(restrictionId: String): PartialFunction[Rule, SecureContext] = {
case Update => SecureBlock(restrictionId, List(CourseManager))
case _ => PartialSecureBlock(List(God))
}
}
|
THK-ADV/lwm-reloaded
|
app/controllers/CourseController.scala
|
Scala
|
mit
| 3,136 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.{Locale, Properties}
import scala.collection.JavaConverters._
import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.spark.Partition
import org.apache.spark.annotation.Stable
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.csv.{CSVHeaderChecker, CSVOptions, UnivocityParser}
import org.apache.spark.sql.catalyst.expressions.ExprUtils
import org.apache.spark.sql.catalyst.json.{CreateJacksonParser, JacksonParser, JSONOptions}
import org.apache.spark.sql.catalyst.util.FailureSafeParser
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.DataSource
import org.apache.spark.sql.execution.datasources.csv._
import org.apache.spark.sql.execution.datasources.jdbc._
import org.apache.spark.sql.execution.datasources.json.TextInputJsonDataSource
import org.apache.spark.sql.execution.datasources.v2.{DataSourceV2Relation, DataSourceV2Utils, FileDataSourceV2}
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.TableCapability._
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
import org.apache.spark.unsafe.types.UTF8String
/**
* Interface used to load a [[Dataset]] from external storage systems (e.g. file systems,
* key-value stores, etc). Use `SparkSession.read` to access this.
*
* @since 1.4.0
*/
@Stable
class DataFrameReader private[sql](sparkSession: SparkSession) extends Logging {
/**
* Specifies the input data source format.
*
* @since 1.4.0
*/
def format(source: String): DataFrameReader = {
this.source = source
this
}
/**
* Specifies the input schema. Some data sources (e.g. JSON) can infer the input schema
* automatically from data. By specifying the schema here, the underlying data source can
* skip the schema inference step, and thus speed up data loading.
*
* @since 1.4.0
*/
def schema(schema: StructType): DataFrameReader = {
this.userSpecifiedSchema = Option(schema)
this
}
/**
* Specifies the schema by using the input DDL-formatted string. Some data sources (e.g. JSON) can
* infer the input schema automatically from data. By specifying the schema here, the underlying
* data source can skip the schema inference step, and thus speed up data loading.
*
* {{{
* spark.read.schema("a INT, b STRING, c DOUBLE").csv("test.csv")
* }}}
*
* @since 2.3.0
*/
def schema(schemaString: String): DataFrameReader = {
this.userSpecifiedSchema = Option(StructType.fromDDL(schemaString))
this
}
/**
* Adds an input option for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* </ul>
*
* @since 1.4.0
*/
def option(key: String, value: String): DataFrameReader = {
this.extraOptions += (key -> value)
this
}
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Boolean): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Long): DataFrameReader = option(key, value.toString)
/**
* Adds an input option for the underlying data source.
*
* @since 2.0.0
*/
def option(key: String, value: Double): DataFrameReader = option(key, value.toString)
/**
* (Scala-specific) Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* </ul>
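   *
   * An illustrative sketch (not from the original docs; `spark`, the path and the values are
   * assumptions):
   * {{{
   *   spark.read
   *     .options(Map("timeZone" -> "UTC", "pathGlobFilter" -> "*.json"))
   *     .format("json")
   *     .load("/tmp/events")
   * }}}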
*
* @since 1.4.0
*/
def options(options: scala.collection.Map[String, String]): DataFrameReader = {
this.extraOptions ++= options
this
}
/**
* Adds input options for the underlying data source.
*
* You can set the following option(s):
* <ul>
* <li>`timeZone` (default session local timezone): sets the string that indicates a timezone
* to be used to parse timestamps in the JSON/CSV datasources or partition values.</li>
* <li>`pathGlobFilter`: an optional glob pattern to only include files with paths matching
* the pattern. The syntax follows <code>org.apache.hadoop.fs.GlobFilter</code>.
* It does not change the behavior of partition discovery.</li>
* </ul>
*
* @since 1.4.0
*/
def options(options: java.util.Map[String, String]): DataFrameReader = {
this.options(options.asScala)
this
}
/**
   * Loads input as a `DataFrame`, for data sources that don't require a path (e.g. external
* key-value stores).
*
* @since 1.4.0
*/
def load(): DataFrame = {
load(Seq.empty: _*) // force invocation of `load(...varargs...)`
}
/**
   * Loads input as a `DataFrame`, for data sources that require a path (e.g. data backed by
* a local or distributed file system).
*
* @since 1.4.0
*/
def load(path: String): DataFrame = {
// force invocation of `load(...varargs...)`
option("path", path).load(Seq.empty: _*)
}
/**
   * Loads input as a `DataFrame`, for data sources that support multiple paths.
* Only works if the source is a HadoopFsRelationProvider.
*
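   * A short sketch of multi-path loading (the paths are placeholders; `spark` is assumed):
   * {{{
   *   spark.read.format("parquet").load("/data/2019/01", "/data/2019/02")
   * }}}
   *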
* @since 1.6.0
*/
@scala.annotation.varargs
def load(paths: String*): DataFrame = {
if (source.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
throw new AnalysisException("Hive data source can only be used with tables, you can not " +
"read files of Hive data source directly.")
}
val useV1Sources =
sparkSession.sessionState.conf.useV1SourceReaderList.toLowerCase(Locale.ROOT).split(",")
val lookupCls = DataSource.lookupDataSource(source, sparkSession.sessionState.conf)
val cls = lookupCls.newInstance() match {
case f: FileDataSourceV2 if useV1Sources.contains(f.shortName()) ||
useV1Sources.contains(lookupCls.getCanonicalName.toLowerCase(Locale.ROOT)) =>
f.fallbackFileFormat
case _ => lookupCls
}
if (classOf[TableProvider].isAssignableFrom(cls)) {
val provider = cls.getConstructor().newInstance().asInstanceOf[TableProvider]
val sessionOptions = DataSourceV2Utils.extractSessionConfigs(
source = provider, conf = sparkSession.sessionState.conf)
val pathsOption = if (paths.isEmpty) {
None
} else {
val objectMapper = new ObjectMapper()
Some("paths" -> objectMapper.writeValueAsString(paths.toArray))
}
val finalOptions = sessionOptions ++ extraOptions.toMap ++ pathsOption
val dsOptions = new CaseInsensitiveStringMap(finalOptions.asJava)
val table = userSpecifiedSchema match {
case Some(schema) => provider.getTable(dsOptions, schema)
case _ => provider.getTable(dsOptions)
}
import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Implicits._
table match {
case _: SupportsRead if table.supports(BATCH_READ) =>
Dataset.ofRows(sparkSession, DataSourceV2Relation.create(table, dsOptions))
case _ => loadV1Source(paths: _*)
}
} else {
loadV1Source(paths: _*)
}
}
private def loadV1Source(paths: String*) = {
// Code path for data source v1.
sparkSession.baseRelationToDataFrame(
DataSource.apply(
sparkSession,
paths = paths,
userSpecifiedSchema = userSpecifiedSchema,
className = source,
options = extraOptions.toMap).resolveRelation())
}
/**
   * Construct a `DataFrame` representing the database table named `table`, accessible via the
   * JDBC URL `url` and connection properties.
*
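   * A hedged example (the URL, table name and credentials are placeholders; `spark` is assumed):
   * {{{
   *   val props = new java.util.Properties()
   *   props.setProperty("user", "username")
   *   props.setProperty("password", "secret")
   *   spark.read.jdbc("jdbc:postgresql://host:5432/mydb", "public.events", props)
   * }}}
   *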
* @since 1.4.0
*/
def jdbc(url: String, table: String, properties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// properties should override settings in extraOptions.
this.extraOptions ++= properties.asScala
// explicit url and dbtable should override all
this.extraOptions += (JDBCOptions.JDBC_URL -> url, JDBCOptions.JDBC_TABLE_NAME -> table)
format("jdbc").load()
}
/**
   * Construct a `DataFrame` representing the database table named `table`, accessible via the
   * JDBC URL `url`. Partitions of the table will be retrieved in parallel based on the parameters
* passed to this function.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
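   * A sketch under assumed values (the URL, table, column, bounds and `props` are placeholders):
   * {{{
   *   spark.read.jdbc(
   *     "jdbc:postgresql://host:5432/mydb", "public.events",
   *     columnName = "id", lowerBound = 0L, upperBound = 1000000L, numPartitions = 8,
   *     connectionProperties = props)
   * }}}
   *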
* @param url JDBC database url of the form `jdbc:subprotocol:subname`.
* @param table Name of the table in the external database.
* @param columnName the name of a column of integral type that will be used for partitioning.
* @param lowerBound the minimum value of `columnName` used to decide partition stride.
* @param upperBound the maximum value of `columnName` used to decide partition stride.
* @param numPartitions the number of partitions. This, along with `lowerBound` (inclusive),
* `upperBound` (exclusive), form partition strides for generated WHERE
* clause expressions used to split the column `columnName` evenly. When
* the input is less than 1, the number is set to 1.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch and "queryTimeout" can be used to wait
* for a Statement object to execute to the given number of seconds.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
columnName: String,
lowerBound: Long,
upperBound: Long,
numPartitions: Int,
connectionProperties: Properties): DataFrame = {
// columnName, lowerBound, upperBound and numPartitions override settings in extraOptions.
this.extraOptions ++= Map(
JDBCOptions.JDBC_PARTITION_COLUMN -> columnName,
JDBCOptions.JDBC_LOWER_BOUND -> lowerBound.toString,
JDBCOptions.JDBC_UPPER_BOUND -> upperBound.toString,
JDBCOptions.JDBC_NUM_PARTITIONS -> numPartitions.toString)
jdbc(url, table, connectionProperties)
}
/**
   * Construct a `DataFrame` representing the database table named `table`, accessible via the
   * JDBC URL `url` and connection properties. The `predicates` parameter gives a list of
   * expressions suitable for inclusion in WHERE clauses; each one defines one partition
   * of the `DataFrame`.
*
* Don't create too many partitions in parallel on a large cluster; otherwise Spark might crash
* your external database systems.
*
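   * A sketch with placeholder predicates (each string becomes one partition; `props` is an
   * assumed `java.util.Properties` holding credentials):
   * {{{
   *   spark.read.jdbc(
   *     "jdbc:postgresql://host:5432/mydb", "public.events",
   *     Array("city = 'Berlin'", "city = 'Paris'"), props)
   * }}}
   *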
* @param url JDBC database url of the form `jdbc:subprotocol:subname`
* @param table Name of the table in the external database.
* @param predicates Condition in the where clause for each partition.
* @param connectionProperties JDBC database connection arguments, a list of arbitrary string
* tag/value. Normally at least a "user" and "password" property
* should be included. "fetchsize" can be used to control the
* number of rows per fetch.
* @since 1.4.0
*/
def jdbc(
url: String,
table: String,
predicates: Array[String],
connectionProperties: Properties): DataFrame = {
assertNoSpecifiedSchema("jdbc")
// connectionProperties should override settings in extraOptions.
val params = extraOptions.toMap ++ connectionProperties.asScala.toMap
val options = new JDBCOptions(url, table, params)
val parts: Array[Partition] = predicates.zipWithIndex.map { case (part, i) =>
JDBCPartition(part, i) : Partition
}
val relation = JDBCRelation(parts, options)(sparkSession)
sparkSession.baseRelationToDataFrame(relation)
}
/**
* Loads a JSON file and returns the results as a `DataFrame`.
*
* See the documentation on the overloaded `json()` method with varargs for more details.
*
* @since 1.4.0
*/
def json(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
json(Seq(path): _*)
}
/**
* Loads JSON files and returns the results as a `DataFrame`.
*
* <a href="http://jsonlines.org/">JSON Lines</a> (newline-delimited JSON) is supported by
* default. For JSON (one record per file), set the `multiLine` option to true.
*
* This function goes through the input once to determine the input schema. If you know the
* schema in advance, use the version that specifies the schema to avoid the extra scan.
*
* You can set the following JSON-specific options to deal with non-standard JSON files:
* <ul>
* <li>`primitivesAsString` (default `false`): infers all primitive values as a string type</li>
* <li>`prefersDecimal` (default `false`): infers all floating-point values as a decimal
* type. If the values do not fit in decimal, then it infers them as doubles.</li>
* <li>`allowComments` (default `false`): ignores Java/C++ style comment in JSON records</li>
* <li>`allowUnquotedFieldNames` (default `false`): allows unquoted JSON field names</li>
* <li>`allowSingleQuotes` (default `true`): allows single quotes in addition to double quotes
* </li>
* <li>`allowNumericLeadingZeros` (default `false`): allows leading zeros in numbers
* (e.g. 00012)</li>
* <li>`allowBackslashEscapingAnyCharacter` (default `false`): allows accepting quoting of all
* character using backslash quoting mechanism</li>
* <li>`allowUnquotedControlChars` (default `false`): allows JSON Strings to contain unquoted
* control characters (ASCII characters with value less than 32, including tab and line feed
* characters) or not.</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing.
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets malformed fields to `null`. To
   *   keep corrupt records, a user can set a string type field named
   *   `columnNameOfCorruptRecord` in a user-defined schema. If a schema does not have the
* field, it drops corrupt records during parsing. When inferring a schema, it implicitly
* adds a `columnNameOfCorruptRecord` field in an output schema.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.time.format.DateTimeFormatter`.
* This applies to date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.time.format.DateTimeFormatter`. This applies to timestamp type.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines,
* per file</li>
   * <li>`encoding` (by default it is not set): allows forcibly setting one of the standard basic
   * or extended encodings for the JSON files, for example UTF-16BE or UTF-32LE. If the encoding
* is not specified and `multiLine` is set to `true`, it will be detected automatically.</li>
* <li>`lineSep` (default covers all `\\r`, `\\r\\n` and `\\n`): defines the line separator
* that should be used for parsing.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of input JSON objects used
* for schema inferring.</li>
* <li>`dropFieldIfAllNull` (default `false`): whether to ignore column of all null values or
* empty array/struct during schema inference.</li>
* <li>`locale` (default is `en-US`): sets a locale as language tag in IETF BCP 47 format.
* For instance, this is used while parsing dates and timestamps.</li>
* </ul>
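   *
   * A minimal illustrative sketch (the path and option values are not from the original docs;
   * `spark` is assumed):
   * {{{
   *   spark.read
   *     .option("multiLine", "true")
   *     .json("/tmp/records.json")
   * }}}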
*
* @since 2.0.0
*/
@scala.annotation.varargs
def json(paths: String*): DataFrame = format("json").load(paths : _*)
/**
* Loads a `JavaRDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON
* Lines text format or newline-delimited JSON</a>) and returns the result as
* a `DataFrame`.
*
   * Unless the schema is specified using the `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: JavaRDD[String]): DataFrame = json(jsonRDD.rdd)
/**
* Loads an `RDD[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
   * Unless the schema is specified using the `schema` function, this function goes through the
* input once to determine the input schema.
*
* @param jsonRDD input RDD with one JSON object per record
* @since 1.4.0
*/
@deprecated("Use json(Dataset[String]) instead.", "2.2.0")
def json(jsonRDD: RDD[String]): DataFrame = {
json(sparkSession.createDataset(jsonRDD)(Encoders.STRING))
}
/**
* Loads a `Dataset[String]` storing JSON objects (<a href="http://jsonlines.org/">JSON Lines
* text format or newline-delimited JSON</a>) and returns the result as a `DataFrame`.
*
   * Unless the schema is specified using the `schema` function, this function goes through the
* input once to determine the input schema.
*
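   * An illustrative sketch (the sample records are made up; `spark` is assumed):
   * {{{
   *   import spark.implicits._
   *   val jsonLines = Seq("""{"name":"a","age":1}""", """{"name":"b","age":2}""").toDS()
   *   val df = spark.read.json(jsonLines)
   * }}}
   *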
* @param jsonDataset input Dataset with one JSON object per record
* @since 2.2.0
*/
def json(jsonDataset: Dataset[String]): DataFrame = {
val parsedOptions = new JSONOptions(
extraOptions.toMap,
sparkSession.sessionState.conf.sessionLocalTimeZone,
sparkSession.sessionState.conf.columnNameOfCorruptRecord)
val schema = userSpecifiedSchema.getOrElse {
TextInputJsonDataSource.inferFromDataset(jsonDataset, parsedOptions)
}
ExprUtils.verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
val actualSchema =
StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
val createParser = CreateJacksonParser.string _
val parsed = jsonDataset.rdd.mapPartitions { iter =>
val rawParser = new JacksonParser(actualSchema, parsedOptions, allowArrayAsStructs = true)
val parser = new FailureSafeParser[String](
input => rawParser.parse(input, createParser, UTF8String.fromString),
parsedOptions.parseMode,
schema,
parsedOptions.columnNameOfCorruptRecord)
iter.flatMap(parser.parse)
}
sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = jsonDataset.isStreaming)
}
/**
* Loads a CSV file and returns the result as a `DataFrame`. See the documentation on the
* other overloaded `csv()` method for more details.
*
* @since 2.0.0
*/
def csv(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
csv(Seq(path): _*)
}
/**
   * Loads a `Dataset[String]` storing CSV rows and returns the result as a `DataFrame`.
*
   * If the schema is not specified using the `schema` function and the `inferSchema` option is
   * enabled, this function goes through the input once to determine the input schema.
   *
   * If the schema is not specified using the `schema` function and the `inferSchema` option is
   * disabled, it determines the columns as string types and reads only the first line to
   * determine the names and the number of fields.
*
   * If `enforceSchema` is set to `false`, only the CSV header in the first line is checked
   * to conform to the specified or inferred schema.
*
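   * An illustrative sketch (the sample rows are made up; `spark` is assumed):
   * {{{
   *   import spark.implicits._
   *   val csvLines = Seq("name,age", "a,1", "b,2").toDS()
   *   val df = spark.read.option("header", "true").csv(csvLines)
   * }}}
   *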
* @param csvDataset input Dataset with one CSV row per record
* @since 2.2.0
*/
def csv(csvDataset: Dataset[String]): DataFrame = {
val parsedOptions: CSVOptions = new CSVOptions(
extraOptions.toMap,
sparkSession.sessionState.conf.csvColumnPruning,
sparkSession.sessionState.conf.sessionLocalTimeZone)
val filteredLines: Dataset[String] =
CSVUtils.filterCommentAndEmpty(csvDataset, parsedOptions)
// For performance, short-circuit the collection of the first line when it won't be used:
// - TextInputCSVDataSource - Only uses firstLine to infer an unspecified schema
// - CSVHeaderChecker - Only uses firstLine to check header, when headerFlag is true
// - CSVUtils - Only uses firstLine to filter headers, when headerFlag is true
// (If the downstream logic grows more complicated, consider refactoring to an approach that
// delegates this decision to the constituent consumers themselves.)
val maybeFirstLine: Option[String] =
if (userSpecifiedSchema.isEmpty || parsedOptions.headerFlag) {
filteredLines.take(1).headOption
} else {
None
}
val schema = userSpecifiedSchema.getOrElse {
TextInputCSVDataSource.inferFromDataset(
sparkSession,
csvDataset,
maybeFirstLine,
parsedOptions)
}
ExprUtils.verifyColumnNameOfCorruptRecord(schema, parsedOptions.columnNameOfCorruptRecord)
val actualSchema =
StructType(schema.filterNot(_.name == parsedOptions.columnNameOfCorruptRecord))
val linesWithoutHeader: RDD[String] = maybeFirstLine.map { firstLine =>
val headerChecker = new CSVHeaderChecker(
actualSchema,
parsedOptions,
source = s"CSV source: $csvDataset")
headerChecker.checkHeaderColumnNames(firstLine)
filteredLines.rdd.mapPartitions(CSVUtils.filterHeaderLine(_, firstLine, parsedOptions))
}.getOrElse(filteredLines.rdd)
val parsed = linesWithoutHeader.mapPartitions { iter =>
val rawParser = new UnivocityParser(actualSchema, parsedOptions)
val parser = new FailureSafeParser[String](
input => Seq(rawParser.parse(input)),
parsedOptions.parseMode,
schema,
parsedOptions.columnNameOfCorruptRecord)
iter.flatMap(parser.parse)
}
sparkSession.internalCreateDataFrame(parsed, schema, isStreaming = csvDataset.isStreaming)
}
/**
* Loads CSV files and returns the result as a `DataFrame`.
*
* This function will go through the input once to determine the input schema if `inferSchema`
   * is enabled. To avoid going through the entire data once, disable the `inferSchema` option or
* specify the schema explicitly using `schema`.
*
* You can set the following CSV-specific options to deal with CSV files:
* <ul>
* <li>`sep` (default `,`): sets a single character as a separator for each
* field and value.</li>
* <li>`encoding` (default `UTF-8`): decodes the CSV files by the given encoding
* type.</li>
* <li>`quote` (default `"`): sets a single character used for escaping quoted values where
   * the separator can be part of the value. If you would like to turn off quotations, you need to
   * set an empty string, not `null`. This behaviour is different from
* `com.databricks.spark.csv`.</li>
* <li>`escape` (default `\\`): sets a single character used for escaping quotes inside
* an already quoted value.</li>
* <li>`charToEscapeQuoteEscaping` (default `escape` or `\\0`): sets a single character used for
* escaping the escape for the quote character. The default value is escape character when escape
* and quote characters are different, `\\0` otherwise.</li>
* <li>`comment` (default empty string): sets a single character used for skipping lines
* beginning with this character. By default, it is disabled.</li>
* <li>`header` (default `false`): uses the first line as names of columns.</li>
* <li>`enforceSchema` (default `true`): If it is set to `true`, the specified or inferred schema
* will be forcibly applied to datasource files, and headers in CSV files will be ignored.
* If the option is set to `false`, the schema will be validated against all headers in CSV files
* in the case when the `header` option is set to `true`. Field names in the schema
* and column names in CSV headers are checked by their positions taking into account
* `spark.sql.caseSensitive`. Though the default value is true, it is recommended to disable
* the `enforceSchema` option to avoid incorrect results.</li>
* <li>`inferSchema` (default `false`): infers the input schema automatically from data. It
* requires one extra pass over the data.</li>
* <li>`samplingRatio` (default is 1.0): defines fraction of rows used for schema inferring.</li>
* <li>`ignoreLeadingWhiteSpace` (default `false`): a flag indicating whether or not leading
* whitespaces from values being read should be skipped.</li>
* <li>`ignoreTrailingWhiteSpace` (default `false`): a flag indicating whether or not trailing
* whitespaces from values being read should be skipped.</li>
* <li>`nullValue` (default empty string): sets the string representation of a null value. Since
* 2.0.1, this applies to all supported types including the string type.</li>
* <li>`emptyValue` (default empty string): sets the string representation of an empty value.</li>
   * <li>`nanValue` (default `NaN`): sets the string representation of a non-number value.</li>
* <li>`positiveInf` (default `Inf`): sets the string representation of a positive infinity
* value.</li>
* <li>`negativeInf` (default `-Inf`): sets the string representation of a negative infinity
* value.</li>
* <li>`dateFormat` (default `yyyy-MM-dd`): sets the string that indicates a date format.
* Custom date formats follow the formats at `java.time.format.DateTimeFormatter`.
* This applies to date type.</li>
* <li>`timestampFormat` (default `yyyy-MM-dd'T'HH:mm:ss.SSSXXX`): sets the string that
* indicates a timestamp format. Custom date formats follow the formats at
* `java.time.format.DateTimeFormatter`. This applies to timestamp type.</li>
* <li>`maxColumns` (default `20480`): defines a hard limit of how many columns
* a record can have.</li>
* <li>`maxCharsPerColumn` (default `-1`): defines the maximum number of characters allowed
* for any given value being read. By default, it is -1 meaning unlimited length</li>
* <li>`mode` (default `PERMISSIVE`): allows a mode for dealing with corrupt records
* during parsing. It supports the following case-insensitive modes.
* <ul>
* <li>`PERMISSIVE` : when it meets a corrupted record, puts the malformed string into a
* field configured by `columnNameOfCorruptRecord`, and sets malformed fields to `null`.
   *     To keep corrupt records, a user can set a string type field named
   *     `columnNameOfCorruptRecord` in a user-defined schema. If a schema does not have
   *     the field, it drops corrupt records during parsing. A record with fewer or more tokens
   *     than the schema is not considered a corrupted record by CSV. When it meets a record having
   *     fewer tokens than the length of the schema, it sets `null` for the extra fields. When the
   *     record has more tokens than the length of the schema, it drops the extra tokens.</li>
* <li>`DROPMALFORMED` : ignores the whole corrupted records.</li>
* <li>`FAILFAST` : throws an exception when it meets corrupted records.</li>
* </ul>
* </li>
* <li>`columnNameOfCorruptRecord` (default is the value specified in
* `spark.sql.columnNameOfCorruptRecord`): allows renaming the new field having malformed string
* created by `PERMISSIVE` mode. This overrides `spark.sql.columnNameOfCorruptRecord`.</li>
* <li>`multiLine` (default `false`): parse one record, which may span multiple lines.</li>
* <li>`locale` (default is `en-US`): sets a locale as language tag in IETF BCP 47 format.
* For instance, this is used while parsing dates and timestamps.</li>
* <li>`lineSep` (default covers all `\\r`, `\\r\\n` and `\\n`): defines the line separator
* that should be used for parsing. Maximum length is 1 character.</li>
* </ul>
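   *
   * A minimal illustrative sketch (the path and option values are not from the original docs;
   * `spark` is assumed):
   * {{{
   *   spark.read
   *     .option("header", "true")
   *     .option("inferSchema", "true")
   *     .csv("/tmp/people.csv")
   * }}}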
*
* @since 2.0.0
*/
@scala.annotation.varargs
def csv(paths: String*): DataFrame = format("csv").load(paths : _*)
/**
* Loads a Parquet file, returning the result as a `DataFrame`. See the documentation
* on the other overloaded `parquet()` method for more details.
*
* @since 2.0.0
*/
def parquet(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
parquet(Seq(path): _*)
}
/**
* Loads a Parquet file, returning the result as a `DataFrame`.
*
* You can set the following Parquet-specific option(s) for reading Parquet files:
* <ul>
* <li>`mergeSchema` (default is the value specified in `spark.sql.parquet.mergeSchema`): sets
* whether we should merge schemas collected from all Parquet part-files. This will override
* `spark.sql.parquet.mergeSchema`.</li>
* </ul>
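   *
   * A short sketch (the paths are placeholders; `spark` is assumed):
   * {{{
   *   spark.read.option("mergeSchema", "true").parquet("/data/part1", "/data/part2")
   * }}}
   *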
* @since 1.4.0
*/
@scala.annotation.varargs
def parquet(paths: String*): DataFrame = {
format("parquet").load(paths: _*)
}
/**
* Loads an ORC file and returns the result as a `DataFrame`.
*
* @param path input path
* @since 1.5.0
*/
def orc(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
orc(Seq(path): _*)
}
/**
* Loads ORC files and returns the result as a `DataFrame`.
*
* @param paths input paths
* @since 2.0.0
*/
@scala.annotation.varargs
def orc(paths: String*): DataFrame = format("orc").load(paths: _*)
/**
* Returns the specified table as a `DataFrame`.
*
* @since 1.4.0
*/
def table(tableName: String): DataFrame = {
assertNoSpecifiedSchema("table")
sparkSession.table(tableName)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any. See the documentation on
* the other overloaded `text()` method for more details.
*
* @since 2.0.0
*/
def text(path: String): DataFrame = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
text(Seq(path): _*)
}
/**
* Loads text files and returns a `DataFrame` whose schema starts with a string column named
* "value", and followed by partitioned columns if there are any.
* The text files must be encoded as UTF-8.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.text("/path/to/spark/README.md")
*
* // Java:
* spark.read().text("/path/to/spark/README.md")
* }}}
*
* You can set the following text-specific option(s) for reading text files:
* <ul>
* <li>`wholetext` (default `false`): If true, read a file as a single row and not split by "\\n".
* </li>
* <li>`lineSep` (default covers all `\\r`, `\\r\\n` and `\\n`): defines the line separator
* that should be used for parsing.</li>
* </ul>
*
* @param paths input paths
* @since 1.6.0
*/
@scala.annotation.varargs
def text(paths: String*): DataFrame = format("text").load(paths : _*)
/**
* Loads text files and returns a [[Dataset]] of String. See the documentation on the
* other overloaded `textFile()` method for more details.
* @since 2.0.0
*/
def textFile(path: String): Dataset[String] = {
    // This method ensures that calls that explicitly need a single argument work, see SPARK-16009
textFile(Seq(path): _*)
}
/**
* Loads text files and returns a [[Dataset]] of String. The underlying schema of the Dataset
* contains a single string column named "value".
* The text files must be encoded as UTF-8.
*
* If the directory structure of the text files contains partitioning information, those are
* ignored in the resulting Dataset. To include partitioning information as columns, use `text`.
*
* By default, each line in the text files is a new row in the resulting DataFrame. For example:
* {{{
* // Scala:
* spark.read.textFile("/path/to/spark/README.md")
*
* // Java:
* spark.read().textFile("/path/to/spark/README.md")
* }}}
*
* You can set the following textFile-specific option(s) for reading text files:
* <ul>
* <li>`wholetext` (default `false`): If true, read a file as a single row and not split by "\\n".
* </li>
* <li>`lineSep` (default covers all `\\r`, `\\r\\n` and `\\n`): defines the line separator
* that should be used for parsing.</li>
* </ul>
*
* @param paths input path
* @since 2.0.0
*/
@scala.annotation.varargs
def textFile(paths: String*): Dataset[String] = {
assertNoSpecifiedSchema("textFile")
text(paths : _*).select("value").as[String](sparkSession.implicits.newStringEncoder)
}
/**
* A convenient function for schema validation in APIs.
*/
private def assertNoSpecifiedSchema(operation: String): Unit = {
if (userSpecifiedSchema.nonEmpty) {
throw new AnalysisException(s"User specified schema not supported with `$operation`")
}
}
///////////////////////////////////////////////////////////////////////////////////////
// Builder pattern config options
///////////////////////////////////////////////////////////////////////////////////////
private var source: String = sparkSession.sessionState.conf.defaultDataSourceName
private var userSpecifiedSchema: Option[StructType] = None
private val extraOptions = new scala.collection.mutable.HashMap[String, String]
}
|
icexelloss/spark
|
sql/core/src/main/scala/org/apache/spark/sql/DataFrameReader.scala
|
Scala
|
apache-2.0
| 35,939 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 VoltDB Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.SparkConf
import org.apache.spark.sql.SQLContext
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.rdd.RDD
object KMeansReferral {
case class Center(id:Int, src:Int, dest:Int, referral:Int, agent:Int)
def main(args: Array[String]) {
if (args.length != 2) {
System.err.println("Usage: KMeans [source-parquet] [centers-parquet]")
System.exit(1)
}
val conf = new SparkConf().setAppName("KMeansReferral")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
import sqlContext.createSchemaRDD
    // Load the source rows from the Parquet file given as the first argument.
    val parquetFile = sqlContext.parquetFile(args(0))
    // Convert each row's integer fields into a dense vector for clustering.
    val parsedData = parquetFile.map(r => Vectors.dense((for (f <- r) yield f.asInstanceOf[Int].toDouble).toArray))
    // Train k-means with 20 clusters and 20 iterations.
    val clusters = KMeans.train(parsedData, 20, 20)
    // Round each cluster center's coordinates to integers and wrap them in Center rows.
    val intarrays = for (v <- clusters.clusterCenters) yield for (d <- v.toArray) yield d.round.toInt
    val centers = for ((a, i) <- intarrays.zipWithIndex) yield Center(i, a(0), a(1), a(2), a(3))
    // Write the cluster centers out as Parquet (second argument).
    val outrdd: RDD[Center] = sc.parallelize(centers)
    outrdd.saveAsParquetFile(args(1))
}
}
|
VoltDB/app-fastdata
|
hadoop/spark/src/main/scala/KMeansReferral.scala
|
Scala
|
mit
| 2,443 |
package rats
import java.util.concurrent.atomic.AtomicInteger
class GamesRepository {
private val atomicInteger = new AtomicInteger()
def newGame(): Long = atomicInteger.incrementAndGet()
}
|
jcaraballo/rats2
|
src/main/scala/rats/GamesRepository.scala
|
Scala
|
gpl-3.0
| 196 |
package models.job
import akka.actor.ActorSystem
import com.scalableminds.util.accesscontext.GlobalAccessContext
import com.scalableminds.util.mvc.Formatter
import com.scalableminds.util.tools.Fox
import com.scalableminds.webknossos.datastore.helpers.IntervalScheduler
import com.scalableminds.webknossos.schema.Tables._
import com.typesafe.scalalogging.LazyLogging
import javax.inject.Inject
import oxalis.telemetry.SlackNotificationService
import play.api.inject.ApplicationLifecycle
import play.api.libs.json.{JsObject, Json}
import slick.jdbc.PostgresProfile.api._
import slick.lifted.Rep
import utils.{ObjectId, SQLClient, SQLDAO, WkConf}
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
case class Worker(_id: ObjectId,
_dataStore: String,
key: String,
maxParallelJobs: Int,
lastHeartBeat: Long = 0,
created: Long = System.currentTimeMillis,
isDeleted: Boolean = false)
class WorkerDAO @Inject()(sqlClient: SQLClient)(implicit ec: ExecutionContext)
extends SQLDAO[Worker, WorkersRow, Workers](sqlClient) {
val collection = Workers
def idColumn(x: Workers): Rep[String] = x._Id
def isDeletedColumn(x: Workers): Rep[Boolean] = x.isdeleted
def parse(r: WorkersRow): Fox[Worker] =
Fox.successful(
Worker(
ObjectId(r._Id),
r._Datastore,
r.key,
r.maxparalleljobs,
r.lastheartbeat.getTime,
r.created.getTime,
r.isdeleted
)
)
def findOneByKey(key: String): Fox[Worker] =
for {
r: Seq[WorkersRow] <- run(sql"select #$columns from #$existingCollectionName where key = $key".as[WorkersRow])
parsed <- parseFirst(r, "key")
} yield parsed
def findOneByDataStore(dataStoreName: String): Fox[Worker] =
for {
r: Seq[WorkersRow] <- run(
sql"select #$columns from #$existingCollectionName where _dataStore = $dataStoreName".as[WorkersRow])
parsed <- parseFirst(r, "dataStoreName")
} yield parsed
def updateHeartBeat(_id: ObjectId): Unit = {
run(sqlu"update webknossos.workers set lastHeartBeat = NOW() where _id = ${_id}")
// Note that this should not block the jobs polling operation, failures here are not critical
()
}
}
class WorkerService @Inject()(conf: WkConf) {
def lastHeartBeatIsRecent(worker: Worker): Boolean =
System.currentTimeMillis() - worker.lastHeartBeat < conf.Jobs.workerLivenessTimeout.toMillis
def publicWrites(worker: Worker): JsObject =
Json.obj(
"id" -> worker._id.id,
"maxParallelJobs" -> worker.maxParallelJobs,
"created" -> worker.created,
"lastHeartBeat" -> worker.lastHeartBeat,
"lastHeartBeatIsRecent" -> lastHeartBeatIsRecent(worker)
)
}
class WorkerLivenessService @Inject()(workerService: WorkerService,
workerDAO: WorkerDAO,
slackNotificationService: SlackNotificationService,
val lifecycle: ApplicationLifecycle,
val system: ActorSystem)
extends IntervalScheduler
with Formatter
with LazyLogging {
override protected def tickerInitialDelay: FiniteDuration = 1 minute
override protected def tickerInterval: FiniteDuration = 1 minute
override protected def tick(): Unit = {
for {
workers <- workerDAO.findAll(GlobalAccessContext)
_ = workers.foreach(reportIfLivenessChanged)
} yield ()
()
}
private val reportedAsDead: scala.collection.mutable.Set[ObjectId] = scala.collection.mutable.Set()
private def reportIfLivenessChanged(worker: Worker): Unit = {
val heartBeatIsRecent = workerService.lastHeartBeatIsRecent(worker)
if (!heartBeatIsRecent && !reportedAsDead.contains(worker._id)) {
reportAsDead(worker)
reportedAsDead.add(worker._id)
}
if (heartBeatIsRecent && reportedAsDead.contains(worker._id)) {
reportAsResurrected(worker)
reportedAsDead.remove(worker._id)
}
}
private def reportAsDead(worker: Worker): Unit = {
val msg = s"Worker ${worker._id} is not reporting. Last heartbeat was at ${formatDate(worker.lastHeartBeat)}"
slackNotificationService.warn("Worker missing", msg)
logger.warn(msg)
}
private def reportAsResurrected(worker: Worker): Unit = {
val msg = s"Worker ${worker._id} is reporting again. Last heartbeat was at ${formatDate(worker.lastHeartBeat)}"
slackNotificationService.success("Worker return", msg)
logger.info(msg)
}
}
|
scalableminds/webknossos
|
app/models/job/Worker.scala
|
Scala
|
agpl-3.0
| 4,610 |
package geotrellis.data
import geotrellis.process.TestServer
import geotrellis._
import geotrellis.raster._
import org.scalatest.Spec
import org.scalatest.matchers.MustMatchers
import org.scalatest.matchers.ShouldMatchers
import org.geotools.gce.geotiff.{GeoTiffFormat}
import org.geotools.factory.{Hints}
import org.geotools.referencing.{CRS}
import org.geotools.coverage.grid.{GridCoordinates2D}
import java.io.{File,FileWriter}
import javax.imageio.ImageIO
import scala.math.{abs, round}
import org.geotools.coverage.grid.io.imageio.geotiff.{GeoTiffIIOMetadataDecoder}
import org.geotools.coverage.grid.io.imageio.IIOMetadataDumper
import java.awt.image.BufferedImage
import Console.printf
import java.awt.image.DataBuffer
import java.awt.Transparency
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class GeoTiffSpec extends Spec with MustMatchers with ShouldMatchers {
val server = TestServer()
describe("A GeoTiffReader") {
it ("should fail on non-existent files") {
val path = "/does/not/exist.tif"
evaluating { GeoTiffReader.readPath(path, None, None) } should produce [Exception];
}
it ("should load correct extent & gridToMap should work") {
val path = "src/test/resources/econic.tif"
val raster1 = GeoTiffReader.readPath(path, None, None)
val (xmap, ymap) = raster1.rasterExtent.gridToMap(0,0)
xmap should be (-15381.615 plusOrMinus 0.001)
ymap should be (15418.729 plusOrMinus 0.001)
}
it ("should render to PNG") {
val path = "src/test/resources/econic.tif"
val raster1 = GeoTiffReader.readPath(path, None, None)
val e = Extent(-15471.6, -15511.3, 15428.4, 15388.7)
val geo = RasterExtent(e, 60.0, 60.0, 513, 513)
val raster2 = GeoTiffReader.readPath(path, None, Some(geo))
}
it ("should draw") {
val path = "src/test/resources/econic.tif"
val raster = GeoTiffReader.readPath(path, None, None)
val (zmin, zmax) = raster.findMinMax
val chooser = new MultiColorRangeChooser(Array(0xFF0000, 0xFFFF00, 0x0000FF))
val breaks = (zmin to zmax)
val colors = chooser.getColors(breaks.length)
val pairs = breaks.zip(colors).toArray
val png = io.WritePNGFile(raster, "/tmp/fromgeo.png", pairs, NODATA, false)
server.run(png)
}
it ("should write") {
val inpath = "src/test/resources/econic.tif"
val raster = GeoTiffReader.readPath(inpath, None, None)
/*println(raster.asciiDrawRange(0,5,0,5))
println(raster.get(0,3))
println(raster.get(1,3))
println(raster.get(2,3))
println(raster.rasterExtent)
*/
val name = "foo"
val outpath = "/tmp/written.tif"
GeoTiffWriter.write(outpath, raster, name)
}
it ("should write floating point rasters") {
val e = Extent(100.0, 400.0, 120.0, 420.0)
val re = RasterExtent(e, 10.0, 10.0, 2, 2)
val data = DoubleArrayRasterData(Array(11.0, 22.0, 33.0, 44.0), 2, 2)
val r = Raster(data, re)
GeoTiffWriter.write("/tmp/float.tif", r, "float")
}
it ("should write bennet's geotiff") {
import geotiff._
val r = server.run(io.LoadFile("src/test/resources/quadborder8.arg"))
val settings = Settings(LongSample, Floating, true)
Encoder.writePath("/tmp/bennet.tif", r, settings)
}
}
}
|
Tjoene/thesis
|
Case_Programs/geotrellis-0.7.0/src/test/scala/geotrellis/data/geotiff.scala
|
Scala
|
gpl-2.0
| 3,565 |
package org.ucombinator.jaam.interpreter.snowflakes
import scala.collection.JavaConversions._
import scala.collection.mutable
import soot.{Main => SootMain, Unit => SootUnit, Value => SootValue, _}
import soot.jimple.{Stmt => SootStmt, _}
import org.ucombinator.jaam.util.Log
import org.ucombinator.jaam.interpreter._
import org.ucombinator.jaam.util.{Soot, Stmt}
// Note: currently enabled
object ClassSnowflakes {
Snowflakes.put(MethodDescription("java.lang.Class", "newInstance", List(), "java.lang.Object"), newInstance)
case object newInstance extends NonstaticSnowflakeHandler {
override def apply(state : State, nextStmt : Stmt, self : Value, args : List[D]) : Set[AbstractState] = {
val local = state.stmt.sootStmt match {
case stmt : DefinitionStmt => stmt.getLeftOp().asInstanceOf[Local]
}
val lhsAddr = state.addrsOf(local)
val exceptionClass = Soot.getSootClass("java.lang.InstantiationException")
val instatiationException = ObjectValue(exceptionClass, Snowflakes.malloc(exceptionClass))
self match {
case ObjectValue(_, ClassBasePointer(className)) =>
if (className.startsWith("[")) {
state.kontStack.handleException(instatiationException, state.stmt, state.fp)
}
else {
val sootClass = Soot.getSootClass(className)
if (sootClass.isInterface || sootClass.isAbstract) {
state.kontStack.handleException(instatiationException, state.stmt, state.fp)
}
else {
//val state2 = state.copy(store = state.newExpr(lhsAddr, sootClass, System.store))
state.newExpr(lhsAddr, sootClass)
try { // TODO: this is a bit of a hack
val expr = new soot.jimple.internal.JSpecialInvokeExpr(
local, //new soot.jimple.internal.JimpleLocal("newInstanceSnowflake", sootClass.getType()),
sootClass.getMethod(SootMethod.constructorName, List(), VoidType.v()).makeRef(),
List[soot.Value]())
//state2.handleInvoke(expr, None)
state.handleInvoke(expr, None)
} catch {
              // If it doesn't have a no-argument constructor, then we must be on a spurious flow
case _: Exception => Set()
}
}
}
case _ =>
Log.error("Unimplemented: newInstance on "+self)
Set()
}
}
}
Snowflakes.put(MethodDescription("java.lang.Class", "getName", List(), "java.lang.String"), getName)
case object getName extends NonstaticSnowflakeHandler {
override def apply(state : State, nextStmt : Stmt, self : Value, args : List[D]) : Set[AbstractState] = {
val local = state.stmt.sootStmt match {
case stmt : DefinitionStmt => stmt.getLeftOp().asInstanceOf[Local]
}
val lhsAddr = state.addrsOf(local)
Log.info("getName self: "+self)
self match {
case ObjectValue(_, ClassBasePointer(className)) =>
System.store.update(lhsAddr, D(Set(ObjectValue(Soot.classes.Class, StringBasePointer(className, state)))))
Set(state.copy(stmt = nextStmt))
case _ =>
Log.error("Unimplemented: getName on "+self)
Set()
}
}
}
//private static native java.lang.Class<?> forName0(java.lang.String, boolean, java.lang.ClassLoader, java.lang.Class<?>) throws java.lang.ClassNotFoundException;
Snowflakes.put(MethodDescription("java.lang.Class", "forName0", List("java.lang.String", "boolean", "java.lang.ClassLoader", "java.lang.Class"), "java.lang.Class"), forName0)
case object forName0 extends StaticSnowflakeHandler {
override def apply(state: State, nextStmt: Stmt, args: List[D]): Set[AbstractState] = {
// Log.error(s"forName0\\n state: $state\\n nextStmt: $nextStmt\\n args: $args")
val className = args(0)
var classes = D(Set())
for (v <- className.getValues) {
v match {
case ObjectValue(_, LiteralStringBasePointer(s)) =>
if (Soot.isClass(s)) {
/*
Log.error("allow phantom: "+Scene.v().getPhantomRefs())
Log.error("forName0 ok: "+s+".")
Log.error("forName0 containsClass: "+SourceLocator.v().getClassSource("com.stac.Main"))
Log.error("forName0 containsClass: "+SourceLocator.v().getClassSource(s))
Log.error("forName0 containsClass: "+SourceLocator.v().getClassSource("foobar.poiuwer"))
//Log.error("forName0 containsClass: "+soot.Scene.v().containsClass(s))
//val sc = soot.Scene.v().getSootClassUnsafe(s)
// TryloadClass, NO loadClassAndSupport, loadClass, NO getSootClassUnsafe
//Log.error("forName0 getSootClassUnsafe: "+sc)
//Log.error("forName0 getSootClassUnsafe.isPhantom: "+sc.isPhantom)
//Log.error("forName0 foobar2: "+soot.Scene.v().loadClass("foobar", SootClass.BODIES))
*/
classes = classes.join(D(Set(ObjectValue(Soot.classes.Class, ClassBasePointer(s.replace('/', '.')))))) // TODO: replace might be unneeded; put a check in the ClassBasePointer constructor
}
case ObjectValue(_, StringBasePointerTop) =>
for (StringLiteralValue(s) <- System.store.getOrElseBot(StringLiteralAddr).getValues) {
if (Soot.isClass(s)) {
classes = classes.join(D(Set(ObjectValue(Soot.classes.Class, ClassBasePointer(s.replace('/', '.')))))) // TODO: replace might be unneeded; put a check in the ClassBasePointer constructor
}
}
case ObjectValue(c,_) if c == Soot.classes.String =>
Log.error("java.lang.Class.forName0 ignoring non-literal String: "+v)
case _ => {}
}
}
Log.error(f"forName0: $className $classes")
// TODO: factor this with ReturnSnowflake
state.stmt.sootStmt match {
case sootStmt : DefinitionStmt => System.store.update(state.addrsOf(sootStmt.getLeftOp()), classes)
case sootStmt : InvokeStmt => {}
}
Set(state.copy(stmt = nextStmt))
}
}
}
|
Ucombinator/jaam
|
src/main/scala/org/ucombinator/jaam/interpreter/snowflakes/ClassSnowflakes.scala
|
Scala
|
bsd-2-clause
| 6,119 |
package models
import models.users._
import scala.slick.lifted.TableQuery
/**
* @author Joseph Dessens
* @since 9/6/14
*/
object UserTableQueries {
object mailTokens extends TableQuery(new MailTokens(_))
object userAuthenticators extends TableQuery(new UserAuthenticators(_))
object users extends TableQuery(new Users(_))
object oauth1s extends TableQuery(new OAuth1s(_))
object oauth2s extends TableQuery(new OAuth2s(_))
object passwords extends TableQuery(new Passwords(_))
object profiles extends TableQuery(new Profiles(_))
}
|
vega113/emotracker
|
app/models/UserTableQueries.scala
|
Scala
|
apache-2.0
| 550 |
package filodb.memory
import net.jpountz.xxhash.XXHashFactory
import filodb.memory.format.UnsafeUtils
/**
* A BinaryRegion is just an area of memory (heap or offheap) with a length prefix.
* There are different implementations depending on the size of the length prefix.
* This design allows us to reference an offheap BinaryRegion and operate/compare them with nothing more than
* a Long native pointer, avoiding expensive heap/object allocations.
* Examples of their use are for UTF8Strings and BinaryRecords.
*/
object BinaryRegion {
import UnsafeUtils._
// NOTE: fastestInstance sometimes returns JNI lib, which seems much slower for shorter strings
// NOTE2: According to XXHash documentation the hash method is thread safe.
val xxhashFactory = XXHashFactory.fastestJavaInstance
val hasher32 = xxhashFactory.hash32
val hasher64 = xxhashFactory.hash64
val Seed = 0x9747b28c
/**
* A memory has a base, offset and a length. An off-heap memory usually has a null base.
* An array backed memory has the array object as the base. Offset is a Long indicating the
* location in native memory. For an array it is retrieved using Unsafe.arrayBaseOffset
* Length is the length of memory in bytes
*/
type Memory = Tuple3[Any, Long, Int]
def hash32(bytes: Array[Byte]): Int = hasher32.hash(bytes, 0, bytes.size, Seed)
// TODO: Can we PLEASE implement our own Unsafe XXHash which does not require creating a DirectBuffer?
def hash32(base: Any, offset: Long, len: Int): Int = base match {
case a: Array[Byte] => hasher32.hash(a, offset.toInt - UnsafeUtils.arayOffset, len, Seed)
case UnsafeUtils.ZeroPointer => hasher32.hash(UnsafeUtils.asDirectBuffer(offset, len), Seed)
}
/**
* Returns true if the source byte array is equal to the destination byte array, at the given
* index and # of bytes into the source array. Destination is compared whole.
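   *
   * An illustrative sketch (example values, not from the original docs):
   * {{{
   *   val src = Array[Byte](1, 2, 3, 4, 5)
   *   val dst = Array[Byte](3, 4, 5)
   *   BinaryRegion.equalBytes(src, 2, 3, dst)  // true: src(2..4) == dst
   * }}}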
*/
def equalBytes(source: Array[Byte], srcIndex: Int, srcNumBytes: Int, dest: Array[Byte]): Boolean =
dest.size == srcNumBytes && equate(dest, arayOffset, source, srcIndex + arayOffset, srcNumBytes)
def copyArray(source: Array[Byte], dest: Array[Byte], destOffset: Int): Unit =
System.arraycopy(source, 0, dest, destOffset, source.size)
// 64-bit pointer to native/offheap memory. NOTE: instead of using this, please use the Ptr*
// value classes as they are much more type safe
type NativePointer = Long
}
trait BinaryRegion {
import format.UnsafeUtils
import BinaryRegion._
// Returns the length from the initial bytes of the region
def numBytes(base: Any, offset: Long): Int
final def numBytes(address: Long): Int = numBytes(UnsafeUtils.ZeroPointer, address)
// The number of bytes used up by the length header
def lenBytes: Int
/**
* Returns 1 if region1 (base1, offset1) > region2 (base2, offset2), 0 if equal, -1 if region1 is less
* Compares byte by byte. The minimum of the lengths of two regions are compared.
* Note: for equality the equals() method is probably faster.
*/
final def compare(base1: Any, offset1: Long, base2: Any, offset2: Long): Int = {
val numBytes1 = numBytes(base1, offset1)
val numBytes2 = numBytes(base2, offset2)
UnsafeUtils.compare(base1, offset1 + lenBytes, numBytes1, base2, offset2 + lenBytes, numBytes2)
}
final def compare(address1: NativePointer, address2: NativePointer): Int =
compare(UnsafeUtils.ZeroPointer, address1, UnsafeUtils.ZeroPointer, address2)
final def asNewByteArray(base: Any, offset: Long): Array[Byte] = {
val numBytes1 = numBytes(base, offset)
val bytes = new Array[Byte](numBytes1 + lenBytes)
UnsafeUtils.unsafe.copyMemory(base, offset, bytes, UnsafeUtils.arayOffset, numBytes1 + lenBytes)
bytes
}
final def asNewByteArray(addr: NativePointer): Array[Byte] = asNewByteArray(UnsafeUtils.ZeroPointer, addr)
/**
* Allocates from the MemFactory and copies to the allocated space the entire BinaryRegion
*/
final def allocateAndCopy(base: Any, offset: Long, factory: MemFactory): Memory = {
val numBytes1 = numBytes(base, offset)
val memory = factory.allocate(numBytes1 + lenBytes)
UnsafeUtils.unsafe.copyMemory(base, offset, memory._1, memory._2, numBytes1 + lenBytes)
memory
}
/**
* Returns true if both regions are byte for byte equal and the same length.
* Uses Long compares for speed.
* Note: does not use hashcode, because we are not caching the hashcode.
*/
final def equals(base1: Any, offset1: Long, base2: Any, offset2: Long): Boolean = {
val numBytes1 = numBytes(base1, offset1)
val numBytes2 = numBytes(base2, offset2)
(numBytes1 == numBytes2) &&
UnsafeUtils.equate(base1, offset1 + lenBytes, base2, offset2 + lenBytes, numBytes1)
}
final def equals(address1: NativePointer, address2: NativePointer): Boolean =
equals(UnsafeUtils.ZeroPointer, address1, UnsafeUtils.ZeroPointer, address2)
final def hashCode(base: Any, offset: Long): Int = {
val numBytes1 = numBytes(base, offset)
base match {
case a: Array[Byte] => hasher32.hash(a, offset.toInt - UnsafeUtils.arayOffset + lenBytes, numBytes1, Seed)
case UnsafeUtils.ZeroPointer => ??? // offheap. This is not supported yet
case other: Any => throw new UnsupportedOperationException(s"Cannot compute hash for base $base")
}
}
final def hashCode64(base: Any, offset: Long): Long = {
val numBytes1 = numBytes(base, offset)
base match {
case a: Array[Byte] => hasher64.hash(a, offset.toInt - UnsafeUtils.arayOffset + lenBytes, numBytes1, Seed)
case UnsafeUtils.ZeroPointer => ??? // offheap. This is not supported yet
case other: Any => throw new UnsupportedOperationException(s"Cannot compute hash for base $base")
}
}
}
/**
* A BinaryRegionMedium uses two bytes to store the length prefix, thus a region can be up to 64KB in size.
*/
object BinaryRegionMedium extends BinaryRegion {
import format.UnsafeUtils
final def numBytes(base: Any, offset: Long): Int = UnsafeUtils.getShort(base, offset) & 0x0FFFF
val lenBytes = 2
}
/**
* A BinaryRegionLarge uses four bytes to store the length prefix, thus a region can be up to 2GB in size.
*/
object BinaryRegionLarge extends BinaryRegion {
import format.UnsafeUtils
final def numBytes(base: Any, offset: Long): Int = UnsafeUtils.getInt(base, offset)
val lenBytes = 4
}
/**
* A simple Consumer so we can consume regions without on heap allocation
*/
trait BinaryRegionConsumer {
def onNext(base: Any, offset: Long): Unit
}
|
velvia/FiloDB
|
memory/src/main/scala/filodb.memory/BinaryRegion.scala
|
Scala
|
apache-2.0
| 6,585 |
package io.udash.web.guide.views.frontend
import io.udash._
import io.udash.css.CssView
import io.udash.web.commons.components.CodeBlock
import io.udash.web.commons.views.{ClickableImageFactory, ImageFactoryPrefixSet}
import io.udash.web.guide.styles.partials.GuideStyles
import io.udash.web.guide.views.References
import io.udash.web.guide.{Context, _}
import scalatags.JsDom
case object FrontendMVPViewFactory extends StaticViewFactory[FrontendMVPState.type](() => new FrontendMVPView)
class FrontendMVPView extends View with CssView {
import Context._
import JsDom.all._
override def getTemplate: Modifier = div(
h2("Model, View, Presenter & ViewFactory"),
p("A single page in Udash application is based on four elements:"),
ul(GuideStyles.defaultList)(
li(
"Model - based on the ", a(href := FrontendPropertiesState.url)("Properties"), " mechanism, ",
"it provides one and two-ways bindings to DOM elements."
),
li(
"View - extends ", i("View"), " and creates a ", a(href := References.ScalatagsHomepage, target := "_blank")("Scalatags"), " template ",
"with a method getting a child view to render."
),
li(
"Presenter - extends ", i("Presenter"), ", it should contain a business logic of the related view. It also handles ",
"application state changes."
),
li(
"ViewFactory - extends ", i("ViewFactory"), ", it was discussed in detail ",
a(href := FrontendRoutingState(None).url)("Routing"), " chapter. ViewFactory is responsible for creating a view and a presenter. "
)
),
ClickableImageFactory(ImageFactoryPrefixSet.Frontend, "mvp.png", "MVP in the Udash", GuideStyles.imgBig, GuideStyles.frame),
h3("ViewFactory"),
p(
"The ViewFactory responsibility is simple. It has to prepare Model, View, Presenter and then link them together. ",
"If you want to create a static view, then you can use ", i("StaticViewFactory"), " which ",
"will create ", i("EmptyPresenter"), " for your view."
),
h3("Model"),
p(
"The Udash framework brings a powerful Properties mechanism, ",
"which is used as Model in Udash-based applications. All you have to do is:"
),
CodeBlock(
"""import io.udash._
|
|case class NumbersInRange(minimum: Int, maximum: Int, numbers: Seq[Int])
|object NumbersInRange extends HasModelPropertyCreator[NumbersInRange]
|
|val numbers: ModelProperty[NumbersInRange] = ModelProperty(
| NumbersInRange(0, 42, Seq.empty)
|)
|
|val s: SeqProperty[Int] = numbers.subSeq(_.numbers)
|s.set(Seq(3, 7, 20, 32))
|s.replace(idx = 1, amount = 2, values = Seq(8, 9, 10):_*)""".stripMargin
)(GuideStyles),
p("The Properties system is described in the ", a(href := FrontendPropertiesState.url)("Properties"), " chapter."),
h3("Presenter"),
p(
"The Presenter should contain all business logic of a view: user interaction callbacks and server communication. ",
"It should not call any methods of a View class. The presenter should pass data to the view via Model properties. ",
"When implementing a presenter, you should remember, that the ", i("handleState"), " method does not have to be called only on ",
"view initialization. For example:"
),
CodeBlock(
"""import io.udash._
|
|class ExamplePresenter(model: Property[Int]) extends Presenter[SomeState] {
| override def handleState(state: SomeState) =
| model.set(state.initValue)
|
| def incButtonClick(): Unit =
| model.set(model.get + 1)
|
| def decButtonClick(): Unit =
| model.set(model.get - 1)
|}""".stripMargin
)(GuideStyles),
h3("View"),
p(
"The View implementation usually gets the Model and the Presenter as constructor arguments. They can be used ",
"in the ", a(href := References.ScalatagsHomepage, target := "_blank")("Scalatags"), " template of a view as user interaction callbacks. ",
"The Model can be bound to a template and will automatically update on the Model changes."
),
CodeBlock(
"""import io.udash._
|import scalatags.JsDom.all._
|
|class ExampleView(model: Property[Int], presenter: ExamplePresenter)
| extends ContainerView {
|
| override def getTemplate: Modifier = div(
| h1("Example view"),
| p("This is example view with buttons..."),
| h3("Model bind example"),
| div(
| button(onclick :+= (ev => presenter.decButtonClick(), true))("-"),
| button(onclick :+= (ev => presenter.incButtonClick(), true))("+"),
| bind(model)
| ),
| h3("Below you can find my child view!"),
| childViewContainer // child view container provided by ContainerView
| )
|}""".stripMargin
)(GuideStyles),
h2("What's next?"),
p(
"Take a look at the ", a(href := FrontendTemplatesState.url)("Scalatags & UdashCSS"), " chapter to ",
"learn more about creating view templates and styling them in Udash. Visit the ",
a(href := FrontendPropertiesState.url)("Properties"), " chapter to read about data model in Udash applications."
)
)
}
|
UdashFramework/udash-core
|
guide/guide/.js/src/main/scala/io/udash/web/guide/views/frontend/FrontendMVPView.scala
|
Scala
|
apache-2.0
| 5,399 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.collection
package generic
/** Some bit operations.
*
* See [[http://www.drmaciver.com/2008/08/unsigned-comparison-in-javascala/]] for
* an explanation of unsignedCompare.
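 *
 *  A small illustrative sketch (not part of the original docs; note that `BitOperations` is
 *  `private[collection]`, so this is only usable from within `scala.collection`):
 *  {{{
 *    BitOperations.Int.zero(8, 4)       // true, since 8 & 4 == 0
 *    BitOperations.Int.bitString(10)    // 32 characters, ending in "1010"
 *  }}}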
*/
private[collection] object BitOperations {
trait Int {
type Int = scala.Int
def zero(i: Int, mask: Int) = (i & mask) == 0
def mask(i: Int, mask: Int) = i & (complement(mask - 1) ^ mask)
def hasMatch(key: Int, prefix: Int, m: Int) = mask(key, m) == prefix
def unsignedCompare(i: Int, j: Int) = (i < j) ^ (i < 0) ^ (j < 0)
def shorter(m1: Int, m2: Int) = unsignedCompare(m2, m1)
def complement(i: Int) = (-1) ^ i
def bits(num: Int) = 31 to 0 by -1 map (i => (num >>> i & 1) != 0)
def bitString(num: Int, sep: String = "") = bits(num) map (b => if (b) "1" else "0") mkString sep
def highestOneBit(j: Int) = java.lang.Integer.highestOneBit(j)
}
object Int extends Int
trait Long {
type Long = scala.Long
def zero(i: Long, mask: Long) = (i & mask) == 0L
def mask(i: Long, mask: Long) = i & (complement(mask - 1) ^ mask)
def hasMatch(key: Long, prefix: Long, m: Long) = mask(key, m) == prefix
def unsignedCompare(i: Long, j: Long) = (i < j) ^ (i < 0L) ^ (j < 0L)
def shorter(m1: Long, m2: Long) = unsignedCompare(m2, m1)
def complement(i: Long) = (-1L) ^ i
def bits(num: Long) = 63L to 0L by -1L map (i => (num >>> i & 1L) != 0L)
def bitString(num: Long, sep: String = "") = bits(num) map (b => if (b) "1" else "0") mkString sep
def highestOneBit(j: Long) = java.lang.Long.highestOneBit(j)
}
object Long extends Long
}
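// A minimal usage sketch (not part of the original file): unsignedCompare treats a
// negative Int as a large unsigned value, which is what the integer-keyed tries that
// use these helpers rely on when ordering prefixes; bitString renders the raw bits.
private[collection] object BitOperationsExample {
  import BitOperations.Int._
  def demo(): Unit = {
    assert(unsignedCompare(1, -1))  // 1 is smaller than 0xFFFFFFFF read as unsigned
    assert(!unsignedCompare(-1, 1)) // and the reverse comparison is false
    println(bitString(-1))          // thirty-two ones: the unsigned view of -1
  }
}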
|
rorygraves/perf_tester
|
corpus/scala-library/src/main/scala/collection/generic/BitOperations.scala
|
Scala
|
apache-2.0
| 2,385 |
package de.kaufhof.pillar
import org.scalatest.{FunSpec, BeforeAndAfter}
import org.scalatest.Matchers
import java.io.{ByteArrayInputStream, FileInputStream}
import java.util.Date
class ParserSpec extends FunSpec with BeforeAndAfter with Matchers {
describe("#parse") {
describe("1370028262_creates_events_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1370028262_creates_events_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[IrreversibleMigration])
}
it("assigns authoredAt") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).authoredAt should equal(new Date(1370023262))
}
it("assigns description") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).description should equal("creates events table")
}
it("assigns up") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).up should contain( """CREATE TABLE events (
| batch_id text,
| occurred_at uuid,
| event_type text,
| payload blob,
| PRIMARY KEY (batch_id, occurred_at, event_type)
|)""".stripMargin)
}
}
describe("1469630066000_creates_users_groups_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1469630066000_creates_users_groups_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[ReversibleMigration])
}
it("assigns authoredAt") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).authoredAt should equal(new Date(1469630066000L))
}
it("assigns description") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).description should equal("creates users and groups tables")
}
it("assigns two up stages") {
val resource = new FileInputStream(migrationPath)
val migration = Parser().parse(resource)
migration.up should contain(
"""CREATE TABLE groups (
| id uuid,
| name text,
| PRIMARY KEY (id)
|)""".stripMargin)
migration.up should contain(
"""CREATE TABLE users (
| id uuid,
| group_id uuid,
| username text,
| password text,
| PRIMARY KEY (id)
|)""".stripMargin)
}
it("assigns two down stages") {
val resource = new FileInputStream(migrationPath)
val migration = Parser().parse(resource).asInstanceOf[ReversibleMigration]
migration.down should contain("""DROP TABLE users""".stripMargin)
migration.down should contain("""DROP TABLE groups""".stripMargin)
}
}
describe("1370028263_creates_views_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1370028263_creates_views_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[ReversibleMigration])
}
it("assigns down") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).asInstanceOf[ReversibleMigration].down should contain("DROP TABLE views")
}
}
describe("1370028264_adds_user_agent_to_views_table.cql") {
val migrationPath = "src/test/resources/pillar/migrations/faker/1370028264_adds_user_agent_to_views_table.cql"
it("returns a migration object") {
val resource = new FileInputStream(migrationPath)
Parser().parse(resource).getClass should be(classOf[ReversibleMigrationWithNoOpDown])
}
}
describe("a migration missing an up stanza") {
val migrationContent = """-- description: creates events table
|-- authoredAt: 1370023262""".stripMargin
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] { Parser().parse(resource) }
thrown.errors("up") should equal("must be present")
}
}
describe("a migration missing a description stanza") {
val migrationContent = "-- authoredAt: 1370023262"
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] { Parser().parse(resource) }
thrown.errors("description") should equal("must be present")
}
}
describe("a migration missing an authoredAt stanza") {
val migrationContent = "-- description: creates events table"
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] { Parser().parse(resource) }
thrown.errors("authoredAt") should equal("must be present")
}
}
describe("a migration with a bogus authored at stanza") {
val migrationContent = "-- authoredAt: a long, long time ago"
it("raises an InvalidMigrationException") {
val resource = new ByteArrayInputStream(migrationContent.getBytes)
val thrown = intercept[InvalidMigrationException] { Parser().parse(resource) }
thrown.errors("authoredAt") should equal("must be a number greater than zero")
}
}
}
}
|
Galeria-Kaufhof/pillar
|
src/test/scala/de/kaufhof/pillar/ParserSpec.scala
|
Scala
|
mit
| 6,068 |
package mesosphere.marathon
package core.condition
import play.api.libs.json.Json
import org.apache.mesos.Protos.{ TaskState => MesosTaskState }
/**
 * This trait defines the status of an Instance and is stored for each Task in Task.Status.
 * The existing case objects are:
 * - Marathon-exclusive statuses
 * - representations of the mesos.Protos.TaskStatus
 * - mappings of the existing (soon-to-be deprecated) mesos.Protos.TaskStatus.TASK_LOST to the new representations
*/
sealed trait Condition extends Product with Serializable {
/**
* @return whether condition is considered a lost state.
*
* UnreachableInactive is not considered Lost because it depends on the context
*/
def isLost: Boolean = {
import Condition._
this match {
case Gone | Unreachable | Unknown | Dropped => true
case _ => false
}
}
/**
* @return whether condition is a terminal state.
*/
def isTerminal: Boolean = this match {
case _: Condition.Terminal => true
case _ => false
}
/**
   * @return whether the condition is considered active.
*/
def isActive: Boolean = this match {
case _: Condition.Active => true
case _ => false
}
}
object Condition {
sealed trait Terminal extends Condition
sealed trait Failure extends Terminal
sealed trait Active extends Condition
/** Reserved: Task with persistent volume has reservation, but is not launched yet */
case object Reserved extends Condition
/** Created: Task is known in marathon and sent to mesos, but not staged yet */
case object Created extends Active
/** Error: indicates that a task launch attempt failed because of an error in the task specification */
case object Error extends Failure
/** Failed: task aborted with an error */
case object Failed extends Failure
/** Finished: task completes successfully */
case object Finished extends Terminal
/** Killed: task was killed */
case object Killed extends Terminal
/** Killing: the request to kill the task has been received, but the task has not yet been killed */
case object Killing extends Active
/** Running: the state after the task has begun running successfully */
case object Running extends Active
/**
* Staging: the master has received the framework’s request to launch the task but the task has not yet started to
* run
*/
case object Staging extends Active
/** Starting: task is currently starting */
case object Starting extends Active
/** Unreachable: the master has not heard from the agent running the task for a configurable period of time */
case object Unreachable extends Active
/**
* The task has been unreachable for a configurable time. A replacement task is started but this one won't be killed
* yet.
*/
case object UnreachableInactive extends Condition
/** Gone: the task was running on an agent that has been terminated */
case object Gone extends Failure
/** Dropped: the task failed to launch because of a transient error (e.g., spontaneously disconnected agent) */
case object Dropped extends Failure
/** Unknown: the master has no knowledge of the task */
case object Unknown extends Failure
private[this] val conditionToMesosTaskState = {
Map(
Error -> MesosTaskState.TASK_ERROR,
Failed -> MesosTaskState.TASK_FAILED,
Finished -> MesosTaskState.TASK_FINISHED,
Killed -> MesosTaskState.TASK_KILLED,
Killing -> MesosTaskState.TASK_KILLING,
Running -> MesosTaskState.TASK_RUNNING,
Staging -> MesosTaskState.TASK_STAGING,
Starting -> MesosTaskState.TASK_STARTING,
Unreachable -> MesosTaskState.TASK_UNREACHABLE,
UnreachableInactive -> MesosTaskState.TASK_UNREACHABLE,
Gone -> MesosTaskState.TASK_GONE,
Dropped -> MesosTaskState.TASK_DROPPED,
Unknown -> MesosTaskState.TASK_UNKNOWN)
}
/** Converts the Condition to a mesos task state where such a conversion is possible */
def toMesosTaskState(condition: Condition): Option[MesosTaskState] =
conditionToMesosTaskState.get(condition)
/**
* Converts the Condition to a mesos task state where such a conversion is possible; if not possible, return
* TASK_STAGING.
*/
def toMesosTaskStateOrStaging(condition: Condition): MesosTaskState =
conditionToMesosTaskState.getOrElse(condition, MesosTaskState.TASK_STAGING)
// scalastyle:off
def apply(str: String): Condition = str.toLowerCase match {
case "reserved" => Reserved
case "created" => Created
case "error" => Error
case "failed" => Failed
case "killed" => Killed
case "killing" => Killing
case "running" => Running
case "staging" => Staging
case "starting" => Starting
case "unreachable" => Unreachable
case "gone" => Gone
case "dropped" => Dropped
case _ => Unknown
}
// scalastyle:on
def unapply(condition: Condition): Option[String] = Some(condition.toString.toLowerCase)
implicit val conditionFormat = Json.format[Condition]
}
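// A minimal usage sketch (not part of the original file): parses a condition name,
// checks the derived flags, and maps it to its Mesos task state when a direct
// mapping exists.
object ConditionUsageSketch {
  def main(args: Array[String]): Unit = {
    val c = Condition("running")
    println(c.isActive)                    // true
    println(c.isTerminal)                  // false
    println(Condition.toMesosTaskState(c)) // Some(TASK_RUNNING)
    println(Condition("not-a-state"))      // unrecognised names fall back to Unknown
  }
}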
|
natemurthy/marathon
|
src/main/scala/mesosphere/marathon/core/condition/Condition.scala
|
Scala
|
apache-2.0
| 5,033 |
package se.gigurra.aichallenge.host
import se.gigurra.aichallenge.Message
import se.gigurra.util.TypedActorProxy
trait IGameHost extends TypedActorProxy {
def attach(session: IUserSession)
def detach(session: IUserSession)
def handleMessage(msg: Message, session: IUserSession)
}
trait IGameHostInternal extends IGameHost {
def step()
}
|
GiGurra/gigurra-game-challenge
|
src/main/scala/se/gigurra/aichallenge/host/IGameHost.scala
|
Scala
|
gpl-2.0
| 347 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.builders
import monix.eval.Task
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Callback, Cancelable}
import monix.reactive.Observable
import monix.reactive.observers.Subscriber
import scala.util.control.NonFatal
private[reactive] final class UnfoldEvalObservable[S, A](seed: S, f: S => Task[Option[(A, S)]]) extends Observable[A] {
def unsafeSubscribeFn(subscriber: Subscriber[A]): Cancelable = {
import subscriber.scheduler
var streamErrors = true
try {
val init = seed
streamErrors = false
Task
.defer(loop(subscriber, init))
.executeWithOptions(_.enableAutoCancelableRunLoops)
.runAsync(Callback.empty)
} catch {
case ex if NonFatal(ex) =>
if (streamErrors) subscriber.onError(ex)
else subscriber.scheduler.reportFailure(ex)
Cancelable.empty
}
}
def loop(subscriber: Subscriber[A], state: S): Task[Unit] =
try f(state).redeemWith(
{ ex =>
subscriber.onError(ex)
Task.unit
}, {
case Some((a, newState)) =>
Task.fromFuture(subscriber.onNext(a)).flatMap {
case Continue =>
loop(subscriber, newState)
case Stop =>
Task.unit
}
case None =>
subscriber.onComplete()
Task.unit
}
)
catch {
case ex if NonFatal(ex) =>
Task.raiseError(ex)
}
}
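// A minimal usage sketch (not part of the original file): builds a countdown stream
// by instantiating the builder directly; application code would normally reach this
// class through the corresponding public Observable factory.
private[reactive] object UnfoldEvalObservableSketch {
  // countdown(3) emits 3, 2, 1 and then completes
  def countdown(from: Int): Observable[Int] =
    new UnfoldEvalObservable[Int, Int](from, s => Task.now(if (s > 0) Some((s, s - 1)) else None))
}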
|
monifu/monifu
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/builders/UnfoldEvalObservable.scala
|
Scala
|
apache-2.0
| 2,139 |
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.math.MutableOptimizationSpace.SparseFieldOptimizationSpace
import breeze.math.{Complex, Field}
import breeze.numerics.{abs, inf}
import breeze.storage.Zero
import org.scalatest._
import org.scalatest.funsuite._
import org.scalatestplus.scalacheck.Checkers
import scala.reflect.ClassTag
class CSCMatrixTest extends AnyFunSuite with Checkers with MatrixTestUtils {
test("Multiply") {
val a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
val ad = DenseMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
val b = CSCMatrix((7.0, -2.0, 8.0), (-3.0, -3.0, 1.0), (12.0, 0.0, 5.0))
val bd = DenseMatrix((7.0, -2.0, 8.0), (-3.0, -3.0, 1.0), (12.0, 0.0, 5.0))
val c = DenseVector(6.0, 2.0, 3.0)
assert((a * b: CSCMatrix[Double]) === CSCMatrix((37.0, -8.0, 25.0), (85.0, -23.0, 67.0)))
assert((a * bd: DenseMatrix[Double]) === DenseMatrix((37.0, -8.0, 25.0), (85.0, -23.0, 67.0)))
assert((ad * b: DenseMatrix[Double]) === DenseMatrix((37.0, -8.0, 25.0), (85.0, -23.0, 67.0)))
assert(a * c === DenseVector(19.0, 52.0))
assert(b * c === DenseVector(62.0, -21.0, 87.0))
// assert(b.t * c === DenseVector(72.0, -18.0, 65.0))
// assert(a.t * DenseVector(4.0, 3.0) === DenseVector(16.0, 23.0, 30.0))
// should be dense
// val x = a * a.t
// assert(x === DenseMatrix((14.0,32.0),(32.0,77.0)))
// should be dense
// val y = a.t * a
// assert(y === DenseMatrix((17.0,22.0,27.0),(22.0,29.0,36.0),(27.0,36.0,45.0)))
// val z : DenseMatrix[Double] = b * (b + 1.0)
// assert(z === DenseMatrix((164.0,5.0,107.0),(-5.0,10.0,-27.0),(161.0,-7.0,138.0)))
}
test("Multiply Int") {
val a = CSCMatrix((1, 2, 3), (4, 5, 6))
val b = CSCMatrix((7, -2, 8), (-3, -3, 1), (12, 0, 5))
val bd = DenseMatrix((7, -2, 8), (-3, -3, 1), (12, 0, 5))
val c = DenseVector(6, 2, 3)
val cs = SparseVector(3)((1, 2))
assert(a * b === CSCMatrix((37, -8, 25), (85, -23, 67)))
assert(a * bd === DenseMatrix((37, -8, 25), (85, -23, 67)))
assert(a * c === DenseVector(19, 52))
assert(b * c === DenseVector(62, -21, 87))
assert(a * cs === SparseVector(4, 10))
assert(b * cs === SparseVector(3)((0, -4), (1, -6)))
// assert(b.t * c === DenseVector(72, -18, 65))
// assert(a.t * DenseVector(4, 3) === DenseVector(16, 23, 30))
// should be dense
// val x = a * a.t
// assert(x === DenseMatrix((14,32),(32,77)))
// should be dense
// val y = a.t * a
// assert(y === DenseMatrix((17,22,27),(22,29,36),(27,36,45)))
// val z : DenseMatrix[Double] = b * (b + 1.0)
// assert(z === DenseMatrix((164,5,107),(-5,10,-27),(161,-7,138)))
}
test("Multiply Complex") {
val a = CSCMatrix((Complex(1, 1), Complex(2, 2), Complex(3, 3)), (Complex(4, 4), Complex(5, 5), Complex(6, 6)))
val b = CSCMatrix(
(Complex(7, 7), Complex(-2, -2), Complex(8, 8)),
(Complex(-3, -3), Complex(-3, -3), Complex(1, 1)),
(Complex(12, 12), Complex(0, 0), Complex(5, 5)))
val c = DenseVector(Complex(6, 0), Complex(2, 0), Complex(3, 0))
val cs = SparseVector(Complex(6, 0), Complex(2, 0), Complex(3, 0))
val value: CSCMatrix[Complex] = a * b
assert(
value === CSCMatrix(
(Complex(0, 74), Complex(0, -16), Complex(0, 50)),
(Complex(0, 170), Complex(0, -46), Complex(0, 134))))
assert(b * c === DenseVector(Complex(62, 62), Complex(-21, -21), Complex(87, 87)))
assert(b * cs === DenseVector(Complex(62, 62), Complex(-21, -21), Complex(87, 87)))
assert(b.t * c === DenseVector(Complex(72, -72), Complex(-18, 18), Complex(65, -65)))
}
test("Transpose") {
val a = CSCMatrix.zeros[Int](2, 3)
a(0, 0) = 1
a(1, 2) = 2
val expected = CSCMatrix.zeros[Int](3, 2)
expected(0, 0) = 1
expected(2, 1) = 2
assert(a.t === expected)
}
test("Transpose Complex") {
val a = CSCMatrix.zeros[Complex](2, 3)
a(0, 0) = Complex(1, 1)
a(1, 2) = Complex(-2, -2)
val expected = CSCMatrix.zeros[Complex](3, 2)
expected(0, 0) = Complex(1, -1)
expected(2, 1) = Complex(-2, 2)
assert(a.t === expected)
}
test("Generic CSC ops") {
// mostly for coverage
val a = CSCMatrix.create[String](1, 1, Array("SSS"))
intercept[IndexOutOfBoundsException] {
a(3, 3) = ":("
assert(false, "Shouldn't be here!")
}
assert(a(0, 0) === "SSS")
intercept[IndexOutOfBoundsException] {
a(3, 3)
assert(false, "Shouldn't be here!")
}
a(0, 0) = ":("
assert(a(0, 0) === ":(")
}
test("Builder, simple") {
val builder = new CSCMatrix.Builder[Double](3, 3)
builder.add(1, 1, 2.0)
}
test("Builder idempotency") {
val builder = new CSCMatrix.Builder[Double](3, 3)
builder.add(1, 1, 2.0)
val r1 = builder.result
val r2 = builder.result
assert(r1 === r2)
}
test("Builder, full") {
val builder = new CSCMatrix.Builder[Double](2, 3)
builder.add(0, 1, 2.0)
builder.add(1, 1, 5.0)
builder.add(0, 2, 3.0)
builder.add(1, 0, 4.0)
builder.add(1, 2, 6.0)
builder.add(0, 0, 1.0)
val cs = builder.result()
val a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
assert(cs === a)
}
test("Builder, repeated full") {
val builder = new CSCMatrix.Builder[Double](2, 3)
builder.add(0, 1, 2.0)
builder.add(1, 2, 3.0)
builder.add(1, 1, 5.0)
builder.add(0, 2, 3.0)
builder.add(1, 0, 4.0)
builder.add(1, 2, 3.0)
builder.add(0, 0, 1.0)
val cs = builder.result()
val a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
assert(cs === a)
}
test("MapValues") {
val a: CSCMatrix[Int] = CSCMatrix((1, 0, 0), (2, 3, -1))
val b1: CSCMatrix[Int] = a.mapValues(_ + 1)
assert(b1 === CSCMatrix((2, 1, 1), (3, 4, 0)))
val b2: CSCMatrix[Double] = a.mapValues(_ + 1.0)
assert(b2 === CSCMatrix((2.0, 1.0, 1.0), (3.0, 4.0, 0.0)))
}
test("addition/subtraction") {
val a: CSCMatrix[Int] = CSCMatrix((1, 0, 0), (2, 3, -1))
val b: CSCMatrix[Int] = CSCMatrix((0, 1, 0), (2, 3, -1))
assert(a + b === CSCMatrix((1, 1, 0), (4, 6, -2)))
assert(a - b === CSCMatrix((1, -1, 0), (0, 0, 0)))
}
test("addition/subtraction csc/dm") {
val a: CSCMatrix[Int] = CSCMatrix((1, 0, 0), (2, 3, -1))
val b: DenseMatrix[Int] = DenseMatrix((0, 1, 0), (2, 3, -1))
assert(a + b === DenseMatrix((1, 1, 0), (4, 6, -2)))
assert(a - b === DenseMatrix((1, -1, 0), (0, 0, 0)))
assert(b - a === -DenseMatrix((1, -1, 0), (0, 0, 0)))
}
test("inplace addition/subtraction") {
val a: CSCMatrix[Int] = CSCMatrix((1, 0, 0), (2, 3, -1))
val b: CSCMatrix[Int] = CSCMatrix((0, 1, 0), (2, 3, -1))
a += b
assert(a === CSCMatrix((1, 1, 0), (4, 6, -2)))
assert(a.activeSize === 5)
a -= b
a -= b
assert(a === CSCMatrix((1, -1, 0), (0, 0, 0)))
assert(a.activeSize === 2)
}
test("inplace set dm/csc") {
val a: CSCMatrix[Int] = CSCMatrix((1, 0, 0), (2, 3, -1))
val b: DenseMatrix[Int] = DenseMatrix((0, 1, 0), (2, 3, -1))
b := a
assert(a == b)
}
test("InPlace Ops") {
var a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
val b = CSCMatrix((7.0, -2.0, 8.0), (-3.0, -3.0, 1.0))
a :*= b
assert(a === CSCMatrix((7.0, -4.0, 24.0), (-12.0, -15.0, 6.0)))
a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
a /:/= b
assert(a === CSCMatrix((1.0 / 7.0, -1.0, 3.0 / 8.0), (4.0 / (-3.0), 5.0 / (-3.0), 6.0)))
}
test("csc scalar \\"bad\\" ops") {
val a: CSCMatrix[Int] = CSCMatrix((1, 0, 0), (2, 3, -1))
assert(a /:/ 3 === CSCMatrix((0, 0, 0), (0, 1, 0)))
val b: CSCMatrix[Complex] =
CSCMatrix((Complex(1, 0), Complex(0, 0), Complex(0, 0)), (Complex(2, 0), Complex(3, 0), Complex(-1, 0)))
assert(
b /:/ Complex(3, 0) === CSCMatrix(
(Complex(1.0 / 3.0, 0), Complex(0, 0), Complex(0, 0)),
(Complex(2.0 / 3.0, 0), Complex(1, 0), Complex(-1.0 / 3.0, 0))))
}
test("csc scalar \\"bad\\" pow ops") {
val a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
val b = CSCMatrix((7.0, -2.0, 8.0), (-3.0, -3.0, 1.0))
assert((a ^:^ b) === (a.toDense ^:^ b.toDense))
val ac = convert(a, Complex)
val bc = convert(b, Complex)
assert((ac ^:^ bc) === (ac.toDense ^:^ bc.toDense))
}
test("csc scalar \\"bad\\" mod ops") {
val a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
val b = CSCMatrix((7.0, -2.0, 8.0), (-3.0, -3.0, 1.0))
assert((a %:% b) === (a.toDense %:% b.toDense))
val ac = convert(a, Complex)
val bc = convert(b, Complex)
assert((ac %:% bc) === (ac.toDense %:% bc.toDense))
}
test("flatten") {
val a = CSCMatrix((1.0, 2.0, 3.0), (4.0, 5.0, 6.0))
val b = CSCMatrix.zeros[Double](3, 2)
b(0, 1) = 1.0; b(2, 1) = 3.0
val z = CSCMatrix.zeros[Double](5, 3)
assert(a.flatten() === SparseVector(1.0, 2.0, 3.0, 4.0, 5.0, 6.0))
assert(z.flatten() === SparseVector.zeros[Double](15))
assert(b.flatten() === SparseVector(6)((1, 1.0), (5, 3.0)))
}
test("CSCxCSC: OpAddInPlace2:Field") {
def testAddInPlace[T: Field: Zero: ClassTag](a: CSCMatrix[T], b: CSCMatrix[T]) = {
val optspace = SparseFieldOptimizationSpace.sparseOptSpace[T]
import optspace._
a += b
}
val cscA = CSCMatrix.zeros[Double](3, 4)
val cscB = CSCMatrix.zeros[Double](3, 4)
cscB(1, 1) = 1.3
cscB(0, 0) = 1.0
cscB(2, 3) = 1.8
cscB(2, 0) = 1.6
testAddInPlace[Double](cscA, cscB)
assert(cscA === cscB)
testAddInPlace[Double](cscA, cscB)
assert(cscA === cscB * 2.0)
testAddInPlace[Double](cscA, CSCMatrix.zeros[Double](3, 4))
assert(cscA === cscB * 2.0)
}
test("CSCxCSC: OpSubInPlace2:Field") {
def testSubInPlace[T: Field: Zero: ClassTag](a: CSCMatrix[T], b: CSCMatrix[T]) = {
val optspace = SparseFieldOptimizationSpace.sparseOptSpace[T]
import optspace._
a -= b
}
val cscA = CSCMatrix.zeros[Double](3, 4)
val cscB = CSCMatrix.zeros[Double](3, 4)
cscB(1, 1) = 1.3
cscB(0, 0) = 1.0
cscB(2, 3) = 1.8
cscB(2, 0) = 1.6
testSubInPlace[Double](cscA, cscB)
assert(cscA === cscB * -1.0)
testSubInPlace[Double](cscA, cscB)
assert(cscA === cscB * -2.0)
testSubInPlace[Double](cscA, CSCMatrix.zeros[Double](3, 4))
assert(cscA === cscB * -2.0)
}
test("CSCxCSC: OpMulScalarInPlace2:Field") {
def testMulScalarInPlace[T: Field: Zero: ClassTag](a: CSCMatrix[T], b: CSCMatrix[T]) = {
val optspace = SparseFieldOptimizationSpace.sparseOptSpace[T]
import optspace._
a *= b
}
val cscA = CSCMatrix.zeros[Double](3, 4)
val cscB = CSCMatrix.zeros[Double](3, 4)
cscB(1, 1) = 1.3
cscB(0, 0) = 1.0
cscB(2, 3) = 1.8
cscB(2, 0) = 1.6
testMulScalarInPlace[Double](cscA, cscB)
assert(cscA === cscA)
cscA(1, 1) = 2.0
cscA(0, 0) = 2.0
cscA(1, 0) = 2.0
testMulScalarInPlace[Double](cscA, cscB)
val cscR = CSCMatrix.zeros[Double](3, 4)
cscR(1, 1) = 2.6
cscR(0, 0) = 2.0
assert(cscA === cscR)
testMulScalarInPlace[Double](cscA, CSCMatrix.zeros[Double](3, 4))
assert(cscA === CSCMatrix.zeros[Double](3, 4))
}
test("CSCxCSC: OpSetInPlace:Scalar:Field") {
def testSetInPlace[T: Field: Zero: ClassTag](a: CSCMatrix[T], b: T) = {
val optspace = SparseFieldOptimizationSpace.sparseOptSpace[T]
import optspace._
a := b
}
val cscA = CSCMatrix.zeros[Double](3, 4)
val b = 4
testSetInPlace[Double](cscA, b)
assert(cscA === CSCMatrix.fill(3, 4)(b))
testSetInPlace[Double](cscA, 0)
assert(cscA === CSCMatrix.zeros[Double](3, 4))
}
test("ZipMapVals Test") {
def testZipMap[T: Field: Zero: ClassTag](a: CSCMatrix[T], b: CSCMatrix[T]): CSCMatrix[T] = {
val f = implicitly[Field[T]]
val optspace = SparseFieldOptimizationSpace.sparseOptSpace[T]
import optspace._
val addMapFn = (t1: T, t2: T) => f.+(t1, t2)
zipMapValuesM.map(a, b, addMapFn)
}
val cscA = CSCMatrix.zeros[Double](3, 4)
val cscB = CSCMatrix.zeros[Double](3, 4)
cscB(1, 1) = 1.3
cscB(0, 0) = 1.0
cscB(2, 3) = 1.8
cscB(2, 0) = 1.6
val cscR = testZipMap(cscA, cscB)
assert(cscR === cscB)
val cscR1 = testZipMap(cscR, cscB)
assert(cscR1 === cscB * 2.0)
cscR(1, 0) = 1.1
cscB(0, 1) = 1.2
val cscR2 = testZipMap(cscR, cscB)
val cscR3 = cscR * 2.0
cscR3(1, 0) = 1.1
cscR3(0, 1) = 1.2
assert(cscR2 === cscR3)
val cscR4 = testZipMap(cscB, cscA)
assert(cscR4 === cscB)
}
test("axpy") {
val a = CSCMatrix((1, 0, 0), (2, 3, -1))
val b = CSCMatrix((0, 1, 0), (2, 3, -1))
axpy(2, b, a)
assert(a === CSCMatrix((1, 2, 0), (6, 9, -3)))
}
test("#344") {
val builder = new CSCMatrix.Builder[Double](rows = 10, cols = 10)
builder.add(0, 0, 1.0)
builder.add(1, 0, 1.0)
val a = builder.result
a.update(0, 0, 0.0)
assert(a.t.rows === a.cols)
}
test("#348") {
val a = DenseMatrix((2, 2), (3, 3))
val b = CSCMatrix((2, 2), (3, 3))
assert(a + b === a + b.toDense)
}
test("#313") {
val builder = new CSCMatrix.Builder[Double](rows = -1, cols = -1)
builder.add(0, 0, 1.0)
builder.add(1, 0, 1.0)
val a = builder.result
assert(a.rows == 2)
assert(a.cols == 1)
}
test("#479") {
val m1: Matrix[Double] = new CSCMatrix[Double](Array(1.0, 1, 1), 3, 3, Array(0, 1, 2, 3), Array(0, 1, 2))
val m2: Matrix[Double] = new CSCMatrix[Double](Array(1.0, 2, 2, 4), 3, 3, Array(0, 0, 2, 4), Array(1, 2, 1, 2))
val sum = (m1 + m2).asInstanceOf[CSCMatrix[Double]]
require(sum.colPtrs.last == sum.rowIndices.length, s"${sum.colPtrs.last} not equal to ${sum.rowIndices.length}")
}
test("CSCMatrix Solve") {
    val r2: DenseVector[Double] = CSCMatrix((1.0, 3.0, 4.0), (2.0, 0.0, 6.0)) \ DenseVector(1.0, 3.0)
import breeze.numerics.inf
assert(norm(r2 - DenseVector(0.1813186813186811, -0.3131868131868131, 0.43956043956043944), inf) < 1E-5)
}
test("CSCMatrix solve #644") {
    assert(CSCMatrix(1.0) \ DenseVector(0.0) == DenseVector(0.0))
    assert(CSCMatrix((0.0, 1.0), (0.0, 0.0)) \ DenseVector(0.0, 0.0) == DenseVector(0.0, 0.0))
}
test("CSCMatrix solve CSCMatrix #536") {
    val r1: CSCMatrix[Double] = CSCMatrix((1.0, 3.0), (2.0, 0.0)) \ CSCMatrix((1.0, 2.0), (3.0, 4.0))
matricesNearlyEqual(r1, CSCMatrix((1.5, 2.0), (-1.0 / 6, 0.0)))
// wide matrix solve
    val r3: CSCMatrix[Double] = CSCMatrix((1.0, 3.0, 4.0), (2.0, 0.0, 6.0)) \ CSCMatrix((1.0, 2.0), (3.0, 4.0))
matricesNearlyEqual(
r3,
CSCMatrix(
(0.1813186813186811, 0.2197802197802196),
(-0.3131868131868131, -0.1978021978021977),
(0.43956043956043944, 0.5934065934065933)))
// tall matrix solve
    val r4: CSCMatrix[Double] = CSCMatrix((1.0, 3.0), (2.0, 0.0), (4.0, 6.0)) \ CSCMatrix(
(1.0, 4.0),
(2.0, 5.0),
(3.0, 6.0))
assert(max(abs(
r4 - CSCMatrix((0.9166666666666667, 1.9166666666666672), (-0.08333333333333352, -0.08333333333333436)))) < 1E-5)
val dim = 70
val largeMatrix = CSCMatrix.rand(dim, dim)
val largeMatrix2 = CSCMatrix.rand(dim, dim)
// make sure it's PD
for (i <- 0 until largeMatrix.rows)
largeMatrix(i, i) += 2.0
    matricesNearlyEqual(largeMatrix \ largeMatrix2, largeMatrix.toDense \ largeMatrix2.toDense, 1E-3)
}
}
|
scalanlp/breeze
|
math/src/test/scala/breeze/linalg/CSCMatrixTest.scala
|
Scala
|
apache-2.0
| 15,940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import scala.collection.mutable.ListBuffer
import org.apache.spark.annotation.DeveloperApi
/**
* :: DeveloperApi ::
 * Information about a running task attempt inside a TaskSet.
*/
@DeveloperApi
class TaskInfo(
val taskId: Long,
val index: Int,
    val attemptNumber: Int,
    val launchTime: Long,
    val executorId: String,
    val host: String,
    val taskLocality: TaskLocality.TaskLocality, // task locality preference
    val speculative: Boolean) { // whether this task attempt is speculative
/**
* The time when the task started remotely getting the result. Will not be set if the
* task result was sent immediately when the task finished (as opposed to sending an
* IndirectTaskResult and later fetching the result from the block manager).
   */
var gettingResultTime: Long = 0
/**
* Intermediate updates to accumulables during this task. Note that it is valid for the same
* accumulable to be updated multiple times in a single task or for two accumulables with the
* same name but different IDs to exist in a task.
   */
val accumulables = ListBuffer[AccumulableInfo]()
/**
* The time when the task has completed successfully (including the time to remotely fetch
* results, if necessary).
   * The finish time is also set when the task fails.
*/
var finishTime: Long = 0
  // whether the task failed
var failed = false
private[spark] def markGettingResult(time: Long = System.currentTimeMillis) {
gettingResultTime = time
}
private[spark] def markSuccessful(time: Long = System.currentTimeMillis) {
finishTime = time
}
private[spark] def markFailed(time: Long = System.currentTimeMillis) {
finishTime = time
failed = true
}
def gettingResult: Boolean = gettingResultTime != 0
def finished: Boolean = finishTime != 0
def successful: Boolean = finished && !failed
def running: Boolean = !finished
def status: String = {
if (running) {
if (gettingResult) {
"GET RESULT"
} else {
"RUNNING"
}
} else if (failed) {
"FAILED"
} else if (successful) {
"SUCCESS"
} else {
"UNKNOWN"
}
}
@deprecated("Use attemptNumber", "1.6.0")
def attempt: Int = attemptNumber
def id: String = s"$index.$attemptNumber"
  // duration of the task's execution
def duration: Long = {
if (!finished) {
throw new UnsupportedOperationException("duration() called on unfinished task")
} else {
finishTime - launchTime
}
}
private[spark] def timeRunning(currentTime: Long): Long = currentTime - launchTime
}
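// A minimal usage sketch (not part of the original file): drives a TaskInfo through
// launch and completion and reads back the derived status and duration.
private[spark] object TaskInfoSketch {
  def demo(): Unit = {
    val info = new TaskInfo(taskId = 1L, index = 0, attemptNumber = 0, launchTime = 1000L,
      executorId = "exec-1", host = "localhost",
      taskLocality = TaskLocality.PROCESS_LOCAL, speculative = false)
    println(info.status)              // RUNNING
    info.markSuccessful(time = 1500L)
    println(info.status)              // SUCCESS
    println(info.duration)            // 500, i.e. finishTime - launchTime
  }
}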
|
tophua/spark1.52
|
core/src/main/scala/org/apache/spark/scheduler/TaskInfo.scala
|
Scala
|
apache-2.0
| 3,907 |
package stefansavev.demo.hyperloglog.counters
import org.apache.lucene.util.LongBitSet
import stefansavev.demo.hyperloglog.hashing.Hasher
class EfficientLinearCounter(hasher: Hasher) extends ApproximateCounter{
final val p = 14 //number of bits used to represent the buckets
final val m = 1 << p
val bitSetCounters = new LongBitSet(m)
var numSetBits: Int = 0
override def add(obj: Long): Unit = {
val h = hasher.hash(obj)
val index = (h >>> (64 - p)).toInt
if (!bitSetCounters.get(index)){
bitSetCounters.set(index)
numSetBits += 1
}
}
override def distinctCount(): Double = {
val zeros = m - numSetBits
if (zeros != 0){
m.toDouble*Math.log(m.toDouble/zeros.toDouble)
}
else{
      // every bucket is set, so we can only say the number of unique elements is at least this
m.toDouble*Math.log(m.toDouble)
}
}
}
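// A worked sketch of the estimate used in distinctCount above (not part of the
// original demo): with m = 1 << 14 buckets and, say, 4096 of them still unset,
// linear counting estimates m * ln(m / zeros) distinct elements.
object LinearCountingEstimateSketch {
  def estimate(m: Int, zeros: Int): Double =
    m.toDouble * Math.log(m.toDouble / zeros.toDouble)
  def main(args: Array[String]): Unit =
    println(estimate(1 << 14, 4096)) // about 22713, i.e. 16384 * ln(4)
}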
|
dvgodoy/HashingAndSketching
|
out/production/hyperloglog/stefansavev/demo/hyperloglog/counters/EfficientLinearCounter.scala
|
Scala
|
apache-2.0
| 874 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.storage
import org.apache.spark.sql.SparkSession
trait HasStorageModel extends HasStorageReader with HasExcludableStorage {
protected val databases: Array[Database.Name]
def serializeStorage(path: String, spark: SparkSession): Unit = {
if ($(includeStorage))
saveStorage(path, spark, withinStorage = true)
}
def saveStorage(path: String, spark: SparkSession, withinStorage: Boolean = false): Unit = {
databases.foreach(database => {
StorageHelper.save(path, getReader(database).getConnection, spark, withinStorage)
})
}
override protected def onWrite(path: String, spark: SparkSession): Unit = {
serializeStorage(path, spark)
}
def deserializeStorage(path: String, spark: SparkSession): Unit = {
if ($(includeStorage))
databases.foreach(database => {
StorageHelper.load(
path,
spark,
database.toString,
$(storageRef),
withinStorage = true
)
})
}
}
|
JohnSnowLabs/spark-nlp
|
src/main/scala/com/johnsnowlabs/storage/HasStorageModel.scala
|
Scala
|
apache-2.0
| 1,612 |
package com.blinkbox.books.test
import org.hamcrest.{Description, BaseMatcher, Matcher}
import scala.language.implicitConversions
import scala.language.experimental.macros
import scala.reflect.macros.Context
private object MatcherMacros {
def toMatcher[T](c: Context)(f: c.Expr[T => Boolean])(implicit wtt: c.WeakTypeTag[T]): c.Expr[Matcher[T]] = {
import c.universe._
object MessageTransformer extends Transformer {
override def transform(tree: c.universe.Tree): c.universe.Tree = tree match {
case Literal(Constant("$$MESSAGE_PLACEHOLDER$$")) => Literal(Constant(description(f)))
case x => super.transform(x)
}
private def description(f: c.type#Expr[T => Boolean]): String = show(f) // ok but not that pretty
}
val ast = reify {
new BaseMatcher[T] {
override def matches(item: Any): Boolean = f.splice(item.asInstanceOf[T])
override def describeTo(description: Description): Unit = description.appendText("$$MESSAGE_PLACEHOLDER$$")
}
}
c.Expr[BaseMatcher[T]](MessageTransformer.transform(ast.tree))
}
}
trait MatcherSugar {
implicit def toMatcher[T](f: T => Boolean): Matcher[T] = macro MatcherMacros.toMatcher[T]
/**
* Synonym for Matchers.eq, as that clashes with Scala's standard `eq` method.
*/
def eql[T](arg: T) = {
import org.mockito.Matchers.{ eq => matcherEq }
matcherEq(arg)
}
}
object MatcherSugar extends MatcherSugar
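// A minimal usage sketch (not part of the original file): with the implicit macro
// conversion in scope, a plain predicate can stand in wherever a Hamcrest Matcher
// is expected, for example as an argument matcher in Mockito stubs or verifications.
object MatcherSugarSketch {
  import MatcherSugar._
  val evenMatcher: Matcher[Int] = (n: Int) => n % 2 == 0
  // evenMatcher.matches(4) == true; evenMatcher.matches(3) == false
}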
|
blinkboxbooks/common-test.scala
|
src/main/scala/com/blinkbox/books/test/MatcherSugar.scala
|
Scala
|
mit
| 1,452 |
package org.dama.datasynth
/**
* Created by aprat on 3/05/17.
*
* Class used to store the configuration of DataSynth
*
*/
object DataSynthConfig {
def apply() : DataSynthConfig = {
new DataSynthConfig()
}
def apply( args : List[String] ) : DataSynthConfig = {
nextOption( new DataSynthConfig(), args)
}
/**
* Parses the next option from the option list
* @param currentConfig The current DataSynth config
* @param list The list of remaining options to parse
* @return The new DataSynth config
*/
def nextOption(currentConfig : DataSynthConfig, list: List[String]) : DataSynthConfig = {
def isSwitch(s : String) = (s(0) == '-')
list match {
case "--output-dir" :: outputdir :: tail if !isSwitch(outputdir) => {
val config = currentConfig.setOutputDir(outputdir)
nextOption(config, tail)
}
case "--schema-file" :: schema :: tail if !isSwitch(schema) => {
val config = currentConfig.schemaFile(schema)
nextOption(config, tail)
}
case "--master-workspace-dir" :: workspace :: tail if !isSwitch(workspace) => {
val config = currentConfig.masterWorkspaceDir(workspace)
nextOption(config, tail)
}
case "--datasynth-workspace-dir" :: workspace :: tail if !isSwitch(workspace) => {
val config = currentConfig.datasynthWorkspaceDir(workspace)
nextOption(config, tail)
}
case option :: tail => {
throw new Exception(s"Unknown option $option")
}
case Nil => currentConfig
}
}
def validateConfig( config : DataSynthConfig ) = {
if(config.outputDir.equals("") ){
throw new RuntimeException(s"Output dir not specified. Use --output-dir <path> option")
}
if(config.schemaFile.equals("") ){
throw new RuntimeException(s"Schema file not specified. Use --schema-file <path> option")
}
}
}
class DataSynthConfig ( val outputDir : String = "",
val schemaFile : String = "",
val masterWorkspaceDir : String = "file:///tmp",
val datasynthWorkspaceDir : String = "file:///tmp")
{
/**
* Sets the outputDir
* @param newOutputDir The value of the output dir
* @return this
*/
def setOutputDir(newOutputDir : String ) : DataSynthConfig = {
new DataSynthConfig(newOutputDir,
schemaFile,
masterWorkspaceDir,
datasynthWorkspaceDir)
}
/**
* Sets the schema file path
* @param newSchemaFile The value of the schema file path
* @return this
*/
def schemaFile(newSchemaFile : String ) : DataSynthConfig = {
new DataSynthConfig(outputDir,
newSchemaFile,
masterWorkspaceDir,
datasynthWorkspaceDir)
}
/**
* Sets the master workspace dir
   * @param newWorkspace The value of the master workspace dir
* @return this
*/
def masterWorkspaceDir(newWorkspace: String ) : DataSynthConfig = {
if(!common.utils.FileUtils.isLocal(newWorkspace)) {
throw new RuntimeException(s"Invalid master workspace directory ${newWorkspace}." +
s" Master workspace directory must be in local file" +
s" system and thus prefixed with file://")
}
new DataSynthConfig(outputDir,
schemaFile,
newWorkspace,
datasynthWorkspaceDir)
}
/**
* Sets datasynth's workspace dir
   * @param newWorkspace The value of datasynth's workspace dir
* @return this
*/
def datasynthWorkspaceDir(newWorkspace: String ) : DataSynthConfig = {
new DataSynthConfig(outputDir,
schemaFile,
masterWorkspaceDir,
newWorkspace)
}
}
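// A minimal usage sketch (not part of the original file): builds a configuration from
// command-line style arguments (the paths are placeholders) and validates it before use.
object DataSynthConfigSketch {
  def main(args: Array[String]): Unit = {
    val config = DataSynthConfig(List(
      "--schema-file", "file:///tmp/schema.json",
      "--output-dir", "file:///tmp/output"))
    DataSynthConfig.validateConfig(config)
    println(s"schema=${config.schemaFile} output=${config.outputDir}")
  }
}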
|
DAMA-UPC/DataSynth
|
src/main/scala/org/dama/datasynth/DataSynthConfig.scala
|
Scala
|
gpl-3.0
| 3,923 |
package recommender
import org.apache.spark.mllib.recommendation.Rating
import org.apache.spark.rdd.RDD
/**
* Created by Ondra Fiedler on 3.9.14.
*/
/**
 * k-NN with a tree of clusters. The tree is created by repeatedly splitting the biggest cluster into two smaller ones with k-Means from MLlib.
* @param vectorsRDD Vectors representing a set of users. Ratings of users are taken from the Recommender's dataHolder if this field is not specified.
* @param numberOfClusters Number of clusters
* @param numberOfNeighbors Number of considered neighbors by the k-NN algorithm
* @param distanceMetric Metric which determines similarity between users in k-NN
*/
class ClusterTreeKnnRecommender(vectorsRDD: RDD[UserVector], numberOfClusters: Int, numberOfKMeansIterations: Int, numberOfNeighbors: Int, distanceMetric: DistanceMetric = CosineDistance) extends RecommenderWithUserVectorRepresentation(vectorsRDD) with Serializable {
def this(numberOfClusters: Int, numberOfKMeansIterations: Int, numberOfNeighbors: Int, distanceMetric: DistanceMetric = CosineDistance) = this(UserSparseVector.convertRatingsFromHolderToUserSparseVectors(MainHolder.getDataHolder()), numberOfClusters, numberOfKMeansIterations, numberOfNeighbors, distanceMetric)
val root = createTree()
class TreeNode(centroidVal: Option[UserVector], clusterVal: Option[RDD[UserVector]]) {
val centroid = centroidVal
var leftChild: Option[TreeNode] = None
var rightChild: Option[TreeNode] = None
var cluster: Option[RDD[UserVector]] = clusterVal
var recommender: Option[KnnRecommender] = None
val sizeInSubTree: Long = clusterVal match {
case Some(cluster) => cluster.count
case None => 0
}
}
/**
   * Splits the cluster in a leaf node into two smaller ones using MLlib's k-Means and stores the new clusters in this node's children
   * @param node A leaf node
   * @return false if the cluster could not be split because it contained only one vector, true otherwise
*/
protected def splitNode(node: TreeNode): Boolean = {
node.cluster match {
case Some(rdd) => {
if (rdd.count() <= 1) return false
val (centroids, seqOfRDDs) = KMeansClustering.clustering(rdd, 2, numberOfKMeansIterations)
node.leftChild = Some(new TreeNode(Some(centroids(0)), Some(seqOfRDDs(0))))
node.rightChild = Some(new TreeNode(Some(centroids(1)), Some(seqOfRDDs(1))))
node.cluster = None
return true
}
case None => throw new CannotSplitInnerNodeException
}
}
/**
* Creates a binary tree, which contains clusters in its leaf nodes. A KnnRecommender is created for every cluster.
* @return Root of the tree
*/
protected def createTree(): TreeNode = {
val root = new TreeNode(None, Some(vectorsRDD))
var leafs = List(root)
var numberOfCreatedClusters = 1
while (numberOfCreatedClusters < numberOfClusters) {
val node = leafs.head
val splitted = splitNode(node)
if (splitted) {
leafs = leafs.drop(1)
leafs = leafs ::: List(node.leftChild.get) ::: List(node.rightChild.get)
leafs = leafs.sortBy(node => -node.sizeInSubTree)
}
numberOfCreatedClusters += 1
}
leafs.foreach(node => node.recommender = Some(new KnnRecommender(node.cluster.get, numberOfNeighbors, distanceMetric)))
root
}
/**
   * Finds the cluster nearest to the given vector and returns the KnnRecommender that corresponds to it
* @param vector Vector with ratings of target user
* @return KnnRecommender
*/
protected def getRecommender(vector: UserVector): Option[KnnRecommender] = {
var node = root
var notFound = true
var recommender: Option[KnnRecommender] = None
while (notFound) {
node.recommender match {
case Some(recommenderInNode) => {
recommender = Some(recommenderInNode)
notFound = false
}
case None => {
val leftCentroid = node.leftChild.get.centroid.get
val rightCentroid = node.rightChild.get.centroid.get
if (distanceMetric.getDistance(vector, leftCentroid) < distanceMetric.getDistance(vector, rightCentroid)) {
node = node.leftChild.get
}
else {
node = node.rightChild.get
}
}
}
}
recommender
}
class CannotSplitInnerNodeException extends Exception
class WrongFormatOfClusterTreeException extends Exception
/**
   * Recommends products for the target user
   * @param vector Vector with ratings of the target user
   * @return Ratings of the recommended products
*/
override def recommend(vector: UserVector, numberOfRecommendedProducts: Int): Seq[Rating] = {
getRecommender(vector) match {
case Some(recommender) => recommender.recommend(vector, numberOfRecommendedProducts)
case None => throw new WrongFormatOfClusterTreeException
}
}
}
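// A minimal wiring sketch (not part of the original file), assuming an RDD of user
// vectors is already available: builds a 16-cluster tree and asks for the top ten
// recommendations for one target user. The cluster and neighbour counts are arbitrary.
object ClusterTreeKnnRecommenderSketch {
  def recommendTopTen(vectors: RDD[UserVector], target: UserVector): Seq[Rating] = {
    val recommender = new ClusterTreeKnnRecommender(
      vectorsRDD = vectors,
      numberOfClusters = 16,
      numberOfKMeansIterations = 20,
      numberOfNeighbors = 25,
      distanceMetric = CosineDistance)
    recommender.recommend(target, numberOfRecommendedProducts = 10)
  }
}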
|
jibaro/spark-recommender
|
src/main/scala/recommender/ClusterTreeKnnRecommender.scala
|
Scala
|
mit
| 4,875 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.pipeline
import collection.JavaConverters._
import java.util.{Enumeration ⇒ JEnumeration}
import javax.servlet.ServletContext
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpSession
import org.apache.log4j.Logger
import org.dom4j.Document
import org.dom4j.Element
import org.orbeon.errorified.Exceptions
import org.orbeon.exception.OrbeonFormatter
import org.orbeon.oxf.cache.ObjectCache
import org.orbeon.oxf.common.OrbeonLocationException.getRootLocationData
import org.orbeon.oxf.pipeline.api.ExternalContext
import org.orbeon.oxf.pipeline.api.PipelineContext
import org.orbeon.oxf.pipeline.api.ProcessorDefinition
import org.orbeon.oxf.processor._
import org.orbeon.oxf.processor.generator.DOMGenerator
import org.orbeon.oxf.properties.Properties
import org.orbeon.oxf.resources.ResourceNotFoundException
import org.orbeon.oxf.util.AttributesToMap
import org.orbeon.oxf.util.PipelineUtils
import org.orbeon.oxf.util.ScalaUtils.nonEmptyOrNone
import org.orbeon.oxf.webapp.{WebAppContext, HttpStatusCodeException, WebAppExternalContext}
import org.orbeon.oxf.xml.dom4j.Dom4jUtils
import org.orbeon.saxon.om.NodeInfo
import scala.util.control.NonFatal
object InitUtils {
private val CacheSizeProperty = "oxf.cache.size"
private val ProcessorsProperty = "oxf.pipeline.processors"
private val DeprecatedProcessorsProperty = "oxf.prologue"
private val DefaultProcessors = "oxf:/processors.xml"
// Run with a pipeline context and destroy the pipeline when done
def withPipelineContext[T](body: PipelineContext ⇒ T) = {
var success = false
val pipelineContext = new PipelineContext
try {
val result = body(pipelineContext)
success = true
result
} finally
pipelineContext.destroy(success)
}
// Run a processor with an ExternalContext
def runProcessor(processor: Processor, externalContext: ExternalContext, pipelineContext: PipelineContext, logger: Logger) {
// Record start time for this request
val tsBegin = if (logger.isInfoEnabled) System.currentTimeMillis else 0L
if (logger.isInfoEnabled)
nonEmptyOrNone(externalContext.getStartLoggerString) foreach logger.info
// Set ExternalContext into PipelineContext
pipelineContext.setAttribute(PipelineContext.EXTERNAL_CONTEXT, externalContext)
var success = false
try {
// Set cache size
val cacheMaxSize = Properties.instance.getPropertySet.getInteger(CacheSizeProperty)
if (cacheMaxSize != null) ObjectCache.instance.setMaxSize(cacheMaxSize)
// Start execution
processor.reset(pipelineContext)
processor.start(pipelineContext)
success = true
} catch {
case NonFatal(t) ⇒
def locationData = getRootLocationData(t)
def locationMessage = locationData map ("at " + _) getOrElse "with no location data"
Exceptions.getRootThrowable(t) match {
case e: HttpStatusCodeException ⇒
externalContext.getResponse.sendError(e.code)
logger.info(e.toString + " " + locationMessage)
if (logger.isDebugEnabled)
logger.debug(e.throwable map OrbeonFormatter.format getOrElse "")
case e: ResourceNotFoundException ⇒
externalContext.getResponse.sendError(404)
logger.info("Resource not found" + (Option(e.resource) map (": " + _) getOrElse "") + " " + locationMessage)
case _ ⇒
throw t
}
} finally {
if (logger.isInfoEnabled) {
val timing = System.currentTimeMillis - tsBegin
val requestPath = Option(externalContext.getRequest) map (_.getRequestPath) getOrElse "Done running processor"
logger.info(requestPath + " - Timing: " + timing)
}
try pipelineContext.destroy(success)
catch {
case NonFatal(t) ⇒
logger.debug("Exception while destroying context after exception" + OrbeonFormatter.format(t))
}
}
}
// Create a processor and connect its inputs to static URLs
def createProcessor(processorDefinition: ProcessorDefinition): Processor = {
// Create the processor
val processor = ProcessorFactoryRegistry.lookup(processorDefinition.getName).createInstance
// Connect its inputs based on the definition
for ((inputName, value) ← processorDefinition.getEntries.asScala) {
import DOMGenerator._
import ProcessorImpl.OUTPUT_DATA
import PipelineUtils._
def connectInput(file: Option[String], create: (String, Long, String) ⇒ DOMGenerator) =
connect(create("init input", ZeroValidity, file getOrElse DefaultContext), OUTPUT_DATA, processor, inputName)
value match {
case url: String ⇒
val urlGenerator = createURLGenerator(url)
connect(urlGenerator, OUTPUT_DATA, processor, inputName)
case element: Element ⇒
val locationData = ProcessorUtils.getElementLocationData(element)
connectInput(Option(locationData) map (_.getSystemID), createDOMGenerator(element, _, _, _))
case document: Document ⇒
val locationData = ProcessorUtils.getElementLocationData(document.getRootElement)
connectInput(Option(locationData) map (_.getSystemID), createDOMGenerator(document, _, _, _))
case nodeInfo: NodeInfo ⇒
connectInput(Option(nodeInfo.getSystemId), createDOMGenerator(nodeInfo, _, _, _))
case value ⇒
throw new IllegalStateException("Incorrect type in processor definition: " + value.getClass)
}
}
processor
}
// Run a processor based on definitions found in properties or the web app context. This is
  // useful for context/session listeners. If no definition is found, nothing is run and no exception is thrown.
def runWithServletContext(servletContext: ServletContext, session: Option[HttpSession], logger: Logger, logMessagePrefix: String, message: String, uriNamePropertyPrefix: String, processorInputProperty: String): Unit = {
require(servletContext ne null)
// Make sure the Web app context is initialized
val webAppContext = WebAppContext(servletContext)
if (message != null)
logger.info(logMessagePrefix + " - " + message)
val processorDefinitionOption =
getDefinitionFromProperties(uriNamePropertyPrefix, processorInputProperty) orElse
getDefinitionFromServletContext(servletContext, uriNamePropertyPrefix, processorInputProperty)
processorDefinitionOption foreach { processorDefinition ⇒
logger.info(logMessagePrefix + " - About to run processor: " + processorDefinition)
val processor = createProcessor(processorDefinition)
val externalContext = new WebAppExternalContext(webAppContext, session)
withPipelineContext { pipelineContext ⇒
runProcessor(processor, externalContext, pipelineContext, logger)
}
}
}
// Register processor definitions with the default XML Processor Registry. This defines the
// mapping of processor names to class names.
lazy val processorDefinitions: Unit = {
def registerProcessors(url: String) = {
val processorDefinitions = PipelineUtils.createURLGenerator(url, true)
val registry = new XMLProcessorRegistry
PipelineUtils.connect(processorDefinitions, "data", registry, "config")
withPipelineContext { pipelineContext ⇒
processorDefinitions.reset(pipelineContext)
registry.reset(pipelineContext)
registry.start(pipelineContext)
}
}
// Register processors from processors.xml and from custom properties
val propertySet = Properties.instance.getPropertySet
def fromProperty(s: String) = Option(propertySet.getString(s))
val processors = fromProperty(ProcessorsProperty) orElse fromProperty(DeprecatedProcessorsProperty) getOrElse DefaultProcessors
registerProcessors(processors)
}
def getDefinitionFromServletContext(servletContext: ServletContext, uriNamePropertyPrefix: String, inputPropertyPrefix: String) =
getDefinitionFromMap(new ServletContextInitMap(servletContext), uriNamePropertyPrefix, inputPropertyPrefix)
def getDefinitionFromProperties(uriNamePropertyPrefix: String, inputPropertyPrefix: String) =
getDefinitionFromMap(PropertiesMap, uriNamePropertyPrefix, inputPropertyPrefix)
// Create a ProcessorDefinition from a Map. Only Map.get() and Map.keySet() are used
def getDefinitionFromMap(map: Map[String, String], uriNamePropertyPrefix: String, inputPropertyPrefix: String) =
map.get(uriNamePropertyPrefix + "name") map { processorName ⇒
val processorDefinition = new ProcessorDefinition
processorDefinition.setName(Dom4jUtils.explodedQNameToQName(processorName))
for ((name, value) ← map)
if (name.startsWith(inputPropertyPrefix))
processorDefinition.addInput(name.substring(inputPropertyPrefix.length), value)
processorDefinition
}
// Read-only view of the properties as a Map
private object PropertiesMap extends Map[String, String] {
def get(key: String) = Option(Properties.instance.getPropertySet.getObject(key)) map (_.toString)
def iterator = Properties.instance.getPropertySet.keySet.asScala.toIterator map (key ⇒ key → this(key))
def -(key: String) = Map() ++ this - key
def +[B1 >: String](kv: (String, B1)) = Map() ++ this + kv
}
// Read-only view of the ServletContext initialization parameters as a Map
private class ServletContextInitMap(servletContext: ServletContext) extends Map[String, String] {
def get(key: String) = Option(servletContext.getInitParameter(key))
def iterator = servletContext.getInitParameterNames.asInstanceOf[JEnumeration[String]].asScala.toIterator map (key ⇒ key → this(key))
def -(key: String) = Map() ++ this - key
def +[B1 >: String](kv: (String, B1)) = Map() ++ this + kv
}
// View of the HttpSession properties as a Map
class SessionMap(httpSession: HttpSession) extends AttributesToMap[AnyRef](new AttributesToMap.Attributeable[AnyRef] {
def getAttribute(s: String) = httpSession.getAttribute(s)
def getAttributeNames = httpSession.getAttributeNames.asInstanceOf[JEnumeration[String]]
def removeAttribute(s: String): Unit = httpSession.removeAttribute(s)
def setAttribute(s: String, o: AnyRef): Unit = httpSession.setAttribute(s, o)
})
// View of the HttpServletRequest properties as a Map
class RequestMap(httpServletRequest: HttpServletRequest) extends AttributesToMap[AnyRef](new AttributesToMap.Attributeable[AnyRef] {
def getAttribute(s: String) = httpServletRequest.getAttribute(s)
def getAttributeNames = httpServletRequest.getAttributeNames.asInstanceOf[JEnumeration[String]]
def removeAttribute(s: String): Unit = httpServletRequest.removeAttribute(s)
def setAttribute(s: String, o: AnyRef): Unit = httpServletRequest.setAttribute(s, o)
})
}
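// A minimal usage sketch (not part of the original file): builds a ProcessorDefinition
// from a plain Map the same way the servlet-context and properties variants above do.
// The property names and the pipeline URL are hypothetical placeholders.
object GetDefinitionFromMapSketch {
  def sampleDefinition: Option[ProcessorDefinition] =
    InitUtils.getDefinitionFromMap(
      Map(
        "oxf.example-processor.name"         -> "{http://www.orbeon.com/oxf/processors}pipeline",
        "oxf.example-processor.input.config" -> "oxf:/config/example.xpl"),
      uriNamePropertyPrefix = "oxf.example-processor.",
      inputPropertyPrefix   = "oxf.example-processor.input.")
}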
|
evlist/orbeon-forms
|
src/main/scala/org/orbeon/oxf/pipeline/InitUtils.scala
|
Scala
|
lgpl-2.1
| 12,484 |
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import org.scalatest.matchers.ShouldMatchers
import org.scalatest.{ FunSpec, BeforeAndAfterAll, BeforeAndAfterEach, BeforeAndAfter }
class Chap05Spec extends FunSpec with ShouldMatchers {
describe("ifについて"){
it("even"){
// ##@range_begin(if_as_expression_in_scala)
def even(number:Int): Boolean = {
        return if(number % 2 == 0) // if is an expression, so its value is returned
true
else
false
}
      /************* Test *************/
even(2) should equal(true)
even(3) should equal(false)
// ##@range_end(if_as_expression_in_scala)
}
}
describe("switch文"){
it("通貨の例"){
// ##@range_begin(pattern_match_in_scala)
trait Currency
case class Yen(amount: Int) extends Currency
case class Dollar(amount: Int) extends Currency
def toS(currency:Currency):String =
        // match on the currency type
        currency match {
          // when the value matches Yen
          case Yen(amount) => { // the variable amount holds the yen value
            "%s yen".format(amount)
          }
          // when the value matches Dollar
          case Dollar(amount) => { // the variable amount holds the dollar value
"%s dollar".format(amount)
}
}
      /************* Test *************/
val yen = Yen(1000)
toS(yen) should equal("1000 yen")
// ##@range_end(pattern_match_in_scala)
}
}
}
|
akimichi/functionaljs
|
src/test/scala/chap05.spec.scala
|
Scala
|
mit
| 1,591 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.runtime.stream
import java.lang.{Integer => JInt, Long => JLong}
import java.math.BigDecimal
import java.sql.Timestamp
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.AssignerWithPunctuatedWatermarks
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.api.watermark.Watermark
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{TableSchema, Tumble, Types}
import org.apache.flink.table.expressions.{ExpressionParser, TimeIntervalUnit}
import org.apache.flink.table.plan.TimeIndicatorConversionTest.TableFunc
import org.apache.flink.table.runtime.stream.TimeAttributesITCase.{AtomicTimestampWithEqualWatermark, TestPojo, TimestampWithEqualWatermark, TimestampWithEqualWatermarkPojo}
import org.apache.flink.table.runtime.utils.JavaPojos.Pojo1
import org.apache.flink.table.runtime.utils.StreamITCase
import org.apache.flink.table.utils.{MemoryTableSourceSinkUtil, TestTableSourceWithTime}
import org.apache.flink.test.util.AbstractTestBase
import org.apache.flink.types.Row
import org.junit.Assert._
import org.junit.Test
import scala.collection.mutable
import scala.collection.JavaConverters._
/**
* Tests for access and materialization of time attributes.
*/
class TimeAttributesITCase extends AbstractTestBase {
val data = List(
(1L, 1, 1d, 1f, new BigDecimal("1"), "Hi"),
(2L, 2, 2d, 2f, new BigDecimal("2"), "Hallo"),
(3L, 2, 2d, 2f, new BigDecimal("2"), "Hello"),
(4L, 5, 5d, 5f, new BigDecimal("5"), "Hello"),
(7L, 3, 3d, 3f, new BigDecimal("3"), "Hello"),
(8L, 3, 3d, 3f, new BigDecimal("3"), "Hello world"),
(16L, 4, 4d, 4f, new BigDecimal("4"), "Hello world"))
@Test
def testAtomicType1(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(Seq(1L, 2L, 3L, 4L, 7L, 8L, 16L))
.assignTimestampsAndWatermarks(new AtomicTimestampWithEqualWatermark())
val table = stream.toTable(
tEnv, 'rowtime.rowtime, 'proctime.proctime)
val t = table
.where('proctime.cast(Types.LONG) > 0)
.select('rowtime.cast(Types.STRING))
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.001",
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.003",
"1970-01-01 00:00:00.004",
"1970-01-01 00:00:00.007",
"1970-01-01 00:00:00.008",
"1970-01-01 00:00:00.016")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testAtomicType2(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(Seq(1L, 2L, 3L, 4L, 7L, 8L, 16L))
.assignTimestampsAndWatermarks(new AtomicTimestampWithEqualWatermark())
val table = stream.toTable(
tEnv, 'l, 'rowtime.rowtime, 'proctime.proctime)
val t = table
.where('proctime.cast(Types.LONG) > 0)
.select('l, 'rowtime.cast(Types.STRING))
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1,1970-01-01 00:00:00.001",
"2,1970-01-01 00:00:00.002",
"3,1970-01-01 00:00:00.003",
"4,1970-01-01 00:00:00.004",
"7,1970-01-01 00:00:00.007",
"8,1970-01-01 00:00:00.008",
"16,1970-01-01 00:00:00.016")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testCalcMaterialization(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(
tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string, 'proctime.proctime)
val t = table.select('rowtime.cast(Types.STRING))
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.001",
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.003",
"1970-01-01 00:00:00.004",
"1970-01-01 00:00:00.007",
"1970-01-01 00:00:00.008",
"1970-01-01 00:00:00.016")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testCalcMaterialization2(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val t = table
.filter('rowtime.cast(Types.LONG) > 4)
.select('rowtime, 'rowtime.floor(TimeIntervalUnit.DAY), 'rowtime.ceil(TimeIntervalUnit.DAY))
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.007,1970-01-01 00:00:00.0,1970-01-02 00:00:00.0",
"1970-01-01 00:00:00.008,1970-01-01 00:00:00.0,1970-01-02 00:00:00.0",
"1970-01-01 00:00:00.016,1970-01-01 00:00:00.0,1970-01-02 00:00:00.0")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testTableSink(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
MemoryTableSourceSinkUtil.clear()
tEnv.registerTableSink(
"testSink",
(new MemoryTableSourceSinkUtil.UnsafeMemoryAppendTableSink).configure(
Array[String]("rowtime", "floorDay", "ceilDay"),
Array[TypeInformation[_]](Types.SQL_TIMESTAMP, Types.SQL_TIMESTAMP, Types.SQL_TIMESTAMP)
))
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
.filter('rowtime.cast(Types.LONG) > 4)
.select(
'rowtime,
'rowtime.floor(TimeIntervalUnit.DAY).as('floorDay),
'rowtime.ceil(TimeIntervalUnit.DAY).as('ceilDay))
.insertInto("testSink")
tEnv.execute("job name")
val expected = Seq(
"1970-01-01 00:00:00.007,1970-01-01 00:00:00.0,1970-01-02 00:00:00.0",
"1970-01-01 00:00:00.008,1970-01-01 00:00:00.0,1970-01-02 00:00:00.0",
"1970-01-01 00:00:00.016,1970-01-01 00:00:00.0,1970-01-02 00:00:00.0")
assertEquals(expected.sorted, MemoryTableSourceSinkUtil.tableDataStrings.sorted)
}
@Test
def testTableFunction(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(
tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string, 'proctime.proctime)
val func = new TableFunc
    // we test that this can be executed without exceptions
    table.joinLateral(func('proctime, 'proctime, 'string) as 's).toAppendStream[Row]
    // we test that this can be executed without exceptions
    table.joinLateral(func('rowtime, 'rowtime, 'string) as 's).toAppendStream[Row]
// we can only test rowtime, not proctime
val t = table.joinLateral(func('rowtime, 'proctime, 'string) as 's).select('rowtime, 's)
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.001,1trueHi",
"1970-01-01 00:00:00.002,2trueHallo",
"1970-01-01 00:00:00.003,3trueHello",
"1970-01-01 00:00:00.004,4trueHello",
"1970-01-01 00:00:00.007,7trueHello",
"1970-01-01 00:00:00.008,8trueHello world",
"1970-01-01 00:00:00.016,16trueHello world")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testWindowAfterTableFunction(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(
tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string, 'proctime.proctime)
val func = new TableFunc
val t = table
.joinLateral(func('rowtime, 'proctime, 'string) as 's)
.window(Tumble over 5.millis on 'rowtime as 'w)
.groupBy('w)
.select('w.rowtime, 's.count)
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.004,4",
"1970-01-01 00:00:00.009,2",
"1970-01-01 00:00:00.019,1")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testUnion(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(
tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val t = table.unionAll(table).select('rowtime)
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.001",
"1970-01-01 00:00:00.001",
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.003",
"1970-01-01 00:00:00.003",
"1970-01-01 00:00:00.004",
"1970-01-01 00:00:00.004",
"1970-01-01 00:00:00.007",
"1970-01-01 00:00:00.007",
"1970-01-01 00:00:00.008",
"1970-01-01 00:00:00.008",
"1970-01-01 00:00:00.016",
"1970-01-01 00:00:00.016")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testWindowWithAggregationOnRowtimeSql(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
tEnv.registerTable("MyTable", table)
val t = tEnv.sqlQuery("SELECT COUNT(`rowtime`) FROM MyTable " +
"GROUP BY TUMBLE(rowtime, INTERVAL '0.003' SECOND)")
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1",
"2",
"2",
"2"
)
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testMultiWindow(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val t = table
.window(Tumble over 2.millis on 'rowtime as 'w)
.groupBy('w)
.select('w.rowtime as 'rowtime, 'int.count as 'int)
.window(Tumble over 4.millis on 'rowtime as 'w2)
.groupBy('w2)
.select('w2.rowtime, 'w2.end, 'int.count)
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.003,1970-01-01 00:00:00.004,2",
"1970-01-01 00:00:00.007,1970-01-01 00:00:00.008,2",
"1970-01-01 00:00:00.011,1970-01-01 00:00:00.012,1",
"1970-01-01 00:00:00.019,1970-01-01 00:00:00.02,1"
)
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testMultiWindowSqlNoAggregation(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val window1 = tEnv.sqlQuery(
s"""SELECT
TUMBLE_ROWTIME(rowtime, INTERVAL '0.002' SECOND) AS rowtime,
TUMBLE_END(rowtime, INTERVAL '0.002' SECOND) AS endtime
FROM $table
GROUP BY TUMBLE(rowtime, INTERVAL '0.002' SECOND)""")
val window2 = tEnv.sqlQuery(
s"""SELECT
TUMBLE_ROWTIME(rowtime, INTERVAL '0.004' SECOND),
TUMBLE_END(rowtime, INTERVAL '0.004' SECOND)
FROM $window1
GROUP BY TUMBLE(rowtime, INTERVAL '0.004' SECOND)""")
val results = window2.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.003,1970-01-01 00:00:00.004",
"1970-01-01 00:00:00.007,1970-01-01 00:00:00.008",
"1970-01-01 00:00:00.011,1970-01-01 00:00:00.012",
"1970-01-01 00:00:00.019,1970-01-01 00:00:00.02"
)
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testMultiWindowSqlWithAggregation(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val window = tEnv.sqlQuery(
s"""SELECT
TUMBLE_ROWTIME(rowtime, INTERVAL '0.004' SECOND),
TUMBLE_END(rowtime, INTERVAL '0.004' SECOND),
COUNT(`int`) AS `int`
FROM (
SELECT
COUNT(`int`) AS `int`,
TUMBLE_ROWTIME(rowtime, INTERVAL '0.002' SECOND) AS `rowtime`
FROM $table
GROUP BY TUMBLE(rowtime, INTERVAL '0.002' SECOND)
)
GROUP BY TUMBLE(rowtime, INTERVAL '0.004' SECOND)""")
val results = window.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.003,1970-01-01 00:00:00.004,2",
"1970-01-01 00:00:00.007,1970-01-01 00:00:00.008,2",
"1970-01-01 00:00:00.011,1970-01-01 00:00:00.012,1",
"1970-01-01 00:00:00.019,1970-01-01 00:00:00.02,1"
)
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testMultiWindowSqlWithAggregation2(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime1.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
val window = tEnv.sqlQuery(
s"""SELECT
TUMBLE_ROWTIME(rowtime2, INTERVAL '0.004' SECOND),
TUMBLE_END(rowtime2, INTERVAL '0.004' SECOND),
COUNT(`int`) as `int`
FROM (
SELECT
TUMBLE_ROWTIME(rowtime1, INTERVAL '0.002' SECOND) AS rowtime2,
COUNT(`int`) as `int`
FROM $table
GROUP BY TUMBLE(rowtime1, INTERVAL '0.002' SECOND)
)
GROUP BY TUMBLE(rowtime2, INTERVAL '0.004' SECOND)""")
val results = window.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.003,1970-01-01 00:00:00.004,2",
"1970-01-01 00:00:00.007,1970-01-01 00:00:00.008,2",
"1970-01-01 00:00:00.011,1970-01-01 00:00:00.012,1",
"1970-01-01 00:00:00.019,1970-01-01 00:00:00.02,1"
)
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testCalcMaterializationWithPojoType(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
tEnv.registerTable("T1", table)
val querySql = "select rowtime as ts, `string` as msg from T1"
val results = tEnv.sqlQuery(querySql).toAppendStream[Pojo1]
results.addSink(new StreamITCase.StringSink[Pojo1])
env.execute()
val expected = Seq(
"Pojo1{ts=1970-01-01 00:00:00.001, msg='Hi'}",
"Pojo1{ts=1970-01-01 00:00:00.002, msg='Hallo'}",
"Pojo1{ts=1970-01-01 00:00:00.003, msg='Hello'}",
"Pojo1{ts=1970-01-01 00:00:00.004, msg='Hello'}",
"Pojo1{ts=1970-01-01 00:00:00.007, msg='Hello'}",
"Pojo1{ts=1970-01-01 00:00:00.008, msg='Hello world'}",
"Pojo1{ts=1970-01-01 00:00:00.016, msg='Hello world'}")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testPojoSupport(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val p1 = new TestPojo
p1.a = 12
p1.b = 42L
p1.c = "Test me."
val p2 = new TestPojo
p2.a = 13
p2.b = 43L
p2.c = "And me."
val stream = env
.fromElements(p1, p2)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermarkPojo)
// use aliases, swap all attributes, and skip b2
val table = stream.toTable(tEnv, 'b.rowtime as 'b, 'c as 'c, 'a as 'a)
// no aliases, no swapping
val table2 = stream.toTable(tEnv, 'a, 'b.rowtime, 'c)
// use proctime, no skipping
val table3 = stream.toTable(tEnv, 'a, 'b.rowtime, 'c, 'b2, 'proctime.proctime)
// Java expressions
// use aliases, swap all attributes, and skip b2
val table4 = stream.toTable(
tEnv,
ExpressionParser.parseExpressionList("b.rowtime as b, c as c, a as a").asScala: _*)
// no aliases, no swapping
val table5 = stream.toTable(
tEnv,
ExpressionParser.parseExpressionList("a, b.rowtime, c").asScala: _*)
val t = table.select('b, 'c , 'a)
.unionAll(table2.select('b, 'c, 'a))
.unionAll(table3.select('b, 'c, 'a))
.unionAll(table4.select('b, 'c, 'a))
.unionAll(table5.select('b, 'c, 'a))
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.042,Test me.,12",
"1970-01-01 00:00:00.042,Test me.,12",
"1970-01-01 00:00:00.042,Test me.,12",
"1970-01-01 00:00:00.042,Test me.,12",
"1970-01-01 00:00:00.042,Test me.,12",
"1970-01-01 00:00:00.043,And me.,13",
"1970-01-01 00:00:00.043,And me.,13",
"1970-01-01 00:00:00.043,And me.,13",
"1970-01-01 00:00:00.043,And me.,13",
"1970-01-01 00:00:00.043,And me.,13")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testTableSourceWithTimeIndicators(): Unit = {
StreamITCase.clear
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
val rows = Seq(
Row.of(new JInt(1), "A", new JLong(1000L)),
Row.of(new JInt(2), "B", new JLong(2000L)),
Row.of(new JInt(3), "C", new JLong(3000L)),
Row.of(new JInt(4), "D", new JLong(4000L)),
Row.of(new JInt(5), "E", new JLong(5000L)),
Row.of(new JInt(6), "F", new JLong(6000L)))
val fieldNames = Array("a", "b", "rowtime")
val schema = new TableSchema(
fieldNames :+ "proctime",
Array(Types.INT, Types.STRING, Types.SQL_TIMESTAMP, Types.SQL_TIMESTAMP))
val rowType = new RowTypeInfo(
Array(Types.INT, Types.STRING, Types.LONG).asInstanceOf[Array[TypeInformation[_]]],
fieldNames)
val tableSource = new TestTableSourceWithTime(schema, rowType, rows, "rowtime", "proctime")
tEnv.registerTableSource("testTable", tableSource)
val result = tEnv
.scan("testTable")
.where('a % 2 === 1)
.select('rowtime, 'a, 'b)
.toAppendStream[Row]
result.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:01.0,1,A",
"1970-01-01 00:00:03.0,3,C",
"1970-01-01 00:00:05.0,5,E")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testSqlWindowRowtime(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.testResults = mutable.MutableList()
val stream = env
.fromCollection(data)
.assignTimestampsAndWatermarks(new TimestampWithEqualWatermark())
val table = stream.toTable(tEnv, 'rowtime.rowtime, 'int, 'double, 'float, 'bigdec, 'string)
tEnv.registerTable("MyTable", table)
val t = tEnv.sqlQuery("SELECT TUMBLE_ROWTIME(rowtime, INTERVAL '0.003' SECOND) FROM MyTable " +
"GROUP BY TUMBLE(rowtime, INTERVAL '0.003' SECOND)")
val results = t.toAppendStream[Row]
results.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = Seq(
"1970-01-01 00:00:00.002",
"1970-01-01 00:00:00.005",
"1970-01-01 00:00:00.008",
"1970-01-01 00:00:00.017"
)
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
@Test
def testMaterializedRowtimeFilter(): Unit = {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.setParallelism(1)
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val tEnv = StreamTableEnvironment.create(env)
StreamITCase.clear
val data = new mutable.MutableList[(String, Timestamp, Int)]
data.+=(("ACME", new Timestamp(1000L), 12))
data.+=(("ACME", new Timestamp(2000L), 17))
data.+=(("ACME", new Timestamp(3000L), 13))
data.+=(("ACME", new Timestamp(4000L), 11))
val t = env.fromCollection(data)
.assignAscendingTimestamps(e => e._2.toInstant.toEpochMilli)
.toTable(tEnv, 'symbol, 'tstamp.rowtime, 'price)
tEnv.registerTable("Ticker", t)
val sqlQuery =
s"""
|SELECT *
|FROM (
| SELECT symbol, SUM(price) as price,
| TUMBLE_ROWTIME(tstamp, interval '1' second) as rowTime,
| TUMBLE_START(tstamp, interval '1' second) as startTime,
| TUMBLE_END(tstamp, interval '1' second) as endTime
| FROM Ticker
| GROUP BY symbol, TUMBLE(tstamp, interval '1' second)
|)
|WHERE startTime < endTime
|""".stripMargin
val result = tEnv.sqlQuery(sqlQuery).toAppendStream[Row]
result.addSink(new StreamITCase.StringSink[Row])
env.execute()
val expected = List(
"ACME,12,1970-01-01 00:00:01.999,1970-01-01 00:00:01.0,1970-01-01 00:00:02.0",
"ACME,17,1970-01-01 00:00:02.999,1970-01-01 00:00:02.0,1970-01-01 00:00:03.0",
"ACME,13,1970-01-01 00:00:03.999,1970-01-01 00:00:03.0,1970-01-01 00:00:04.0",
"ACME,11,1970-01-01 00:00:04.999,1970-01-01 00:00:04.0,1970-01-01 00:00:05.0")
assertEquals(expected.sorted, StreamITCase.testResults.sorted)
}
}
object TimeAttributesITCase {
class AtomicTimestampWithEqualWatermark
extends AssignerWithPunctuatedWatermarks[Long] {
override def checkAndGetNextWatermark(
lastElement: Long,
extractedTimestamp: Long)
: Watermark = {
new Watermark(extractedTimestamp)
}
override def extractTimestamp(
element: Long,
previousElementTimestamp: Long): Long = {
element
}
}
class TimestampWithEqualWatermark
extends AssignerWithPunctuatedWatermarks[(Long, Int, Double, Float, BigDecimal, String)] {
override def checkAndGetNextWatermark(
lastElement: (Long, Int, Double, Float, BigDecimal, String),
extractedTimestamp: Long)
: Watermark = {
new Watermark(extractedTimestamp)
}
override def extractTimestamp(
element: (Long, Int, Double, Float, BigDecimal, String),
previousElementTimestamp: Long): Long = {
element._1
}
}
class TimestampWithEqualWatermarkPojo
extends AssignerWithPunctuatedWatermarks[TestPojo] {
override def checkAndGetNextWatermark(
lastElement: TestPojo,
extractedTimestamp: Long)
: Watermark = {
new Watermark(extractedTimestamp)
}
override def extractTimestamp(
element: TestPojo,
previousElementTimestamp: Long): Long = {
element.b
}
}
class TestPojo() {
var a: Int = _
var b: Long = _
var b2: String = "skip me"
var c: String = _
}
}
|
bowenli86/flink
|
flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/runtime/stream/TimeAttributesITCase.scala
|
Scala
|
apache-2.0
| 27,925 |
/* ------------------------------------------------------------------ */
/* The Great Computer Language Shootout */
/* http://shootout.alioth.debian.org/ */
/* */
/* Contributed by Anthony Borla */
/* ------------------------------------------------------------------ */
import java.text.DecimalFormat;
import java.text.FieldPosition;
object harmonic
{
  def main(args: Array[String]): Unit =
{
var n = Integer.parseInt(args(0));
var value = harmonic(n, 0.0);
val formatter = new DecimalFormat("#.000000000");
var formattedValue = formatter.format(value, new StringBuffer(), new FieldPosition(0));
System.out.println(formattedValue);
}
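  // Computes the n-th harmonic number H(n) = 1 + 1/2 + ... + 1/n, accumulating the partial sum in a.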
  final def harmonic(n: Int, a: Double): Double =
{
if (n == 0) return a;
return harmonic(n - 1, a + 1.0 / n);
}
}
|
kragen/shootout
|
bench/harmonic/harmonic.scala
|
Scala
|
bsd-3-clause
| 961 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package statements
package params
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import com.intellij.lang.ASTNode
import stubs.elements.wrappers.DummyASTNode
import stubs.ScParameterStub
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import api.toplevel.typedef.ScClass
import api.ScalaElementVisitor
import com.intellij.psi.{PsiElementVisitor, PsiClass, PsiElement}
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
class ScClassParameterImpl(node: ASTNode) extends ScParameterImpl(node) with ScClassParameter {
def this(stub: ScParameterStub) = {
this(DummyASTNode)
setStub(stub)
setNode(null)
}
override def toString: String = "ClassParameter: " + name
override def isVal: Boolean = {
val stub = getStub
if (stub != null) {
return stub.asInstanceOf[ScParameterStub].isVal
}
findChildByType(ScalaTokenTypes.kVAL) != null
}
override def isVar: Boolean = {
val stub = getStub
if (stub != null) {
return stub.asInstanceOf[ScParameterStub].isVar
}
findChildByType(ScalaTokenTypes.kVAR) != null
}
def isPrivateThis: Boolean = {
if (!isEffectiveVal) return true
getModifierList.accessModifier match {
case Some(am) =>
am.isThis && am.isPrivate
case _ => false
}
}
override def isStable: Boolean = {
val stub = getStub
if (stub != null) {
return stub.asInstanceOf[ScParameterStub].isStable
}
!isVar
}
override def getOriginalElement: PsiElement = {
val ccontainingClass = containingClass
if (ccontainingClass == null) return this
val originalClass: PsiClass = ccontainingClass.getOriginalElement.asInstanceOf[PsiClass]
if (ccontainingClass eq originalClass) return this
if (!originalClass.isInstanceOf[ScClass]) return this
val c = originalClass.asInstanceOf[ScClass]
val iterator = c.parameters.iterator
while (iterator.hasNext) {
val param = iterator.next()
if (param.name == name) return param
}
this
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitClassParameter(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitClassParameter(this)
case _ => super.accept(visitor)
}
}
}
|
consulo/consulo-scala
|
src/org/jetbrains/plugins/scala/lang/psi/impl/statements/params/ScClassParameterImpl.scala
|
Scala
|
apache-2.0
| 2,402 |
package com.twitter.util
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalacheck.Gen
import org.scalatest.prop.GeneratorDrivenPropertyChecks
@RunWith(classOf[JUnitRunner])
class LRUMapTest extends FunSuite with GeneratorDrivenPropertyChecks {
// don't waste too much time testing this and keep things small
implicit override val generatorDrivenConfig =
PropertyCheckConfiguration(minSuccessful=5, minSize=2, sizeRange=8)
test("LRUMap creation") {
forAll (Gen.choose(1, 200)) { size =>
val lru = new LruMap[String, String](size)
assert(lru.maxSize == size)
val slru = new SynchronizedLruMap[String, String](size)
assert(slru.maxSize == size)
}
}
test("LRUMap insertion") {
forAll (LRUEntriesGenerator[Int]) { entries =>
val lru = new LruMap[String, Int](entries.size+10) // headroom
for (entry <- entries) {
lru += entry
}
for ((key,value) <- entries) {
assert(lru.get(key) == Some(value))
}
}
}
test("LRUMap eviction") {
forAll (LRUEntriesGenerator[Double]) { entries =>
val slru = new SynchronizedLruMap[String, Double](5)
for (entry <- entries) {
slru += entry
}
val expectedKeys = entries.slice(entries.size-5, entries.size).map(_._1)
assert(slru.keySet == expectedKeys.toSet)
}
}
}
|
BuoyantIO/twitter-util
|
util-collection/src/test/scala/com/twitter/util/LRUMapTest.scala
|
Scala
|
apache-2.0
| 1,414 |
package services
import akka.actor.Actor
import java.sql.SQLException
import models._
import play.api.Play.current
import play.api.db.slick.DB
case class UpdateScore(score: Score)
case class ListScores(studentId: Long)
case class CreateScore(score: Score)
class ScoreActor extends Actor {
def receive = {
case update: UpdateScore => {
val updated = ScoreActor.updateScore(update.score)
sender ! updated
}
case list: ListScores => {
val scores = ScoreActor.listScores(list.studentId)
sender ! scores
}
case create: CreateScore => {
val score = ScoreActor.createScore(create.score)
sender ! score
}
}
}
object ScoreActor {
def createScore(score: Score) = {
DB.withSession { implicit s =>
Scores.create(score)
}
}
def updateScore(score: Score) = {
DB.withSession { implicit s =>
try {
Scores.update(score)
} catch {
case e: SQLException => 0
}
}
}
def listScores(studentId: Long) = {
DB.withSession { implicit s =>
try {
Scores.findByStudent(studentId)
} catch {
case e: SQLException => List.empty
}
}
}
}
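// Illustrative caller sketch (not part of the original file): one way client code might talk to
// this actor via the ask pattern. The actor system name, timeout value, and println handling
// below are assumptions for the example, not taken from the original project.
object ScoreActorUsageSketch {
  import akka.actor.{ActorSystem, Props}
  import akka.pattern.ask
  import akka.util.Timeout
  import scala.concurrent.duration._
  def listScoresFor(studentId: Long): Unit = {
    implicit val timeout: Timeout = Timeout(5.seconds)
    val system = ActorSystem("score-demo")                        // assumed standalone system
    val scoreActor = system.actorOf(Props[ScoreActor], "scores")
    // ListScores makes the actor reply with whatever Scores.findByStudent returns.
    val futureScores = scoreActor ? ListScores(studentId)
    futureScores.foreach(scores => println(scores))(system.dispatcher)
  }
}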
|
silbermm/proximal
|
app/services/ScoreActor.scala
|
Scala
|
apache-2.0
| 1,187 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2010, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala.collection
package generic
import mutable.Builder
/** A template for companion objects of `Set` and subclasses thereof.
*
* @define coll set
* @define Coll Set
* @define factoryInfo
* This object provides a set of operations needed to create `$Coll` values.
* @author Martin Odersky
* @version 2.8
* @since 2.8
* @define canBuildFromInfo
* The standard `CanBuildFrom` instance for `$Coll` objects.
* @see CanBuildFrom
* @define setCanBuildFromInfo
* The standard `CanBuildFrom` instance for `$Coll` objects.
* @see CanBuildFrom
* @see GenericCanBuildFrom
*/
abstract class SetFactory[CC[X] <: Set[X] with SetLike[X, CC[X]]]
extends GenericCompanion[CC] {
def newBuilder[A]: Builder[A, CC[A]]
/** $setCanBuildFromInfo
*/
def setCanBuildFrom[A] = new CanBuildFrom[CC[_], A, CC[A]] {
def apply(from: CC[_]) = newBuilder[A]
def apply() = newBuilder[A]
}
}
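/* Illustrative sketch (not from the original source): a companion object for a hypothetical
 * collection MySet[A] <: Set[A] with SetLike[A, MySet[A]] would typically extend this factory,
 * expose the CanBuildFrom produced by setCanBuildFrom, and supply the builder, roughly:
 *
 *   object MySet extends SetFactory[MySet] {
 *     implicit def canBuildFrom[A]: CanBuildFrom[MySet[_], A, MySet[A]] = setCanBuildFrom[A]
 *     override def empty[A]: MySet[A] = ...                     // a concrete empty instance
 *     def newBuilder[A]: Builder[A, MySet[A]] = new mutable.SetBuilder[A, MySet[A]](empty[A])
 *   }
 */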
|
cran/rkafkajars
|
java/scala/collection/generic/SetFactory.scala
|
Scala
|
apache-2.0
| 1,471 |
package com.itszuvalex.itszulib.testing
import com.itszuvalex.itszulib.api.IPreviewable
import net.minecraft.item.Item
/**
* Created by Christopher Harris (Itszuvalex) on 8/26/15.
*/
class ItemPreviewable extends Item with IPreviewable {
/**
*
* @return The ID of IPreviewableRenderer. This is separate from Forge RenderIDs.
*/
override def renderID: Int = PreviewableIDs.testID
}
|
BlockWorker/ItszuLib
|
src/main/scala/com/itszuvalex/itszulib/testing/ItemPreviewable.scala
|
Scala
|
gpl-2.0
| 400 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.concurrent
import scala.language.implicitConversions
package object duration {
/**
* This object can be used as closing token if you prefer dot-less style but do not want
* to enable language.postfixOps:
*
* {{{
* import scala.concurrent.duration._
*
* val duration = 2 seconds span
* }}}
*/
object span
/**
* This object can be used as closing token for declaring a deadline at some future point
* in time:
*
* {{{
* import scala.concurrent.duration._
*
* val deadline = 3 seconds fromNow
* }}}
*/
object fromNow
type TimeUnit = java.util.concurrent.TimeUnit
final val DAYS = java.util.concurrent.TimeUnit.DAYS
final val HOURS = java.util.concurrent.TimeUnit.HOURS
final val MICROSECONDS = java.util.concurrent.TimeUnit.MICROSECONDS
final val MILLISECONDS = java.util.concurrent.TimeUnit.MILLISECONDS
final val MINUTES = java.util.concurrent.TimeUnit.MINUTES
final val NANOSECONDS = java.util.concurrent.TimeUnit.NANOSECONDS
final val SECONDS = java.util.concurrent.TimeUnit.SECONDS
implicit def pairIntToDuration(p: (Int, TimeUnit)): Duration = Duration(p._1.toLong, p._2)
implicit def pairLongToDuration(p: (Long, TimeUnit)): FiniteDuration = Duration(p._1, p._2)
implicit def durationToPair(d: Duration): (Long, TimeUnit) = (d.length, d.unit)
implicit final class DurationInt(private val n: Int) extends AnyVal with DurationConversions {
override protected def durationIn(unit: TimeUnit): FiniteDuration = Duration(n.toLong, unit)
}
implicit final class DurationLong(private val n: Long) extends AnyVal with DurationConversions {
override protected def durationIn(unit: TimeUnit): FiniteDuration = Duration(n, unit)
}
implicit final class DurationDouble(private val d: Double) extends AnyVal with DurationConversions {
override protected def durationIn(unit: TimeUnit): FiniteDuration =
Duration(d, unit) match {
case f: FiniteDuration => f
case _ => throw new IllegalArgumentException("Duration DSL not applicable to " + d)
}
}
/*
* Avoid reflection based invocation by using non-duck type
*/
implicit final class IntMult(private val i: Int) extends AnyVal {
def *(d: Duration): Duration = d * i.toDouble
def *(d: FiniteDuration): FiniteDuration = d * i.toLong
}
implicit final class LongMult(private val i: Long) extends AnyVal {
def *(d: Duration): Duration = d * i.toDouble
def *(d: FiniteDuration): FiniteDuration = d * i.toLong
}
implicit final class DoubleMult(private val f: Double) extends AnyVal {
def *(d: Duration): Duration = d * f.toDouble
}
}
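// Usage sketch (illustrative, not part of the original file) of the conversions defined above:
//   import scala.concurrent.duration._
//   val d: FiniteDuration = 5.seconds              // via DurationInt
//   val p: FiniteDuration = (500L, MILLISECONDS)   // via pairLongToDuration
//   val m: FiniteDuration = 1.5.minutes            // via DurationDouble
//   val scaled            = 3 * d                  // via IntMult, still a FiniteDuration
//   val deadline          = 2.seconds.fromNow      // a Deadline two seconds from now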
|
scala/scala
|
src/library/scala/concurrent/duration/package.scala
|
Scala
|
apache-2.0
| 3,045 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.Literal.{FalseLiteral, TrueLiteral}
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.types.{IntegerType, NullType}
class SimplifyConditionalSuite extends PlanTest with PredicateHelper {
object Optimize extends RuleExecutor[LogicalPlan] {
val batches = Batch("SimplifyConditionals", FixedPoint(50),
BooleanSimplification, ConstantFolding, SimplifyConditionals) :: Nil
}
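  // Runs the Optimize batch on e1 (wrapped in a one-row Project) and checks that the optimized plan equals the plan for e2.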
protected def assertEquivalent(e1: Expression, e2: Expression): Unit = {
val correctAnswer = Project(Alias(e2, "out")() :: Nil, OneRowRelation()).analyze
val actual = Optimize.execute(Project(Alias(e1, "out")() :: Nil, OneRowRelation()).analyze)
comparePlans(actual, correctAnswer)
}
private val trueBranch = (TrueLiteral, Literal(5))
private val normalBranch = (NonFoldableLiteral(true), Literal(10))
private val unreachableBranch = (FalseLiteral, Literal(20))
private val nullBranch = (Literal.create(null, NullType), Literal(30))
val isNotNullCond = IsNotNull(UnresolvedAttribute(Seq("a")))
val isNullCond = IsNull(UnresolvedAttribute("b"))
val notCond = Not(UnresolvedAttribute("c"))
test("simplify if") {
assertEquivalent(
If(TrueLiteral, Literal(10), Literal(20)),
Literal(10))
assertEquivalent(
If(FalseLiteral, Literal(10), Literal(20)),
Literal(20))
assertEquivalent(
If(Literal.create(null, NullType), Literal(10), Literal(20)),
Literal(20))
}
test("remove unnecessary if when the outputs are semantic equivalence") {
assertEquivalent(
If(IsNotNull(UnresolvedAttribute("a")),
Subtract(Literal(10), Literal(1)),
Add(Literal(6), Literal(3))),
Literal(9))
// For non-deterministic condition, we don't remove the `If` statement.
assertEquivalent(
If(GreaterThan(Rand(0), Literal(0.5)),
Subtract(Literal(10), Literal(1)),
Add(Literal(6), Literal(3))),
If(GreaterThan(Rand(0), Literal(0.5)),
Literal(9),
Literal(9)))
}
test("remove unreachable branches") {
// i.e. removing branches whose conditions are always false
assertEquivalent(
CaseWhen(unreachableBranch :: normalBranch :: unreachableBranch :: nullBranch :: Nil, None),
CaseWhen(normalBranch :: Nil, None))
}
test("remove entire CaseWhen if only the else branch is reachable") {
assertEquivalent(
CaseWhen(unreachableBranch :: unreachableBranch :: nullBranch :: Nil, Some(Literal(30))),
Literal(30))
assertEquivalent(
CaseWhen(unreachableBranch :: unreachableBranch :: Nil, None),
Literal.create(null, IntegerType))
}
test("remove entire CaseWhen if the first branch is always true") {
assertEquivalent(
CaseWhen(trueBranch :: normalBranch :: nullBranch :: Nil, None),
Literal(5))
// Test branch elimination and simplification in combination
assertEquivalent(
CaseWhen(unreachableBranch :: unreachableBranch :: nullBranch :: trueBranch :: normalBranch
:: Nil, None),
Literal(5))
// Make sure this doesn't trigger if there is a non-foldable branch before the true branch
assertEquivalent(
CaseWhen(normalBranch :: trueBranch :: normalBranch :: Nil, None),
CaseWhen(normalBranch :: trueBranch :: Nil, None))
}
test("simplify CaseWhen, prune branches following a definite true") {
assertEquivalent(
CaseWhen(normalBranch :: unreachableBranch ::
unreachableBranch :: nullBranch ::
trueBranch :: normalBranch ::
Nil,
None),
CaseWhen(normalBranch :: trueBranch :: Nil, None))
}
test("simplify CaseWhen if all the outputs are semantic equivalence") {
// When the conditions in `CaseWhen` are all deterministic, `CaseWhen` can be removed.
assertEquivalent(
CaseWhen((isNotNullCond, Subtract(Literal(3), Literal(2))) ::
(isNullCond, Literal(1)) ::
(notCond, Add(Literal(6), Literal(-5))) ::
Nil,
Add(Literal(2), Literal(-1))),
Literal(1)
)
// For non-deterministic conditions, we don't remove the `CaseWhen` statement.
assertEquivalent(
CaseWhen((GreaterThan(Rand(0), Literal(0.5)), Subtract(Literal(3), Literal(2))) ::
(LessThan(Rand(1), Literal(0.5)), Literal(1)) ::
(EqualTo(Rand(2), Literal(0.5)), Add(Literal(6), Literal(-5))) ::
Nil,
Add(Literal(2), Literal(-1))),
CaseWhen((GreaterThan(Rand(0), Literal(0.5)), Literal(1)) ::
(LessThan(Rand(1), Literal(0.5)), Literal(1)) ::
(EqualTo(Rand(2), Literal(0.5)), Literal(1)) ::
Nil,
Literal(1))
)
// When we have mixture of deterministic and non-deterministic conditions, we remove
// the deterministic conditions from the tail until a non-deterministic one is seen.
assertEquivalent(
CaseWhen((GreaterThan(Rand(0), Literal(0.5)), Subtract(Literal(3), Literal(2))) ::
(NonFoldableLiteral(true), Add(Literal(2), Literal(-1))) ::
(LessThan(Rand(1), Literal(0.5)), Literal(1)) ::
(NonFoldableLiteral(true), Add(Literal(6), Literal(-5))) ::
(NonFoldableLiteral(false), Literal(1)) ::
Nil,
Add(Literal(2), Literal(-1))),
CaseWhen((GreaterThan(Rand(0), Literal(0.5)), Literal(1)) ::
(NonFoldableLiteral(true), Literal(1)) ::
(LessThan(Rand(1), Literal(0.5)), Literal(1)) ::
Nil,
Literal(1))
)
}
}
|
pgandhi999/spark
|
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/SimplifyConditionalSuite.scala
|
Scala
|
apache-2.0
| 6,600 |
package org.awong.fundamentals.unionfind
/**
 * @see Sedgewick & Wayne, Algorithms, 4th Edition (2011), page 227
*/
class WeightedQuickUnionUF(n: Int) extends UnionFind {
var count = n
  var id = (0 until n).toArray
var sz = for (i <- id) yield 1
def connected(p: Int, q: Int): Boolean = {
find(p) == find(q)
}
// connect p and q
def union(p: Int, q: Int): Unit = {
val i = find(p)
val j = find(q)
if (i != j) {
if (sz(i) < sz(j)) {
id(i) = j
sz(j) += sz(i)
} else {
id(j) = i
sz(i) += sz(j)
}
count = count - 1
}
}
// returns component identifier for p
def find(p: Int): Int = {
var x = p
// traverse links until you find the root, which is connected to itself
while (x != id(x)) {
x = id(x)
}
x
}
}
object WeightedQuickUnionUF {
def apply(n: Int) = new WeightedQuickUnionUF(n)
}
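// Illustrative usage (not part of the original source): exercising the weighted quick-union API.
object WeightedQuickUnionUFDemo {
  def main(args: Array[String]): Unit = {
    val uf = WeightedQuickUnionUF(10) // sites 0..9, initially 10 components
    uf.union(3, 4)
    uf.union(4, 9)
    println(uf.connected(3, 9))       // true: 3, 4 and 9 now share a root
    println(uf.count)                 // 8 components remain
  }
}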
|
alanktwong/algorithms-scala
|
fundamentals/src/main/scala/org/awong/fundamentals/unionfind/WeightedQuickUnionUF.scala
|
Scala
|
mit
| 912 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.reactive.Observable
import scala.concurrent.duration._
import scala.concurrent.duration.Duration.Zero
object ZipWithIndexSuite extends BaseOperatorSuite {
def createObservable(sourceCount: Int) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val o = Observable.range(1, sourceCount.toLong + 1)
.zipWithIndex.map { case (elem, index) => elem + index }
val c = sourceCount
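      // elements are (elem + index) with elem = 1..c and index = 0..c-1, so the sum is c*(c+1)/2 + c*(c-1)/2 (i.e. c*c)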
val sum = c * (c + 1) / 2 + c * (c - 1) / 2
Sample(o, sourceCount, sum, Zero, Zero)
}
}
def observableInError(sourceCount: Int, ex: Throwable) = {
require(sourceCount > 0, "sourceCount should be strictly positive")
Some {
val o = createObservableEndingInError(Observable.range(1, sourceCount.toLong + 1), ex).zipWithIndex.map {
case (elem, index) => elem + index
}
val c = sourceCount
val sum = c * (c + 1) / 2 + c * (c - 1) / 2
Sample(o, sourceCount, sum, Zero, Zero)
}
}
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = None
override def cancelableObservables(): Seq[Sample] = {
val sample = Observable.range(0, 10).delayOnNext(1.second).zipWithIndex.map { case (elem, index) => elem + index }
Seq(Sample(sample, 0, 0, 0.seconds, 0.seconds))
}
}
|
alexandru/monifu
|
monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/ZipWithIndexSuite.scala
|
Scala
|
apache-2.0
| 2,018 |
package models.db
import models.Topic
import play.api.db.slick.Config.driver.simple._
class Topics(tag: Tag) extends Table[Topic](tag, "TOPICS") {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def title = column[String]("title")
def slug = column[String]("slug")
def * = (id.?, title, slug) <>(Topic.tupled, Topic.unapply)
}
object Topics {
val query = TableQuery[Topics]
}
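// Illustrative query sketch (not part of the original file); assumes a Slick 2.x session in scope,
// e.g. inside play.api.db.slick.DB.withSession { implicit session => ... }:
//   val bySlug: Option[Topic]   = Topics.query.filter(_.slug === "some-slug").firstOption
//   val allTitles: List[String] = Topics.query.map(_.title).list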
|
duaiwe/vulgar
|
app/models/db/Topics.scala
|
Scala
|
mit
| 392 |
package org.monkeynuthead.playground.streams
import org.scalatest.{MustMatchers, WordSpec}
/**
* Messing around with streams.
*/
class StreamSpec extends WordSpec with MustMatchers {
"I" must {
"know how to create a basic stream (but be careful because the val will keep hold of everything)" in {
val ints: Stream[Int] = {
def loop(i: Int): Stream[Int] = i #:: loop(i + 1)
loop(10)
}
ints.take(3) must equal(List(10, 11, 12))
ints.head must equal(10)
val (head1, ints1) = (ints.head, ints.tail)
head1 must equal(10)
val (head2, ints2) = (ints1.head, ints1.tail)
head2 must equal(11)
ints2.head must equal(12)
}
"know how to create a basic stream (but not holding on to head using a def)" in {
def ints: Stream[Int] = {
def loop(i: Int): Stream[Int] = i #:: loop(i + 1)
loop(5)
}
val it = ints.iterator
it.map(2*).take(5).toSeq must equal(List(10,12,14,16,18))
}
"know how to do something slightly more complicated" in {
def repeated[A](underlying: IndexedSeq[A]): Stream[A] = {
def loop(i: Int, underlying: IndexedSeq[A]): Stream[A] = {
val index = i % underlying.length
underlying(index) #:: loop(index + 1, underlying)
}
loop(0, underlying)
}
val it = repeated(Vector("A", "B", "C")).iterator
it.take(10).toSeq must equal(List("A", "B", "C", "A", "B", "C", "A", "B", "C", "A"))
}
}
}
|
georgenicoll/scala-playground
|
src/test/scala/org/monkeynuthead/playground/streams/StreamSpec.scala
|
Scala
|
apache-2.0
| 1,567 |
// scalastyle:off
/* NSC -- new Scala compiler
* Copyright 2005-2013 LAMP/EPFL
* @author Paul Phillips
*/
package org.apache.spark.repl
import scala.tools.nsc._
import scala.tools.nsc.interpreter._
import scala.collection.{ mutable, immutable }
private[repl] trait SparkImports {
self: SparkIMain =>
import global._
import definitions.{ ScalaPackage, JavaLangPackage, PredefModule }
import memberHandlers._
def isNoImports = settings.noimports.value
def isNoPredef = settings.nopredef.value
/** Synthetic import handlers for the language defined imports. */
private def makeWildcardImportHandler(sym: Symbol): ImportHandler = {
val hd :: tl = sym.fullName.split('.').toList map newTermName
val tree = Import(
tl.foldLeft(Ident(hd): Tree)((x, y) => Select(x, y)),
ImportSelector.wildList
)
tree setSymbol sym
new ImportHandler(tree)
}
/** Symbols whose contents are language-defined to be imported. */
def languageWildcardSyms: List[Symbol] = List(JavaLangPackage, ScalaPackage, PredefModule)
def languageWildcards: List[Type] = languageWildcardSyms map (_.tpe)
def languageWildcardHandlers = languageWildcardSyms map makeWildcardImportHandler
def allImportedNames = importHandlers flatMap (_.importedNames)
def importedTerms = onlyTerms(allImportedNames)
def importedTypes = onlyTypes(allImportedNames)
/** Types which have been wildcard imported, such as:
* val x = "abc" ; import x._ // type java.lang.String
* import java.lang.String._ // object java.lang.String
*
* Used by tab completion.
*
* XXX right now this gets import x._ and import java.lang.String._,
* but doesn't figure out import String._. There's a lot of ad hoc
* scope twiddling which should be swept away in favor of digging
* into the compiler scopes.
*/
def sessionWildcards: List[Type] = {
importHandlers filter (_.importsWildcard) map (_.targetType) distinct
}
def wildcardTypes = languageWildcards ++ sessionWildcards
def languageSymbols = languageWildcardSyms flatMap membersAtPickler
def sessionImportedSymbols = importHandlers flatMap (_.importedSymbols)
def importedSymbols = languageSymbols ++ sessionImportedSymbols
def importedTermSymbols = importedSymbols collect { case x: TermSymbol => x }
def importedTypeSymbols = importedSymbols collect { case x: TypeSymbol => x }
def implicitSymbols = importedSymbols filter (_.isImplicit)
def importedTermNamed(name: String): Symbol =
importedTermSymbols find (_.name.toString == name) getOrElse NoSymbol
/** Tuples of (source, imported symbols) in the order they were imported.
*/
def importedSymbolsBySource: List[(Symbol, List[Symbol])] = {
val lang = languageWildcardSyms map (sym => (sym, membersAtPickler(sym)))
val session = importHandlers filter (_.targetType != NoType) map { mh =>
(mh.targetType.typeSymbol, mh.importedSymbols)
}
lang ++ session
}
def implicitSymbolsBySource: List[(Symbol, List[Symbol])] = {
importedSymbolsBySource map {
case (k, vs) => (k, vs filter (_.isImplicit))
} filterNot (_._2.isEmpty)
}
/** Compute imports that allow definitions from previous
* requests to be visible in a new request. Returns
* three pieces of related code:
*
* 1. An initial code fragment that should go before
* the code of the new request.
*
* 2. A code fragment that should go after the code
* of the new request.
*
* 3. An access path which can be traversed to access
* any bindings inside code wrapped by #1 and #2 .
*
* The argument is a set of Names that need to be imported.
*
* Limitations: This method is not as precise as it could be.
* (1) It does not process wildcard imports to see what exactly
* they import.
* (2) If it imports any names from a request, it imports all
* of them, which is not really necessary.
* (3) It imports multiple same-named implicits, but only the
* last one imported is actually usable.
*/
case class SparkComputedImports(prepend: String, append: String, access: String)
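  // Rough illustration (not from the original source) of how the three pieces returned by
  // importsCode below fit together, writing $iw for whatever nme.INTERPRETER_IMPORT_WRAPPER is:
  //   prepend -> "class $iwC extends Serializable { <imports from previous requests> ..."
  //   append  -> "} ; val $iw = new $iwC ..."      (one pair per wrapper added by addWrapper)
  //   access  -> ".$iw.$iw..."                     (one path segment per wrapper)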
def fallback = System.getProperty("spark.repl.fallback", "false").toBoolean
protected def importsCode(wanted: Set[Name], definedClass: Boolean): SparkComputedImports = {
/** Narrow down the list of requests from which imports
* should be taken. Removes requests which cannot contribute
* useful imports for the specified set of wanted names.
*/
case class ReqAndHandler(req: Request, handler: MemberHandler) { }
def reqsToUse: List[ReqAndHandler] = {
/** Loop through a list of MemberHandlers and select which ones to keep.
* 'wanted' is the set of names that need to be imported.
*/
def select(reqs: List[ReqAndHandler], wanted: Set[Name]): List[ReqAndHandler] = {
// Single symbol imports might be implicits! See bug #1752. Rather than
// try to finesse this, we will mimic all imports for now.
def keepHandler(handler: MemberHandler) = handler match {
/* This case clause tries to "precisely" import only what is required. And in this
* it may miss out on some implicits, because implicits are not known in `wanted`. Thus
* it is suitable for defining classes. AFAIK while defining classes implicits are not
* needed.*/
case h: ImportHandler if definedClass && !fallback =>
h.importedNames.exists(x => wanted.contains(x))
case _: ImportHandler => true
case x => x.definesImplicit || (x.definedNames exists wanted)
}
reqs match {
        case Nil => Nil // the end of the list is Nil
case rh :: rest if !keepHandler(rh.handler) => select(rest, wanted)
case rh :: rest =>
import rh.handler._
val newWanted = wanted ++ referencedNames -- definedNames -- importedNames
rh :: select(rest, newWanted)
}
}
/** Flatten the handlers out and pair each with the original request */
select(allReqAndHandlers reverseMap { case (r, h) => ReqAndHandler(r, h) }, wanted).reverse
}
val code, trailingBraces, accessPath = new StringBuilder
val currentImps = mutable.HashSet[Name]()
// add code for a new object to hold some imports
def addWrapper() {
val impname = nme.INTERPRETER_IMPORT_WRAPPER
code append "class %sC extends Serializable {\\n".format(impname)
trailingBraces append "}\\nval " + impname + " = new " + impname + "C;\\n"
accessPath append ("." + impname)
currentImps.clear
// code append "object %s {\\n".format(impname)
// trailingBraces append "}\\n"
// accessPath append ("." + impname)
// currentImps.clear
}
addWrapper()
// loop through previous requests, adding imports for each one
for (ReqAndHandler(req, handler) <- reqsToUse) {
handler match {
// If the user entered an import, then just use it; add an import wrapping
// level if the import might conflict with some other import
case x: ImportHandler =>
if (x.importsWildcard || currentImps.exists(x.importedNames contains _))
addWrapper()
code append (x.member + "\\n")
// give wildcard imports a import wrapper all to their own
if (x.importsWildcard) addWrapper()
else currentImps ++= x.importedNames
// For other requests, import each defined name.
// import them explicitly instead of with _, so that
// ambiguity errors will not be generated. Also, quote
// the name of the variable, so that we don't need to
// handle quoting keywords separately.
case x: ClassHandler if !fallback =>
// I am trying to guess if the import is a defined class
// This is an ugly hack, I am not 100% sure of the consequences.
// Here we, let everything but "defined classes" use the import with val.
// The reason for this is, otherwise the remote executor tries to pull the
// classes involved and may fail.
for (imv <- x.definedNames) {
val objName = req.lineRep.readPath
code.append("import " + objName + ".INSTANCE" + req.accessPath + ".`" + imv + "`\\n")
}
case x =>
for (imv <- x.definedNames) {
if (currentImps contains imv) addWrapper()
val objName = req.lineRep.readPath
val valName = "$VAL" + newValId()
            if(!code.toString.endsWith(".`" + imv + "`;\\n")) { // skip if the previous line already imported this name
code.append("val " + valName + " = " + objName + ".INSTANCE;\\n")
code.append("import " + valName + req.accessPath + ".`" + imv + "`;\\n")
}
// code.append("val " + valName + " = " + objName + ".INSTANCE;\\n")
// code.append("import " + valName + req.accessPath + ".`" + imv + "`;\\n")
// code append ("import " + (req fullPath imv) + "\\n")
currentImps += imv
}
}
}
// add one extra wrapper, to prevent warnings in the common case of
// redefining the value bound in the last interpreter request.
addWrapper()
SparkComputedImports(code.toString, trailingBraces.toString, accessPath.toString)
}
private def allReqAndHandlers =
prevRequestList flatMap (req => req.handlers map (req -> _))
private def membersAtPickler(sym: Symbol): List[Symbol] =
beforePickler(sym.info.nonPrivateMembers.toList)
private var curValId = 0
private def newValId(): Int = {
curValId += 1
curValId
}
}
|
tophua/spark1.52
|
repl/scala-2.10/src/main/scala/org/apache/spark/repl/SparkImports.scala
|
Scala
|
apache-2.0
| 9,767 |
package answer
import exam.CharacterCounter
import jeqa.types.Sentence
import text.StringOption
/**
* <pre>
* Created on 3/11/15.
* </pre>
* @param score score
* @param text answer
* @author K.Sakamoto
*/
class AnswerCandidate(val score: Double, val text: StringOption, val sentenceList: Seq[Sentence]) {
val characterNumber: Int = CharacterCounter.count(text)
override def toString: String = {
"""SCORE:
|%f
|TEXT:
|%s
|CHARACTER NUMBER:
|%d
""".stripMargin.format(
score,
text,
characterNumber
)
}
}
|
ktr-skmt/FelisCatusZero
|
src/main/scala/answer/AnswerCandidate.scala
|
Scala
|
apache-2.0
| 585 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.dl
import caffe.Caffe.LayerParameter
import scala.collection.JavaConversions._
import org.apache.sysml.parser.LanguageException
import java.util.HashSet
import java.io.File
import org.apache.sysml.api.DMLScript
import org.apache.sysml.runtime.util.ConvolutionUtils
import caffe.Caffe.EltwiseParameter.EltwiseOp
import org.apache.sysml.runtime.DMLRuntimeException;
import java.util.ArrayList
trait CaffeLayer extends BaseDMLGenerator {
// -------------------------------------------------
  // Any layer that wants to reuse SystemML-NN has to override the following methods, which help in generating the DML for the given layer:
def sourceFileName: String;
def init(dmlScript: StringBuilder): Unit;
def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit;
def backward(dmlScript: StringBuilder, outSuffix: String): Unit;
var computedOutputShape: (String, String, String) = null
def outputShape: (String, String, String) = {
if (computedOutputShape == null) computedOutputShape = bottomLayerOutputShape
computedOutputShape
}
// -------------------------------------------------
var computedBottomLayerOutputShape: (String, String, String) = null
def bottomLayerOutputShape: (String, String, String) = {
if (computedBottomLayerOutputShape == null) {
// Note: if you get org.apache.sysml.parser.LanguageException: Map is null exception
// from org.apache.sysml.api.dl.CaffeNetwork.org$apache$sysml$api$dl$CaffeNetwork$$convertLayerParameterToCaffeLayer
      // you are attempting to traverse the network (for example: bottomLayerOutputShape) before it is created.
val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
      if (ret.size == 0) throw new LanguageException("Expected at least 1 bottom layer for " + param.getName)
computedBottomLayerOutputShape = ret(0).outputShape
}
computedBottomLayerOutputShape
}
def param: LayerParameter
def id: Int
def net: CaffeNetwork
// --------------------------------------------------------------------------------------
// No need to override these methods in subclasses
// Exception: Only Data layer overrides "out" method to use 'Xb' for consistency
// Naming of the below methods is consistent with the nn library:
// X (feature map from the previous layer) ----> Forward pass ----> out (feature map to the next layer)
// dX (errors to the previous layer) <---- Backward pass <---- dout (errors from the next layer)
def out = "out" + id
var computedX: String = null
def X: String = {
if (computedX == null) {
val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
      if (ret.size == 0) throw new LanguageException("Expected at least 1 bottom layer for " + param.getName)
else if (ret.size == 1) computedX = ret(0).out
else computedX = sum(new StringBuilder, ret.map(_.out).toList).toString()
}
computedX
}
var computedDout: String = null
def dout: String = {
if (computedDout == null) {
val ret = net.getTopLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
      if (ret.size == 0) throw new LanguageException("Expected at least 1 top layer for " + param.getName)
else if (ret.size == 1) computedDout = ret(0).dX(id)
else computedDout = sum(new StringBuilder, ret.map(_.dX(id)).toList).toString()
}
computedDout
}
def dX(bottomLayerID: Int) = "dOut" + id + "_" + bottomLayerID
// --------------------------------------------------------------------------------------
  // No need to override these methods in subclasses; instead, classes that have weights and biases
  // should mix in the HasWeight and HasBias traits.
def dWeight(): String = throw new DMLRuntimeException("dWeight is not implemented in super class")
def dBias(): String = throw new DMLRuntimeException("dBias is not implemented in super class")
def weight(): String = null;
def weightShape(): Array[Int];
def bias(): String = null;
def biasShape(): Array[Int];
  def shouldUpdateWeight(): Boolean = weight != null
  def shouldUpdateBias(): Boolean = bias != null
// --------------------------------------------------------------------------------------
// Helper methods to simplify the code of subclasses
def invokeInit(dmlScript: StringBuilder, returnVariables: List[String], arguments: String*): Unit =
invoke(dmlScript, sourceFileName + "::", returnVariables, "init", arguments.toList)
def invokeForward(dmlScript: StringBuilder, returnVariables: List[String], arguments: String*): Unit =
invoke(dmlScript, sourceFileName + "::", returnVariables, "forward", arguments.toList)
// -----------------------------------------------------------------------------------
// All the layers (with the exception of Concat) call one of the below methods in the backward function.
// The preceding layer expects that 'dX(bottomLayerID) + outSuffix' is assigned.
// l1 <--- dX(1) <-----|
// |-- [current layer: dOut3 (computed by backward)] <---- "dOut" + id + outSuffix
// l2 <--- dX(2) <-----|
  // The below methods perform two tasks:
  // 1. Compute backward: either call the dml file's backward (for example: invokeBackward) or just propagate the next layers' errors (assignDoutToDX)
  // 2. Then make sure that all the preceding layers get the errors using:
// bottomLayerIDs.map(bottomLayerID => dmlScript.append( dX(bottomLayerID) + outSuffix + " = " + "dOut" + id + outSuffix + "; "))
// The layers that have a corresponding dml script call this method.
// Assumption: the first variable of resultVariables is always dX
def invokeBackward(dmlScript: StringBuilder, outSuffix: String, resultVariables: List[String], arguments: String*): Unit = {
invoke(dmlScript, sourceFileName + "::", resultVariables.map(_ + outSuffix), "backward", arguments.toList, false)
val bottomLayerIDs = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l).id)
dmlScript.append("; ")
bottomLayerIDs.map(bottomLayerID => dmlScript.append(dX(bottomLayerID) + outSuffix + " = " + resultVariables(0) + outSuffix + "; "))
dmlScript.append("\\n")
}
  // On-the-fly layers (such as Scale and Elementwise) call this function to propagate the next layers' errors to the previous layers
def assignDoutToDX(dmlScript: StringBuilder, outSuffix: String): Unit = {
dmlScript.append("dOut" + id + outSuffix + " = " + dout)
val bottomLayerIDs = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l).id)
dmlScript.append("; ")
bottomLayerIDs.map(bottomLayerID => dmlScript.append(dX(bottomLayerID) + outSuffix + " = " + "dOut" + id + outSuffix + "; "))
dmlScript.append("\\n")
}
// --------------------------------------------------------------------------------------
}
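// Hypothetical example (the class name is illustrative only and is not referenced by Caffe2DML):
// a minimal pass-through layer showing which members of the CaffeLayer contract above a new
// layer has to provide. forward simply copies X into out, and backward reuses assignDoutToDX
// to hand the upstream errors back to the bottom layers.
class IdentitySketch(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
  override def sourceFileName: String = null                     // no nn/*.dml script is invoked
  override def init(dmlScript: StringBuilder): Unit = {}         // no parameters to initialize
  override def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit = assign(dmlScript, out, X)
  override def backward(dmlScript: StringBuilder, outSuffix: String): Unit = assignDoutToDX(dmlScript, outSuffix)
  override def weightShape(): Array[Int] = null
  override def biasShape(): Array[Int] = null
}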
trait IsLossLayer extends CaffeLayer {
def computeLoss(dmlScript: StringBuilder, numTabs: Int): Unit
override def init(dmlScript: StringBuilder) = { }
def scores(): String = {
val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
ret.size.toLong match {
case 0L => throw new LanguageException("Expected atleast 1 bottom layer")
case 1L => ret.get(0).out
case _ => {
val ret1 = ret.filter(!_.out.equals("Xb")).toList
ret1.size.toLong match {
case 0L => throw new LanguageException("Atleast one of the output of previous layer should be Xb")
case 1L => ret1.get(0).out
case _ => throw new LanguageException("More than 2 bottom layers is not supported")
}
}
}
}
def isSegmentationProblem(): Boolean =
try {
return outputShape._2.toInt != 1 && outputShape._3.toInt != 1
} catch {
case _: Throwable => throw new RuntimeException("Cannot infer the output dimensions:" + outputShape)
}
}
trait HasWeight extends CaffeLayer {
override def weight = param.getName + "_weight"
override def dWeight = param.getName + "_dWeight"
}
trait HasBias extends CaffeLayer {
override def bias = param.getName + "_bias"
override def dBias = param.getName + "_dBias"
}
class Data(val param: LayerParameter, val id: Int, val net: CaffeNetwork, val numChannels: String, val height: String, val width: String) extends CaffeLayer {
// -------------------------------------------------
override def sourceFileName = null
override def init(dmlScript: StringBuilder) = {
if (param.hasTransformParam && param.getTransformParam.hasScale) {
dmlScript.append("X_full = X_full * " + param.getTransformParam.getScale + "\\n")
}
if (param.hasDataParam && param.getDataParam.hasBatchSize) {
dmlScript.append("BATCH_SIZE = " + param.getDataParam.getBatchSize + "\\n")
} else {
Caffe2DML.LOG.debug("Using default batch size of 64 as batch size is not set with DataParam")
dmlScript.append("BATCH_SIZE = 64\\n")
}
}
var dataOutputShape = ("$num_channels", "$height", "$width")
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = {}
override def out = "Xb"
override def backward(dmlScript: StringBuilder, outSuffix: String) = {}
override def outputShape = (numChannels, height, width)
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
// -------------------------------------------------
}
// ------------------------------------------------------------------
// weight is ema_mean and bias is ema_var
// Fuse
class BatchNorm(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
// val scale =
override def sourceFileName = "batch_norm2d"
/*
* Initialize the parameters of this layer.
*
* Note: This is just a convenience function, and parameters
* may be initialized manually if needed.
*
* Inputs:
* - C: Number of input channels (dimensionality of input depth).
*
* Outputs:
* - gamma: Scale parameters, of shape (C, 1).
* - beta: Shift parameters, of shape (C, 1).
* - ema_mean: Exponential moving average of the mean, of
* shape (C, 1).
* - ema_var: Exponential moving average of the variance, of
* shape (C, 1).
*/
override def init(dmlScript: StringBuilder) = invokeInit(dmlScript, List[String](gamma, beta, ema_mean, ema_var), numChannels)
var update_mean_var = true
/*
* Computes the forward pass for a 2D (spatial) batch normalization
* layer. The input data has N examples, each represented as a 3D
* volume unrolled into a single vector.
*
* A spatial batch normalization layer uses the per-channel sample
* mean and per-channel uncorrected sample variance during training
* to normalize each channel of the input data. Additionally, it
* introduces learnable parameters (gamma, beta) to control the
* amount of normalization.
*
* `y = ((x-mean) / sqrt(var+eps)) * gamma + beta`
*
* This implementation maintains exponential moving averages of the
* mean and variance during training for use during testing.
*
* Reference:
* - Batch Normalization: Accelerating Deep Network Training by
* Reducing Internal Covariate Shift, S. Ioffe & C. Szegedy, 2015
* - https://arxiv.org/abs/1502.03167
*
* Inputs:
* - X: Inputs, of shape (N, C*Hin*Win).
* - gamma: Scale parameters, of shape (C, 1).
* - beta: Shift parameters, of shape (C, 1).
* - C: Number of input channels (dimensionality of input depth).
* - Hin: Input height.
* - Win: Input width.
* - mode: 'train' or 'test' to indicate if the model is currently
* being trained or tested. During training, the current batch
* mean and variance will be used to normalize the inputs, while
* during testing, the exponential average of the mean and
* variance over all previous batches will be used.
* - ema_mean: Exponential moving average of the mean, of
* shape (C, 1).
* - ema_var: Exponential moving average of the variance, of
* shape (C, 1).
* - mu: Momentum value for moving averages.
* Typical values are in the range of [0.9, 0.999].
* - epsilon: Smoothing term to avoid divide by zero errors.
* Typical values are in the range of [1e-5, 1e-3].
*
* Outputs:
* - out: Outputs, of shape (N, C*Hin*Win).
* - ema_mean_upd: Updated exponential moving average of the mean,
* of shape (C, 1).
* - ema_var_upd: Updated exponential moving average of the variance,
* of shape (C, 1).
* - cache_mean: Cache of the batch mean, of shape (C, 1).
* Note: This is used for performance during training.
* - cache_var: Cache of the batch variance, of shape (C, 1).
* Note: This is used for performance during training.
* - cache_norm: Cache of the normalized inputs, of
* shape (C, N*Hin*Win). Note: This is used for performance
* during training.
*/
def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit = {
val mode = if (isPrediction) "\\"test\\"" else "\\"train\\""
invokeForward(
dmlScript,
List[String](out, withSuffix(ema_mean), withSuffix(ema_var), withSuffix(cache_mean), withSuffix(cache_var), withSuffix(cache_norm)),
X,
gamma,
beta,
numChannels,
Hin,
Win,
mode,
ema_mean,
ema_var,
ma_fraction,
eps
)
}
/*
* Computes the backward pass for a 2D (spatial) batch normalization
* layer.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of shape (N, C*Hin*Win).
* - out: Outputs from the forward pass, of shape (N, C*Hin*Win).
* - ema_mean_upd: Updated exponential moving average of the mean
* from the forward pass, of shape (C, 1).
* - ema_var_upd: Updated exponential moving average of the variance
* from the forward pass, of shape (C, 1).
* - cache_mean: Cache of the batch mean from the forward pass, of
* shape (C, 1). Note: This is used for performance during
* training.
* - cache_var: Cache of the batch variance from the forward pass,
* of shape (C, 1). Note: This is used for performance during
* training.
* - cache_norm: Cache of the normalized inputs from the forward
* pass, of shape (C, N*Hin*Win). Note: This is used for
* performance during training.
* - X: Input data matrix to the forward pass, of
* shape (N, C*Hin*Win).
* - gamma: Scale parameters, of shape (C, 1).
* - beta: Shift parameters, of shape (C, 1).
* - C: Number of input channels (dimensionality of input depth).
* - Hin: Input height.
* - Win: Input width.
* - mode: 'train' or 'test' to indicate if the model is currently
* being trained or tested. During training, the current batch
* mean and variance will be used to normalize the inputs, while
* during testing, the exponential average of the mean and
* variance over all previous batches will be used.
* - ema_mean: Exponential moving average of the mean, of
* shape (C, 1).
* - ema_var: Exponential moving average of the variance, of
* shape (C, 1).
* - mu: Momentum value for moving averages.
* Typical values are in the range of [0.9, 0.999].
* - epsilon: Smoothing term to avoid divide by zero errors.
* Typical values are in the range of [1e-5, 1e-3].
*
* Outputs:
* - dX: Gradient wrt `X`, of shape (N, C*Hin*Win).
* - dgamma: Gradient wrt `W`, of shape (C, 1).
* - dbeta: Gradient wrt `b`, of shape (C, 1).
*
*/
def backward(dmlScript: StringBuilder, outSuffix: String): Unit =
invokeBackward(
dmlScript,
outSuffix,
List[String]("dOut" + id, dgamma, dbeta),
dout,
out,
ema_mean,
ema_var,
cache_mean,
cache_var,
cache_norm,
X,
gamma,
beta,
numChannels,
Hin,
Win,
"\\"train\\"",
ema_mean,
ema_var,
ma_fraction,
eps
)
private def withSuffix(str: String): String = if (update_mean_var) str else str + "_ignore"
override def weightShape(): Array[Int] = Array(numChannels.toInt, 1)
override def biasShape(): Array[Int] = Array(numChannels.toInt, 1)
def cache_mean(): String = "cache_mean" + id
  def cache_var(): String = "cache_var" + id
def cache_norm(): String = "cache_norm" + id
var scaleLayer: Scale = null
def gamma(): String = { checkNextLayer(); scaleLayer.weight }
def ma_fraction(): String = if (param.getBatchNormParam.hasMovingAverageFraction()) param.getBatchNormParam.getMovingAverageFraction.toString else "0.999"
def eps(): String = if (param.getBatchNormParam.hasEps()) param.getBatchNormParam.getEps.toString else "1e-5"
def beta(): String = { checkNextLayer(); scaleLayer.bias }
def dgamma(): String = { checkNextLayer(); scaleLayer.dWeight }
def dbeta(): String = { checkNextLayer(); scaleLayer.dBias }
override def shouldUpdateWeight(): Boolean = false
override def shouldUpdateBias(): Boolean = false
def ema_mean(): String = weight
def ema_var(): String = bias
def checkNextLayer(): Unit =
if (scaleLayer == null) {
val topLayers = net.getTopLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
      if (topLayers.length != 1 || !topLayers(0).isInstanceOf[Scale]) throw new LanguageException("Only one top layer of type Scale allowed for BatchNorm")
scaleLayer = topLayers(0).asInstanceOf[Scale]
}
def numChannels = bottomLayerOutputShape._1
def Hin = bottomLayerOutputShape._2
def Win = bottomLayerOutputShape._3
}
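// Illustrative scalar version of the transform quoted in the BatchNorm comments above
// (`y = ((x-mean) / sqrt(var+eps)) * gamma + beta`). The real layer operates on (N, C*Hin*Win)
// matrices through the nn library's batch_norm2d script; this object only sketches the math.
object BatchNormSketch {
  def normalize(x: Double, mean: Double, variance: Double, gamma: Double, beta: Double, eps: Double = 1e-5): Double =
    ((x - mean) / math.sqrt(variance + eps)) * gamma + beta
}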
// weight is gamma and bias is beta
class Scale(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
if (!param.getScaleParam.getBiasTerm) throw new LanguageException("Add \\"scale_param { bias_term: true }\\" to the layer " + param.getName)
override def sourceFileName = null
override def init(dmlScript: StringBuilder): Unit = {}
// TODO: Generalize this !!
def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit = assign(dmlScript, out, X)
override def backward(dmlScript: StringBuilder, outSuffix: String): Unit = assignDoutToDX(dmlScript, outSuffix)
override def weightShape(): Array[Int] = Array(bottomLayerOutputShape._1.toInt, 1)
override def biasShape(): Array[Int] = Array(bottomLayerOutputShape._1.toInt, 1)
}
// ------------------------------------------------------------------
class Elementwise(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
override def sourceFileName = null
override def init(dmlScript: StringBuilder): Unit = {}
if (param.getEltwiseParam.hasOperation && param.getEltwiseParam.getOperation != EltwiseOp.SUM)
throw new LanguageException("Currently only elementwise sum operation supported")
override def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit =
addAndAssign(dmlScript, out, param.getBottomList.map(b => net.getCaffeLayer(b).out).toList)
override def backward(dmlScript: StringBuilder, outSuffix: String): Unit = assignDoutToDX(dmlScript, outSuffix)
override def outputShape = {
if (_out == null) _out = net.getCaffeLayer(net.getBottomLayers(param.getName).take(1).toSeq.get(0)).outputShape
_out
}
var _out: (String, String, String) = null
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
}
class Concat(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
override def sourceFileName = null
override def init(dmlScript: StringBuilder): Unit = {}
var _childLayers: List[CaffeLayer] = null
// Utility function to create string of format:
// fn(fn(fn(_childLayers(0).out, _childLayers(1).out), _childLayers(2).out), ...)
// This is useful because we do not support multi-input cbind and rbind in DML.
def _getMultiFn(fn: String): String = {
if (_childLayers == null) _childLayers = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
var tmp = fn + "(" + _childLayers(0).out + ", " + _childLayers(1).out + ")"
for (i <- 2 until _childLayers.size) {
tmp = fn + "(" + tmp + ", " + _childLayers(i).out + ")"
}
tmp
}
/*
* Computes the forward pass for a concatenation layer.
*
* Inputs:
* - n_i * c_i * h * w for each input blob i from 1 to K.
*
* Outputs:
* - out: Outputs, of shape
* - if axis = 0: (n_1 + n_2 + ... + n_K) * c_1 * h * w, and all input c_i should be the same.
* - if axis = 1: n_1 * (c_1 + c_2 + ... + c_K) * h * w, and all input n_i should be the same.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit =
if (param.getConcatParam.getAxis == 0) {
// rbind the inputs
assign(dmlScript, out, _getMultiFn("rbind"))
} else if (param.getConcatParam.getAxis == 1) {
// cbind the inputs
assign(dmlScript, out, _getMultiFn("cbind"))
} else {
throw new DMLRuntimeException("Incorrect axis parameter for the layer " + param.getName)
}
def startIndex(outSuffix: String): String = "concat_start_index_" + outSuffix
  def endIndex(outSuffix: String): String = "concat_end_index_" + outSuffix
def getConcatIndex(bottomLayerOut: String, outSuffix: String): String =
startIndex(outSuffix) + " = " + endIndex(outSuffix) + " + 1; " +
endIndex(outSuffix) + " = " + startIndex(outSuffix) + " + nrow(" + bottomLayerOut + "); "
/*
* Computes the backward pass for a concatenation layer.
*
* The top gradients are deconcatenated back to the inputs.
*
*/
override def backward(dmlScript: StringBuilder, outSuffix: String): Unit = {
val bottomLayers = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
val dOutVar = "dOut" + id + outSuffix
// concat_end_index = 0
dmlScript.append(dOutVar + " = " + dout + "; concat_end_index" + outSuffix + " = 0; ")
val indexString = "concat_start_index" + outSuffix + " : concat_end_index" + outSuffix
val doutVarAssignment =
if (param.getConcatParam.getAxis == 0) " = " + dOutVar + "[" + indexString + ", ]; "
else " = " + dOutVar + "[," + indexString + " ]; "
// concat_start_index = concat_end_index + 1
// concat_end_index = concat_start_index + $$ - 1
val initializeIndexString = "concat_start_index" + outSuffix + " = concat_end_index" + outSuffix + " + 1; concat_end_index" + outSuffix +
" = concat_start_index" + outSuffix + " + $$ - 1; "
if (param.getConcatParam.getAxis == 0) {
bottomLayers.map(l => {
dmlScript
.append(initializeIndexString.replaceAll("$$", nrow(l.out)))
// X1 = Z[concat_start_index:concat_end_index,]
.append(dX(l.id) + outSuffix + doutVarAssignment)
})
} else {
bottomLayers.map(l => {
dmlScript
.append(initializeIndexString.replaceAll("$$", int_mult(l.outputShape._1, l.outputShape._2, l.outputShape._3)))
// X1 = Z[concat_start_index:concat_end_index,]
.append(dX(l.id) + outSuffix + doutVarAssignment)
})
}
dmlScript.append("\\n")
}
def sumChannels(): String = {
val channels = _childLayers.map(_.outputShape._1)
try {
channels.reduce((c1, c2) => (c1.toInt + c2.toInt).toString())
} catch {
case _: Throwable => sum(new StringBuilder, channels).toString
}
}
override def outputShape = {
if (_out == null) {
if (_childLayers == null) _childLayers = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).toList
if (param.getConcatParam.getAxis == 0) {
_out = _childLayers(0).outputShape
} else if (param.getConcatParam.getAxis == 1) {
_out = (sumChannels(), _childLayers(0).outputShape._2, _childLayers(0).outputShape._3)
} else {
throw new DMLRuntimeException("Incorrect axis parameter for the layer " + param.getName)
}
}
_out
}
var _out: (String, String, String) = null
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
}
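// Illustrative helper (not used by the generator): the nested call string produced by
// Concat._getMultiFn above is just a left fold over the bottom layers' outputs, e.g.
// nestedCall("cbind", List("out1", "out2", "out3")) == "cbind(cbind(out1, out2), out3)".
object ConcatCallSketch {
  def nestedCall(fn: String, outs: List[String]): String =
    outs.reduceLeft((acc, o) => fn + "(" + acc + ", " + o + ")")
}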
// L2 loss function.
class EuclideanLoss(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with IsLossLayer {
override def sourceFileName: String = if (!isSegmentationProblem()) "l2_loss" else throw new DMLRuntimeException("Segmentation is not supported for EuclideanLoss in Caffe2DML yet")
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) =
invokeForward(dmlScript, List[String](out), scores, "yb")
  override def backward(dmlScript: StringBuilder, outSuffix: String): Unit =
    invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id + outSuffix), scores, "yb")
  override def computeLoss(dmlScript: StringBuilder, numTabs: Int): Unit =
if (!isSegmentationProblem()) {
val tabBuilder = new StringBuilder
for (i <- 0 until numTabs) tabBuilder.append("\\t")
val tabs = tabBuilder.toString
dmlScript.append("tmp_loss = l2_loss::forward(" + commaSep(out, "yb") + ")\\n")
dmlScript.append(tabs).append("loss = loss + tmp_loss\\n")
dmlScript.append(tabs).append("accuracy = -1\\n")
} else {
throw new RuntimeException("Computation of loss for SoftmaxWithLoss is not implemented for segmentation problem")
}
}
class SoftmaxWithLoss(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with IsLossLayer {
// -------------------------------------------------
override def sourceFileName = if (!isSegmentationProblem()) "softmax" else "softmax2d"
override def init(dmlScript: StringBuilder) = {}
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) =
if (!isSegmentationProblem()) {
invokeForward(dmlScript, List[String](out), scores)
} else {
invokeForward(dmlScript, List[String](out), scores, outputShape._1)
}
override def backward(dmlScript: StringBuilder, outSuffix: String) =
if (!isSegmentationProblem()) {
invoke(dmlScript, "cross_entropy_loss::", List[String]("dProbs" + outSuffix), "backward", false, out, "yb")
dmlScript.append("; ")
invoke(dmlScript, "softmax::", List[String]("dOut" + id + outSuffix), "backward", false, "dProbs", scores)
val bottomLayerIDs = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l).id)
dmlScript.append("; ")
bottomLayerIDs.map(bottomLayerID => dmlScript.append(dX(bottomLayerID) + outSuffix + " = " + "dOut" + id + outSuffix + "; "))
dmlScript.append("\\n")
} else {
throw new RuntimeException("backward for SoftmaxWithLoss is not implemented for segmentation problem")
}
override def computeLoss(dmlScript: StringBuilder, numTabs: Int) =
if (!isSegmentationProblem()) {
val tabBuilder = new StringBuilder
for (i <- 0 until numTabs) tabBuilder.append("\\t")
val tabs = tabBuilder.toString
dmlScript.append("tmp_loss = cross_entropy_loss::forward(" + commaSep(out, "yb") + ")\\n")
dmlScript.append(tabs).append("loss = loss + tmp_loss\\n")
dmlScript.append(tabs).append("true_yb = rowIndexMax(yb)\\n")
dmlScript.append(tabs).append("predicted_yb = rowIndexMax(" + out + ")\\n")
dmlScript.append(tabs).append("accuracy = mean(predicted_yb == true_yb)*100\\n")
} else {
throw new RuntimeException("Computation of loss for SoftmaxWithLoss is not implemented for segmentation problem")
}
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
// -------------------------------------------------
override def bottomLayerOutputShape: (String, String, String) = {
if (computedBottomLayerOutputShape == null) {
val ret = net.getBottomLayers(param.getName).map(l => net.getCaffeLayer(l)).filter(l => !l.isInstanceOf[Data]).toList
if (ret.size != 1) throw new LanguageException("Expected exactly 1 bottom non-Data layer for " + param.getName)
computedBottomLayerOutputShape = ret(0).outputShape
}
computedBottomLayerOutputShape
}
}
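// Illustrative array-level sketch of the two quantities emitted by computeLoss above:
// cross entropy against one-hot labels and accuracy in percent. The eps term and the
// per-example scope are assumptions of this sketch; the generated DML computes the same
// quantities on full matrices via cross_entropy_loss::forward and rowIndexMax.
object CrossEntropySketch {
  def crossEntropy(probs: Array[Double], oneHot: Array[Double], eps: Double = 1e-10): Double =
    -probs.zip(oneHot).map { case (p, y) => y * math.log(p + eps) }.sum // eps added here only for numerical safety
  def accuracyPercent(predicted: Array[Int], truth: Array[Int]): Double =
    predicted.zip(truth).count { case (p, t) => p == t }.toDouble / predicted.length * 100
}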
class Sigmoid(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
override def sourceFileName = "sigmoid"
override def init(dmlScript: StringBuilder) = {}
/*
* Computes the forward pass for a sigmoid nonlinearity layer.
*
* `sigmoid(x) = 1 / (1 + e^-x)`
*
* If `X` contains a single feature column, the output of a sigmoid
* layer can be interpreted as a predicted probability of a true
* class when paired with a log loss function in a binary
* classification problem.
*
* Inputs:
* - X: Inputs, of shape (any, any).
*
* Outputs:
* - out: Outputs, of same shape as `X`.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = invokeForward(dmlScript, List[String](out), X)
/*
* Computes the backward pass for a sigmoid nonlinearity layer.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of same shape as `X`.
* - X: Inputs, of shape (any, any).
*
* Outputs:
* - dX: Gradient wrt `X`, of same shape as `X`.
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) = invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id), dout, X)
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
// -------------------------------------------------
}
class TanH(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
override def sourceFileName = "tanh"
override def init(dmlScript: StringBuilder) = {}
/*
* Computes the forward pass for a tanh nonlinearity layer.
*
* Inputs:
* - X: Inputs, of shape (any, any).
*
* Outputs:
* - out: Outputs, of same shape as `X`.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = invokeForward(dmlScript, List[String](out), X)
/*
* Computes the backward pass for a tanh nonlinearity layer.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of same shape as `X`.
* - X: Inputs, of shape (any, any).
*
* Outputs:
* - dX: Gradient wrt `X`, of same shape as `X`.
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) = invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id), dout, X)
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
// -------------------------------------------------
}
class ReLU(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
// TODO: Leaky ReLU: negative_slope [default 0]: specifies whether to leak the negative part by multiplying it with the slope value rather than setting it to 0.
// -------------------------------------------------
override def sourceFileName = "relu"
override def init(dmlScript: StringBuilder) = {}
/*
* Computes the forward pass for a ReLU nonlinearity layer.
*
* Performs an element-wise evaluation of `f(input) = max(0, input)`.
*
* Inputs:
* - X: Inputs, of shape (any, any).
*
* Outputs:
* - out: Outputs, of same shape as `X`.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = invokeForward(dmlScript, List[String](out), X)
/*
* Computes the backward pass for a ReLU nonlinearity layer.
*
* Essentially performs a pass-through of the upstream gradient
* for cells > 0.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of same shape as `X`.
* - X: Previous input data matrix, of shape (any, any).
*
* Outputs:
* - dX: Gradient wrt `X`, of same shape as `X`.
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) = invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id), dout, X)
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
// -------------------------------------------------
}
class Softmax(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
// -------------------------------------------------
override def sourceFileName = "softmax"
override def init(dmlScript: StringBuilder) = {}
/*
* Computes the forward pass for a softmax classifier. The inputs
* are interpreted as unnormalized, log-probabilities for each of
* N examples, and the softmax function transforms them to normalized
* probabilities.
*
* This can be interpreted as a generalization of the sigmoid
* function to multiple classes.
*
* `probs_ij = e^scores_ij / sum(e^scores_i)`
*
* Inputs:
* - scores: Inputs, of shape (N, D).
*
* Outputs:
* - probs: Outputs, of shape (N, D).
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = invokeForward(dmlScript, List[String](out), X)
/*
* Computes the backward pass for a softmax classifier.
*
* Note that dscores_ij has multiple source branches:
*
* ```
* dprobs_ij/dscores_ij = probs_ij * (1 - probs_ij)
* dprobs_ik/dscores_ij = -probs_ik * probs_ij, for all k != j
*
* dloss/dscores_ij =
* (dloss/dprobs_ij * dprobs_ij/dscores_ij)
* + sum_{k!=j}(dloss/dprobs_ik * dprobs_ik/dscores_ij)
* ```
*
* Inputs:
* - dprobs: Gradient wrt `probs` from upstream, of shape (N, D).
* - scores: Inputs, of shape (N, D).
*
* Outputs:
* - dscores: Gradient wrt `scores`, of shape (N, D).
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) = invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id), dout, X)
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
// -------------------------------------------------
}
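// Illustrative sketch of the row-wise softmax described above for a single row of scores,
// `probs_ij = e^scores_ij / sum(e^scores_i)`. Subtracting the row maximum is the usual
// numerical-stability trick and is an assumption of this sketch; the layer's actual
// behaviour is defined by nn's softmax.dml.
object SoftmaxSketch {
  def softmax(scores: Array[Double]): Array[Double] = {
    val m    = scores.max
    val exps = scores.map(s => math.exp(s - m))
    val z    = exps.sum
    exps.map(_ / z)
  }
}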
class Threshold(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
override def sourceFileName = null
override def init(dmlScript: StringBuilder) = {}
val threshold = if (param.getThresholdParam.hasThreshold) param.getThresholdParam.getThreshold else 0
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) = assign(dmlScript, out, X + " > " + threshold)
override def backward(dmlScript: StringBuilder, outSuffix: String) = throw new DMLRuntimeException("Backward operation for Threshold layer is not supported.")
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
}
class Dropout(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
// -------------------------------------------------
override def sourceFileName = "dropout"
override def init(dmlScript: StringBuilder) = {}
/*
* Computes the forward pass for an inverted dropout layer.
*
* Drops the inputs element-wise with a probability p, and divides
* by p to maintain the expected values of those inputs (which are
* the outputs of neurons) at test time.
*
* Inputs:
* - X: Inputs, of shape (any, any).
* - p: Probability of keeping a neuron output.
* - seed: [Optional: -1] Random number generator seed to allow for
* deterministic evaluation. Set to -1 for a random seed.
*
* Outputs:
* - out: Outputs, of same shape as `X`.
* - mask: Dropout mask used to compute the output.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) =
if (!isPrediction)
invokeForward(dmlScript, List[String](out, mask), X, p, seed)
else
assign(dmlScript, out, X) // Forward-pass not required to be performed during prediction for Dropout layer
/*
* Computes the backward pass for an inverted dropout layer.
*
* Applies the mask to the upstream gradient, and divides by p to
* maintain the expected values at test time.
*
* Inputs:
* - dout: Gradient wrt `out`, of same shape as `X`.
* - X: Inputs, of shape (any, any).
* - p: Probability of keeping a neuron output.
* - mask: Dropout mask used to compute the output.
*
* Outputs:
* - dX: Gradient wrt `X`, of same shape as `X`.
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) = invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id), dout, X, p, mask)
// -------------------------------------------------
def mask = "mask" + id
// dropout ratio
def p = if (param.getDropoutParam.hasDropoutRatio()) param.getDropoutParam.getDropoutRatio.toString else "0.5"
def seed = "-1"
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
}
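// Illustrative sketch of inverted dropout as documented above: keep each value with
// probability p and divide by p so the expected activation is unchanged. The real layer
// delegates to nn's dropout.dml; the RNG handling here is an assumption for illustration.
object DropoutSketch {
  def forward(x: Array[Double], p: Double, rng: scala.util.Random): (Array[Double], Array[Double]) = {
    val mask = x.map(_ => if (rng.nextDouble() < p) 1.0 else 0.0) // 1 = keep, 0 = drop
    val out  = x.zip(mask).map { case (v, m) => v * m / p }       // rescale kept values by 1/p
    (out, mask)
  }
}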
class InnerProduct(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
// -------------------------------------------------
// TODO: bias_filler [default type: 'constant' value: 0]; bias_term [default true]: specifies whether to learn and apply a set of additive biases to the filter outputs
override def sourceFileName = "affine"
/*
* Initialize the parameters of this layer.
*
* Note: This is just a convenience function, and parameters
* may be initialized manually if needed.
*
* We use the heuristic by He et al., which limits the magnification
* of inputs/gradients during forward/backward passes by scaling
* unit-Gaussian weights by a factor of sqrt(2/n), under the
* assumption of relu neurons.
* - http://arxiv.org/abs/1502.01852
*
* Inputs:
* - D: Dimensionality of the input features (number of features).
* - M: Number of neurons in this layer.
*
* Outputs:
* - W: Weights, of shape (D, M).
* - b: Biases, of shape (1, M).
*/
override def init(dmlScript: StringBuilder) = invokeInit(dmlScript, List[String](weight, bias), numFeatures, numNeurons)
/*
* Computes the forward pass for an affine (fully-connected) layer
* with M neurons. The input data has N examples, each with D
* features.
*
* Inputs:
* - X: Inputs, of shape (N, D).
* - W: Weights, of shape (D, M).
* - b: Biases, of shape (1, M).
*
* Outputs:
* - out: Outputs, of shape (N, M).
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) =
invokeForward(dmlScript, List[String](out), X, weight, bias)
/*
* Computes the backward pass for a fully-connected (affine) layer
* with M neurons.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of shape (N, M).
* - X: Inputs, of shape (N, D).
* - W: Weights, of shape (D, M).
* - b: Biases, of shape (1, M).
*
* Outputs:
* - dX: Gradient wrt `X`, of shape (N, D).
* - dW: Gradient wrt `W`, of shape (D, M).
* - db: Gradient wrt `b`, of shape (1, M).
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) =
invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id, dWeight, dBias), dout, X, weight, bias)
// -------------------------------------------------
// num_output (c_o): the number of filters
def numNeurons = param.getInnerProductParam.getNumOutput.toString
def numFeatures = int_mult(bottomLayerOutputShape._1, bottomLayerOutputShape._2, bottomLayerOutputShape._3)
// n * c_o * 1 * 1
override def outputShape = (param.getInnerProductParam.getNumOutput.toString, "1", "1")
override def weightShape(): Array[Int] = Array(numFeatures.toInt, numNeurons.toInt)
override def biasShape(): Array[Int] = Array(1, numNeurons.toInt)
}
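// Worked example (values assumed) for the shapes above: the affine layer flattens the bottom
// volume, so a bottom shape of (C=3, H=28, W=28) with num_output=512 gives
// numFeatures = 3*28*28 = 2352, weightShape = (2352, 512) and biasShape = (1, 512).
object AffineShapeSketch {
  def numFeatures(c: Int, h: Int, w: Int): Int = c * h * w // numFeatures(3, 28, 28) == 2352
}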
class MaxPooling(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer {
// -------------------------------------------------
override def sourceFileName = "max_pool2d_builtin"
override def init(dmlScript: StringBuilder) = {}
/*
* Computes the forward pass for a 2D spatial max pooling layer.
* The input data has N examples, each represented as a 3D volume
* unrolled into a single vector.
*
* This implementation uses a built-in operator for higher
* performance.
*
* Inputs:
* - X: Inputs, of shape (N, C*Hin*Win).
* - C: Number of input channels (dimensionality of input depth).
* - Hin: Input height.
* - Win: Input width.
* - Hf: Filter height.
* - Wf: Filter width.
* - strideh: Stride over height.
* - stridew: Stride over width.
* - padh: Padding for top and bottom sides.
* A typical value is 0.
* - padw: Padding for left and right sides.
* A typical value is 0.
*
* Outputs:
* - out: Outputs, of shape (N, C*Hout*Wout).
* - Hout: Output height.
* - Wout: Output width.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) =
invokeForward(dmlScript, List[String](out, "ignoreHout_" + id, "ignoreWout_" + id), X, numChannels, Hin, Win, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w)
/*
* Computes the backward pass for a 2D spatial max pooling layer.
* The input data has N examples, each represented as a 3D volume
* unrolled into a single vector.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of
* shape (N, C*Hout*Wout).
* - Hout: Output height.
* - Wout: Output width.
* - X: Inputs, of shape (N, C*Hin*Win).
* - C: Number of input channels (dimensionality of input depth).
* - Hin: Input height.
* - Win: Input width.
* - Hf: Filter height.
* - Wf: Filter width.
* - strideh: Stride over height.
* - stridew: Stride over width.
* - padh: Padding for top and bottom sides.
* A typical value is 0.
* - padw: Padding for left and right sides.
* A typical value is 0.
*
* Outputs:
* - dX: Gradient wrt `X`, of shape (N, C*Hin*Win).
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) =
invokeBackward(dmlScript, outSuffix, List[String]("dOut" + id), dout, Hout, Wout, X, numChannels, Hin, Win, kernel_h, kernel_w, stride_h, stride_w, pad_h, pad_w)
// n * c * h_o * w_o, where h_o and w_o are computed in the same way as convolution.
override def outputShape = (numChannels, Hout, Wout)
// -------------------------------------------------
def Hin = bottomLayerOutputShape._2
def Win = bottomLayerOutputShape._3
def Hout = ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._2, kernel_h, stride_h, pad_h)
def Wout = ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._3, kernel_w, stride_w, pad_w)
def poolingParam = param.getPoolingParam
def numChannels = bottomLayerOutputShape._1
// kernel_size (or kernel_h and kernel_w): specifies height and width of each filter
def kernel_h =
if (poolingParam.hasKernelH) poolingParam.getKernelH.toString
else poolingParam.getKernelSize.toString
def kernel_w =
if (poolingParam.hasKernelW) poolingParam.getKernelW.toString
else poolingParam.getKernelSize.toString
// stride (or stride_h and stride_w) [default 1]: specifies the intervals at which to apply the filters to the input
def stride_h =
if (poolingParam.hasStrideH) poolingParam.getStrideH.toString
else if (poolingParam.hasStride) poolingParam.getStride.toString
else "1"
def stride_w =
if (poolingParam.hasStrideW) poolingParam.getStrideW.toString
else if (poolingParam.hasStride) poolingParam.getStride.toString
else "1"
// pad (or pad_h and pad_w) [default 0]: specifies the number of pixels to (implicitly) add to each side of the input
def pad_h =
if (poolingParam.hasPadH) poolingParam.getPadH.toString
else if (poolingParam.hasPad) poolingParam.getPad.toString
else "0"
def pad_w =
if (poolingParam.hasPadW) poolingParam.getPadW.toString
else if (poolingParam.hasPad) poolingParam.getPad.toString
else "0"
override def weightShape(): Array[Int] = null
override def biasShape(): Array[Int] = null
}
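// Illustrative integer form of the output size used by Hout/Wout above; the layer itself
// calls ConvolutionUtils.getConv2dOutputMap on string-valued dimensions, this sketch only
// shows the underlying formula for concrete integer inputs.
object PoolOutputSketch {
  def outDim(in: Int, kernel: Int, stride: Int, pad: Int): Int =
    (in + 2 * pad - kernel) / stride + 1 // e.g. outDim(28, 2, 2, 0) == 14
}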
class Convolution(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
def isDepthWise(): Boolean = {
if (param.getConvolutionParam.hasGroup && param.getConvolutionParam.getGroup != 1 && numChannels.toInt % param.getConvolutionParam.getGroup != 0)
throw new DMLRuntimeException(
"The number of groups=" + param.getConvolutionParam.getGroup + " is not supported as it is not divisible by number of channels" + numChannels + "."
)
param.getConvolutionParam.hasGroup && param.getConvolutionParam.getGroup != 1
}
def depthMultiplier(): String = if (isDepthWise) (numChannels.toInt / param.getConvolutionParam.getGroup).toString else throw new DMLRuntimeException("Incorrect usage of depth")
// -------------------------------------------------
override def sourceFileName = if (isDepthWise) "conv2d_builtin_depthwise" else "conv2d_builtin"
/*
* Initialize the parameters of this layer.
*
* Note: This is just a convenience function, and parameters
* may be initialized manually if needed.
*
* We use the heuristic by He et al., which limits the magnification
* of inputs/gradients during forward/backward passes by scaling
* unit-Gaussian weights by a factor of sqrt(2/n), under the
* assumption of relu neurons.
* - http://arxiv.org/abs/1502.01852
*
* Inputs without depthwise:
* - F: Number of filters.
* - C: Number of input channels (dimensionality of depth).
* - Hf: Filter height.
* - Wf: Filter width.
*
* Inputs with depthwise:
* - C: Number of input channels (dimensionality of depth).
* - M: Number of filters per input channel (i.e. depth multiplier).
* - Hf: Filter height.
* - Wf: Filter width.
*
* Outputs:
* - W: Weights, of shape (F, C*Hf*Wf).
* - b: Biases, of shape (F, 1).
*/
override def init(dmlScript: StringBuilder) =
if (isDepthWise)
invokeInit(dmlScript, List[String](weight, bias), numChannels, depthMultiplier, kernel_h, kernel_w)
else
invokeInit(dmlScript, List[String](weight, bias), numKernels, numChannels, kernel_h, kernel_w)
/*
* Computes the forward pass for a 2D spatial convolutional layer with
* F filters. The input data has N examples, each represented as a 3D
* volume unrolled into a single vector.
*
* This implementation uses a built-in operator for higher
* performance.
*
* Inputs:
* - X: Inputs, of shape (N, C*Hin*Win).
* - W: Weights, of shape (F, C*Hf*Wf).
* - b: Biases, of shape (F, 1).
* - C: Number of input channels (dimensionality of depth).
* - Hin: Input height.
* - Win: Input width.
* (only for depthwise) - M: Number of filters per input channel (i.e. depth multiplier).
* - Hf: Filter height.
* - Wf: Filter width.
* - strideh: Stride over height.
* - stridew: Stride over width.
* - padh: Padding for top and bottom sides.
* For same output height as input, set `padh = (Hf - 1) / 2`,
* assuming `strideh = 1`.
* More generally, `padh = (Hin*(strideh-1) + Hf - strideh) / 2`
* preserves the spatial dimensions of the input.
* - padw: Padding for left and right sides.
* For same output width as input, set `padw = (Wf - 1) / 2`,
* assuming `stridew = 1`.
* More generally, `padw = (Win*(stridew-1) + Wf - stridew) / 2`
* preserves the spatial dimensions of the input.
*
* Outputs:
* - out: Outputs, of shape (N, F*Hout*Wout).
* - Hout: Output height.
* - Wout: Output width.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean) =
if (isDepthWise)
invokeForward(
dmlScript,
List[String](out, "ignoreHout_" + id, "ignoreWout_" + id),
X,
weight,
bias,
numChannels,
Hin,
Win,
depthMultiplier,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w
)
else
invokeForward(dmlScript,
List[String](out, "ignoreHout_" + id, "ignoreWout_" + id),
X,
weight,
bias,
numChannels,
Hin,
Win,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w)
/*
* Computes the backward pass for a 2D spatial convolutional layer
* with F filters.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of
* shape (N, F*Hout*Wout).
* - Hout: Output height.
* - Wout: Output width.
* - X: Inputs, of shape (N, C*Hin*Win).
* - W: Weights, of shape (F, C*Hf*Wf).
* - b: Biases, of shape (F, 1).
* - C: Number of input channels (dimensionality of depth).
* - Hin: Input height.
* - Win: Input width.
* (only for depthwise) - M: Number of filters per input channel (i.e. depth multiplier).
* - Hf: Filter height.
* - Wf: Filter width.
* - strideh: Stride over height.
* - stridew: Stride over width.
* - padh: Padding for top and bottom sides.
* For same output height as input, set `padh = (Hf - 1) / 2`,
* assuming `strideh = 1`.
* More generally, `padh = (Hin*(strideh-1) + Hf - strideh) / 2`
* preserves the spatial dimensions of the input.
* - padw: Padding for left and right sides.
* For same output width as input, set `padw = (Wf - 1) / 2`,
* assuming `stridew = 1`.
* More generally, `padw = (Win*(stridew-1) + Wf - stridew) / 2`
* preserves the spatial dimensions of the input.
*
* Outputs:
* - dX: Gradient wrt `X`, of shape (N, C*Hin*Win).
* - dW: Gradient wrt `W`, of shape (F, C*Hf*Wf).
* - db: Gradient wrt `b`, of shape (F, 1).
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) =
if (isDepthWise)
invokeBackward(
dmlScript,
outSuffix,
List[String]("dOut" + id, dWeight, dBias),
dout,
Hout,
Wout,
X,
weight,
bias,
numChannels,
Hin,
Win,
depthMultiplier,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w
)
else
invokeBackward(
dmlScript,
outSuffix,
List[String]("dOut" + id, dWeight, dBias),
dout,
Hout,
Wout,
X,
weight,
bias,
numChannels,
Hin,
Win,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w
)
// if not depthwise, n * c_o * h_o * w_o, where h_o = (h_i + 2 * pad_h - kernel_h) / stride_h + 1 and w_o likewise.
// else (N, C*M*Hout*Wout)
override def outputShape =
if (isDepthWise) ((numChannels.toInt * depthMultiplier.toInt).toString, Hout, Wout)
else (numKernels, Hout, Wout)
// -------------------------------------------------
def numChannels = bottomLayerOutputShape._1
def Hin = bottomLayerOutputShape._2
def Win = bottomLayerOutputShape._3
def Hout = ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._2, kernel_h, stride_h, pad_h)
def Wout = ConvolutionUtils.getConv2dOutputMap(bottomLayerOutputShape._3, kernel_w, stride_w, pad_w)
// -------------------------------------------------
def convParam = param.getConvolutionParam
// if depthwise (C, M*Hf*Wf) else (F, C*Hf*Wf)
override def weightShape(): Array[Int] =
if (isDepthWise) Array(numChannels.toInt, int_mult(depthMultiplier, kernel_h, kernel_w).toInt)
else Array(numKernels.toInt, int_mult(numChannels, kernel_h, kernel_w).toInt)
// if depthwise (C*M, 1) else (F, 1)
override def biasShape(): Array[Int] =
if (isDepthWise) Array(numChannels.toInt * depthMultiplier.toInt, 1)
else Array(numKernels.toInt, 1)
// num_output (c_o): the number of filters
def numKernels = convParam.getNumOutput.toString
// kernel_size (or kernel_h and kernel_w): specifies height and width of each filter
def kernel_h =
if (convParam.hasKernelH) convParam.getKernelH.toString
else if (convParam.getKernelSizeCount > 0) convParam.getKernelSize(0).toString
else throw new LanguageException("Incorrect kernel parameters")
def kernel_w =
if (convParam.hasKernelW) convParam.getKernelW.toString
else if (convParam.getKernelSizeCount > 0) convParam.getKernelSize(0).toString
else throw new LanguageException("Incorrect kernel parameters")
// stride (or stride_h and stride_w) [default 1]: specifies the intervals at which to apply the filters to the input
def stride_h =
if (convParam.hasStrideH) convParam.getStrideH.toString
else if (convParam.getStrideCount > 0) convParam.getStride(0).toString
else "1"
def stride_w =
if (convParam.hasStrideW) convParam.getStrideW.toString
else if (convParam.getStrideCount > 0) convParam.getStride(0).toString
else "1"
// pad (or pad_h and pad_w) [default 0]: specifies the number of pixels to (implicitly) add to each side of the input
def pad_h =
if (convParam.hasPadH) convParam.getPadH.toString
else if (convParam.getPadCount > 0) convParam.getPad(0).toString
else "0"
def pad_w =
if (convParam.hasPadW) convParam.getPadW.toString
else if (convParam.getPadCount > 0) convParam.getPad(0).toString
else "0"
}
class DeConvolution(val param: LayerParameter, val id: Int, val net: CaffeNetwork) extends CaffeLayer with HasWeight with HasBias {
def isDepthWise(): Boolean = {
if (param.getConvolutionParam.hasGroup && param.getConvolutionParam.getGroup != 1 && numChannels.toInt % param.getConvolutionParam.getGroup != 0)
throw new DMLRuntimeException(
"The number of groups=" + param.getConvolutionParam.getGroup + " is not supported as it is not divisible by number of channels" + numChannels + "."
)
param.getConvolutionParam.hasGroup && param.getConvolutionParam.getGroup != 1
}
def depthMultiplier(): String = if (isDepthWise) (numChannels.toInt / param.getConvolutionParam.getGroup).toString else throw new DMLRuntimeException("Incorrect usage of depth")
override def sourceFileName: String = if (isDepthWise) "conv2d_transpose_depthwise" else "conv2d_transpose"
/*
* Utility function to initialize the parameters of this layer.
*
* We use the heuristic by He et al., which limits the magnification
* of inputs/gradients during forward/backward passes by scaling
* unit-Gaussian weights by a factor of sqrt(2/n), under the
* assumption of relu neurons.
* - http://arxiv.org/abs/1502.01852
*
* Inputs without depthwise:
* - F: Number of filters.
* - C: Number of input channels (dimensionality of depth).
* - Hf: Filter height.
* - Wf: Filter width.
*
* Inputs with depthwise:
* - C: Number of input channels (dimensionality of depth).
* - M: Depth of each filter (C must be divisible by M).
* - Hf: Filter height.
* - Wf: Filter width.
*
* Outputs:
* - W: Weights, of shape (C, F*Hf*Wf).
* - b: Biases, of shape (F, 1).
*/
override def init(dmlScript: StringBuilder): Unit =
if (isDepthWise)
invokeInit(dmlScript, List[String](weight, bias), numChannels, depthMultiplier, kernel_h, kernel_w)
else
invokeInit(dmlScript, List[String](weight, bias), numKernels, numChannels, kernel_h, kernel_w)
private def C_DivideBy_M(): Int = numChannels.toInt / depthMultiplier.toInt
// if depthwise (C/M, M*Hf*Wf), else (C, F*Hf*Wf)
override def weightShape(): Array[Int] =
if (isDepthWise)
Array(C_DivideBy_M, int_mult(depthMultiplier, kernel_h, kernel_w).toInt)
else
Array(numChannels.toInt, int_mult(numKernels, kernel_h, kernel_w).toInt)
// if depthwise (C/M, 1), else (F, 1)
override def biasShape(): Array[Int] =
if (isDepthWise)
Array(C_DivideBy_M, 1)
else
Array(numKernels.toInt, 1)
private def numGroups: Int = if (param.getConvolutionParam.hasGroup) param.getConvolutionParam.getGroup else 1
/*
* Computes the forward pass for a 2D spatial transpose convolutional
* layer with F filters. The input data has N examples, each
* represented as a 3D tensor flattened into a single vector.
*
* Inputs:
* - X: Inputs, of shape (N, C*Hin*Win).
* - W: Weights, of shape (C, F*Hf*Wf).
* - b: Biases, of shape (F, 1).
* - C: Number of input channels (dimensionality of depth).
* - Hin: Input height.
* - Win: Input width.
* (only for depthwise): - M: Depth of each filter (C must be divisible by M).
* - Hf: Filter height.
* - Wf: Filter width.
* - strideh: Stride over height.
* - stridew: Stride over width.
* - padh: Padding for top and bottom sides.
* - padw: Padding for left and right sides.
* - out_padh: extra padding for top side. This should
* lie in [0, strideh-1].
* - out_padw: extra padding for right side. This should
* lie in [0, stridew-1].
*
* Outputs:
* - out: Outputs, of shape (N, F*Hout*Wout).
* - Hout: Output height.
* - Wout: Output width.
*/
override def forward(dmlScript: StringBuilder, isPrediction: Boolean): Unit =
if (isDepthWise)
invokeForward(
dmlScript,
List[String](out, "ignoreHout_" + id, "ignoreWout_" + id),
X,
weight,
bias,
numChannels,
Hin,
Win,
depthMultiplier,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
"0",
"0"
)
else
invokeForward(
dmlScript,
List[String](out, "ignoreHout_" + id, "ignoreWout_" + id),
X,
weight,
bias,
numChannels,
Hin,
Win,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w,
"0",
"0"
)
/*
* Computes the backward pass for a 2D spatial transpose
* convolutional layer with F filters.
*
* Inputs:
* - dout: Gradient wrt `out` from upstream, of
* shape (N, F*Hout*Wout).
* - Hout: Output height.
* - Wout: Output width.
* - X: Inputs, of shape (N, C*Hin*Win).
* - W: Weights, of shape (C, F*Hf*Wf).
* - b: Biases, of shape (F, 1).
* - C: Number of input channels (dimensionality of depth).
* - Hin: Input height.
* - Win: Input width.
* (only for depthwise): - M: Depth of each filter (C must be divisible by M).
* - Hf: Filter height.
* - Wf: Filter width.
* - strideh: Stride over height.
* - stridew: Stride over width.
* - padh: Padding for top and bottom sides.
* - padw: Padding for left and right sides.
*
* Outputs:
* - dX: Gradient wrt `X`, of shape (N, C*Hin*Win).
* - dW: Gradient wrt `W`, of shape (C, F*Hf*Wf).
* - db: Gradient wrt `b`, of shape (F, 1).
*/
override def backward(dmlScript: StringBuilder, outSuffix: String) =
if (isDepthWise)
invokeBackward(
dmlScript,
outSuffix,
List[String]("dOut" + id, dWeight, dBias),
dout,
Hout,
Wout,
X,
weight,
bias,
numChannels,
Hin,
Win,
depthMultiplier,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w
)
else
invokeBackward(
dmlScript,
outSuffix,
List[String]("dOut" + id, dWeight, dBias),
dout,
Hout,
Wout,
X,
weight,
bias,
numChannels,
Hin,
Win,
kernel_h,
kernel_w,
stride_h,
stride_w,
pad_h,
pad_w
)
// if not depthwise n * c_o * h_o * w_o, where h_o = (h_i + 2 * pad_h - kernel_h) / stride_h + 1 and w_o likewise.
// else (N, C/M*Hout*Wout)
override def outputShape = if (isDepthWise) (C_DivideBy_M().toString, Hout, Wout) else (numChannels, Hout, Wout)
// -------------------------------------------------
def numChannels = bottomLayerOutputShape._1
def Hin = bottomLayerOutputShape._2
def Win = bottomLayerOutputShape._3
// Hout = strideh * (Hin-1) - 2*padh + Hf + out_padh
def Hout: String =
try {
(stride_h.toInt * (Hin.toInt - 1) - 2 * pad_h.toInt + kernel_h.toInt).toString()
} catch {
case _: Throwable => stride_h + " * " + "(" + Hin + "-1) - 2*" + pad_h + " + " + kernel_h
}
// Wout = stridew * (Win-1) - 2*padw + Wf + out_padw
def Wout: String =
try {
(stride_w.toInt * (Win.toInt - 1) - 2 * pad_w.toInt + kernel_w.toInt).toString()
} catch {
case _: Throwable => stride_w + " * " + "(" + Win + "-1) - 2*" + pad_w + " + " + kernel_w
}
// -------------------------------------------------
def convParam = param.getConvolutionParam
// num_output (c_o): the number of filters
def numKernels = convParam.getNumOutput.toString
// kernel_size (or kernel_h and kernel_w): specifies height and width of each filter
def kernel_h =
if (convParam.hasKernelH) convParam.getKernelH.toString
else if (convParam.getKernelSizeCount > 0) convParam.getKernelSize(0).toString
else throw new LanguageException("Incorrect kernel parameters")
def kernel_w =
if (convParam.hasKernelW) convParam.getKernelW.toString
else if (convParam.getKernelSizeCount > 0) convParam.getKernelSize(0).toString
else throw new LanguageException("Incorrect kernel parameters")
// stride (or stride_h and stride_w) [default 1]: specifies the intervals at which to apply the filters to the input
def stride_h =
if (convParam.hasStrideH) convParam.getStrideH.toString
else if (convParam.getStrideCount > 0) convParam.getStride(0).toString
else "1"
def stride_w =
if (convParam.hasStrideW) convParam.getStrideW.toString
else if (convParam.getStrideCount > 0) convParam.getStride(0).toString
else "1"
// pad (or pad_h and pad_w) [default 0]: specifies the number of pixels to (implicitly) add to each side of the input
def pad_h =
if (convParam.hasPadH) convParam.getPadH.toString
else if (convParam.getPadCount > 0) convParam.getPad(0).toString
else "0"
def pad_w =
if (convParam.hasPadW) convParam.getPadW.toString
else if (convParam.getPadCount > 0) convParam.getPad(0).toString
else "0"
}
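// Illustrative integer form of the transpose-convolution output size computed by
// DeConvolution.Hout/Wout above (out_padh/out_padw are passed as 0 by this generator).
object DeconvOutputSketch {
  def outDim(in: Int, kernel: Int, stride: Int, pad: Int): Int =
    stride * (in - 1) - 2 * pad + kernel // e.g. outDim(14, 2, 2, 0) == 28
}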
|
asurve/incubator-systemml
|
src/main/scala/org/apache/sysml/api/dl/CaffeLayer.scala
|
Scala
|
apache-2.0
| 64,818 |
package org.openmole.buildsystem
import org.apache.commons.compress.archivers.tar.{ TarArchiveEntry, TarArchiveOutputStream }
import sbt._
import Keys._
import scala.util.matching.Regex
import OMKeys._
import java.util.zip.GZIPOutputStream
import resource._
import java.io.{ BufferedOutputStream, FileOutputStream }
import scala.io.Source
import com.typesafe.sbt.osgi.OsgiKeys._
/**
* Created with IntelliJ IDEA.
* User: luft
* Date: 6/5/13
* Time: 3:42 PM
*/
trait Assembly { self: BuildSystemDefaults ⇒
lazy val tarProject: Seq[Setting[_]] = Seq(
Tar.name := "assemble.tar.gz",
Tar.innerFolder := "",
Tar.tar <<= (Tar.folder, streams, target, Tar.name, Tar.innerFolder, streams) map tarImpl
)
private def recursiveCopy(from: File, to: File, streams: TaskStreams): Unit = {
if (from.isDirectory) {
to.mkdirs()
for {
f ← from.listFiles()
} recursiveCopy(f, new File(to, f.getName), streams)
}
else if (!to.exists() || from.lastModified() > to.lastModified) {
streams.log.info(s"Copy file $from to $to ")
from.getParentFile.mkdirs
IO.copyFile(from, to, preserveLastModified = true)
}
}
private def copyFileTask(from: File, destinationDir: File, streams: TaskStreams, name: Option[String] = None) = {
val to: File = if (from.isDirectory) destinationDir else destinationDir / name.getOrElse(from.getName)
recursiveCopy(from, to, streams)
from -> to
}
private def rename(srcPath: File, depMap: Map[Regex, String ⇒ String]) =
depMap.keys.find(
_.findFirstIn(srcPath.getName).isDefined
).map(k ⇒ depMap(k)(srcPath.getName)).getOrElse { srcPath.getName }
private def copyLibraryDependencies(
cp: Seq[Attributed[File]],
out: File,
depMap: Map[Regex, String ⇒ String],
depFilter: ModuleID ⇒ Boolean,
streams: TaskStreams) = {
cp.flatMap { attributed ⇒
attributed.get(Keys.moduleID.key) match {
case Some(moduleId) ⇒
if (depFilter(moduleId)) Some(attributed.data) else None
case None ⇒ None
}
}.map { srcPath ⇒
val name = rename(srcPath, depMap)
copyFileTask(srcPath, out, streams, name = Some(name))
}
}
def assemblySettings = Seq(
downloads := Nil,
resourcesAssemble := Seq.empty,
setExecutable := Seq.empty,
assemblyPath := target.value / "assemble",
assemblyDependenciesPath := assemblyPath.value,
assemble <<=
(assemblyPath, setExecutable) map {
(path, files) ⇒
files.foreach(f ⇒ new File(path, f).setExecutable(true))
path
} dependsOn (copyResources, (downloads, assemblyPath, target, streams) map urlDownloader),
Tar.folder <<= assemble,
bundleProj := false,
install := true,
installRemote := true,
dependencyNameMap := Map.empty[Regex, String ⇒ String],
dependencyFilter := { _ ⇒ true },
copyResources <<=
(resourcesAssemble, streams) map {
case (resources, s) ⇒
resources.toSeq.map { case (from, to) ⇒ copyFileTask(from, to, s) }
},
copyResources <++= (externalDependencyClasspath in Compile, assemblyDependenciesPath, dependencyNameMap, dependencyFilter, streams) map copyLibraryDependencies
)
def generateConfigImpl(plugins: File, header: String, config: File, startLevels: Seq[(String, Int)]): File = {
def line(file: File) = {
val name = file.getName
val level = startLevels.find { case (s, _) ⇒ name.contains(s) }.map { case (_, l) ⇒ l }
level match {
case None ⇒ name
case Some(l) ⇒ s"$name@$l:start"
}
}
def content =
s"""
|$header
|osgi.bundles=${plugins.listFiles().filter(!_.getName.startsWith("org.eclipse.osgi")).map(line).mkString(",")}
""".stripMargin
config.getParentFile.mkdirs
IO.write(config, content)
config
}
import OMKeys.OSGiApplication._
def osgiApplicationSettings =
Seq(
startLevels := Seq.empty,
assemble <<= assemble dependsOn {
(pluginsDirectory, header, config, startLevels) map generateConfigImpl dependsOn (copyResources)
}
)
def tarImpl(folder: File, s: TaskStreams, t: File, name: String, innerFolder: String, streams: TaskStreams): File = {
val out = t / name
val tgzOS = managed {
val tos = new TarArchiveOutputStream(new BufferedOutputStream(new GZIPOutputStream(new FileOutputStream(out))))
tos.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU)
tos
}
def findFiles(f: File): Set[File] = if (f.isDirectory) (f.listFiles map findFiles flatten).toSet else Set(f)
val files: Set[File] = findFiles(folder).toSet
val fn = FileFunction.cached(t / "zip-cache", FilesInfo.lastModified, FilesInfo.exists) {
fileSet ⇒
s.log.info("Zipping:\\n\\t")
val lCP = folder //targetFolders reduceLeft findLeastCommonPath
// s.log.info(lCP.getAbsolutePath)
// s.log.info(targetFolders.last.relativeTo(lCP).get.getPath)
for {
os ← tgzOS
file ← fileSet
is ← managed(Source.fromFile(file)(scala.io.Codec.ISO8859))
} {
val relativeFile = innerFolder + "/" + (file relativeTo lCP).get.getPath
s.log.info("\\t - " + relativeFile)
val entry = new TarArchiveEntry(file, relativeFile)
entry.setSize(file.length)
if (file.canExecute) entry.setMode(TarArchiveEntry.DEFAULT_FILE_MODE | 111)
os.putArchiveEntry(entry)
for (c ← is.iter) { os.write(c.toByte) }
os.closeArchiveEntry()
}
Set(out)
}
fn(files).head
}
def urlDownloader(urls: Seq[(URL, String)], assembleDir: File, targetDir: File, s: TaskStreams) = {
def cache(url: URL) = targetDir / s"url-cache-${Hash.toHex(Hash(url.toString))}"
targetDir.mkdirs
for {
(url, file) ← urls
} yield {
val cacheFile = cache(url)
val destFile = new File(assembleDir, file)
destFile.getParentFile.mkdirs
if (!cacheFile.exists) {
s.log.info("Downloading " + url + " to " + destFile)
val os = managed(new BufferedOutputStream(new FileOutputStream(destFile)))
os.foreach(BasicIO.transferFully(url.openStream, _))
cacheFile.createNewFile()
}
file
}
}
}
object Assembly {
  // Checks whether the setting `key` exists for project p in Seq s. If it does, applies the filter function to the key's value; if that returns true, the project stays in the seq.
def projFilter[T](s: Seq[ProjectReference], key: SettingKey[T], filter: T ⇒ Boolean, intransitive: Boolean): Def.Initialize[Seq[ProjectReference]] = {
// (key in p) ? returns Initialize[Option[T]]
// Project.Initialize.join takes a Seq[Initialize[_]] and gives back an Initialize[Seq[_]]
val ret = Def.Initialize.join(s map { p ⇒ (key in p).?(i ⇒ i -> p) })(_ filter {
case (None, _) ⇒ false
case (Some(v), _) ⇒ filter(v)
})(_ map { _._2 })
lazy val ret2 = Def.bind(ret) { r ⇒
val x = r.map(expandToDependencies)
val y = Def.Initialize.join(x)
y { _.flatten.toSet.toSeq } //make sure all references are unique
}
if (intransitive) ret else ret2
}
//recursively explores the dependency tree of pr and adds all dependencies to the list of projects to be copied
def expandToDependencies(pr: ProjectReference): Def.Initialize[Seq[ProjectReference]] = {
val r = (thisProject in pr) { _.dependencies.map(_.project) }
val r3 = Def.bind(Def.bind(r) { ret ⇒ Def.Initialize.join(ret map expandToDependencies) }) { ret ⇒ r(first ⇒ pr +: ret.flatten) }
r3
}
implicit def ProjRefs2RichProjectSeq(s: Seq[ProjectReference]) = new RichProjectSeq(Def.value(s))
implicit def InitProjRefs2RichProjectSeq(s: Def.Initialize[Seq[ProjectReference]]) = new RichProjectSeq(s)
class RichProjectSeq(s: Def.Initialize[Seq[ProjectReference]]) {
def keyFilter[T](key: SettingKey[T], filter: (T) ⇒ Boolean, intransitive: Boolean = false) = projFilter(s, key, filter, intransitive)
def sendTo(to: Def.Initialize[File]) = sendBundles(s zip to) //TODO: This function is specific to OSGI bundled projects. Make it less specific?
}
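  // A hedged usage sketch of the filtering DSL above: keep only the projects whose
  // `bundleProj` setting is true (assuming it is a SettingKey[Boolean], as its use in
  // assemblySettings suggests) and send their OSGi bundles to a directory setting.
  // `allProjects` and `bundleDir` are placeholder parameters, not settings of this build.
  def collectBundles(
    allProjects: Seq[ProjectReference],
    bundleDir: Def.Initialize[File]): Def.Initialize[Task[Seq[(File, File)]]] =
    allProjects.keyFilter(bundleProj, (b: Boolean) => b) sendTo bundleDir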
def projFilter[T](s: Def.Initialize[Seq[ProjectReference]], key: SettingKey[T], filter: T ⇒ Boolean, intransitive: Boolean): Def.Initialize[Seq[ProjectReference]] = {
Def.bind(s)(j ⇒ projFilter(j, key, filter, intransitive))
}
//TODO: New API makes this much simpler
//val bundles: Seq[FIle] = bundle.all( ScopeFilter( inDependencies(ref) ) ).value
def sendBundles(bundles: Def.Initialize[(Seq[ProjectReference], File)]): Def.Initialize[Task[Seq[(File, File)]]] = Def.bind(bundles) {
case (projs, to) ⇒
require(projs.nonEmpty)
val seqOTasks: Def.Initialize[Seq[Task[Seq[(File, File)]]]] = Def.Initialize.join(projs.map(p ⇒ (bundle in p) map {
f ⇒ Seq(f -> to)
}))
seqOTasks { seq ⇒ seq.reduceLeft[Task[Seq[(File, File)]]] { case (a, b) ⇒ a flatMap { i ⇒ b map { _ ++ i } } } }
}
}
|
ISCPIF/PSEExperiments
|
openmole-src/build-system/src/main/scala/org/openmole/buildsystem/Assembly.scala
|
Scala
|
agpl-3.0
| 9,144 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.util.UUID
import scala.collection.mutable
import scala.language.reflectiveCalls
import org.scalactic.TolerantNumerics
import org.scalatest.concurrent.AsyncAssertions.Waiter
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.BeforeAndAfter
import org.scalatest.PrivateMethodTester._
import org.apache.spark.SparkException
import org.apache.spark.scheduler._
import org.apache.spark.sql.{Encoder, SparkSession}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.StreamingQueryListener._
import org.apache.spark.sql.streaming.util.StreamManualClock
import org.apache.spark.util.JsonProtocol
class StreamingQueryListenerSuite extends StreamTest with BeforeAndAfter {
import testImplicits._
// To make === between double tolerate inexact values
implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
after {
spark.streams.active.foreach(_.stop())
assert(spark.streams.active.isEmpty)
assert(addedListeners().isEmpty)
// Make sure we don't leak any events to the next test
spark.sparkContext.listenerBus.waitUntilEmpty(10000)
}
testQuietly("single listener, check trigger events are generated correctly") {
val clock = new StreamManualClock
val inputData = new MemoryStream[Int](0, sqlContext)
val df = inputData.toDS().as[Long].map { 10 / _ }
val listener = new EventCollector
case class AssertStreamExecThreadToWaitForClock()
extends AssertOnQuery(q => {
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.asInstanceOf[StreamManualClock].isStreamWaitingAt(clock.getTimeMillis))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}, "")
try {
// No events until started
spark.streams.addListener(listener)
assert(listener.startEvent === null)
assert(listener.progressEvents.isEmpty)
assert(listener.terminationEvent === null)
testStream(df, OutputMode.Append)(
// Start event generated when query started
StartStream(ProcessingTime(100), triggerClock = clock),
AssertOnQuery { query =>
assert(listener.startEvent !== null)
assert(listener.startEvent.id === query.id)
assert(listener.startEvent.runId === query.runId)
assert(listener.startEvent.name === query.name)
assert(listener.progressEvents.isEmpty)
assert(listener.terminationEvent === null)
true
},
// Progress event generated when data processed
AddData(inputData, 1, 2),
AdvanceManualClock(100),
AssertStreamExecThreadToWaitForClock(),
CheckAnswer(10, 5),
AssertOnQuery { query =>
assert(listener.progressEvents.nonEmpty)
        // SPARK-18868: We can't use query.lastProgress, because progressEvents keeps only
        // updates with non-zero input rows, while lastProgress may be a zero-input-row trigger
val lastNonZeroProgress = query.recentProgress.filter(_.numInputRows > 0).lastOption
.getOrElse(fail("No progress updates received in StreamingQuery!"))
assert(listener.progressEvents.last.json === lastNonZeroProgress.json)
assert(listener.terminationEvent === null)
true
},
// Termination event generated when stopped cleanly
StopStream,
AssertOnQuery { query =>
eventually(Timeout(streamingTimeout)) {
assert(listener.terminationEvent !== null)
assert(listener.terminationEvent.id === query.id)
assert(listener.terminationEvent.runId === query.runId)
assert(listener.terminationEvent.exception === None)
}
listener.checkAsyncErrors()
listener.reset()
true
},
// Termination event generated with exception message when stopped with error
StartStream(ProcessingTime(100), triggerClock = clock),
AssertStreamExecThreadToWaitForClock(),
AddData(inputData, 0),
AdvanceManualClock(100), // process bad data
ExpectFailure[SparkException](),
AssertOnQuery { query =>
eventually(Timeout(streamingTimeout)) {
assert(listener.terminationEvent !== null)
assert(listener.terminationEvent.id === query.id)
assert(listener.terminationEvent.exception.nonEmpty)
// Make sure that the exception message reported through listener
// contains the actual exception and relevant stack trace
assert(!listener.terminationEvent.exception.get.contains("StreamingQueryException"))
assert(
listener.terminationEvent.exception.get.contains("java.lang.ArithmeticException"))
assert(listener.terminationEvent.exception.get.contains("StreamingQueryListenerSuite"))
}
listener.checkAsyncErrors()
true
}
)
} finally {
spark.streams.removeListener(listener)
}
}
test("SPARK-19594: all of listeners should receive QueryTerminatedEvent") {
val df = MemoryStream[Int].toDS().as[Long]
val listeners = (1 to 5).map(_ => new EventCollector)
try {
listeners.foreach(listener => spark.streams.addListener(listener))
testStream(df, OutputMode.Append)(
StartStream(),
StopStream,
AssertOnQuery { query =>
eventually(Timeout(streamingTimeout)) {
listeners.foreach(listener => assert(listener.terminationEvent !== null))
listeners.foreach(listener => assert(listener.terminationEvent.id === query.id))
listeners.foreach(listener => assert(listener.terminationEvent.runId === query.runId))
listeners.foreach(listener => assert(listener.terminationEvent.exception === None))
}
listeners.foreach(listener => listener.checkAsyncErrors())
listeners.foreach(listener => listener.reset())
true
}
)
} finally {
listeners.foreach(spark.streams.removeListener)
}
}
test("adding and removing listener") {
def isListenerActive(listener: EventCollector): Boolean = {
listener.reset()
testStream(MemoryStream[Int].toDS)(
StartStream(),
StopStream
)
listener.startEvent != null
}
try {
val listener1 = new EventCollector
val listener2 = new EventCollector
spark.streams.addListener(listener1)
assert(isListenerActive(listener1) === true)
assert(isListenerActive(listener2) === false)
spark.streams.addListener(listener2)
assert(isListenerActive(listener1) === true)
assert(isListenerActive(listener2) === true)
spark.streams.removeListener(listener1)
assert(isListenerActive(listener1) === false)
assert(isListenerActive(listener2) === true)
} finally {
addedListeners().foreach(spark.streams.removeListener)
}
}
test("event ordering") {
val listener = new EventCollector
withListenerAdded(listener) {
for (i <- 1 to 100) {
listener.reset()
require(listener.startEvent === null)
testStream(MemoryStream[Int].toDS)(
StartStream(),
Assert(listener.startEvent !== null, "onQueryStarted not called before query returned"),
StopStream,
Assert { listener.checkAsyncErrors() }
)
}
}
}
test("QueryStartedEvent serialization") {
def testSerialization(event: QueryStartedEvent): Unit = {
val json = JsonProtocol.sparkEventToJson(event)
val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryStartedEvent]
assert(newEvent.id === event.id)
assert(newEvent.runId === event.runId)
assert(newEvent.name === event.name)
}
testSerialization(new QueryStartedEvent(UUID.randomUUID, UUID.randomUUID, "name"))
testSerialization(new QueryStartedEvent(UUID.randomUUID, UUID.randomUUID, null))
}
test("QueryProgressEvent serialization") {
def testSerialization(event: QueryProgressEvent): Unit = {
import scala.collection.JavaConverters._
val json = JsonProtocol.sparkEventToJson(event)
val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryProgressEvent]
assert(newEvent.progress.json === event.progress.json) // json as a proxy for equality
assert(newEvent.progress.durationMs.asScala === event.progress.durationMs.asScala)
assert(newEvent.progress.eventTime.asScala === event.progress.eventTime.asScala)
}
testSerialization(new QueryProgressEvent(StreamingQueryStatusAndProgressSuite.testProgress1))
testSerialization(new QueryProgressEvent(StreamingQueryStatusAndProgressSuite.testProgress2))
}
test("QueryTerminatedEvent serialization") {
def testSerialization(event: QueryTerminatedEvent): Unit = {
val json = JsonProtocol.sparkEventToJson(event)
val newEvent = JsonProtocol.sparkEventFromJson(json).asInstanceOf[QueryTerminatedEvent]
assert(newEvent.id === event.id)
assert(newEvent.runId === event.runId)
assert(newEvent.exception === event.exception)
}
val exception = new RuntimeException("exception")
testSerialization(
new QueryTerminatedEvent(UUID.randomUUID, UUID.randomUUID, Some(exception.getMessage)))
}
test("only one progress event per interval when no data") {
// This test will start a query but not push any data, and then check if we push too many events
withSQLConf(SQLConf.STREAMING_NO_DATA_PROGRESS_EVENT_INTERVAL.key -> "100ms") {
@volatile var numProgressEvent = 0
val listener = new StreamingQueryListener {
override def onQueryStarted(event: QueryStartedEvent): Unit = {}
override def onQueryProgress(event: QueryProgressEvent): Unit = {
numProgressEvent += 1
}
override def onQueryTerminated(event: QueryTerminatedEvent): Unit = {}
}
spark.streams.addListener(listener)
try {
val input = new MemoryStream[Int](0, sqlContext) {
@volatile var numTriggers = 0
override def getOffset: Option[Offset] = {
numTriggers += 1
super.getOffset
}
}
val clock = new StreamManualClock()
val actions = mutable.ArrayBuffer[StreamAction]()
actions += StartStream(trigger = ProcessingTime(10), triggerClock = clock)
for (_ <- 1 to 100) {
actions += AdvanceManualClock(10)
}
actions += AssertOnQuery { _ =>
eventually(timeout(streamingTimeout)) {
assert(input.numTriggers > 100) // at least 100 triggers have occurred
}
true
}
// `recentProgress` should not receive too many no data events
actions += AssertOnQuery { q =>
q.recentProgress.size > 1 && q.recentProgress.size <= 11
}
testStream(input.toDS)(actions: _*)
spark.sparkContext.listenerBus.waitUntilEmpty(10000)
// 11 is the max value of the possible numbers of events.
assert(numProgressEvent > 1 && numProgressEvent <= 11)
} finally {
spark.streams.removeListener(listener)
}
}
}
test("listener only posts events from queries started in the related sessions") {
val session1 = spark.newSession()
val session2 = spark.newSession()
val collector1 = new EventCollector
val collector2 = new EventCollector
def runQuery(session: SparkSession): Unit = {
collector1.reset()
collector2.reset()
val mem = MemoryStream[Int](implicitly[Encoder[Int]], session.sqlContext)
testStream(mem.toDS)(
AddData(mem, 1, 2, 3),
CheckAnswer(1, 2, 3)
)
session.sparkContext.listenerBus.waitUntilEmpty(5000)
}
def assertEventsCollected(collector: EventCollector): Unit = {
assert(collector.startEvent !== null)
assert(collector.progressEvents.nonEmpty)
assert(collector.terminationEvent !== null)
}
def assertEventsNotCollected(collector: EventCollector): Unit = {
assert(collector.startEvent === null)
assert(collector.progressEvents.isEmpty)
assert(collector.terminationEvent === null)
}
assert(session1.ne(session2))
assert(session1.streams.ne(session2.streams))
withListenerAdded(collector1, session1) {
assert(addedListeners(session1).nonEmpty)
withListenerAdded(collector2, session2) {
assert(addedListeners(session2).nonEmpty)
// query on session1 should send events only to collector1
runQuery(session1)
assertEventsCollected(collector1)
assertEventsNotCollected(collector2)
// query on session2 should send events only to collector2
runQuery(session2)
assertEventsCollected(collector2)
assertEventsNotCollected(collector1)
}
}
}
testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.0") {
// query-event-logs-version-2.0.0.txt has all types of events generated by
// Structured Streaming in Spark 2.0.0.
// SparkListenerApplicationEnd is the only valid event and it's the last event. We use it
// to verify that we can skip broken jsons generated by Structured Streaming.
    testReplayListenerBusWithBrokenEventJsons("query-event-logs-version-2.0.0.txt")
}
testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.1") {
// query-event-logs-version-2.0.1.txt has all types of events generated by
// Structured Streaming in Spark 2.0.1.
// SparkListenerApplicationEnd is the only valid event and it's the last event. We use it
// to verify that we can skip broken jsons generated by Structured Streaming.
    testReplayListenerBusWithBrokenEventJsons("query-event-logs-version-2.0.1.txt")
}
testQuietly("ReplayListenerBus should ignore broken event jsons generated in 2.0.2") {
// query-event-logs-version-2.0.2.txt has all types of events generated by
// Structured Streaming in Spark 2.0.2.
// SparkListenerApplicationEnd is the only valid event and it's the last event. We use it
// to verify that we can skip broken jsons generated by Structured Streaming.
    testReplayListenerBusWithBrokenEventJsons("query-event-logs-version-2.0.2.txt")
}
  private def testReplayListenerBusWithBrokenEventJsons(fileName: String): Unit = {
val input = getClass.getResourceAsStream(s"/structured-streaming/$fileName")
val events = mutable.ArrayBuffer[SparkListenerEvent]()
try {
val replayer = new ReplayListenerBus() {
// Redirect all parsed events to `events`
override def doPostEvent(
listener: SparkListenerInterface,
event: SparkListenerEvent): Unit = {
events += event
}
}
// Add a dummy listener so that "doPostEvent" will be called.
replayer.addListener(new SparkListener {})
replayer.replay(input, fileName)
// SparkListenerApplicationEnd is the only valid event
assert(events.size === 1)
assert(events(0).isInstanceOf[SparkListenerApplicationEnd])
} finally {
input.close()
}
}
private def withListenerAdded(
listener: StreamingQueryListener,
session: SparkSession = spark)(body: => Unit): Unit = {
try {
failAfter(streamingTimeout) {
session.streams.addListener(listener)
body
}
} finally {
session.streams.removeListener(listener)
}
}
private def addedListeners(session: SparkSession = spark): Array[StreamingQueryListener] = {
val listenerBusMethod =
PrivateMethod[StreamingQueryListenerBus]('listenerBus)
val listenerBus = session.streams invokePrivate listenerBusMethod()
listenerBus.listeners.toArray.map(_.asInstanceOf[StreamingQueryListener])
}
/** Collects events from the StreamingQueryListener for testing */
class EventCollector extends StreamingQueryListener {
// to catch errors in the async listener events
@volatile private var asyncTestWaiter = new Waiter
@volatile var startEvent: QueryStartedEvent = null
@volatile var terminationEvent: QueryTerminatedEvent = null
private val _progressEvents = new mutable.Queue[StreamingQueryProgress]
def progressEvents: Seq[StreamingQueryProgress] = _progressEvents.synchronized {
_progressEvents.filter(_.numInputRows > 0)
}
def reset(): Unit = {
startEvent = null
terminationEvent = null
_progressEvents.clear()
asyncTestWaiter = new Waiter
}
def checkAsyncErrors(): Unit = {
asyncTestWaiter.await(timeout(streamingTimeout))
}
override def onQueryStarted(queryStarted: QueryStartedEvent): Unit = {
asyncTestWaiter {
startEvent = queryStarted
}
}
override def onQueryProgress(queryProgress: QueryProgressEvent): Unit = {
asyncTestWaiter {
assert(startEvent != null, "onQueryProgress called before onQueryStarted")
_progressEvents.synchronized { _progressEvents += queryProgress.progress }
}
}
override def onQueryTerminated(queryTerminated: QueryTerminatedEvent): Unit = {
asyncTestWaiter {
assert(startEvent != null, "onQueryTerminated called before onQueryStarted")
terminationEvent = queryTerminated
}
asyncTestWaiter.dismiss()
}
}
}
|
someorz/spark
|
sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQueryListenerSuite.scala
|
Scala
|
apache-2.0
| 18,399 |
package com.github.leifker.spark.test
import com.github.leifker.cassandra.config.{CassandraConfig, KeyspaceConfig}
import com.github.leifker.spark.config.CassandraSparkConfig
/**
* Created by dleifker on 2/16/17.
*/
object ITestContext {
val localConfig = CassandraSparkConfig(new CassandraConfig("alpha,epsilon,eta,delta"), "local[4]")
val clusterConfig = CassandraSparkConfig(new CassandraConfig("alpha,epsilon,eta,delta"), "spark://zeta:7077", Seq("spark/build/libs/spark-0.0.1-SNAPSHOT-shadow.jar"))
val amazonReviewsKeyspace = new KeyspaceConfig("amazon_reviews")
}
|
leifker/geo-sentiment
|
spark/src/integration-test/scala/com/github/leifker/spark/test/ITestContext.scala
|
Scala
|
gpl-3.0
| 583 |
package mesosphere.marathon
package core.deployment.impl
import java.util.concurrent.LinkedBlockingDeque
import akka.Done
import akka.actor.{ActorRef, Props}
import akka.event.EventStream
import akka.stream.scaladsl.Source
import akka.testkit.TestActor.{AutoPilot, NoAutoPilot}
import akka.testkit.{ImplicitSender, TestActor, TestActorRef, TestProbe}
import mesosphere.AkkaUnitTest
import mesosphere.marathon.MarathonSchedulerActor.{DeploymentFailed, DeploymentStarted}
import mesosphere.marathon.core.deployment.{DeploymentPlan, DeploymentStepInfo}
import mesosphere.marathon.core.deployment.impl.DeploymentActor.Cancel
import mesosphere.marathon.core.deployment.impl.DeploymentManagerActor._
import mesosphere.marathon.core.health.HealthCheckManager
import mesosphere.marathon.core.launchqueue.LaunchQueue
import mesosphere.marathon.core.leadership.AlwaysElectedLeadershipModule
import mesosphere.marathon.core.readiness.ReadinessCheckExecutor
import mesosphere.marathon.core.storage.store.impl.memory.InMemoryPersistenceStore
import mesosphere.marathon.core.task.termination.KillService
import mesosphere.marathon.core.task.tracker.InstanceTracker
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.metrics.dummy.DummyMetrics
import mesosphere.marathon.state.AppDefinition
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.storage.repository.{AppRepository, DeploymentRepository}
import mesosphere.marathon.test.{GroupCreation, MarathonTestHelper}
import org.apache.mesos.SchedulerDriver
import org.rogach.scallop.ScallopConf
import org.scalatest.concurrent.Eventually
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.util.Success
class DeploymentManagerActorTest extends AkkaUnitTest with ImplicitSender with GroupCreation with Eventually {
"DeploymentManager" should {
"Deployment" in {
val f = new Fixture
val manager = f.deploymentManager()
val app = AppDefinition("app".toRootPath, cmd = Some("sleep"))
val oldGroup = createRootGroup()
val newGroup = createRootGroup(Map(app.id -> app))
val plan = DeploymentPlan(oldGroup, newGroup)
manager ! StartDeployment(plan, ActorRef.noSender)
awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
}
"Finished deployment" in {
val f = new Fixture
val manager = f.deploymentManager()
val app = AppDefinition("app".toRootPath, cmd = Some("sleep"))
val oldGroup = createRootGroup()
val newGroup = createRootGroup(Map(app.id -> app))
val plan = DeploymentPlan(oldGroup, newGroup)
manager ! StartDeployment(plan, ActorRef.noSender)
awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
manager ! DeploymentFinished(plan, Success(Done))
awaitCond(manager.underlyingActor.runningDeployments.isEmpty, 5.seconds)
}
"Able to see deployment when listing deployments after it was started" in {
import akka.pattern.ask
val f = new Fixture
val manager = f.deploymentManager()
val app = AppDefinition("app".toRootPath, cmd = Some("sleep"))
val oldGroup = createRootGroup()
val newGroup = createRootGroup(Map(app.id -> app))
val plan = DeploymentPlan(oldGroup, newGroup)
manager ! StartDeployment(plan, ActorRef.noSender)
eventually {
val runningDeployments = (manager.actorRef ? ListRunningDeployments).mapTo[Future[Seq[DeploymentStepInfo]]].futureValue.futureValue
runningDeployments.size should be(1)
runningDeployments.head.plan should be (plan)
}
}
"Conflicting not forced deployment" in {
val f = new Fixture
val manager = f.deploymentManager()
val app = AppDefinition("app".toRootPath, cmd = Some("sleep"))
val oldGroup = createRootGroup()
val newGroup = createRootGroup(Map(app.id -> app))
val plan = DeploymentPlan(oldGroup, newGroup, id = Some("d1"))
val probe = TestProbe()
manager ! StartDeployment(plan, ActorRef.noSender)
awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
manager ! StartDeployment(plan.copy(id = "d2"), probe.ref, force = false)
probe.expectMsgType[DeploymentFailed]
manager.underlyingActor.runningDeployments.size should be (1)
manager.underlyingActor.runningDeployments(plan.id).status should be (DeploymentStatus.Deploying)
}
"Conflicting forced deployment" in {
val f = new Fixture
val manager = f.deploymentManager()
val app = AppDefinition("app".toRootPath, cmd = Some("sleep"))
val oldGroup = createRootGroup()
val newGroup = createRootGroup(Map(app.id -> app))
val plan = DeploymentPlan(oldGroup, newGroup, id = Some("b1"))
val probe = TestProbe()
manager ! StartDeployment(plan, probe.ref)
probe.expectMsgType[DeploymentStarted]
awaitCond(manager.underlyingActor.runningDeployments.contains(plan.id), 5.seconds)
manager.underlyingActor.runningDeployments(plan.id).status should be(DeploymentStatus.Deploying)
manager ! StartDeployment(plan.copy(id = "d2"), probe.ref, force = true)
probe.expectMsgType[DeploymentStarted]
manager.underlyingActor.runningDeployments(plan.id).status should be (DeploymentStatus.Canceling)
eventually(manager.underlyingActor.runningDeployments("d2").status should be (DeploymentStatus.Deploying))
}
"Multiple conflicting forced deployments" in {
val f = new Fixture
val manager = f.deploymentManager()
val app = AppDefinition("app".toRootPath, cmd = Some("sleep"))
val oldGroup = createRootGroup()
val newGroup = createRootGroup(Map(app.id -> app))
val plan = DeploymentPlan(oldGroup, newGroup, id = Some("d1"))
val probe = TestProbe()
manager ! StartDeployment(plan, probe.ref)
probe.expectMsgType[DeploymentStarted]
manager.underlyingActor.runningDeployments("d1").status should be (DeploymentStatus.Deploying)
manager ! StartDeployment(plan.copy(id = "d2"), probe.ref, force = true)
probe.expectMsgType[DeploymentStarted]
manager.underlyingActor.runningDeployments("d1").status should be (DeploymentStatus.Canceling)
manager.underlyingActor.runningDeployments("d2").status should be (DeploymentStatus.Deploying)
manager ! StartDeployment(plan.copy(id = "d3"), probe.ref, force = true)
probe.expectMsgType[DeploymentStarted]
      // Since the deployments are never actually started (no DeploymentActor is spawned), the
      // DeploymentFinished event is not sent and the deployments stay in the list of runningDeployments
manager.underlyingActor.runningDeployments("d1").status should be(DeploymentStatus.Canceling)
manager.underlyingActor.runningDeployments("d2").status should be(DeploymentStatus.Canceling)
manager.underlyingActor.runningDeployments("d3").status should be(DeploymentStatus.Scheduled)
}
"StopActor" in {
val f = new Fixture
val manager = f.deploymentManager()
val probe = TestProbe()
probe.setAutoPilot(new AutoPilot {
override def run(sender: ActorRef, msg: Any): AutoPilot = msg match {
case Cancel(_) =>
system.stop(probe.ref)
NoAutoPilot
}
})
val ex = new Exception("")
val res = manager.underlyingActor.stopActor(probe.ref, ex)
res.futureValue should be(Done)
}
"Cancel deployment" in {
val f = new Fixture
val manager = f.deploymentManager()
val app = AppDefinition("app".toRootPath, cmd = Some("sleep"))
val oldGroup = createRootGroup()
val newGroup = createRootGroup(Map(app.id -> app))
val plan = DeploymentPlan(oldGroup, newGroup)
val probe = TestProbe()
manager ! StartDeployment(plan, probe.ref)
probe.expectMsgType[DeploymentStarted]
manager ! CancelDeployment(plan)
eventually(manager.underlyingActor.runningDeployments(plan.id).status should be (DeploymentStatus.Canceling))
}
}
class Fixture {
val driver: SchedulerDriver = mock[SchedulerDriver]
val deploymentRepo = mock[DeploymentRepository]
val eventBus: EventStream = mock[EventStream]
val launchQueue: LaunchQueue = mock[LaunchQueue]
val config: MarathonConf = new ScallopConf(Seq("--master", "foo")) with MarathonConf {
verify()
}
implicit val ctx: ExecutionContext = ExecutionContext.Implicits.global
val taskTracker: InstanceTracker = MarathonTestHelper.createTaskTracker(
AlwaysElectedLeadershipModule.forRefFactory(system)
)
val taskKillService: KillService = mock[KillService]
val metrics: Metrics = DummyMetrics
val appRepo: AppRepository = AppRepository.inMemRepository(new InMemoryPersistenceStore(metrics))
val hcManager: HealthCheckManager = mock[HealthCheckManager]
val readinessCheckExecutor: ReadinessCheckExecutor = mock[ReadinessCheckExecutor]
    // A method that returns dummy props. Used to control the deployment's progress. Otherwise the tests become racy
    // and depend on when the DeploymentActor sends the DeploymentFinished message.
val deploymentActorProps: (Any, Any, Any, Any, Any, Any, Any, Any) => Props = (_, _, _, _, _, _, _, _) => TestActor.props(new LinkedBlockingDeque())
def deploymentManager(): TestActorRef[DeploymentManagerActor] = TestActorRef (
DeploymentManagerActor.props(
metrics,
taskTracker,
taskKillService,
launchQueue,
hcManager,
eventBus,
readinessCheckExecutor,
deploymentRepo,
deploymentActorProps)
)
deploymentRepo.store(any[DeploymentPlan]) returns Future.successful(Done)
deploymentRepo.delete(any[String]) returns Future.successful(Done)
deploymentRepo.all() returns Source.empty
launchQueue.add(any, any) returns Future.successful(Done)
}
}
|
gsantovena/marathon
|
src/test/scala/mesosphere/marathon/core/deployment/impl/DeploymentManagerActorTest.scala
|
Scala
|
apache-2.0
| 10,306 |
#set( $symbol_pound = '#' )
#set( $symbol_dollar = '$' )
#set( $symbol_escape = '\\' )
package ${package}
import nl.knaw.dans.lib.logging.DebugEnhancedLogging
import org.scalatra._
class ${javaName}Servlet(app: ${javaName}App,
version: String) extends ScalatraServlet with DebugEnhancedLogging {
get("/") {
contentType = "text/plain"
Ok(s"${name} Service running (${symbol_dollar}version)")
}
}
|
DANS-KNAW/easy-module-archetype
|
src/main/resources/archetype-resources/src/main/scala/__javaName__Servlet.scala
|
Scala
|
apache-2.0
| 434 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.impl.datasources
import quasar.connector.ByteStore
import quasar.impl.storage.PrefixStore
import java.lang.String
import scala.{Array, Boolean, Byte, Option, Unit}
import cats.Applicative
import shapeless._
import scodec._
final class PrefixByteStores[F[_]: Applicative, K: Codec] private (
store: PrefixStore.SCodec[F, K :: String :: HNil, Array[Byte]])
extends ByteStores[F, K] {
private class PrefixByteStore(prefix: K) extends ByteStore[F] {
def lookup(key: String): F[Option[Array[Byte]]] =
store.lookup(prefix :: key :: HNil)
def insert(key: String, value: Array[Byte]): F[Unit] =
store.insert(prefix :: key :: HNil, value)
def delete(key: String): F[Boolean] =
store.delete(prefix :: key :: HNil)
}
def get(prefix: K): F[ByteStore[F]] =
Applicative[F].pure(new PrefixByteStore(prefix))
def clear(prefix: K): F[Unit] =
store.deletePrefixed(prefix :: HNil)
}
object PrefixByteStores {
def apply[F[_]: Applicative, K: Codec](
store: PrefixStore.SCodec[F, K :: String :: HNil, Array[Byte]])
: ByteStores[F, K] =
new PrefixByteStores(store)
}
|
quasar-analytics/quasar
|
impl/src/main/scala/quasar/impl/datasources/PrefixByteStores.scala
|
Scala
|
apache-2.0
| 1,740 |
package controllers.s_your_partner
import play.api.test.FakeRequest
import utils.WithApplication
import controllers.mappings.Mappings
import models.view.CachedClaim
import org.specs2.mutable._
import models.{ NationalInsuranceNumber, DayMonthYear }
class GYourPartnerPersonalDetailsFormSpec extends Specification {
val title = "Mr"
val firstName = "John"
val middleName = "Mc"
val surname = "Doe"
val otherNames = "Duck"
val nino = "AB123456C"
val dateOfBirthDay = 5
val dateOfBirthMonth = 12
val dateOfBirthYear = 1990
val nationality = "British"
val separatedFromPartner = "yes"
section ("unit", models.domain.YourPartner.id)
"Your Partner Personal Details Form" should {
"map data into case class when partner answer is yes" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> firstName,
"middleName" -> middleName,
"surname" -> surname,
"otherNames" -> otherNames,
"nationalInsuranceNumber.nino" -> nino,
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> nationality,
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => "This mapping should not happen." must equalTo("Error"),
f => {
f.title must equalTo(Some(title))
f.firstName must equalTo(Some(firstName))
f.middleName must equalTo(Some(middleName))
f.surname must equalTo(Some(surname))
f.otherSurnames must equalTo(Some(otherNames))
f.nationalInsuranceNumber must equalTo(Some(NationalInsuranceNumber(Some(nino))))
f.dateOfBirth must equalTo(Some(DayMonthYear(Some(dateOfBirthDay), Some(dateOfBirthMonth), Some(dateOfBirthYear), None, None)))
f.nationality must equalTo(Some(nationality))
f.separatedFromPartner must equalTo(Some(separatedFromPartner))
f.isPartnerPersonYouCareFor must equalTo(Some("yes"))
f.hadPartnerSinceClaimDate must equalTo("yes")
})
}
"reject too many characters in text fields" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> "CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS",
"middleName" -> "CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS",
"surname" -> "CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS",
"otherNames" -> "CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS",
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> "CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS,CHARACTERS",
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => {
formWithErrors.errors.length must equalTo(5)
formWithErrors.errors(0).message must equalTo(Mappings.maxLengthError)
formWithErrors.errors(1).message must equalTo(Mappings.maxLengthError)
formWithErrors.errors(2).message must equalTo(Mappings.maxLengthError)
formWithErrors.errors(3).message must equalTo(Mappings.maxLengthError)
formWithErrors.errors(4).message must equalTo(Mappings.maxLengthError)
},
theirPersonalDetails => "This mapping should not happen." must equalTo("Valid"))
}
"have 7 mandatory fields" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("hadPartnerSinceClaimDate" -> "yes","middleName" -> "middle optional")).fold(
formWithErrors => {
formWithErrors.errors.length must equalTo(7)
formWithErrors.errors(0).message must equalTo("title.required")
formWithErrors.errors(1).message must equalTo("firstName.required")
formWithErrors.errors(2).message must equalTo("surname.required")
formWithErrors.errors(3).message must equalTo("dateOfBirth.required")
formWithErrors.errors(4).message must equalTo("separated.fromPartner.required")
formWithErrors.errors(5).message must equalTo("isPartnerPersonYouCareFor.required")
formWithErrors.errors(6).message must equalTo("partner.nationality.required")
},
theirPersonalDetails => "This mapping should not happen." must equalTo("Valid"))
}
"reject form when partner question not answered" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("hadPartnerSinceClaimDate" -> "")).fold(
formWithErrors => {
formWithErrors.errors.length must equalTo(2) // error.required and yesNo.invalid
},
theirPersonalDetails => "This mapping should not happen." must equalTo("Valid"))
}
"reject invalid national insurance number" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> firstName,
"middleName" -> middleName,
"surname" -> surname,
"otherNames" -> otherNames,
"nationalInsuranceNumber.nino" -> "INVALID",
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> nationality,
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => {
formWithErrors.errors.head.message must equalTo("error.nationalInsuranceNumber")
formWithErrors.errors.length must equalTo(1)
},
f => "This mapping should not happen." must equalTo("Valid"))
}
"reject invalid date" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> firstName,
"middleName" -> middleName,
"surname" -> surname,
"otherNames" -> otherNames,
"nationalInsuranceNumber.nino" -> nino,
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> "12345",
"partner.nationality" -> nationality,
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => {
formWithErrors.errors.head.message must equalTo(Mappings.errorInvalid)
formWithErrors.errors.length must equalTo(1)
},
f => "This mapping should not happen." must equalTo("Valid"))
}
"reject form without partnerispersonyoucarefor" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> firstName,
"middleName" -> middleName,
"surname" -> surname,
"otherNames" -> otherNames,
"nationalInsuranceNumber.nino" -> nino.toString,
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> "United States",
"separated.fromPartner" -> separatedFromPartner,
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => {
formWithErrors.errors.length must equalTo(1)
formWithErrors.errors.head.message must equalTo("isPartnerPersonYouCareFor.required")
},f => "This mapping should not happen." must equalTo("Valid"))
}
"accept nationality with space character, uppercase, lowercase and apostrophe" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> firstName,
"middleName" -> middleName,
"surname" -> surname,
"otherNames" -> otherNames,
"nationalInsuranceNumber.nino" -> nino.toString,
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> "United State's",
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => "This mapping should not happen." must equalTo("Error"),
f => {
f.nationality must equalTo(Some("United State's"))
})
}
"reject invalid nationality with numbers" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> firstName,
"middleName" -> middleName,
"surname" -> surname,
"otherNames" -> otherNames,
"nationalInsuranceNumber.nino" -> nino.toString,
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> "a$123456",
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => {
formWithErrors.errors.length must equalTo(1)
formWithErrors.errors.head.message must equalTo("error.restricted.characters")
},
f => "This mapping should not happen." must equalTo("Valid"))
}
"reject invalid nationality with special characters" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> firstName,
"middleName" -> middleName,
"surname" -> surname,
"otherNames" -> otherNames,
"nationalInsuranceNumber.nino" -> nino.toString,
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> "a!@£$%^&*(){}",
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => {
formWithErrors.errors.length must equalTo(1)
formWithErrors.errors.head.message must equalTo("error.restricted.characters")
},
f => "This mapping should not happen." must equalTo("Valid"))
}
/* at symbol accepted in most fields now */
"reject special characters" in new WithApplication {
GYourPartnerPersonalDetails.form(models.domain.Claim(CachedClaim.key), FakeRequest()).bind(
Map("title" -> title,
"firstName" -> "MyNa>me",
"middleName" -> "middleNam©e",
"surname" -> ";My Surn˙h∫ame;",
"otherNames" -> "at like col@bt ok now",
"nationalInsuranceNumber.nino" -> nino.toString,
"dateOfBirth.day" -> dateOfBirthDay.toString,
"dateOfBirth.month" -> dateOfBirthMonth.toString,
"dateOfBirth.year" -> dateOfBirthYear.toString,
"partner.nationality" -> "United States",
"separated.fromPartner" -> separatedFromPartner,
"isPartnerPersonYouCareFor"->"yes",
"hadPartnerSinceClaimDate" -> "yes")).fold(
formWithErrors => {
formWithErrors.errors.length must equalTo(3)
formWithErrors.errors.head.message must equalTo(Mappings.errorRestrictedCharacters)
},
f => "This mapping should not happen." must equalTo("Valid"))
}
}
section ("unit", models.domain.YourPartner.id)
}
|
Department-for-Work-and-Pensions/ClaimCapture
|
c3/test/controllers/s_your_partner/GYourPartnerPersonalDetailsFormSpec.scala
|
Scala
|
mit
| 12,844 |
package io.avici.truffle
import argonaut._
import Argonaut._
/**
* Created by Baqiao (Charles) Liu on 1/15/2016.
*/
/**
* Copyright 2016 Baqiao (Charles) Liu
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
object Playground extends App{
import io.avici.truffle.esprima.JsAst._
println(
Program(
List(
ExpressionStatement(
CallExpression(Identifier("println"), List(StringLiteral("hi")))
)
)
).asJson.spaces2
)
}
|
BakaBBQ/truffle-lang
|
src/main/scala/io/avici/truffle/Playground.scala
|
Scala
|
apache-2.0
| 1,002 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.livy.thriftserver.auth.ldap
import java.util.Hashtable
import javax.naming.{Context, NamingException}
import javax.naming.directory.InitialDirContext
import javax.security.sasl.AuthenticationException
import org.apache.livy.{LivyConf, Logging}
/**
* A factory for LDAP search objects.
*/
object LdapSearchFactory extends Logging {
@throws[NamingException]
private def createDirContext(
conf: LivyConf,
principal: String,
password: String): InitialDirContext = {
val env = new Hashtable[String, String]
val ldapUrl = conf.get(LivyConf.AUTH_LDAP_URL)
env.put(Context.INITIAL_CONTEXT_FACTORY, "com.sun.jndi.ldap.LdapCtxFactory")
env.put(Context.PROVIDER_URL, ldapUrl)
env.put(Context.SECURITY_AUTHENTICATION, "simple")
env.put(Context.SECURITY_CREDENTIALS, password)
env.put(Context.SECURITY_PRINCIPAL, principal)
new InitialDirContext(env)
}
}
class LdapSearchFactory extends DirSearchFactory with Logging {
@throws(classOf[AuthenticationException])
def getInstance(conf: LivyConf, principal: String, password: String): InitialDirContext = {
try {
LdapSearchFactory.createDirContext(conf, principal, password)
} catch {
case e: NamingException =>
throw new AuthenticationException("Error validating LDAP user", e)
}
}
}
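// A minimal usage sketch: bind against an LDAP server using a full DN and password.
// The DN and password below are illustrative placeholders, not values used by Livy.
object LdapSearchFactoryExample {
  def bindExample(conf: LivyConf): InitialDirContext =
    new LdapSearchFactory().getInstance(conf, "uid=alice,ou=people,dc=example,dc=com", "secret")
}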
|
ajbozarth/incubator-livy
|
thriftserver/server/src/main/scala/org/apache/livy/thriftserver/auth/ldap/LdapSearchFactory.scala
|
Scala
|
apache-2.0
| 2,141 |
/*
* Copyright 2015 the original author or authors.
* @https://github.com/scouter-project/scouter
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.db;
import scouter.lang.pack.StatusPack
import scouter.io.DataOutputX
import scouter.server.Logger
import scouter.server.db.status.StatusIndex
import scouter.server.db.status.StatusWriter
import scouter.util.DateUtil
import scouter.util.FileUtil
import scouter.util.IClose
import scouter.util.RequestQueue
import java.io.File
import scouter.server.util.ThreadScala
import scouter.server.util.OftenAction
object StatusWR {
val status = "status";
val queue = new RequestQueue[StatusPack](DBCtr.MAX_QUE_SIZE);
ThreadScala.start("scouter.server.db.StatusWR") {
var currentDateUnit = 0L
while (DBCtr.running) {
val p = queue.get();
try {
if (currentDateUnit != DateUtil.getDateUnit(p.time)) {
currentDateUnit = DateUtil.getDateUnit(p.time);
close();
open(DateUtil.yyyymmdd(p.time));
}
if (index == null) {
OftenAction.act("StatusWR", 10) {
queue.clear();
currentDateUnit = 0;
}
Logger.println("S135", 10, "can't open db");
} else {
val b = new DataOutputX().writePack(p).toByteArray()
val location = writer.write(b);
index.add(p.time, location);
}
} catch {
case t: Throwable => t.printStackTrace();
}
}
close()
}
def add(p: StatusPack) {
val ok = queue.put(p);
if (ok == false) {
Logger.println("S136", 10, "queue exceeded!!");
}
}
var index: StatusIndex = null
var writer: StatusWriter = null
def close() {
FileUtil.close(index);
FileUtil.close(writer);
index = null;
writer = null;
}
def open(date: String) {
try {
val path = getDBPath(date);
val f = new File(path);
if (f.exists() == false)
f.mkdirs();
val file = path + "/" + status;
index = StatusIndex.open(file);
writer = StatusWriter.open(file);
} catch {
case e: Throwable => {
e.printStackTrace();
close();
}
}
}
def getDBPath(date: String): String = {
val sb = new StringBuffer();
sb.append(DBCtr.getRootPath());
sb.append("/").append(date).append("/").append(status);
return sb.toString();
}
}
|
yuyupapa/OpenSource
|
scouter.server/src/scouter/server/db/StatusWR.scala
|
Scala
|
apache-2.0
| 3,281 |
package spire.math
import scala.reflect.ClassTag
// scalatest
import org.scalatest.FunSuite
// we need to disable our own === to avoid messing up ScalaTest.
import spire.implicits.{eqOps => _, _}
import java.math.MathContext
// nice alias
import scala.{specialized => spec}
class NumericTest extends FunSuite {
/**
* We use this function to avoid duplicating our tests for all the different
* A's that we want to test. We expect the actual values to be:
*
   * a=-3 b=3 c=-9
*/
def runWith[@spec A:Numeric:ClassTag](cls:String)(a:A, b:A, c:A) {
// the name to use for this A
//val cls = implicitly[ClassTag[A]].erasure.getSimpleName
//val cls = implicitly[ClassTag[A]].runtimeClass.getName
// test runner which constructs a unique name for each test we run.
def runTest(name:String)(f: => Unit) = test("%s:%s" format(cls, name))(f)
// Numeric[A]'s zero
val z = Numeric[A].zero
// abs
runTest("(-3).abs")(assert(a.abs === b))
runTest("3.abs")(assert(b.abs === b))
// unary_-
runTest("-(3)")(assert(-b === a))
runTest("-(0)")(assert(-z === z))
// +
runTest("3 + 0")(assert(b + z === b))
runTest("3 + (-3)")(assert(b + a === z))
// -
runTest("3 - 0")(assert(b - z === b))
runTest("3 - 3)")(assert(b - b === z))
runTest("0 - 3)")(assert(z - b === a))
// *
runTest("3 * 0")(assert(b * z === z))
runTest("3 * (-3)")(assert(b * a === c))
// toInt
runTest("3.toInt")(assert(b.toInt === 3))
}
implicit val mc: MathContext = MathContext.DECIMAL128
// here's where we actually run all the tests, for each type we care about.
runWith[Int]("Int")(-3, 3, -9)
runWith[Long]("Long")(-3, 3, -9)
runWith[Float]("Float")(-3, 3, -9)
runWith[Double]("Double")(-3, 3, -9)
runWith[BigInt]("BigInt")(-3, 3, -9)
runWith[BigDecimal]("BigDecimal")(-3, 3, -9)
runWith[Rational]("Rational")(-3, 3, -9)
//runWith[Complex[Double]](-3, 3, -9) // There seems to be a bug.
runWith[Complex[BigDecimal]]("Complex[BigDecimal]")(
Complex(BigDecimal(-3), BigDecimal(0)),
Complex(BigDecimal(3), BigDecimal(0)),
Complex(BigDecimal(-9), BigDecimal(0))
)
}
|
lrytz/spire
|
tests/src/test/scala/spire/math/NumericTest.scala
|
Scala
|
mit
| 2,189 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hbase.util
import org.apache.hadoop.hbase.util.Bytes
import org.apache.spark.sql.types._
import org.apache.spark.sql.hbase._
object BytesUtils {
def create(dataType: DataType): BytesUtils = {
dataType match {
case BooleanType => new BytesUtils(new HBaseRawType(Bytes.SIZEOF_BOOLEAN), BooleanType)
case ByteType => new BytesUtils(new HBaseRawType(Bytes.SIZEOF_BYTE), ByteType)
case DoubleType => new BytesUtils(new HBaseRawType(Bytes.SIZEOF_DOUBLE), DoubleType)
case FloatType => new BytesUtils(new HBaseRawType(Bytes.SIZEOF_FLOAT), FloatType)
case IntegerType => new BytesUtils(new HBaseRawType(Bytes.SIZEOF_INT), IntegerType)
case LongType => new BytesUtils(new HBaseRawType(Bytes.SIZEOF_LONG), LongType)
case ShortType => new BytesUtils(new HBaseRawType(Bytes.SIZEOF_SHORT), ShortType)
case StringType => new BytesUtils(null, StringType)
}
}
def toUTF8String(input: HBaseRawType, offset: Int, length: Int): UTF8String = {
UTF8String(input.slice(offset, offset + length))
}
def toByte(input: HBaseRawType, offset: Int): Byte = {
// Flip sign bit back
val v: Int = input(offset) ^ 0x80
v.asInstanceOf[Byte]
}
def toBoolean(input: HBaseRawType, offset: Int): Boolean = {
input(offset) != 0
}
def toDouble(input: HBaseRawType, offset: Int): Double = {
var l: Long = Bytes.toLong(input, offset, Bytes.SIZEOF_DOUBLE)
l = l - 1
l ^= (~l >> java.lang.Long.SIZE - 1) | java.lang.Long.MIN_VALUE
java.lang.Double.longBitsToDouble(l)
}
def toShort(input: HBaseRawType, offset: Int): Short = {
// flip sign bit back
var v: Int = input(offset) ^ 0x80
v = (v << 8) + (input(1 + offset) & 0xff)
v.asInstanceOf[Short]
}
def toFloat(input: HBaseRawType, offset: Int): Float = {
var i = Bytes.toInt(input, offset)
i = i - 1
i ^= (~i >> Integer.SIZE - 1) | Integer.MIN_VALUE
java.lang.Float.intBitsToFloat(i)
}
def toInt(input: HBaseRawType, offset: Int): Int = {
// Flip sign bit back
var v: Int = input(offset) ^ 0x80
for (i <- 1 to Bytes.SIZEOF_INT - 1) {
v = (v << 8) + (input(i + offset) & 0xff)
}
v
}
def toLong(input: HBaseRawType, offset: Int): Long = {
// Flip sign bit back
var v: Long = input(offset) ^ 0x80
for (i <- 1 to Bytes.SIZEOF_LONG - 1) {
v = (v << 8) + (input(i + offset) & 0xff)
}
v
}
  /**
   * Appends a 0x01 byte to the byte array.
   * @param input the byte array
   * @return a new array consisting of the input followed by a 0x01 byte
   */
def addOneString(input: HBaseRawType): HBaseRawType = {
val len = input.length
val result = new HBaseRawType(len + 1)
Array.copy(input, 0, result, 0, len)
result(len) = 0x01.asInstanceOf[Byte]
result
}
  /**
   * Adds one to the unsigned byte array.
   * @param input the unsigned byte array
   * @return null if the byte array is all 0xff, otherwise a copy of the array incremented by one
   */
def addOne(input: HBaseRawType): HBaseRawType = {
val len = input.length
val result = new HBaseRawType(len)
Array.copy(input, 0, result, 0, len)
var setValue = false
for (index <- len - 1 to 0 by -1 if !setValue) {
val item: Byte = input(index)
if (item != 0xff.toByte) {
setValue = true
if ((item & 0x01.toByte) == 0.toByte) {
result(index) = (item ^ 0x01.toByte).toByte
} else if ((item & 0x02.toByte) == 0.toByte) {
result(index) = (item ^ 0x03.toByte).toByte
} else if ((item & 0x04.toByte) == 0.toByte) {
result(index) = (item ^ 0x07.toByte).toByte
} else if ((item & 0x08.toByte) == 0.toByte) {
result(index) = (item ^ 0x0f.toByte).toByte
} else if ((item & 0x10.toByte) == 0.toByte) {
result(index) = (item ^ 0x1f.toByte).toByte
} else if ((item & 0x20.toByte) == 0.toByte) {
result(index) = (item ^ 0x3f.toByte).toByte
} else if ((item & 0x40.toByte) == 0.toByte) {
result(index) = (item ^ 0x7f.toByte).toByte
} else {
result(index) = (item ^ 0xff.toByte).toByte
}
// after increment, set remaining bytes to zero
for (rest <- index + 1 until len) {
result(rest) = 0x00.toByte
}
}
}
if (!setValue) {
null
} else {
result
}
}
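  // A minimal sketch of the semantics documented on addOneString and addOne above,
  // assuming HBaseRawType is an alias for Array[Byte] as the rest of this file implies.
  // The byte values are arbitrary examples.
  def incrementExamples(): Unit = {
    // addOneString appends a 0x01 byte: [0x61, 0x62] becomes [0x61, 0x62, 0x01]
    assert(addOneString(Array(0x61.toByte, 0x62.toByte))
      .sameElements(Array(0x61.toByte, 0x62.toByte, 0x01.toByte)))
    // 0x00fe + 1 = 0x00ff: only the lowest byte changes
    assert(addOne(Array(0x00.toByte, 0xfe.toByte))
      .sameElements(Array(0x00.toByte, 0xff.toByte)))
    // 0x00ff + 1 = 0x0100: the carry propagates and the lower byte is reset to zero
    assert(addOne(Array(0x00.toByte, 0xff.toByte))
      .sameElements(Array(0x01.toByte, 0x00.toByte)))
    // an all-0xff array cannot be incremented, which addOne signals with null
    assert(addOne(Array(0xff.toByte, 0xff.toByte)) == null)
  }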
}
class BytesUtils(var buffer: HBaseRawType, dt: DataType) {
val dataType = dt
def toBytes(input: UTF8String): HBaseRawType = {
buffer = input.getBytes
buffer
}
def toBytes(input: Byte): HBaseRawType = {
// Flip sign bit so that Byte is binary comparable
buffer(0) = (input ^ 0x80).asInstanceOf[Byte]
buffer
}
def toBytes(input: Boolean): HBaseRawType = {
if (input) {
buffer(0) = (-1).asInstanceOf[Byte]
} else {
buffer(0) = 0.asInstanceOf[Byte]
}
buffer
}
def toBytes(input: Double): HBaseRawType = {
var l: Long = java.lang.Double.doubleToLongBits(input)
l = (l ^ ((l >> java.lang.Long.SIZE - 1) | java.lang.Long.MIN_VALUE)) + 1
Bytes.putLong(buffer, 0, l)
buffer
}
def toBytes(input: Short): HBaseRawType = {
buffer(0) = ((input >> 8) ^ 0x80).asInstanceOf[Byte]
buffer(1) = input.asInstanceOf[Byte]
buffer
}
def toBytes(input: Float): HBaseRawType = {
var i: Int = java.lang.Float.floatToIntBits(input)
i = (i ^ ((i >> Integer.SIZE - 1) | Integer.MIN_VALUE)) + 1
Bytes.putInt(buffer, 0, i)
buffer
}
def toBytes(input: Int): HBaseRawType = {
// Flip sign bit so that INTEGER is binary comparable
buffer(0) = ((input >> 24) ^ 0x80).asInstanceOf[Byte]
buffer(1) = (input >> 16).asInstanceOf[Byte]
buffer(2) = (input >> 8).asInstanceOf[Byte]
buffer(3) = input.asInstanceOf[Byte]
buffer
}
def toBytes(input: Long): HBaseRawType = {
buffer(0) = ((input >> 56) ^ 0x80).asInstanceOf[Byte]
buffer(1) = (input >> 48).asInstanceOf[Byte]
buffer(2) = (input >> 40).asInstanceOf[Byte]
buffer(3) = (input >> 32).asInstanceOf[Byte]
buffer(4) = (input >> 24).asInstanceOf[Byte]
buffer(5) = (input >> 16).asInstanceOf[Byte]
buffer(6) = (input >> 8).asInstanceOf[Byte]
buffer(7) = input.asInstanceOf[Byte]
buffer
}
def toBytes(input: Any): HBaseRawType = {
input match {
case item: Boolean => toBytes(item)
case item: Byte => toBytes(item)
case item: Double => toBytes(item)
case item: Float => toBytes(item)
case item: Int => toBytes(item)
case item: Long => toBytes(item)
case item: Short => toBytes(item)
case item: String => toBytes(UTF8String(item))
case item: UTF8String => toBytes(item)
}
}
}
|
nkhuyu/Spark-SQL-on-HBase
|
src/main/scala/org/apache/spark/sql/hbase/util/BytesUtils.scala
|
Scala
|
apache-2.0
| 7,480 |
package ml.linalg
import scala.collection.mutable.ArrayBuffer
import ml.traits.MatrixT
class Ones[T: Numeric: ClassManifest](r: Int, c: Int) extends MatrixT[T] {
  var rows: Int = r
  var cols: Int = c
  // Fill with the Numeric type class' one rather than casting the Int literal 1,
  // which would fail at runtime for non-Int element types such as Double.
  var data = ArrayBuffer.fill(rows, cols)(implicitly[Numeric[T]].one)

  // define a secondary constructor that takes only one argument
  def this(n: Int) = {
    this(n, n)
  }
}
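// A minimal usage sketch of the two constructors above; the element types here are
// arbitrary examples, and OnesExample is not part of the original library.
object OnesExample {
  val square: Ones[Int] = new Ones[Int](3)        // 3 x 3 matrix of ones
  val wide: Ones[Double] = new Ones[Double](2, 3) // 2 x 3 matrix of ones
}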
|
jccarrasco/maleta
|
src/ml/linalg/Ones.scala
|
Scala
|
gpl-2.0
| 376 |
package com.twitter.finagle.protobuf.rpc
import com.twitter.util.Duration

trait RpcServer {
  def close(d: Duration): Unit
}
|
firebase/finagle
|
finagle-protobuf/src/main/scala/com/twitter/finagle/protobuf/rpc/RpcServer.scala
|
Scala
|
apache-2.0
| 96 |