| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
package play.api
import play.utils.Threads
import java.io._
import scala.util.control.NonFatal
import javax.xml.parsers.SAXParserFactory
import org.apache.xerces.impl.Constants
import javax.xml.XMLConstants
/** Application mode, either `DEV`, `TEST`, or `PROD`. */
object Mode extends Enumeration {
type Mode = Value
val Dev, Test, Prod = Value
}
/**
* High-level API to access Play global features.
*
* Note that this API depends on a running application.
* You can import the currently running application in a scope using:
* {{{
* import play.api.Play.current
* }}}
*/
object Play {
/*
* A general purpose logger for Play. Intended for internal usage.
*/
private[play] val logger = Logger("play")
/*
 * We want control over the SAX parser used, so we explicitly specify the required factory.
 * Having looked at its source code, we know that SAXParserFactoryImpl will yield a SAXParser,
 * even though no documentation explicitly states this. That said, there does not appear to be
 * any way other than declaring a factory to obtain a parser of a specific type.
*/
private[play] val xercesSaxParserFactory =
SAXParserFactory.newInstance("org.apache.xerces.jaxp.SAXParserFactoryImpl", Play.getClass.getClassLoader)
xercesSaxParserFactory.setFeature(Constants.SAX_FEATURE_PREFIX + Constants.EXTERNAL_GENERAL_ENTITIES_FEATURE, false)
xercesSaxParserFactory.setFeature(Constants.SAX_FEATURE_PREFIX + Constants.EXTERNAL_PARAMETER_ENTITIES_FEATURE, false)
xercesSaxParserFactory.setFeature(Constants.XERCES_FEATURE_PREFIX + Constants.DISALLOW_DOCTYPE_DECL_FEATURE, true)
xercesSaxParserFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true)
/*
 * A parser configured to ensure that no external schemas or DTDs are loaded.
*/
private[play] def XML = scala.xml.XML.withSAXParser(xercesSaxParserFactory.newSAXParser())
/**
* Returns the currently running application, or `null` if not defined.
*/
def unsafeApplication: Application = _currentApp
/**
 * Optionally returns the currently running application.
*/
def maybeApplication: Option[Application] = Option(_currentApp)
/**
* Implicitly import the current running application in the context.
*
* Note that by relying on this, your code will only work properly in
* the context of a running application.
*/
implicit def current: Application = maybeApplication.getOrElse(sys.error("There is no started application"))
private[play] var _currentApp: Application = _
/**
* Starts this application.
*
 * @param app the application to start
*/
def start(app: Application) {
// First stop previous app if exists
stop()
_currentApp = app
// Ensure routes are eagerly loaded, so that the reverse routers are correctly initialised before plugins are
// started.
app.routes
Threads.withContextClassLoader(classloader(app)) {
app.plugins.foreach(_.onStart())
}
app.mode match {
case Mode.Test =>
case mode => logger.info("Application started (" + mode + ")")
}
}
/**
* Stops the current application.
*/
def stop() {
Option(_currentApp).map { app =>
Threads.withContextClassLoader(classloader(app)) {
app.plugins.reverse.foreach { p =>
try { p.onStop() } catch { case NonFatal(e) => logger.warn("Error stopping plugin", e) }
}
}
}
_currentApp = null
}
/**
 * Scans the current application classloader to retrieve a resource's contents as a stream.
*
* For example, to retrieve a configuration file:
* {{{
* val maybeConf = application.resourceAsStream("conf/logger.xml")
* }}}
*
* @param name Absolute name of the resource (from the classpath root).
* @return Maybe a stream if found.
*/
def resourceAsStream(name: String)(implicit app: Application): Option[InputStream] = {
app.resourceAsStream(name)
}
/**
* Scans the current application classloader to retrieve a resource.
*
* For example, to retrieve a configuration file:
* {{{
* val maybeConf = application.resource("conf/logger.xml")
* }}}
*
* @param name absolute name of the resource (from the classpath root)
* @return the resource URL, if found
*/
def resource(name: String)(implicit app: Application): Option[java.net.URL] = {
app.resource(name)
}
/**
* Retrieves a file relative to the current application root path.
*
* For example, to retrieve a configuration file:
* {{{
* val myConf = application.getFile("conf/myConf.yml")
* }}}
*
* @param relativePath the relative path of the file to fetch
* @return a file instance; it is not guaranteed that the file exists
*/
def getFile(relativePath: String)(implicit app: Application): File = {
app.getFile(relativePath)
}
/**
* Retrieves a file relative to the current application root path.
*
* For example, to retrieve a configuration file:
* {{{
* val myConf = application.getExistingFile("conf/myConf.yml")
* }}}
*
* @param relativePath relative path of the file to fetch
* @return an existing file
*/
def getExistingFile(relativePath: String)(implicit app: Application): Option[File] = {
app.getExistingFile(relativePath)
}
/**
* Returns the current application.
*/
def application(implicit app: Application): Application = app
/**
* Returns the current application classloader.
*/
def classloader(implicit app: Application): ClassLoader = app.classloader
/**
* Returns the current application configuration.
*/
def configuration(implicit app: Application): Configuration = app.configuration
/**
* Returns the current application router.
*/
def routes(implicit app: Application): Option[play.core.Router.Routes] = app.routes
/**
* Returns the current application global settings.
*/
def global(implicit app: Application): GlobalSettings = app.global
/**
* Returns the current application mode.
*/
def mode(implicit app: Application): Mode.Mode = app.mode
/**
* Returns `true` if the current application is `DEV` mode.
*/
def isDev(implicit app: Application): Boolean = (app.mode == Mode.Dev)
/**
* Returns `true` if the current application is `PROD` mode.
*/
def isProd(implicit app: Application): Boolean = (app.mode == Mode.Prod)
/**
* Returns `true` if the current application is `TEST` mode.
*/
def isTest(implicit app: Application): Boolean = (app.mode == Mode.Test)
/**
* Returns the name of the cookie that can be used to permanently set the user's language.
*/
def langCookieName(implicit app: Application): String = app.configuration.getString("application.lang.cookie").getOrElse("PLAY_LANG")
}
| michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/play/src/main/scala/play/api/Play.scala | Scala | mit | 6,802 |
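/*
 * A hedged usage sketch, not part of Play.scala above: it assumes Play 2.2.x is on the
 * classpath and that an Application has been started elsewhere. The object and method
 * names below are illustrative only; they just exercise the public accessors defined in
 * the Play object (maybeApplication, resource, mode).
 */
import play.api.{Application, Mode, Play}

object PlayAccessSketch {
  // Prefer the Option-returning accessor when no application may be running,
  // since Play.current throws if nothing has been started.
  def describeRunningApp(): String = Play.maybeApplication match {
    case Some(app) => "running in " + app.mode + " mode"
    case None      => "no started application"
  }

  // Helpers such as Play.resource and Play.mode take the application implicitly,
  // so callers can pass it explicitly instead of importing Play.current.
  def loggerConfig(implicit app: Application): Option[java.net.URL] =
    Play.resource("conf/logger.xml")

  def isDevelopment(implicit app: Application): Boolean =
    Play.mode == Mode.Dev
}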
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.component.spray.route
import java.io.InputStream
import java.util.concurrent.TimeUnit
import akka.actor._
import com.webtrends.harness.logging.LoggingAdapter
import spray.can.Http
import spray.http.HttpHeaders.`Content-Length`
import spray.http._
import spray.routing.RequestContext
import scala.concurrent.duration.FiniteDuration
import scala.util.control.NonFatal
case class SprayStreamResponse(stream: InputStream, maxStreamBytes: Option[Long] = None, chunkBytes: Int = 100000)
case object SendNextChunk
case object BackOffLimitExceeded
class SprayStreamingResponder(response: SprayStreamResponse, context: ActorContext, status: StatusCode) extends LoggingAdapter {
implicit val executionContext = context.dispatcher
val buffer = new Array[Byte](response.chunkBytes)
var remaining = response.maxStreamBytes.getOrElse(Long.MaxValue)
def respond(ctx: RequestContext): Unit = {
context.actorOf {
Props {
new Actor with ActorLogging {
val backoffManager = new {
var backoffMillis = 100
var attempts = 0
def reset(): Unit = {
backoffMillis = 100
attempts = 0
}
def backOff(): Unit = {
attempts += 1
backoffMillis *= 2
if (attempts > 10) {
self ! BackOffLimitExceeded
} else {
context.system.scheduler.scheduleOnce(
FiniteDuration(backoffMillis.toLong, TimeUnit.MILLISECONDS), self, SendNextChunk)
}
}
}
val headers = response.maxStreamBytes match {
case Some(size) => List(`Content-Length`(size))
case None => List()
}
ctx.responder ! ChunkedResponseStart(HttpResponse(status = status, entity = HttpEntity(""), headers = headers))
.withAck(SendNextChunk)
def receive = {
case SendNextChunk if remaining == 0L =>
close(ctx.responder)
case SendNextChunk =>
readNext() match {
case Some(-1) => // End of stream
close(ctx.responder)
case Some(0) => // Not expected, but possible depending on InputStream implementation
backoffManager.backOff()
case Some(size) =>
backoffManager.reset()
ctx.responder ! MessageChunk(buffer.slice(0, size)).withAck(SendNextChunk)
case None =>
close(ctx.responder)
}
case BackOffLimitExceeded =>
log.warning(s"Stopping response streaming due to $BackOffLimitExceeded")
close(ctx.responder)
case connectionClosed: Http.ConnectionClosed =>
log.warning(s"Stopping response streaming due to $connectionClosed")
close(ctx.responder)
}
def close(responder: ActorRef): Unit = {
responder ! ChunkedMessageEnd
try {
response.stream.close()
}
catch {
case NonFatal(nf) => log.error("Failed to close stream", nf)
}
finally {
context.stop(self)
}
}
}
}
}
}
def readNext(): Option[Int] = {
try {
val bytesRead = response.stream.read(buffer, 0, Math.min(buffer.size, remaining).toInt)
remaining -= bytesRead
Some(bytesRead)
}
catch {
case NonFatal(nf) =>
log.error("Unable to read from stream.", nf)
None
}
}
}
| mjwallin1/wookiee-spray | src/main/scala/com/webtrends/harness/component/spray/route/SprayStreamingResponder.scala | Scala | apache-2.0 | 4,392 |
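/*
 * A hedged sketch, independent of the responder above: it only replays the arithmetic of
 * the anonymous actor's backoffManager. After an empty read the delay doubles from 100 ms
 * (so the first retry waits 200 ms), and BackOffLimitExceeded is signalled once more than
 * ten backoff attempts have been made.
 */
object BackoffScheduleSketch extends App {
  // nth consecutive empty read (n = 1..10) is retried after 100 * 2^n milliseconds
  val delays = (1 to 10).map(n => 100L * (1L << n))
  println(delays.mkString(" ms, ") + " ms")                 // 200 ms, 400 ms, ..., 102400 ms
  println(s"total wait before giving up: ${delays.sum} ms") // 204600 ms
}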
case class C()
object arrays2 {
def main(args: Array[String]): Unit = {
val a: Array[Array[C]] = new Array[Array[C]](2)
a(0) = new Array[C](2)
a(0)(0) = new C()
}
}
// #2422
object arrays4 {
val args = Array[String]("World")
"Hello %1$s".format(args*)
}
/*
test/files/pos/arrays2.scala:15: warning: Passing an explicit array value to a Scala varargs method is deprecated (since 2.13.0) and will result in a defensive copy; Use the more efficient non-copying ArraySeq.unsafeWrapArray or an explicit toIndexedSeq call
"Hello %1$s".format(args*)
^
one warning found
*/
// #2461
object arrays3 {
def apply[X <: AnyRef](xs : X*) : java.util.List[X] = java.util.Arrays.asList(xs*)
def apply2[X](xs : X*) : java.util.List[X] = java.util.Arrays.asList(xs*)
}
| lampepfl/dotty | tests/pos/arrays2.scala | Scala | apache-2.0 | 803 |
package com.jamontes79.scala.movielist.utils
/**
* Created by alberto on 29/9/15.
*/
object StringRes {
val jsonFilename = "peliculas.json"
val jsonConfigFilename = "config.json"
val trailerFilename = "trailer.json"
}
| jamontes79/movieList | src/main/scala/com/jamontes79/scala/movielist/utils/StringRes.scala | Scala | apache-2.0 | 228 |
package at.fabricate.liftdev.common
package snippet
import model.AddRating
import model.AddRatingMeta
import net.liftweb.util.CssSel
import net.liftweb.http.js.JsCmds
import net.liftweb.http.js.jquery.JqJsCmds.AppendHtml
import net.liftweb.http.SHtml
import net.liftweb.util.Helpers._
import scala.xml.NodeSeq
import scala.xml.Text
import model.BaseEntity
import model.BaseMetaEntityWithTitleAndDescription
import model.BaseEntityWithTitleAndDescription
import net.liftweb.http.js.JsCmds.SetHtml
import net.liftweb.common.Empty
trait AddRatingSnippet[T <: BaseEntityWithTitleAndDescription[T] with AddRating[T]] extends BaseEntityWithTitleAndDescriptionSnippet[T] {
var listOfRatingOptionsCSStoInt : List[(String,Int)] = List(
"rating1"->1,
"rating2"->2,
"rating3"->3,
"rating4"->4,
"rating5"->5
)
def bindNewRatingCSS(item : ItemType) : CssSel = {
def createNewItem : item.TheRating = item.TheRating.create.ratedItem(item)
listOfRatingOptionsCSStoInt.map({
case (css, value) => {
val newRating = createNewItem
newRating.rating.set(value)
"#%s [onclick]".format(css) #> SHtml.ajaxInvoke(() => {
saveAndDisplayAjaxMessages(newRating,
() => {
// update the ratings
SetHtml("rating", generateDisplayRating(item)) &
// // hide the form
SetHtml("newrating",NodeSeq.Empty )
},
errors => {
errors.map(println(_))
JsCmds.Alert("adding rating failed! " )
},"ratingMessages")
} )
}
}).reduce(_ & _)
}
def generateDisplayRating(item : ItemType) : NodeSeq = {
// if (item.accumulatedRatings == null)
// return Text("no ratings available")
item.accumulatedRatings.get match {
case 0.0 => Text("no ratings available")
case someNumber if someNumber != null => Text("%1.2f".format(someNumber))
case _ => Text("no ratings available")
}
}
// maybe save that value as a precomputed field (additionally with a date) into the database in case of performance issues
abstract override def asHtml(item : ItemType) : CssSel = {
// println("chaining asHtml from AddCommentSnippet")
("#rating" #> generateDisplayRating(item) &
"#newrating" #> bindNewRatingCSS(item)) &
// chain the css selectors
(super.asHtml(item))
}
}
| Fabricate/OpenthingsImplementation | src/main/scala/at/fabricate/liftdev/common/snippet/AddRatingSnippet.scala | Scala | lgpl-3.0 | 2,395 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate
import org.apache.spark.sql.catalyst.analysis.{DecimalPrecision, FunctionRegistry}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types._
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the mean calculated from values of a group.",
examples = """
Examples:
> SELECT _FUNC_(col) FROM VALUES (1), (2), (3) AS tab(col);
2.0
> SELECT _FUNC_(col) FROM VALUES (1), (2), (NULL) AS tab(col);
1.5
> SELECT _FUNC_(cast(v as interval)) FROM VALUES ('-1 weeks'), ('2 seconds'), (null) t(v);
-3 days -11 hours -59 minutes -59 seconds
""",
since = "1.0.0")
case class Average(child: Expression) extends DeclarativeAggregate with ImplicitCastInputTypes {
override def prettyName: String = getTagValue(FunctionRegistry.FUNC_ALIAS).getOrElse("avg")
override def children: Seq[Expression] = child :: Nil
override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection.NumericAndInterval)
override def nullable: Boolean = true
// Return data type.
override def dataType: DataType = resultType
private lazy val resultType = child.dataType match {
case DecimalType.Fixed(p, s) =>
DecimalType.bounded(p + 4, s + 4)
case interval: CalendarIntervalType => interval
case _ => DoubleType
}
private lazy val sumDataType = child.dataType match {
case _ @ DecimalType.Fixed(p, s) => DecimalType.bounded(p + 10, s)
case interval: CalendarIntervalType => interval
case _ => DoubleType
}
private lazy val sum = AttributeReference("sum", sumDataType)()
private lazy val count = AttributeReference("count", LongType)()
override lazy val aggBufferAttributes = sum :: count :: Nil
override lazy val initialValues = Seq(
/* sum = */ Literal.default(sumDataType),
/* count = */ Literal(0L)
)
override lazy val mergeExpressions = Seq(
/* sum = */ sum.left + sum.right,
/* count = */ count.left + count.right
)
// If all inputs are null, count will be 0 and we will get null after the division.
override lazy val evaluateExpression = child.dataType match {
case _: DecimalType =>
DecimalPrecision.decimalAndDecimal(sum / count.cast(DecimalType.LongDecimal)).cast(resultType)
case CalendarIntervalType =>
val newCount = If(EqualTo(count, Literal(0L)), Literal(null, LongType), count)
DivideInterval(sum.cast(resultType), newCount.cast(DoubleType))
case _ =>
sum.cast(resultType) / count.cast(resultType)
}
override lazy val updateExpressions: Seq[Expression] = Seq(
/* sum = */
Add(
sum,
coalesce(child.cast(sumDataType), Literal.default(sumDataType))),
/* count = */ If(child.isNull, count, count + 1L)
)
}
| jkbradley/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Average.scala | Scala | apache-2.0 | 3,644 |
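/*
 * A hedged sketch, independent of Spark: it mimics the sum/count buffer of Average above
 * for plain Doubles, including the null handling spelled out in updateExpressions and
 * evaluateExpression (null inputs are skipped, and an all-null group averages to None),
 * matching the SQL examples in the ExpressionDescription.
 */
object AverageBufferSketch extends App {
  def average(values: Seq[Option[Double]]): Option[Double] = {
    val (sum, count) = values.foldLeft((0.0, 0L)) {
      case ((s, c), Some(v)) => (s + v, c + 1) // coalesce(child, 0) added to sum, count + 1
      case ((s, c), None)    => (s, c)         // null input: buffer unchanged
    }
    if (count == 0L) None else Some(sum / count)
  }

  println(average(Seq(Some(1.0), Some(2.0), Some(3.0)))) // Some(2.0)
  println(average(Seq(Some(1.0), Some(2.0), None)))      // Some(1.5)
  println(average(Seq(None, None)))                      // None
}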
package me.yingrui.segment.word2vec.apps
import java.io.File
import java.lang.Math._
import java.nio.file.Files
import me.yingrui.segment.math.Matrix
import me.yingrui.segment.math.Matrix.randomize
import me.yingrui.segment.neural.errors.CrossEntropyLoss
import me.yingrui.segment.neural._
import me.yingrui.segment.util.SerializeHandler
import me.yingrui.segment.word2vec.{MNNSegmentViterbiClassifier, SegmentCorpus, Vocabulary}
import scala.collection.mutable.ListBuffer
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.util.Random
object MNNSegmentTrainingApp extends App {
implicit val executionContext = ExecutionContext.Implicits.global
val random = new Random(System.currentTimeMillis())
val word2VecModelFile = if (args.indexOf("--word2vec-model") >= 0) args(args.indexOf("--word2vec-model") + 1) else "vectors.dat"
val trainFile = if (args.indexOf("--train-file") >= 0) args(args.indexOf("--train-file") + 1) else "training.txt"
val saveFile = if (args.indexOf("--save-file") >= 0) args(args.indexOf("--save-file") + 1) else "segment-vector.dat"
val ngram = if (args.indexOf("-ngram") >= 0) args(args.indexOf("-ngram") + 1).toInt else 2
val maxIteration = if (args.indexOf("-iter") >= 0) args(args.indexOf("-iter") + 1).toInt else 40
val punishment = if (args.indexOf("-punishment") >= 0) args(args.indexOf("-punishment") + 1).toInt else 0
val skipSelf = if (args.indexOf("-skip-self") >= 0) args(args.indexOf("-skip-self") + 1).toBoolean else true
val taskCount = if (args.indexOf("-thread") >= 0) args(args.indexOf("-thread") + 1).toInt else Runtime.getRuntime().availableProcessors()
print("loading word2vec model...\\r")
val reader = SerializeHandler(new File(word2VecModelFile), SerializeHandler.READ_ONLY)
val vocab = Vocabulary(reader)
val word2VecModel = reader.deserialize2DArrayDouble()
assert(vocab.size == word2VecModel.length, "vocab size is not equal to word2vec model size")
val numberOfFeatures = word2VecModel(0).length
val numberOfClasses = pow(4, ngram).toInt
val networks = initializeNetworks(numberOfFeatures, numberOfClasses, vocab.size)
print("loading training corpus...\\r")
val corpus = new SegmentCorpus(word2VecModel, vocab, ngram, ngram)
val transitionProb = corpus.getLabelTransitionProb(trainFile)
val files = corpus.splitCorpus(trainFile, taskCount)
print("training...\\r")
var iteration = 0
var cost = 0D
var lastCost = Double.MaxValue
val costs = new ListBuffer[Double]()
var lastAverageCost = Double.MaxValue
var hasImprovement = true
var learningRate = 0.01D
while (shouldContinue && iteration < maxIteration && hasImprovement) {
val tic = System.currentTimeMillis()
cost = takeARound(iteration, learningRate)
val toc = System.currentTimeMillis()
costs += cost
val averageCost = costs.takeRight(5).sum / costs.takeRight(5).size.toDouble
val improvement = (lastCost - cost) / lastCost
println("Iteration: %2d learning rate: %1.7f improved: %2.5f cost: %2.5f average cost: %2.5f elapse: %ds".format(iteration, learningRate, improvement, cost, averageCost, (toc - tic) / 1000))
updateLearningRate(improvement)
hasImprovement = (lastAverageCost - averageCost) > 1e-5
lastAverageCost = averageCost
lastCost = cost
iteration += 1
}
def updateLearningRate(improvement: Double): Unit = {
if (improvement <= 0.03D)
learningRate = learningRate * 0.1
else
learningRate = learningRate * 0.9
if (learningRate < 0.000001) learningRate = 0.0000001D
}
println("testing...")
displayResult(test(trainFile))
displayResult(testSegmentCorpus(trainFile))
println("saving...")
// saveModel()
private def displayResult(result: (Double, Double)): Unit = result match {
case (errorCount, numberOfSamples) => {
val accuracy = 1.0D - errorCount / numberOfSamples
println("error = " + errorCount + " total = " + numberOfSamples)
println("accuracy = " + accuracy)
}
case _ =>
}
def testSegmentCorpus(file: String): (Double, Double) = {
var errors = 0.0
var total = 0.0
val neuralNetworks = networks.map(network => network.getNetwork)
corpus.foreachDocuments(file) { data =>
val document = corpus.convertToSegmentDataSet(data, skipSelf)
val expectedOutputs = document.map(_._3)
val inputs = splitByUnknownWords(document)
val outputs = inputs.map(input => classify(input, neuralNetworks)).flatten
assert(outputs.length == expectedOutputs.length)
for (i <- 0 until document.length) {
total += 1D
if (expectedOutputs(i) != outputs(i)) errors += 1D
}
}
(errors, total)
}
def classify(input: Seq[(Int, Matrix)], networks: Seq[NeuralNetwork]): Seq[Int] = {
if (input.forall(data => data._1 <= 0)) {
input.map(ele => 0)
} else {
val classifier = new MNNSegmentViterbiClassifier(networks, transitionProb, ngram)
val result = classifier.classify(input)
result.getBestPath
}
}
def splitByUnknownWords(document: Seq[(Int, Matrix, Int)]): Seq[Seq[(Int, Matrix)]] = {
val inputs = document.map(data => (data._1, data._2))
var start = 0
var unknownWordIndex = inputs.indexWhere(input => input._1 <= 0, start)
val result = ListBuffer[Seq[(Int, Matrix)]]()
while (start < inputs.length) {
if (unknownWordIndex < 0) {
result += inputs.slice(start, inputs.length)
start = inputs.length
} else {
if (start < unknownWordIndex) result += inputs.slice(start, unknownWordIndex)
result += inputs.slice(unknownWordIndex, unknownWordIndex + 1)
start = unknownWordIndex + 1
unknownWordIndex = inputs.indexWhere(input => input._1 <= 0, start)
}
}
result
}
private def saveModel(): Unit = {
val dumper = SerializeHandler(new File(saveFile), SerializeHandler.WRITE_ONLY)
dumper.serializeInt(networks.size)
for (network <- networks) {
network.getNetwork.save(dumper)
}
dumper.serializeMatrix(transitionProb)
dumper.close()
}
private def test(file: String): (Double, Double) = {
var errors = 0.0
var total = 0.0
corpus.foreachDocuments(file) { document =>
val wordIndexesAndLabelIndexes = corpus.getWordIndexesAndLabelIndexes(document)
for (position <- 0 until wordIndexesAndLabelIndexes.length) {
total += 1.0
val wordIndex = wordIndexesAndLabelIndexes(position)._1
val expectedOutput = corpus.getOutputMatrix(wordIndexesAndLabelIndexes, position)
val input = corpus.convertToMatrix(corpus.getContextWords(wordIndexesAndLabelIndexes, position, skipSelf))
val network = networks(wordIndex)
val output = classify(network, input)
if ((expectedOutput - output).map(abs(_)).sum > 0)
errors += 1.0D
else
0.0D
}
}
(errors, total)
}
def classify(classifier: BackPropagation, input: Matrix): Matrix = {
val actualOutput = classifier computeOutput input
var maxIndex = 0
var maxValue = 0D
for (i <- 0 until actualOutput.col) {
if (actualOutput(0, i) > maxValue) {
maxValue = actualOutput(0, i)
maxIndex = i
}
}
for (i <- 0 until actualOutput.col) {
actualOutput(0, i) = if (i == maxIndex) 1D else 0D
}
actualOutput
}
private def takeARound(currentIteration: Int, learningRate: Double): Double = {
networks.foreach(network => network.errorCalculator.clear)
val tasks = for (file <- files) yield {
Future {
def train(expectedOutput: Matrix, input: Matrix, network: BackPropagation): Unit = {
val output = network.computeOutput(input)
network.computeError(output, expectedOutput)
network.update(learningRate)
}
def trainPunishment(wordIndex: Int, input: Matrix, times: Int): Unit = {
for (index <- 0 until times;
randomWordIndex = random.nextInt(networks.size)
if randomWordIndex != wordIndex) {
train(corpus.getDefaultOutputMatrix(), input, networks(randomWordIndex))
}
}
corpus.foreachDocuments(file) { document =>
val wordIndexesAndLabelIndexes = corpus.getWordIndexesAndLabelIndexes(document)
for (position <- 0 until wordIndexesAndLabelIndexes.length) {
val wordIndex = wordIndexesAndLabelIndexes(position)._1
val expectedOutput = corpus.getOutputMatrix(wordIndexesAndLabelIndexes, position)
val input = corpus.convertToMatrix(corpus.getContextWords(wordIndexesAndLabelIndexes, position, skipSelf))
train(expectedOutput, input, networks(wordIndex))
trainPunishment(wordIndex, input, punishment)
}
}
}
}
tasks.foreach(f => Await.result(f, Duration.Inf))
val loss = networks.map(network => network.getLoss)
loss.sum
}
def shouldContinue: Boolean = {
val tmpFile = new File("stop-training.tmp")
if (Files.exists(tmpFile.toPath)) {
tmpFile.delete()
false
} else {
true
}
}
private def initializeNetworks(numberOfFeatures: Int, numberOfClasses: Int, size: Int) = {
val softmax = SoftmaxLayer(randomize(numberOfClasses, numberOfClasses, -0.0001D, 0.0001D))
for (i <- 0 until size) yield {
val loss = new CrossEntropyLoss
val network = new BackPropagation(numberOfFeatures, numberOfClasses, 0.01D, 0.3D, loss)
val layer = new BPSigmoidLayer(Matrix.randomize(numberOfFeatures, numberOfClasses, -0.0001D, 0.0001D), Matrix.randomize(1, numberOfClasses, -0.001D, 0.001D), false)
network.addLayer(layer)
network.addLayer(softmax)
network
}
}
}
| yingrui/mahjong | lib-segment-apps/src/main/scala/me/yingrui/segment/word2vec/apps/MNNSegmentTrainingApp.scala | Scala | gpl-3.0 | 9,798 |
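/*
 * A hedged sketch, separate from the training app above: it replays the rule in
 * updateLearningRate on a made-up sequence of relative improvements, to show how the
 * learning rate decays gently (x0.9) while improvement stays above 3% and collapses
 * (x0.1) once improvement drops to 3% or below. The improvement values are invented
 * for illustration.
 */
object LearningRateSketch extends App {
  def step(rate: Double, improvement: Double): Double = {
    val next = if (improvement <= 0.03) rate * 0.1 else rate * 0.9
    if (next < 0.000001) 0.0000001 else next
  }

  val improvements = Seq(0.20, 0.10, 0.05, 0.03, 0.01) // hypothetical per-iteration values
  val rates = improvements.scanLeft(0.01)(step)
  println(rates.map(r => f"$r%1.7f").mkString(" -> "))
  // 0.0100000 -> 0.0090000 -> 0.0081000 -> 0.0072900 -> 0.0007290 -> 0.0000729
}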
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.alvsanand.sgc.ftp.normal
import java.io.ByteArrayOutputStream
import java.nio.file.{Files, Paths}
import java.util.Date
import es.alvsanand.sgc.core.connector.SgcConnectorException
import es.alvsanand.sgc.ftp.{FTPCredentials, FTPSlot, ProxyConfiguration}
import org.apache.ftpserver.listener.ListenerFactory
import org.apache.ftpserver.usermanager.PropertiesUserManagerFactory
import org.apache.ftpserver.usermanager.impl.BaseUser
import org.apache.ftpserver.{FtpServer, FtpServerFactory}
import org.scalatest._
class FTPSgcConnectorTest extends FlatSpec with Matchers with OptionValues with Inside
with Inspectors with BeforeAndAfterAll {
private val HOST = "127.0.0.1"
private val PORT = 12021
private val TEST_USER = "test"
private val TEST_PASSWORD = "test"
private val ANON_USER = "anonymous"
private var server: FtpServer = null
private val TEST_CONFIG_FILE = "/servers/config/org.apache.ftpserver_users.properties"
private val TEST_HOME_DIR = "/servers/files"
private val TEST_SAMPLES_DIR = "samples"
private val TEST_EMPTY_DIR = "empty"
override def beforeAll(): Unit = {
val propertiesUserManagerFactory = new PropertiesUserManagerFactory()
val users = getClass.getResource(TEST_CONFIG_FILE)
propertiesUserManagerFactory.setUrl(users)
val userManager = propertiesUserManagerFactory.createUserManager()
val testUser = userManager.getUserByName(TEST_USER).asInstanceOf[BaseUser]
testUser.setHomeDirectory(getClass.getResource(TEST_HOME_DIR).getFile)
userManager.save(testUser)
val anonymousUser = userManager.getUserByName(ANON_USER).asInstanceOf[BaseUser]
anonymousUser.setHomeDirectory(getClass.getResource(TEST_HOME_DIR).getFile)
userManager.save(anonymousUser)
val serverFactory = new FtpServerFactory()
serverFactory.setUserManager(userManager);
val factory = new ListenerFactory();
factory.setPort(PORT);
serverFactory.addListener("default", factory.createListener());
server = serverFactory.createServer();
server.start();
}
override def afterAll(): Unit = {
server.stop();
}
it should "fail with obligatory parameters" in {
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters(null, 21, null, null)))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, null, null)))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, "directory", null)))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, "directory", FTPCredentials(null))))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, "directory", FTPCredentials("user"),
proxy = Option(ProxyConfiguration("")))))
}
it should "work with obligatory parameters" in {
noException should be thrownBy(
new FTPSgcConnector(FTPParameters("host", 21, "directory", FTPCredentials("user")))
)
noException should be thrownBy(
new FTPSgcConnector(FTPParameters("host", 21, "directory", FTPCredentials("user"),
proxy = Option(ProxyConfiguration("proxyHost"))))
)
}
it should "work with proxy parameters" in {
var p = Option(ProxyConfiguration("proxyHost", user = Option("user")))
var parameters = FTPParameters("host", 21, "directory", FTPCredentials("user"),
proxy = p)
noException should be thrownBy(new FTPSgcConnector(parameters))
new FTPSgcConnector(parameters).asInstanceOf[FTPSgcConnector].usesProxy() should
be(true)
p = Option(ProxyConfiguration("proxyHost", user = Option("user"), password = Option("")))
parameters = FTPParameters("host", 21, "directory", FTPCredentials("user"),
proxy = p)
noException should be thrownBy(new FTPSgcConnector(parameters))
new FTPSgcConnector(parameters).asInstanceOf[FTPSgcConnector].usesProxy() should
be(true)
}
it should "work with test user and empty/not existing directory" in {
val parameters = FTPParameters(HOST, PORT, TEST_EMPTY_DIR,
FTPCredentials(TEST_USER, Option(TEST_PASSWORD)))
val connector = new FTPSgcConnector(parameters)
a[SgcConnectorException] shouldBe thrownBy(connector.list())
}
it should "work with anonymous user and existing directory" in {
val parameters = FTPParameters(HOST, PORT, TEST_SAMPLES_DIR,
FTPCredentials(ANON_USER))
val connector = new FTPSgcConnector(parameters)
connector.list().map(_.name) should be(List[String]("sampleFile.txt", "sampleFile2.txt"))
}
it should "work with test user and existing directory" in {
val parameters = FTPParameters(HOST, PORT, TEST_SAMPLES_DIR,
FTPCredentials(TEST_USER, Option(TEST_PASSWORD)))
val connector = new FTPSgcConnector(parameters)
connector.list().map(_.name) should be(List[String]("sampleFile.txt", "sampleFile2.txt"))
}
it should "work with existing name" in {
val parameters = FTPParameters(HOST, PORT, TEST_SAMPLES_DIR,
FTPCredentials(TEST_USER, Option(TEST_PASSWORD)))
val connector = new FTPSgcConnector(parameters)
val fileName = s"sampleFile.txt"
val file = s"$TEST_HOME_DIR/$TEST_SAMPLES_DIR/$fileName"
val data = Files.readAllBytes(Paths.get(getClass.getResource(file).getFile))
val out: ByteArrayOutputStream = new ByteArrayOutputStream
connector.fetch(FTPSlot(fileName, new Date), out)
out.toByteArray should be(data)
}
it should "fail with bad name" in {
val parameters = FTPParameters(HOST, PORT, TEST_SAMPLES_DIR,
FTPCredentials(TEST_USER, Option(TEST_PASSWORD)))
val connector = new FTPSgcConnector(parameters)
val fileName = s"badSampleFile.txt"
val out: ByteArrayOutputStream = new ByteArrayOutputStream
noException should be thrownBy(connector.fetch(FTPSlot(fileName, new Date), out))
out.size() should be(0)
}
}
| alvsanand/spark-generic-connector | sgc-ftp/src/test/scala/es/alvsanand/sgc/ftp/normal/FTPSgcConnectorTest.scala | Scala | apache-2.0 | 6,818 |
package com.ft.membership.http.testsupport
import org.junit.rules.{RuleChain, TestRule}
import org.junit.runner.Description
import org.junit.runners.model.Statement
class TestRules extends TestRule {
val simpleStub = new SimpleStub(9000)
val ruleChain = RuleChain.outerRule(simpleStub)
override def apply(statement: Statement, description: Description): Statement = {
ruleChain.apply(statement, description)
}
}
| Financial-Times/scala-commons | src/main/test/scala/com/ft/membership/http/testsupport/TestRules.scala | Scala | mit | 427 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.examples.localEstimator
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.models.lenet.LeNet5
// import com.intel.analytics.bigdl.ZooClassNLLCriterion
import com.intel.analytics.bigdl.dllib.keras.objectives.ZooClassNLLCriterion
import com.intel.analytics.bigdl.dllib.optim.{Adam, Loss, Top1Accuracy}
import com.intel.analytics.bigdl.dllib.estimator.LocalEstimator
import org.slf4j.LoggerFactory
import scopt.OptionParser
case class LenetLocalEstimatorParams(imageDirPath: String = "./",
batchSize: Int = 1000,
epoch: Int = 15,
threadNum: Int = 44)
object LenetLocalEstimator {
val logger = LoggerFactory.getLogger(getClass)
def main(args: Array[String]) : Unit = {
val parser = new OptionParser[LenetLocalEstimatorParams]("LenetLocalEstimator Example") {
opt[String]('d', "imageDirPath")
.required()
.text("The directory of mnist dataset")
.action((x, c) => c.copy(imageDirPath = x))
opt[Int]('b', "batchSize")
.required()
.text("The number of batchSize")
.action((x, c) => c.copy(batchSize = x))
opt[Int]('e', "epoch")
.required()
.text("The number of epoch")
.action((x, c) => c.copy(epoch = x))
opt[Int]('t', "threadNum")
.required()
.text("The number of threadNum")
.action((x, c) => c.copy(threadNum = x))
}
parser.parse(args, LenetLocalEstimatorParams()).map { params =>
logger.info(s"params parsed as $params")
val imageDirPath = params.imageDirPath
val batchSize = params.batchSize
val epoch = params.epoch
val threadNum = params.threadNum
val model: Module[Float] = LeNet5(10)
val criterion = ZooClassNLLCriterion[Float]()
val adam = new Adam[Float]()
val validations = Array(new Top1Accuracy[Float], new Loss[Float])
val localEstimator = LocalEstimator(model, criterion, adam, validations, threadNum)
logger.info(s"LocalEstimator loaded as $localEstimator")
val trainData = MnistDataLoader.loadTrainData(imageDirPath)
val testData = MnistDataLoader.loadTestData(imageDirPath)
localEstimator.fit(trainData,
testData,
ImageProcessing.labeledGreyImageToMiniBatchTransformer,
batchSize,
epoch)
}
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/example/localEstimator/LenetLocalEstimator.scala | Scala | apache-2.0 | 3,056 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import async._
import java.util.Properties
import kafka.serializer.Encoder
import java.util.concurrent.{ConcurrentMap, ConcurrentHashMap}
import kafka.cluster.{Partition, Broker}
import kafka.api.ProducerRequest
import kafka.common.{UnavailableProducerException, InvalidConfigException}
import kafka.utils.{Utils, Logging}
import kafka.message.{NoCompressionCodec, ByteBufferMessageSet}
class ProducerPool[V](private val config: ProducerConfig,
private val serializer: Encoder[V],
private val syncProducers: ConcurrentMap[Int, SyncProducer],
private val asyncProducers: ConcurrentMap[Int, AsyncProducer[V]],
private val inputEventHandler: EventHandler[V] = null,
private val cbkHandler: CallbackHandler[V] = null) extends Logging {
private var eventHandler = inputEventHandler
if(eventHandler == null)
eventHandler = new DefaultEventHandler(config, cbkHandler)
if(serializer == null)
throw new InvalidConfigException("serializer passed in is null!")
private var sync: Boolean = true
config.producerType match {
case "sync" =>
case "async" => sync = false
case _ => throw new InvalidConfigException("Valid values for producer.type are sync/async")
}
def this(config: ProducerConfig, serializer: Encoder[V],
eventHandler: EventHandler[V], cbkHandler: CallbackHandler[V]) =
this(config, serializer,
new ConcurrentHashMap[Int, SyncProducer](),
new ConcurrentHashMap[Int, AsyncProducer[V]](),
eventHandler, cbkHandler)
def this(config: ProducerConfig, serializer: Encoder[V]) = this(config, serializer,
new ConcurrentHashMap[Int, SyncProducer](),
new ConcurrentHashMap[Int, AsyncProducer[V]](),
Utils.getObject(config.eventHandler),
Utils.getObject(config.cbkHandler))
/**
   * Adds a new producer, either synchronous or asynchronous, connecting
   * to the specified broker.
   * @param broker the broker to connect to; its id, host and port configure the new producer
*/
def addProducer(broker: Broker) {
val props = new Properties()
props.put("host", broker.host)
props.put("port", broker.port.toString)
props.putAll(config.props)
if(sync) {
val producer = new SyncProducer(new SyncProducerConfig(props))
info("Creating sync producer for broker id = " + broker.id + " at " + broker.host + ":" + broker.port)
syncProducers.put(broker.id, producer)
} else {
val producer = new AsyncProducer[V](new AsyncProducerConfig(props),
new SyncProducer(new SyncProducerConfig(props)),
serializer,
eventHandler, config.eventHandlerProps,
cbkHandler, config.cbkHandlerProps)
producer.start
info("Creating async producer for broker id = " + broker.id + " at " + broker.host + ":" + broker.port)
asyncProducers.put(broker.id, producer)
}
}
/**
   * Selects either a synchronous or an asynchronous producer for the specified
   * broker id, and calls the send API on the selected producer to publish the
   * data to the specified broker partition.
* @param poolData the producer pool request object
*/
def send(poolData: ProducerPoolData[V]*) {
val distinctBrokers = poolData.map(pd => pd.getBidPid.brokerId).distinct
var remainingRequests = poolData.toSeq
distinctBrokers.foreach { bid =>
val requestsForThisBid = remainingRequests partition (_.getBidPid.brokerId == bid)
remainingRequests = requestsForThisBid._2
if(sync) {
val producerRequests = requestsForThisBid._1.map(req => new ProducerRequest(req.getTopic, req.getBidPid.partId,
new ByteBufferMessageSet(compressionCodec = config.compressionCodec,
messages = req.getData.map(d => serializer.toMessage(d)): _*)))
debug("Fetching sync producer for broker id: " + bid)
val producer = syncProducers.get(bid)
if(producer != null) {
if(producerRequests.size > 1)
producer.multiSend(producerRequests.toArray)
else
producer.send(topic = producerRequests(0).topic,
partition = producerRequests(0).partition,
messages = producerRequests(0).messages)
config.compressionCodec match {
case NoCompressionCodec => debug("Sending message to broker " + bid)
case _ => debug("Sending compressed messages to broker " + bid)
}
}else
throw new UnavailableProducerException("Producer pool has not been initialized correctly. " +
"Sync Producer for broker " + bid + " does not exist in the pool")
}else {
debug("Fetching async producer for broker id: " + bid)
val producer = asyncProducers.get(bid)
if(producer != null) {
requestsForThisBid._1.foreach { req =>
req.getData.foreach(d => producer.send(req.getTopic, d, req.getBidPid.partId))
}
if(logger.isDebugEnabled)
config.compressionCodec match {
case NoCompressionCodec => debug("Sending message")
case _ => debug("Sending compressed messages")
}
}
else
throw new UnavailableProducerException("Producer pool has not been initialized correctly. " +
"Async Producer for broker " + bid + " does not exist in the pool")
}
}
}
/**
* Closes all the producers in the pool
*/
def close() = {
config.producerType match {
case "sync" =>
info("Closing all sync producers")
val iter = syncProducers.values.iterator
while(iter.hasNext)
iter.next.close
case "async" =>
info("Closing all async producers")
val iter = asyncProducers.values.iterator
while(iter.hasNext)
iter.next.close
}
}
/**
* This constructs and returns the request object for the producer pool
* @param topic the topic to which the data should be published
* @param bidPid the broker id and partition id
* @param data the data to be published
*/
def getProducerPoolData(topic: String, bidPid: Partition, data: Seq[V]): ProducerPoolData[V] = {
new ProducerPoolData[V](topic, bidPid, data)
}
class ProducerPoolData[V](topic: String,
bidPid: Partition,
data: Seq[V]) {
def getTopic: String = topic
def getBidPid: Partition = bidPid
def getData: Seq[V] = data
}
}
| piavlo/operations-debs-kafka | core/src/main/scala/kafka/producer/ProducerPool.scala | Scala | apache-2.0 | 7,827 |
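/*
 * A hedged sketch, detached from the Kafka classes above: it mirrors only the routing step
 * of ProducerPool.send, which takes the distinct broker ids of the incoming requests and
 * peels off each broker's batch with partition(). The Request type and sample data are
 * invented stand-ins for ProducerPoolData.
 */
object SendGroupingSketch extends App {
  final case class Request(brokerId: Int, topic: String, payload: String)

  val requests = Seq(
    Request(0, "events", "a"), Request(1, "events", "b"),
    Request(0, "logs", "c"), Request(1, "logs", "d"))

  val distinctBrokers = requests.map(_.brokerId).distinct
  var remaining = requests
  distinctBrokers.foreach { bid =>
    val (forThisBroker, rest) = remaining.partition(_.brokerId == bid)
    remaining = rest
    println(s"broker $bid gets: " + forThisBroker.map(_.payload).mkString(", "))
  }
}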
package com.catinthedark.lib
import scala.collection.mutable.ListBuffer
/**
* Created by over on 29.01.15.
*/
case class Holder(timeout: Long, f: () => Unit)
trait LocalDeferred extends SimpleUnit with Deferred {
private[this] var tasks = new ListBuffer[Holder]
override def defer(timeout: Float, f: () => Unit) =
tasks += Holder(System.currentTimeMillis() + (timeout * 1000).toLong, f)
override def run(delay: Float) = {
val now = System.currentTimeMillis()
//run the tasks that are due, and then
val needFilter = tasks.map(h =>
if (now > h.timeout) {
h.f()
1
}
else 0
).sum != 0
//remove executed tasks, if any exist
if (needFilter)
tasks = tasks.filter(h => now < h.timeout)
super.run(delay)
}
}
| cat-in-the-dark/old48_32_game | src/main/scala/com/catinthedark/lib/LocalDeferred.scala | Scala | apache-2.0 | 781 |
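/*
 * A hedged sketch, decoupled from the SimpleUnit/Deferred traits above: it reproduces the
 * same bookkeeping as LocalDeferred ("run the callbacks whose timeout has passed, then
 * drop them") on a plain ListBuffer, without the game-loop machinery. All names here are
 * illustrative.
 */
import scala.collection.mutable.ListBuffer

object DeferredSketch extends App {
  final case class Task(deadlineMillis: Long, f: () => Unit)

  private val tasks = ListBuffer.empty[Task]

  def defer(timeoutSeconds: Float)(f: () => Unit): Unit =
    tasks += Task(System.currentTimeMillis() + (timeoutSeconds * 1000).toLong, f)

  def tick(): Unit = {
    val now = System.currentTimeMillis()
    val (due, pending) = tasks.partition(_.deadlineMillis <= now)
    due.foreach(_.f())   // run everything that is overdue
    tasks.clear()
    tasks ++= pending    // keep only the tasks that are still waiting
  }

  defer(0f)(() => println("runs on the first tick"))
  defer(60f)(() => println("still pending"))
  tick()
  println(s"${tasks.size} task(s) still pending") // 1
}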
package redis.protocol
import akka.util.ByteString
import scala.annotation.tailrec
import scala.collection.mutable
import scala.util.Try
import redis.MultiBulkConverter
sealed trait RedisReply {
def toByteString: ByteString
def asOptByteString: Option[ByteString]
}
case class Status(status: ByteString) extends RedisReply {
def toBoolean: Boolean = status == Status.okByteString
override def toString = status.utf8String
def toByteString: ByteString = status
def asOptByteString: Option[ByteString] = Some(status)
}
object Status {
val okByteString = ByteString("OK")
}
case class Error(error: ByteString) extends RedisReply {
override def toString = error.utf8String
def toByteString: ByteString = error
def asOptByteString: Option[ByteString] = Some(error)
}
case class Integer(i: ByteString) extends RedisReply {
def toLong: Long = ParseNumber.parseLong(i)
def toInt: Int = ParseNumber.parseInt(i)
def toBoolean = i == Integer.trueByteString
override def toString = i.utf8String
def toByteString: ByteString = i
def asOptByteString: Option[ByteString] = Some(i)
}
object Integer {
val trueByteString = ByteString("1")
}
case class Bulk(response: Option[ByteString]) extends RedisReply {
// looks wrong
override def toString = response.map(_.utf8String).get
def toByteString: ByteString = response.get
def toOptString: Option[String] = response.map(_.utf8String)
def asOptByteString: Option[ByteString] = response
}
case class MultiBulk(responses: Option[Vector[RedisReply]]) extends RedisReply {
def toByteString: ByteString = throw new NoSuchElementException()
def asOptByteString: Option[ByteString] = throw new NoSuchElementException()
def asTry[A](implicit convert: MultiBulkConverter[A]): Try[A] = convert.to(this)
def asOpt[A](implicit convert: MultiBulkConverter[A]): Option[A] = asTry(convert).toOption
}
case class PartialMultiBulk(i: Int, acc: mutable.Buffer[RedisReply]) extends RedisReply {
override def toByteString: ByteString = throw new NoSuchElementException()
override def asOptByteString: Option[ByteString] = throw new NoSuchElementException()
}
sealed trait DecodeResult[+A] {
def rest: ByteString
def isFullyDecoded: Boolean
def foreach[B](f: A => Unit): DecodeResult[Unit] = this match {
case p: PartiallyDecoded[A] => PartiallyDecoded(ByteString(), bs => p.f(p.rest ++ bs).foreach(f))
case fd: FullyDecoded[A] => FullyDecoded(f(fd.result), fd.rest)
}
def map[B](f: A => B): DecodeResult[B] = this match {
case p: PartiallyDecoded[A] => PartiallyDecoded(ByteString(), bs => p.f(p.rest ++ bs).map(f))
case fd: FullyDecoded[A] => FullyDecoded(f(fd.result), fd.rest)
}
def flatMap[B](f: (A, ByteString) => DecodeResult[B]): DecodeResult[B] = this match {
case p: PartiallyDecoded[A] => PartiallyDecoded(ByteString(), bs => p.f(p.rest ++ bs).flatMap(f))
case fd: FullyDecoded[A] => f(fd.result, fd.rest)
}
def run(next: ByteString): DecodeResult[A] = this match {
case p: PartiallyDecoded[A] => p.f(p.rest ++ next)
case fd: FullyDecoded[A] => FullyDecoded(fd.result, fd.rest ++ next)
}
}
case class PartiallyDecoded[A](rest: ByteString, f: ByteString => DecodeResult[A]) extends DecodeResult[A] {
override def isFullyDecoded: Boolean = false
}
case class FullyDecoded[A](result: A, rest: ByteString) extends DecodeResult[A] {
override def isFullyDecoded: Boolean = true
}
object DecodeResult {
val unit: DecodeResult[Unit] = FullyDecoded((), ByteString.empty)
}
object RedisProtocolReply {
val ERROR = '-'
val STATUS = '+'
val INTEGER = ':'
val BULK = '$'
val MULTIBULK = '*'
val LS = "\\r\\n".getBytes("UTF-8")
def decodeReply(bs: ByteString): DecodeResult[RedisReply] = {
if (bs.isEmpty) {
PartiallyDecoded(bs, decodeReply)
} else {
bs.head match {
case ERROR => decodeString(bs.tail).map(Error(_))
case INTEGER => decodeInteger(bs.tail)
case STATUS => decodeString(bs.tail).map(Status(_))
case BULK => decodeBulk(bs.tail)
case MULTIBULK => decodeMultiBulk(bs.tail)
case _ => throw new Exception("Redis Protocol error: Got " + bs.head + " as initial reply byte >>"+ bs.tail.utf8String)
}
}
}
val decodeReplyPF: PartialFunction[ByteString, DecodeResult[RedisReply]] = {
case bs if bs.head == INTEGER => decodeInteger(bs.tail)
case bs if bs.head == STATUS => decodeString(bs.tail).map(Status(_))
case bs if bs.head == BULK => decodeBulk(bs.tail)
case bs if bs.head == MULTIBULK => decodeMultiBulk(bs.tail)
}
val decodeReplyStatus: PartialFunction[ByteString, DecodeResult[Status]] = {
case bs if bs.head == STATUS => decodeString(bs.tail).map(Status(_))
}
val decodeReplyInteger: PartialFunction[ByteString, DecodeResult[Integer]] = {
case bs if bs.head == INTEGER => decodeInteger(bs.tail)
}
val decodeReplyBulk: PartialFunction[ByteString, DecodeResult[Bulk]] = {
case bs if bs.head == BULK => decodeBulk(bs.tail)
}
val decodeReplyMultiBulk: PartialFunction[ByteString, DecodeResult[MultiBulk]] = {
case bs if bs.head == MULTIBULK => decodeMultiBulk(bs.tail)
}
val decodeReplyError: PartialFunction[ByteString, DecodeResult[Error]] = {
case bs if bs.head == ERROR => decodeString(bs.tail).map(Error(_))
}
def decodeInteger(bs: ByteString): DecodeResult[Integer] = {
decodeString(bs).map { (string) => Integer(string) }
}
def decodeString(bs: ByteString): DecodeResult[ByteString] = {
val index = bs.indexOf('\n')
if (index >= 0 && bs.length >= index + 1) {
val reply = bs.take(index + 1 - LS.length)
val tail = bs.drop(index + 1)
val r = FullyDecoded(reply, tail)
r
} else {
PartiallyDecoded(bs, decodeString)
}
}
def decodeBulk(bs: ByteString): DecodeResult[Bulk] = {
def decodeBulkBody(integer: Integer, bsRest: ByteString): DecodeResult[Bulk] = {
val i = integer.toInt
if (i < 0) {
FullyDecoded(Bulk(None), bsRest)
} else if (bsRest.length < (i + LS.length)) {
PartiallyDecoded(bsRest, decodeBulkBody(integer, _))
} else {
val data = bsRest.take(i)
FullyDecoded(Bulk(Some(data)), bsRest.drop(i).drop(LS.length))
}
}
decodeInteger(bs).flatMap(decodeBulkBody)
}
def decodeMultiBulk(bs: ByteString): DecodeResult[MultiBulk] = {
decodeInteger(bs).flatMap { (integer, bsRest) =>
val i = integer.toInt
if (i < 0) {
FullyDecoded(MultiBulk(None), bsRest)
} else if (i == 0) {
FullyDecoded(MultiBulk(Some(Vector.empty)), bsRest)
} else {
val builder = Vector.newBuilder[RedisReply]
builder.sizeHint(i)
bulks(i, builder, bsRest)
}
}
}
def bulks(i: Int, builder: mutable.Builder[RedisReply, Vector[RedisReply]], byteString: ByteString): DecodeResult[MultiBulk] = {
@tailrec
def helper(i: Int, bs: ByteString): DecodeResult[Int] = {
if (i > 0) {
val reply = decodeReply(bs)
.map { r =>
builder += r
i - 1
}
if (reply.isFullyDecoded)
helper(i - 1, reply.rest)
else
reply
} else {
FullyDecoded(0, bs)
}
}
helper(i, byteString).flatMap { (i, bs) =>
if (i > 0) {
bulks(i, builder, bs)
} else {
FullyDecoded[MultiBulk](MultiBulk(Some(builder.result())), bs)
}
}
}
}
| xuwei-k/rediscala | src/main/scala/redis/protocol/RedisProtocolReply.scala | Scala | apache-2.0 | 7,487 |
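/*
 * A hedged sketch, assuming the rediscala sources above and akka.util.ByteString are on
 * the classpath: it feeds one complete and one split-up reply through
 * RedisProtocolReply.decodeReply to show the FullyDecoded / PartiallyDecoded distinction
 * and how run() resumes a partial decode when more bytes arrive.
 */
import akka.util.ByteString
import redis.protocol.RedisProtocolReply

object DecodeSketch extends App {
  // "+OK\r\n" arrives in one piece, so decoding finishes immediately.
  val whole = RedisProtocolReply.decodeReply(ByteString("+OK\r\n"))
  println(whole.isFullyDecoded) // true

  // "$5\r\nhello\r\n" arrives split across two frames: the first call yields a partial
  // result, and feeding the remaining bytes with run() completes it.
  val partial = RedisProtocolReply.decodeReply(ByteString("$5\r\nhe"))
  println(partial.isFullyDecoded) // false
  val completed = partial.run(ByteString("llo\r\n"))
  println(completed.isFullyDecoded) // true
}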
package org.shapelogic.sc.polygon
/**
*
* @author Sami Badawi
*
*/
trait IPoint2D extends Comparable[IPoint2D] with Cloneable with GeometricShape2D with PointReplacable[IPoint2D] {
def setLocation(x: Double, y: Double): Unit
def minus(that: IPoint2D): IPoint2D
def add(that: IPoint2D): IPoint2D
def multiply(multiplier: Double): IPoint2D
def isNull(): Boolean
def toDoubleArray(): Array[Double]
def getX(): Double
def getY(): Double
def min(that: IPoint2D): IPoint2D
def max(that: IPoint2D): IPoint2D
def distance(that: IPoint2D): Double
def distanceFromOrigin(): Double
def round(): IPoint2D
def copy(): IPoint2D
def angle(): Double
def isOnAxis(): Boolean
def isOnDiagonal(): Boolean
def turn90(): IPoint2D
}
| sami-badawi/shapelogic-scala | src/main/scala/org/shapelogic/sc/polygon/IPoint2D.scala | Scala | mit | 769 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.utils.tf.loaders
import java.nio.ByteOrder
import com.intel.analytics.bigdl.Module
import com.intel.analytics.bigdl.dllib.nn.ops.{NotEqual => NotEqualOperation}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.tf.Context
import org.tensorflow.framework.NodeDef
import scala.reflect.ClassTag
class NotEqual extends TensorflowOpsLoader {
import Utils._
override def build[T: ClassTag](nodeDef: NodeDef, byteOrder: ByteOrder
, context: Context[T])(implicit ev: TensorNumeric[T]): Module[T] = {
new NotEqualOperation[T]()
}
}
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/NotEqual.scala | Scala | apache-2.0 | 1,258 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.consumers
import monix.execution.Callback
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Ack, Cancelable, Scheduler}
import monix.execution.atomic.{Atomic, PaddingStrategy}
import monix.execution.cancelables.{AssignableCancelable, SingleAssignCancelable}
import scala.util.control.NonFatal
import monix.reactive.Consumer
import monix.reactive.internal.consumers.LoadBalanceConsumer.IndexedSubscriber
import monix.reactive.observers.Subscriber
import scala.annotation.tailrec
import scala.collection.immutable.{BitSet, Queue}
import scala.collection.mutable.ListBuffer
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success}
/** Implementation for [[monix.reactive.Consumer.loadBalance]]. */
private[reactive] final class LoadBalanceConsumer[-In, R](parallelism: Int, consumers: Array[Consumer[In, R]])
extends Consumer[In, List[R]] {
require(parallelism > 0, s"parallelism = $parallelism, should be > 0")
require(consumers.length > 0, "consumers list must not be empty")
// NOTE: onFinish MUST BE synchronized by `self` and
// double-checked by means of `isDone`
def createSubscriber(onFinish: Callback[Throwable, List[R]], s: Scheduler): (Subscriber[In], AssignableCancelable) = {
// Assignable cancelable returned, can be used to cancel everything
// since it will be assigned the stream subscription
val mainCancelable = SingleAssignCancelable()
val balanced = new Subscriber[In] { self =>
implicit val scheduler = s
// Trying to prevent contract violations, once this turns
// true, then no final events are allowed to happen.
// MUST BE synchronized by `self`.
private[this] var isUpstreamComplete = false
// Trying to prevent contract violations. Turns true in case
// we already signaled a result upstream.
// MUST BE synchronized by `self`.
private[this] var isDownstreamDone = false
// Stores the error that was reported upstream - basically
// multiple subscribers can report multiple errors, but we
// emit the first one, so in case multiple errors happen we
// want to log them, but only if they aren't the same reference
// MUST BE synchronized by `self`
private[this] var reportedError: Throwable = _
// Results accumulator - when length == parallelism,
// that's when we need to trigger `onFinish.onSuccess`.
// MUST BE synchronized by `self`
private[this] val accumulator = ListBuffer.empty[R]
/** Builds cancelables for subscribers. */
private def newCancelableFor(out: IndexedSubscriber[In]): Cancelable =
new Cancelable {
private[this] var isCanceled = false
// Forcing an asynchronous boundary, to avoid any possible
// initialization issues (in building subscribersQueue) or
// stack overflows and other problems
def cancel(): Unit = scheduler.executeAsync { () =>
// We are required to synchronize, because we need to
// make sure that subscribersQueue is fully created before
// triggering any cancellation!
self.synchronized {
// Guards the idempotency contract of cancel(); not really
// required, because `deactivate()` should be idempotent, but
// since we are doing an expensive synchronize, we might as well
if (!isCanceled) {
isCanceled = true
interruptOne(out, null)
}
}
}
}
// Asynchronous queue that serves idle subscribers waiting
// for something to process, or that puts the stream on wait
// until there are subscribers available
private[this] val subscribersQueue = self.synchronized {
var initial = Queue.empty[IndexedSubscriber[In]]
// When the callback gets called by each subscriber, on success we
// do nothing because for normal completion we are listing on
// `Stop` events from onNext, but on failure we deactivate all.
val callback = new Callback[Throwable, R] {
def onSuccess(value: R): Unit =
accumulate(value)
def onError(ex: Throwable): Unit =
interruptAll(ex)
}
val arrLen = consumers.length
var i = 0
while (i < parallelism) {
val (out, c) = consumers(i % arrLen).createSubscriber(callback, s)
val indexed = IndexedSubscriber(i, out)
// Every created subscriber has the opportunity to cancel the
// main subscription if needed, cancellation thus happening globally
c := newCancelableFor(indexed)
initial = initial.enqueue(indexed)
i += 1
}
new LoadBalanceConsumer.AsyncQueue(initial, parallelism)
}
def onNext(elem: In): Future[Ack] = {
// Declares a stop event, marking the upstream as complete
def stop(): Ack = self.synchronized {
// Protecting against contract violations
isUpstreamComplete = true
Stop
}
// Are there subscribers available?
val sf = subscribersQueue.poll()
// Doing a little optimization to prevent one async boundary
sf.value match {
case Some(Success(subscriber)) =>
// As a matter of protocol, if null values happen, then
// this means that all subscribers have been deactivated and so
// we should cancel the streaming.
if (subscriber == null) stop()
else {
signalNext(subscriber, elem)
Continue
}
case _ =>
sf.map {
case null => stop()
case subscriber =>
signalNext(subscriber, elem)
Continue
}
}
}
/** Triggered whenever a subscriber finishes with onSuccess. */
private def accumulate(value: R): Unit = self.synchronized {
if (!isDownstreamDone) {
accumulator += value
if (accumulator.length == parallelism) {
isDownstreamDone = true
onFinish.onSuccess(accumulator.toList)
// GC relief
accumulator.clear()
}
}
}
/** Triggered whenever we need to signal an `onError` upstream */
private def reportErrorUpstream(ex: Throwable) = self.synchronized {
if (isDownstreamDone) {
// We only report errors that we haven't
// reported to upstream by means of `onError`!
if (reportedError != ex)
scheduler.reportFailure(ex)
} else {
isDownstreamDone = true
reportedError = ex
onFinish.onError(ex)
// GC relief
accumulator.clear()
}
}
/** Called whenever a subscriber stops its subscription, or
* when an error gets thrown.
*/
private def interruptOne(out: IndexedSubscriber[In], ex: Throwable): Unit = {
// Deactivating the subscriber. In case all subscribers
// have been deactivated, then we are done
if (subscribersQueue.deactivate(out))
interruptAll(ex)
}
/** When Stop or error is received, this makes sure the
* streaming gets interrupted!
*/
private def interruptAll(ex: Throwable): Unit = self.synchronized {
// All the following operations are idempotent!
isUpstreamComplete = true
mainCancelable.cancel()
subscribersQueue.deactivateAll()
// Is this an error to signal?
if (ex != null) reportErrorUpstream(ex)
}
/** Given a subscriber, signals the given element, then returns
* the subscriber to the queue if possible.
*/
private def signalNext(out: IndexedSubscriber[In], elem: In): Unit = {
// We are forcing an asynchronous boundary here, since we
// don't want to block the main thread!
scheduler.executeAsync { () =>
try out.out.onNext(elem).syncOnComplete {
case Success(ack) =>
ack match {
case Continue =>
// We have permission to continue from this subscriber
// so returning it to the queue, to be reused
subscribersQueue.offer(out)
case Stop =>
interruptOne(out, null)
}
case Failure(ex) =>
interruptAll(ex)
} catch {
case ex if NonFatal(ex) =>
interruptAll(ex)
}
}
}
def onComplete(): Unit =
signalComplete(null)
def onError(ex: Throwable): Unit =
signalComplete(ex)
private def signalComplete(ex: Throwable): Unit = {
def loop(activeCount: Int): Future[Unit] = {
// If we no longer have active subscribers to
// push events into, then the loop is finished
if (activeCount <= 0)
Future.successful(())
else
subscribersQueue.poll().flatMap {
// By protocol, if a null happens, then there are
// no more active subscribers available
case null =>
Future.successful(())
case subscriber =>
try {
if (ex == null) subscriber.out.onComplete()
else subscriber.out.onError(ex)
} catch {
case err if NonFatal(err) => s.reportFailure(err)
}
if (activeCount > 0) loop(activeCount - 1)
else Future.successful(())
}
}
self.synchronized {
// Protecting against contract violations.
if (!isUpstreamComplete) {
isUpstreamComplete = true
// Starting the loop
loop(subscribersQueue.activeCount).onComplete {
case Success(()) =>
if (ex != null) reportErrorUpstream(ex)
case Failure(err) =>
reportErrorUpstream(err)
}
} else if (ex != null) {
reportErrorUpstream(ex)
}
}
}
}
(balanced, mainCancelable)
}
}
private[reactive] object LoadBalanceConsumer {
/** Wraps a subscriber implementation into one
* that exposes an ID.
*/
private[reactive] final case class IndexedSubscriber[-In](id: Int, out: Subscriber[In])
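/** Atomic state machine coordinating the upstream producer with idle subscribers:
* `Available` holds the queue of idle subscribers plus the set of canceled IDs,
* while `Waiting` parks the producer on a promise until a subscriber is offered back.
*/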
private final class AsyncQueue[In](initialQueue: Queue[IndexedSubscriber[In]], parallelism: Int) {
private[this] val stateRef = {
val initial: State[In] = Available(initialQueue, BitSet.empty, parallelism)
Atomic.withPadding(initial, PaddingStrategy.LeftRight256)
}
def activeCount: Int =
stateRef.get().activeCount
@tailrec
def offer(value: IndexedSubscriber[In]): Unit =
stateRef.get() match {
case current @ Available(queue, canceledIDs, ac) =>
if (ac > 0 && !canceledIDs(value.id)) {
val update = Available(queue.enqueue(value), canceledIDs, ac)
if (!stateRef.compareAndSet(current, update))
offer(value)
}
case current @ Waiting(promise, canceledIDs, ac) =>
if (!canceledIDs(value.id)) {
val update = Available[In](Queue.empty, canceledIDs, ac)
if (!stateRef.compareAndSet(current, update))
offer(value)
else {
promise.success(value)
()
}
}
}
@tailrec
def poll(): Future[IndexedSubscriber[In]] =
stateRef.get() match {
case current @ Available(queue, canceledIDs, ac) =>
if (ac <= 0)
Future.successful(null)
else if (queue.isEmpty) {
val p = Promise[IndexedSubscriber[In]]()
val update = Waiting(p, canceledIDs, ac)
if (!stateRef.compareAndSet(current, update))
poll()
else
p.future
} else {
val (ref, newQueue) = queue.dequeue
val update = Available(newQueue, canceledIDs, ac)
if (!stateRef.compareAndSet(current, update))
poll()
else
Future.successful(ref)
}
case Waiting(_, _, _) =>
Future.failed(new IllegalStateException("waiting in poll()"))
}
@tailrec
def deactivateAll(): Unit =
stateRef.get() match {
case current @ Available(_, canceledIDs, _) =>
val update: State[In] = Available(Queue.empty, canceledIDs, 0)
if (!stateRef.compareAndSet(current, update))
deactivateAll()
case current @ Waiting(promise, canceledIDs, _) =>
val update: State[In] = Available(Queue.empty, canceledIDs, 0)
if (!stateRef.compareAndSet(current, update))
deactivateAll()
else {
promise.success(null)
()
}
}
@tailrec
def deactivate(ref: IndexedSubscriber[In]): Boolean =
stateRef.get() match {
case current @ Available(queue, canceledIDs, count) =>
if (count <= 0) true
else {
val update =
if (canceledIDs(ref.id)) current
else {
val newQueue = queue.filterNot(_.id == ref.id)
Available(newQueue, canceledIDs + ref.id, count - 1)
}
if (update.activeCount == current.activeCount)
false // nothing to update
else if (!stateRef.compareAndSet(current, update))
deactivate(ref) // retry
else
update.activeCount == 0
}
case current @ Waiting(promise, canceledIDs, count) =>
if (canceledIDs(ref.id)) count <= 0
else {
val update =
if (count - 1 > 0) Waiting(promise, canceledIDs + ref.id, count - 1)
else Available[In](Queue.empty, canceledIDs + ref.id, 0)
if (!stateRef.compareAndSet(current, update))
deactivate(ref) // retry
else if (update.activeCount <= 0) {
promise.success(null)
true
} else
false
}
}
}
private[reactive] sealed trait State[In] {
def activeCount: Int
def canceledIDs: Set[Int]
}
private[reactive] final case class Available[In](
available: Queue[IndexedSubscriber[In]],
canceledIDs: BitSet,
activeCount: Int)
extends State[In]
private[reactive] final case class Waiting[In](
promise: Promise[IndexedSubscriber[In]],
canceledIDs: BitSet,
activeCount: Int)
extends State[In]
}
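/** A minimal usage sketch of the load-balancing consumer by way of the public
* `Consumer.loadBalance` builder. It assumes `monix.eval.Task` and
* `monix.reactive.Observable` are on the classpath; the object name, the
* parallelism and the range below are illustrative only.
*/
private[reactive] object LoadBalanceConsumerExample {
  import monix.eval.Task
  import monix.reactive.Observable

  def sumInParallel: Task[List[Long]] = {
    // Each worker folds its share of the stream into a partial sum
    val worker = Consumer.foldLeft[Long, Long](0L)(_ + _)
    // Distribute elements over 4 copies of the worker; the final result is
    // the list of partial results, one per worker
    val balanced = Consumer.loadBalance(4, worker)
    Observable.range(0, 10000).consumeWith(balanced)
  }
}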
|
alexandru/monifu
|
monix-reactive/shared/src/main/scala/monix/reactive/internal/consumers/LoadBalanceConsumer.scala
|
Scala
|
apache-2.0
| 15,482 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet.annotation
import java.lang.annotation.{ElementType, Retention, Target, _}
@Retention(RetentionPolicy.RUNTIME)
@Target(Array(ElementType.TYPE, ElementType.FIELD, ElementType.METHOD, ElementType.PARAMETER,
ElementType.CONSTRUCTOR, ElementType.LOCAL_VARIABLE, ElementType.PACKAGE))
class Experimental {}
|
indhub/mxnet
|
scala-package/core/src/main/scala/org/apache/mxnet/annotation/Experimental.scala
|
Scala
|
apache-2.0
| 1,133 |
// Copyright (c) 2016 Yuichiroh Matsubayashi
package yuima.nuimo.handler
import yuima.nuimo.Nuimode
import yuima.nuimo.action.{Key, KeyCode, KeyCodes}
object LightroomHandler extends DefaultHandler {
override val leftRotationSensitivity: Int = 20
override val rightRotationSensitivity: Int = 20
override val actionSpeed = 1
val sliderDelta = 5
val largeDelta = 20
override def onSwipeLeft(client: Nuimode, uuid: String): Unit = {
KeyCode(Key.LeftArrow).runScript()
}
override def onSwipeRight(client: Nuimode, uuid: String): Unit = {
KeyCode(Key.RightArrow).runScript()
}
override def onSwipeUp(client: Nuimode, uuid: String): Unit = {
KeyCode(Key.Comma).runScript()
}
override def onSwipeDown(client: Nuimode, uuid: String): Unit = {
KeyCode(Key.Period).runScript()
}
override def onRotateLeft(client: Nuimode, uuid: String, velocity: Int): Unit = {
moveSlider(math.abs(velocity), KeyCode(Key.Minus))
}
def moveSlider(velocity: Int, key: KeyCode): Unit = {
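// Velocity is mapped onto three granularities of the same slider key:
// shift-modified presses for coarse steps (largeDelta), plain presses for
// medium steps (sliderDelta) and option-modified presses for the remainder.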
println(velocity)
val large = velocity / largeDelta
val normal = velocity / sliderDelta
val small = velocity % sliderDelta
if (large > 0) KeyCodes(Seq.fill(large)(key.withShift)).runScript()
if (normal > 0) KeyCodes(Seq.fill(normal)(key)).runScript()
if (small > 0) KeyCodes(Seq.fill(small)(key.withOpt)).runScript()
}
override def onPressRotateLeft(client: Nuimode, uuid: String, velocity: Int): Unit = {
KeyCodes(Seq.fill(math.abs(velocity / 2))(KeyCode(Key.Minus).withShift)).runScript()
}
override def onRotateRight(client: Nuimode, uuid: String, velocity: Int): Unit = {
moveSlider(velocity, KeyCode(Key.Equal))
}
override def onPressRotateRight(client: Nuimode, uuid: String, velocity: Int): Unit = {
KeyCodes(Seq.fill(velocity / 2)(KeyCode(Key.Equal).withShift)).runScript()
}
override def onRelease(client: Nuimode, uuid: String, clickCount: Int): Unit = {
KeyCode(Key.Period).runScript()
}
}
|
Yuichiroh/nuimo-manager
|
src/main/scala/yuima/nuimo/handler/LightroomHandler.scala
|
Scala
|
mit
| 1,987 |
package mesosphere.marathon
package api
import javax.servlet.http.HttpServletRequest
import javax.ws.rs._
import javax.ws.rs.core.{ Context, MediaType, Response }
import akka.actor.ActorSystem
import ch.qos.logback.classic.{ Level, Logger, LoggerContext }
import com.google.inject.Inject
import com.typesafe.config.{ Config, ConfigRenderOptions }
import com.typesafe.scalalogging.StrictLogging
import com.wix.accord.Validator
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.plugin.auth.AuthorizedResource.{ SystemConfig, SystemMetrics }
import mesosphere.marathon.plugin.auth.{ Authenticator, Authorizer, UpdateResource, ViewResource }
import mesosphere.marathon.raml.{ LoggerChange, Raml }
import mesosphere.marathon.raml.MetricsConversion._
import org.slf4j.LoggerFactory
import play.api.libs.json.Json
import stream.Implicits._
import com.wix.accord.dsl._
import scala.concurrent.duration._
/**
* System Resource gives access to system level functionality.
* All system resources can be protected via ACLs.
*/
@Path("")
@Consumes(Array(MediaType.APPLICATION_JSON))
@Produces(Array(MarathonMediaType.PREFERRED_APPLICATION_JSON))
class SystemResource @Inject() (val config: MarathonConf, cfg: Config)(implicit
val authenticator: Authenticator,
val authorizer: Authorizer,
actorSystem: ActorSystem) extends RestResource with AuthResource with StrictLogging {
@GET
@Path("ping")
def ping(): Response = ok("pong")
@GET
@Path("metrics")
def metrics(@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
withAuthorization(ViewResource, SystemMetrics){
ok(jsonString(Raml.toRaml(Metrics.snapshot())))
}
}
@GET
@Path("config")
def config(@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
withAuthorization(ViewResource, SystemConfig) {
ok(cfg.root().render(ConfigRenderOptions.defaults().setJson(true)))
}
}
@GET
@Path("logging")
def showLoggers(@Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
withAuthorization(ViewResource, SystemConfig) {
LoggerFactory.getILoggerFactory match {
case lc: LoggerContext =>
ok(lc.getLoggerList.map { logger =>
logger.getName -> Option(logger.getLevel).map(_.levelStr).getOrElse(logger.getEffectiveLevel.levelStr + " (inherited)")
}.toMap)
}
}
}
@POST
@Path("logging")
def changeLogger(body: Array[Byte], @Context req: HttpServletRequest): Response = authenticated(req) { implicit identity =>
withAuthorization(UpdateResource, SystemConfig) {
withValid(Json.parse(body).as[LoggerChange]) { change =>
LoggerFactory.getILoggerFactory.getLogger(change.logger) match {
case logger: Logger =>
val level = Level.valueOf(change.level.value.toUpperCase)
// current level can be null, which means: use the parent level
// the current level should be preserved, no matter what the effective level is
val currentLevel = logger.getLevel
val currentEffectiveLevel = logger.getEffectiveLevel
logger.info(s"Set logger ${logger.getName} to $level current: $currentEffectiveLevel")
logger.setLevel(level)
// if a duration is given, we schedule a timer to reset to the current level
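// e.g. a body of {"logger": "mesosphere.marathon", "level": "debug", "durationSeconds": 600}
// raises that logger to DEBUG and schedules a reset to the previous level after ten minutes
// (field names as used on the LoggerChange model above; the values are only an example)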
import mesosphere.marathon.core.async.ExecutionContexts.global
change.durationSeconds.foreach(duration => actorSystem.scheduler.scheduleOnce(duration.seconds, new Runnable {
override def run(): Unit = {
logger.info(s"Duration expired. Reset Logger ${logger.getName} back to $currentEffectiveLevel")
logger.setLevel(currentLevel)
}
}))
ok(change)
}
}
}
}
implicit lazy val validLoggerChange: Validator[LoggerChange] = validator[LoggerChange] { change =>
change.logger is notEmpty
}
}
|
natemurthy/marathon
|
src/main/scala/mesosphere/marathon/api/SystemResource.scala
|
Scala
|
apache-2.0
| 4,021 |
package PACKAGE_UI.CLASS_NAME_UNDERSCORED
import scala.util.Try
import android.app.ActionBar
import android.app.Fragment
import android.content.DialogInterface
import android.content.Intent
import android.os.Bundle
import android.view.LayoutInflater
import android.view.View
import android.view.ViewGroup
import android.widget.Button
import android.widget.LinearLayout
import android.widget.TextView
import android.widget.FrameLayout
import org.scaloid.common._
import com.google.gson.Gson
IMPORT_EDIT_FRAGMENT_FIELDS_DEPENDENCIES
import PACKAGE_TYPEDRESOURCE_IMPLICITS
import PACKAGE_TR
import PACKAGE_R
import PACKAGE_MODELS.CLASS_NAME_AS_IS
IMPORT_OTHER_MODELS
IMPORT_OTHER_MODELS_UI_NO_ARRAYS
object EditCLASS_NAME_AS_ISFragment {
val BUNDLE_MODEL_JSON = "model_json"
val BUNDLE_CREATE_NEW = "create_new"
FRAGMENT_EDIT_ACTIVITIES_REQUEST_MODELS_ID
def newInstance(model: CLASS_NAME_AS_IS): EditCLASS_NAME_AS_ISFragment = {
val arguments = new Bundle()
arguments.putString(BUNDLE_MODEL_JSON, new Gson().toJson(model))
val fragment = new EditCLASS_NAME_AS_ISFragment()
fragment.setArguments(arguments)
fragment
}
}
class EditCLASS_NAME_AS_ISFragment extends Fragment {
var mModel: CLASS_NAME_AS_IS = _
FRAGMENT_EDIT_FIELDS
FRAGMENT_EDIT_OTHER_MODELS_FIELDS
private val mActionBarListener = (view: View) => {
view.getId() match {
case R.id.action_cancel => {
getActivity().setResult(EditCLASS_NAME_AS_ISActivity.RESULT_NOTHING_CHANGED)
getActivity().finish()
}
case R.id.action_done => {
val finalCLASS_NAME_AS_IS = new CLASS_NAME_AS_IS(
FRAGMENT_EDIT_VIEW_GET_FIELDS
)
val data = new Intent()
data.putExtra(EditCLASS_NAME_AS_ISFragment.BUNDLE_MODEL_JSON, new Gson().toJson(finalCLASS_NAME_AS_IS))
getActivity().setResult(EditCLASS_NAME_AS_ISActivity.RESULT_EDIT_OCCURRED, data)
getActivity().finish()
}
}
}
override def onCreate(bundle: Bundle): Unit = {
super.onCreate(bundle)
if (getArguments() != null) {
if (getArguments().getBoolean(EditCLASS_NAME_AS_ISFragment.BUNDLE_CREATE_NEW))
{
mModel = new CLASS_NAME_AS_IS(
RANDOM_DATA_COMMA_SEPARATED
)
}
else
{
val json = getArguments().getString(EditCLASS_NAME_AS_ISFragment.BUNDLE_MODEL_JSON)
mModel = new Gson().fromJson(json, classOf[CLASS_NAME_AS_IS])
}
}
}
override def onCreateView(inflater: LayoutInflater, container: ViewGroup, savedInstanceState: Bundle): View = {
val actionBarButtons = inflater.inflate(TR.layout.actionbar_edit_cancel_done, new LinearLayout(getActivity()), false)
val cancelActionView = actionBarButtons.findView(TR.action_cancel)
cancelActionView.setOnClickListener(mActionBarListener)
val doneActionView = actionBarButtons.findView(TR.action_done)
doneActionView.setOnClickListener(mActionBarListener)
getActivity().getActionBar().setCustomView(actionBarButtons)
getActivity().getActionBar().setDisplayOptions(
ActionBar.DISPLAY_SHOW_CUSTOM,
ActionBar.DISPLAY_HOME_AS_UP | ActionBar.DISPLAY_SHOW_HOME | ActionBar.DISPLAY_SHOW_TITLE | ActionBar.DISPLAY_SHOW_CUSTOM)
val view = inflater.inflate(TR.layout.fragment_CLASS_NAME_UNDERSCORED, container, false)
val CLASS_NAME_UNCAPITALIZEDView = inflater.inflate(TR.layout.fragment_edit_CLASS_NAME_UNDERSCORED, container, false)
val CLASS_NAME_UNCAPITALIZEDFrameLayout = view.findView(TR.CLASS_NAME_UNDERSCORED_container)
CLASS_NAME_UNCAPITALIZEDFrameLayout.addView(CLASS_NAME_UNCAPITALIZEDView)
FRAGMENT_EDIT_ASSIGN_FIELDS
FRAGMENT_EDIT_ASSIGN_FIELDS_OTHER_MODELS
if (mModel != null)
{
FRAGMENT_EDIT_VIEW_SET_FIELDS }
return view
}
FRAGMENT_EDIT_DISPLAY_OTHER_MODELS_FIELDS
FRAGMENT_EDIT_ON_ACTIVITY_RESULT_IF_NEEDED
override def onDestroyView(): Unit = {
super.onDestroyView()
getActivity().getActionBar().setCustomView(null)
}
}
|
luismfonseca/agile-scala-android
|
src/main/resources/scaffold/main/scala/PACKAGE_NAME_AS_DIR/ui/CLASS_NAME_UNDERSCORED/EditCLASS_NAME_AS_ISFragment.scala
|
Scala
|
mit
| 4,119 |
package org.genivi.sota.resolver.test
import akka.http.scaladsl.model.StatusCodes
import eu.timepit.refined.api.Refined
import io.circe.generic.auto._
import org.genivi.sota.data.Device.{DeviceId, DeviceName}
import org.genivi.sota.data.{Device, DeviceT, PackageId, Uuid}
import org.genivi.sota.marshalling.CirceMarshallingSupport._
import org.genivi.sota.rest.{ErrorCodes, ErrorRepresentation}
import org.scalatest.concurrent.ScalaFutures
import Device._
import cats.syntax.show._
import scala.concurrent.Future
/**
* Spec for testing Resolver REST actions
*/
class ResolveResourceSpec extends ResourceWordSpec with ScalaFutures {
val pkgName = "resolvePkg"
lazy val testDevices: Seq[(DeviceT, Uuid)] = {
Future.sequence {
(0 to 4).map { i =>
val d = DeviceT(DeviceName(s"Name $i"),
Some(DeviceId(s"${i}0RES0LVEV1N12345")))
deviceRegistry.createDevice(d).map((d, _))
}
}.futureValue
}
"Resolve resource" should {
"return all VINs if the filter is trivially true" in {
val deviceIds = testDevices.map(_._2)
// Add a package.
addPackageOK("resolvePkg", "0.0.1", None, None)
// Add a trivial filter that lets all VINs through.
addFilterOK("truefilter", "TRUE")
addPackageFilterOK(pkgName, "0.0.1", "truefilter")
resolveOK(pkgName, "0.0.1", deviceIds)
}
"support filtering by VIN" in {
// Add another filter.
addPackageOK("resolvePkg", "0.0.1", None, None)
addFilterOK("0xfilter", s"""vin_matches "^00.*" OR vin_matches "^01.*"""")
addPackageFilterOK(pkgName, "0.0.1", "0xfilter")
val deviceIds = testDevices
.map(e => (e._1.deviceId.get, e._2))
.filter(e => e._1.show.startsWith("00") || e._1.show.startsWith("01"))
.map(_._2)
resolveOK(pkgName, "0.0.1", deviceIds)
}
//noinspection ZeroIndexToHead
"support filtering by installed packages on VIN" in {
// Delete the previous filter and add another one which uses
// has_package instead.
deletePackageFilterOK(pkgName, "0.0.1", "0xfilter")
addPackageOK("apa", "1.0.0", None, None)
addPackageOK("bepa", "1.0.0", None, None)
val (_, id0) = testDevices(0)
val (_, id1) = testDevices(1)
val (_, id2) = testDevices(2)
installPackageOK(id0, "apa", "1.0.0")
installPackageOK(id1, "apa", "1.0.0")
installPackageOK(id2, "bepa", "1.0.0")
addFilterOK("1xfilter", s"""has_package "^a.*" "1.*"""")
addPackageFilterOK(pkgName, "0.0.1", "1xfilter")
resolveOK(pkgName, "0.0.1", List(id0, id1))
}
//noinspection ZeroIndexToHead
"support filtering by hardware components on VIN" in {
// Delete the previous filter and add another one which uses
// has_component instead.
val (_, id0) = testDevices(0)
val (_, id1) = testDevices(1)
val (_, id2) = testDevices(2)
deletePackageFilterOK(pkgName, "0.0.1", "1xfilter")
addComponentOK(Refined.unsafeApply("jobby0"), "nice")
addComponentOK(Refined.unsafeApply("jobby1"), "nice")
installComponentOK(id0, Refined.unsafeApply("jobby0"))
installComponentOK(id1, Refined.unsafeApply("jobby0"))
installComponentOK(id2, Refined.unsafeApply("jobby1"))
addFilterOK("components", s"""has_component "^.*y0"""")
addPackageFilterOK(pkgName, "0.0.1", "components")
resolveOK(pkgName, "0.0.1", List(id0, id1))
}
"return no VINs if the filter is trivially false" in {
// Add trivially false filter.
addFilterOK("falsefilter", "FALSE")
addPackageFilterOK(pkgName, "0.0.1", "falsefilter")
resolveOK(pkgName, "0.0.1", List())
}
"fail if a non-existing package name is given" in {
resolve(defaultNs, "resolvePkg2", "0.0.1") ~> route ~> check {
status shouldBe StatusCodes.NotFound
responseAs[ErrorRepresentation].code shouldBe ErrorCodes.MissingEntity
}
}
"fail if a non-existing package version is given" in {
resolve(defaultNs, pkgName, "0.0.2") ~> route ~> check {
status shouldBe StatusCodes.NotFound
responseAs[ErrorRepresentation].code shouldBe ErrorCodes.MissingEntity
}
}
//noinspection ZeroIndexToHead
"return a string that the core server can parse" in {
val (_, id0) = testDevices(0)
val (_, id1) = testDevices(1)
deletePackageFilter(pkgName, "0.0.1", "falseFilter") ~> route ~> check {
status shouldBe StatusCodes.OK
resolve(defaultNs, pkgName, "0.0.1") ~> route ~> check {
status shouldBe StatusCodes.OK
responseAs[Map[Uuid, Set[PackageId]]] shouldBe
Map(id0 ->
Set(PackageId(Refined.unsafeApply(pkgName), Refined.unsafeApply("0.0.1"))),
id1 ->
Set(PackageId(Refined.unsafeApply(pkgName), Refined.unsafeApply("0.0.1"))))
}
}
}
}
}
|
PDXostc/rvi_sota_server
|
external-resolver/src/test/scala/org/genivi/sota/resolver/test/ResolveResourceSpec.scala
|
Scala
|
mpl-2.0
| 4,941 |
package collins.util
import java.security.SecureRandom
import java.security.Security
import scala.util.Random
import org.bouncycastle.crypto.PBEParametersGenerator
import org.bouncycastle.crypto.digests.SHA256Digest
import org.bouncycastle.crypto.engines.AESEngine
import org.bouncycastle.crypto.generators.PKCS12ParametersGenerator
import org.bouncycastle.crypto.modes.CBCBlockCipher
import org.bouncycastle.crypto.paddings.PKCS7Padding
import org.bouncycastle.crypto.paddings.PaddedBufferedBlockCipher
import org.bouncycastle.crypto.params.ParametersWithIV
import org.bouncycastle.jce.provider.BouncyCastleProvider
import play.api.Play
import javax.crypto.Cipher
import javax.crypto.SecretKeyFactory
import javax.crypto.spec.PBEKeySpec
import javax.crypto.spec.PBEParameterSpec
trait CryptoAccessor {
def getCryptoKey(): String
}
object CryptoCodec {
def apply(key: String) = new CryptoCodec(key)
private val allowedChars = Vector(
('a' to 'z'),
('A' to 'Z'),
('0' to '9')).flatten
private val allowedCharsSz = allowedChars.length
def randomString(length: Int = 12): String = {
val chars = for (i <- 0 until length) yield allowedChars(Random.nextInt(allowedCharsSz))
chars.mkString
}
def withKeyFromFramework = new CryptoCodec(getCryptoKeyFromFramework)
protected def getCryptoKeyFromFramework(): String = {
Play.maybeApplication.map { app =>
app.global match {
case c: CryptoAccessor => c.getCryptoKey()
case _ => throw new RuntimeException("Application global settings is not a CryptoAccessor")
}
}.getOrElse(throw new RuntimeException("Not in application context"))
}
}
class CryptoCodec(privateKey: String, saltSize: Int = 8, iterations: Int = 100) {
private val secretKey = privateKey.toArray
protected def combiner(values: String*) = values.mkString(":")
protected def createSalt(size: Int): Array[Byte] = {
val salt = new Array[Byte](size)
val saltGen = SecureRandom.getInstance("SHA1PRNG")
saltGen.nextBytes(salt)
salt
}
object Decode {
def toUsernamePassword(value: String): Option[(String, String)] = {
apply(value).flatMap { decoded =>
decoded.split(":", 2).toList match {
case username :: password :: Nil =>
Some((username, password))
case _ => None
}
}
}
protected def splitWithSalt(value: String): (Array[Byte], Array[Byte]) = {
val hex = Hex.fromHexString(value)
val splitAt = hex.length - saltSize
hex.splitAt(splitAt)
}
def apply(value: String): Option[String] = {
try {
val (cipher, salt) = splitWithSalt(value)
val pGen = new PKCS12ParametersGenerator(new SHA256Digest())
val pkcs12PasswordBytes = PBEParametersGenerator.PKCS12PasswordToBytes(secretKey)
pGen.init(pkcs12PasswordBytes, salt, iterations)
val aesCBC = new CBCBlockCipher(new AESEngine())
val aesCBCParams = pGen.generateDerivedParameters(256, 128).asInstanceOf[ParametersWithIV]
aesCBC.init(false, aesCBCParams)
val aesCipher = new PaddedBufferedBlockCipher(aesCBC, new PKCS7Padding())
val plainTemp = new Array[Byte](aesCipher.getOutputSize(cipher.length))
val offset = aesCipher.processBytes(cipher, 0, cipher.length, plainTemp, 0)
val last = aesCipher.doFinal(plainTemp, offset)
val plain = new Array[Byte](offset + last)
Array.copy(plainTemp, 0, plain, 0, plain.length)
Some(new String(plain))
} catch {
case _: Throwable => None
}
}
}
object Encode {
private val encodeType = "PBEWithSHA256And256BitAES-CBC-BC"
def apply(values: String*): String = {
apply(combiner(values: _*).getBytes)
}
def apply(value: Array[Byte]): String = {
val salt = createSalt(saltSize)
Security.addProvider(new BouncyCastleProvider())
val pbeParamSpec = new PBEParameterSpec(salt, iterations)
val pbeKeySpec = new PBEKeySpec(secretKey)
val keyFac = SecretKeyFactory.getInstance(encodeType)
val pbeKey = keyFac.generateSecret(pbeKeySpec)
val encryptionCipher = Cipher.getInstance(encodeType)
encryptionCipher.init(Cipher.ENCRYPT_MODE, pbeKey, pbeParamSpec)
Hex.toHexString(Array(encryptionCipher.doFinal(value), salt).flatten)
}
}
}
object Hex {
import org.apache.commons.codec.binary.Hex
def hex = new Hex()
def toHexString(value: Array[Byte]): String = {
new String(hex.encode(value))
}
def fromHexString(value: String): Array[Byte] = {
hex.decode(value.getBytes("UTF-8"))
}
}
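/** A minimal round-trip sketch for the codec above. The key literal and the
* username are placeholders; real callers would normally obtain the codec via
* `CryptoCodec.withKeyFromFramework`.
*/
object CryptoCodecExample {
  def roundTrip(): Option[(String, String)] = {
    val codec = CryptoCodec("example-private-key")
    // Encode joins the values with ':' and encrypts them, appending the salt as hex
    val token = codec.Encode("alice", CryptoCodec.randomString(16))
    // Decode reverses the operation and splits the result back into (user, password)
    codec.Decode.toUsernamePassword(token)
  }
}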
|
discordianfish/collins
|
app/collins/util/Codec.scala
|
Scala
|
apache-2.0
| 4,622 |
package name.denyago.yasc.config
/**
* Configuration for http.Service
*/
case class HttpServiceConf(host: String, port: Int)
|
denyago/yet-another-simple-chat
|
src/main/scala/name/denyago/yasc/config/HttpServiceConf.scala
|
Scala
|
mit
| 130 |
package dotty.tools.dotc
package ast
object PluggableTransformers {
/*
import Trees._, Contexts._
abstract class PluggableTransformer[T] extends TreeTransformer[T, Context] {
type PluginOp[-N <: Tree[T]] = N => Tree[T]
private[this] var _ctx: Context = _
private[this] var _oldTree: Tree[T] = _
protected implicit def ctx: Context = _ctx
protected def oldTree: Tree[T] = _oldTree
protected def thisTransformer: PluggableTransformer[T] = this
class PluginOps[-N <: Tree[T]](op: PluginOp[N], val next: Plugins) {
def apply(tree: N, old: Tree[T], c: Context): Tree[T] = {
val savedCtx = _ctx
val savedOld = _oldTree
try {
op(tree)
} finally {
_oldTree = savedOld
_ctx = savedCtx
}
}
}
val NoOp: PluginOp[Tree[T]] = identity
val NoOps = new PluginOps(NoOp, null)
class Plugins {
def next: Plugins = null
def processIdent: PluginOp[Ident[T]] = NoOp
def processSelect: PluginOp[Select[T]] = NoOp
val IdentOps: PluginOps[Ident[T]] = NoOps
val SelectOps: PluginOps[Select[T]] = NoOps
}
val EmptyPlugin = new Plugins
private[this] var _plugins: Plugins = EmptyPlugin
override def plugins: Plugins = _plugins
class Plugin extends Plugins {
override val next = _plugins
_plugins = this
private def push[N <: Tree[T]](op: PluginOp[N], ops: => PluginOps[N]): PluginOps[N] =
if (op == NoOp) ops else new PluginOps(op, next)
override val IdentOps: PluginOps[Ident[T]] = push(processIdent, next.IdentOps)
override val SelectOps: PluginOps[Select[T]] = push(processSelect, next.SelectOps)
}
def postIdent(tree: Ident[T], old: Tree[T], c: Context, ops: PluginOps[Ident[T]]) =
if (ops eq NoOps) tree
else finishIdent(ops(tree, old, c), old, c, ops.next)
override def finishIdent(tree: Tree[T], old: Tree[T], c: Context, plugins: Plugins): Tree[T] = tree match {
case tree: Ident[_] => postIdent(tree, old, c, plugins.IdentOps)
case _ => postProcess(tree, old, c, plugins)
}
def postSelect(tree: Select[T], old: Tree[T], c: Context, ops: PluginOps[Select[T]]) =
if (ops eq NoOps) tree
else finishSelect(ops(tree, old, c), old, c, ops.next)
override def finishSelect(tree: Tree[T], old: Tree[T], c: Context, plugins: Plugins): Tree[T] = tree match {
case tree: Select[_] => postSelect(tree, old, c, plugins.SelectOps)
case _ => postProcess(tree, old, c, plugins)
}
protected def postProcess(tree: Tree[T], old: Tree[T], c: Context, plugins: Plugins): Tree[T] = tree match {
case tree: Ident[_] => finishIdent(tree, old, c, plugins)
case tree: Select[_] => finishSelect(tree, old, c, plugins)
}
}
}
import PluggableTransformers._, Types._, Trees._, Contexts._
class ExampleTransformer extends PluggableTransformer[Type] {
object ExamplePlugin extends Plugin {
override def processIdent = {
case tree @ Ident(x) if x.isTypeName => tree.derivedSelect(tree, x)
case tree => tpd.Ident(???)
}
override def processSelect = { tree =>
if (tree.isType) tree.derivedIdent(tree.name)
else tpd.EmptyTree
}
}
override def transform(tree: tpd.Tree, ctx: Context) =
super.transform(tree, ctx)
*/
}
|
yusuke2255/dotty
|
src/dotty/tools/dotc/ast/PluggableTransformers.scala
|
Scala
|
bsd-3-clause
| 3,352 |
package doodle
package examples
import doodle.core._
import doodle.core.Image._
import doodle.syntax._
import doodle.random._
import cats.instances.list._
import cats.syntax.cartesian._
import cats.syntax.traverse._
object Spirals {
def scale(factor: Double): Point => Point =
(pt: Point) => {
Point.polar(pt.r * factor, pt.angle)
}
def spiral(weight: Angle => Double): Angle => Point =
(angle: Angle) => Point.polar(weight(angle), angle)
val linearWeight: Angle => Double =
angle => angle.toTurns
val quadraticWeight: Angle => Double =
angle => angle.toTurns * angle.toTurns
def roseWeight(k: Int): Angle => Double =
angle => (angle * k).cos
val symmetricDecreasingWeight: Angle => Double =
angle => {
val turns = {
val t = angle.toTurns
if(t < 0.5) t else (t - 0.5)
}
(1 - turns)
}
val randomSpiral: Random[Angle => Point] =
Random.oneOf(
spiral { linearWeight },
spiral { quadraticWeight },
spiral { symmetricDecreasingWeight },
spiral { roseWeight(1) },
spiral { roseWeight(3) },
spiral { roseWeight(4) },
spiral { roseWeight(5) }
)
def jitter(point: Point): Random[Point] = {
val noise = Random.normal(0, 10.0)
(noise |@| noise) map { (dx, dy) =>
Point.cartesian(point.x + dx, point.y + dy)
}
}
val smoke: Random[Image] = {
val alpha = Random.normal(0.5, 0.1) map (a => a.normalized)
val hue = Random.double.map(h => (h * 0.1 + 0.7).turns)
val saturation = Random.double.map(s => (s * 0.8).normalized)
val lightness = Random.normal(0.4, 0.1) map (a => a.normalized)
val color =
(hue |@| saturation |@| lightness |@| alpha) map {
(h, s, l, a) => Color.hsla(h, s, l, a)
}
val c = Random.normal(2, 1) map (r => circle(r))
(c |@| color) map { (circle, line) => circle.lineColor(line).noFill }
}
val pts: Random[List[Image]] =
(1 to 3).toList.map { (i: Int) =>
randomSpiral flatMap { spiral =>
((1 to 720 by 10).toList.map { angle =>
val pt = (spiral andThen scale(200) andThen jitter)(angle.degrees)
(smoke |@| pt) map { _ at _.toVec }
}).sequence
}
}.foldLeft(Random.always(List.empty[Image])){ (accum, elt) =>
accum.flatMap { imgs1 =>
elt.map { imgs2 => imgs1 ++ imgs2}}
}
val image: Random[Image] =
pts.map(pt => pt.foldLeft(Image.empty){ _ on _ })
}
|
Angeldude/doodle
|
shared/src/main/scala/doodle/examples/Spirals.scala
|
Scala
|
apache-2.0
| 2,468 |
package views.html.streamer
import controllers.routes
import play.api.data.Form
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import lila.common.String.html.richText
object edit {
import trans.streamer._
def apply(
s: lila.streamer.Streamer.WithUserAndStream,
form: Form[_],
modData: Option[((List[lila.mod.Modlog], List[lila.user.Note]), List[lila.streamer.Streamer])]
)(implicit ctx: Context) = {
views.html.base.layout(
title = s"${s.user.titleUsername} ${lichessStreamer.txt()}",
moreCss = cssTag("streamer.form")
) {
main(cls := "page-menu")(
bits.menu("edit", s.withoutStream.some),
div(cls := "page-menu__content box streamer-edit")(
if (ctx.is(s.user))
div(cls := "streamer-header")(
if (s.streamer.hasPicture)
a(
targetBlank,
cls := "picture-edit",
href := routes.Streamer.picture,
title := changePicture.txt()
)(
picture.thumbnail(s.streamer, s.user)
)
else
div(cls := "picture-create")(
ctx.is(s.user) option
a(targetBlank, cls := "button", href := routes.Streamer.picture)(
uploadPicture()
)
),
div(cls := "overview")(
h1(s.streamer.name),
bits.rules
)
)
else views.html.streamer.header(s),
div(cls := "box-pad") {
val granted = s.streamer.approval.granted
frag(
(ctx.is(s.user) && s.streamer.listed.value) option div(
cls := s"status is${granted ?? "-green"}",
dataIcon := (if (granted) "" else "")
)(
if (granted)
frag(
approved(),
s.streamer.approval.tier > 0 option frag(
br,
strong("You have been selected for frontpage featuring!"),
p(
"Note that we can only show a limited number of streams on the homepage, ",
"so yours may not always appear."
)
)
)
else
frag(
if (s.streamer.approval.requested) pendingReview()
else
frag(
if (s.streamer.completeEnough)
whenReady(
postForm(action := routes.Streamer.approvalRequest)(
button(tpe := "submit", cls := "button", (!ctx.is(s.user)) option disabled)(
requestReview()
)
)
)
else pleaseFillIn()
)
)
),
ctx.is(s.user) option div(cls := "status")(
anotherLanguage(
a(href := "https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes")(
"2-letter ISO 639-1 code"
)
)
),
modData.map { case ((log, notes), same) =>
div(cls := "mod_log status")(
strong(cls := "text", dataIcon := "")(
"Moderation history",
log.isEmpty option ": nothing to show."
),
log.nonEmpty option ul(
log.map { e =>
li(
userIdLink(e.mod.some, withTitle = false),
" ",
b(e.showAction),
" ",
e.details,
" ",
momentFromNow(e.date)
)
}
),
br,
strong(cls := "text", dataIcon := "")(
"Moderator notes",
notes.isEmpty option ": nothing to show."
),
notes.nonEmpty option ul(
notes.map { note =>
(isGranted(_.Admin) || !note.dox) option
li(
p(cls := "meta")(userIdLink(note.from.some), " ", momentFromNow(note.date)),
p(cls := "text")(richText(note.text))
)
}
),
br,
strong(cls := "text", dataIcon := "")(
"Streamers with same Twitch or YouTube",
same.isEmpty option ": nothing to show."
),
same.nonEmpty option table(cls := "slist")(
same.map { s =>
tr(
td(userIdLink(s.userId.some)),
td(s.name),
td(s.twitch.map(t => a(href := s"https://twitch.tv/${t.userId}")(t.userId))),
td(
s.youTube.map(t =>
a(href := s"https://youtube.com/channel/${t.channelId}")(t.channelId)
)
),
td(momentFromNow(s.createdAt))
)
}
)
)
},
postForm(
cls := "form3",
action := s"${routes.Streamer.edit}${!ctx.is(s.user) ?? s"?u=${s.user.id}"}"
)(
isGranted(_.Streamers) option div(cls := "mod")(
form3.split(
form3.checkbox(
form("approval.granted"),
frag("Publish on the streamers list"),
half = true
),
form3.checkbox(
form("approval.requested"),
frag("Active approval request"),
half = true
)
),
form3.split(
form3.checkbox(
form("approval.chat"),
frag("Embed stream chat too"),
half = true
),
if (granted)
form3.group(
form("approval.tier"),
raw("Homepage tier"),
help =
frag("Higher tier has more chance to hit homepage. Set to zero to unfeature.").some,
half = true
)(form3.select(_, lila.streamer.Streamer.tierChoices))
else
form3.checkbox(
form("approval.ignored"),
frag("Ignore further approval requests"),
half = true
)
),
form3.actions(
form3
.submit("Approve and next")(
cls := "button-green",
name := "approval.quick",
value := "approve"
),
form3.submit("Decline and next", icon = "".some)(
cls := "button-red",
name := "approval.quick",
value := "decline"
),
form3.submit(trans.apply())
)
),
form3.globalError(form),
form3.split(
form3.group(
form("twitch"),
twitchUsername(),
help = optionalOrEmpty().some,
half = true
)(form3.input(_)),
form3.group(
form("youTube"),
youtubeChannel(),
help = optionalOrEmpty().some,
half = true
)(form3.input(_))
),
form3.split(
form3.group(
form("name"),
streamerName(),
help = keepItShort(25).some,
half = true
)(form3.input(_)),
form3.checkbox(
form("listed"),
visibility(),
help = whenApproved().some,
half = true
)
),
form3.group(
form("headline"),
headline(),
help = tellUsAboutTheStream().some
)(form3.input(_)),
form3.group(form("description"), longDescription())(form3.textarea(_)(rows := 10)),
form3.actions(
a(href := routes.Streamer.show(s.user.username))(trans.cancel()),
form3.submit(trans.apply())
)
)
)
}
)
)
}
}
}
|
luanlv/lila
|
app/views/streamer/edit.scala
|
Scala
|
mit
| 9,385 |
package by.bsuir.verpav.misoi.ui.table
import java.awt.{BorderLayout, Color, Component, Dimension}
import javax.swing.{JFrame, JLabel, JScrollPane, JTable}
import javax.swing.table.{AbstractTableModel, TableCellRenderer}
class ObjectPropertiesTable(data: Array[List[Any]]) extends JFrame("Objects properties") {
setLayout(new BorderLayout())
val table = new JTable(TableModel(data))
table.setPreferredScrollableViewportSize(new Dimension(500, 70))
table.setFillsViewportHeight(true)
table.setDefaultRenderer(classOf[Color], ColorRenderer)
add(new JScrollPane(table))
setPreferredSize(new Dimension(500, 70))
pack()
setLocationRelativeTo(null)
setVisible(true)
object TableModel extends AbstractTableModel {
var data: Array[List[Any]] = _
val columnNames = Array[String]("Color", "Square", "Perimeter", "Density", "Central moment 2nd order", "Elongation")
def getColumnCount = { columnNames.length }
def getRowCount = { data.length }
override def getColumnName(col: Int) = { columnNames(col) }
def getValueAt(row: Int, col: Int) = { data(row)(col).asInstanceOf[AnyRef] }
override def getColumnClass(c: Int) = { getValueAt(0, c).getClass }
override def isCellEditable(row: Int, col: Int) = { false }
override def setValueAt(value: AnyRef, row: Int, col: Int) = {}
private def setData(data: Array[List[Any]]) = {this.data = data; this}
def apply(data: Array[List[Any]]) = TableModel.setData(data)
}
object ColorRenderer extends JLabel with TableCellRenderer {
setOpaque(true)
def getTableCellRendererComponent(table: JTable, color: Any, isSelected: Boolean, hasFocus: Boolean, row: Int, column: Int): Component = {
val newColor: Color = color.asInstanceOf[Color]
setBackground(newColor)
this
}
}
}
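/** A minimal construction sketch; the rows are made-up values following the
* column order Color, Square, Perimeter, Density, Central moment 2nd order, Elongation.
*/
object ObjectPropertiesTableExample {
  def show(): ObjectPropertiesTable =
    new ObjectPropertiesTable(Array(
      List(Color.RED, 120, 46, 0.71, 1.2, 2.3),
      List(Color.BLUE, 85, 38, 0.74, 0.9, 1.8)
    ))
}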
|
VerkhovtsovPavel/BSUIR_Labs
|
Labs/MISOI/MISOI-2/src/by/bsuir/verpav/misoi/ui/table/ObjectPropertiesTable.scala
|
Scala
|
mit
| 1,817 |
package nounou.util
import java.io.{FileWriter, BufferedWriter, FileReader}
import java.nio.charset.StandardCharsets
import java.nio.file.Files
import com.google.gson.Gson
import nounou.NN
import org.apache.commons.io.IOUtils
import org.eclipse.jgit.lib.Repository
import org.eclipse.jgit.storage.file.FileRepositoryBuilder
/**This object uses jgit to read current Git head information, via the
* companion class [[NNGit]]. There is no real need for this to be a
* companion object, just that it is more aesthetically pleasing that way.
*
* All serialized objects should be tagged with this Git information
* for reproducibility.
*/
object NNGit {
@transient val jsonFileName = "NNGit.gson.txt"
/**File for saved git information. It will not be accessible at this path
* (i.e. jsonFile.exists == false) if this library is accessed from a .jar file.
*
* This file will be overwritten with new information if this object is initialized
* from within an active Git repository ( contained in "./.git" ).
* */
@transient val jsonFile = new java.io.File( "./src/main/resources/" + jsonFileName)
/** Resource stream for pre-serialized Git version information.
* It will be null if the resource does not exist.
* This resource stream will be used to read Git information if this
* library is accessed from within a *.jar file.
* Use of stream (and not java.io.File) is necessary to read from within a compressed jar file.
*/
@transient val jsonStream = NN.getClass.getClassLoader.getResourceAsStream(jsonFileName)
/**Current repository in "./.git". Repository will be invalid if this library is accessed via
* jar file (repository.getRef("HEAD") == null). In this case,
* [[jsonStream]] will be used as the source for pre-serialized
* Git information.
*/
@transient private val repository = (new FileRepositoryBuilder()).
setGitDir( new java.io.File( "./.git" ) ). //( null )
readEnvironment().findGitDir().build()
/**Whether the info was loaded from a repository*/
@transient private var repoLoaded: Boolean = false
/**Whether the info was loaded from a file*/
@transient private var fileLoaded: Boolean = false
@transient
private val obj: NNGit = {
// If not in an active git repository, see if the Git info has been previously serialized
if( repository.getRef("HEAD") == null /*repository.isBare*/ ) {
repoLoaded = false
//return either the deserialized object or empty (if file not present)
if( jsonFile.exists ){
fileLoaded = true
nounou.gson.fromJson( Files.readAllLines(jsonFile.toPath, StandardCharsets.UTF_8).toArray.mkString("\n"),
classOf[NNGit] )
} else if (jsonStream != null ){
fileLoaded = true
nounou.gson.fromJson( IOUtils.toString(jsonStream, "UTF-8"), classOf[NNGit])
} else {
fileLoaded = false
//If no information available, default (empty) class is initialized
new NNGit
}
}else{ //If valid git repository present, use it to get latest data
val tempRet = new NNGit()
tempRet.initializeFromRepository(repository)
// write newest repo information to serialized file, if exists (do not do for *.jar/resource)
if (jsonFile.exists()) jsonFile.delete
jsonFile.createNewFile()
val writer = new BufferedWriter( new FileWriter( jsonFile ) )
writer.write( nounou.gson.toJson(tempRet) )
writer.close
repoLoaded = true
fileLoaded = false
//write latest repository information to serialization file
tempRet
}
}
def getGitHead: String = {
if(obj==null) "NNGit is not yet initialized!"
else obj.head
}
// def getGitHeadShort: String = {
// if(obj==null) "NNGit is not yet initialized!"
// else obj.headShort
// }
def gitRepoLoaded = repoLoaded
def gitFileLoaded = fileLoaded
def contentText() =
" + current HEAD is: " + obj.head + "\n" +
" + current branch is: " + obj.branch + "\n" +
" + remote names are: " + obj.remotes.mkString(", ") + "\n"
def repoText() = "GIT repo directory: " + obj.gitDirectory + "\n"
def fileText() = "Last GIT info from file resource: " + jsonFileName + "\n"
def infoPrintout() = {
if(gitRepoLoaded) repoText() + contentText()
else if(gitFileLoaded) fileText() + contentText()
else s"Could not initialize GIT information with $jsonFile or current repository."
}
}
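// Typical use is to stamp logs or serialized artifacts with the build's provenance,
// e.g. printing NNGit.infoPrintout() at start-up or tagging a saved object with
// NNGit.getGitHead (a sketch; how the strings are consumed is up to the caller).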
/**This class will be serialized by gson.
*/
class NNGit {
var head: String = "Head not initialized"
// var headShort: String = "Short head not initialized"
var branch: String = "Branch not initialized"
var remotes: Array[String] = Array("Remotes not initialized")
var gitDirectory: String = "Git directory not initialized"
/**Initializes the NNGit class based on a given input repository.
* This initialization is NOT done in constructor, due to gson
* requiring a no-argument constructor for correct serialization.
*/
def initializeFromRepository(repository: Repository): Unit = {
NN.loggerRequire(repository != null, "Called with null repository!" )
head = repository.getRef("HEAD").getObjectId.name match {
case x: String => x
case _ => "Head not detected"
}
// headShort = head.take(10)
branch = repository.getBranch match {
case x: String => x
case _ => "Head not detected"
}
val tempRemotes = repository.getConfig.getSubsections("remote").toArray.map( _.asInstanceOf[String] )
//repository.getRemoteNames.toArray.map( _.asInstanceOf[String]
remotes = tempRemotes.map(repository.getConfig.getString("remote", _, "url")) match {
case x: Array[String] => x
case _ => Array("Remotes not detected")
}
gitDirectory = repository.getDirectory.getCanonicalPath
}
}
//Notes:
// Attempts at accessing git information from outside a *.jar should not be
// necessary: once the jar is packaged, the pre-generated git head is read anyway.
// println(NN.getClass.getProtectionDomain.getCodeSource.getLocation.toURI)
|
ktakagaki/nounou.rebooted150527
|
src/main/scala/nounou/util/NNGit.scala
|
Scala
|
apache-2.0
| 6,106 |
package chapter.three
object ExerciseThree extends App {
// todo: make generic
// todo: preconditions check
def swapAdjacent(initial: Array[Int]): Array[Int] = {
for (i <- initial.indices.toArray) yield {
if (i == initial.size - 1 && i % 2 == 0) initial(i)
else if (i % 2 == 0) initial(i + 1)
else initial(i - 1)
}
}
}
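/** A worked example for the exercise above (the demo object name is arbitrary):
* prints "2, 1, 4, 3, 5", since adjacent elements are swapped and the trailing
* element of an odd-length array stays in place.
*/
object ExerciseThreeDemo extends App {
  println(ExerciseThree.swapAdjacent(Array(1, 2, 3, 4, 5)).mkString(", "))
}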
|
deekim/impatient-scala
|
src/main/scala/chapter/three/ExerciseThree.scala
|
Scala
|
apache-2.0
| 356 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.request.builder.sse
import io.gatling.commons.validation.Validation
import io.gatling.core.CoreComponents
import io.gatling.core.session.Session
import io.gatling.http.ahc.AhcRequestBuilder
import io.gatling.http.protocol.HttpComponents
import io.gatling.http.request.builder.{ CommonAttributes, RequestExpressionBuilder }
import org.asynchttpclient.uri.Uri
class SseRequestExpressionBuilder(commonAttributes: CommonAttributes, coreComponents: CoreComponents, httpComponents: HttpComponents)
extends RequestExpressionBuilder(commonAttributes, coreComponents, httpComponents) {
override protected def configureRequestBuilder(session: Session, uri: Uri, requestBuilder: AhcRequestBuilder): Validation[AhcRequestBuilder] = {
// disable request timeout for SSE
requestBuilder.setRequestTimeout(-1)
super.configureRequestBuilder(session, uri, requestBuilder)
}
}
|
MykolaB/gatling
|
gatling-http/src/main/scala/io/gatling/http/request/builder/sse/SseRequestExpressionBuilder.scala
|
Scala
|
apache-2.0
| 1,524 |
import akka.actor.Props
import akka.io.IO
import akka.pattern.ask
import akka.util.Timeout
import rest.RoutesActor
import service.{BugsService, IssuesService, SubIssuesService}
import spray.can.Http
import utils._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
object Boot extends App {
// configuring modules for application, cake pattern for DI
val modules = new ConfigurationModuleImpl with ActorModuleImpl
val issuesService = new IssuesService with H2DbModule
val subIssuesService = new SubIssuesService with H2DbModule
val bugsService = new BugsService with H2DbModule
// create and start our service actor
val service = modules.system.actorOf(Props(classOf[RoutesActor],
issuesService, subIssuesService, bugsService), "routesActor")
implicit val system = modules.system
implicit val timeout = Timeout(5.seconds)
Await.result(
for (_ <- issuesService.createTable; _ <- bugsService.createTable; _ <- subIssuesService.createTable) yield {},
Duration.Inf)
// start a new HTTP server on port 8080 with our service actor as the handler
IO(Http) ? Http.Bind(service, interface = "localhost", port = 8080)
}
|
Kanris826/spray-slick-swagger
|
src/main/scala/Boot.scala
|
Scala
|
apache-2.0
| 1,228 |
package universe
import universe.Animal._
import universe.Globals.engine._
object Animal {
val StartEnergy = 200
// radius within which animals can see the world around them
val ViewRadius = 9
// energy required to move
val MoveCost = 1
// energy required to procreate
val ProcreateCost = 10
// maximum age in days, after which an animal dies regardless of energy
val MaxAge = 25
// energy rate gained when eating plants
val PlantEatRate = 3
// minimum energy required for male animals to seek a mate
val ProcreateThreshold = 60
// minimum age in days for animals to be fertile
val FertileAge = 1
// time in hours for female sheep to be pregnant
val PregnancyTime = 30
// minimum energy for carnivores to attack
val AttackThreshold = 100
// energy stolen when carnivores attack
val AttackAmount = 50
// minimum energy for carnivores to start sleeping
val SleepThreshold = 30
// energy gained while sleeping
val SleepRate = 2
/** An animal is in a state */
sealed trait AnimalState
case object Idling extends AnimalState
case class Eating(plant: Plant) extends AnimalState
case class Attacking(other: Animal) extends AnimalState
case class Moving(dir: Pos) extends AnimalState
case class Procreating(female: Animal) extends AnimalState
case object FallPrey extends AnimalState
case object Sleeping extends AnimalState
}
abstract class Animal(implicit world: World) extends BoardElement {
final override def isAnimal: Boolean = true
final val step: Evt[(Pos, Boolean)] = Evt[(Pos, Boolean)]()
private val statePos: Signal[(AnimalState, Pos)] = step.fold((Idling: AnimalState, Pos(0, 0))) { (p1, p2) =>
(p1, p2) match {
case ((oldState, oldPos), (newPos, prey)) =>
if (prey) (FallPrey, oldPos)
else (nextAction(newPos), newPos)
}
}
private val state: Signal[AnimalState] = statePos.map(_._1)
statePos.observe {
case (cstate, pos) =>
world.plan {
cstate match {
case Moving(dir) => world.board.moveIfPossible(pos, dir)
case Eating(plant) => plant.takeEnergy(energyGain.readValueOnce)
case Attacking(prey) => prey.savage()
case Procreating(female: Female) => female.procreate(this)
case _ =>
}
}
}
/** Some imperative code that is called each tick */
final override def doStep(pos: Pos): Unit = step.fire((pos, false))
private def savage() = step.fire((Pos(0, 0), true))
// partial function for collecting food, dependent on the state of the object
val findFood: Signal[PartialFunction[BoardElement, BoardElement]] // Abstract (//#SIG)
// function for creating a state upon reaching target
def reachedState(target: BoardElement): AnimalState
protected def nextAction(pos: Pos): AnimalState = {
val neighbors = world.board.neighbors(pos)
val food = neighbors.collectFirst(findFood.readValueOnce)
val nextAction: AnimalState = food match {
case Some(target) => reachedState(target) // I'm near food, eat it!
case None => // I have to look for food nearby
world.board.nearby(pos, Animal.ViewRadius).collectFirst(findFood.readValueOnce) match {
case Some(target) =>
val destination = world.board.getPosition(target)
if (destination.isDefined)
Moving(pos.directionTo(destination.get))
else
randomMove
case None => randomMove
}
}
nextAction
}
private def randomMove: AnimalState = {
val randx = 1 - world.randomness.nextInt(3)
val randy = 1 - world.randomness.nextInt(3)
Moving(Pos(randx, randy))
}
final val age: Signal[Int] = world.time.day.changed.count() // #SIG //#IF //#IF
final val isAdult: Signal[Boolean] = age.map(_ > Animal.FertileAge)
val isFertile: Signal[Boolean] = isAdult
private val energyDrain: Signal[Int] =
Signal.static {
(world.board.animalsAlive.value / (world.board.width + world.board.height)) +
(age.value / 2) +
(state.value match {
case Moving(_) => Animal.MoveCost
case Procreating(_) => Animal.ProcreateCost
case FallPrey => Animal.AttackAmount
case _ => 0
})
}
private val energyGain: Signal[Int] =
state map {
case Eating(_) => Animal.PlantEatRate
case Sleeping => Animal.SleepRate
case Attacking(prey) => Animal.AttackAmount
case _ => 0
}
// we do not have a built-in method for this kind of “fold some snapshot”, but it's not that hard to write one
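// Each world tick contributes (energyGain - energyDrain) to a running total that
// starts at Animal.StartEnergy; isDead below then fires once the total drops
// below zero or MaxAge is exceeded.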
final protected val energy: Signal[Int] =
Event { world.time.tick().map(_ => energyGain() - energyDrain()) }.fold(Animal.StartEnergy)((current, change) =>
current + change
)
final override val isDead = Signals.lift(age, energy) { (a, e) => a > Animal.MaxAge || e < 0 }
}
|
guidosalva/REScala
|
Code/Examples/Universe/src/main/scala/universe/Animal.scala
|
Scala
|
apache-2.0
| 5,050 |
package org.scalatest.tools
import org.scalatest.events.{Event, SuiteStarting}
private[scalatest] case class SuiteResult(
suiteId: String,
suiteName: String,
suiteClassName: Option[String],
duration: Option[Long],
startEvent: SuiteStarting,
endEvent: Event,
eventList: collection.immutable.IndexedSeq[Event],
testsSucceededCount: Int,
testsFailedCount: Int,
testsIgnoredCount: Int,
testsPendingCount: Int,
testsCanceledCount: Int,
isCompleted: Boolean)
|
hubertp/scalatest
|
src/main/scala/org/scalatest/tools/SuiteResult.scala
|
Scala
|
apache-2.0
| 492 |
package com.hamishdickson.helix.protein
import com.hamishdickson.helix.genomes.rna.{MRna, MRnaGenome, RnaNucleotide, RnaNucleotideU}
import org.scalatest.{Matchers, FlatSpec}
class CodonTest extends FlatSpec with Matchers {
"The Codon apply method" should "turn a list into a Codon" in {
val c: List[RnaNucleotide] = List(RnaNucleotideU, RnaNucleotideU, RnaNucleotideU)
Codon(c) should be (UUU)
}
"The toProtein method" should "take a codon and return a protein" in {
val c: Codon = UUU
c.toProtein should be (ProteinF)
}
"The toProteinList method" should "take a mRNA string and return a protein list" in {
val mRnaGenome: MRnaGenome = MRna("AUGGCCAUGGCGCCCAGAACUGAGAUCAAUAGUACCCGUAUUAACGGGUGA")
val p: ProteinChain = ProteinRope("MAMAPRTEINSTRING")
mRnaGenome.toProteinList should be (p)
}
"The toString method" should "take a protein list and return it's string counterpart" in {
val mRnaGenome: MRnaGenome = MRna("AUGGCCAUGGCGCCCAGAACUGAGAUCAAUAGUACCCGUAUUAACGGGUGA")
val r: String = "MAMAPRTEINSTRING"
val ps: ProteinChain = mRnaGenome.toProteinList
ps.toString should be (r)
}
}
|
hamishdickson/helix
|
src/test/scala/com/hamishdickson/helix/protein/CodonTest.scala
|
Scala
|
mit
| 1,155 |
package com.twitter.scrooge.java_generator
import com.twitter.scrooge.ast._
import com.twitter.scrooge.ast.SetType
import com.twitter.scrooge.ast.MapType
class FieldValueMetadataController(
fieldType: FieldType,
generator: ApacheJavaGenerator,
ns: Option[Identifier])
extends BaseController(generator, ns) {
val field_type: FieldTypeController = new FieldTypeController(fieldType, generator)
def map_element: Any = {
fieldType match {
case MapType(k, v, _) => {
Map(
"field_value_meta_data_key" -> generateMetadata(k),
"field_value_meta_data_val" -> generateMetadata(v)
)
}
case _ => false
}
}
def set_or_list_element: Any = {
fieldType match {
case SetType(x, _) => elem(x)
case ListType(x, _) => elem(x)
case _ => false
}
}
def elem(x: FieldType): Map[String, Object] = {
Map("field_value_meta_data_elem" -> generateMetadata(x))
}
def generateMetadata(k: FieldType): String = {
indent(generator.fieldValueMetaData(k, ns), 4, skipFirst = true, addLast = false)
}
}
|
twitter/scrooge
|
scrooge-generator/src/main/scala/com/twitter/scrooge/java_generator/FieldValueMetadataController.scala
|
Scala
|
apache-2.0
| 1,096 |
import org.junit.Assert._
import org.junit.Test
import org.junit.Ignore
import scala.util.Random
import scala.collection.mutable.ArrayBuffer
import Chisel._
import OLK.Kernel._
class GaussianSuite extends TestSuite {
@Test def gaussTest {
class GaussianTests(c : Gaussian) extends Tester(c) {
val r = scala.util.Random
val cycles = 3*(c.kCycles + 1)
val pipeline = ArrayBuffer.fill(c.pCycles){ArrayBuffer.fill(c.features){BigInt(r.nextInt(1 << c.fracWidth))}}
val dictionary = ArrayBuffer.fill(c.dictSize){ArrayBuffer.fill(c.features){BigInt(r.nextInt(1 << c.fracWidth))}}
val addToDict = ArrayBuffer.fill(cycles + c.kCycles){ r.nextInt(2) == 1 }
val gamma = BigInt(r.nextInt(1 << c.fracWidth))
for (j <- 0 until c.features) {
for (i <- 0 until c.pCycles)
poke(c.io.pipeline(i)(j), pipeline(i)(j))
for (i <- 0 until c.dictSize)
poke(c.io.dictionary(i)(j), dictionary(i)(j))
}
poke(c.io.gamma, gamma)
// Pad initial cycles with zeros
// expectedPipeline(cycle)(index)
val expectedPipeline = ArrayBuffer.fill(c.kCycles){ArrayBuffer.fill(c.pCycles - c.kCycles)(BigInt(0))}
// expectedDict(cycle)(index)
val expectedDict = ArrayBuffer.fill(c.kCycles){ArrayBuffer.fill(c.dictSize)(BigInt(0))}
val log2Table = { (scala.math.log10(c.tableSize)/scala.math.log10(2)).ceil.toInt }
// Generate Table for linear interpolation
val gradients = new ArrayBuffer[BigInt]()
val offsets = new ArrayBuffer[BigInt]()
// Fixed point increment
val increment = 1.0 / (1 << log2Table)
val tableEnd = 1.0
var x = 0.0
// NOTE: x is positive, therefore the gradient is negative
while (x < tableEnd) {
// m = (y1 - y2)/(x1 - x2)
val m = -(scala.math.pow(2, - x) - scala.math.pow(2,- x - increment))/increment
// convert to Fixed
gradients += BigInt((m * (1 << c.fracWidth)).toLong)
// b = y1 - m*x1
val b = scala.math.pow(2, - x) - m*x
// convert to Fixed
offsets += BigInt((b * (1 << c.fracWidth)).toLong)
x += increment
}
for ( cyc <- 0 until cycles ) {
val example = ArrayBuffer.fill(c.features){BigInt(r.nextInt(1 << c.fracWidth))}
// calculate the outputs for this example
val subPipe = pipeline.map(x => {
val subAry = new ArrayBuffer[BigInt]()
for (i <- 0 until c.features)
subAry += x(i) - example(i)
subAry
})
val subDict = dictionary.map(x => {
val subAry = new ArrayBuffer[BigInt]()
for (i <- 0 until c.features)
subAry += x(i) - example(i)
subAry
})
val l2Pipe = subPipe.map(x => { x.map(y => { ((y*y) >> c.fracWidth) }).sum }).map( x => { (gamma*x) >> c.fracWidth })
val l2Dict = subDict.map(x => { x.map(y => { ((y*y) >> c.fracWidth) }).sum }).map( x => { (gamma*x) >> c.fracWidth })
val xIntPipe = l2Pipe.map(x => { x >> c.fracWidth }).map(x => x.toInt)
val xIntDict = l2Dict.map(x => { x >> c.fracWidth }).map(x => x.toInt)
val xTabPipe = l2Pipe.map(x => { (x >> (c.fracWidth - log2Table)) & ((1 << log2Table) - 1) }).map(x => x.toInt)
val xTabDict = l2Dict.map(x => { (x >> (c.fracWidth - log2Table)) & ((1 << log2Table) - 1) }).map(x => x.toInt)
val xFracPipe = l2Pipe.map(x => { x & ((1 << c.fracWidth) - 1) })
val xFracDict = l2Dict.map(x => { x & ((1 << c.fracWidth) - 1) })
val yPipe = (xFracPipe zip xTabPipe).map(pair => { ((gradients(pair._2)*pair._1) >> c.fracWidth) + offsets(pair._2) })
val yDict = (xFracDict zip xTabDict).map(pair => { ((gradients(pair._2)*pair._1) >> c.fracWidth) + offsets(pair._2) })
val yOutPipe = (yPipe zip xIntPipe).map(pair => { pair._1 >> pair._2 })
val yPipeAdded = new ArrayBuffer[BigInt]()
for (i <- 0 until c.kCycles) {
if (addToDict(c.kCycles - 1 - i + cyc))
yPipeAdded += yOutPipe(yOutPipe.length - c.kCycles + i)
}
val yOutDictTmp = (yDict zip xIntDict).map(pair => { pair._1 >> pair._2 })
val yOutDict = yOutDictTmp.take(c.dictSize - yPipeAdded.length)
// Add the values for k cycles in advance
expectedPipeline += yOutPipe.take(c.pCycles - c.kCycles)
yPipeAdded.appendAll(yOutDict)
expectedDict += yPipeAdded
poke(c.io.addToDict, Bool(addToDict(cyc)).litValue())
for (i <- 0 until c.features)
poke(c.io.example(i), example(i))
if (cyc >= c.kCycles) {
for (i <- 0 until (c.pCycles - c.kCycles))
expect(c.io.pipelineOut(i), expectedPipeline(cyc)(i))
for (i <- 0 until (c.dictSize))
expect(c.io.dictOut(i), expectedDict(cyc)(i))
}
step(1)
}
}
val myRand = new Random
val fracWidth = myRand.nextInt(24) + 1
val bitWidth = myRand.nextInt(24) + fracWidth + 4
val dictSize = myRand.nextInt(250) + 1
val log2Features = myRand.nextInt(5) + 1
val features = 1 << log2Features
val pCycles = myRand.nextInt(20) + 8
val stages = ArrayBuffer.fill(7 + log2Features){ myRand.nextInt(2) == 1 }
val tableSize = 1 << (myRand.nextInt(8) + 1)
println("val fracWidth = " + fracWidth)
println("val bitWidth = " + bitWidth)
println("val dictSize = " + dictSize)
println("val features = " + features)
println("val pCycles = " + pCycles)
println("val stages = " + stages)
println("val tableSize = " + tableSize)
chiselMainTest(Array("--genHarness", "--compile", "--test", "--backend", "c"), () => {
Module( new Gaussian( bitWidth, fracWidth, dictSize, features, pCycles, stages, tableSize) )
} ) { c => new GaussianTests( c ) }
}
}
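The comments in the test above describe how a piecewise-linear, fixed-point lookup table is generated to approximate 2^-x on [0, 1). The following standalone sketch (editor-added; the object name and bit widths are arbitrary and it is not part of the original test) reproduces just that table construction and a single lookup:

object Exp2TableSketch extends App {
  val fracWidth = 16                        // fractional bits, chosen for the sketch
  val log2Table = 4                         // 2^4 = 16 linear segments
  val increment = 1.0 / (1 << log2Table)
  // per segment [x, x + increment): gradient m = (y1 - y2)/(x1 - x2) and offset b = y1 - m*x1,
  // converted to fixed point in the same way as the test above
  val table = (0 until (1 << log2Table)).map { i =>
    val x = i * increment
    val m = -(math.pow(2, -x) - math.pow(2, -x - increment)) / increment
    val b = math.pow(2, -x) - m * x
    (BigInt((m * (1 << fracWidth)).toLong), BigInt((b * (1 << fracWidth)).toLong))
  }
  // evaluate the interpolation for a fixed-point x in [0, 1)
  def approx(xFixed: BigInt): BigInt = {
    val seg  = (xFixed >> (fracWidth - log2Table)).toInt   // which segment x falls in
    val frac = xFixed & ((1 << fracWidth) - 1)             // fractional part of x
    ((table(seg)._1 * frac) >> fracWidth) + table(seg)._2  // m*x + b in fixed point
  }
  val x = BigInt((0.3 * (1 << fracWidth)).toLong)
  println(approx(x).toDouble / (1 << fracWidth))           // ~0.812, close to 2^-0.3
}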
| da-steve101/chisel-pipelined-olk | src/test/scala/GaussianSuite.scala | Scala | gpl-2.0 | 5,846 |
package com.arcusys.learn.liferay.constants
import com.liferay.portal.kernel.util.StringPool
object StringPoolHelper {
val BLANK = StringPool.BLANK
val QUESTION = StringPool.QUESTION
val EQUAL = StringPool.EQUAL
val AMPERSAND = StringPool.AMPERSAND
}
| arcusys/Valamis | learn-liferay700-services/src/main/scala/com/arcusys/learn/liferay/constants/StringPoolHelper.scala | Scala | gpl-3.0 | 261 |
package com.weez.mercury.common
import akka.event.LoggingAdapter
import com.weez.mercury.imports.packable
sealed trait DBType
object DBType {
sealed trait DBTypeRef
import shapeless._
implicit val typeRefPacker: Packer[DBTypeRef] = Packer.poly[DBTypeRef,
String.type :: Int.type :: Long.type :: Double.type :: Boolean.type :: DateTime.type :: Raw.type :: AbstractEntity.type ::
Coll :: Tuple :: Ref :: Struct :: HNil].asInstanceOf[Packer[DBTypeRef]]
sealed abstract class SimpleType(val typeCode: Int, name: String) extends DBType with DBTypeRef {
override def toString = name
}
object String extends SimpleType(1, "String")
object Int extends SimpleType(2, "Int")
object Long extends SimpleType(3, "Long")
object Double extends SimpleType(4, "Double")
object Boolean extends SimpleType(5, "Boolean")
object DateTime extends SimpleType(6, "DateTime")
object Raw extends SimpleType(7, "Raw")
object AbstractEntity extends SimpleType(10, "AbstractEntity")
def fromTypeCode(typeCode: Int): SimpleType = {
typeCode match {
case String.typeCode => String
case Int.typeCode => Int
case Long.typeCode => Long
case Double.typeCode => Double
case Boolean.typeCode => Boolean
case DateTime.typeCode => DateTime
case Raw.typeCode => Raw
case AbstractEntity.typeCode => AbstractEntity
}
}
@packable
case class Coll(element: DBTypeRef) extends DBType with DBTypeRef
@packable
case class Tuple(parts: Seq[DBTypeRef]) extends DBType with DBTypeRef
@packable
case class Ref(tpe: DBTypeRef) extends DBType with DBTypeRef
@packable
case class Struct(name: String) extends DBTypeRef
sealed trait Meta extends DBType with Entity {
def name: String
}
object Meta {
implicit val packer: Packer[Meta] = Packer.poly[Meta,
StructMeta :: InterfaceMeta :: ValueMeta :: CollectionMeta :: DataViewMeta :: HNil].asInstanceOf[Packer[Meta]]
}
@packable
case class ColumnMeta(name: String, tpe: DBTypeRef)
@packable
case class StructMeta(name: String, columns: Seq[ColumnMeta], interfaces: Seq[String], isEntity: Boolean) extends Meta
@packable
case class InterfaceMeta(name: String, isSealed: Boolean, subs: Seq[String], interfaces: Seq[String]) extends Meta
@packable
case class ValueMeta(name: String, interfaces: Seq[String]) extends Meta
@packable
case class IndexMeta(name: String, key: DBTypeRef, unique: Boolean, prefix: Int)
@packable
case class CollectionMeta(name: String, valueType: DBTypeRef, indexes: Seq[IndexMeta], isRoot: Boolean, prefix: Int) extends Meta {
def indexPrefixOf(name: String) = indexes.find(_.name == name).get.prefix
}
@packable
case class DataViewMeta(name: String, prefix: Int) extends Meta
}
object MetaCollection extends RootCollection[DBType.Meta] {
def name = "meta-collection"
val byName = defUniqueIndex("by-name", _.name)
val prefix = 1
}
object DBMetas {
import DBType._
val metaCollectionMeta = CollectionMeta(
MetaCollection.name,
Struct("meta"),
IndexMeta("by-name", String, unique = true, 2) :: Nil,
isRoot = true, MetaCollection.prefix)
}
import scala.reflect.runtime.universe._
class DBTypeCollector(types: Map[String, Seq[Symbol]]) {
import scala.collection.mutable
import org.joda.time.DateTime
val resolvedMetas = mutable.Map[String, DBType.Meta]()
val resolvedStructTypes = mutable.Map[String, String]()
val unresolvedStructTypes = mutable.Map[String, mutable.Set[String]]()
val delayResolvedStructTypes = mutable.Set[Symbol]()
val stringType = typeOf[String]
val datetimeType = typeOf[DateTime]
val rawType = typeOf[Array[Byte]]
val refType = typeOf[Ref[_]]
val traversableType = typeOf[Traversable[_]]
val entityType = typeOf[Entity]
val collType = typeOf[EntityCollection[_]]
val rootCollType = typeOf[RootCollection[_]]
val indexBaseType = typeOf[View[_, _]]
val uniqueIndexType = typeOf[UniqueView[_, _]]
def clear() = {
resolvedMetas.clear()
resolvedStructTypes.clear()
unresolvedStructTypes.clear()
delayResolvedStructTypes.clear()
}
def collectDBTypes(log: LoggingAdapter) = {
clear()
val nameMapping = mutable.Map[String, String]()
def resolve(dbtype: DBType.Meta, symbolName: String) = {
nameMapping.put(dbtype.name, symbolName) match {
case Some(x) => throw new Error(s"db-name conflict: $symbolName and $x")
case None =>
resolvedMetas.put(dbtype.name, dbtype)
resolvedStructTypes.put(symbolName, dbtype.name)
unresolvedStructTypes.remove(symbolName)
}
log.debug("found dbtype: {} -> {}", dbtype.name, symbolName)
}
def resolveStruct(symbol: Symbol): Unit = {
if (!resolvedStructTypes.contains(symbol.fullName)) {
if (symbol.isClass) {
val classSymbol = symbol.asClass
if (classSymbol.isModuleClass) {
resolveStruct(classSymbol.selfType.termSymbol)
} else if (classSymbol.isAbstract) {
if (classSymbol.isSealed) {
val subs = classSymbol.knownDirectSubclasses
subs foreach resolveStruct
val dbtype = DBType.InterfaceMeta(fullName(symbol), isSealed = true, (subs map fullName).toSeq, Nil)
resolve(dbtype, symbol.fullName)
} else {
val dbtype = DBType.InterfaceMeta(fullName(symbol), isSealed = false, Nil, Nil)
resolve(dbtype, symbol.fullName)
}
} else if (classSymbol.isCaseClass) {
val tpe = classSymbol.toType
val ctor = tpe.decl(termNames.CONSTRUCTOR).asMethod
val dbtype = DBType.StructMeta(fullName(symbol),
ctor.paramLists(0) map { p =>
DBType.ColumnMeta(localName(p), getTypeRef(p.typeSignature, p.fullName))
}, Nil, isEntity = tpe <:< entityType)
resolve(dbtype, symbol.fullName)
} else
throw new Error(s"expect case class: ${symbol.fullName}")
} else if (symbol.isModule) {
resolve(DBType.ValueMeta(fullName(symbol), Nil), symbol.fullName)
} else
throw new IllegalStateException()
}
}
types(entityType.typeSymbol.fullName) foreach resolveStruct
types(collType.typeSymbol.fullName) withFilter {
!_.isAbstract
} foreach { symbol =>
val classSymbol = if (symbol.isModule) symbol.asModule.moduleClass.asClass else symbol.asClass
val tpe = classSymbol.toType
val valueType = getTypeRef(tpe.baseType(collType.typeSymbol).typeArgs(0), symbol.fullName)
val builder = Seq.newBuilder[DBType.IndexMeta]
tpe.members foreach { member =>
if (member.owner == classSymbol && member.isMethod) {
val returnType = member.asMethod.returnType
if (returnType <:< indexBaseType) {
val indexType = returnType.baseType(indexBaseType.typeSymbol)
builder += DBType.IndexMeta(fullName(member),
getTypeRef(indexType.typeArgs(0), member.fullName),
returnType <:< uniqueIndexType, 0)
}
}
}
val dbtype = DBType.CollectionMeta(fullName(symbol), valueType,
builder.result(), tpe <:< rootCollType, 0)
resolve(dbtype, symbol.fullName)
}
types(typeOf[DataView[_, _]].typeSymbol.fullName) withFilter {
!_.isAbstract
} foreach { symbol =>
val dbtype = DBType.DataViewMeta(fullName(symbol), 0)
resolve(dbtype, symbol.fullName)
}
delayResolvedStructTypes foreach resolveStruct
delayResolvedStructTypes.clear()
if (unresolvedStructTypes.nonEmpty) {
val sb = new StringBuilder
sb.append("following types unresolved and used as dbtypes:\\r\\n")
unresolvedStructTypes foreach {
case (symbolName, refs) =>
sb.append(s"\\t'$symbolName' used by\\r\\n")
refs foreach { ref =>
sb.append(s"\\t\\t$ref\\r\\n")
}
}
log.error(sb.toString())
throw new Error("unresolve types used as dbtypes!")
}
this
}
def localName(s: Symbol) = Util.camelCase2seqStyle(s.name.toString)
def fullName(s: Symbol) = Util.camelCase2seqStyle(s.name.toString)
def getTypeRef(tpe: Type, ref: String): DBType.DBTypeRef = {
if (tpe =:= stringType) DBType.String
else if (tpe =:= definitions.IntTpe) DBType.Int
else if (tpe =:= definitions.LongTpe) DBType.Long
else if (tpe =:= definitions.DoubleTpe) DBType.Double
else if (tpe =:= definitions.BooleanTpe) DBType.Boolean
else if (tpe =:= datetimeType) DBType.DateTime
else if (tpe =:= rawType) DBType.Raw
else if (tpe.typeSymbol.fullName startsWith "scala.Tuple") {
DBType.Tuple(tpe.typeArgs.map(getTypeRef(_, ref)))
} else if (tpe <:< traversableType) {
val tType = tpe.baseType(traversableType.typeSymbol)
DBType.Coll(getTypeRef(tType.typeArgs(0), tpe.typeSymbol.fullName))
} else if (tpe <:< refType) {
val entityTpe = tpe.typeArgs.head
if (entityTpe =:= entityType) {
DBType.Ref(DBType.AbstractEntity)
} else {
DBType.Ref(getTypeRef(entityTpe, ref))
}
} else {
resolvedStructTypes.get(tpe.typeSymbol.fullName) match {
case Some(x) =>
resolvedMetas(x) match {
case m: DBType.StructMeta => DBType.Struct(x)
case m: DBType.InterfaceMeta =>
if (!m.isSealed) throw new Error(s"expect sealed trait or case class: ${tpe.typeSymbol.fullName} referenced by $ref")
DBType.Struct(x)
case _ => throw new IllegalStateException()
}
case None =>
if (tpe <:< entityType)
unresolvedStructTypes.getOrElseUpdate(tpe.typeSymbol.fullName, mutable.Set()).add(ref)
else
delayResolvedStructTypes.add(tpe.typeSymbol)
DBType.Struct(fullName(tpe.typeSymbol))
}
DBType.Struct(fullName(tpe.typeSymbol))
}
}
}
| weeztech/weez-mercury | main/src/main/scala/com/weez/mercury/common/DBSchema.scala | Scala | apache-2.0 | 10,011 |
package ru.maizy.ambient7.core.util
/**
* Copyright (c) Nikita Kovaliov, maizy.ru, 2016-2017
* See LICENSE.txt for details.
*/
import java.time.{ Instant, ZoneId, ZonedDateTime }
import scala.concurrent.duration.Duration
object Dates {
def dateTimeForUser(dateTime: ZonedDateTime, timeZone: ZoneId = ZoneId.systemDefault()): String =
dateTime.withZoneSameInstant(timeZone).toString
def truncateDateTime(dateTime: ZonedDateTime, step: Long): ZonedDateTime = {
val epochSeconds = dateTime.toEpochSecond
val mod = epochSeconds % step
val instant = Instant.ofEpochSecond(epochSeconds - mod)
ZonedDateTime.ofInstant(instant, dateTime.getZone)
}
def truncateDateTime(dateTime: ZonedDateTime, step: Duration): ZonedDateTime = {
require(step.isFinite)
truncateDateTime(dateTime, step.toSeconds)
}
}
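A short usage sketch of the helpers above (editor-added; the object name is arbitrary, ambient7-core is assumed to be on the classpath, and the printed values are what the java.time formatting should produce):

import java.time.{ZoneId, ZonedDateTime}
import scala.concurrent.duration._
import ru.maizy.ambient7.core.util.Dates

object DatesSketch extends App {
  val t = ZonedDateTime.of(2017, 1, 2, 10, 47, 31, 0, ZoneId.of("UTC"))
  // truncate to the start of the enclosing 15-minute bucket
  println(Dates.truncateDateTime(t, 15.minutes))                // 2017-01-02T10:45Z[UTC]
  // render the same instant in a user's time zone
  println(Dates.dateTimeForUser(t, ZoneId.of("Europe/Moscow"))) // 2017-01-02T13:47:31+03:00[Europe/Moscow]
}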
| maizy/ambient7 | core/src/main/scala/ru/maizy/ambient7/core/util/Dates.scala | Scala | apache-2.0 | 837 |
/*
Copyright (c) 2010, Tobias Knerr
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
import java.io.File;
import org.openstreetmap.wiki.tttbot.ConversionFacade
object TemplatesToTables {
def main (args : Array[String]) {
/* interpret command line */
if (args.length == 0) {
println("usage: pass config file as first parameter")
return
}
val configPath = args(0)
/* run conversion */
ConversionFacade.performConversion(new File(configPath), args)
}
}
| tordanik/TemplatesToTables | src/TemplatesToTables.scala | Scala | bsd-2-clause | 1,709 |
/*
* Copyright 2019 ACINQ SAS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fr.acinq.eclair
import java.io.File
import java.net.InetSocketAddress
import akka.Done
import akka.actor.{ActorSystem, Address, Props, RootActorPath, SupervisorStrategy}
import akka.pattern.ask
import akka.util.Timeout
import com.amazonaws.services.secretsmanager.AWSSecretsManagerClient
import com.amazonaws.services.secretsmanager.model.GetSecretValueRequest
import fr.acinq.bitcoin.Crypto.{PrivateKey, PublicKey}
import fr.acinq.eclair.crypto.Noise.KeyPair
import fr.acinq.eclair.crypto.keymanager.LocalNodeKeyManager
import fr.acinq.eclair.io.Switchboard.{GetRouterPeerConf, RouterPeerConf}
import fr.acinq.eclair.io.{ClientSpawner, Server}
import fr.acinq.eclair.router.FrontRouter
import grizzled.slf4j.Logging
import scodec.bits.ByteVector
import java.nio.file.Files
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future, Promise}
class FrontSetup(datadir: File)(implicit system: ActorSystem) extends Logging {
implicit val timeout = Timeout(30 seconds)
implicit val ec: ExecutionContext = system.dispatcher
logger.info(s"hello!")
logger.info(s"version=${getClass.getPackage.getImplementationVersion} commit=${getClass.getPackage.getSpecificationVersion}")
logger.info(s"datadir=${datadir.getCanonicalPath}")
logger.info(s"initializing secure random generator")
// this will force the secure random instance to initialize itself right now, making sure it doesn't hang later
randomGen.init()
datadir.mkdirs()
val config = system.settings.config.getConfig("eclair")
val keyPair = {
val pub = ByteVector.fromValidHex(config.getString("front.pub"))
val priv = config.getString("front.priv-key-provider") match {
case "aws-sm" =>
val sm = AWSSecretsManagerClient.builder().build()
try {
// we retrieve the node key from AWS secrets manager and we compare the corresponding pub key with the expected one
val secretId = config.getString("front.aws-sm.priv-key-name")
ByteVector.fromValidHex(sm.getSecretValue(new GetSecretValueRequest().withSecretId(secretId)).getSecretString)
} finally {
sm.shutdown()
}
case "env" => ByteVector.fromValidHex(config.getString("front.priv-key"))
case "seed" =>
// demo in single-server setup
val chain = config.getString("chain")
val nodeSeedFilename: String = "node_seed.dat"
val seedPath = new File(datadir, nodeSeedFilename)
val nodeSeed = ByteVector(Files.readAllBytes(seedPath.toPath))
new LocalNodeKeyManager(nodeSeed, NodeParams.hashFromChain(chain)).nodeKey.privateKey.value.bytes
}
val keyPair = KeyPair(pub, priv)
require(PrivateKey(priv).publicKey == PublicKey(pub), "priv/pub keys mismatch")
keyPair
}
logger.info(s"nodeid=${keyPair.pub.toHex}")
val serverBindingAddress = new InetSocketAddress(
config.getString("server.binding-ip"),
config.getInt("server.port"))
def bootstrap: Future[Unit] = {
val frontJoinedCluster = Promise[Done]()
val backendAddressFound = Promise[Address]()
val tcpBound = Promise[Done]()
for {
_ <- Future.successful(0)
_ = system.actorOf(Props(new ClusterListener(frontJoinedCluster, backendAddressFound)), name = "cluster-listener")
_ <- frontJoinedCluster.future
backendAddress <- backendAddressFound.future
// we give time for the cluster to be ready
_ <- akka.pattern.after(5.seconds)(Future.successful((): Unit))
switchBoardSelection = system.actorSelection(RootActorPath(backendAddress) / "user" / "*" / "switchboard")
remoteSwitchboard <- switchBoardSelection.resolveOne()
routerSelection = system.actorSelection(RootActorPath(backendAddress) / "user" / "*" / "router")
remoteRouter <- routerSelection.resolveOne()
RouterPeerConf(routerConf, peerConnectionConf) <- (remoteSwitchboard ? GetRouterPeerConf).mapTo[RouterPeerConf]
frontRouterInitialized = Promise[Done]()
frontRouter = system.actorOf(SimpleSupervisor.props(FrontRouter.props(routerConf, remoteRouter, Some(frontRouterInitialized)), "front-router", SupervisorStrategy.Resume))
_ <- frontRouterInitialized.future
clientSpawner = system.actorOf(Props(new ClientSpawner(keyPair, None, peerConnectionConf, remoteSwitchboard, frontRouter)), name = "client-spawner")
server = system.actorOf(SimpleSupervisor.props(Server.props(keyPair, peerConnectionConf, remoteSwitchboard, frontRouter, serverBindingAddress, Some(tcpBound)), "server", SupervisorStrategy.Restart))
} yield ()
}
}
| ACINQ/eclair | eclair-front/src/main/scala/fr/acinq/eclair/FrontSetup.scala | Scala | apache-2.0 | 5,189 |
package com.seanshubin.learn.datomic.domain
trait Notifications {
def effectiveConfiguration(configuration: Configuration)
def configurationError(lines: Seq[String])
def topLevelException(exception: Throwable)
def schemaUpdateResult(singleUpdateResult: SingleUpdateResult)
}
| SeanShubin/learn-datomic | domain/src/main/scala/com/seanshubin/learn/datomic/domain/Notifications.scala | Scala | unlicense | 287 |
import java.io.{File, FileOutputStream}
import scala.tools.partest.DirectTest
import scala.tools.partest.nest.StreamCapture
import scala.tools.asm
import asm.{ClassWriter, Opcodes}
import Opcodes._
// This test ensures that we can read JDK 8 (classfile format 52) files, including those
// with default methods. To do that it first uses ASM to generate an interface called
// HasDefaultMethod. Then it runs a normal compile on Scala source that extends that
// interface. Any failure will be dumped to std out.
//
// By its nature the test can only work on JDK 8+ because under JDK 7- the
// interface won't verify.
object Test extends DirectTest {
override def extraSettings: String = s"-opt:inline:** -usejavacp -cp ${testOutput.path}"
def generateInterface(): Unit = {
val interfaceName = "HasDefaultMethod"
val methodType = "()Ljava/lang/String;"
val cw = new ClassWriter(0)
cw.visit(52, ACC_PUBLIC+ACC_ABSTRACT+ACC_INTERFACE, interfaceName, null, "java/lang/Object", null)
def createMethod(flags:Int, name: String): Unit = {
val method = cw.visitMethod(flags, name, methodType, null, null)
method.visitCode()
method.visitLdcInsn(s"hello from $name")
method.visitInsn(ARETURN)
method.visitMaxs(1, 1)
method.visitEnd()
}
createMethod(ACC_PUBLIC, "publicMethod")
createMethod(ACC_PUBLIC+ACC_STATIC, "staticMethod")
createMethod(ACC_PRIVATE, "privateMethod")
cw.visitEnd()
val bytes = cw.toByteArray()
val fos = new FileOutputStream(new File(s"${testOutput.path}/$interfaceName.class"))
try
fos write bytes
finally
fos.close()
}
def code =
"""
class Driver extends HasDefaultMethod {
println(publicMethod())
println(HasDefaultMethod.staticMethod())
}
"""
override def show(): Unit = StreamCapture.redirErr {
generateInterface()
compile()
Class.forName("Driver").getDeclaredConstructor().newInstance()
()
}
}
| scala/scala | test/files/run/classfile-format-52.scala | Scala | apache-2.0 | 1,957 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package toplevel
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes._
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.psi.types.result.TypeResult
trait ScTypeBoundsOwner extends ScalaPsiElement {
def lowerBound: TypeResult
def upperBound: TypeResult
def viewBound: Seq[ScType] = Nil
def contextBound: Seq[ScType] = Nil
def hasBounds: Boolean = lowerTypeElement.nonEmpty || upperTypeElement.nonEmpty
def hasImplicitBound: Boolean = viewTypeElement.nonEmpty || contextBoundTypeElement.nonEmpty
def lowerTypeElement: Option[ScTypeElement] = None
def upperTypeElement: Option[ScTypeElement] = None
def viewTypeElement: Seq[ScTypeElement] = Nil
def contextBoundTypeElement: Seq[ScTypeElement] = Nil
def removeImplicitBounds() {}
def boundsText: String = {
def toString(bounds: Traversable[ScTypeElement], elementType: IElementType) = bounds.map {
case e => s"${elementType.toString} ${e.getText}"
}
(toString(lowerTypeElement, tLOWER_BOUND) ++
toString(upperTypeElement, tUPPER_BOUND) ++
toString(viewTypeElement, tVIEW) ++
toString(contextBoundTypeElement, tCOLON))
.mkString(" ")
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/toplevel/ScTypeBoundsOwner.scala | Scala | apache-2.0 | 1,405 |
/*
* Copyright 1998-2019 Linux.org.ru
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ru.org.linux.util.markdown
import com.vladsch.flexmark.ast.{Image, ImageRef}
import com.vladsch.flexmark.ast.util.TextCollectingVisitor
import com.vladsch.flexmark.html.HtmlRenderer
import com.vladsch.flexmark.html.renderer.{NodeRenderer, NodeRenderingHandler}
import com.vladsch.flexmark.util.options.MutableDataHolder
import scala.jdk.CollectionConverters._
class SuppressImagesExtension extends HtmlRenderer.HtmlRendererExtension {
override def rendererOptions(options: MutableDataHolder): Unit = {}
override def extend(rendererBuilder: HtmlRenderer.Builder, rendererType: String): Unit = {
if (rendererBuilder.isRendererType("HTML")) {
rendererBuilder.nodeRendererFactory(_ => new SuppressImagesRenderer)
}
}
}
class SuppressImagesRenderer extends NodeRenderer {
override def getNodeRenderingHandlers = {
Set(new NodeRenderingHandler[Image](classOf[Image], (node, _, html) => {
val altText = new TextCollectingVisitor().collectAndGetText(node)
html
.withAttr()
.attr("href", node.getUrl)
.attr("rel", "nofollow")
.tag("a")
.text(altText)
.closeTag("a")
}), new NodeRenderingHandler[ImageRef](classOf[ImageRef], (node, _, html) => {
val altText = new TextCollectingVisitor().collectAndGetText(node)
html.text(altText)
})
).asJava.asInstanceOf[java.util.Set[NodeRenderingHandler[_]]]
}
}
| maxcom/lorsource | src/main/scala/ru/org/linux/util/markdown/SuppressImagesExtension.scala | Scala | apache-2.0 | 2,044 |
package net.liftweb.util
import _root_.java.text.SimpleDateFormat
import _root_.java.util.{TimeZone, Calendar, Date, Locale}
/*
* Copyright 2006-2008 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* The TimeHelpers object extends the TimeHelpers trait. It can be imported to access all of the trait functions.
*/
object TimeHelpers extends TimeHelpers with ControlHelpers with ClassHelpers
/**
* The TimeHelpers trait provides functions to create TimeSpans (objects representing an amount of time), to manage date formats,
* and general utility functions (get the date for today, get the year/month/day number, ...)
*/
trait TimeHelpers { self: ControlHelpers =>
/** private variable allowing the access to all TimeHelpers functions from inside the TimeSpan class */
private val outer = this
/** transforms a long to a TimeSpanBuilder object. Usage: 3L.seconds returns a TimeSpan of 3000L millis */
implicit def longToTimeSpanBuilder(in: Long): TimeSpanBuilder = TimeSpanBuilder(in)
/** transforms an int to a TimeSpanBuilder object. Usage: 3.seconds returns a TimeSpan of 3000L millis */
implicit def intToTimeSpanBuilder(in: Int): TimeSpanBuilder = TimeSpanBuilder(in)
/** transforms a long to a TimeSpan object. Usage: 3000L returns a TimeSpan of 3000L millis */
implicit def longToTimeSpan(in: Long): TimeSpan = TimeSpan(in)
/** transforms an int to a TimeSpan object. Usage: 3000 returns a TimeSpan of 3000L millis */
implicit def intToTimeSpan(in: Int): TimeSpan = TimeSpan(in)
/** class building TimeSpans given an amount (len) and a method specify the time unit */
case class TimeSpanBuilder(val len: Long) {
def seconds = TimeSpan(outer.seconds(len))
def second = seconds
def minutes = TimeSpan(outer.minutes(len))
def minute = minutes
def hours = TimeSpan(outer.hours(len))
def hour = hours
def days = TimeSpan(outer.days(len))
def day = days
def weeks = TimeSpan(outer.weeks(len))
def week = weeks
}
/**
* transforms a TimeSpan to a date by converting the TimeSpan expressed as millis and creating
* a Date lasting that number of millis from the Epoch time (see the documentation for java.util.Date)
*/
implicit def timeSpanToDate(in: TimeSpan): Date = in.date
/** transforms a TimeSpan to its long value as millis */
implicit def timeSpanToLong(in: TimeSpan): Long = in.millis
/**
* The TimeSpan class represents an amount of time.
* It can be translated to a date with the date method. In that case, the number of milliseconds will be used to create a Date
* object starting from the Epoch time (see the documentation for java.util.Date)
*/
class TimeSpan(val millis: Long) {
/** @return a Date as the amount of time represented by the TimeSpan after the Epoch date */
def date = new Date(millis)
/** @return a Date as the amount of time represented by the TimeSpan after now */
def later = TimeSpan(millis + outer.millis).date
/** @return a Date as the amount of time represented by the TimeSpan before now */
def ago = TimeSpan(outer.millis - millis).date
/** @return a TimeSpan representing the addition of 2 TimeSpans */
def +(in: TimeSpan) = TimeSpan(this.millis + in.millis)
/** @return a TimeSpan representing the subtraction of 2 TimeSpans */
def -(in: TimeSpan) = TimeSpan(this.millis - in.millis)
/** override the equals method so that TimeSpans can be compared to long, int and TimeSpan */
override def equals(cmp: Any) = {
cmp match {
case lo: Long => lo == this.millis
case i: Int => i == this.millis
case ti: TimeSpan => ti.millis == this.millis
case _ => false
}
}
/** override the toString method to display a readable amount of time */
override def toString = TimeSpan.format(millis)
}
/**
* The TimeSpan companion object provides a constructor and formatting for TimeSpan values, which represent an amount of time.
* A TimeSpan can be translated to a date with the date method. In that case, the number of milliseconds will be used to create a Date
* object starting from the Epoch time (see the documentation for java.util.Date)
*/
object TimeSpan {
/** time units and values used when converting a total number of millis to those units (see the format function) */
val scales = List((1000L, "milli"), (60L, "second"), (60L, "minute"), (24L, "hour"), (7L, "day"), (10000L, "week"))
/** explicit constructor for a TimeSpan */
def apply(in: Long) = new TimeSpan(in)
/**
* Formats a number of millis to a string representing the number of weeks, days, hours, minutes, seconds, millis
*/
def format(millis: Long): String = {
def divideInUnits(millis: Long) = scales.foldLeft[(Long, List[(Long, String)])]((millis, Nil)){ (total, div) =>
(total._1 / div._1, (total._1 % div._1, div._2) :: total._2)
}._2
def formatAmount(amountUnit: (Long, String)) = amountUnit match {
case (amount, unit) if (amount == 1) => amount + " " + unit
case (amount, unit) => amount + " " + unit + "s"
}
divideInUnits(millis).filter(_._1 > 0).map(formatAmount(_)).mkString(", ")
}
}
/** @return the current number of millis: System.currentTimeMillis */
def millis = System.currentTimeMillis
/** @return the number of millis corresponding to 'in' seconds */
def seconds(in: Long): Long = in * 1000L
/** @return the number of millis corresponding to 'in' minutes */
def minutes(in: Long): Long = seconds(in) * 60L
/** @return the number of millis corresponding to 'in' hours */
def hours(in: Long): Long = minutes(in) * 60L
/** @return the number of millis corresponding to 'in' days */
def days(in: Long): Long = hours(in) * 24L
/** @return the number of millis corresponding to 'in' weeks */
def weeks(in: Long): Long = days(in) * 7L
/** implicit def used to add the noTime method to the Date class */
implicit def toDateExtension(d: Date) = new DateExtension(d)
/** This class adds a noTime method to the Date class, in order to get a Date object starting at 00:00 */
class DateExtension(date: Date) {
/** @return a Date object starting at 00:00 from date */
def noTime = {
val calendar = Calendar.getInstance
calendar.set(Calendar.HOUR_OF_DAY, 0)
calendar.set(Calendar.MINUTE, 0)
calendar.set(Calendar.SECOND, 0)
calendar.set(Calendar.MILLISECOND, 0)
calendar.getTime
}
}
/** implicit def used to add the setXXX methods to the Calendar class */
implicit def toCalendarExtension(c: Calendar) = new CalendarExtension(c)
/** This class adds the setXXX methods to the Calendar class. Each setter returns the updated Calendar */
class CalendarExtension(c: Calendar) {
/** set the day of the month (1 based) and return the calendar */
def setDay(d: Int) = { c.set(Calendar.DAY_OF_MONTH, d); c }
/** set the month (0 based) and return the calendar */
def setMonth(m: Int) = { c.set(Calendar.MONTH, m); c }
/** set the year and return the calendar */
def setYear(y: Int) = { c.set(Calendar.YEAR, y); c }
/** set the TimeZone and return the calendar */
def setTimezone(tz: TimeZone) = { c.setTimeZone(tz); c }
/** set the time to 00:00:00.000 and return the calendar */
def noTime = { c.setTime(c.getTime.noTime); c }
}
/** @return the date object for now */
def now = new Date
/** @return the Calendar object for today (the TimeZone is the local TimeZone). Its time is 00:00:00.000 */
def today = Calendar.getInstance.noTime
/** @return the current year */
def currentYear: Int = Calendar.getInstance.get(Calendar.YEAR)
/**
* @deprecated use now instead
* @return the current time as a Date object
*/
def timeNow = new Date
/**
* @deprecated use today instead
* @return the current Day as a Date object
*/
def dayNow: Date = 0.seconds.later.noTime
/** alias for new Date(millis) */
def time(when: Long) = new Date(when)
/** @return the month corresponding to the input date (0 based, relative to UTC) */
def month(in: Date): Int = {
val cal = Calendar.getInstance(utc)
cal.setTimeInMillis(in.getTime)
cal.get(Calendar.MONTH)
}
/** @return the year corresponding to the input date (relative to UTC) */
def year(in: Date): Int = {
val cal = Calendar.getInstance(utc)
cal.setTimeInMillis(in.getTime)
cal.get(Calendar.YEAR)
}
/** @return the day of month corresponding to the input date (1 based) */
def day(in: Date): Int = {
val cal = Calendar.getInstance(utc)
cal.setTimeInMillis(in.getTime)
cal.get(Calendar.DAY_OF_MONTH)
}
/** The UTC TimeZone */
val utc = TimeZone.getTimeZone("UTC")
/** @return the number of days since epoch converted from millis */
def millisToDays(millis: Long): Long = millis / (1000L * 60L * 60L * 24L)
/** @return the number of days since epoch */
def daysSinceEpoch: Long = millisToDays(millis)
/** @return the time taken to evaluate f in millis and the result */
def calcTime[T](f: => T): (Long, T) = {
val start = millis
val result = f
(millis - start, result)
}
/**
* Log a message with the time taken in millis to do something and return the result
* @return the result
*/
def logTime[T](msg: String)(f: => T): T = {
val (time, ret) = calcTime(f)
Log.info(msg + " took " + time + " Milliseconds")
ret
}
/**
* @return a standard format HH:mm:ss
*/
val hourFormat = new SimpleDateFormat("HH:mm:ss")
/**
* @return the formatted time for a given Date
*/
def hourFormat(in: Date): String = hourFormat.format(in)
/** @return a standard format for the date yyyy/MM/dd */
def dateFormatter = new SimpleDateFormat("yyyy/MM/dd")
/** @return a format for the time which includes the TimeZone: HH:mm zzz*/
def timeFormatter = new SimpleDateFormat("HH:mm zzz")
/** @return today's date formatted as yyyy/MM/dd */
def formattedDateNow = dateFormatter.format(now)
/** @return now's time formatted as HH:mm zzz */
def formattedTimeNow = timeFormatter.format(now)
/** @return a formatter for internet dates including: the day of week, the month, day of month, time and time zone */
def internetDateFormatter = {
val ret = new SimpleDateFormat("EEE, d MMM yyyy HH:mm:ss z", Locale.US)
ret.setTimeZone(utc)
ret
}
/** @return a Full(date) from a string using the internet format, or Empty if the parse is unsuccessful */
def boxParseInternetDate(dateString: String): Box[Date] = tryo {
internetDateFormatter.parse(dateString)
}
/** @return a date from a string using the internet format. Returns the Epoch date if the parse is unsuccessful */
def parseInternetDate(dateString: String): Date = tryo {
internetDateFormatter.parse(dateString)
} openOr new Date(0L)
/** @return a date formatted with the internet format */
def toInternetDate(in: Date): String = internetDateFormatter.format(in)
/** @return a date formatted with the internet format (from a number of millis) */
def toInternetDate(in: Long): String = internetDateFormatter.format(new Date(in))
/** @return the current time as an internet date */
def nowAsInternetDate: String = toInternetDate(millis)
/** @return a Full(date) or a failure if the input couldn't be translated to date (or Empty if the input is null)*/
def toDate(in: Any): Box[Date] = {
try {
in match {
case null => Empty
case d: Date => Full(d)
case lng: Long => Full(new Date(lng))
case lng: Number => Full(new Date(lng.longValue))
case Nil | Empty | None | Failure(_, _, _) => Empty
case Full(v) => toDate(v)
case Some(v) => toDate(v)
case v :: vs => toDate(v)
case s : String => tryo(internetDateFormatter.parse(s)) or tryo(dateFormatter.parse(s))
case o => toDate(o.toString)
}
} catch {
case e => Log.debug("Error parsing date "+in, e); Failure("Bad date: "+in, Full(e), Empty)
}
}
}
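A minimal usage sketch of the helpers above (editor-added; the object name is arbitrary and lift-util is assumed to be on the classpath):

import net.liftweb.util.TimeHelpers._

object TimeHelpersSketch {
  def main(args: Array[String]): Unit = {
    val span = 3.seconds + 2.minutes      // a TimeSpan of 123000 millis
    println(span)                         // "2 minutes, 3 seconds"
    println(span.later)                   // a java.util.Date: now plus the span
    println(1.day.ago)                    // a java.util.Date: now minus one day
    println(TimeSpan.format(90061000L))   // "1 day, 1 hour, 1 minute, 1 second"
    println(formattedDateNow)             // today's date formatted as yyyy/MM/dd
  }
}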
| beni55/liftweb | lift-util/src/main/scala/net/liftweb/util/TimeHelpers.scala | Scala | apache-2.0 | 12,556 |
package com.plasmaconduit.framework.mvc
import com.plasmaconduit.framework.HttpRequest
trait Middleware[R <: HttpRequest[R]] {
def intercept(next: Controller[R]): Controller[R]
}
object Middleware {
def compose[R <: HttpRequest[R]](action: Controller[R], middleware: Seq[Middleware[R]]): Controller[R] = {
middleware.reverse.foldLeft(action) {(m, n) =>
n.intercept(m)
}
}
}
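The fold over middleware.reverse above fixes the nesting order: the first middleware in the sequence becomes the outermost interceptor. A self-contained sketch of that ordering (editor-added; it uses plain String => String handlers in place of the plasmaconduit Controller and Middleware types):

object ComposeOrderSketch extends App {
  type Handler = String => String
  // wrap(name) plays the role of Middleware.intercept: it wraps the next handler
  def wrap(name: String)(next: Handler): Handler = s => name + "(" + next(s) + ")"
  val action: Handler = s => s
  val middleware: Seq[Handler => Handler] = Seq(wrap("first"), wrap("second"), wrap("third"))
  // same shape as Middleware.compose: reverse then foldLeft, so "first" ends up outermost
  val composed = middleware.reverse.foldLeft(action)((inner, m) => m(inner))
  println(composed("req"))  // prints first(second(third(req)))
}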
| plasmaconduit/plasmaconduit-framework | src/main/scala/com/plasmaconduit/framework/mvc/Middleware.scala | Scala | mit | 400 |
/*
* Copyright 2011-2018 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.ahc
import org.asynchttpclient.RequestBuilderBase
import org.asynchttpclient.uri.Uri
class AhcRequestBuilder(method: String, disableUrlEncoding: Boolean)
extends RequestBuilderBase[AhcRequestBuilder](method, disableUrlEncoding, false) {
def getUri: Uri = uri
}
| wiacekm/gatling | gatling-http/src/main/scala/io/gatling/http/ahc/AhcRequestBuilder.scala | Scala | apache-2.0 | 910 |
package org.loklak.crawler
import org.scalatest.{ShouldMatchers, FlatSpec}
/**
* Created by Scott on 6/2/16.
*/
class TwitterSearchFuncTest extends FlatSpec with ShouldMatchers {
"Twitter Crawler" should "be able to perform search on any topic" in {
TwitterSearchFunc.grouped_search("loklak").flatten.length shouldBe > (0)
}
}
| DengYiping/loklak-scala | src/test/scala/org/loklak/crawler/TwitterSearchFuncTest.scala | Scala | mit | 339 |
package mesosphere.marathon.event.http
import javax.inject.Inject
import akka.actor._
import akka.pattern.ask
import mesosphere.marathon.api.v2.json.Formats._
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.event._
import mesosphere.marathon.event.http.HttpEventActor._
import mesosphere.marathon.event.http.SubscribersKeeperActor.GetSubscribers
import mesosphere.marathon.metrics.{ MetricPrefixes, Metrics }
import spray.client.pipelining.{ sendReceive, _ }
import spray.http.{ HttpRequest, HttpResponse }
import spray.httpx.PlayJsonSupport
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration._
import scala.util.control.NonFatal
import scala.util.{ Failure, Success, Try }
/**
* This actor subscribes to the event bus and distributes every event to all HTTP callback listeners.
* The list of active subscriptions is handled in the subscribersKeeper.
* If a callback handler cannot be reached or is slow, an exponential backoff is applied.
*/
object HttpEventActor {
case class NotificationFailed(url: String)
case class NotificationSuccess(url: String)
case class EventNotificationLimit(failedCount: Long, backoffUntil: Option[Deadline]) {
def nextFailed: EventNotificationLimit = {
val next = failedCount + 1
EventNotificationLimit(next, Some(math.pow(2, next.toDouble).seconds.fromNow))
}
def notLimited: Boolean = backoffUntil.fold(true)(_.isOverdue())
def limited: Boolean = !notLimited
}
val NoLimit = EventNotificationLimit(0, None)
private case class Broadcast(event: MarathonEvent, subscribers: EventSubscribers)
class HttpEventActorMetrics @Inject() (metrics: Metrics) {
private val pre = MetricPrefixes.SERVICE
private val clazz = classOf[HttpEventActor]
// the number of requests that are open without response
val outstandingCallbacks = metrics.counter(metrics.name(pre, clazz, "outstanding-callbacks"))
// the number of events that are broadcast
val eventMeter = metrics.meter(metrics.name(pre, clazz, "events"))
// the number of events that are not sent to callback listeners due to backoff
val skippedCallbacks = metrics.meter(metrics.name(pre, clazz, "skipped-callbacks"))
// the number of callbacks that have failed during delivery
val failedCallbacks = metrics.meter(metrics.name(pre, clazz, "failed-callbacks"))
// the response time of the callback listeners
val callbackResponseTime = metrics.timer(metrics.name(pre, clazz, "callback-response-time"))
}
}
class HttpEventActor(
conf: HttpEventConfiguration,
subscribersKeeper: ActorRef,
metrics: HttpEventActorMetrics,
clock: Clock)
extends Actor with ActorLogging with PlayJsonSupport {
implicit val timeout = conf.eventRequestTimeout
def pipeline(implicit ec: ExecutionContext): HttpRequest => Future[HttpResponse] = {
addHeader("Accept", "application/json") ~> sendReceive
}
var limiter = Map.empty[String, EventNotificationLimit].withDefaultValue(NoLimit)
def receive: Receive = {
case event: MarathonEvent => resolveSubscribersForEventAndBroadcast(event)
case Broadcast(event, subscribers) => broadcast(event, subscribers)
case NotificationSuccess(url) => limiter += url -> NoLimit
case NotificationFailed(url) => limiter += url -> limiter(url).nextFailed
case _ => log.warning("Message not understood!")
}
def resolveSubscribersForEventAndBroadcast(event: MarathonEvent): Unit = {
metrics.eventMeter.mark()
log.info("POSTing to all endpoints.")
val me = self
import context.dispatcher
(subscribersKeeper ? GetSubscribers).mapTo[EventSubscribers].map { subscribers =>
me ! Broadcast(event, subscribers)
}.onFailure {
case NonFatal(e) => log.error("While trying to resolve subscribers for event {}", event)
}
}
def broadcast(event: MarathonEvent, subscribers: EventSubscribers): Unit = {
val (active, limited) = subscribers.urls.partition(limiter(_).notLimited)
if (limited.nonEmpty) {
log.info(s"""Will not send event ${event.eventType} to unresponsive hosts: ${limited.mkString(" ")}""")
}
// remove all unsubscribed callback listeners
limiter = limiter.filterKeys(subscribers.urls).iterator.toMap.withDefaultValue(NoLimit)
metrics.skippedCallbacks.mark(limited.size)
active.foreach(url => Try(post(url, event, self)) match {
case Success(res) =>
case Failure(ex) =>
log.warning(s"Failed to post $event to $url because ${ex.getClass.getSimpleName}: ${ex.getMessage}")
metrics.failedCallbacks.mark()
self ! NotificationFailed(url)
})
}
def post(url: String, event: MarathonEvent, eventActor: ActorRef): Unit = {
log.info("Sending POST to:" + url)
metrics.outstandingCallbacks.inc()
val start = clock.now()
val request = Post(url, eventToJson(event))
val response = pipeline(context.dispatcher)(request)
import context.dispatcher
response.onComplete {
case _ =>
metrics.outstandingCallbacks.dec()
metrics.callbackResponseTime.update(start.until(clock.now()))
}
response.onComplete {
case Success(res) if res.status.isSuccess =>
val inTime = start.until(clock.now()) < conf.slowConsumerDuration
eventActor ! (if (inTime) NotificationSuccess(url) else NotificationFailed(url))
case Success(res) =>
log.warning(s"No success response for post $event to $url")
metrics.failedCallbacks.mark()
eventActor ! NotificationFailed(url)
case Failure(ex) =>
log.warning(s"Failed to post $event to $url because ${ex.getClass.getSimpleName}: ${ex.getMessage}")
metrics.failedCallbacks.mark()
eventActor ! NotificationFailed(url)
}
}
}
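A small, hypothetical driver (editor-added, not part of Marathon) showing the exponential backoff encoded by EventNotificationLimit.nextFailed:

import mesosphere.marathon.event.http.HttpEventActor.NoLimit

object BackoffSketch extends App {
  // each consecutive failure doubles the backoff window: 2s, 4s, 8s, ...
  val afterThree = NoLimit.nextFailed.nextFailed.nextFailed
  println(afterThree.failedCount)                   // 3
  println(afterThree.backoffUntil.map(_.timeLeft))  // roughly 8 seconds from now
  println(afterThree.limited)                       // true until that deadline is overdue
}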
| yp-engineering/marathon | src/main/scala/mesosphere/marathon/event/http/HttpEventActor.scala | Scala | apache-2.0 | 5,783 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aliyun.emr.examples.sql.streaming
import java.util.UUID
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.streaming.Trigger
object StructuredDatahubSample {
def main(args: Array[String]): Unit = {
if (args.length < 8) {
// scalastyle:off
println(
"""
|Usage: StructuredDatahubSample <endpoint> <project> <topic> <access key id>
| <access key secret> <zookeeper host:port> <max offset per trigger> <trigger interval>
| [checkpoint directory=/tmp/temporary-<random uuid>]
|
""".stripMargin)
// scalastyle:on
sys.exit(1)
}
val Array(endpoint, project, topic, accessKeyId, accessKeySecret,
zkHosts, maxOffset, triggerInterval, _*) = args
val checkpointDir = if (args.length > 8) {
args(8)
} else {
"/tmp/temporary-" + UUID.randomUUID.toString
}
val spark = SparkSession.builder()
.appName("StructuredDatahubSample")
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
val value = spark.readStream.format("datahub")
.option("endpoint", endpoint)
.option("project", project)
.option("topic", topic)
.option("access.key.id", accessKeyId)
.option("access.key.secret", accessKeySecret)
.option("max.offset.per.trigger", maxOffset)
.option("zookeeper.connect.address", zkHosts)
.option("decimal.precision", "5")
.option("decimal.scale", "5")
.load()
val query = value.select("*").writeStream.format("console")
.option("checkpointLocation", checkpointDir)
.outputMode("append")
.trigger(Trigger.ProcessingTime(triggerInterval.toLong))
.start()
query.awaitTermination()
}
}
| aliyun/aliyun-emapreduce-sdk | examples/src/main/scala/com/aliyun/emr/examples/sql/streaming/StructuredDatahubSample.scala | Scala | artistic-2.0 | 2,545 |
package oauthorize.grants
import oauthorize.utils._
import oauthorize.model._
import oauthorize.service._
import oauth2.spec.Req._
import oauth2.spec.AccessTokenErrors._
import oauth2.spec._
import oauth2.spec.model._
import scala.concurrent.Future
import scala.concurrent.ExecutionContext
class ResourceOwnerCredentialsGrant(
val config: Oauth2Config,
val oauthStore: Oauth2Store,
val clientSecretHasher: ClientSecretHasher,
val userStore: UserStore,
val userPasswordHasher: UserPasswordHasher,
val tokens: TokenGenerator) {
def processOwnerCredentialsRequest(
req: OauthRequest,
clientAuth: Option[ClientAuthentication])(implicit ctx: ExecutionContext): Future[Either[Err, AccessTokenResponse]] = {
clientAuth match {
case None => error(unauthorized_client, "unauthorized client", StatusCodes.Unauthorized)
case Some(basicAuth) => oauthStore.getClient(basicAuth.clientId) flatMap {
case None => error(invalid_client, "unregistered client", StatusCodes.Unauthorized)
case Some(client) if (!clientSecretHasher.secretMatches(basicAuth.clientSecret, client.secretInfo)) =>
error(invalid_client, "bad credentials", StatusCodes.Unauthorized)
case Some(client) =>
(req.param(grant_type), req.param(username), req.param(password), req.param(scope)) match {
case (Some(grantType), Some(userName), Some(pwd), Some(authScope)) => {
val rq = ResourceOwnerCredentialsRequest(grantType, userName, pwd, authScope.split(ScopeSeparator))
doProcess(rq, client)
} case _ => error(invalid_request, s"mandatory: $grant_type, $username, $password, $scope")
}
}
}
}
private def doProcess(
rq: ResourceOwnerCredentialsRequest,
oauthClient: Oauth2Client)(implicit ctx: ExecutionContext): Future[Either[Err, AccessTokenResponse]] = {
import oauth2.spec.AccessTokenErrors._
rq.getError(oauthClient) match {
case Some(error) => Future.successful(Left(error))
case None => {
userStore.getUser(UserId(rq.username, None)) match {
case None => error(invalid_request, "no such user", 401)
case Some(usr) if (usr.pwd.map(info => !userPasswordHasher.secretMatches(rq.password, info)).getOrElse(true)) =>
error(invalid_request, "bad user credentials", 401)
case Some(usr) => {
val accessToken = tokens.generateAccessToken(oauthClient, rq.authScope, Some(usr.id))
val refreshToken = if (oauthClient.authorizedGrantTypes.contains(GrantTypes.refresh_token)) {
Some(tokens.generateRefreshToken(oauthClient, rq.authScope, Some(usr.id)))
} else None
oauthStore.storeTokens(AccessAndRefreshTokens(accessToken, refreshToken), oauthClient) map { stored =>
val response = AccessTokenResponse(
stored.accessToken.value,
stored.refreshToken.map(_.value),
TokenType.bearer,
stored.accessToken.validity,
rq.authScope.mkString(ScopeSeparator))
Right(response)
}
}
}
}
}
}
}
| adaptorel/oauthorize | oauthorize-core/src/main/scala/grants/ResourceOwnerCredentialsGrant.scala | Scala | apache-2.0 | 3,178 |
package net.surguy.slidegame.shared
import scala.collection.mutable.ListBuffer
/**
* Support stepping forwards and backwards through the history.
*/
class TimeTravelActions(observable: Observable) {
private val pastActions = new ListBuffer[Message]()
private val futureActions = new ListBuffer[Message]()
private val recordingObservers: Seq[PartialFunction[Message, Unit]] = List(
{ case msg: MoveActive => pastActions.append(msg); futureActions.clear() },
{ case msg: SetActive => pastActions.append(msg); futureActions.clear() },
{ case Reset(_) => pastActions.clear(); futureActions.clear() },
)
private val ttObserver: Seq[PartialFunction[Message, Unit]] = List(
{ case TimeTravel(true) if pastActions.isEmpty => // Do nothing
case TimeTravel(true) =>
println("Stepping backward")
val actionToReplay = pastActions.remove(pastActions.size-1)
notifyWithoutRecording(invert(actionToReplay))
futureActions.insert(0, actionToReplay)
case TimeTravel(false) if futureActions.isEmpty => // Do nothing
case TimeTravel(false) =>
println("Stepping forward")
val actionToReplay = futureActions.remove(0)
notifyWithoutRecording(actionToReplay)
pastActions.append(actionToReplay)
})
private def notifyWithoutRecording(msg: Message) = {
recordingObservers.foreach(observable.unregisterObserver)
observable.notifyObservers(msg)
recordingObservers.foreach(observable.registerObserver)
}
private def invert(msg: Message): Message = {
msg match {
case MoveActive(Left) => MoveActive(Right)
case MoveActive(Right) => MoveActive(Left)
case MoveActive(Up) => MoveActive(Down)
case MoveActive(Down) => MoveActive(Up)
case SetActive(previousPiece, newPiece) => SetActive(newPiece, previousPiece)
case _ => msg
}
}
ttObserver.foreach(observable.registerObserver)
recordingObservers.foreach(observable.registerObserver)
}
| inigo/scalajs-slidegame | shared/src/main/scala/net/surguy/slidegame/shared/TimeTravelActions.scala | Scala | gpl-3.0 | 1,991 |
package com.avast.cactus
import org.scalatest.FunSuite
class ConverterMethodsTest extends FunSuite {
test("map") {
case class A(value: Int)
case class B(value: String)
case class C(values: String)
val convAtoB = Converter[A, B] { a =>
B(a.value.toString)
}
val convSeqAtoC: Converter[A, C] = convAtoB.map(b => C(b.value + "42"))
assertResult(Right(C("1842")))(convSeqAtoC.apply("fieldName")(A(18)))
}
test("andThen") {
case class A(value: Int)
case class B(value: String)
case class C(values: String)
val convAtoB = Converter[A, B] { a =>
B(a.value.toString)
}
val convBtoC = Converter[B, C] { b =>
C(b.value + "42")
}
val convAtoC: Converter[A, C] = convAtoB.andThen(convBtoC)
assertResult(Right(C("1842")))(convAtoC.apply("fieldName")(A(18)))
}
test("contraMap") {
case class A(value: Int)
case class B(value: String)
case class AA(value: Double)
val convAtoB = Converter[A, B] { a =>
B(a.value.toString)
}
val convAAtoB: Converter[AA, B] = convAtoB.contraMap(aa => A(aa.value.toInt))
assertResult(Right(B("42")))(convAAtoB.apply("fieldName")(AA(42.3)))
}
test("compose") {
case class A(value: Int)
case class B(value: String)
case class AA(value: Double)
val convAtoB = Converter[A, B] { a =>
B(a.value.toString)
}
val convAAtoA = Converter[AA, A] { aa =>
A(aa.value.toInt)
}
val convAAtoB: Converter[AA, B] = convAtoB.compose(convAAtoA)
assertResult(Right(B("42")))(convAAtoB.apply("fieldName")(AA(42.3)))
}
}
| avast/cactus | common/src/test/scala/com/avast/cactus/ConverterMethodsTest.scala | Scala | apache-2.0 | 1,607 |
package com.sksamuel.avro4s
import java.io.File
import java.nio.file.Paths
import java.util.UUID
import org.apache.avro.file.{DataFileReader, DataFileWriter}
import org.apache.avro.generic.GenericData.Record
import org.apache.avro.generic.{GenericData, GenericDatumReader, GenericDatumWriter, GenericRecord}
object AvroTest extends App {
Paths.get("")
val s = org.apache.avro.SchemaBuilder
.record("HandshakeRequest").namespace("org.apache.avro.ipc")
.fields()
.name("clientHash").`type`().fixed("MD5").size(16).noDefault()
.name("clientProtocol").`type`().nullable().stringType().noDefault()
.name("meta").`type`().nullable().map().values().bytesType().noDefault()
.endRecord()
println(s)
val schema = new org.apache.avro.Schema.Parser().parse(getClass.getResourceAsStream("/students.avsc"))
val avroFile = new File("students.avro")
// Create a writer to serialize the record
val datumWriter = new GenericDatumWriter[GenericRecord](schema)
val dataFileWriter = new DataFileWriter[GenericRecord](datumWriter)
dataFileWriter.create(schema, avroFile)
val record = new Record(schema)
record.put("id", UUID.randomUUID.toString)
record.put("student_id", UUID.randomUUID.toString)
record.put("university_id", UUID.randomUUID.toString)
val courseRec = new GenericData.Record(schema.getField("course_details").schema())
record.put("course_details", courseRec)
courseRec.put("course_id", UUID.randomUUID.toString)
courseRec.put("enroll_date", "qwewqe")
courseRec.put("verb", "qwewqe")
courseRec.put("result_score", 2.0)
dataFileWriter.append(record)
dataFileWriter.close()
val datumReader = new GenericDatumReader[GenericRecord](schema)
val dataFileReader = new DataFileReader[GenericRecord](avroFile, datumReader)
println("Deserialized data is :")
while (dataFileReader.hasNext) {
val rec = dataFileReader.next(new GenericData.Record(schema))
println(rec)
}
}
| YuvalItzchakov/avro4s | avro4s-core/src/test/scala/com/sksamuel/avro4s/AvroTest.scala | Scala | mit | 1,949 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.filter
import org.locationtech.geomesa.filter.Bounds.Bound
/**
 * Typed bounds for a single attribute. If the filter is unbounded on one or both sides, the associated bound will be None.
*
* For example, bounds for 'foo < 5' would be (None, Some(5))
* Special case for 'foo NOT NULL' will have both bounds be None
*
* @param lower lower bound, if any
* @param upper upper bound, if any
* @tparam T binding of the attribute type
*/
case class Bounds[T](lower: Bound[T], upper: Bound[T]) {
def bounds: (Option[T], Option[T]) = (lower.value, upper.value)
/**
* Bounded on at least one side
*
* @return
*/
def isBounded: Boolean = lower.value.nonEmpty || upper.value.nonEmpty
/**
* Bounded on both sides
*
* @return
*/
def isBoundedBothSides: Boolean = lower.value.nonEmpty && upper.value.nonEmpty
/**
* Covers multiple values
*
* @return
*/
def isRange: Boolean = lower.value.isEmpty || lower.value != upper.value
/**
* Covers a single value
*
* @return
*/
def isEquals: Boolean = !isRange
override def toString: String = {
(if (lower.inclusive) { "[" } else { "(" }) + lower.value.getOrElse("-\u221E") + "," +
upper.value.getOrElse("+\u221E") + (if (upper.inclusive) { "]" } else { ")" })
}
}
object Bounds {
/**
* Single bound (lower or upper).
*
* Bound may be unbounded, in which case value is None. Note by convention unbounded bounds are exclusive
*
* @param value value of this bound, if bounded
* @param inclusive whether the bound is inclusive or exclusive.
* for example, 'foo < 5' is exclusive, 'foo <= 5' is inclusive
*/
case class Bound[T](value: Option[T], inclusive: Boolean) {
def exclusive: Boolean = !inclusive
}
object Bound {
private val unboundedBound = Bound[Any](None, inclusive = false)
def unbounded[T]: Bound[T] = unboundedBound.asInstanceOf[Bound[T]]
}
private val allValues = Bounds(Bound.unbounded, Bound.unbounded)
def everything[T]: Bounds[T] = allValues.asInstanceOf[Bounds[T]]
/**
* Gets the smaller value between two lower bounds, taking into account exclusivity.
* If the bounds are equal, the first bound will always be returned
*
* @param bound1 first bound
* @param bound2 second bound
* @return smaller bound
*/
def smallerLowerBound[T](bound1: Bound[T], bound2: Bound[T]): Bound[T] = {
if (bound1.value.isEmpty) {
bound1
} else if (bound2.value.isEmpty) {
bound2
} else {
val c = bound1.value.get.asInstanceOf[Comparable[Any]].compareTo(bound2.value.get)
if (c < 0 || (c == 0 && (bound1.inclusive || bound2.exclusive))) { bound1 } else { bound2 }
}
}
/**
* Gets the larger value between two upper bounds, taking into account exclusivity.
* If the bounds are equal, the first bound will always be returned
*
* @param bound1 first bound
* @param bound2 second bound
* @return larger bound
*/
def largerUpperBound[T](bound1: Bound[T], bound2: Bound[T]): Bound[T] = {
if (bound1.value.isEmpty) {
bound1
} else if (bound2.value.isEmpty) {
bound2
} else {
val c = bound1.value.get.asInstanceOf[Comparable[Any]].compareTo(bound2.value.get)
if (c > 0 || (c == 0 && (bound1.inclusive || bound2.exclusive))) { bound1 } else { bound2 }
}
}
/**
* Gets the smaller value between two upper bounds, taking into account exclusivity.
* If the bounds are equal, the first bound will always be returned
*
* @param bound1 first bound
* @param bound2 second bound
* @return smaller bound
*/
def smallerUpperBound[T](bound1: Bound[T], bound2: Bound[T]): Bound[T] = {
if (bound2.value.isEmpty) {
bound1
} else if (bound1.value.isEmpty) {
bound2
} else {
val c = bound1.value.get.asInstanceOf[Comparable[Any]].compareTo(bound2.value.get)
if (c < 0 || (c == 0 && (bound2.inclusive || bound1.exclusive))) { bound1 } else { bound2 }
}
}
/**
   * Gets the larger value between two lower bounds, taking into account exclusivity.
* If the bounds are equal, the first bound will always be returned
*
* @param bound1 first bound
* @param bound2 second bound
* @return larger bound
*/
def largerLowerBound[T](bound1: Bound[T], bound2: Bound[T]): Bound[T] = {
if (bound2.value.isEmpty) {
bound1
} else if (bound1.value.isEmpty) {
bound2
} else {
val c = bound1.value.get.asInstanceOf[Comparable[Any]].compareTo(bound2.value.get)
if (c > 0 || (c == 0 && (bound2.inclusive || bound1.exclusive))) { bound1 } else { bound2 }
}
}
/**
* Takes the intersection of two bounds. If they are disjoint, will return None.
*
* @param left first bounds
* @param right second bounds
* @tparam T type parameter
* @return intersection
*/
def intersection[T](left: Bounds[T], right: Bounds[T]): Option[Bounds[T]] = {
val lower = largerLowerBound(left.lower, right.lower)
val upper = smallerUpperBound(right.upper, left.upper)
(lower.value, upper.value) match {
case (Some(lo), Some(up)) if lo.asInstanceOf[Comparable[Any]].compareTo(up) > 0 => None
case _ => Some(Bounds(lower, upper))
}
}
/**
* Takes the union of two bound sequences. Naive implementation that just concatenates
*
* @param left first bounds
* @param right second bounds
* @tparam T type parameter
* @return union
*/
def union[T](left: Seq[Bounds[T]], right: Seq[Bounds[T]]): Seq[Bounds[T]] = left ++ right
}
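// Illustrative usage sketch (not part of the original file): demonstrates the intersection
// and union semantics documented above, using only the API defined in this file. It is a
// hypothetical example object, assumed to compile alongside the definitions above (same
// package, Bound already imported at the top of the file).
object BoundsUsageExample {
  // 'foo > 3'  => (3, +inf)
  private val gt3 = Bounds(Bound(Some(3), inclusive = false), Bound.unbounded[Int])
  // 'foo <= 10' => (-inf, 10]
  private val le10 = Bounds(Bound.unbounded[Int], Bound(Some(10), inclusive = true))
  // 'foo < 1'  => (-inf, 1)
  private val lt1 = Bounds(Bound.unbounded[Int], Bound(Some(1), inclusive = false))

  def main(args: Array[String]): Unit = {
    // keeps the larger lower bound and the smaller upper bound: prints Some((3,10])
    println(Bounds.intersection(gt3, le10))
    // disjoint ranges intersect to None
    println(Bounds.intersection(gt3, lt1))
    // the naive union just concatenates: prints List((3,+∞), (-∞,1))
    println(Bounds.union(Seq(gt3), Seq(lt1)))
  }
}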
|
ddseapy/geomesa
|
geomesa-filter/src/main/scala/org/locationtech/geomesa/filter/Bounds.scala
|
Scala
|
apache-2.0
| 6,130 |
/**
* (c) Copyright 2012 WibiData, Inc.
*
* See the NOTICE file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kiji.schema.shell.ddl
import org.kiji.schema.shell.DDLException
import org.kiji.schema.shell.Environment
import org.kiji.schema.KConstants
/** Return a modified environment that uses a different Kiji instance name. */
class UseInstanceCommand(val env: Environment, val instance: String) extends DDLCommand {
override def exec(): Environment = {
val instances = env.kijiSystem.listInstances()
// TODO: Eventually eliminate hilarity around instance names.
if (instances.contains(instance)
|| (instance.equals(KConstants.DEFAULT_INSTANCE_NAME)
&& instances.contains("(default)"))) {
echo("Using Kiji instance \"" + instance + "\"")
return env.withInstance(instance)
} else {
throw new DDLException("No such Kiji instance: " + instance)
}
}
}
|
alexandre-normand/kiji-schema-shell
|
src/main/scala/org/kiji/schema/shell/ddl/UseInstanceCommand.scala
|
Scala
|
apache-2.0
| 1,535 |
package pipelines
import scala.reflect.{BeanProperty, ClassTag}
class CKMConf extends Serializable {
@BeanProperty var dataset: String = "imagenet-small"
@BeanProperty var expid: String = "imagenet-small-run"
@BeanProperty var mode: String = "scala"
@BeanProperty var seed: Int = 0
@BeanProperty var layers: Int = 1
  /* Mandatory architecture params; each array must be layers long */
@BeanProperty var filters: Array[Int] = Array(1)
@BeanProperty var bandwidth : Array[Double] = Array(1.8)
@BeanProperty var pool: Array[Int] = Array(2)
@BeanProperty var patch_sizes: Array[Int] = Array(5)
/* Optional Bells and whistles */
/* Whether the output of this layer should be serialized as float */
@BeanProperty var float: Array[Int] = Array()
/* Whether this convolution layer should be zero padded*/
@BeanProperty var zeroPad: Array[Int] = Array()
/* Whether to use FWHT as opposed to regular matrix multiply */
@BeanProperty var fastfood: Array[Int] = Array()
/* If stride is not provided default stride of 1 will be used */
@BeanProperty var convStride: Map[Int, Int] = Map()
@BeanProperty var preProcess: Boolean = false
/* If stride is not provided default stride of poolSize will be used (for that layer) */
/* TODO: THIS IS IGNORED RIGHT NOW */
@BeanProperty var poolStride: Array[Int] = Array(2)
@BeanProperty var nonLinearity: String = "cosine"
@BeanProperty var loss: String = "WeightedLeastSquares"
@BeanProperty var reg: Double = 0.001
@BeanProperty var numClasses: Int = 1000
@BeanProperty var yarn: Boolean = true
@BeanProperty var solverWeight: Double = 0
@BeanProperty var kernelGamma: Double = 5e-5
@BeanProperty var blockSize: Int = 4000
@BeanProperty var numIters: Int = 1
@BeanProperty var whiten: Array[Int] = Array()
@BeanProperty var whitenerValue: Double = 0.1
@BeanProperty var whitenerOffset: Double = 0.001
@BeanProperty var solve: Boolean = true
@BeanProperty var sampleCov: Boolean = false
@BeanProperty var solver: String = "ls"
@BeanProperty var insanity: Boolean = false
@BeanProperty var saveFeatures: Boolean = false
@BeanProperty var saveModel: Boolean = false
@BeanProperty var checkpointDir: String = "/tmp/spark-checkpoint"
@BeanProperty var augment: Boolean = false
@BeanProperty var augmentPatchSize: Int = 24
@BeanProperty var augmentType: String = "random"
@BeanProperty var filterLoc: String = ""
@BeanProperty var featureDir: String = "/"
@BeanProperty var labelDir: String = "/"
@BeanProperty var modelDir: String = "/tmp"
@BeanProperty var loadWhitener: Boolean = false
@BeanProperty var loadFilters: Boolean = false
@BeanProperty var loadLayer: Boolean = false
@BeanProperty var hashFeatureId: Boolean = false
@BeanProperty var ben: Boolean = false
@BeanProperty var layerToLoad: Int = 0
}
object CKMConf {
  val LEGACY_CUTOFF: Int = 1250
def genFeatureId(conf: CKMConf, legacy:Boolean = false) = {
/* Any random seed below 1250 is considered legacy mode */
val featureId =
if (legacy) {
conf.seed + "_" +
conf.dataset + "_" +
conf.expid + "_" +
conf.layers + "_" +
conf.patch_sizes.mkString("-") + "_" +
conf.bandwidth.mkString("-") + "_" +
conf.pool.mkString("-") + "_" +
conf.poolStride.mkString("-") + "_" +
conf.filters.mkString("-")
} else {
val fastFood = if (conf.fastfood.filter(_ < conf.layers).size != 0 ) "ff_" + conf.fastfood.filter(_ < conf.layers).mkString("-") + "_" else ""
val augment = if (conf.augment) "Augment_" else ""
val float = if (conf.float.filter(_ < conf.layers).size != 0 ) "ff_" + conf.float.filter(_ < conf.layers).mkString("-") + "_" else ""
val zeroPad = if (conf.zeroPad.filter(_ < conf.layers).size != 0 ) "ff_" + conf.zeroPad.filter(_ < conf.layers).mkString("-") + "_" else ""
val sampleCov = if (conf.sampleCov) "sampleCov_" else ""
val ben = if (conf.ben) "_ben_" else ""
ben +
conf.seed + "_" +
conf.dataset + "_" +
conf.layers + "_" +
sampleCov +
float +
fastFood +
zeroPad +
augment +
conf.nonLinearity + "_" +
conf.patch_sizes.slice(0,conf.layers).mkString("-") + "_" +
conf.convStride.keys.filter(_ < conf.layers).mkString("-") + "_" +
conf.bandwidth.slice(0,conf.layers).mkString("-") + "_" +
conf.pool.slice(0,conf.layers).mkString("-") + "_" +
conf.poolStride.filter(_ < conf.layers).mkString("-") + "_" +
conf.filters.slice(0,conf.layers).mkString("-") + "_" +
conf.whiten.filter(_ < conf.layers).mkString("-") + "_"
}
if(conf.hashFeatureId) {
println("CONF BEN " + conf.ben)
println("CONF BEN " + conf.ben)
println("legacy " + legacy)
println("HASHING FEATURE ID " + featureId)
Math.abs(featureId.hashCode()).toString()
} else {
featureId
}
}
}
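// Worked example (comment added for clarity, not in the original source): with the default
// bean values above and legacy = true, genFeatureId produces
//   "0_imagenet-small_imagenet-small-run_1_5_1.8_2_2_1"
// i.e. seed_dataset_expid_layers_patchSizes_bandwidth_pool_poolStride_filters.
// With hashFeatureId = true the same string is reduced to Math.abs(featureId.hashCode).toString.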
|
Vaishaal/ckm
|
keystone_pipeline/src/main/scala/pipelines/CKMConf.scala
|
Scala
|
apache-2.0
| 5,005 |
package com.github.leifh.seglcoverage
trait AnotherService {
def isUnder18AndSwiss(customer : Customer) : Boolean = if(customer.age < 18) true else if(customer.country == Switzerland) true else false
}
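// Behavioural note (comment added for clarity, not in the original source): as written, the
// nested if/else returns true whenever age < 18, regardless of country, and also whenever the
// country is Switzerland - i.e. it behaves like a logical OR despite the "And" in its name.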
|
leifh/segl-coverage
|
src/main/scala/com/github/leifh/seglcoverage/AnotherService.scala
|
Scala
|
apache-2.0
| 205 |
package dao.impl
import javax.inject.Inject
import com.google.inject.Singleton
import dao.{LaboratoryDAO, RoomDAO}
import model.table._
import model._
import org.h2.jdbc.JdbcSQLException
import play.Logger
import play.api.db.slick.DatabaseConfigProvider
import play.api.libs.concurrent.Execution.Implicits._
import services.state.ActionState
import services.state
import slick.driver.JdbcProfile
import scala.concurrent.Future
import scala.util.{Failure, Success}
/**
* Implements DAO operations of Laboratories
*
* @author Camilo Sampedro <[email protected]>
* @param dbConfigProvider DB manager injected.
*/
@Singleton
class LaboratoryDAOImpl @Inject()
(dbConfigProvider: DatabaseConfigProvider, roomDAO: RoomDAO) extends LaboratoryDAO {
/**
* Database configuration
*/
val dbConfig = dbConfigProvider.get[JdbcProfile]
import dbConfig._
import driver.api._
/**
* Table with all laboratories, like select * from laboratory
*/
implicit val laboratories = TableQuery[LaboratoryTable]
implicit val rooms = TableQuery[RoomTable]
implicit val computers = TableQuery[ComputerTable]
implicit val computerStates = TableQuery[ComputerStateTable]
implicit val connectedUsers = TableQuery[ConnectedUserTable]
implicit val computersAndRoomsPentaJoin = {
laboratories joinLeft rooms on {
_.id === _.laboratoryId
    } joinLeft computers on { (x, y) =>
      x._2.map(_.id) === y.roomId
    } joinLeft computerStates on { (x, y) =>
      x._2.map(_.ip) === y.computerIp
    } joinLeft connectedUsers on { (x, y) =>
      x._2.map(_.computerIp) === y.computerStateComputerIp &&
        x._2.map(_.registeredDate) === y.computerStateRegisteredDate
    }
}
/**
* Adds a new laboratory to database.
*
* @param laboratory Laboratory to add.
* @return Result string message.
*/
override def add(laboratory: Laboratory): Future[ActionState] = {
Logger.debug(s"""Adding to database: $laboratory""")
db.run(laboratories += laboratory).map(res => state.ActionCompleted).recover {
case ex: Exception =>
Logger.error(s"There was an exception adding the laboratory $laboratory to the database", ex)
state.Failed
}
}
/**
* Removes a laboratory
*
* @param id Laboratory's ID.
* @return Operation result.
*/
override def delete(id: Long): Future[ActionState] = {
db.run(search(id).delete).map {
case 1 => state.ActionCompleted
case 0 => state.NotFound
case error =>
play.Logger.error(s"Error code deleting that laboratory: $error")
state.Failed
}
}
private def search(id: Long) = laboratories.filter(_.id === id)
/**
* List all the laboratories on the database.
*
* @return All the laboratories found.
*/
override def listAll: Future[Seq[Laboratory]] = {
db.run(laboratories.result)
}
/**
   * Gets the laboratory with all the associated rooms and computers.
*
* @param id Laboratory's ID.
* @return Found laboratory with all its rooms and computers.
*/
override def getWithChildren(id: Long): Future[Seq[(Laboratory, Option[Room], (Option[Computer], Option[ComputerState], Option[ConnectedUser]))]] = {
db.run {
computersAndRoomsPentaJoin
.filter(_._1._1._1._1.id === id)
.map(x => (x._1._1._1._1, x._1._1._1._2, (x._1._1._2, x._1._2, x._2)))
.result
}
}
/**
* Gets a laboratory by its ID.
*
* @param id Laboratory's ID.
   * @return The found laboratory, or None if it's not found.
*/
override def get(id: Long): Future[Option[Laboratory]] = {
// Select * from laboratory where id = $id
db.run(search(id).result.headOption)
}
override def update(laboratory: Laboratory): Future[ActionState] = {
db.run {
val foundLaboratory = search(laboratory.id)
foundLaboratory.update(laboratory).asTry
}.map{
case Success(res) if res == 1 =>
play.Logger.info(s"updated with result: $res")
state.ActionCompleted
case Success(_) =>
play.Logger.info("Laboratory not found")
state.NotFound
case Failure(e: JdbcSQLException) =>
play.Logger.error("There was an error looking for that laboratory",e)
state.NotFound
case _ => state.Failed
}
}
}
|
ProjectAton/AtonLab
|
app/dao/impl/LaboratoryDAOImpl.scala
|
Scala
|
gpl-3.0
| 4,289 |
package com.neusoft.spark.examples
import org.apache.spark._
import com.neusoft.spark.examples.Utils._
object ToolGrep {
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("Grep file content")
val sc = new SparkContext(conf)
val options = argOptions(args)
val input = options.remove("input").getOrElse("hdfs:///user/root/data/")
val word = options.remove("word").getOrElse("hello")
sc.textFile(input).
filter( _.contains(word) ).
collect.
foreach( println(_) )
sc.stop()
}
}
|
ruoyousi/spark-examples
|
src/main/scala/com/neusoft/spark/examples/ToolGrep.scala
|
Scala
|
apache-2.0
| 585 |
package io.finch.benchmarks
import com.twitter.finagle.http.Request
import com.twitter.io.Buf
import io.finch._
import org.openjdk.jmh.annotations.{Benchmark, Scope, State}
@State(Scope.Benchmark)
class BodyBenchmark extends FinchBenchmark {
val input = {
val r = Request()
val content = Buf.Utf8("x" * 1024)
r.content = content
r.contentLength = content.length.toLong
Input(r)
}
@Benchmark
def stringOption: Option[String] = bodyOption(input).value.get
@Benchmark
def string: String = body(input).value.get
@Benchmark
def byteArrayOption: Option[Array[Byte]] = binaryBodyOption(input).value.get
@Benchmark
def byteArray: Array[Byte] = binaryBody(input).value.get
}
|
travisbrown/finch
|
benchmarks/src/main/scala/io/finch/benchmarks/BodyBenchmark.scala
|
Scala
|
apache-2.0
| 714 |
/**
* Copyright (c) 2014, MoonGene. All rights reserved.
*
* This source code is licensed under the GPL license found in the
* LICENSE_GPL file in the root directory of this source tree. An alternative
* commercial license is also available upon request.
*/
package controllers
import _root_.models.{AccountSegment, DataAccess, SegmentQuery}
import play.api._
import play.api.data._
import play.api.data.Forms._
import play.api.mvc._
import play.api.libs.json._
import reactivemongo.bson.{BSONObjectID, BSONDocument}
import org.joda.time.{DateTimeZone, DateTime}
import concurrent.{Promise, Future}
import services.{EmailMessage, EmailService}
/*
DataFetcher:
- fetches analytical data from DB for displaying various analytics pages
  - sends and processes support messages
*/
object DataFetcher extends Controller with Secured with DataAccess {
def getDayStartTime(date: DateTime) = date.millisOfDay.setCopy(0)
def getDayEndTime(date: DateTime) = date.millisOfDay.setCopy(86399999)
def getDocIDsFromTimeRange(from: DateTime, to: DateTime) = {
val monthIdsSet = collection.mutable.HashSet[String]()
//We always work in months, so we just set this to first day of the month
var currentDate = from.dayOfMonth.setCopy(1)
val lastDate = to.dayOfMonth.setCopy(28) //28 is enough because it will be always before 1 in starting date
    while (currentDate.isBefore(to)) {
monthIdsSet.add(currentDate.getYear + "-" + currentDate.getMonthOfYear)
currentDate = currentDate.plusMonths(1)
}
monthIdsSet
}
def getDocIDForDate(date: DateTime) = date.getYear + "-" + date.getMonthOfYear
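  // e.g. for 15 March 2014 this yields "2014-3" - note the month is not zero-padded, which
  // matches the ids produced by getDocIDsFromTimeRange above. (Comment added for clarity.)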
def dashboard(appId: String, dateFromMs: Long, dateToMs: Long = 0) = getMonthDocs(appId, dateFromMs, dateToMs)
def getMonthDocs(appId: String, dateFromMs: Long, dateToMs: Long = 0) = IsAuthenticated{ email => request =>
//Check whether this user has access to this app
Async {
accountByEmail(email).map( acc =>
if(acc == None || acc.get.apps.filter(_.stringify == appId).size < 1) {
Forbidden("You are not authorized to view this app details.")
} else {
val dateFrom = getDayStartTime(new DateTime(dateFromMs, DateTimeZone.UTC))
val dateTo = getDayEndTime(if(dateToMs < dateFromMs) DateTime.now.toDateTime(DateTimeZone.UTC) else new DateTime(dateToMs, DateTimeZone.UTC))
val docsIds = getDocIDsFromTimeRange(dateFrom, dateTo)
val appColl = DataFetcherDB.getCollection("gate", appId)
val query = BSONDocument("_id" -> BSONDocument( "$in" -> docsIds ))
val filter = BSONDocument("_id" -> 1, "v" -> 2, "va" -> 3)
val appMonthFutureDocs = appColl.find(query, filter).cursor[BSONDocument].toList()
Async {
appMonthFutureDocs.map( appMonthDocs => {
val jsonStr = DataFetcherDB.json(appMonthDocs)
Ok(jsonStr).as("application/json")
})
}
}
)
}
}
def getMonthDoc(appId: String, date: Long, includeVersion: Boolean, includeFlow: Boolean, includeEcoProfile: Boolean, includeHW: Boolean) = IsAuthenticated{ email => implicit request =>
//Check whether this user has access to this app
Async {
accountByEmail(email).map( acc =>
if(acc == None || acc.get.apps.filter(_.stringify == appId).size < 1) {
Forbidden("You are not authorized to view this app details.")
} else {
val forDate = new DateTime(date, DateTimeZone.UTC)
val docId = getDocIDForDate(forDate)
val appColl = DataFetcherDB.getCollection("gate", appId)
val query = BSONDocument("_id" -> docId)
var incVal = 1
var filter = BSONDocument("_id" -> incVal)
if(includeVersion) { filter = filter.add(BSONDocument("v" -> (incVal + 1), "va" -> (incVal + 2))); incVal += 2 }
if(includeFlow) { filter = filter.add(BSONDocument("vf" -> (incVal + 1), "vfa" -> (incVal + 2))); incVal += 2 }
if(includeEcoProfile) { filter = filter.add(BSONDocument("ecoprof" -> (incVal + 1))); incVal += 1 }
if(includeHW) { filter = filter.add(BSONDocument("hw" -> (incVal + 1))); incVal += 1 }
val appMonthFutureDoc = appColl.find(query, filter).one[BSONDocument]
Async {
appMonthFutureDoc.map( appMonthDoc => {
if(appMonthDoc == None)
Ok(Json.obj("code" -> -1, "message" -> "No data found."))
else
Ok(DataFetcherDB.json(appMonthDoc.get)).as("application/json")
})
}
}
)
}
}
def behaviorAppFlow(appId: String, fromMS: Long, toMS: Long = 0) = getMonthDoc(appId, fromMS, includeVersion = false, includeFlow = true, includeEcoProfile = false, includeHW = true)
def behaviorProfile(appId: String, date: Long) = getMonthDoc(appId, date, includeVersion = true, includeFlow = false, includeEcoProfile = false, includeHW = true)
def geo(appId: String, date: Long) = getMonthDoc(appId, date, includeVersion = true, includeFlow = false, includeEcoProfile = false, includeHW = true)
def ecobalance(appId: String, date: Long) = getMonthDoc(appId, date, includeVersion = true, includeFlow = false, includeEcoProfile = false, includeHW = true)
def ecoprofile(appId: String, date: Long) = getMonthDoc(appId, date, includeVersion = true, includeFlow = false, includeEcoProfile = true, includeHW = true)
def ecotrigger(appId: String, date: Long) = getMonthDoc(appId, date, includeVersion = false, includeFlow = false, includeEcoProfile = true, includeHW = true)
def behaviorFirstSession(appId: String) = IsAuthenticated{ email => implicit request =>
//Check whether this user has access to this app
Async {
accountByEmail(email).map( acc =>
if(acc == None || acc.get.apps.filter(_.stringify == appId).size < 1) {
Forbidden("You are not authorized to view this app details.")
} else {
val appColl = DataFetcherDB.getCollection("gate", appId)
val query = BSONDocument("_id" -> "first_session")
val filter = BSONDocument("_id" -> 1, "v" -> 2)
val firstSessioFutureDoc = appColl.find(query, filter).one[BSONDocument]
Async {
firstSessioFutureDoc.map( firstSessionDoc => {
if(firstSessionDoc == None)
Ok(Json.obj("code" -> -1, "message" -> "No data found."))
else
Ok(DataFetcherDB.json(firstSessionDoc.get)).as("application/json")
})
}
}
)
}
}
val supportMessageSendForm = Form(
tuple(
"name" -> optional(text),
"email" -> optional(text),
"topic" -> text,
"message" -> text
)
)
def messagessend = Action { implicit request =>
supportMessageSendForm.bindFromRequest().fold(
      errors => Ok(Json.obj("code" -> -1, "message" -> "Invalid form details.")),
formDetails => {
val cookieEmail = request.cookies.get("login_email")
val cookieToken = request.cookies.get("login_token")
//TODO Load this from a config file
val sysAccEmail = "[email protected]"
//If we are logged in, let's send a message from a user
if (cookieEmail != None && cookieToken != None) {
EmailService.sendEmail(new EmailMessage(sysAccEmail, cookieEmail.get.value, null, null,
formDetails._3 + " From: " + formDetails._2.getOrElse("None"), formDetails._4, null))
} else {
//We are not logged in, let's send a message from Contact Us view, non-registered users
EmailService.sendEmail(new EmailMessage(sysAccEmail, sysAccEmail, null, formDetails._2,
formDetails._3 + " From: " + formDetails._2.getOrElse("None"), formDetails._4, null))
}
Ok(Json.obj("code" -> 0, "msg" -> "Success."))
}
)
}
def segmentFilterValues(appId: String) = IsAuthenticated { email => implicit request => {
//TODO Check if user has access to this app
Async {
getAppUsedValues(appId).map(doc => {
if(doc != None) {
Ok(DataFetcherDB.json(doc.get)).as("application/json")
} else
Ok(Json.obj("code" -> -1, "msg" -> "Can't get used values for this doc."))
})
}
}}
def segmentdelete(name: String) = IsAuthenticated { email => implicit request => {
Async {
accountByEmail(email).map { maybeAcc =>
if(maybeAcc != None) {
val acc = maybeAcc.get
val curSegments = if(acc.segments == None) List[AccountSegment]() else acc.segments.get.filter(_.name != name)
accountUpdateSegments(acc, Some(curSegments))
Ok(Json.obj("code" -> 0))
} else {
Ok(Json.obj("code" -> -1, "msg" -> "Can't get user by email."))
}
}
}
}}
def segmentsave = IsAuthenticated{ email => implicit request => {
Async {
request.body.asJson.map{ json =>
accountByEmail(email).map { maybeAcc =>
if(maybeAcc != None) {
val acc = maybeAcc.get
val saveSegm = AccountSegment(
name = (json \ "name").as[String],
countries = (json \ "countries").as[Option[List[String]]],
cities = (json \ "cities").as[Option[List[Int]]],
platform = (json \ "platform").as[Option[List[Int]]],
platformV = (json \ "platformV").as[Option[List[String]]],
language = (json \ "language").as[Option[List[String]]],
vendor = (json \ "vendor").as[Option[List[String]]],
model = (json \ "model").as[Option[List[String]]],
carrier = (json \ "carrier").as[Option[List[String]]],
connection = (json \ "connection").as[Option[List[String]]],
appversion = (json \ "appversion").as[Option[List[String]]],
usertype = (json \ "usertype").as[Option[List[String]]],
trafficsource = (json \ "trafficsource").as[Option[List[String]]],
resolution = (json \ "resolution").as[Option[List[String]]]
)
val curSegments = if(acc.segments == None) List[AccountSegment](saveSegm) else (acc.segments.get.filter(_.name != saveSegm.name) ++ List[AccountSegment](saveSegm))
accountUpdateSegments(acc, Some(curSegments))
Ok(Json.obj("code" -> 0))
} else {
Ok(Json.obj("code" -> -2, "msg" -> "Can't get user by email."))
}
}
}.getOrElse {
Future(Ok(Json.obj("code" -> -1, "msg" -> "Can't get user by email.")))
}
}
}}
def segment = IsAuthenticated{ email => implicit request => {
Async {
request.body.asJson.map{ json =>
//TODO Check if user has access to this app
val appId = (json \ "appId").as[String]
appById(appId).map(maybeApp => {
//TODO Add a check if this failed and it's NONE
val app = maybeApp.get
var appTimezone = DateTimeZone.UTC
try { appTimezone = DateTimeZone.forID(app.timezone) } catch {
case e: IllegalArgumentException => appTimezone = DateTimeZone.UTC
}
val segmQuery = SegmentQuery(
timezone = appTimezone,
dateFromMsUTC = (json \ "dateFromMs").as[Long],
dateToMsUTC = (json \ "dateToMs").as[Long],
countries = (json \ "countries").asOpt[List[String]],
resolution = (json \ "resolution").asOpt[List[String]],
vendor = (json \ "vendor").asOpt[List[String]],
model = (json \ "model").asOpt[List[String]],
carrier = (json \ "carrier").asOpt[List[String]],
platform = (json \ "platform").asOpt[List[Int]],
platformV = (json \ "platformV").asOpt[List[String]]
)
//TODO we need to make caching here and check if some dates were already calculated before
//Use segmQuery.generateId() to store it's unique combination of values
Async {
DataFetcherDB.getDatabase("gate").command(segmQuery.buildAggregateCommand(appId)).map( res => {
val docs = res.toList
if(docs.size > 0) {
Ok(DataFetcherDB.json(docs(0))) //There should be one doc only anyway
} else {
Ok(Json.obj("code" -> -1, "msg" -> "No aggregated data document has been created."))
}
})
}
})
}.getOrElse {
Future(Ok(Json.obj("code" -> -1, "msg" -> "Can't transform segment request.")))
}
}}
}
def retention = IsAuthenticated{ email => implicit request => {
Async {
request.body.asJson.map{ json =>
//TODO Check if user has access to this app
val appId = (json \ "appId").as[String]
appById(appId).map(maybeApp => {
//TODO Add check if this failed and it's NONE
val app = maybeApp.get
var appTimezone = DateTimeZone.UTC
try { appTimezone = DateTimeZone.forID(app.timezone) } catch {
case e: IllegalArgumentException => appTimezone = DateTimeZone.UTC
}
val segmQuery = SegmentQuery(
timezone = appTimezone,
dateFromMsUTC = (json \ "dateFromMs").as[Long],
dateToMsUTC = (json \ "dateToMs").as[Long],
countries = (json \ "countries").asOpt[List[String]],
resolution = (json \ "resolution").asOpt[List[String]],
vendor = (json \ "vendor").asOpt[List[String]],
model = (json \ "model").asOpt[List[String]],
carrier = (json \ "carrier").asOpt[List[String]],
platform = (json \ "platform").asOpt[List[Int]],
platformV = (json \ "platformV").asOpt[List[String]],
queryFor = "retention"
)
//TODO we need to make caching here and check if some dates were already calculated before
//Use segmQuery.generateId() to store it's unique combination of values
Async {
DataFetcherDB.getDatabase("gate").command(segmQuery.buildAggregateCommand(appId)).map( res => {
val docs = res.toList
if(docs.size > 0) {
Ok(DataFetcherDB.json(docs(0))) //There should be one doc only anyway
} else {
Ok(Json.obj("code" -> -1, "msg" -> "No aggregated data document has been created."))
}
})
}
})
}.getOrElse {
Future(Ok(Json.obj("code" -> -1, "msg" -> "Can't transform the request ids.")))
}
}}
}
}
|
MoonGene/Analytics
|
src/moon/app/controllers/DataFetcher.scala
|
Scala
|
gpl-3.0
| 14,582 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.utils.io
/**
* FileLocator is a trait which is meant to combine aspects of
* - Java's File
* - Hadoop's Path
* - S3 locations (including bucket and key)
* - classpath-relative URLs (classpath://, used in testing)
*
* It provides methods for relative addressing (parent and child locators,
* which are equivalent to the File(parent, child) constructor and the getParentFile method
* on the Java File class), as well as accessing the bytes named by a locator
* by retrieving a ByteAccess value.
*
* We're using implementations of FileLocator to provide a uniform access interface
* to Parquet files, whether they're in HDFS, a local filesystem, S3, or embedded in the
* classpath as part of tests.
*/
trait FileLocator extends Serializable {
def parentLocator(): Option[FileLocator]
def relativeLocator(relativePath: String): FileLocator
def bytes: ByteAccess
}
object FileLocator {
val slashDivided = "^(.*)/([^/]+/?)$".r
def parseSlash(path: String): Option[(String, String)] =
slashDivided.findFirstMatchIn(path) match {
case None => None
case Some(m) => Some(m.group(1), m.group(2))
}
}
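// Illustrative sketch (not part of the original file): how parseSlash splits a path into
// (parent, last component) using the regex above. The path strings are made up for the example.
object FileLocatorParseExample extends App {
  // greedy parent match, last component after the final slash
  println(FileLocator.parseSlash("bucket/dir/part-00000.parquet")) // Some((bucket/dir,part-00000.parquet))
  // a trailing slash stays attached to the child component
  println(FileLocator.parseSlash("dir/child/"))                    // Some((dir,child/))
  // no slash at all means no parent can be split off
  println(FileLocator.parseSlash("no-slash-here"))                 // None
}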
|
nfergu/bdg-utils
|
utils-io/src/main/scala/org/bdgenomics/utils/io/FileLocator.scala
|
Scala
|
apache-2.0
| 1,959 |
class M[_]
trait T {
def foo(m: M[_ >: String]) = 42
}
object Test {
def t: T = new T {
def foo(m: M[_ >: Any]) = 0 // Expected: "same type after erasure"
}
def main(args: Array[String]): Unit = {
val m: M[String] = null
t.foo(m) // VerifyError: Duplicate method name&signature
}
}
|
lrytz/scala
|
test/files/neg/t9286c.scala
|
Scala
|
apache-2.0
| 305 |
package io.bartholomews.spotify4s.circe.api
import cats.data.NonEmptySet
import com.github.tomakehurst.wiremock.client.MappingBuilder
import com.github.tomakehurst.wiremock.client.WireMock._
import com.github.tomakehurst.wiremock.stubbing.StubMapping
import io.bartholomews.fsclient.core.oauth.NonRefreshableTokenSigner
import io.bartholomews.iso_country.CountryCodeAlpha2
import io.bartholomews.spotify4s.CirceWordSpec
import io.bartholomews.spotify4s.client.ClientData.{sampleClient, sampleNonRefreshableToken, sampleSpotifyId}
import io.bartholomews.spotify4s.core.entities.TimeInterval.Bar
import io.bartholomews.spotify4s.core.entities.{
AudioFeatures,
AudioKey,
AudioMode,
AudioSection,
Confidence,
FromToken,
IsoCountry,
Modality,
PitchClass,
SpotifyId,
SpotifyUri,
Tempo,
TimeSignature
}
import sttp.client.UriContext
class TracksApiSpec extends CirceWordSpec with ServerBehaviours {
implicit val signer: NonRefreshableTokenSigner = sampleNonRefreshableToken
import io.bartholomews.spotify4s.circe._
"`getAudioAnalysis`" should {
def endpoint: MappingBuilder =
get(urlPathEqualTo(s"$basePath/audio-analysis/${sampleSpotifyId.value}"))
def request = sampleClient.tracks.getAudioAnalysis(sampleSpotifyId)
behave like clientReceivingUnexpectedResponse(endpoint, request)
def stub: StubMapping =
stubFor(
endpoint
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/audio_analysis.json")
)
)
"return the correct entity" in matchResponseBody(stub, request) {
case Right(audioAnalysis) =>
audioAnalysis.bars.head should matchTo(
Bar(
start = 0.06443,
duration = 2.44911,
confidence = Confidence(0.057)
)
)
audioAnalysis.sections.head should matchTo(
AudioSection(
start = 0.0,
duration = 23.33163,
confidence = Confidence(1.0),
loudness = -21.61,
tempo = Tempo(
value = 98.015,
confidence = Confidence(0.782)
),
key = AudioKey(value = Some(PitchClass(7)), confidence = Confidence(0.609)),
mode = AudioMode(value = Modality.NoResult, confidence = Confidence(0.6)),
timeSignature = TimeSignature(value = 4, confidence = Confidence(1))
)
)
audioAnalysis.sections.last.mode should matchTo(
AudioMode(value = Modality.Major, confidence = Confidence(0.566))
)
}
}
"`getAudioFeatures` for a track" should {
def endpoint: MappingBuilder =
get(urlPathEqualTo(s"$basePath/audio-features/${sampleSpotifyId.value}"))
def request = sampleClient.tracks.getAudioFeatures(sampleSpotifyId)
behave like clientReceivingUnexpectedResponse(endpoint, request)
def stub: StubMapping =
stubFor(
endpoint
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/audio_features.json")
)
)
"return the correct entity" in matchResponseBody(stub, request) {
case Right(audioFeatures) =>
audioFeatures shouldBe AudioFeatures(
durationMs = 255349,
key = PitchClass(5),
mode = Modality.Minor,
timeSignature = 4,
acousticness = Confidence(0.514),
danceability = Confidence(0.735),
energy = Confidence(0.578),
instrumentalness = Confidence(0.0902),
liveness = Confidence(0.159),
loudness = -11.84,
speechiness = Confidence(0.0461),
valence = Confidence(0.636),
tempo = 98.002,
id = SpotifyId("06AKEBrKUckW0KREUWRnvT"),
uri = SpotifyUri("spotify:track:06AKEBrKUckW0KREUWRnvT"),
trackHref = uri"https://api.spotify.com/v1/tracks/06AKEBrKUckW0KREUWRnvT",
analysisUrl = uri"https://api.spotify.com/v1/audio-analysis/06AKEBrKUckW0KREUWRnvT"
)
}
}
"`getAudioFeatures` for several tracks" should {
def endpoint: MappingBuilder =
get(urlPathEqualTo(s"$basePath/audio-features"))
def request = sampleClient.tracks.getAudioFeatures(
NonEmptySet.of(
SpotifyId("3n3Ppam7vgaVa1iaRUc9Lp"),
SpotifyId("3twNvmDtFQtAd5gMKedhLD")
)
)
behave like clientReceivingUnexpectedResponse(endpoint, request)
def stub: StubMapping =
stubFor(
endpoint
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/audio_features_list.json")
)
)
"return the correct entity" in matchResponseBody(stub, request) {
case Right(af1 :: af2 :: Nil) =>
af1 should matchTo(
AudioFeatures(
durationMs = 222200,
key = PitchClass(1),
mode = Modality.Major,
timeSignature = 4,
acousticness = Confidence(0.00119),
danceability = Confidence(0.355),
energy = Confidence(0.918),
instrumentalness = Confidence(0),
liveness = Confidence(0.0971),
loudness = -4.36,
speechiness = Confidence(0.0746),
valence = Confidence(0.24),
tempo = 148.114,
id = SpotifyId("3n3Ppam7vgaVa1iaRUc9Lp"),
uri = SpotifyUri("spotify:track:3n3Ppam7vgaVa1iaRUc9Lp"),
trackHref = uri"https://api.spotify.com/v1/tracks/3n3Ppam7vgaVa1iaRUc9Lp",
analysisUrl = uri"https://api.spotify.com/v1/audio-analysis/3n3Ppam7vgaVa1iaRUc9Lp"
)
)
af2 should matchTo(
AudioFeatures(
durationMs = 197280,
key = PitchClass(10),
mode = Modality.Minor,
timeSignature = 4,
acousticness = Confidence(0.0000678),
danceability = Confidence(0.502),
energy = Confidence(0.972),
instrumentalness = Confidence(0.000702),
liveness = Confidence(0.0627),
loudness = -3.96,
speechiness = Confidence(0.0793),
valence = Confidence(0.729),
tempo = 138.019,
id = SpotifyId("3twNvmDtFQtAd5gMKedhLD"),
uri = SpotifyUri("spotify:track:3twNvmDtFQtAd5gMKedhLD"),
trackHref = uri"https://api.spotify.com/v1/tracks/3twNvmDtFQtAd5gMKedhLD",
analysisUrl = uri"https://api.spotify.com/v1/audio-analysis/3twNvmDtFQtAd5gMKedhLD"
)
)
}
}
"`getTracks`" when {
def endpoint: MappingBuilder = get(urlPathEqualTo(s"$basePath/tracks"))
"market is not defined" should {
def request = sampleClient.tracks.getTracks(
ids = NonEmptySet.of(
SpotifyId("3n3Ppam7vgaVa1iaRUc9Lp"),
SpotifyId("3twNvmDtFQtAd5gMKedhLD")
),
market = None
)
val endpointRequest =
endpoint
.withQueryParam(
"ids",
equalTo("3n3Ppam7vgaVa1iaRUc9Lp,3twNvmDtFQtAd5gMKedhLD")
)
behave like clientReceivingUnexpectedResponse(endpointRequest, request)
def stub: StubMapping =
stubFor(
endpointRequest
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/tracks.json")
)
)
"return the correct entity" in matchResponseBody(stub, request) {
case Right(List(track1, track2)) =>
track1.uri shouldBe SpotifyUri("spotify:track:3n3Ppam7vgaVa1iaRUc9Lp")
track2.uri shouldBe SpotifyUri("spotify:track:3twNvmDtFQtAd5gMKedhLD")
}
}
"market is defined" should {
def request = sampleClient.tracks.getTracks(
ids = NonEmptySet.of(
SpotifyId("3n3Ppam7vgaVa1iaRUc9Lp"),
SpotifyId("3twNvmDtFQtAd5gMKedhLD")
),
market = Some(IsoCountry(CountryCodeAlpha2.SPAIN))
)
val endpointRequest =
endpoint
.withQueryParam("ids", equalTo("3n3Ppam7vgaVa1iaRUc9Lp,3twNvmDtFQtAd5gMKedhLD"))
.withQueryParam("market", equalTo("ES"))
behave like clientReceivingUnexpectedResponse(endpointRequest, request)
def stub: StubMapping =
stubFor(
endpointRequest
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/tracks_es.json")
)
)
"return the correct entity" in matchResponseBody(stub, request) {
case Left(wat) =>
println(wat.getMessage)
succeed
case Right(List(track1, track2)) =>
track1.uri shouldBe SpotifyUri("spotify:track:3n3Ppam7vgaVa1iaRUc9Lp")
track2.uri shouldBe SpotifyUri("spotify:track:3twNvmDtFQtAd5gMKedhLD")
track2.availableMarkets shouldBe List.empty
}
}
}
"`getTrack`" when {
val sampleTrackId: SpotifyId = SpotifyId("3n3Ppam7vgaVa1iaRUc9Lp")
def endpoint(trackId: SpotifyId): MappingBuilder = get(urlPathEqualTo(s"$basePath/tracks/${trackId.value}"))
"market is not defined" should {
def request = sampleClient.tracks.getTrack(
sampleTrackId,
market = None
)
behave like clientReceivingUnexpectedResponse(endpoint(sampleTrackId), request)
def stub: StubMapping =
stubFor(
endpoint(sampleTrackId)
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/track.json")
)
)
"return the correct entity" in matchResponseBody(stub, request) {
case Right(track) =>
track.uri shouldBe SpotifyUri("spotify:track:3n3Ppam7vgaVa1iaRUc9Lp")
}
}
"market is defined" should {
def request = sampleClient.tracks.getTrack(
sampleTrackId,
market = Some(IsoCountry(CountryCodeAlpha2.SPAIN))
)
behave like clientReceivingUnexpectedResponse(endpoint(sampleTrackId), request)
def stub: StubMapping =
stubFor(
endpoint(sampleTrackId)
.withQueryParam("market", equalTo("ES"))
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/track_es.json")
)
)
"return the correct entity" in matchResponseBody(stub, request) {
case Right(track) =>
track.uri shouldBe SpotifyUri("spotify:track:3n3Ppam7vgaVa1iaRUc9Lp")
}
}
"market is defined as `from_token`" should {
def request = sampleClient.tracks.getTrack(
sampleTrackId,
market = Some(FromToken)
)
behave like clientReceivingUnexpectedResponse(endpoint(sampleTrackId), request)
def stub: StubMapping = {
stubFor(
endpoint(sampleTrackId)
.withQueryParam("market", equalTo("from_token"))
.willReturn(
aResponse()
.withStatus(200)
.withBodyFile("tracks/track_relinking.json")
)
)
}
"return the correct entity" in matchResponseBody(stub, request) {
case Right(track) =>
track.uri shouldBe SpotifyUri("spotify:track:3n3Ppam7vgaVa1iaRUc9Lp")
}
}
}
}
|
bartholomews/spotify-scala-client
|
modules/circe/src/test/scala/io/bartholomews/spotify4s/circe/api/TracksApiSpec.scala
|
Scala
|
mit
| 11,333 |
package ui.pages
import japgolly.scalajs.react.vdom.prefix_<^._
import japgolly.scalajs.react.{BackendScope, ReactComponentB, ReactElement}
import model._
import ui.WorkbenchRef
import ui.widgets._
import ui.widgets.dialogs.ContainerRequestForm
import util.{EventAction, EventCategory, PlatformService}
import util.logger._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
object ImagePage {
case class State(info: Option[ImageInfo] = None,
history: Seq[ImageHistory] = Seq.empty,
error: Option[String] = None,
showCreateDialog: Boolean = false)
case class Props(ref: WorkbenchRef, image: Image)
case class Backend(t: BackendScope[Props, State]) extends ContainerRequestForm.ActionsBackend {
def willMount(): Unit = t.props.ref.client.map { client =>
val result = for {
info <- client.imageInfo(t.props.image.Id)
history <- client.imageHistory(t.props.image.Id)
} yield t.modState(s => s.copy(info = Some(info), history = history))
result.onFailure {
case ex: Exception =>
log.error("ImagePage", s"Unable to get imageInfo for ${t.props.image.id}", ex)
t.modState(s => s.copy(error = Some(s"Unable to connect")))
}
}
def showCreateDialog(): Future[Unit] = Future {
PlatformService.current.sendEvent(EventCategory.Image, EventAction.Show, "CreateDialog")
t.modState(_.copy(showCreateDialog = true))
}
def removeImage(): Future[Unit] = {
PlatformService.current.sendEvent(EventCategory.Image, EventAction.Remove)
t.props.ref.client.get.removeImage(t.props.image).map { info =>
t.props.ref.show(ImagesPage)
}.recoverWith {
case ex: Exception =>
val msg = s"${ex.getMessage}. You can also try to garbage collect unused containers and images."
Future.successful(t.modState(_.copy(error = Some(msg))))
}
}
def containerConfig: ContainerConfig = t.state.info match {
case Some(info) => info.Config
case None => ContainerConfig()
}
override def newContainerCreated(containerId: String) = {
log.info(s"Container created $containerId")
PlatformService.current.sendEvent(EventCategory.Image, EventAction.Start)
t.props.ref.show(ContainerPage(containerId, t.props.ref))
}
}
def apply(image: Image, ref: WorkbenchRef) = new Page {
val id = ImagesPage.id
def component(ref: WorkbenchRef) = {
val props = Props(ref, image)
ImagePageRender.component(props)
}
}
}
object ImagePageRender {
import ui.pages.ImagePage._
val component = ReactComponentB[Props]("ImagePage")
.initialState(State())
.backend(new Backend(_))
.render((P, S, B) => vdom(P, S, B))
.componentWillMount(_.backend.willMount)
.build
def vdom(P: Props, S: State, B: Backend): ReactElement =
<.div(
S.error.map(Alert(_)),
S.info.map(vdomInfo(_, S, P, B))
)
def vdomInfo(imageInfo: ImageInfo, S: State, P: Props, B: Backend) = {
import util.StringUtils._
val imageName = substringBeforeLast(P.image.RepoTags.headOption.getOrElse(""), ":")
val generalInfo = Map(
"Id" -> P.image.id,
"Name" -> imageName,
"Tags" -> P.image.RepoTags.map(substringAfter(_, ":")).mkString(", ")
)
val executionInfo = Map(
"Command" -> imageInfo.Config.cmd.mkString(" "),
"Environment" -> imageInfo.Config.env.mkString(" "),
"WorkingDir" -> imageInfo.Config.WorkingDir
)
val extraInfo = Map(
"Container exposed ports" -> imageInfo.Config.exposedPorts.keySet.mkString(", "),
"Author" -> imageInfo.Author,
"OS" -> imageInfo.Os,
"Created" -> P.image.created
)
<.div(
InfoCard(generalInfo, InfoCard.SMALL, None, Seq.empty, vdomCommands(S, B)),
InfoCard(executionInfo),
InfoCard(extraInfo),
vdomHistory(S.history),
S.showCreateDialog ?= ContainerRequestForm(B, P.image, B.containerConfig, P.ref)
)
}
def vdomCommands(state: State, B: Backend) =
Some(<.div(^.className := "panel-footer",
<.div(^.className := "btn-group btn-group-justified",
<.div(^.className := "btn-group", Button("Deploy", "glyphicon-play", "Create container using this image")(B.showCreateDialog)),
<.div(^.className := "btn-group", Button("Remove", "glyphicon-trash")(B.removeImage))
)
)
)
def vdomHistory(history: Seq[ImageHistory]): ReactElement = {
val values = history.map(row => Map( "Created By" -> row.CreatedBy, "Id" -> row.id, "Size" -> row.size, "Created" -> row.created))
<.div(^.className := "container col-sm-12",
<.div(^.className := "panel panel-default bootcards-summary",
<.div(^.className := "panel-heading clearfix",
<.h3(^.className := "panel-title pull-left",
<.i(^.className := "fa fa-history"), " Creation History"
)
),
TableCard(values, "Created" -> "col-sm-2", "Size" -> "col-sm-2")
)
)
}
}
|
felixgborrego/simple-docker-ui
|
src/main/scala/ui/pages/ImagePage.scala
|
Scala
|
mit
| 5,242 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.observers.buffers
import monix.execution.internal.Platform
import monix.reactive.observers.Subscriber
import scala.collection.mutable.ListBuffer
/** A `BufferedSubscriber` implementation for the
* [[monix.reactive.OverflowStrategy.BackPressure BackPressured]]
* buffer overflowStrategy that sends events in bundles.
*/
private[monix] final class BatchedBufferedSubscriber[A] private
(out: Subscriber[List[A]], _bufferSize: Int)
extends AbstractBackPressuredBufferedSubscriber[A, ListBuffer[A]](
subscriberBufferToList(out), _bufferSize) { self =>
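  // Note (comment added for clarity, not in the original source): the volatile fields below
  // appear to be cache-line padding only - they are never read or written with meaningful
  // values, and exist to keep the buffer's hot state on separate cache lines so the producer
  // and consumer threads do not suffer false sharing.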
@volatile protected var p50, p51, p52, p53, p54, p55, p56, p57 = 5
@volatile protected var q50, q51, q52, q53, q54, q55, q56, q57 = 5
override protected def fetchSize(r: ListBuffer[A]): Int =
r.length
override protected def fetchNext(): ListBuffer[A] = {
val batchSize = Platform.recommendedBatchSize
val buffer = ListBuffer.empty[A]
queue.drain(buffer, batchSize)
if (buffer.nonEmpty) buffer else null
}
}
private[monix] object BatchedBufferedSubscriber {
/** Builder for [[BatchedBufferedSubscriber]] */
def apply[A](underlying: Subscriber[List[A]], bufferSize: Int): BatchedBufferedSubscriber[A] =
new BatchedBufferedSubscriber[A](underlying, bufferSize)
}
|
ddworak/monix
|
monix-reactive/jvm/src/main/scala/monix/reactive/observers/buffers/BatchedBufferedSubscriber.scala
|
Scala
|
apache-2.0
| 1,962 |
package gitbucket.core.model
trait IssueLabelComponent extends TemplateComponent { self: Profile =>
import profile.simple._
lazy val IssueLabels = TableQuery[IssueLabels]
class IssueLabels(tag: Tag) extends Table[IssueLabel](tag, "ISSUE_LABEL") with IssueTemplate with LabelTemplate {
def * = (userName, repositoryName, issueId, labelId) <> (IssueLabel.tupled, IssueLabel.unapply)
def byPrimaryKey(owner: String, repository: String, issueId: Int, labelId: Int) =
byIssue(owner, repository, issueId) && (this.labelId === labelId.bind)
}
}
case class IssueLabel(
userName: String,
repositoryName: String,
issueId: Int,
labelId: Int
)
|
intermezzo-fr/gitbucket
|
src/main/scala/gitbucket/core/model/IssueLabels.scala
|
Scala
|
apache-2.0
| 666 |
package leo.modules.proofCalculi
import leo.datastructures._
import leo.datastructures.term.Term
import leo.modules.output.Output
trait TermComparison {
type Substitute = (Subst,Seq[Type])
/**
*
* @param t - First term to unify
* @param s - Second term to unify
* @param n - Offset for new implicit Bindings (n+1 will be the next binding)
   * @return None, if not unifiable, Some(sub, tys) with a substitution sub s.t. sub[s] = sub[t] and tys the new additional implicit bindings.
*/
def equals(t : Term, s : Term, n : Int) : Option[Substitute]
}
/**
* Tests solely for equality
*/
object IdComparison extends TermComparison{
override def equals(t : Term, s : Term, n : Int) : Option[Substitute] = if (s == t) Some((Subst.id, Nil)) else None
}
trait ParamodStep extends Output{
/**
* Executes a step of the Paramodulation.
*
* @param c - First clause
* @param d - Second clause
* @param lc - Term in first clause
* @param ld - Literal in second clause (not contained)
* @param s - Substitution of the paramodulation
* @return new generated clause
*/
def exec(c : Clause, d : Clause, lc : Term, ld : Literal, s :TermComparison#Substitute) : Clause
def find(c1: Clause, c2: Clause, comp: TermComparison): Option[(Term, Literal, TermComparison#Substitute)]
}
object PropParamodulation extends ParamodStep{
/**
*
* Executes Propositional Resolution in Superposition
*
*
* C[l'] D \\/ [l] = \\alpha s(l') = s(l)
* --------------------------------------------
* (C[\\alpha] \\/ D) s
*
* @param c - First Clause
* @param d - Second Clause
* @param lc - Term to be replaced in first clause
* @param ld - Literal of form [l] = \\alpha, NOT CONTAINED IN d
   * @param s - s(lc) = s(ld.term) according to comparison
* @return
*/
override def exec(c: Clause, d: Clause, lc: Term, ld: Literal, s: TermComparison#Substitute): Clause = {
val alpha: Term = if (ld.polarity) LitTrue else LitFalse
val cSub = c.replace(lc, alpha)
val merged = cSub.merge(d)
// leo.Out.severe("What: "+lc.pretty)
// leo.Out.severe("By: "+alpha.pretty)
val res = Clause.mkClause(merged.substitute(s._1).lits, s._2 ++ merged.implicitBindings, Derived)
return TrivRule.triv(TrivRule.teqf(Simp(res)))
}
/**
* TODO: Use Term comparison. Currently simple equality is used.
*
* @param c1 - First clause
* @param c2 - Second clause
* @param comp - comparison object, if two terms are unifiable
   * @return (t,l,s), where t is the selected first term, l is the literal and s is a substitution that makes both equal.
*/
override def find(c1: Clause, c2: Clause, comp: TermComparison): Option[(Term, Literal, TermComparison#Substitute)] = {
if(c1.lits.isEmpty || c2.lits.isEmpty) return None
val lits = c2.lits.iterator
while (lits.hasNext) {
val lit = lits.next()
val t = lit.term
if (c1.lits.exists { l => (l.term.occurrences.keys.toSet).contains(t)})
return Some(t, lit, (Subst.id, Nil))
}
return None
}
override def output: String = "Paramod-Propositional"
}
object Paramodulation extends ParamodStep{
private def decomp(l: Literal): Option[(Term, Term)] = l.term match {
case ===(t1,t2) => Some(t1,t2)
case _ => None
}
/**
*
   * Executes paramodulation (equality rewriting) in Superposition
*
*
* C[l'] D \\/ [l = r] = T s(l') = s(l)
* --------------------------------------------
* (C[r] \\/ D) s
*
* @param c - First Clause
* @param d - Second Clause
* @param lc - Term to be replaced in first clause
* @param ld - Literal in the form [l = r] = T, NOT CONTAINED IN d
   * @param s - s(lc) = s(ld.term) according to comparison
* @return
*/
override def exec(c: Clause, d: Clause, lc: Term, ld: Literal, s: TermComparison#Substitute): Clause = {
val (l,r) = decomp(ld).get
val cSub = c.replace(lc, r)
val merged = cSub.merge(d)
// leo.Out.severe("What: "+lc.pretty)
// leo.Out.severe("By: "+alpha.pretty)
val res = Clause.mkClause(merged.substitute(s._1).lits, s._2 ++ merged.implicitBindings, Derived)
return TrivRule.triv(TrivRule.teqf(Simp(res)))
}
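  // Worked instance of the rule above (comment added for clarity, not in the original source),
  // with the identity substitution:
  //   C = [P(f(a))] = T        D \/ [f(a) = b] = T
  //   ---------------------------------------------
  //   ([P(b)] = T) \/ D
  // i.e. the occurrence of l = f(a) in C is rewritten to r = b and the two clauses are merged.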
/**
* TODO: Use Term comparison. Currently simple equality is used.
*
* @param c1 - First clause
* @param c2 - Second clause
* @param comp - comparison object, if two terms are unifiable
   * @return (t,l,s), where t is the selected first term, l is the literal and s is a substitution that makes both equal.
*/
override def find(c1: Clause, c2: Clause, comp: TermComparison): Option[(Term, Literal, TermComparison#Substitute)] = {
if(c1.lits.isEmpty || c2.lits.isEmpty) return None
val lits = c2.lits.iterator
while (lits.hasNext) {
val lit = lits.next()
decomp(lit) match {
case Some((l,r)) if lit.polarity =>
if (c1.lits.exists { lt => (lt.term.occurrences.keys.toSet).contains(l)})
return Some(l, lit, (Subst.id, Nil))
case _ =>
}
}
return None
}
override def output: String = "Paramod-Full"
}
// TODO: Optimize
object Simp {
def apply (c : Clause) : Clause = {
import leo.modules.normalization.Simplification
val litNorm = Simplification.normalize(c).mapLit(flipNeg)
    // Remove unused quantifiers.
val looseBounds : Set[Int] = litNorm.map(_.term.looseBounds).toSet.flatten
val implicitQuan : Seq[Type] = c.implicitBindings
val misBound = looseBounds.diff(Range.apply(1,implicitQuan.size).toSet)
val liftLits = litNorm.map(_.termMap(_.closure(liftMissingBound(misBound, implicitQuan.size)).betaNormalize))
return Clause.mkClause(liftLits, removeBounds(implicitQuan, misBound, implicitQuan.length), Derived)
}
private def flipNeg(l : Literal) : Literal = l.term match {
case Not(f) => l.flipPolarity.termMap(_ => f)
case _ => l
}
/*
* Returns subsitution and positions of implicitQuan to delete
*/
private def liftMissingBound(m : Set[Int], maxBind : Int) : Subst = {
var pos : Int = 1
var free : Int = 1
var s = Subst.id
    while(pos <= maxBind) {
      s = s.cons(BoundFront(free)) // If it is not contained, it will never substitute this value
      if(m.contains(pos)) free += 1
      pos += 1 // advance to the next binding; without this the loop never terminates
    }
s
}
private def removeBounds(b : Seq[Type], m : Set[Int], pos : Int) : Seq[Type] = b match {
case Seq() => Seq()
case x +: xs if m.contains(pos) => removeBounds(xs, m, pos-1)
case x +: xs => x +: removeBounds(xs, m, pos-1)
}
}
|
cbenzmueller/LeoPARD
|
src/main/scala/leo/modules/proofCalculi/CalculusTest.scala
|
Scala
|
bsd-3-clause
| 6,751 |
package com.lucidchart.open.cashy.models
import javax.inject.Inject
import play.api.Play.current
import play.api.db.Database
import com.lucidchart.open.relate.interp._
import com.lucidchart.open.relate.SqlResult
case class User(
id: Long,
googleId: String,
email: String
)
object UserModel extends UserModel(play.api.Play.current.injector.instanceOf[Database])
class UserModel @Inject() (db: Database) {
private val userParser = { row: SqlResult =>
User(
row.long("id"),
row.string("google_id"),
row.string("email")
)
}
def findById(userId: Long): Option[User] = {
db.withConnection { implicit connection =>
sql"""SELECT `id`, `google_id`, `email`
FROM `users`
WHERE `id` = $userId""".asSingleOption(userParser)
}
}
def findByIds(userIds: List[Long]): List[User] = {
db.withConnection { implicit connection =>
sql"""SELECT `id`, `google_id`, `email`
FROM `users`
WHERE `id` IN ($userIds)""".asList(userParser)
}
}
def findByGoogleId(googleId: String): Option[User] = {
db.withConnection { implicit connection =>
sql"""SELECT `id`, `google_id`, `email`
FROM `users`
WHERE `google_id` = $googleId""".asSingleOption(userParser)
}
}
def createUser(googleId: String, email: String): User = {
db.withConnection { implicit connection =>
sql"""INSERT INTO `users`
(`google_id`, `email`)
VALUES ($googleId, $email)""".execute()
findByGoogleId(googleId).getOrElse {
throw new Exception("Mysql insert failed")
}
}
}
}
|
lucidsoftware/cashy
|
app/com/lucidchart/open/cashy/models/UserModel.scala
|
Scala
|
apache-2.0
| 1,609 |
package com.wordnik.client.model
import com.wordnik.client.model.ShowcaseDatatypePrimitives
import com.wordnik.client.model.Category
import java.util.Date
import com.wordnik.client.model.ShowcaseDatatypeDate
import com.wordnik.client.model.ShowcaseDatatypeMath
import com.wordnik.client.model.Location
case class User (
id: Long,
name: String,
firstName: String,
state: String,
photo: List[String],
categories: List[Category],
locations: List[Location],
primitives: ShowcaseDatatypePrimitives,
math: ShowcaseDatatypeMath,
date: ShowcaseDatatypeDate,
createTimestamp: Date,
modifyTimestamp: Date
)
|
jfiala/swagger-spring-demo
|
user-rest-service-1.0.2/generated-code/scalatra/src/main/scala/com/wordnik/client/model/User.scala
|
Scala
|
apache-2.0
| 628 |
package edu.umass.ciir.kbbridge
import data.{ScoredWikipediaEntity, EntityMention}
import text2kb.KnowledgeBaseCandidateGenerator
import util.{ConfInfo, KbBridgeProperties}
/**
* User: jdalton
* Date: 6/12/13
*/
object SimpleEntityLinker {
lazy val candidateGenerator = KnowledgeBaseCandidateGenerator()
val reranker = new RankLibReranker(KbBridgeProperties.rankerModelFile)
def rankKbs(mention: EntityMention, numberOfCandidates: Int = ConfInfo.maxCandidates) : Seq[ScoredWikipediaEntity] = {
println("Fetching candidates for mention: " + mention.mentionId + " d:" + mention.docId + " name:" + mention.entityName)
val candidates = candidateGenerator.retrieveCandidates(mention, numberOfCandidates)
val rerankedResults = reranker.rerankCandidatesGenerateFeatures(mention, candidates)
rerankedResults
}
}
|
daltonj/KbBridge
|
src/main/scala/edu/umass/ciir/kbbridge/SimpleEntityLinker.scala
|
Scala
|
apache-2.0
| 836 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.ann
import java.util.Random
import breeze.linalg.{sum => Bsum, DenseMatrix => BDM, DenseVector => BDV}
import breeze.numerics.{log => brzlog}
/**
* Trait for loss function
*/
private[ann] trait LossFunction {
/**
* Returns the value of loss function.
* Computes loss based on target and output.
* Writes delta (error) to delta in place.
* Delta is allocated based on the outputSize
* of model implementation.
*
* @param output actual output
* @param target target output
* @param delta delta (updated in place)
* @return loss
*/
def loss(output: BDM[Double], target: BDM[Double], delta: BDM[Double]): Double
}
private[ann] class SigmoidLayerWithSquaredError extends Layer {
override val weightSize = 0
override val inPlace = true
override def getOutputSize(inputSize: Int): Int = inputSize
override def createModel(weights: BDV[Double]): LayerModel =
new SigmoidLayerModelWithSquaredError()
override def initModel(weights: BDV[Double], random: Random): LayerModel =
new SigmoidLayerModelWithSquaredError()
}
private[ann] class SigmoidLayerModelWithSquaredError
extends FunctionalLayerModel(new FunctionalLayer(new SigmoidFunction)) with LossFunction {
override def loss(output: BDM[Double], target: BDM[Double], delta: BDM[Double]): Double = {
ApplyInPlace(output, target, delta, (o: Double, t: Double) => o - t)
val error = Bsum(delta :* delta) / 2 / output.cols
ApplyInPlace(delta, output, delta, (x: Double, o: Double) => x * (o - o * o))
error
}
}
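/*
 * Editorial note on the math above (a sketch, not part of the original file):
 * with sigmoid output o and target t, the squared error per column is
 * E = sum((o - t)^2) / 2, so dE/do = o - t (the first ApplyInPlace) and, by the
 * chain rule with the sigmoid derivative do/dz = o * (1 - o), the propagated
 * delta is dE/dz = (o - t) * o * (1 - o) (the second ApplyInPlace). The returned
 * loss additionally averages over output.cols, i.e. over the examples in the batch.
 */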
private[ann] class SoftmaxLayerWithCrossEntropyLoss extends Layer {
override val weightSize = 0
override val inPlace = true
override def getOutputSize(inputSize: Int): Int = inputSize
override def createModel(weights: BDV[Double]): LayerModel =
new SoftmaxLayerModelWithCrossEntropyLoss()
override def initModel(weights: BDV[Double], random: Random): LayerModel =
new SoftmaxLayerModelWithCrossEntropyLoss()
}
private[ann] class SoftmaxLayerModelWithCrossEntropyLoss extends LayerModel with LossFunction {
// loss layer models do not have weights
val weights = new BDV[Double](0)
override def eval(data: BDM[Double], output: BDM[Double]): Unit = {
var j = 0
// find max value to make sure later that exponent is computable
while (j < data.cols) {
var i = 0
var max = Double.MinValue
while (i < data.rows) {
if (data(i, j) > max) {
max = data(i, j)
}
i += 1
}
var sum = 0.0
i = 0
while (i < data.rows) {
val res = math.exp(data(i, j) - max)
output(i, j) = res
sum += res
i += 1
}
i = 0
while (i < data.rows) {
output(i, j) /= sum
i += 1
}
j += 1
}
}
override def computePrevDelta(
nextDelta: BDM[Double],
input: BDM[Double],
delta: BDM[Double]): Unit = {
/* loss layer model computes delta in loss function */
}
override def grad(delta: BDM[Double], input: BDM[Double], cumGrad: BDV[Double]): Unit = {
/* loss layer model does not have weights */
}
override def loss(output: BDM[Double], target: BDM[Double], delta: BDM[Double]): Double = {
ApplyInPlace(output, target, delta, (o: Double, t: Double) => o - t)
-Bsum( target :* brzlog(output)) / output.cols
}
}
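/*
 * Editorial sketch (not part of the original Spark file): the column loop in
 * `eval` above is the standard numerically stable softmax. The hypothetical
 * helper below shows the same idea on a single column as a plain Array[Double];
 * shifting by the maximum before exponentiating avoids overflow and does not
 * change the result, because softmax(x) == softmax(x - c) for any constant c.
 */
private[ann] object StableSoftmaxSketch {
  def softmax(column: Array[Double]): Array[Double] = {
    val max = column.max                          // shift so the largest exponent is exp(0) = 1
    val exps = column.map(v => math.exp(v - max)) // no overflow for large inputs
    val sum = exps.sum
    exps.map(_ / sum)                             // normalize to a probability distribution
  }
}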
|
Panos-Bletsos/spark-cost-model-optimizer
|
mllib/src/main/scala/org/apache/spark/ml/ann/LossFunction.scala
|
Scala
|
apache-2.0
| 4,170 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation.extern
object DefaultParameter {
def foo(x: Int = 1) = x
def bar = foo() + foo(2) == 3
def _main() = if (bar) 0 else 1
@extern
def main(args: Array[String]): Unit = _main()
}
|
regb/leon
|
src/test/resources/regression/genc/valid/DefaultParameter.scala
|
Scala
|
gpl-3.0
| 257 |
package com.twitter.finagle.service
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import com.twitter.finagle.{NotServableException, Service}
import org.mockito.Mockito.{times, verify, when}
import org.mockito.Matchers._
import com.twitter.util.{Await, Future}
@RunWith(classOf[JUnitRunner])
class OptionallyServableFilterTest extends FunSuite with MockitoSugar {
  class OptionallyServableFilterHelper {
val underlying = mock[Service[String, String]]
when(underlying.close(any)) thenReturn Future.Done
val fn = mock[String => Future[Boolean]]
val service = new OptionallyServableFilter(fn) andThen underlying
val request = "request"
val response = Future.value("response")
}
test("OptionallyServableFilter should passes through when fn returns true") {
val h = new OptionnallyServableFilterHelper
import h._
when(fn.apply(request)) thenReturn Future.value(true)
when(underlying(request)) thenReturn response
assert(Await.result(service(request)) === Await.result(response))
verify(fn).apply(request)
}
test("OptionallyServableFilter should throws NotServableException when fn returns false") {
val h = new OptionnallyServableFilterHelper
import h._
when(fn.apply(request)) thenReturn Future.value(false)
intercept[NotServableException] {
Await.result(service(request))
}
verify(underlying, times(0)).apply(any[String])
verify(fn).apply(request)
}
}
|
thirstycrow/finagle
|
finagle-core/src/test/scala/com/twitter/finagle/service/OptionallyServableFilterTest.scala
|
Scala
|
apache-2.0
| 1,547 |
package scala.slick.compiler
import scala.collection.mutable.{HashSet, HashMap}
import scala.slick.SlickException
import scala.slick.ast._
import Util._
/**
* Remove TableExpansions and TableRefExpansions, and flatten ProductNodes
* into StructNodes and remove unnecessary columns from them.
*/
class RewritePaths extends Phase {
val name = "rewritePaths"
def apply(n: Node, state: CompilationState): Node = {
def flattenToStruct(n: Node): (Node, Vector[(Symbol, Node)]) = n match {
case ProductNode(ch) =>
val chf = ch.map(flattenToStruct)
(ProductNode(chf.map(_._1)), chf.map(_._2).foldLeft[Vector[(Symbol, Node)]](Vector())(_ ++ _))
case n =>
val sym = new AnonSymbol
(Ref(sym), Vector((sym, n)))
}
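    /*
     * Editorial example (not in the original source): flattenToStruct turns
     *   ProductNode(Seq(a, ProductNode(Seq(b, c))))
     * into the pair
     *   (ProductNode(Seq(Ref(s1), ProductNode(Seq(Ref(s2), Ref(s3))))),
     *    Vector(s1 -> a, s2 -> b, s3 -> c))
     * for fresh AnonSymbols s1..s3: the product shape is preserved while every
     * leaf is replaced by a reference into the flat symbol/definition vector.
     */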
val flattened = new HashMap[List[Symbol], Node]
val defs = new HashMap[Symbol, Node]
def narrowRef(s: Symbol): Symbol = defs.get(s) match {
case Some(u: Union) => narrowRef(u.leftGen)
case Some(FilteredQuery(gen, _)) => narrowRef(gen)
case _ => s
}
def findFlattened(syms: List[Symbol], base: List[Symbol]): Option[(Node, List[Symbol], List[Symbol])] = syms match {
case Nil => None
case h :: t =>
val nh = narrowRef(h)
defs.get(nh) match {
case Some(j: Join) =>
logger.debug(" found Join for "+nh+" (from "+h+")")
t match {
case (e @ ElementSymbol(1)) :: tt => findFlattened(j.leftGen :: tt, e :: base)
case (e @ ElementSymbol(2)) :: tt => findFlattened(j.rightGen :: tt, e :: base)
case _ => None
}
case Some(g: GroupBy) =>
logger.debug(" found GroupBy for "+nh+" (from "+h+")")
t match {
case (e @ ElementSymbol(1)) :: tt => findFlattened(g.byGen :: tt, e :: base)
case (e @ ElementSymbol(2)) :: tt => findFlattened(g.fromGen :: tt, e :: base)
case _ => None
}
case Some(Path(syms)) =>
logger.debug(" found path for "+nh+" (from "+h+"): "+Path.toString(syms)+", remaining: "+t)
val target = findFlattened(syms.reverse, Nil)
logger.debug(" pointing to "+target)
target.map { case (struct, _, _) => (struct, t, Nil) }
case o =>
logger.debug(" found non-Join/GroupBy for "+nh+" (from "+h+"): "+o)
flattened.get(List(nh)).map(n => (n, t, base))
}
}
/** Remove expansions, flatten structs, and gather defs and flattened structs */
def gather(refO: Option[Symbol], n: Node): Node = removeExpansion(n) match {
case Bind(gen, from, Pure(x)) =>
val from2 = from match {
case Pure(_) =>
val x2 = gather(Some(gen), from).asInstanceOf[Pure].child
val (mapping, repl) = flattenToStruct(x2)
logger.debug("Storing flattened Pure struct as "+Path.toString(List(gen)))
flattened += List(gen) -> mapping
Pure(StructNode(repl))
case n =>
gather(Some(gen), n)
}
logger.debug("Storing def for "+gen+" from Pure Bind")
defs += gen -> from2
val x2 = gather(None, x)
val pure2 = refO match {
case Some(ref) =>
val (mapping, repl) = flattenToStruct(x2)
logger.debug("Storing flattened struct as "+Path.toString(List(ref)))
flattened += List(ref) -> mapping
StructNode(repl)
case None =>
ProductNode(x2.flattenProduct)
}
Bind(gen, from2, Pure(pure2))
case b @ Bind(gen, from, sel) =>
val from2 = gather(Some(gen), from)
logger.debug("Storing def for "+gen+" from non-Pure Bind")
defs += gen -> from2
val sel2 = gather(refO, sel) // the "select" clause inherits our ref
if((from2 eq from) && (sel2 eq sel)) b
else Bind(gen, from2, sel2)
case d: DefNode =>
d.nodeMapScopedChildren { case (symO, ch) =>
val ch2 = gather(symO, ch)
symO.foreach { sym =>
logger.debug("Storing def for "+sym)
defs += sym -> ch2
}
ch2
}
case n =>
n.nodeMapChildren(ch => gather(None, ch))
}
def replaceRefs(n: Node): Node = n match {
case Path(syms) => syms.head match {
case f: FieldSymbol => n // inside a former TableExpansion - no need to go down this path
case _ =>
logger.debug("Trying to replace "+Path.toString(syms))
val rsyms = syms.reverse
findFlattened(rsyms, Nil) match {
case Some(fl @ (struct, rest, base)) =>
logger.debug(" found flattened: "+fl)
findFieldSymbol(struct, rest) match {
case Some(fsym) => Path(fsym :: base ::: rsyms.head :: Nil)
case None => n
}
case _ => n
}
}
case n => n.nodeMapChildren(replaceRefs)
}
val n2 = gather(None, n)
if(n2 ne n) logger.debug("Expansions removed, ProductNodes rewritten to StructNodes", n2)
val n3 = replaceRefs(n2)
if(n3 ne n2) logger.debug("Refs replaced", n3)
n3
}
def findFieldSymbol(n: Node, path: List[Symbol]): Option[Symbol] = (path, n) match {
case (ElementSymbol(idx) :: t, ProductNode(ch)) => findFieldSymbol(ch(idx-1), t)
case (Nil, Ref(sym)) => Some(sym)
case (Nil, _) => None
case _ => throw new SlickException("Illegal "+Path.toString(path)+" into TableExpansion structure "+n)
}
def removeExpansion(n: Node) = n match {
case TableExpansion(gen, t, cols) => Bind(gen, t, Pure(cols))
case TableRefExpansion(_, ref, cols) => cols
case n => n
}
}
/** Assign the AnonSymbols of fields from the left side of a Union to the
* right side. This ensures that both sides are protected when we prune
* unused references pointing to left-side Symbols. */
class RelabelUnions extends Phase {
val name = "relabelUnions"
def apply(n: Node, state: CompilationState) = relabelUnions(n)
def relabelUnions(n: Node): Node = n match {
case u @ Union(BindTarget(Pure(StructNode(ls))), rb @ BindTarget(Pure(StructNode(rs))), _, _, _)
if ls.size == rs.size =>
val rs2 = (ls, rs).zipped.map { case ((s, _), (_, n)) => (s, n) }
u.copy(right = BindTarget.replace(rb, Pure(StructNode(rs2)))).nodeMapChildren(relabelUnions)
case n => n.nodeMapChildren(relabelUnions)
}
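  /*
   * Editorial example (added): given
   *   Union(Bind(.., .., Pure(StructNode(Seq(s1 -> a, s2 -> b)))),
   *         Bind(.., .., Pure(StructNode(Seq(t1 -> c, t2 -> d)))), ..)
   * the right-hand select becomes StructNode(Seq(s1 -> c, s2 -> d)): the right
   * side keeps its own value nodes but is re-labelled with the left side's
   * symbols, so later pruning against left-side references protects both branches.
   */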
object BindTarget {
def unapply(n: Node): Option[Node] = n match {
case Bind(_, _, t) =>
if(t.isInstanceOf[Bind]) unapply(t)
else Some(t)
case _ => None
}
def replace(n: Node, sel: Node): Node = n match {
case b @ Bind(_, _, t) =>
if(t.isInstanceOf[Bind]) b.copy(select = replace(t, sel))
else b.copy(select = sel)
}
}
}
/** Remove unreferenced fields from StructNodes */
class PruneFields extends Phase {
val name = "pruneFields"
def apply(n: Node, state: CompilationState) = prune.repeat(n)
def prune = new Transformer {
val refs = new HashSet[Symbol]()
override def initTree(n: Node) {
super.initTree(n)
refs.clear()
refs ++= n.collect[Symbol] { case Select(_, f: Symbol) => f }
logger.debug("Protecting refs: "+refs)
}
def replace = {
case n @ StructNode(ch) =>
val ch2 = ch.filter { case (sym, n) => refs.contains(sym) }
if(ch2.length == ch.length) n else StructNode(ch2)
}
}
}
|
zefonseca/slick-1.0.0-scala.2.11.1
|
src/main/scala/scala/slick/compiler/RewritePaths.scala
|
Scala
|
bsd-2-clause
| 7,513 |
package edu.duke.oit.vw.models
import edu.duke.oit.vw.utils._
import edu.duke.oit.vw.solr.Vivo
case class Newsfeed(uri:String,
vivoType: String,
label: String,
attributes:Option[Map[String, String]])
extends VivoAttributes(uri, vivoType, label, attributes) with AddToJson
{
override def uris():List[String] = {
uri :: super.uris
}
override def officialDateKey = {
"newsDatetime"
}
}
object Newsfeed extends AttributeParams {
def fromUri(vivo: Vivo, uriContext:Map[String, Any]) = {
val data = vivo.selectFromTemplate("sparql/newsfeeds.ssp", uriContext)
val existingData = data.filter(datum => !datum.isEmpty)
existingData.map(build(_)).asInstanceOf[List[Newsfeed]]
}
def build(newsfeed:Map[Symbol,String]) = {
new Newsfeed(uri = newsfeed('newsfeed).stripBrackets(),
vivoType = newsfeed('type).stripBrackets(),
label = newsfeed('label),
attributes = parseAttributes(newsfeed, List('newsfeed,'type,'label)))
}
}
|
OIT-ADS-Web/vivo_widgets
|
src/main/scala/models/Newsfeed.scala
|
Scala
|
bsd-3-clause
| 1,084 |
package org.template.similar
import io.prediction.controller.PAlgorithm
import io.prediction.controller.Params
import io.prediction.controller.IPersistentModel
import io.prediction.controller.IPersistentModelLoader
import io.prediction.data.storage.BiMap
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import org.apache.spark.mllib.linalg.distributed.MatrixEntry
import org.apache.spark.mllib.linalg.distributed.CoordinateMatrix
import grizzled.slf4j.Logger
import scala.collection.mutable.PriorityQueue
case class ALSAlgorithmParams(
val rank: Int,
val numIterations: Int
) extends Params
class ALSModel(
val productFeatures: RDD[(Int, Array[Double])],
val itemStringIntMap: BiMap[String, Int],
val items: Map[Int, Item]
) extends IPersistentModel[ALSAlgorithmParams] with Serializable {
@transient lazy val itemIntStringMap = itemStringIntMap.inverse
def save(id: String, params: ALSAlgorithmParams,
sc: SparkContext): Boolean = {
productFeatures.saveAsObjectFile(s"/tmp/${id}/productFeatures")
sc.parallelize(Seq(itemStringIntMap))
.saveAsObjectFile(s"/tmp/${id}/itemStringIntMap")
sc.parallelize(Seq(items))
.saveAsObjectFile(s"/tmp/${id}/items")
true
}
override def toString = {
s" productFeatures: [${productFeatures.count()}]" +
s"(${productFeatures.take(2).toList}...)" +
s" itemStringIntMap: [${itemStringIntMap.size}]" +
s"(${itemStringIntMap.take(2).toString}...)]" +
s" items: [${items.size}]" +
s"(${items.take(2).toString}...)]"
}
}
object ALSModel
extends IPersistentModelLoader[ALSAlgorithmParams, ALSModel] {
def apply(id: String, params: ALSAlgorithmParams,
sc: Option[SparkContext]) = {
new ALSModel(
productFeatures = sc.get.objectFile(s"/tmp/${id}/productFeatures"),
itemStringIntMap = sc.get
.objectFile[BiMap[String, Int]](s"/tmp/${id}/itemStringIntMap").first,
items = sc.get
.objectFile[Map[Int, Item]](s"/tmp/${id}/items").first)
}
}
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends PAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
def train(data: PreparedData): ALSModel = {
// create User and item's String ID to integer index BiMap
val userStringIntMap = BiMap.stringInt(data.users.keys)
val itemStringIntMap = BiMap.stringInt(data.items.keys)
// collect Item as Map and convert ID to Int index
val items: Map[Int, Item] = data.items.map { case (id, item) =>
(itemStringIntMap(id), item)
}.collectAsMap.toMap
val itemCount = items.size
val sc = data.viewEvents.context
val mllibRatings = data.viewEvents
.map { r =>
// Convert user and item String IDs to Int index for MLlib
val uindex = userStringIntMap.getOrElse(r.user, -1)
val iindex = itemStringIntMap.getOrElse(r.item, -1)
if (uindex == -1)
logger.info(s"Couldn't convert nonexistent user ID ${r.user}"
+ " to Int index.")
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
((uindex, iindex), 1)
}.filter { case ((u, i), v) =>
// keep events with valid user and item index
(u != -1) && (i != -1)
}.reduceByKey(_ + _) // aggregate all view events of same user-item pair
.map { case ((u, i), v) =>
// MLlibRating requires integer index for user and item
MLlibRating(u, i, v)
}
val m = ALS.trainImplicit(mllibRatings, ap.rank, ap.numIterations)
new ALSModel(
productFeatures = m.productFeatures,
itemStringIntMap = itemStringIntMap,
items = items
)
}
def predict(model: ALSModel, query: Query): PredictedResult = {
// convert items to Int index
val queryList: Set[Int] = query.items.map(model.itemStringIntMap.get(_))
.flatten.toSet
val queryFeatures: Vector[Array[Double]] = queryList.toVector.par
.map { item =>
val qf: Array[Double] = model.productFeatures.lookup(item).head
qf
}.seq
val whiteList: Option[Set[Int]] = query.whiteList.map( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val blackList: Option[Set[Int]] = query.blackList.map ( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val ord = Ordering.by[(Int, Double), Double](_._2).reverse
val indexScores: Array[(Int, Double)] = if (queryFeatures.isEmpty) {
logger.info(s"No valid items in ${query.items}.")
Array[(Int, Double)]()
} else {
model.productFeatures
.mapValues { f =>
queryFeatures.map{ qf =>
cosine(qf, f)
}.reduce(_ + _)
}
.collect()
}
val filteredScore = indexScores.view.filter { case (i, v) =>
isCandidateItem(
i = i,
items = model.items,
categories = query.categories,
queryList = queryList,
whiteList = whiteList,
blackList = blackList
)
}
val topScores = getTopN(filteredScore, query.num)(ord).toArray
val itemScores = topScores.map { case (i, s) =>
new ItemScore(
item = model.itemIntStringMap(i),
score = s
)
}
new PredictedResult(itemScores)
}
private
def getTopN[T](s: Seq[T], n: Int)(implicit ord: Ordering[T]): Seq[T] = {
val q = PriorityQueue()
for (x <- s) {
if (q.size < n)
q.enqueue(x)
else {
// q is full
if (ord.compare(x, q.head) < 0) {
q.dequeue()
q.enqueue(x)
}
}
}
q.dequeueAll.toSeq.reverse
}
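  /*
   * Editorial note (added): at the call site above, `ord` is the by-score
   * ordering reversed, so inside getTopN the PriorityQueue's head is the
   * lowest-scoring element kept so far. A candidate replaces it only when
   * ord.compare(x, q.head) < 0, i.e. when its score is higher, which keeps the
   * n best items; dequeueAll then yields ascending scores and the final
   * .reverse puts the best-scoring item first.
   */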
private
def cosine(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var n1: Double = 0
var n2: Double = 0
var d: Double = 0
while (i < size) {
n1 += v1(i) * v1(i)
n2 += v2(i) * v2(i)
d += v1(i) * v2(i)
i += 1
}
d / (math.sqrt(n1) * math.sqrt(n2))
}
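  /*
   * Editorial example (added): cosine(Array(1.0, 0.0), Array(1.0, 1.0))
   *   = (1*1 + 0*1) / (sqrt(1) * sqrt(2)) = 1 / sqrt(2) ~= 0.707,
   * i.e. the usual cosine similarity d / (||v1|| * ||v2||), computed here in a
   * single pass over both feature vectors.
   */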
private
def isCandidateItem(
i: Int,
items: Map[Int, Item],
categories: Option[Set[String]],
queryList: Set[Int],
whiteList: Option[Set[Int]],
blackList: Option[Set[Int]]
): Boolean = {
whiteList.map(_.contains(i)).getOrElse(true) &&
blackList.map(!_.contains(i)).getOrElse(true) &&
// discard items in query as well
(!queryList.contains(i)) &&
// filter categories
categories.map { cat =>
items(i).categories.map { itemCat =>
          // keep this item if it has overlapping categories with the query
!(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(false) // discard this item if it has no categories
}.getOrElse(true)
}
}
|
TheDataShed/PredictionIO
|
templates/scala-parallel-similar/src/main/scala/ALSAlgorithm.scala
|
Scala
|
apache-2.0
| 6,945 |
package jgo.tools.compiler
package parser
import scala.util.parsing.input.Reader
import lexer._
import scope._
import interm._
import interm.types._
import stmts._
import funcs._
/**
* An old class used for testing the parser.
* @todo remove this
*/
class BlockLang(in: Reader[Token], res: List[Type] = Nil, resNamed: Boolean = false) extends FuncContext with Statements {
//def, not val. See comment in StackScoped
def initialEnclosing = UniverseScope
def targetFuncType = FuncType(Nil, res)
def hasNamedResults = resNamed
lazy val result = phrase(block)(in)
}
object BlockLang {
import java.io.{File, InputStream, FileInputStream, InputStreamReader}
import scala.collection.immutable.PagedSeq
def apply(in: Reader[Char]): BlockLang = new BlockLang(Scanner(in))
def apply(inStr: String): BlockLang = new BlockLang(Scanner(inStr))
def apply(in: InputStream): BlockLang = new BlockLang(Scanner(in))
def apply(file: File): BlockLang = new BlockLang(Scanner(file))
def from(fileName: String): BlockLang = new BlockLang(Scanner.from(fileName))
}
|
thomasmodeneis/jgo
|
src/src/main/scala/jgo/tools/compiler/parser/BlockLang.scala
|
Scala
|
gpl-3.0
| 1,103 |
package com.datastax.spark.connector.sql
import org.apache.spark.Logging
import org.apache.spark.sql.{DataFrame, SQLContext}
import org.apache.spark.sql.SaveMode._
import org.apache.spark.sql.cassandra.{TableRef, CassandraSourceRelation}
import com.datastax.spark.connector.SparkCassandraITFlatSpecBase
import com.datastax.spark.connector.cql.CassandraConnector
import com.datastax.spark.connector.embedded.SparkTemplate._
import com.datastax.spark.connector.embedded.EmbeddedCassandra._
class CassandraDataSourceSpec extends SparkCassandraITFlatSpecBase with Logging {
useCassandraConfig(Seq("cassandra-default.yaml.template"))
useSparkConf(defaultSparkConf)
val conn = CassandraConnector(defaultConf)
conn.withSessionDo { session =>
session.execute("CREATE KEYSPACE IF NOT EXISTS sql_ds_test WITH REPLICATION = " +
"{ 'class': 'SimpleStrategy', 'replication_factor': 1 }")
session.execute("CREATE TABLE IF NOT EXISTS sql_ds_test.test1 (a INT, b INT, c INT, d INT, e INT, f INT, g INT, " +
"h INT, PRIMARY KEY ((a, b, c), d , e, f))")
session.execute("USE sql_ds_test")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 1, 1, 1, 1, 1)")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 1, 2, 1, 1, 2)")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 2, 1, 1, 2, 1)")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 1, 1, 2, 2, 1, 2, 2)")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 1, 1, 2, 1, 1)")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 1, 2, 2, 1, 2)")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 2, 1, 2, 2, 1)")
session.execute("INSERT INTO sql_ds_test.test1 (a, b, c, d, e, f, g, h) VALUES (1, 2, 1, 2, 2, 2, 2, 2)")
}
val sqlContext: SQLContext = new SQLContext(sc)
def pushDown: Boolean = true
override def beforeAll() {
createTempTable("sql_ds_test", "test1", "tmpTable")
}
override def afterAll() {
super.afterAll()
conn.withSessionDo { session =>
session.execute("DROP KEYSPACE sql_ds_test")
}
sqlContext.dropTempTable("tmpTable")
}
def createTempTable(keyspace: String, table: String, tmpTable: String) = {
sqlContext.sql(
s"""
|CREATE TEMPORARY TABLE $tmpTable
|USING org.apache.spark.sql.cassandra
|OPTIONS (
| table "$table",
| keyspace "$keyspace",
| pushdown "$pushDown")
""".stripMargin.replaceAll("\\n", " "))
}
def cassandraTable(tableRef: TableRef) : DataFrame = {
sqlContext.baseRelationToDataFrame(CassandraSourceRelation(tableRef, sqlContext))
}
it should "allow to select all rows" in {
val result = cassandraTable(TableRef("test1", "sql_ds_test")).select("a").collect()
result should have length 8
result.head should have length 1
}
it should "allow to register as a temp table" in {
cassandraTable(TableRef("test1", "sql_ds_test")).registerTempTable("test1")
val temp = sqlContext.sql("SELECT * from test1").select("b").collect()
temp should have length 8
temp.head should have length 1
sqlContext.dropTempTable("test1")
}
it should "allow to insert data into a cassandra table" in {
conn.withSessionDo { session =>
session.execute("CREATE TABLE IF NOT EXISTS sql_ds_test.test_insert (a INT PRIMARY KEY, b INT)")
}
createTempTable("sql_ds_test", "test_insert", "insertTable")
sqlContext.sql("SELECT * FROM insertTable").collect() should have length 0
sqlContext.sql("INSERT OVERWRITE TABLE insertTable SELECT a, b FROM tmpTable")
sqlContext.sql("SELECT * FROM insertTable").collect() should have length 1
sqlContext.dropTempTable("insertTable")
}
it should "allow to save data to a cassandra table" in {
conn.withSessionDo { session =>
session.execute("CREATE TABLE IF NOT EXISTS sql_ds_test.test_insert1 (a INT PRIMARY KEY, b INT)")
}
sqlContext.sql("SELECT a, b from tmpTable")
.write
.format("org.apache.spark.sql.cassandra")
.mode(ErrorIfExists)
.options(Map("table" -> "test_insert1", "keyspace" -> "sql_ds_test"))
.save()
cassandraTable(TableRef("test_insert1", "sql_ds_test")).collect() should have length 1
val message = intercept[UnsupportedOperationException] {
sqlContext.sql("SELECT a, b from tmpTable")
.write
.format("org.apache.spark.sql.cassandra")
.mode(ErrorIfExists)
.options(Map("table" -> "test_insert1", "keyspace" -> "sql_ds_test"))
.save()
}.getMessage
assert(
message.contains("Writing to a non-empty Cassandra Table is not allowed."),
"We should complain that 'Writing to a non-empty Cassandra table is not allowed.'")
}
it should "allow to overwrite a cassandra table" in {
conn.withSessionDo { session =>
session.execute("CREATE TABLE IF NOT EXISTS sql_ds_test.test_insert2 (a INT PRIMARY KEY, b INT)")
session.execute("INSERT INTO sql_ds_test.test_insert2 (a, b) VALUES (3,4)")
session.execute("INSERT INTO sql_ds_test.test_insert2 (a, b) VALUES (5,6)")
}
sqlContext.sql("SELECT a, b from tmpTable")
.write
.format("org.apache.spark.sql.cassandra")
.mode(Overwrite)
.options(Map("table" -> "test_insert2", "keyspace" -> "sql_ds_test"))
.save()
createTempTable("sql_ds_test", "test_insert2", "insertTable2")
sqlContext.sql("SELECT * FROM insertTable2").collect() should have length 1
sqlContext.dropTempTable("insertTable2")
}
it should "allow to filter a table" in {
sqlContext.sql("SELECT a, b FROM tmpTable WHERE a=1 and b=2 and c=1 and e=1").collect() should have length 2
}
it should "allow to filter a table with a function for a column alias" in {
sqlContext.sql("SELECT * FROM (SELECT (a + b + c) AS x, d FROM tmpTable) " +
"AS tmpTable1 WHERE x= 3").collect() should have length 4
}
it should "allow to filter a table with alias" in {
sqlContext.sql("SELECT * FROM (SELECT a AS a1, b AS b1, c AS c1, d AS d1, e AS e1" +
" FROM tmpTable) AS tmpTable1 WHERE a1=1 and b1=2 and c1=1 and e1=1 ").collect() should have length 2
}
}
|
clakech/spark-cassandra-connector
|
spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/sql/CassandraDataSourceSpec.scala
|
Scala
|
apache-2.0
| 6,408 |
package com.datastax.spark.connector.writer
import java.io.IOException
import com.datastax.driver.core.ProtocolVersion
import scala.collection.JavaConversions._
import scala.concurrent.Future
import com.datastax.spark.connector.mapper.{ColumnMapper, DefaultColumnMapper}
import com.datastax.spark.connector.embedded.SparkTemplate._
import com.datastax.spark.connector._
import com.datastax.spark.connector.cql._
import com.datastax.spark.connector.SomeColumns
import com.datastax.spark.connector.types._
case class KeyValue(key: Int, group: Long, value: String)
case class KeyValueWithTransient(key: Int, group: Long, value: String, @transient transientField: String)
case class KeyValueWithTTL(key: Int, group: Long, value: String, ttl: Int)
case class KeyValueWithTimestamp(key: Int, group: Long, value: String, timestamp: Long)
case class KeyValueWithConversion(key: String, group: Int, value: String)
case class ClassWithWeirdProps(devil: String, cat: Int, value: String)
case class Address(street: String, city: String, zip: Int)
class SuperKeyValue(val key: Int, val value: String) extends Serializable
class SubKeyValue(k: Int, v: String, val group: Long) extends SuperKeyValue(k, v)
case class CustomerId(id: String)
object CustomerIdConverter extends TypeConverter[String] {
def targetTypeTag = scala.reflect.runtime.universe.typeTag[String]
def convertPF = { case CustomerId(id) => id }
}
class TableWriterSpec extends SparkCassandraITFlatSpecBase {
useCassandraConfig(Seq("cassandra-default.yaml.template"))
useSparkConf(defaultConf)
val conn = CassandraConnector(defaultConf)
conn.withSessionDo { session =>
createKeyspace(session)
awaitAll(
Future {
session.execute( s"""CREATE TABLE $ks.key_value (key INT, group BIGINT, value TEXT, PRIMARY KEY (key, group))""")
},
Future {
session.execute( s"""CREATE TABLE $ks.nulls (key INT PRIMARY KEY, text_value TEXT, int_value INT)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.collections (key INT PRIMARY KEY, l list<text>, s set<text>, m map<text, text>)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.collections_mod (key INT PRIMARY KEY, lcol list<text>, scol set<text>, mcol map<text, text>)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.blobs (key INT PRIMARY KEY, b blob)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.counters (pkey INT, ckey INT, c1 counter, c2 counter, PRIMARY KEY (pkey, ckey))""")
},
Future {
session.execute( s"""CREATE TABLE $ks.counters2 (pkey INT PRIMARY KEY, c counter)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.\"camelCase\" (\"primaryKey\" INT PRIMARY KEY, \"textValue\" text)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.single_column (pk INT PRIMARY KEY)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.map_tuple (a TEXT, b TEXT, c TEXT, PRIMARY KEY (a))""")
},
Future {
session.execute( s"""CREATE TABLE $ks.unset_test (a TEXT, b TEXT, c TEXT, PRIMARY KEY (a))""")
},
Future {
session.execute( s"""CREATE TYPE $ks.address (street text, city text, zip int)""")
session.execute( s"""CREATE TABLE $ks.udts(key INT PRIMARY KEY, name text, addr frozen<address>)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.tuples (key INT PRIMARY KEY, value frozen<tuple<int, int, varchar>>)""")
},
Future {
session.execute( s"""CREATE TABLE $ks.tuples2 (key INT PRIMARY KEY, value frozen<tuple<int, int, varchar>>)""")
},
Future {
session.execute( s"""CREATE TYPE $ks.address2 (street text, number frozen<tuple<int, int>>)""")
session.execute( s"""CREATE TABLE $ks.nested_tuples (key INT PRIMARY KEY, addr frozen<address2>)""")
})
}
def protocolVersion = conn.withClusterDo(cluster =>
cluster.getConfiguration.getProtocolOptions.getProtocolVersion)
private def verifyKeyValueTable(tableName: String) {
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.""" + tableName).all()
result should have size 3
for (row <- result) {
Some(row.getInt(0)) should contain oneOf(1, 2, 3)
Some(row.getLong(1)) should contain oneOf(1, 2, 3)
Some(row.getString(2)) should contain oneOf("value1", "value2", "value3")
}
}
}
"A TableWriter" should "write RDD of tuples to an existing table" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq((1, 1L, "value1"), (2, 2L, "value2"), (3, 3L, "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value", SomeColumns("key", "group", "value"))
verifyKeyValueTable("key_value")
}
it should "write RDD of tuples to a new table" in {
val pkey = ColumnDef("key", PartitionKeyColumn, IntType)
val group = ColumnDef("group", ClusteringColumn(0), BigIntType)
val value = ColumnDef("value", RegularColumn, TextType)
val table = TableDef(ks, "new_kv_table", Seq(pkey), Seq(group), Seq(value))
val rows = Seq((1, 1L, "value1"), (2, 2L, "value2"), (3, 3L, "value3"))
sc.parallelize(rows).saveAsCassandraTableEx(table, SomeColumns("key", "group", "value"))
verifyKeyValueTable("new_kv_table")
}
it should "write RDD of tuples applying proper data type conversions" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(("1", "1", "value1"), ("2", "2", "value2"), ("3", "3", "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value")
verifyKeyValueTable("key_value")
}
it should "write RDD of case class objects" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value")
verifyKeyValueTable("key_value")
}
it should "write RDD of case class objects to a new table using auto mapping" in {
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
sc.parallelize(col).saveAsCassandraTable(ks, "new_kv_table_from_case_class")
verifyKeyValueTable("new_kv_table_from_case_class")
}
it should "write RDD of case class objects applying proper data type conversions" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(
KeyValueWithConversion("1", 1, "value1"),
KeyValueWithConversion("2", 2, "value2"),
KeyValueWithConversion("3", 3, "value3")
)
sc.parallelize(col).saveToCassandra(ks, "key_value")
verifyKeyValueTable("key_value")
}
it should "write RDD of CassandraRow objects" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(
CassandraRow.fromMap(Map("key" -> 1, "group" -> 1L, "value" -> "value1")),
CassandraRow.fromMap(Map("key" -> 2, "group" -> 2L, "value" -> "value2")),
CassandraRow.fromMap(Map("key" -> 3, "group" -> 3L, "value" -> "value3"))
)
sc.parallelize(col).saveToCassandra(ks, "key_value")
verifyKeyValueTable("key_value")
}
it should "write RDD of CassandraRow objects applying proper data type conversions" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(
CassandraRow.fromMap(Map("key" -> "1", "group" -> BigInt(1), "value" -> "value1")),
CassandraRow.fromMap(Map("key" -> "2", "group" -> BigInt(2), "value" -> "value2")),
CassandraRow.fromMap(Map("key" -> "3", "group" -> BigInt(3), "value" -> "value3"))
)
sc.parallelize(col).saveToCassandra(ks, "key_value")
verifyKeyValueTable("key_value")
}
it should "ignore unset inserts" in {
conn.withSessionDo {
_.execute(
s"""INSERT into $ks.unset_test (A, B, C) VALUES ('Original', 'Original', 'Original')""")
}
sc.parallelize(Seq(("Original", Unset, "New"))).saveToCassandra(ks, "unset_test")
val result = sc.cassandraTable[(String, Option[String], Option[String])](ks, "unset_test")
.collect
if (protocolVersion.toInt >= ProtocolVersion.V4.toInt) {
result(0) should be(("Original", Some("Original"), Some("New")))
} else {
result(0) should be(("Original", None, Some("New")))
}
}
it should "ignore CassandraOptions set to UNSET" in {
conn.withSessionDo {
_.execute(
s"""INSERT into $ks.unset_test (A, B, C) VALUES ('Original', 'Original','Original')"""
)
}
sc.parallelize(Seq(("Original", CassandraOption.Unset, "New")))
.saveToCassandra(ks, "unset_test")
val result = sc.cassandraTable[(String, Option[String], Option[String])](ks, "unset_test")
.collect
if (protocolVersion.toInt >= ProtocolVersion.V4.toInt) {
result(0) should be(("Original", Some("Original"), Some("New")))
} else {
result(0) should be(("Original", None, Some("New")))
}
}
it should "delete with Cassandra Options set to Null" in {
conn.withSessionDo {
_.execute(
s"""INSERT into $ks.unset_test (A, B, C) VALUES ('Original', 'Original', 'Original')"""
)
}
sc.parallelize(Seq(("Original", CassandraOption.Null, "New"))).saveToCassandra(ks, "unset_test")
val result = sc.cassandraTable[(String, Option[String], Option[String])](ks, "unset_test")
.collect
result(0) should be(("Original", None, Some("New")))
}
it should "write RDD of tuples to a table with camel case column names" in {
val col = Seq((1, "value1"), (2, "value2"), (3, "value3"))
sc.parallelize(col).saveToCassandra(ks, "camelCase", SomeColumns("primaryKey", "textValue"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks."camelCase"""").all()
result should have size 3
for (row <- result) {
Some(row.getInt(0)) should contain oneOf(1, 2, 3)
Some(row.getString(1)) should contain oneOf("value1", "value2", "value3")
}
}
}
it should "write empty values" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq((1, 1L, None))
sc.parallelize(col).saveToCassandra(ks, "key_value", SomeColumns("key", "group", "value"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.key_value""").all()
result should have size 1
for (row <- result) {
row.getString(2) should be (null)
}
}
}
it should "write null values" in {
val key = 1.asInstanceOf[AnyRef]
val row = new CassandraRow(IndexedSeq("key", "text_value", "int_value"), IndexedSeq(key, null, null))
sc.parallelize(Seq(row)).saveToCassandra(ks, "nulls")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.nulls""").all()
result should have size 1
for (r <- result) {
r.getInt(0) shouldBe key
r.isNull(1) shouldBe true
r.isNull(2) shouldBe true
}
}
}
it should "write only specific column data if ColumnNames is passed as 'columnNames'" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq((1, 1L, None))
sc.parallelize(col).saveToCassandra(ks, "key_value", SomeColumns("key", "group"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.key_value""").all()
result should have size 1
for (row <- result) {
row.getInt(0) should be (1)
row.getString(2) should be (null)
}
}
}
it should "distinguish (deprecated) implicit `seqToSomeColumns`" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq((2, 1L, None))
sc.parallelize(col).saveToCassandra(ks, "key_value", SomeColumns("key", "group"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.key_value""").all()
result should have size 1
for (row <- result) {
row.getInt(0) should be (2)
row.getString(2) should be (null)
}
}
}
it should "write collections" in {
val col = Seq(
(1, Vector("item1", "item2"), Set("item1", "item2"), Map("key1" -> "value1", "key2" -> "value2")),
(2, Vector.empty[String], Set.empty[String], Map.empty[String, String]))
sc.parallelize(col).saveToCassandra(ks, "collections", SomeColumns("key", "l", "s", "m"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.collections""").all()
result should have size 2
val rows = result.groupBy(_.getInt(0)).mapValues(_.head)
val row0 = rows(1)
val row1 = rows(2)
row0.getList("l", classOf[String]).toSeq shouldEqual Seq("item1", "item2")
row0.getSet("s", classOf[String]).toSeq shouldEqual Seq("item1", "item2")
row0.getMap("m", classOf[String], classOf[String]).toMap shouldEqual Map("key1" -> "value1", "key2" -> "value2")
row1.isNull("l") shouldEqual true
row1.isNull("m") shouldEqual true
row1.isNull("s") shouldEqual true
}
}
it should "write blobs" in {
val col = Seq((1, Some(Array[Byte](0, 1, 2, 3))), (2, None))
sc.parallelize(col).saveToCassandra(ks, "blobs", SomeColumns("key", "b"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.blobs""").all()
result should have size 2
val rows = result.groupBy(_.getInt(0)).mapValues(_.head)
val row0 = rows(1)
val row1 = rows(2)
row0.getBytes("b").remaining shouldEqual 4
row1.isNull("b") shouldEqual true
}
}
it should "increment and decrement counters" in {
val col1 = Seq((0, 0, 1, 1))
sc.parallelize(col1).saveToCassandra(ks, "counters", SomeColumns("pkey", "ckey", "c1", "c2"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.counters""").one()
result.getLong("c1") shouldEqual 1L
result.getLong("c2") shouldEqual 1L
}
val col2 = Seq((0, 0, 1))
sc.parallelize(col1).saveToCassandra(ks, "counters", SomeColumns("pkey", "ckey", "c2"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.counters""").one()
result.getLong("c1") shouldEqual 1L
result.getLong("c2") shouldEqual 2L
}
}
it should "increment and decrement counters in batches" in {
val rowCount = 10000
val col = for (i <- 1 to rowCount) yield (i, 1)
sc.parallelize(col).saveToCassandra(ks, "counters2", SomeColumns("pkey", "c"))
sc.cassandraTable(ks, "counters2").cassandraCount() should be(rowCount)
}
it should "write values of user-defined classes" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
TypeConverter.registerConverter(CustomerIdConverter)
val col = Seq((1, 1L, CustomerId("foo")))
sc.parallelize(col).saveToCassandra(ks, "key_value", SomeColumns("key", "group", "value"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.key_value""").all()
result should have size 1
for (row <- result)
row.getString(2) shouldEqual "foo"
}
}
it should "write values of user-defined-types from case classes into Cassandra" in {
val address = Address(city = "Oakland", zip = 90210, street = "Broadway")
val col = Seq((1, "Joe", address))
sc.parallelize(col).saveToCassandra(ks, "udts", SomeColumns("key", "name", "addr"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, name, addr FROM $ks.udts""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getString(1) shouldEqual "Joe"
row.getUDTValue(2).getString("city") shouldEqual "Oakland"
row.getUDTValue(2).getInt("zip") shouldEqual 90210
}
}
}
it should "write values of user-defined-types in Cassandra" in {
val address = UDTValue.fromMap(Map("city" -> "Warsaw", "zip" -> 10000, "street" -> "Marszałkowska"))
val col = Seq((1, "Joe", address))
sc.parallelize(col).saveToCassandra(ks, "udts", SomeColumns("key", "name", "addr"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, name, addr FROM $ks.udts""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getString(1) shouldEqual "Joe"
row.getUDTValue(2).getString("city") shouldEqual "Warsaw"
row.getUDTValue(2).getInt("zip") shouldEqual 10000
}
}
}
it should "write null values of user-defined-types in Cassandra" in {
val address = null
val col = Seq((1, "Joe", address))
sc.parallelize(col).saveToCassandra(ks, "udts", SomeColumns("key", "name", "addr"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, name, addr FROM "$ks".udts""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getString(1) shouldEqual "Joe"
row.getUDTValue(2) should be (null)
}
}
}
it should "write values of user-defined-types with null fields in Cassandra" in {
val address = UDTValue.fromMap(Map("city" -> "Warsaw", "zip" -> 10000, "street" -> null))
val col = Seq((1, "Joe", address))
sc.parallelize(col).saveToCassandra(ks, "udts", SomeColumns("key", "name", "addr"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, name, addr FROM "$ks".udts""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getString(1) shouldEqual "Joe"
row.getUDTValue(2).getString("city") shouldEqual "Warsaw"
row.getUDTValue(2).getString("street") should be (null)
row.getUDTValue(2).getInt("zip") shouldEqual 10000
}
}
}
it should "write values of TupleValue type" in {
val tuple = TupleValue(1, 2, "three")
val col = Seq((1, tuple))
sc.parallelize(col).saveToCassandra(ks, "tuples", SomeColumns("key", "value"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, value FROM $ks.tuples""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getTupleValue(1).getInt(0) shouldEqual 1
row.getTupleValue(1).getInt(1) shouldEqual 2
row.getTupleValue(1).getString(2) shouldEqual "three"
}
}
}
it should "write column values of tuple type given as Scala tuples" in {
val tuple = (1, 2, "three") // Scala tuple
val col = Seq((1, tuple))
sc.parallelize(col).saveToCassandra(ks, "tuples", SomeColumns("key", "value"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, value FROM $ks.tuples""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getTupleValue(1).getInt(0) shouldEqual 1
row.getTupleValue(1).getInt(1) shouldEqual 2
row.getTupleValue(1).getString(2) shouldEqual "three"
}
}
}
it should "write Scala tuples nested in UDTValues" in {
val number = (1, 2)
val address = UDTValue.fromMap(Map("street" -> "foo", "number" -> number))
val col = Seq((1, address))
sc.parallelize(col).saveToCassandra(ks, "nested_tuples", SomeColumns("key", "addr"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, addr FROM $ks.nested_tuples""").all()
result should have size 1
for (row <- result) {
row.getInt(0) shouldEqual 1
row.getUDTValue(1).getString(0) shouldEqual "foo"
row.getUDTValue(1).getTupleValue(1).getInt(0) shouldEqual 1
row.getUDTValue(1).getTupleValue(1).getInt(1) shouldEqual 2
}
}
}
it should "convert components in nested Scala tuples to proper types" in {
val number = ("1", "2") // Strings, but should be Ints
val address = UDTValue.fromMap(Map("street" -> "foo", "number" -> number))
val col = Seq((1, address))
sc.parallelize(col).saveToCassandra(ks, "nested_tuples", SomeColumns("key", "addr"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, addr FROM $ks.nested_tuples""").all()
for (row <- result) {
row.getUDTValue(1).getTupleValue(1).getInt(0) shouldEqual 1
row.getUDTValue(1).getTupleValue(1).getInt(1) shouldEqual 2
}
}
}
it should "write to single-column tables" in {
val col = Seq(1, 2, 3, 4, 5).map(Tuple1.apply)
sc.parallelize(col).saveToCassandra(ks, "single_column", SomeColumns("pk"))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.single_column""").all()
result should have size 5
result.map(_.getInt(0)).toSet should be (Set(1, 2, 3, 4, 5))
}
}
it should "throw IOException if table is not found" in {
val col = Seq(("1", "1", "value1"), ("2", "2", "value2"), ("3", "3", "value3"))
intercept[IOException] {
sc.parallelize(col).saveToCassandra(ks, "unknown_table")
}
}
it should "write RDD of case class objects with default TTL" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
sc.parallelize(col).saveToCassandra(ks, "key_value", writeConf = WriteConf(ttl = TTLOption.constant(100)))
verifyKeyValueTable("key_value")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT TTL(value) FROM $ks.key_value""").all()
result should have size 3
result.foreach(_.getInt(0) should be > 50)
result.foreach(_.getInt(0) should be <= 100)
}
}
it should "write RDD of case class objects with default timestamp" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(KeyValue(1, 1L, "value1"), KeyValue(2, 2L, "value2"), KeyValue(3, 3L, "value3"))
val ts = System.currentTimeMillis() - 1000L
sc.parallelize(col).saveToCassandra(ks, "key_value", writeConf = WriteConf(timestamp = TimestampOption.constant(ts * 1000L)))
verifyKeyValueTable("key_value")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT WRITETIME(value) FROM $ks.key_value""").all()
result should have size 3
result.foreach(_.getLong(0) should be (ts * 1000L))
}
}
it should "write RDD of case class objects with per-row TTL" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(KeyValueWithTTL(1, 1L, "value1", 100), KeyValueWithTTL(2, 2L, "value2", 200), KeyValueWithTTL(3, 3L, "value3", 300))
sc.parallelize(col).saveToCassandra(ks, "key_value", writeConf = WriteConf(ttl = TTLOption.perRow("ttl")))
verifyKeyValueTable("key_value")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, TTL(value) FROM $ks.key_value""").all()
result should have size 3
result.foreach(row => {
row.getInt(1) should be > (100 * row.getInt(0) - 50)
row.getInt(1) should be <= (100 * row.getInt(0))
})
}
}
it should "write RDD of case class objects with per-row timestamp" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val ts = System.currentTimeMillis() - 1000L
val col = Seq(KeyValueWithTimestamp(1, 1L, "value1", ts * 1000L + 100L), KeyValueWithTimestamp(2, 2L, "value2", ts * 1000L + 200L), KeyValueWithTimestamp(3, 3L, "value3", ts * 1000L + 300L))
sc.parallelize(col).saveToCassandra(ks, "key_value", writeConf = WriteConf(timestamp = TimestampOption.perRow("timestamp")))
verifyKeyValueTable("key_value")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, WRITETIME(value) FROM $ks.key_value""").all()
result should have size 3
result.foreach(row => {
row.getLong(1) should be (ts * 1000L + row.getInt(0) * 100L)
})
}
}
it should "write RDD of case class objects with per-row TTL with custom mapping" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(KeyValueWithTTL(1, 1L, "value1", 100), KeyValueWithTTL(2, 2L, "value2", 200), KeyValueWithTTL(3, 3L, "value3", 300))
implicit val mapping = new DefaultColumnMapper[KeyValueWithTTL](Map("ttl" -> "ttl_placeholder"))
sc.parallelize(col).saveToCassandra(ks, "key_value",
writeConf = WriteConf(ttl = TTLOption.perRow("ttl_placeholder")))
verifyKeyValueTable("key_value")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, TTL(value) FROM $ks.key_value""").all()
result should have size 3
result.foreach(row => {
row.getInt(1) should be > (100 * row.getInt(0) - 50)
row.getInt(1) should be <= (100 * row.getInt(0))
})
}
}
it should "write RDD of case class objects with per-row timestamp with custom mapping" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val ts = System.currentTimeMillis() - 1000L
val col = Seq(KeyValueWithTimestamp(1, 1L, "value1", ts * 1000L + 100L), KeyValueWithTimestamp(2, 2L, "value2", ts * 1000L + 200L), KeyValueWithTimestamp(3, 3L, "value3", ts * 1000L + 300L))
implicit val mapper =
new DefaultColumnMapper[KeyValueWithTimestamp](Map("timestamp" -> "timestamp_placeholder"))
sc.parallelize(col).saveToCassandra(ks, "key_value",
writeConf = WriteConf(timestamp = TimestampOption.perRow("timestamp_placeholder")))
verifyKeyValueTable("key_value")
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT key, WRITETIME(value) FROM $ks.key_value""").all()
result should have size 3
result.foreach(row => {
row.getLong(1) should be (ts * 1000L + row.getInt(0) * 100L)
})
}
}
it should "write RDD of case class objects applying proper data type conversions and aliases" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(
ClassWithWeirdProps("1", 1, "value1"),
ClassWithWeirdProps("2", 2, "value2"),
ClassWithWeirdProps("3", 3, "value3")
)
sc.parallelize(col).saveToCassandra(ks, "key_value", columns = SomeColumns(
"key" as "devil", "group" as "cat", "value"
))
verifyKeyValueTable("key_value")
}
it should "write an RDD of tuples mapped to different ordering of fields" in {
val col = Seq (("x","a","b"))
sc.parallelize(col)
.saveToCassandra(ks,
"map_tuple",
SomeColumns(("a" as "_2"), ("c" as "_1")))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.map_tuple""").all()
result should have size 1
val row = result(0)
row.getString("a") should be ("a")
row.getString("c") should be ("x")
}
}
it should "write an RDD of tuples with only some fields aliased" in {
val col = Seq (("c","a","b"))
sc.parallelize(col)
.saveToCassandra(ks,
"map_tuple",
SomeColumns(("a" as "_2"),("b" as "_3"), ("c" as "_1")))
conn.withSessionDo { session =>
val result = session.execute(s"""SELECT * FROM $ks.map_tuple""").all()
result should have size 1
val row = result(0)
row.getString("a") should be ("a")
row.getString("b") should be ("b")
row.getString("c") should be ("c")
}
}
it should "throw an exception if you try to alias tuple fields which don't exist" in {
val col = Seq (("c"))
intercept[IllegalArgumentException] {
sc.parallelize(col).saveToCassandra(ks,
"map_tuple",
SomeColumns(("a" as "_2"),("b" as "_3"), ("c" as "_1")))
}
}
it should "throw an exception when aliasing some tuple fields explicitly and others implicitly" in {
val col = Seq (("c","a"))
intercept[IllegalArgumentException] {
sc.parallelize(col).saveToCassandra(ks,
"map_tuple",
SomeColumns(("a" as "_2"),("b")))
}
}
it should "write RDD of objects with inherited fields" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(
new SubKeyValue(1, "value1", 1L),
new SubKeyValue(2, "value2", 2L),
new SubKeyValue(3, "value3", 3L)
)
sc.parallelize(col).saveToCassandra(ks, "key_value")
verifyKeyValueTable("key_value")
}
it should "write RDD of case class objects with transient fields" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq(KeyValueWithTransient(1, 1L, "value1", "a"), KeyValueWithTransient(2, 2L, "value2", "b"), KeyValueWithTransient(3, 3L, "value3", "c"))
sc.parallelize(col).saveToCassandra(ks, "key_value")
verifyKeyValueTable("key_value")
}
it should "be able to append and prepend elements to a C* list" in {
val listElements = sc.parallelize(Seq(
(1, Vector("One")),
(1, Vector("Two")),
(1, Vector("Three"))))
val prependElements = sc.parallelize(Seq(
(1, Vector("PrependOne")),
(1, Vector("PrependTwo")),
(1, Vector("PrependThree"))))
listElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "lcol" append))
prependElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "lcol" prepend))
val testList = sc.cassandraTable[(Seq[String])](ks, "collections_mod")
.where("key = 1")
.select("lcol").take(1)(0)
testList.take(3) should contain allOf("PrependOne", "PrependTwo", "PrependThree")
testList.drop(3) should contain allOf("One", "Two", "Three")
}
it should "be able to remove elements from a C* list " in {
val listElements = sc.parallelize(Seq(
(2, Vector("One")),
(2, Vector("Two")),
(2, Vector("Three"))))
listElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "lcol" append))
sc.parallelize(Seq(
(2, Vector("Two")),
(2, Vector("Three"))))
.saveToCassandra(ks, "collections_mod", SomeColumns("key", "lcol" remove))
val testList = sc.cassandraTable[(Seq[String])](ks, "collections_mod")
.where("key = 2")
.select("lcol").take(1)(0)
testList should contain noneOf("Two", "Three")
testList should contain("One")
}
it should "be able to add elements to a C* set " in {
val setElements = sc.parallelize(Seq(
(3, Set("One")),
(3, Set("Two")),
(3, Set("Three"))))
setElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "scol" append))
val testSet = sc.cassandraTable[(Set[String])](ks, "collections_mod")
.where("key = 3")
.select("scol").take(1)(0)
testSet should contain allOf("One", "Two", "Three")
}
it should "be able to remove elements from a C* set" in {
val setElements = sc.parallelize(Seq(
(4, Set("One")),
(4, Set("Two")),
(4, Set("Three"))))
setElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "scol" append))
sc.parallelize(Seq((4, Set("Two")), (4, Set("Three"))))
.saveToCassandra(ks, "collections_mod", SomeColumns("key", "scol" remove))
val testSet = sc.cassandraTable[(Set[String])](ks, "collections_mod")
.where("key = 4")
.select("scol").take(1)(0)
testSet should contain noneOf("Two", "Three")
testSet should contain("One")
}
it should "be able to add key value pairs to a C* map" in {
val setElements = sc.parallelize(Seq(
(5, Map("One" -> "One")),
(5, Map("Two" -> "Two")),
(5, Map("Three" -> "Three"))))
setElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "mcol" append))
val testMap = sc.cassandraTable[(Map[String, String])](ks, "collections_mod")
.where("key = 5")
.select("mcol").take(1)(0)
testMap.toSeq should contain allOf(("One", "One"), ("Two", "Two"), ("Three", "Three"))
}
it should "throw an exception if you try to apply a collection behavior to a normal column" in {
conn.withSessionDo(_.execute(s"""TRUNCATE $ks.key_value"""))
val col = Seq((1, 1L, "value1"), (2, 2L, "value2"), (3, 3L, "value3"))
val e = intercept[IllegalArgumentException] {
sc.parallelize(col).saveToCassandra(ks, "key_value", SomeColumns("key", "group"
overwrite, "value"))
}
e.getMessage should include("group")
}
it should "throw an exception if you try to remove values from a map" in {
val setElements = sc.parallelize(Seq(
(5, Map("One" -> "One")),
(5, Map("Two" -> "Two")),
(5, Map("Three" -> "Three"))))
val e = intercept[IllegalArgumentException] {
setElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "mcol" remove))
}
e.getMessage should include("mcol")
}
it should "throw an exception if you prepend anything but a list" in {
val setElements = sc.parallelize(Seq(
(5, Map("One" -> "One"), Set("One"))))
val e = intercept[IllegalArgumentException] {
setElements.saveToCassandra(ks, "collections_mod", SomeColumns("key", "mcol" prepend,
"scol" prepend))
}
e.getMessage should include("mcol")
e.getMessage should include("scol")
}
}
|
jimenefe/spark-cassandra-connector
|
spark-cassandra-connector/src/it/scala/com/datastax/spark/connector/writer/TableWriterSpec.scala
|
Scala
|
apache-2.0
| 33,349 |
package de.mineformers.visum.bean.property
import de.mineformers.visum.bean.value.{ObservableValue, MutableObservableValue}
/**
* Property
*
* @author PaleoCrafter
*/
trait MutableProperty[@specialized A] extends Property[A] with MutableObservableValue[A]
{
protected var valid = false
def invalidate(): Unit = {
if(valid) {
valid = false
invalidated()
fireChangeEvent()
}
}
def invalidated(): Unit = ()
def bind(value: ObservableValue[A]): Unit
def unbind(): Unit
def bound: Boolean
def bindBidirectional(property: MutableProperty[A]): Unit
def unbindBidirectional(property: MutableProperty[A]): Unit
def <==(value: ObservableValue[A]) = bind(value)
def <===>(property: MutableProperty[A]) = bindBidirectional(property)
def <=!=>(property: MutableProperty[A]) = unbindBidirectional(property)
}
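// Hedged sketch (not part of the original source): the symbolic operators above are plain
// forwarders, so generic helpers can be written against the trait without knowing any
// concrete implementation.
object MutablePropertySketch {
  // Mirrors two properties of the same type; equivalent to a.bindBidirectional(b).
  def mirror[A](a: MutableProperty[A], b: MutableProperty[A]): Unit = a <===> b
  // Follows a source observable; equivalent to target.bind(source).
  def follow[A](target: MutableProperty[A], source: ObservableValue[A]): Unit = target <== source
}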
|
MineFormers/Visum
|
src/main/scala/de.mineformers.visum/bean/property/MutableProperty.scala
|
Scala
|
mit
| 861 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.sources.v2
import java.io.{BufferedReader, InputStreamReader, IOException}
import java.util.Optional
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.spark.SparkContext
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.sources.v2.reader._
import org.apache.spark.sql.sources.v2.writer._
import org.apache.spark.sql.types.{DataType, StructType}
import org.apache.spark.util.SerializableConfiguration
/**
* A HDFS based transactional writable data source.
* Each task writes data to `target/_temporary/queryId/$jobId-$partitionId-$attemptNumber`.
* Each job moves files from `target/_temporary/queryId/` to `target`.
*/
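// Hedged illustration (not part of the original source): for queryId "q1", the writer for
// partition 0 of task 7 creates `target/_temporary/q1/q1-0-7`; a successful job commit
// renames that file to `target/q1-0-7` and then deletes `target/_temporary/q1/`.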
class SimpleWritableDataSource extends DataSourceV2
with BatchReadSupportProvider
with BatchWriteSupportProvider
with SessionConfigSupport {
private val schema = new StructType().add("i", "long").add("j", "long")
override def keyPrefix: String = "simpleWritableDataSource"
class ReadSupport(path: String, conf: Configuration) extends SimpleReadSupport {
override def fullSchema(): StructType = schema
override def planInputPartitions(config: ScanConfig): Array[InputPartition] = {
val dataPath = new Path(path)
val fs = dataPath.getFileSystem(conf)
if (fs.exists(dataPath)) {
fs.listStatus(dataPath).filterNot { status =>
val name = status.getPath.getName
name.startsWith("_") || name.startsWith(".")
}.map { f =>
CSVInputPartitionReader(f.getPath.toUri.toString)
}.toArray
} else {
Array.empty
}
}
override def createReaderFactory(config: ScanConfig): PartitionReaderFactory = {
val serializableConf = new SerializableConfiguration(conf)
new CSVReaderFactory(serializableConf)
}
}
class WritSupport(queryId: String, path: String, conf: Configuration) extends BatchWriteSupport {
override def createBatchWriterFactory(): DataWriterFactory = {
SimpleCounter.resetCounter
new CSVDataWriterFactory(path, queryId, new SerializableConfiguration(conf))
}
override def onDataWriterCommit(message: WriterCommitMessage): Unit = {
SimpleCounter.increaseCounter
}
override def commit(messages: Array[WriterCommitMessage]): Unit = {
val finalPath = new Path(path)
val jobPath = new Path(new Path(finalPath, "_temporary"), queryId)
val fs = jobPath.getFileSystem(conf)
try {
for (file <- fs.listStatus(jobPath).map(_.getPath)) {
val dest = new Path(finalPath, file.getName)
if(!fs.rename(file, dest)) {
throw new IOException(s"failed to rename($file, $dest)")
}
}
} finally {
fs.delete(jobPath, true)
}
}
override def abort(messages: Array[WriterCommitMessage]): Unit = {
val jobPath = new Path(new Path(path, "_temporary"), queryId)
val fs = jobPath.getFileSystem(conf)
fs.delete(jobPath, true)
}
}
override def createBatchReadSupport(options: DataSourceOptions): BatchReadSupport = {
val path = new Path(options.get("path").get())
val conf = SparkContext.getActive.get.hadoopConfiguration
new ReadSupport(path.toUri.toString, conf)
}
override def createBatchWriteSupport(
queryId: String,
schema: StructType,
mode: SaveMode,
options: DataSourceOptions): Optional[BatchWriteSupport] = {
assert(DataType.equalsStructurally(schema.asNullable, this.schema.asNullable))
assert(!SparkContext.getActive.get.conf.getBoolean("spark.speculation", false))
val path = new Path(options.get("path").get())
val conf = SparkContext.getActive.get.hadoopConfiguration
val fs = path.getFileSystem(conf)
if (mode == SaveMode.ErrorIfExists) {
if (fs.exists(path)) {
throw new RuntimeException("data already exists.")
}
}
if (mode == SaveMode.Ignore) {
if (fs.exists(path)) {
return Optional.empty()
}
}
if (mode == SaveMode.Overwrite) {
fs.delete(path, true)
}
val pathStr = path.toUri.toString
Optional.of(new WritSupport(queryId, pathStr, conf))
}
}
case class CSVInputPartitionReader(path: String) extends InputPartition
class CSVReaderFactory(conf: SerializableConfiguration)
extends PartitionReaderFactory {
override def createReader(partition: InputPartition): PartitionReader[InternalRow] = {
val path = partition.asInstanceOf[CSVInputPartitionReader].path
val filePath = new Path(path)
val fs = filePath.getFileSystem(conf.value)
new PartitionReader[InternalRow] {
private val inputStream = fs.open(filePath)
private val lines = new BufferedReader(new InputStreamReader(inputStream))
.lines().iterator().asScala
private var currentLine: String = _
override def next(): Boolean = {
if (lines.hasNext) {
currentLine = lines.next()
true
} else {
false
}
}
override def get(): InternalRow = InternalRow(currentLine.split(",").map(_.trim.toLong): _*)
override def close(): Unit = {
inputStream.close()
}
}
}
}
private[v2] object SimpleCounter {
private var count: Int = 0
def increaseCounter: Unit = {
count += 1
}
def getCounter: Int = {
count
}
def resetCounter: Unit = {
count = 0
}
}
class CSVDataWriterFactory(path: String, jobId: String, conf: SerializableConfiguration)
extends DataWriterFactory {
override def createWriter(
partitionId: Int,
taskId: Long): DataWriter[InternalRow] = {
val jobPath = new Path(new Path(path, "_temporary"), jobId)
val filePath = new Path(jobPath, s"$jobId-$partitionId-$taskId")
val fs = filePath.getFileSystem(conf.value)
new CSVDataWriter(fs, filePath)
}
}
class CSVDataWriter(fs: FileSystem, file: Path) extends DataWriter[InternalRow] {
private val out = fs.create(file)
override def write(record: InternalRow): Unit = {
out.writeBytes(s"${record.getLong(0)},${record.getLong(1)}\n")
}
override def commit(): WriterCommitMessage = {
out.close()
null
}
override def abort(): Unit = {
try {
out.close()
} finally {
fs.delete(file, false)
}
}
}
|
michalsenkyr/spark
|
sql/core/src/test/scala/org/apache/spark/sql/sources/v2/SimpleWritableDataSource.scala
|
Scala
|
apache-2.0
| 7,221 |
package com.sksamuel.elastic4s.scalaz
import com.sksamuel.elastic4s.http.{ElasticRequest, Executor, HttpClient, HttpResponse}
import scalaz.\/
import scalaz.concurrent.Task
class TaskExecutor extends Executor[Task] {
override def exec(client: HttpClient, request: ElasticRequest): Task[HttpResponse] = {
Task.async { k =>
client.send(request, { j => k(\/.fromEither(j)) })
}
}
}
|
Tecsisa/elastic4s
|
elastic4s-scalaz/src/main/scala/com/sksamuel/elastic4s/scalaz/TaskExecutor.scala
|
Scala
|
apache-2.0
| 400 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.interpreter.imports.printers
import java.io._
import com.ibm.spark.utils.DynamicReflectionSupport
/**
* Represents a wrapper for the scala.Console for Scala 2.10.4 implementation.
* @param in The input stream used for standard in
* @param out The output stream used for standard out
* @param err The output stream used for standard error
*/
class WrapperConsole(
val in: BufferedReader,
val out: PrintStream,
val err: PrintStream
) extends DynamicReflectionSupport(Class.forName("scala.Console$"), scala.Console) {
require(in != null)
require(out != null)
require(err != null)
//
// SUPPORTED PRINT OPERATIONS
//
def print(obj: Any): Unit = out.print(obj)
def printf(text: String, args: Any*): Unit =
out.print(text.format(args: _*))
def println(x: Any): Unit = out.println(x)
def println(): Unit = out.println()
}
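// Hedged usage sketch (not part of the original source): wiring the wrapper to the standard
// JVM streams; the argument types match the constructor parameters above.
object WrapperConsoleExample {
  def standard(): WrapperConsole =
    new WrapperConsole(
      new BufferedReader(new InputStreamReader(System.in)),
      System.out,
      System.err
    )
}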
|
bpburns/spark-kernel
|
kernel-api/src/main/scala/com/ibm/spark/interpreter/imports/printers/WrapperConsole.scala
|
Scala
|
apache-2.0
| 1,471 |
package chapter.eight
object ExerciseFive extends App {
class Point(val x: Double, val y: Double)
class LabeledPoint(val label: String, x: Double, y: Double)
extends Point(x, y)
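// Hedged example (not part of the original exercise): the label travels with the
// coordinates inherited from Point.
val origin = new LabeledPoint("Origin", 0.0, 0.0)
println(s"${origin.label}: (${origin.x}, ${origin.y})")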
}
|
deekim/impatient-scala
|
src/main/scala/chapter/eight/ExerciseFive.scala
|
Scala
|
apache-2.0
| 191 |
import scala.collection._
object eclat{
val minSup = 0.5*6
def eclat(_prefix: Set[Int], _x: mutable.Map[Int, Set[Int]], _xx: mutable.Map[Int, Set[Int]], result: mutable.Map[Set[Int], Int]): mutable.Map[Set[Int], Int] = {
var xx = _xx
var prefix = _prefix
while(!xx.isEmpty){
var itemtid = xx.last
var isup = itemtid._2.size
var prefixRcs:Set[Int] = Set()
xx = xx.dropRight(1)
if(isup >= minSup && _x.keys.toSet.contains(itemtid._1)){ // only generate frequent itemsets for items present in _x; take the last (item, TIDs) pair and, if it meets the minimum support, intersect it with the remaining (item, TIDs) pairs
prefixRcs = prefix + itemtid._1
println(prefixRcs -> isup)
if(prefix.isEmpty) // an empty prefix means this is the non-recursive (top-level) case
result += Set(itemtid._1) -> isup
else
result += prefixRcs -> isup
var suffix:mutable.Map[Int,Set[Int]] = mutable.Map()
for(itremain <- xx){ // intersect each remaining (item, TIDs) pair with it; those still meeting the minimum support are carried into the next recursion
var tids = itemtid._2 & itremain._2
if(tids.size >= minSup){
suffix += itremain._1 -> tids // a remaining item whose intersection with item A still meets the support threshold is kept as a new entry (item, TIDs) -> (item, TIDs'); the TID count shrinks to the transactions shared with A
// if item B's intersection with A meets minSup, the recursion appears to store B's support but actually stores the support of the itemset (A, B)
}
}
eclat(prefixRcs, _x, suffix, result)
}
}
result
}
def main(args:Array[String]){
var xx:mutable.Map[Int,Set[Int]] = mutable.Map(11->Set(3,4,5,6), 22->Set(1,2,3), 33->Set(4,6), 44->Set(1,3,5), 55->Set(1,2,4,5,6), 66->Set(1,2,4,6))
var x:mutable.Map[Int,Set[Int]] = mutable.Map(11->Set(3,4,5,6), 55->Set(1,2,4,5,6))//var x = xx
var results:mutable.Map[Set[Int],Int] = mutable.Map()
eclat(Set(), x, xx, results)
println("\\n")
println(results)
}
}
/*
TID -> items
1: 22, 44, 55, 66
2: 22, 55, 66
3: 11, 22, 44
4: 11, 33, 55, 66
5: 11, 44, 55
6: 11, 33, 55, 66
*/
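// Hedged worked example (derived from the data in main above, not part of the original
// source): with minSup = 0.5 * 6 = 3, item 11 has tidset {3,4,5,6} (support 4) and item 55
// has {1,2,4,5,6} (support 5); their intersection {4,5,6} has size 3, so the itemset
// {11, 55} is emitted with support 3, whereas {11, 22} (intersection {3}, size 1) is pruned.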
|
heming621/postgraduate-
|
sparkEclatV1/eclat.scala
|
Scala
|
mit
| 2,519 |
package chapter.seven
// todo: is this correct in terms of best design practices?
package object random {
// todo: http://stackoverflow.com/questions/31275887/underscores-in-numeric-literals-in-scala
val a = 1664525
val b = 1013904223
val n = 32
var seed : Int = 0
def setSeed(seed: Int) = this.seed = seed
def nextInt(): Int = next().toInt
def nextDouble(): Double = next()
private def next(): Double = {
val next = seed * a + b % (2 ^ n)
seed = next
next
}
}
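// Hedged sketch (not part of the original exercise): the usual linear congruential
// recurrence is next = (a * seed + b) mod 2^n, whereas `seed * a + b % (2 ^ n)` above
// applies % only to b and uses ^ as bitwise XOR. A minimal self-contained variant with
// the same constants could look like this:
object LcgSketch {
  private val a = 1664525L
  private val b = 1013904223L
  private val modulus = 1L << 32 // 2^n with n = 32
  private var seed = 0L
  def setSeed(s: Int): Unit = seed = s.toLong & 0xFFFFFFFFL
  def nextInt(): Int = {
    seed = (a * seed + b) % modulus // seed stays in [0, 2^32)
    seed.toInt
  }
  def nextDouble(): Double = { nextInt(); seed.toDouble / modulus } // value in [0, 1)
}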
|
deekim/impatient-scala
|
src/main/scala/chapter/seven/ExerciseThree.scala
|
Scala
|
apache-2.0
| 500 |
package latis.ops
import latis.units.UnitOfMeasure
import latis.dm.Dataset
import latis.dm.Scalar
import latis.units.UnitConverter
import latis.time.TimeScale
import latis.time.TimeConverter
import latis.time.Time
import latis.dm.Sample
import latis.time.TimeFormat
/**
* Convert all Time variables to a Time of type Text with the given format.
* The resulting time scale is assumed to be UTC.
*/
class TimeFormatter(format: TimeFormat) extends Operation {
//TODO: consider implementing as a UnitConversion
//TODO: apply to primary time variable only? otherwise may have diff units and we'd like to reuse the same converter
//don't worry about optimizing with reusable converter, yet
val fmt = format.toString
/**
* Convert any Time variables to a Time of type Text with the desired format.
*/
override def applyToScalar(scalar: Scalar): Option[Scalar] = scalar match {
case t: Time => {
val formatted_time = t.format(format)
val md = t.getMetadata + ("units" -> fmt) + ("type" -> "text") + ("length" -> fmt.filter(_ != '\'').length.toString)
val time = Time(md, formatted_time)
Some(time)
}
case _ => Some(scalar) //no-op
}
/**
* Override to apply to both domain and range variables.
*/
override def applyToSample(sample: Sample): Option[Sample] = {
for (d <- applyToVariable(sample.domain); r <- applyToVariable(sample.range)) yield Sample(d,r)
}
}
object TimeFormatter extends OperationFactory {
/**
* Constructor used by OperationFactory.
*/
override def apply(args: Seq[String]): TimeFormatter = {
//TODO: error handling
TimeFormatter(args.head)
}
/**
* Construct a TimeFormatter with the given format string.
* The format must be supported by Java's SimpleDateFormat.
*/
def apply(format: String): TimeFormatter = {
TimeFormatter(TimeFormat(format))
}
def apply(format: TimeFormat): TimeFormatter = new TimeFormatter(format)
}
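// Hedged usage note (not part of the original source): since the factory accepts a
// SimpleDateFormat-style pattern, an ISO-like formatter could be obtained with
//   val fmt = TimeFormatter("yyyy-MM-dd'T'HH:mm:ss")
// which formats each Time variable it is applied to as text in that pattern.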
|
dlindhol/LaTiS
|
src/main/scala/latis/ops/TimeFormatter.scala
|
Scala
|
epl-1.0
| 1,970 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.concurrent.TimeUnit
import kafka.server.QuotaType.ControllerMutation
import org.apache.kafka.common.errors.ThrottlingQuotaExceededException
import org.apache.kafka.common.metrics.MetricConfig
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.metrics.Quota
import org.apache.kafka.common.metrics.QuotaViolationException
import org.apache.kafka.common.metrics.stats.TokenBucket
import org.apache.kafka.common.utils.MockTime
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Assertions.assertEquals
import org.junit.jupiter.api.Assertions.assertFalse
import org.junit.jupiter.api.Test
class StrictControllerMutationQuotaTest {
@Test
def testControllerMutationQuotaViolation(): Unit = {
val time = new MockTime(0, System.currentTimeMillis, 0)
val metrics = new Metrics(time)
val sensor = metrics.sensor("sensor", new MetricConfig()
.quota(Quota.upperBound(10))
.timeWindow(1, TimeUnit.SECONDS)
.samples(10))
val metricName = metrics.metricName("rate", "test-group")
assertTrue(sensor.add(metricName, new TokenBucket))
val quota = new StrictControllerMutationQuota(time, sensor)
assertFalse(quota.isExceeded)
// Recording a first value at T to bring the tokens to 10. Value is accepted
// because the quota is not exhausted yet.
quota.record(90)
assertFalse(quota.isExceeded)
assertEquals(0, quota.throttleTime)
// Recording a second value at T to bring the tokens to -80. Value is accepted
quota.record(90)
assertFalse(quota.isExceeded)
assertEquals(0, quota.throttleTime)
// Recording a third value at T is rejected immediately because there are no
// tokens available in the bucket.
assertThrows(classOf[ThrottlingQuotaExceededException], () => quota.record(90))
assertTrue(quota.isExceeded)
assertEquals(8000, quota.throttleTime)
// Throttle time is adjusted with time
time.sleep(5000)
assertEquals(3000, quota.throttleTime)
metrics.close()
}
}
class PermissiveControllerMutationQuotaTest {
@Test
def testControllerMutationQuotaViolation(): Unit = {
val time = new MockTime(0, System.currentTimeMillis, 0)
val metrics = new Metrics(time)
val sensor = metrics.sensor("sensor", new MetricConfig()
.quota(Quota.upperBound(10))
.timeWindow(1, TimeUnit.SECONDS)
.samples(10))
val metricName = metrics.metricName("rate", "test-group")
assertTrue(sensor.add(metricName, new TokenBucket))
val quota = new PermissiveControllerMutationQuota(time, sensor)
assertFalse(quota.isExceeded)
// Recording a first value at T to bring the tokens to 10. Value is accepted
// because the quota is not exhausted yet.
quota.record(90)
assertFalse(quota.isExceeded)
assertEquals(0, quota.throttleTime)
// Recording a second value at T to bring the tokens to -80. Value is accepted
quota.record(90)
assertFalse(quota.isExceeded)
assertEquals(8000, quota.throttleTime)
// Recording a third value at T to bring the tokens to -170. Value is accepted
// even though the quota is exhausted.
quota.record(90)
assertFalse(quota.isExceeded) // quota is never exceeded
assertEquals(17000, quota.throttleTime)
// Throttle time is adjusted with time
time.sleep(5000)
assertEquals(12000, quota.throttleTime)
metrics.close()
}
}
class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest {
private val User = "ANONYMOUS"
private val ClientId = "test-client"
private val config = ClientQuotaManagerConfig(
numQuotaSamples = 10,
quotaWindowSizeSeconds = 1
)
private def withQuotaManager(f: ControllerMutationQuotaManager => Unit): Unit = {
val quotaManager = new ControllerMutationQuotaManager(config, metrics, time,"", None)
try {
f(quotaManager)
} finally {
quotaManager.shutdown()
}
}
@Test
def testThrottleTime(): Unit = {
import ControllerMutationQuotaManager._
val time = new MockTime(0, System.currentTimeMillis, 0)
val metrics = new Metrics(time)
val sensor = metrics.sensor("sensor")
val metricName = metrics.metricName("tokens", "test-group")
sensor.add(metricName, new TokenBucket)
val metric = metrics.metric(metricName)
assertEquals(0, throttleTimeMs(new QuotaViolationException(metric, 0, 10), time.milliseconds()))
assertEquals(500, throttleTimeMs(new QuotaViolationException(metric, -5, 10), time.milliseconds()))
assertEquals(1000, throttleTimeMs(new QuotaViolationException(metric, -10, 10), time.milliseconds()))
}
@Test
def testControllerMutationQuotaViolation(): Unit = {
withQuotaManager { quotaManager =>
quotaManager.updateQuota(Some(User), Some(ClientId), Some(ClientId),
Some(Quota.upperBound(10)))
val queueSizeMetric = metrics.metrics().get(
metrics.metricName("queue-size", ControllerMutation.toString, ""))
// Verify that there is no quota violation if we remain under the quota.
for (_ <- 0 until 10) {
assertEquals(0, maybeRecord(quotaManager, User, ClientId, 10))
time.sleep(1000)
}
assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt)
// Create a spike worth of 110 mutations.
// Current tokens in the bucket = 100
// As we use the Strict enforcement, the quota is checked before updating the rate. Hence,
// the spike is accepted and no quota violation error is raised.
var throttleTime = maybeRecord(quotaManager, User, ClientId, 110)
assertEquals(0, throttleTime, "Should not be throttled")
// Create a spike worth of 110 mutations.
// Current tokens in the bucket = 100 - 110 = -10
// As the quota is already violated, the spike is rejected immediately without updating the
// rate. The client must wait:
// 10 / 10 = 1s
throttleTime = maybeRecord(quotaManager, User, ClientId, 110)
assertEquals(1000, throttleTime, "Should be throttled")
// Throttle
throttle(quotaManager, User, ClientId, throttleTime, callback)
assertEquals(1, queueSizeMetric.metricValue.asInstanceOf[Double].toInt)
// After a request is delayed, the callback cannot be triggered immediately
quotaManager.throttledChannelReaper.doWork()
assertEquals(0, numCallbacks)
// Callback can only be triggered after the delay time passes
time.sleep(throttleTime)
quotaManager.throttledChannelReaper.doWork()
assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt)
assertEquals(1, numCallbacks)
// Retry to spike worth of 110 mutations after having waited the required throttle time.
// Current tokens in the bucket = 0
throttleTime = maybeRecord(quotaManager, User, ClientId, 110)
assertEquals(0, throttleTime, "Should be throttled")
}
}
@Test
def testNewStrictQuotaForReturnsUnboundedQuotaWhenQuotaIsDisabled(): Unit = {
withQuotaManager { quotaManager =>
assertEquals(UnboundedControllerMutationQuota,
quotaManager.newStrictQuotaFor(buildSession(User), ClientId))
}
}
@Test
def testNewStrictQuotaForReturnsStrictQuotaWhenQuotaIsEnabled(): Unit = {
withQuotaManager { quotaManager =>
quotaManager.updateQuota(Some(User), Some(ClientId), Some(ClientId),
Some(Quota.upperBound(10)))
val quota = quotaManager.newStrictQuotaFor(buildSession(User), ClientId)
assertTrue(quota.isInstanceOf[StrictControllerMutationQuota])
}
}
@Test
def testNewPermissiveQuotaForReturnsUnboundedQuotaWhenQuotaIsDisabled(): Unit = {
withQuotaManager { quotaManager =>
assertEquals(UnboundedControllerMutationQuota,
quotaManager.newPermissiveQuotaFor(buildSession(User), ClientId))
}
}
@Test
def testNewPermissiveQuotaForReturnsStrictQuotaWhenQuotaIsEnabled(): Unit = {
withQuotaManager { quotaManager =>
quotaManager.updateQuota(Some(User), Some(ClientId), Some(ClientId),
Some(Quota.upperBound(10)))
val quota = quotaManager.newPermissiveQuotaFor(buildSession(User), ClientId)
assertTrue(quota.isInstanceOf[PermissiveControllerMutationQuota])
}
}
}
|
guozhangwang/kafka
|
core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala
|
Scala
|
apache-2.0
| 9,084 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.learn.hooks
import org.platanios.tensorflow.api.core.Graph
import org.platanios.tensorflow.api.core.client.Session
import org.platanios.tensorflow.api.io.events.{SummaryFileWriter, SummaryFileWriterCache}
import org.platanios.tensorflow.api.ops.{Output, Summary, UntypedOp}
import org.platanios.tensorflow.api.tensors.Tensor
import org.platanios.tensorflow.proto.SessionLog
import java.nio.file.Path
/** Saves summaries to files based on a [[HookTrigger]].
*
* @param directory Directory in which to save the summaries.
* @param trigger Hook trigger specifying when this hook is triggered (i.e., when it executes). If you only want
* to save the summary values at the end of a run and not during, then you should set `trigger` to
* [[NoHookTrigger]] and `triggerAtEnd` to `true`.
* @param triggerAtEnd If `true`, this hook will be triggered at the end of the run. Note that if this flag is set to
* `true`, then all summaries must be computable without using a feed map for the
* [[Session.run()]] call.
* @param collection Graph collection from which to obtain the summaries. Defaults to `Graph.Keys.SUMMARIES`.
*
* @author Emmanouil Antonios Platanios
*/
class SummarySaver protected (
val directory: Path,
val trigger: HookTrigger = StepHookTrigger(10),
val triggerAtEnd: Boolean = true,
val collection: Graph.Key[Output[Any]] = Graph.Keys.SUMMARIES
) extends TriggeredHook(trigger, triggerAtEnd) {
protected var summary : Option[Output[String]] = None
protected var summaryWriter: Option[SummaryFileWriter] = None
override protected def begin(): Unit = {
summary = Summary.mergeAll(collection)
if (summary.isDefined)
summaryWriter = Some(SummaryFileWriterCache.get(directory))
}
override protected def end(session: Session): Unit = {
summaryWriter.foreach(_.flush())
}
override protected def fetches: Seq[Output[Any]] = summary.toSeq.map(_.asUntyped)
override protected def targets: Set[UntypedOp] = Set.empty
override protected def onTrigger(
step: Long,
elapsed: Option[(Double, Int)],
runResult: Hook.SessionRunResult[Seq[Tensor[Any]]],
session: Session
): Unit = {
summaryWriter.foreach(writer => {
if (step == 0L)
writer.writeSessionLog(SessionLog.newBuilder().setStatus(SessionLog.SessionStatus.START).build(), step)
writer.writeSummaryString(runResult.result.head.scalar.asInstanceOf[String], step)
writer.flush()
})
}
}
object SummarySaver {
def apply(
directory: Path,
trigger: HookTrigger = StepHookTrigger(10),
triggerAtEnd: Boolean = true,
collection: Graph.Key[Output[Any]] = Graph.Keys.SUMMARIES
): SummarySaver = {
new SummarySaver(directory, trigger, triggerAtEnd, collection)
}
}
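// Hedged usage sketch (not part of the original source): a saver that writes merged
// summaries every 100 steps to a hypothetical directory.
//   val hook = SummarySaver(java.nio.file.Paths.get("/tmp/summaries"), StepHookTrigger(100))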
|
eaplatanios/tensorflow_scala
|
modules/api/src/main/scala/org/platanios/tensorflow/api/learn/hooks/SummarySaver.scala
|
Scala
|
apache-2.0
| 3,560 |
package dotty.tools
package dotc
package fromtasty
import scala.language.unsafeNulls
import io.{JarArchive, AbstractFile, Path}
import core.Contexts._
import java.io.File
class TASTYRun(comp: Compiler, ictx: Context) extends Run(comp, ictx) {
override def compile(files: List[AbstractFile]): Unit = {
val units = tastyUnits(files)
compileUnits(units)
}
private def tastyUnits(files: List[AbstractFile]): List[TASTYCompilationUnit] =
val fromTastyIgnoreList = ctx.settings.YfromTastyIgnoreList.value.toSet
// Resolve class names of tasty and jar files
val classNames = files.flatMap { file =>
file.extension match
case "jar" =>
JarArchive.open(Path(file.path), create = false).allFileNames()
.map(_.stripPrefix(File.separator)) // change paths from absolute to relative
.filter(e => Path.extension(e) == "tasty" && !fromTastyIgnoreList(e))
.map(e => e.stripSuffix(".tasty").replace(File.separator, "."))
.toList
case "tasty" => TastyFileUtil.getClassName(file)
case _ =>
report.error(s"File extension is not `tasty` or `jar`: ${file.path}")
Nil
}
classNames.map(new TASTYCompilationUnit(_))
}
|
dotty-staging/dotty
|
compiler/src/dotty/tools/dotc/fromtasty/TASTYRun.scala
|
Scala
|
apache-2.0
| 1,238 |
package akka.persistence.hbase.snapshot
import java.util.{ArrayList => JArrayList}
import akka.actor.ActorSystem
import akka.persistence.hbase.common.TestingEventProtocol.DeletedSnapshotsFor
import akka.persistence.hbase.common._
import akka.persistence.hbase.journal._
import akka.persistence.serialization.Snapshot
import akka.persistence.{SelectedSnapshot, SnapshotMetadata, SnapshotSelectionCriteria}
import org.apache.hadoop.hbase.CellUtil
import org.apache.hadoop.hbase.client.HTable
import org.hbase.async.{HBaseClient, KeyValue}
import scala.collection.JavaConverters._
import scala.collection.immutable
import scala.concurrent.Future
import scala.util.{Failure, Success}
class HBaseSnapshotter(val system: ActorSystem, val hBasePersistenceSettings: PersistencePluginSettings, val client: HBaseClient)
extends HadoopSnapshotter
with HBaseUtils with AsyncBaseUtils with HBaseSerialization
with DeferredConversions {
val log = system.log
implicit val settings = hBasePersistenceSettings
lazy val table = hBasePersistenceSettings.snapshotTable
lazy val family = hBasePersistenceSettings.snapshotFamily
lazy val hTable = new HTable(settings.hadoopConfiguration, tableBytes)
implicit override val pluginDispatcher = system.dispatchers.lookup("akka-hbase-persistence-dispatcher")
type AsyncBaseRows = JArrayList[JArrayList[KeyValue]]
/** Snapshots we're in progress of saving */
private var saving = immutable.Set.empty[SnapshotMetadata]
import akka.persistence.hbase.common.Columns._
import akka.persistence.hbase.journal.RowTypeMarkers._
def loadAsync(persistenceId: String, criteria: SnapshotSelectionCriteria): Future[Option[SelectedSnapshot]] = {
log.debug("Loading async for persistenceId: [{}] on criteria: {}", persistenceId, criteria)
def scanPartition(): Option[SelectedSnapshot] = {
val startScanKey = SnapshotRowKey.firstForPersistenceId(persistenceId)
val stopScanKey = SnapshotRowKey.lastForPersistenceId(persistenceId, toSequenceNr = criteria.maxSequenceNr)
val scan = preparePrefixScan(tableBytes, familyBytes, stopScanKey, startScanKey, persistenceId, onlyRowKeys = false)
scan.addColumn(familyBytes, Message)
scan.setReversed(true)
scan.setMaxResultSize(1)
val scanner = hTable.getScanner(scan)
try {
var res = scanner.next()
while (res != null) {
val seqNr = RowKey.extractSeqNr(res.getRow)
val messageCell = res.getColumnLatestCell(familyBytes, Message)
val snapshot = snapshotFromBytes(CellUtil.cloneValue(messageCell))
if (seqNr <= criteria.maxSequenceNr)
return Some(SelectedSnapshot(SnapshotMetadata(persistenceId, seqNr), snapshot.data)) // todo timestamp)
res = scanner.next()
}
None
} finally {
scanner.close()
}
}
val f = Future(scanPartition())
f onFailure { case x => log.error(x, "Unable to read snapshot for persistenceId: {}, on criteria: {}", persistenceId, criteria) }
f
}
def saveAsync(meta: SnapshotMetadata, snapshot: Any): Future[Unit] = {
log.debug("Saving async, of {}", meta)
saving += meta
serialize(Snapshot(snapshot)) match {
case Success(serializedSnapshot) =>
executePut(
SnapshotRowKey(meta.persistenceId, meta.sequenceNr).toBytes,
Array(Marker, Message),
Array(SnapshotMarkerBytes, serializedSnapshot)
)
case Failure(ex) =>
Future failed ex
}
}
def saved(meta: SnapshotMetadata): Unit = {
log.debug("Saved snapshot for meta: {}", meta)
saving -= meta
}
def delete(meta: SnapshotMetadata): Unit = {
log.debug("Deleting snapshot for meta: {}", meta)
saving -= meta
executeDelete(SnapshotRowKey(meta.persistenceId, meta.sequenceNr).toBytes)
}
def delete(persistenceId: String, criteria: SnapshotSelectionCriteria): Unit = {
log.debug("Deleting snapshot for persistenceId: [{}], criteria: {}", persistenceId, criteria)
val scanner = newScanner()
val start = SnapshotRowKey.firstForPersistenceId(persistenceId)
val stop = SnapshotRowKey.lastForPersistenceId(persistenceId, criteria.maxSequenceNr)
scanner.setStartKey(start.toBytes)
scanner.setStopKey(stop.toBytes)
scanner.setKeyRegexp(s"""$persistenceId-.*""")
def handleRows(in: AnyRef): Future[Unit] = in match {
case null =>
log.debug("Finished scanning for snapshots to delete")
flushWrites()
scanner.close()
Future.successful()
case rows: AsyncBaseRows =>
val deletes = for {
row <- rows.asScala
col <- row.asScala.headOption
if isSnapshotRow(row.asScala)
} yield deleteRow(col.key)
go() flatMap { _ => Future.sequence(deletes) }
}
def go(): Future[Unit] = scanner.nextRows() flatMap handleRows
go() map {
case _ if settings.publishTestingEvents => system.eventStream.publish(DeletedSnapshotsFor(persistenceId, criteria))
}
}
}
|
ktoso/akka-persistence-hbase
|
src/main/scala/akka/persistence/hbase/snapshot/HBaseSnapshotter.scala
|
Scala
|
apache-2.0
| 5,067 |
package mesosphere.marathon.api.v2
import mesosphere.marathon.api.TaskKiller
import mesosphere.marathon.health.HealthCheckManager
import mesosphere.marathon.state.{ GroupManager, PathId, Timestamp }
import mesosphere.marathon.tasks.{ MarathonTasks, TaskIdUtil, TaskTracker }
import mesosphere.marathon.{ MarathonConf, MarathonSchedulerService, MarathonSpec }
import org.mockito.Matchers.{ any, eq => equalTo }
import org.mockito.Mockito._
import org.scalatest.Matchers
import scala.concurrent.duration._
class TasksResourceTest extends MarathonSpec with Matchers {
var service: MarathonSchedulerService = _
var taskTracker: TaskTracker = _
var taskKiller: TaskKiller = _
var config: MarathonConf = _
var groupManager: GroupManager = _
var healthCheckManager: HealthCheckManager = _
var taskResource: TasksResource = _
var taskIdUtil: TaskIdUtil = _
before {
service = mock[MarathonSchedulerService]
taskTracker = mock[TaskTracker]
taskKiller = mock[TaskKiller]
config = mock[MarathonConf]
groupManager = mock[GroupManager]
healthCheckManager = mock[HealthCheckManager]
taskIdUtil = mock[TaskIdUtil]
taskResource = new TasksResource(
service,
taskTracker,
taskKiller,
config,
groupManager,
healthCheckManager,
taskIdUtil
)
}
test("killTasks") {
val body = """{"ids": ["task-1", "task-2"]}"""
val bodyBytes = body.toCharArray.map(_.toByte)
val taskId1 = "task-1"
val taskId2 = "task-2"
val task1 = MarathonTasks.makeTask(taskId1, "host", ports = Nil, attributes = Nil, version = Timestamp.now())
val task2 = MarathonTasks.makeTask(taskId2, "host", ports = Nil, attributes = Nil, version = Timestamp.now())
val app1 = PathId("/my/app-1")
val app2 = PathId("/my/app-2")
when(config.zkTimeoutDuration).thenReturn(5.seconds)
when(taskIdUtil.appId(taskId1)).thenReturn(app1)
when(taskIdUtil.appId(taskId2)).thenReturn(app2)
when(taskTracker.fetchTask(app1, taskId1)).thenReturn(Some(task1))
when(taskTracker.fetchTask(app2, taskId2)).thenReturn(Some(task2))
val response = taskResource.killTasks(scale = false, body = bodyBytes)
response.getStatus shouldEqual 200
verify(taskIdUtil, atLeastOnce).appId(taskId1)
verify(taskIdUtil, atLeastOnce).appId(taskId2)
verify(taskKiller, times(1)).kill(equalTo(app1), any(), force = equalTo(true))
verify(taskKiller, times(1)).kill(equalTo(app2), any(), force = equalTo(true))
verifyNoMoreInteractions(taskKiller)
}
}
|
bsideup/marathon
|
src/test/scala/mesosphere/marathon/api/v2/TasksResourceTest.scala
|
Scala
|
apache-2.0
| 2,541 |
package net.snowflake.spark.snowflake
import net.snowflake.client.jdbc.SnowflakeSQLException
import net.snowflake.spark.snowflake.Utils.SNOWFLAKE_SOURCE_NAME
import net.snowflake.spark.snowflake.test.TestHook
import net.snowflake.spark.snowflake.test.TestHookFlag.{
TH_WRITE_ERROR_AFTER_COPY_INTO,
TH_WRITE_ERROR_AFTER_CREATE_NEW_TABLE,
TH_WRITE_ERROR_AFTER_DROP_OLD_TABLE,
TH_WRITE_ERROR_AFTER_TRUNCATE_TABLE
}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SaveMode}
import scala.util.Random
// scalastyle:off println
class TruncateTableSuite extends IntegrationSuiteBase {
import testImplicits._
val normalTable = s"test_table_$randomSuffix"
val specialTable = s""""test_table_.'!@#$$%^&*"" $randomSuffix""""""
val targetTable = s""""test_table_target_$randomSuffix""""""
// This test will test normal table and table name including special characters
val tableNames = Array(normalTable, specialTable)
lazy val st1 = new StructType(
Array(
StructField("num1", LongType, nullable = false),
StructField("num2", FloatType, nullable = false)
)
)
lazy val df1: DataFrame = sparkSession.createDataFrame(
sc.parallelize(1 to 100)
.map[Row](_ => {
val rand = new Random(System.nanoTime())
Row(rand.nextLong(), rand.nextFloat())
}),
st1
)
lazy val st2 = new StructType(
Array(
StructField("num1", IntegerType, nullable = false),
StructField("num2", IntegerType, nullable = false)
)
)
lazy val df2: DataFrame = sparkSession.createDataFrame(
sc.parallelize(1 to 100)
.map[Row](_ => {
val rand = new Random(System.nanoTime())
Row(rand.nextInt(), rand.nextInt())
}),
st2
)
override def beforeAll(): Unit = {
super.beforeAll()
}
test("use truncate table with staging table") {
tableNames.foreach(table => {
println(s"""Test table: "$table"""")
jdbcUpdate(s"drop table if exists $table")
// create one table
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "on")
.mode(SaveMode.Overwrite)
.save()
// replace previous table and overwrite schema
df1.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "on")
.mode(SaveMode.Overwrite)
.save()
// truncate previous table and keep schema
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "on")
.option("usestagingtable", "on")
.mode(SaveMode.Overwrite)
.save()
// check schema
assert(checkSchema1(table))
})
}
test("use truncate table without staging table") {
tableNames.foreach(table => {
println(s"""Test table: "$table"""")
jdbcUpdate(s"drop table if exists $table")
// create table with Append mode
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "on")
.option("usestagingtable", "off")
.mode(SaveMode.Append)
.save()
assert(checkSchema2(table))
jdbcUpdate(s"drop table if exists $table")
// create table with Overwrite mode
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "on")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
assert(checkSchema2(table))
// replace previous table and overwrite schema
df1.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
assert(checkSchema1(table))
// truncate table and keep schema
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "on")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
// check schema
assert(checkSchema1(table))
})
}
test("don't truncate table with staging table") {
tableNames.foreach(table => {
println(s"""Test table: "$table"""")
jdbcUpdate(s"drop table if exists $table")
// create one table
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "on")
.mode(SaveMode.Overwrite)
.save()
// replace previous table and overwrite schema
df1.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "on")
.mode(SaveMode.Overwrite)
.save()
// truncate previous table and overwrite schema
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "on")
.mode(SaveMode.Overwrite)
.save()
// check schema
assert(checkSchema2(table))
})
}
test("don't truncate table without staging table") {
tableNames.foreach(table => {
println(s"""Test table: "$table"""")
jdbcUpdate(s"drop table if exists $table")
// create one table
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
// replace previous table and overwrite schema
df1.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
// truncate previous table and overwrite schema
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
// check schema
assert(checkSchema2(table))
})
}
test("negative test 1: original table doesn't exist, error happen when writing") {
tableNames.foreach(table => {
println(s"""Test table: "$table"""")
// Make sure the table doesn't exist
jdbcUpdate(s"drop table if exists $table")
assert(!DefaultJDBCWrapper.tableExists(params, table.toString))
// Old table doesn't exist so DROP table and TRUNCATE table never happen
val testConditions = Array(
(TH_WRITE_ERROR_AFTER_CREATE_NEW_TABLE, df2, table, "on", "off", SaveMode.Append),
(TH_WRITE_ERROR_AFTER_COPY_INTO, df2, table, "on", "off", SaveMode.Overwrite)
)
testConditions.map(x => {
println(s"Test case 1 condition: $x")
val testFlag = x._1
val df = x._2
val tableName = x._3
val truncate_table = x._4
val usestagingtable = x._5
val saveMode = x._6
assertThrows[SnowflakeSQLException] {
TestHook.enableTestFlagOnly(testFlag)
df.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", tableName)
.option("truncate_table", truncate_table)
.option("usestagingtable", usestagingtable)
.mode(saveMode)
.save()
}
// The original table should not exist
assert(!DefaultJDBCWrapper.tableExists(params, table.toString))
})
// Disable test hook in the end
TestHook.disableTestHook()
})
}
test("negative test 2: original table exists, error happen when writing") {
tableNames.foreach(table => {
println(s"""Test table: "$table"""")
// Make sure the table doesn't exist
jdbcUpdate(s"drop table if exists $table")
// create one table
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", table)
.option("truncate_table", "off")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
val oldRowCount = sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", s"$table")
.load()
.count()
// Test only covers truncate_table=on and usestagingtable=off
val testConditions = Array(
// In Append mode, failure may happen after COPY_INTO
(TH_WRITE_ERROR_AFTER_COPY_INTO, df2, table, "on", "off", SaveMode.Append),
// In Overwrite mode, failure may happen after truncate table
(TH_WRITE_ERROR_AFTER_TRUNCATE_TABLE, df2, table, "on", "off", SaveMode.Overwrite),
// In Overwrite mode, failure may happen after copy into
(TH_WRITE_ERROR_AFTER_COPY_INTO, df2, table, "on", "off", SaveMode.Overwrite)
)
testConditions.map(x => {
println(s"Test case 2 condition: $x")
val testFlag = x._1
val df = x._2
val tableName = x._3
val truncate_table = x._4
val usestagingtable = x._5
val saveMode = x._6
assertThrows[SnowflakeSQLException] {
TestHook.enableTestFlagOnly(testFlag)
df.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", tableName)
.option("truncate_table", truncate_table)
.option("usestagingtable", usestagingtable)
.mode(saveMode)
.save()
}
val newRowCount = sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", s"$table")
.load()
.count()
assert(newRowCount == oldRowCount)
})
// Disable test hook in the end
TestHook.disableTestHook()
})
}
def checkSchema2(tableName: String): Boolean = {
val st = DefaultJDBCWrapper.resolveTable(conn, tableName, params)
val st1 = new StructType(
Array(
StructField("NUM1", DecimalType(38, 0), nullable = false),
StructField("NUM2", DecimalType(38, 0), nullable = false)
)
)
st.equals(st1)
}
// This test case is used to reproduce/test SNOW-222104
// The reproducing conditions are:
// 1. Write data frame to a table with OVERWRITE and (usestagingtable=on truncate_table=off (they are default)).
// 2. table name includes database name and schema name.
// 3. sfSchema is configured to a different schema
// 4. The user has privilege to create stage but doesn't have privilege to create table on sfSchema
//
// Below is how to create the env to reproduce it and test
// 1. create a new role TESTROLE_SPARK_2 with ADMIN.
// 2. ADMIN grants USAGE and CREATE SCHEMA privilege for testdb_spark to TESTROLE_SPARK_2
// 3. ADMIN GRANT ROLE TESTROLE_SPARK_2 to USER TEST_SPARK;
// 4. TEST_SPARK logs in and switches to TESTROLE_SPARK_2.
// 5. create a managed schema: create schema TEST_SCHEMA_NO_CREATE_TABLE with managed access;
// 6. grant USAGE and CREATE STAGE on TEST_SCHEMA_NO_CREATE_TABLE to TESTROLE_SPARK.
//
// TESTROLE_SPARK is the default role for TEST_SPARK.
// 1. set sfSchema as TEST_SCHEMA_NO_CREATE_TABLE
// 2. write with OverWrite to table: testdb_spark.spark_test.table_name
//
// NOTE:
// 1. The test env is only setup for sfctest0 on AWS. So this test only run on AWS.
// 2. Configure truncate_table = on and usestagingtable=off can workaround this issue.
test("write table with different schema") {
val accountName = System.getenv(SNOWFLAKE_TEST_ACCOUNT)
val isAWS = accountName == null || accountName.equals("aws")
// It is necessary to check extraTestForCoverage because the released SC back-compatibility
// regress test for preprod3/QA is on AWS too, but the test schema is not set up there.
if (isAWS && extraTestForCoverage) {
tableNames.foreach(table => {
println(s"""Test table: "$table"""")
jdbcUpdate(s"drop table if exists $table")
// Use a different schema, current user has no permission to CREATE TABLE
// but has permission to CREATE STAGE.
val sfOptions = replaceOption(connectorOptionsNoTable, "sfschema", "TEST_SCHEMA_NO_CREATE_TABLE")
val tableFullName = s"testdb_spark.spark_test.$table"
// create one table
df2.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(sfOptions)
.option("dbtable", tableFullName)
.option("truncate_table", "off")
.option("usestagingtable", "on")
.mode(SaveMode.Overwrite)
.save()
// check schema
assert(checkSchema2(tableFullName))
})
}
}
test("Write empty DataFrame and target table doesn't exist: SNOW-297134") {
import testImplicits._
val emptyDf = Seq.empty[(Int, String)].toDF("key", "value")
// The CSV and PARQUET writes below would generate empty files,
// so Snowflake should also create an empty table.
// emptyDf.write.csv("/tmp/output/csv")
// emptyDf.write.parquet("/tmp/output/parquet")
// create a table has 3 columns.
// Make sure target table doesn't exist
jdbcUpdate(s"drop table if exists $targetTable")
// Write empty DataFrame with Overwrite mode
emptyDf.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.mode(SaveMode.Overwrite)
.save()
// success reads the target table
var readDF = sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.load()
assert(readDF.count() == 0 && readDF.schema.fields.length == 2)
// Make sure target table doesn't exist
jdbcUpdate(s"drop table if exists $targetTable")
// Write empty DataFrame with Append mode
emptyDf.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.mode(SaveMode.Append)
.save()
// success reads the target table
readDF = sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.load()
assert(readDF.count() == 0 && readDF.schema.fields.length == 2)
}
test("Write empty DataFrame and target table exists: SNOW-495389") {
import testImplicits._
val emptyDf = Seq.empty[(Int, String)].toDF("key", "value")
// Create target table
jdbcUpdate(s"create or replace table $targetTable (c1 int, c2 string)")
jdbcUpdate(s"insert into $targetTable values (123, 'abc')")
// case 1: Write DF with Overwrite, truncate_table = on, and usestagingtable = off
emptyDf.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.option("truncate_table", "on")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
// target table is empty, and table is not re-created.
assert(sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("query", s"select c1 from $targetTable") // "c1" is old table column name
.load().count() == 0)
// case 2: Write DF with Overwrite, truncate_table = off, and usestagingtable = off
jdbcUpdate(s"create or replace table $targetTable (c1 int, c2 string)")
jdbcUpdate(s"insert into $targetTable values (123, 'abc')")
emptyDf.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.option("truncate_table", "off")
.option("usestagingtable", "off")
.mode(SaveMode.Overwrite)
.save()
// target table is empty, and table is recreated.
assert(sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("query", s"select key from $targetTable") // "key" is new table column name
.load().count() == 0)
}
test("Write empty DataFrame and target table exists: SNOW-297134") {
import testImplicits._
val emptyDf = Seq.empty[(Int, String)].toDF("key", "value")
// The CSV and PARQUET writes below would generate empty files,
// so Snowflake should also create an empty table.
// emptyDf.write.csv("/tmp/output/csv")
// emptyDf.write.parquet("/tmp/output/parquet")
// create a table that has 3 columns.
jdbcUpdate(s"create or replace table $targetTable (c1 int, c2 int, c3 int)")
jdbcUpdate(s"insert into $targetTable values (1, 2 ,3)")
// Write empty DataFrame with Overwrite mode
emptyDf.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.mode(SaveMode.Overwrite)
.save()
// success reads the target table
var readDF = sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.load()
// The table has been over written, so it only has 2 columns.
assert(readDF.schema.fields.length == 2 && readDF.count() == 0)
jdbcUpdate(s"create or replace table $targetTable (c1 int, c2 int, c3 int)")
jdbcUpdate(s"insert into $targetTable values (1, 2 ,3)")
// Write empty DataFrame with Append mode
emptyDf.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.mode(SaveMode.Append)
.save()
// success reads the target table
readDF = sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.load()
// The table has not been over written, so it has 3 columns
assert(readDF.schema.fields.length == 3 && readDF.count() == 1)
}
test("Negative test to write empty DataFrame: SNOW-297134") {
import testImplicits._
val emptyDf = Seq.empty[(Int, String)].toDF("key", "value")
// Make sure target table doesn't exist
jdbcUpdate(s"drop table if exists $targetTable")
// Write empty DataFrame
emptyDf.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.option(Parameters.PARAM_INTERNAL_SKIP_WRITE_WHEN_WRITING_EMPTY_DATAFRAME, "true")
.mode(SaveMode.Overwrite)
.save()
// Fail to read table because no table is created in last step.
assertThrows[Exception]({
sparkSession.read
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.load()
.count()
})
}
test("AWS use region url with small data") {
// This test case only affect AWS account
val accountName: String = Option(System.getenv(SNOWFLAKE_TEST_ACCOUNT)).getOrElse("aws")
if (accountName.equalsIgnoreCase("aws")) {
// Make sure target table doesn't exist
jdbcUpdate(s"create or replace table $targetTable (c1 int, c2 string)")
// Write DataFrame and commit
val df = Seq[(Int, String)]((1, "a"), (2, "b")).toDF("key", "value")
df.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.option(Parameters.PARAM_INTERNAL_USE_AWS_REGION_URL, "false")
.mode(SaveMode.Append)
.save()
assert(getRowCount(targetTable) == 2)
df.write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.option(Parameters.PARAM_INTERNAL_USE_AWS_REGION_URL, "true")
.mode(SaveMode.Append)
.save()
assert(getRowCount(targetTable) == 4)
}
}
test("AWS use region url with large data") {
// This test case only affect AWS account
val accountName: String = Option(System.getenv(SNOWFLAKE_TEST_ACCOUNT)).getOrElse("aws")
if (accountName.equalsIgnoreCase("aws")) {
def getRandomString(len: Int): String = {
Random.alphanumeric take len mkString ""
}
val partitionCount = 4
val rowCountPerPartition = 1024
val strValue = getRandomString(512)
// Create RDD which generates 1 large partition
val testRDD: RDD[Row] = sparkSession.sparkContext
.parallelize(Seq[Int](), partitionCount)
.mapPartitions { _ => {
(1 to rowCountPerPartition).map { _ => {
Row(strValue, strValue, strValue, strValue)
}
}.iterator
}
}
val schema = StructType(
List(
StructField("str1", StringType),
StructField("str2", StringType),
StructField("str3", StringType),
StructField("str4", StringType)
)
)
// Write to snowflake
sparkSession.createDataFrame(testRDD, schema).write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.option(Parameters.PARAM_INTERNAL_USE_AWS_REGION_URL, "false")
.mode(SaveMode.Overwrite)
.save()
assert(getRowCount(targetTable) == partitionCount * rowCountPerPartition)
// Write to the table again
sparkSession.createDataFrame(testRDD, schema).write
.format(SNOWFLAKE_SOURCE_NAME)
.options(connectorOptionsNoTable)
.option("dbtable", targetTable)
.option(Parameters.PARAM_INTERNAL_USE_AWS_REGION_URL, "true")
.mode(SaveMode.Append)
.save()
assert(getRowCount(targetTable) == partitionCount * rowCountPerPartition * 2)
}
}
def checkSchema1(tableName: String): Boolean = {
val st = DefaultJDBCWrapper.resolveTable(conn, tableName, params)
val st1 = new StructType(
Array(
StructField("NUM1", DecimalType(38, 0), nullable = false),
StructField("NUM2", DoubleType, nullable = false)
)
)
st.equals(st1)
}
override def afterAll(): Unit = {
TestHook.disableTestHook()
jdbcUpdate(s"drop table if exists $normalTable")
jdbcUpdate(s"drop table if exists $specialTable")
jdbcUpdate(s"drop table if exists $targetTable")
super.afterAll()
}
}
// scalastyle:on println
|
snowflakedb/spark-snowflake
|
src/it/scala/net/snowflake/spark/snowflake/TruncateTableSuite.scala
|
Scala
|
apache-2.0
| 23,227 |
package models
import anorm.SqlParser._
import anorm._
import play.api.Play.current
import play.api.db._
import scala.language.postfixOps
case class User(username: String, password: String, admin: Boolean = false) {
def toAdmin =
copy(admin = true)
}
object User {
/**
* Parse a User from a ResultSet
*/
val simple = {
get[String]("username") ~ get[String]("password") ~ get[Boolean]("admin") map {
case username ~ password ~ admin => User(username, password, admin)
}
}
/**
* Get a user by its username
*/
def findByUsername(username: String) = {
DB.withConnection { implicit connection =>
SQL("select * from user where username = {username}")
.on('username -> username)
.as(simple.singleOpt)
}
}
/**
* Get a user ID by its username and password
*/
def findByCredentials(username: String, password: String) = {
DB.withConnection { implicit connection =>
SQL(s"select * from user where username = '${username}' and password = '${password}'")
.as(simple.singleOpt)
}
}
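// Hedged note (not part of the original source): unlike findByUsername above, this method
// interpolates raw input directly into the SQL text. A parameterized variant in the same
// anorm style would be:
//
//   SQL("select * from user where username = {username} and password = {password}")
//     .on('username -> username, 'password -> password)
//     .as(simple.singleOpt)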
/**
* Count number of users
*/
def count() = {
DB.withConnection { implicit connection =>
SQL("select count(*) from user")
.as(scalar[Long].single)
}
}
/**
* Check whether the user table is empty
*/
def isEmpty = count() == 0
/**
* Insert a new user
*
* @param user The user values.
*/
def insert(user: User) = {
DB.withConnection { implicit connection =>
SQL(
"""
insert into user set
username = {username},
password = {password},
admin = {admin}
"""
).on(
'username -> user.username,
'password -> user.password,
'admin -> user.admin
).executeUpdate()
}
}
/**
* Delete a user.
*
* @param username Username of the user to delete.
*/
def delete(username: String) = {
DB.withConnection { implicit connection =>
SQL("delete from user where username = {username}").on('username -> username).executeUpdate()
}
}
}
|
gewoonrik/MAP
|
bad/app/models/User.scala
|
Scala
|
mit
| 2,123 |
package x7c1.wheat.harvest
import java.io.PrintWriter
import play.twirl.api.TxtFormat
import sbt._
case class JavaSource(
code: String,
file: File
)
class JavaSourceFactory [A <: ResourceParts](
targetDir: File,
className: String,
template: A => TxtFormat.Appendable,
partsFactory: ResourcePartsFactory[A] ){
def createFrom(resource: ParsedResource): JavaSource = {
val parts = partsFactory.createFrom(resource)
JavaSource(
code = template(parts).body,
file = targetDir / s"$className.java"
)
}
}
trait JavaSourcesFactory {
def createFrom(resource: ParsedResource): Seq[JavaSource]
}
object JavaSourceWriter {
def write(source: JavaSource): Unit = {
val parent = source.file.getParentFile
if (!parent.exists()){
parent.mkdirs()
}
val writer = new PrintWriter(source.file)
writer.write(source.code)
writer.close()
}
}
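/*
 * A hypothetical end-to-end sketch, not in the original file: wire a parts factory and a Twirl
 * template into a JavaSourceFactory and write the generated file to disk. All parameters are
 * assumed to be provided elsewhere in the harvest build.
 */
object JavaSourceGenerationSketch {
  def generate[A <: ResourceParts](
    targetDir: File,
    className: String,
    template: A => TxtFormat.Appendable,
    partsFactory: ResourcePartsFactory[A],
    resource: ParsedResource): Unit = {

    val factory = new JavaSourceFactory(targetDir, className, template, partsFactory)
    JavaSourceWriter.write(factory.createFrom(resource))
  }
}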
| x7c1/Wheat | wheat-harvest/src/main/scala/x7c1/wheat/harvest/JavaSource.scala | Scala | mit | 901 |
package scaffvis.client.components.common
import scaffvis.layout.Rect
import japgolly.scalajs.react.vdom.ReactTagOf
import japgolly.scalajs.react.vdom.prefix_<^._
import org.scalajs.dom.html.Image
import scala.scalajs.js.URIUtils
object Svg {
/*
def fromSvgContent(position: Rect, viewBox: Rect)(innerHtml: String): ReactTagOf[SVG] = <.svg.svg(
^.dangerouslySetInnerHtml(innerHtml),
^.svg.x := position.x, ^.svg.y := position.y, ^.svg.width := position.w, ^.svg.height := position.h,
^.svg.viewBox := s"${viewBox.x} ${viewBox.y} ${viewBox.w} ${viewBox.h}"
)
def fromSvgContent(width: Int, height: Int, viewBox: Rect)(innerHtml: String): ReactTagOf[SVG] = <.svg.svg(
^.dangerouslySetInnerHtml(innerHtml),
^.svg.width := width, ^.svg.height := height,
^.svg.viewBox := s"${viewBox.x} ${viewBox.y} ${viewBox.w} ${viewBox.h}"
)
*/
def uriEncodedSvgFromContent(svgContent: String, viewBox: Rect) = {
import viewBox._
val svg = s"<svg xmlns='http://www.w3.org/2000/svg' viewBox='$x $y $w $h'>$svgContent</svg>"
//use import scalajs.js.JSStringOps.enableJSStringOps to enable jsIndexOf
val uriEncodedSvg = URIUtils.encodeURIComponent(svg)
s"data:image/svg+xml,$uriEncodedSvg"
}
def htmlImgFromSvgContent(svgContent: String, viewBox: Rect, tagMods: TagMod*): ReactTagOf[Image] = {
val uriEncodedSvg = uriEncodedSvgFromContent(svgContent, viewBox)
<.img(^.src := uriEncodedSvg, tagMods)
}
def svgImageFromSvgContent(position: Rect, viewBox: Rect, svgContent: String, tagMods: TagMod*) = {
import position._
val uriEncodedSvg = uriEncodedSvgFromContent(svgContent, viewBox)
val mods = Seq(^.svg.x := x, ^.svg.y := y, ^.svg.width := w, ^.svg.height := h, ^.href := uriEncodedSvg) ++ tagMods
<.svg.image.apply(mods:_*)
}
def viewBoxFromRect(rect: Rect, padding: Int) = {
val minX = rect.x - padding
val minY = rect.y - padding
val width = rect.w + 2 * padding
val height = rect.h + 2 * padding
s"$minX $minY $width $height"
}
val moleculeSvgViewBox = Rect(0, 0, 400, 400)
//val moleculeSvgViewBoxAttr = s"${moleculeSvgViewBox.x} ${moleculeSvgViewBox.y} ${moleculeSvgViewBox.w} ${moleculeSvgViewBox.h}"
}
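/*
 * A brief usage sketch, assumed rather than taken from the project: render a molecule drawing
 * (an SVG fragment string) as a plain <img> tag using the default 400x400 view box defined above.
 * The object name and CSS class are hypothetical.
 */
object SvgUsageSketch {
  def moleculeImg(svgContent: String): ReactTagOf[Image] =
    Svg.htmlImgFromSvgContent(svgContent, Svg.moleculeSvgViewBox, ^.className := "molecule-thumbnail")
}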
| velkoborsky/scaffvis | client/src/main/scala/scaffvis/client/components/common/Svg.scala | Scala | gpl-3.0 | 2,221 |
package pl.touk.nussknacker.sql.service
import pl.touk.nussknacker.engine.api.typed.TypedMap
import pl.touk.nussknacker.sql.db.query.ResultSetStrategy
import pl.touk.nussknacker.sql.db.schema.{MetaDataProviderFactory, TableDefinition}
import pl.touk.nussknacker.sql.utils.BaseHsqlQueryEnricherTest
import scala.concurrent.Await
class DatabaseQueryEnricherTest extends BaseHsqlQueryEnricherTest {
import scala.collection.JavaConverters._
import scala.concurrent.duration._
override val service = new DatabaseQueryEnricher(hsqlDbPoolConfig, new MetaDataProviderFactory().create(hsqlDbPoolConfig))
override val prepareHsqlDDLs: List[String] = List(
"CREATE TABLE persons (id INT, name VARCHAR(40));",
"INSERT INTO persons (id, name) VALUES (1, 'John')"
)
test("DatabaseQueryEnricher#implementation without cache") {
val query = "select * from persons where id = ?"
val st = conn.prepareStatement(query)
val meta = st.getMetaData
st.close()
val state = DatabaseQueryEnricher.TransformationState(
query = query,
argsCount = 1,
tableDef = TableDefinition(meta),
strategy = ResultSetStrategy
)
val invoker = service.implementation(Map.empty, dependencies = Nil, Some(state))
returnType(service, state).display shouldBe "List[{ID: Integer, NAME: String}]"
val resultF = invoker.invokeService(Map("arg1" -> 1))
val result = Await.result(resultF, 5 seconds).asInstanceOf[java.util.List[TypedMap]].asScala.toList
result shouldBe List(
TypedMap(Map("ID" -> 1, "NAME" -> "John"))
)
conn.prepareStatement("UPDATE persons SET name = 'Alex' WHERE id = 1").execute()
val resultF2 = invoker.invokeService(Map("arg1" -> 1))
val result2 = Await.result(resultF2, 5 seconds).asInstanceOf[java.util.List[TypedMap]].asScala.toList
result2 shouldBe List(
TypedMap(Map("ID" -> 1, "NAME" -> "Alex"))
)
}
}
| TouK/nussknacker | components/sql/src/test/scala/pl/touk/nussknacker/sql/service/DatabaseQueryEnricherTest.scala | Scala | apache-2.0 | 1,913 |