code stringlengths 5 to 1M | repo_name stringlengths 5 to 109 | path stringlengths 6 to 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 to 1M
---|---|---|---|---|---|
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.python
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan, Project}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution
import org.apache.spark.sql.execution.{FilterExec, SparkPlan}
/**
* Extracts all the Python UDFs in a logical Aggregate that depend on an aggregate expression or
* grouping key, and evaluates them after the aggregate.
*/
object ExtractPythonUDFFromAggregate extends Rule[LogicalPlan] {
/**
* Returns whether the expression could only be evaluated within aggregate.
*/
private def belongAggregate(e: Expression, agg: Aggregate): Boolean = {
e.isInstanceOf[AggregateExpression] ||
agg.groupingExpressions.exists(_.semanticEquals(e))
}
private def hasPythonUdfOverAggregate(expr: Expression, agg: Aggregate): Boolean = {
expr.find {
e => e.isInstanceOf[PythonUDF] && e.find(belongAggregate(_, agg)).isDefined
}.isDefined
}
private def extract(agg: Aggregate): LogicalPlan = {
val projList = new ArrayBuffer[NamedExpression]()
val aggExpr = new ArrayBuffer[NamedExpression]()
agg.aggregateExpressions.foreach { expr =>
if (hasPythonUdfOverAggregate(expr, agg)) {
// Python UDF can only be evaluated after aggregate
val newE = expr transformDown {
case e: Expression if belongAggregate(e, agg) =>
val alias = e match {
case a: NamedExpression => a
case o => Alias(e, "agg")()
}
aggExpr += alias
alias.toAttribute
}
projList += newE.asInstanceOf[NamedExpression]
} else {
aggExpr += expr
projList += expr.toAttribute
}
}
// There is no Python UDF over aggregate expression
Project(projList, agg.copy(aggregateExpressions = aggExpr))
}
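// Illustrative example: for a query like
//   SELECT py_udf(sum(v)) FROM t GROUP BY k
// this rule turns
//   Aggregate(groupBy = [k], aggExprs = [py_udf(sum(v))], child)
// into
//   Project([py_udf(agg)], Aggregate(groupBy = [k], aggExprs = [sum(v) AS agg], child))
// so the Python UDF is evaluated on top of the aggregate's output.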
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case agg: Aggregate if agg.aggregateExpressions.exists(hasPythonUdfOverAggregate(_, agg)) =>
extract(agg)
}
}
/**
* Extracts PythonUDFs from operators, rewriting the query plan so that the UDF can be evaluated
* alone in a batch.
*
* Only extracts the PythonUDFs that could be evaluated in Python (the single child is PythonUDFs
* or all the children could be evaluated in JVM).
*
* This has the limitation that the input to the Python UDF is not allowed to include attributes from
* multiple child operators.
*/
object ExtractPythonUDFs extends Rule[SparkPlan] with PredicateHelper {
private def hasPythonUDF(e: Expression): Boolean = {
e.find(_.isInstanceOf[PythonUDF]).isDefined
}
private def canEvaluateInPython(e: PythonUDF): Boolean = {
e.children match {
// single PythonUDF child could be chained and evaluated in Python
case Seq(u: PythonUDF) => canEvaluateInPython(u)
// Python UDF can't be evaluated directly in JVM
case children => !children.exists(hasPythonUDF)
}
}
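// Illustrative cases (names are examples only):
//   pyUdfA(pyUdfB(col))          -> evaluable: a pure chain of Python UDFs
//   pyUdfA(col + 1)              -> evaluable: all children are JVM expressions
//   pyUdfA(jvmExpr(pyUdfB(col))) -> not evaluable here: a JVM expression sits
//                                   between the two Python UDFs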
private def collectEvaluatableUDF(expr: Expression): Seq[PythonUDF] = expr match {
case udf: PythonUDF if canEvaluateInPython(udf) => Seq(udf)
case e => e.children.flatMap(collectEvaluatableUDF)
}
def apply(plan: SparkPlan): SparkPlan = plan transformUp {
case plan: SparkPlan => extract(plan)
}
/**
* Extract all the PythonUDFs from the current operator and evaluate them before the operator.
*/
private def extract(plan: SparkPlan): SparkPlan = {
val udfs = plan.expressions.flatMap(collectEvaluatableUDF)
// ignore PythonUDFs that come from the second/third aggregate, which are not used
.filter(udf => udf.references.subsetOf(plan.inputSet))
if (udfs.isEmpty) {
// If there aren't any, we are done.
plan
} else {
val attributeMap = mutable.HashMap[PythonUDF, Expression]()
val splitFilter = trySplitFilter(plan)
// Rewrite the child that has the input required for the UDF
val newChildren = splitFilter.children.map { child =>
// Pick the UDF we are going to evaluate
val validUdfs = udfs.filter { udf =>
// Check to make sure that the UDF can be evaluated with only the input of this child.
udf.references.subsetOf(child.outputSet)
}
if (validUdfs.nonEmpty) {
val resultAttrs = udfs.zipWithIndex.map { case (u, i) =>
AttributeReference(s"pythonUDF$i", u.dataType)()
}
val evaluation = validUdfs.partition(_.vectorized) match {
case (vectorizedUdfs, plainUdfs) if plainUdfs.isEmpty =>
ArrowEvalPythonExec(vectorizedUdfs, child.output ++ resultAttrs, child)
case (vectorizedUdfs, plainUdfs) if vectorizedUdfs.isEmpty =>
BatchEvalPythonExec(plainUdfs, child.output ++ resultAttrs, child)
case _ =>
throw new IllegalArgumentException("Can not mix vectorized and non-vectorized UDFs")
}
attributeMap ++= validUdfs.zip(resultAttrs)
evaluation
} else {
child
}
}
// Other cases are disallowed as they are ambiguous or would require a cartesian
// product.
udfs.filterNot(attributeMap.contains).foreach { udf =>
sys.error(s"Invalid PythonUDF $udf, requires attributes from more than one child.")
}
val rewritten = splitFilter.withNewChildren(newChildren).transformExpressions {
case p: PythonUDF if attributeMap.contains(p) =>
attributeMap(p)
}
// extract remaining python UDFs recursively
val newPlan = extract(rewritten)
if (newPlan.output != plan.output) {
// Trim away the new UDF value if it was only used for filtering or something.
execution.ProjectExec(plan.output, newPlan)
} else {
newPlan
}
}
}
// Split the original FilterExec to two FilterExecs. Only push down the first few predicates
// that are all deterministic.
private def trySplitFilter(plan: SparkPlan): SparkPlan = {
plan match {
case filter: FilterExec =>
val (candidates, containingNonDeterministic) =
splitConjunctivePredicates(filter.condition).span(_.deterministic)
val (pushDown, rest) = candidates.partition(!hasPythonUDF(_))
if (pushDown.nonEmpty) {
val newChild = FilterExec(pushDown.reduceLeft(And), filter.child)
FilterExec((rest ++ containingNonDeterministic).reduceLeft(And), newChild)
} else {
filter
}
case o => o
}
}
}
| minixalpha/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/python/ExtractPythonUDFs.scala | Scala | apache-2.0 | 7,548 |
package ru.imho.dddmt.core.util
object TemplateSupport {
abstract sealed class Op
case class Str(s: String) extends Op
case class Var(id: String, format: Option[String]) extends Op
val idStart = Character.isLetter _
val idPart = Character.isLetterOrDigit _
def parseSimpleTemplate(template: String): List[Op] =
template.foldLeft[(Symbol, List[Op])]('S, Nil) {
//String part
case (('S, l), '$') => ('WBR, l)
case (('S, Str(s) :: l), c) => ('S, Str(s + c) :: l)
case (('S, l), c) => ('S, Str(c.toString) :: l)
//$_{
case (('WBR, l), '{') => ('IDBR, Var("", None) :: l)
case (('WBR, Str(s) :: l), '$') => ('S, Str(s + '$') :: l)
case (('WBR, l), '$') => ('S, Str("$") :: l)
case (('WBR, l), c) if idStart(c) => ('ID, Var(c.toString, None) :: l)
//${id_}
case (('IDBR, l), '}') => ('S, l)
case (('IDBR, Var(id, _) :: l), '%') => ('IDBRFMT, Var(id, Some("%")) :: l)
case (('IDBR, Var(id, fmt) :: l), c) => ('IDBR, Var(id + c, fmt) :: l)
//${id%fmt_}
case (('IDBRFMT, l), '}') => ('S, l)
case (('IDBRFMT, Var(id, Some(fmt)) :: r), c) => ('IDBRFMT, Var(id, Some(fmt + c)) :: r)
//$id_
case (('ID, Var(id, fmt) :: r), c) if idPart(c) => ('ID, Var(id + c, fmt) :: r)
case (('ID, l), '$') => ('WBR, l)
case (('ID, l), c) => ('S, Str(c.toString) :: l)
case ((s, l), c) =>
throw new IllegalStateException(s"Invalid template: `$template`, at char `$c`, s=`$s`")
} match {
case ('S, l) => l.reverse
case ('ID, l) => l.reverse
case _ => throw new IllegalStateException("Incorrectly terminated template: " + template)
}
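// Example (illustrative only):
//   parseSimpleTemplate("ab${x%03d}c$y") ==
//     List(Str("ab"), Var("x", Some("%03d")), Str("c"), Var("y", None))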
def simpleTemplateExpander[T](
template: String,
varAccessors: Map[String, T => Any]): T => String = {
def w(in: T => StringBuilder => Unit) = in
val ops: List[T => StringBuilder => Unit] = parseSimpleTemplate(template).map {
case Str(s) =>
w(_ => _.append(s))
case Var(v, f) =>
val op = try {
val r = varAccessors(v)
f.map(fmt => r andThen (_ formatted fmt)).getOrElse(r)
} catch {
case t: NoSuchElementException =>
throw new IllegalArgumentException(s"variable not found: `$v`")
}
w(t => _.append(op(t)))
}
def fx(t: T, sb: StringBuilder): Unit = ops foreach (_(t)(sb))
t => {
val sb = new StringBuilder
fx(t, sb)
sb.toString
}
}
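// Example (illustrative only):
//   val render = simpleTemplateExpander[Int]("value=$v!", Map("v" -> identity))
//   render(42) == "value=42!"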
} | IMHOVi/dddmt | dddmt-engine/src/main/scala/ru/imho/dddmt/core/util/TemplateSupport.scala | Scala | apache-2.0 | 2,626 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.query
import com.twitter.finagle.tracing.{Trace => FTrace}
import com.twitter.logging.Logger
import com.twitter.zipkin.common.{BinaryAnnotation, Endpoint, Span}
import java.nio.ByteBuffer
import scala.collection.mutable
/**
* A chunk of time, between a start and an end.
*/
case class Timespan(start: Long, end: Long)
/**
* Represents a trace, a bundle of spans.
*/
object Trace {
def apply(spanTree: SpanTreeEntry): Trace = Trace(spanTree.toList)
}
case class Trace(private val s: Seq[Span]) {
lazy val spans = mergeBySpanId(s).toSeq.sortWith {
(a, b) =>
val aTimestamp = a.firstAnnotation.map(_.timestamp).getOrElse(Long.MaxValue)
val bTimestamp = b.firstAnnotation.map(_.timestamp).getOrElse(Long.MaxValue)
aTimestamp < bTimestamp
}
/**
* Find the trace id for this trace.
* Returns None if there are no spans to look the id up from.
*/
def id: Option[Long] =
spans.headOption.map(_.traceId)
/**
* Find the root span of this trace and return
*/
def getRootSpan: Option[Span] =
spans.find { !_.parentId.isDefined }
/**
* Find a span by the id. Note that this iterates through all the spans.
*/
def getSpanById(spanId: Long): Option[Span] =
spans.find { _.id == spanId }
/**
* In some cases we don't care if it's the actual root span or just the span
* that is closest to the root. For example, if we don't yet log spans
* from the root service, then we want the one just below that.
* FIXME if there are holes in the trace this might not return the correct span
*/
lazy val getRootMostSpan: Option[Span] = {
getRootSpan orElse {
val idSpan = getIdToSpanMap
spans.headOption map { recursiveGetRootMostSpan(idSpan, _) }
}
}
def getRootSpans(idSpan: Map[Long, Span] = getIdToSpanMap): Seq[Span] =
spans filter { !_.parentId.flatMap(idSpan.get).isDefined }
private def recursiveGetRootMostSpan(idSpan: Map[Long, Span], prevSpan: Span): Span = {
// parent id shouldn't be none as then we would have returned already
val span = for ( id <- prevSpan.parentId; s <- idSpan.get(id) ) yield
recursiveGetRootMostSpan(idSpan, s)
span.getOrElse(prevSpan)
}
/**
* Get the start and end timestamps for this trace.
*/
def getStartAndEndTimestamp: Option[Timespan] = {
spans.flatMap(_.annotations.map(_.timestamp)) match {
case Nil => None // No annotations
case s => Some(Timespan(s.min, s.max))
}
}
/**
* How long did this span take to run?
* Returns microseconds between start annotation and end annotation
*/
def duration: Long = {
val startEnd = getStartAndEndTimestamp.getOrElse(Timespan(0, 0))
(startEnd.end - startEnd.start)
}
/**
* Returns all the endpoints involved in this trace.
*/
def endpoints: Set[Endpoint] = {
spans.flatMap(_.endpoints).toSet
}
/**
* Returns all the services involved in this trace.
*/
def services: Set[String] = {
spans.flatMap(_.serviceNames).toSet
}
/**
* Returns a map of services involved in this trace to the
* number of times they are invoked
*/
def serviceCounts: Map[String, Int] = {
spans.flatMap(_.serviceNames).groupBy(s => s).mapValues {
l: Seq[String] => l.length
}
}
/**
* Figures out the "span depth". This is used in the ui
* to figure out how to lay out the spans in the visualization.
* @return span id -> depth in the tree
*/
def toSpanDepths: Option[Map[Long, Int]] = {
FTrace.record("toSpanDepths")
getRootMostSpan match {
case None => return None
case Some(s) => {
val spanTree = getSpanTree(s, getIdToChildrenMap)
Some(spanTree.depths(1))
}
}
}
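// Illustrative sketch: for a trace whose root span A(id=1) has children B(id=2)
// and C(id=3), with D(id=4) a child of B, the expected result is
//   Map(1 -> 1, 2 -> 2, 3 -> 2, 4 -> 3)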
/**
* Get all the binary annotations with this key in the whole trace.
*/
def getBinaryAnnotationsByKey(key: String): Seq[ByteBuffer] = {
spans.flatMap(_.binaryAnnotations.collect {
case BinaryAnnotation(bKey, bValue, _, _) if (bKey == key) => bValue
}.toSeq)
}
/**
* Get all the binary annotations in this trace.
*/
def getBinaryAnnotations: Seq[BinaryAnnotation] =
spans.map(_.binaryAnnotations).flatten
/**
* Merge all the spans objects with the same span ids into one per id.
* We store parts of spans in different columns in order to make writes
* faster and simpler. This means we have to merge them correctly on read.
*/
private def mergeBySpanId(spans: Iterable[Span]): Iterable[Span] = {
val spanMap = new mutable.HashMap[Long, Span]
spans.foreach(s => {
val oldSpan = spanMap.get(s.id)
oldSpan match {
case Some(oldS) => {
val merged = oldS.mergeSpan(s)
spanMap.put(merged.id, merged)
}
case None => spanMap.put(s.id, s)
}
})
spanMap.values
}
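// For example (illustrative): two stored fragments of span id 1 -- one carrying
// the "cs"/"cr" annotations, the other the binary annotations -- come back as a
// single Span holding both, produced by Span.mergeSpan.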
/*
* Turn the Trace into a map of Span Id -> One or more children Spans
*/
def getIdToChildrenMap: mutable.MultiMap[Long, Span] = {
val map = new mutable.HashMap[Long, mutable.Set[Span]] with mutable.MultiMap[Long, Span]
for ( s <- spans; pId <- s.parentId ) map.addBinding(pId, s)
map
}
/*
* Turn the Trace into a map of Span Id -> Span
*/
def getIdToSpanMap: Map[Long, Span] =
spans.map { s => (s.id, s) }.toMap
/**
* Get the spans of this trace in a tree form. SpanTreeEntry wraps a Span and it's children.
*/
def getSpanTree(span: Span, idToChildren: mutable.MultiMap[Long, Span]): SpanTreeEntry = {
val children = idToChildren.get(span.id)
children match {
case Some(cSet) => SpanTreeEntry(span, cSet.map(getSpanTree(_, idToChildren)).toList)
case None => SpanTreeEntry(span, List[SpanTreeEntry]())
}
}
/**
* Print the trace tree to give the user an overview.
*/
def printTraceTree = {
getRootSpan match {
case Some(s) => getSpanTree(s, getIdToChildrenMap).printTree(0)
case None => println("No root node found")
}
}
}
| srijs/zipkin | zipkin-common/src/main/scala/com/twitter/zipkin/query/Trace.scala | Scala | apache-2.0 | 6,586 |
import java.io.BufferedWriter
import java.io.FileReader
import java.io.FileWriter
import java.io.InputStreamReader
import java.io.IOException
import java.io.LineNumberReader
import java.io.OutputStreamWriter
import org.mozilla.javascript.Context
import org.mozilla.javascript.Function
import org.mozilla.javascript.Scriptable
import org.mozilla.javascript.ScriptableObject
import org.mozilla.javascript.annotations.JSConstructor
import org.mozilla.javascript.annotations.JSFunction
import org.mozilla.javascript.annotations.JSGetter
/**
* Define a simple JavaScript File object.
*
* This isn't intended to be any sort of definitive attempt at a
* standard File object for JavaScript, but instead is an example
* of a more involved definition of a host object.
*
* Example of use of the File object:
* <pre>
* js> defineClass("File")
* js> file = new File("myfile.txt");
* [object File]
* js> file.writeLine("one"); <i>only now is file actually opened</i>
* js> file.writeLine("two");
* js> file.writeLine("thr", "ee");
* js> file.close(); <i>must close file before we can reopen for reading</i>
* js> var a = file.readLines(); <i>creates and fills an array with the contents of the file</i>
* js> a;
* one,two,three
* js>
* </pre>
*
* File errors or end-of-file signaled by thrown Java exceptions will
* be wrapped as JavaScript exceptions when called from JavaScript,
* and may be caught within JavaScript.
*
* The zero-parameter constructor.
*
* When Context.defineClass is called with this class, it will
* construct File.prototype using this constructor.
*/
class File extends ScriptableObject {
override def getClassName: String = "File"
/**
* Get the name of the file.
*
* Used to define the "name" property.
*/
@JSGetter
def getName: String = name
/**
* Read the remaining lines in the file and return them in an array.
*
* Implements a JavaScript function.<p>
*
* This is a good example of creating a new array and setting
* elements in that array.
*/
@JSFunction
def readLines: AnyRef = {
val iterator = Iterator.continually(readLine()).takeWhile(_ != null)
val lines: Array[String] = iterator.toArray
val scope = ScriptableObject.getTopLevelScope(this)
val cx = Context.getCurrentContext
cx.newObject(scope, "Array", lines.asInstanceOf[Array[AnyRef]])
}
/**
* Read a line.
*
* Implements a JavaScript function.
*/
@JSFunction
def readLine(): String = getReader.readLine()
/**
* Read a character.
*/
@JSFunction
def readChar(): String = {
val i = getReader.read()
if (i == -1) {
null
} else {
// read() returns a character code; wrap it as a one-character string.
val charArray = Array(i.toChar)
new String(charArray)
}
}
@JSGetter
def getLineNumber: Int = getReader.getLineNumber
@JSFunction
def close() {
if (reader != null) {
reader.close()
reader = null
} else if (writer != null) {
writer.close()
writer = null
}
}
/**
* Finalizer.
*
* Close the file when this object is collected.
*/
override def finalize() {
try {
close()
} catch {
case _: IOException =>
}
}
/**
* Get the Java reader.
*/
@JSFunction("getReader")
def getJSReader: AnyRef = {
if (reader == null) {
null
} else {
// Here we use toObject() to "wrap" the BufferedReader object
// in a Scriptable object so that it can be manipulated by
// JavaScript.
val parent: Scriptable = ScriptableObject.getTopLevelScope(this)
Context.javaToJS(reader, parent)
}
}
/**
* Get the Java writer.
*
* @see File#getReader
*/
@JSFunction
def getWriter: AnyRef = {
if (writer == null) {
null
} else {
val parent: Scriptable = ScriptableObject.getTopLevelScope(this)
Context.javaToJS(writer, parent)
}
}
/**
* Get the reader, checking that we're not already writing this file.
*/
private def getReader: LineNumberReader = {
if (writer != null) {
throw Context.reportRuntimeError("already writing file \"" + name + "\"")
}
if (reader == null) {
val underlyingReader = if (file == null) {
new InputStreamReader(System.in)
} else {
new FileReader(file)
}
reader = new LineNumberReader(underlyingReader)
}
reader
}
private var name: String = _
private var file: java.io.File = _
private var reader: LineNumberReader = _
private var writer: BufferedWriter = _
}
object File {
/**
* The Scala method defining the JavaScript File constructor.
*
* If the constructor has one or more arguments, and the
* first argument is not undefined, the argument is converted
* to a string as used as the filename.<p>
*
* Otherwise System.in or System.out is assumed as appropriate
* to the use.
*/
@JSConstructor
def jsConstructor(cx: Context, args: Array[AnyRef], ctorObj: Function, inNewExpr: Boolean): File = {
val result: File = new File
if (args.length == 0 || args(0) == Context.getUndefinedValue) {
result.name = ""
result.file = null
} else {
result.name = Context.toString(args(0))
result.file = new java.io.File(result.name)
}
result
}
/**
* Write strings.
*
* Implements a JavaScript function.
*
* This function takes a variable number of arguments, converts
* each argument to a string, and writes that string to the file.
*/
@JSFunction
def write(cx: Context, thisObj: Scriptable, args: Array[AnyRef], funObj: Function) = {
write0(thisObj, args, eol = false)
}
/**
* Write strings and a newline.
*
* Implements a JavaScript function.
*/
@JSFunction
def writeLine(cx: Context, thisObj: Scriptable, args: Array[AnyRef], funObj: Function) = {
write0(thisObj, args, eol = true)
}
/**
* Perform the instanceof check and return the downcasted File object.
*
* This is necessary since methods may reside in the File.prototype
* object and scripts can dynamically alter prototype chains. For example:
* <pre>
* js> defineClass("File");
* js> o = {};
* [object Object]
* js> o.__proto__ = File.prototype;
* [object File]
* js> o.write("hi");
* js: called on incompatible object
* </pre>
* The runtime will take care of such checks when non-static Java methods
* are defined as JavaScript functions.
*/
private def checkInstance(obj: Scriptable): File = {
if (obj == null || !obj.isInstanceOf[File]) {
throw Context.reportRuntimeError("called on incompatible object")
}
obj.asInstanceOf[File]
}
/**
* Perform the guts of write and writeLine.
*
* Since the two functions differ only in whether they write a
* newline character, move the code into a common subroutine.
*/
private def write0(thisObj: Scriptable, args: Array[AnyRef], eol: Boolean) = {
val thisFile: File = checkInstance(thisObj)
if (thisFile.reader != null) {
throw Context.reportRuntimeError("already writing file \"" + thisFile.name + "\"")
}
if (thisFile.writer == null) {
val underlyingWriter = if (thisFile.file == null) {
new OutputStreamWriter(System.out)
} else {
new FileWriter(thisFile.file)
}
thisFile.writer = new BufferedWriter(underlyingWriter)
}
args.foreach { arg =>
val s = Context.toString(arg)
thisFile.writer.write(s, 0, s.length())
}
if (eol) {
thisFile.writer.newLine()
}
}
}
| SollmoStudio/beyond | rhinoScalaBinding/src/examples/scala/File.scala | Scala | apache-2.0 | 7,606 |
/**
* Copyright 2011-2013 StackMob
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stackmob.customcode.dev.server.sdk.simulator
import com.twitter.util.Time
import scala.util.Random
/**
* a single throwable and a frequency with which it should be thrown
* @param err the error to throw
* @param freq the frequency with which to throw it
* @param rand the random number generator to be used to decide when to throw
*/
class ThrowableFrequency(val err: Throwable,
val freq: Frequency,
val rand: Random = defaultRandom) {
private var count = 0
private var lastRollover = Time.now
private val lock = new Object
def getCount: Int = count
def getLastRollover: Time = lastRollover
/**
* simulate a single call, randomly deciding (based on freq) whether to throw err
* @tparam T unused type parameter, retained for source compatibility
* @throws Throwable if this simulated call is selected to fail (the thrown value is err)
*/
def simulate[T]() {
lock.synchronized {
if(lastRollover + freq.every <= Time.now) {
//if a rollover happened, reset stuff
lastRollover = Time.now
count = 0
} else if(count >= freq.number) {
//if the counter is at max, run the op normally
count += 1
} else {
//if the counter is not at max, randomly decide if there's an error
val shouldErr = rand.nextBoolean()
if(shouldErr) {
count += 1
throw err
}
}
}
}
}
object ThrowableFrequency {
def apply(err: Throwable, freq: Frequency, rand: Random = defaultRandom): ThrowableFrequency = {
new ThrowableFrequency(err, freq, rand)
}
}
| matthewfarwell/stackmob-customcode-dev | src/main/scala/com/stackmob/customcode/dev/server/sdk/simulator/ThrowableFrequency.scala | Scala | apache-2.0 | 2,175 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.nio.ByteBuffer
import java.nio.charset.StandardCharsets
import java.util.{List => JList, Map => JMap}
import scala.collection.JavaConverters._
import org.apache.avro.Schema
import org.apache.avro.generic.IndexedRecord
import org.apache.hadoop.fs.Path
import org.apache.parquet.avro.AvroParquetWriter
import org.apache.parquet.hadoop.ParquetWriter
import org.apache.spark.SparkConf
import org.apache.spark.sql.Row
import org.apache.spark.sql.execution.datasources.parquet.test.avro._
import org.apache.spark.sql.test.SharedSparkSession
class ParquetAvroCompatibilitySuite extends ParquetCompatibilityTest with SharedSparkSession {
override def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
private def withWriter[T <: IndexedRecord]
(path: String, schema: Schema)
(f: ParquetWriter[T] => Unit): Unit = {
logInfo(
s"""Writing Avro records with the following Avro schema into Parquet file:
|
|${schema.toString(true)}
""".stripMargin)
val writer = AvroParquetWriter.builder[T](new Path(path)).withSchema(schema).build()
try f(writer) finally writer.close()
}
ignore("required primitives") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroPrimitives](path, AvroPrimitives.getClassSchema) { writer =>
(0 until 10).foreach { i =>
writer.write(
AvroPrimitives.newBuilder()
.setBoolColumn(i % 2 == 0)
.setIntColumn(i)
.setLongColumn(i.toLong * 10)
.setFloatColumn(i.toFloat + 0.1f)
.setDoubleColumn(i.toDouble + 0.2d)
.setBinaryColumn(ByteBuffer.wrap(s"val_$i".getBytes(StandardCharsets.UTF_8)))
.setStringColumn(s"val_$i")
.build())
}
}
logParquetSchema(path)
checkAnswer(spark.read.parquet(path), (0 until 10).map { i =>
Row(
i % 2 == 0,
i,
i.toLong * 10,
i.toFloat + 0.1f,
i.toDouble + 0.2d,
s"val_$i".getBytes(StandardCharsets.UTF_8),
s"val_$i")
})
}
}
ignore("optional primitives") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroOptionalPrimitives](path, AvroOptionalPrimitives.getClassSchema) { writer =>
(0 until 10).foreach { i =>
val record = if (i % 3 == 0) {
AvroOptionalPrimitives.newBuilder()
.setMaybeBoolColumn(null)
.setMaybeIntColumn(null)
.setMaybeLongColumn(null)
.setMaybeFloatColumn(null)
.setMaybeDoubleColumn(null)
.setMaybeBinaryColumn(null)
.setMaybeStringColumn(null)
.build()
} else {
AvroOptionalPrimitives.newBuilder()
.setMaybeBoolColumn(i % 2 == 0)
.setMaybeIntColumn(i)
.setMaybeLongColumn(i.toLong * 10)
.setMaybeFloatColumn(i.toFloat + 0.1f)
.setMaybeDoubleColumn(i.toDouble + 0.2d)
.setMaybeBinaryColumn(ByteBuffer.wrap(s"val_$i".getBytes(StandardCharsets.UTF_8)))
.setMaybeStringColumn(s"val_$i")
.build()
}
writer.write(record)
}
}
logParquetSchema(path)
checkAnswer(spark.read.parquet(path), (0 until 10).map { i =>
if (i % 3 == 0) {
Row.apply(Seq.fill(7)(null): _*)
} else {
Row(
i % 2 == 0,
i,
i.toLong * 10,
i.toFloat + 0.1f,
i.toDouble + 0.2d,
s"val_$i".getBytes(StandardCharsets.UTF_8),
s"val_$i")
}
})
}
}
ignore("non-nullable arrays") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroNonNullableArrays](path, AvroNonNullableArrays.getClassSchema) { writer =>
(0 until 10).foreach { i =>
val record = {
val builder =
AvroNonNullableArrays.newBuilder()
.setStringsColumn(Seq.tabulate(3)(i => s"val_$i").asJava)
if (i % 3 == 0) {
builder.setMaybeIntsColumn(null).build()
} else {
builder.setMaybeIntsColumn(Seq.tabulate(3)(Int.box).asJava).build()
}
}
writer.write(record)
}
}
logParquetSchema(path)
checkAnswer(spark.read.parquet(path), (0 until 10).map { i =>
Row(
Seq.tabulate(3)(i => s"val_$i"),
if (i % 3 == 0) null else Seq.tabulate(3)(identity))
})
}
}
ignore("nullable arrays (parquet-avro 1.7.0 does not properly support this)") {
// TODO Complete this test case after upgrading to parquet-mr 1.8+
}
ignore("SPARK-10136 array of primitive array") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroArrayOfArray](path, AvroArrayOfArray.getClassSchema) { writer =>
(0 until 10).foreach { i =>
writer.write(AvroArrayOfArray.newBuilder()
.setIntArraysColumn(
Seq.tabulate(3, 3)((i, j) => i * 3 + j: Integer).map(_.asJava).asJava)
.build())
}
}
logParquetSchema(path)
checkAnswer(spark.read.parquet(path), (0 until 10).map { i =>
Row(Seq.tabulate(3, 3)((i, j) => i * 3 + j))
})
}
}
ignore("map of primitive array") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[AvroMapOfArray](path, AvroMapOfArray.getClassSchema) { writer =>
(0 until 10).foreach { i =>
writer.write(AvroMapOfArray.newBuilder()
.setStringToIntsColumn(
Seq.tabulate(3) { i =>
i.toString -> Seq.tabulate(3)(j => i + j: Integer).asJava
}.toMap.asJava)
.build())
}
}
logParquetSchema(path)
checkAnswer(spark.read.parquet(path), (0 until 10).map { i =>
Row(Seq.tabulate(3)(i => i.toString -> Seq.tabulate(3)(j => i + j)).toMap)
})
}
}
ignore("various complex types") {
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[ParquetAvroCompat](path, ParquetAvroCompat.getClassSchema) { writer =>
(0 until 10).foreach(i => writer.write(makeParquetAvroCompat(i)))
}
logParquetSchema(path)
checkAnswer(spark.read.parquet(path), (0 until 10).map { i =>
Row(
Seq.tabulate(3)(n => s"arr_${i + n}"),
Seq.tabulate(3)(n => n.toString -> (i + n: Integer)).toMap,
Seq.tabulate(3) { n =>
(i + n).toString -> Seq.tabulate(3) { m =>
Row(Seq.tabulate(3)(j => i + j + m), s"val_${i + m}")
}
}.toMap)
})
}
}
def makeParquetAvroCompat(i: Int): ParquetAvroCompat = {
def makeComplexColumn(i: Int): JMap[String, JList[Nested]] = {
Seq.tabulate(3) { n =>
(i + n).toString -> Seq.tabulate(3) { m =>
Nested
.newBuilder()
.setNestedIntsColumn(Seq.tabulate(3)(j => i + j + m: Integer).asJava)
.setNestedStringColumn(s"val_${i + m}")
.build()
}.asJava
}.toMap.asJava
}
ParquetAvroCompat
.newBuilder()
.setStringsColumn(Seq.tabulate(3)(n => s"arr_${i + n}").asJava)
.setStringToIntColumn(Seq.tabulate(3)(n => n.toString -> (i + n: Integer)).toMap.asJava)
.setComplexColumn(makeComplexColumn(i))
.build()
}
ignore("SPARK-9407 Push down predicates involving Parquet ENUM columns") {
import testImplicits._
withTempPath { dir =>
val path = dir.getCanonicalPath
withWriter[ParquetEnum](path, ParquetEnum.getClassSchema) { writer =>
(0 until 4).foreach { i =>
writer.write(ParquetEnum.newBuilder().setSuit(Suit.values.apply(i)).build())
}
}
checkAnswer(spark.read.parquet(path).filter('suit === "SPADES"), Row("SPADES"))
}
}
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetAvroCompatibilitySuite.scala | Scala | apache-2.0 | 9,837 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.utils
import java.util.concurrent._
import org.locationtech.geomesa.utils.collection.CloseableIterator
import scala.collection.JavaConversions._
abstract class AbstractBatchScan[T, R <: AnyRef](ranges: Seq[T], threads: Int, buffer: Int)
extends CloseableIterator[R] {
require(threads > 0, "Thread count must be greater than 0")
private val inQueue = new ConcurrentLinkedQueue(ranges)
private val outQueue = new LinkedBlockingQueue[R](buffer)
private val pool = Executors.newFixedThreadPool(threads + 1)
private val latch = new CountDownLatch(threads)
private val sentinel: R = singletonSentinel
private var retrieved: R = _
(0 until threads).foreach(_ => pool.submit(new SingleThreadScan()))
pool.submit(new Terminator)
pool.shutdown()
protected def scan(range: T, out: BlockingQueue[R])
protected def singletonSentinel: R
override def hasNext: Boolean = {
if (retrieved != null) {
true
} else {
retrieved = outQueue.take
if (!retrieved.eq(sentinel)) {
true
} else {
outQueue.put(sentinel) // re-queue in case hasNext is called again
retrieved = null.asInstanceOf[R]
false
}
}
}
override def next(): R = {
val n = retrieved
retrieved = null.asInstanceOf[R]
n
}
override def close(): Unit = pool.shutdownNow()
private class SingleThreadScan extends Runnable {
override def run(): Unit = {
try {
var range = inQueue.poll
while (range != null && !Thread.currentThread().isInterrupted) {
scan(range, outQueue)
range = inQueue.poll
}
} finally {
latch.countDown()
}
}
}
private class Terminator extends Runnable {
override def run(): Unit = {
try {
latch.await()
} finally {
outQueue.put(sentinel)
}
}
}
}
| ddseapy/geomesa | geomesa-index-api/src/main/scala/org/locationtech/geomesa/index/utils/AbstractBatchScan.scala | Scala | apache-2.0 | 2,372 |
class S {
val j = new J()
val x: Array[Dog] = ???
// Check that the java varargs for `foo` gets typed as `Array[_ <: Animal]`.
// Otherwise, the call below would fail in -Ycheck:elimRepeated because arrays are invariant before erasure.
// This is unsound but allowed.
j.foo(x*)
j.foo(new Dog, new Dog)
}
| dotty-staging/dotty | tests/pos/i5140/S.scala | Scala | apache-2.0 | 318 |
package com.mentatlabs.nsa
package scalac
package options
import javac.JavacVersion
/* -target:<target>
* ================
* 2.0.0 - 2.1.4: Specify which backend to use (jvm,msil)
* 2.1.5 - 2.5.1: Specify which backend to use (jvm-1.5,jvm-1.4,msil,cldc)
* 2.6.0 - 2.7.1: Specify for which target object files should be built (jvm-1.5,jvm-1.4,msil,cldc)
* 2.7.2 - 2.7.7: Specify for which target object files should be built (jvm-1.5,jvm-1.4,msil)
* 2.8.0 - 2.8.2: Specify for which target object files should be built (jvm-1.5,msil)
* 2.9.0 - 2.9.3: Target platform for object files. (jvm-1.5,msil) default:jvm-1.5
* 2.10.0 - 2.10.6: Target platform for object files. All JVM 1.5 targets are deprecated. (jvm-1.5,jvm-1.5-fjbg,jvm-1.5-asm,jvm-1.6,jvm-1.7,msil) default:jvm-1.6
* 2.11.0 - 2.11.2: Target platform for object files. All JVM 1.5 targets are deprecated. (jvm-1.5,jvm-1.6,jvm-1.7) default:jvm-1.6
* 2.11.3 - 2.11.8: Target platform for object files. All JVM 1.5 targets are deprecated. (jvm-1.5,jvm-1.6,jvm-1.7,jvm-1.8) default:jvm-1.6
* 2.12.0: Target platform for object files. All JVM 1.5 - 1.7 targets are deprecated. (jvm-1.5,jvm-1.6,jvm-1.7,jvm-1.8) default:jvm-1.8
*/
object ScalacTarget
extends ScalacOptionChoiceContainer[String]("-target", ScalacVersions.`2.0.0`) {
// FIXME: <1.6 are deprecated in new Scalac versions
// FIXME: <1.7 are removed in new Scalac versions
def jvm(version: Double) = apply("jvm" + version)
def `jvm-1.4` = apply("jvm-1.4")
def `jvm-1.5` = apply("jvm-1.5")
def `jvm-1.6` = apply("jvm-1.6")
def `jvm-1.7` = apply("jvm-1.7")
def `jvm-1.8` = apply("jvm-1.8")
def apply(javac: JavacVersion): ScalacOptionChoice[String] =
apply("jvm" + javac)
}
| mentat-labs/sbt-nsa | nsa-core/src/main/scala/com/mentatlabs/nsa/scalac/options/ScalacTarget.scala | Scala | bsd-3-clause | 1,772 |
package views.html
import lila.api.Context
import lila.app.templating.Environment._
import lila.app.ui.ScalatagsTemplate._
import controllers.routes
object bookmark {
def toggle(g: lila.game.Game, bookmarked: Boolean)(implicit ctx: Context) =
if (ctx.isAuth)
a(
cls := List(
"bookmark" -> true,
"bookmarked" -> bookmarked
),
href := routes.Game.bookmark(g.id),
title := trans.bookmarkThisGame.txt()
)(
iconTag("")(cls := "on is3"),
iconTag("")(cls := "off is3"),
span(g.showBookmarks)
)
else if (g.hasBookmarks)
span(cls := "bookmark")(
span(dataIcon := "", cls := "is3")(g.showBookmarks)
)
else emptyFrag
}
| luanlv/lila | app/views/bookmark.scala | Scala | mit | 751 |
package ore.db.impl.schema
import ore.data.project.Category
import ore.db.DbRef
import ore.db.impl.OrePostgresDriver.api._
import ore.db.impl.table.common.{DescriptionColumn, NameColumn, VisibilityColumn}
import ore.models.project._
import ore.models.user.User
import io.circe.Json
class ProjectTable(tag: Tag)
extends ModelTable[Project](tag, "projects")
with NameColumn[Project]
with VisibilityColumn[Project]
with DescriptionColumn[Project] {
def pluginId = column[String]("plugin_id")
def ownerName = column[String]("owner_name")
def ownerId = column[DbRef[User]]("owner_id")
def slug = column[String]("slug")
def recommendedVersionId = column[DbRef[Version]]("recommended_version_id")
def category = column[Category]("category")
def topicId = column[Option[Int]]("topic_id")
def postId = column[Int]("post_id")
def notes = column[Json]("notes")
def keywords = column[List[String]]("keywords")
def homepage = column[String]("homepage")
def issues = column[String]("issues")
def source = column[String]("source")
def support = column[String]("support")
def licenseName = column[String]("license_name")
def licenseUrl = column[String]("license_url")
def forumSync = column[Boolean]("forum_sync")
def settings =
(
keywords,
homepage.?,
issues.?,
source.?,
support.?,
licenseName.?,
licenseUrl.?,
forumSync
).<>(Project.ProjectSettings.tupled, Project.ProjectSettings.unapply)
override def * =
(
id.?,
createdAt.?,
(
pluginId,
ownerName,
ownerId,
name,
slug,
recommendedVersionId.?,
category,
description.?,
topicId,
postId.?,
visibility,
notes,
settings
)
).<>(mkApply((Project.apply _).tupled), mkUnapply(Project.unapply))
}
| SpongePowered/Ore | models/src/main/scala/ore/db/impl/schema/ProjectTable.scala | Scala | mit | 2,080 |
package com.twitter.finagle
import com.twitter.util.{Future, Time}
/**
* A Filter acts as a decorator/transformer of a service. It may apply
* transformations to the input and output of that service:
*
* (* MyService *)
* [ReqIn -> (ReqOut -> RepIn) -> RepOut]
*
* For example, you may have a POJO service that takes Strings and
* parses them as Ints. If you want to expose this as a Network
* Service via Thrift, it is nice to isolate the protocol handling
* from the business rules. Hence you might have a Filter that
* converts back and forth between Thrift structs. Again, your service
* deals with POJOs:
*
* [ThriftIn -> (String -> Int) -> ThriftOut]
*
* Thus, a Filter[A, B, C, D] converts a Service[C, D] to a Service[A, B].
* In other words, it converts a Service[ReqOut, RepIn] to a
* Service[ReqIn, RepOut].
*
*/
abstract class Filter[-ReqIn, +RepOut, +ReqOut, -RepIn]
extends ((ReqIn, Service[ReqOut, RepIn]) => Future[RepOut])
{
/**
* This is the method to override/implement to create your own Filter.
*
* @param request the input request type
* @param service a service that takes the output request type and the input response type
*
*/
def apply(request: ReqIn, service: Service[ReqOut, RepIn]): Future[RepOut]
/**
* Chains a series of filters together:
*
* myModularService = handleExceptions.andThen(thrift2Pojo.andThen(parseString))
*
* '''Note:''' synchronously thrown exceptions in the underlying service are automatically
* lifted into Future.exception.
*
* @param next another filter to follow after this one
*
*/
def andThen[Req2, Rep2](next: Filter[ReqOut, RepIn, Req2, Rep2]) =
new Filter[ReqIn, RepOut, Req2, Rep2] {
def apply(request: ReqIn, service: Service[Req2, Rep2]) = {
Filter.this.apply(
request,
Service.rescue(new Service[ReqOut, RepIn] {
def apply(request: ReqOut): Future[RepIn] = next(request, service)
override def close(deadline: Time) = service.close(deadline)
override def status = service.status
})
)
}
}
/**
* Terminates a filter chain in a service. For example,
*
* myFilter.andThen(myService)
*
* @param service a service that takes the output request type and the input response type.
*
*/
def andThen(service: Service[ReqOut, RepIn]) = new Service[ReqIn, RepOut] {
def apply(request: ReqIn) = Filter.this.apply(request, Service.rescue(service))
override def close(deadline: Time) = service.close(deadline)
override def status = service.status
}
def andThen(f: ReqOut => Future[RepIn]): ReqIn => Future[RepOut] = {
val service = Service.mk(f)
(req) => Filter.this.apply(req, service)
}
def andThen(factory: ServiceFactory[ReqOut, RepIn]): ServiceFactory[ReqIn, RepOut] =
new ServiceFactory[ReqIn, RepOut] {
def apply(conn: ClientConnection) = factory(conn) map { Filter.this andThen _ }
def close(deadline: Time) = factory.close(deadline)
override def status = factory.status
override def toString = factory.toString
}
/**
* Conditionally propagates requests down the filter chain. This may be
* useful if you are statically wiring together filter chains based
* on a configuration file, for instance.
*
* @param condAndFilter a tuple of boolean and filter.
*
*/
def andThenIf[Req2 >: ReqOut, Rep2 <: RepIn](
condAndFilter: (Boolean, Filter[ReqOut, RepIn, Req2, Rep2])) =
condAndFilter match {
case (true, filter) => andThen(filter)
case (false, _) => this
}
}
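/**
 * A convenience Filter whose input and output types match on both sides.
 *
 * Illustrative usage sketch (names below are examples only):
 * {{{
 * val timing = new SimpleFilter[Int, String] {
 *   def apply(req: Int, service: Service[Int, String]): Future[String] = {
 *     val start = Time.now
 *     service(req).ensure { println("took " + (Time.now - start)) }
 *   }
 * }
 * val echo = Service.mk[Int, String](i => Future.value(i.toString))
 * val timedEcho: Service[Int, String] = timing.andThen(echo)
 * }}}
 */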
abstract class SimpleFilter[Req, Rep] extends Filter[Req, Rep, Req, Rep]
object Filter {
implicit def canStackFromSvc[Req, Rep]
: CanStackFrom[Filter[Req, Rep, Req, Rep], Service[Req, Rep]] =
new CanStackFrom[Filter[Req, Rep, Req, Rep], Service[Req, Rep]] {
def toStackable(_role: Stack.Role, filter: Filter[Req, Rep, Req, Rep]) =
new Stack.Module0[Service[Req, Rep]] {
val role = _role
val description = role.name
def make(next: Service[Req, Rep]) = filter andThen next
}
}
implicit def canStackFromFac[Req, Rep]
: CanStackFrom[Filter[Req, Rep, Req, Rep], ServiceFactory[Req, Rep]] =
new CanStackFrom[Filter[Req, Rep, Req, Rep], ServiceFactory[Req, Rep]] {
def toStackable(_role: Stack.Role, filter: Filter[Req, Rep, Req, Rep]) =
new Stack.Module0[ServiceFactory[Req, Rep]] {
val role = _role
val description = role.name
def make(next: ServiceFactory[Req, Rep]) = filter andThen next
}
}
/**
* TypeAgnostic filters are like SimpleFilters but they leave the Rep and Req types unspecified
* until .toFilter is called.
*/
trait TypeAgnostic {
def toFilter[Req, Rep]: Filter[Req, Rep, Req, Rep]
def andThen(next: TypeAgnostic): TypeAgnostic = new TypeAgnostic {
def toFilter[Req, Rep] = toFilter[Req, Rep].andThen(next.toFilter[Req, Rep])
}
}
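// Illustrative TypeAgnostic sketch: the filter is materialized for concrete
// request/response types only at the use site.
//   val ta = new TypeAgnostic { def toFilter[Req, Rep] = Filter.identity[Req, Rep] }
//   val f: Filter[Int, String, Int, String] = ta.toFilter[Int, String]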
def identity[Req, Rep] = new SimpleFilter[Req, Rep] {
override def andThen[Req2, Rep2](next: Filter[Req, Rep, Req2, Rep2]) = next
override def andThen(service: Service[Req, Rep]) = service
override def andThen(factory: ServiceFactory[Req, Rep]) = factory
def apply(request: Req, service: Service[Req, Rep]) = service(request)
}
def mk[ReqIn, RepOut, ReqOut, RepIn](
f: (ReqIn, ReqOut => Future[RepIn]) => Future[RepOut]
): Filter[ReqIn, RepOut, ReqOut, RepIn] = new Filter[ReqIn, RepOut, ReqOut, RepIn] {
def apply(request: ReqIn, service: Service[ReqOut, RepIn]) = f(request, service)
}
/**
* Chooses a filter to apply based on incoming requests. If the given partial
* function is not defined at the request, then the request goes directly to
* the next service.
*
* @param pf a partial function mapping requests to Filters that should
* be applied
*/
def choose[Req, Rep](
pf: PartialFunction[Req, Filter[Req, Rep, Req, Rep]]
): Filter[Req, Rep, Req, Rep] = new Filter[Req, Rep, Req, Rep] {
private[this] val const: (Req => SimpleFilter[Req, Rep]) =
Function.const(Filter.identity[Req, Rep])
def apply(request: Req, service: Service[Req, Rep]): Future[Rep] =
pf.applyOrElse(request, const)(request, service)
}
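// For example (illustrative only; `needsAuth` and `authFilter` are hypothetical):
//   val maybeAuthed: Filter[Req, Rep, Req, Rep] = Filter.choose {
//     case req if req.needsAuth => authFilter
//   }
// Requests for which the partial function is not defined bypass authFilter and go
// straight to the underlying service.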
}
| travisbrown/finagle | finagle-core/src/main/scala/com/twitter/finagle/Filter.scala | Scala | apache-2.0 | 6,396 |
/**
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
package com.ksmpartners.ernie
import io.gatling.com.ksmpartners.ErnieGatling
import io.gatling.core.Predef._
import bootstrap._
import java.lang.String
import scala.concurrent.duration._
import scala.Some
import com.ksmpartners.ernie.model.ReportType
/**
* Provides user scenario(s) that can be used to stress test any Ernie protocol
*/
object ErnieSimulation {
/**
* Execute a basic scenario:
1. Create a new definition
1. Get all available definitions
1. Repeat 5 times:
- 50% of the time, create a job and immediately retrieve the output
- 50% of the time, create a job and repeat the following 5 times:
1. Select a random job
1. Then, randomly:
- 60% of the time get that job's output
- 40% of the time pause for 1 second
*/
def scn(s: String, e: ErnieGatling) = {
scenario(s)
.exec(session => {
session.set("postCount", range(1, 5))
.set("resCount", range(4, 8))
.set("defs", List())
})
.randomSwitch(5 -> exec(e.createDef(Thread.currentThread.getContextClassLoader.getResource("test_def_params.rptdesign").getPath)),
95 -> pause(1 second))
.exec(e.getDefs)
.repeat(5) {
randomSwitch(
50 -> exec(e.postJob(Some("${defs(0)}"), ReportType.PDF)).exec(e.getResult(None)),
50 -> exec(e.postJob(Some("${defs(0)}"), ReportType.PDF))).repeat(5) {
exec(session => {
val jobs = session.get[List[Long]]("jobs") getOrElse List.empty[Long]
session.set("currentJob", scala.util.Random.shuffle(jobs).headOption getOrElse (session.get[Long]("currentJob") getOrElse -1L))
})
randomSwitch(
60 -> exec(e.getResult(None)),
40 -> pause(1 second))
}
}
}
def range(start: Int, end: Int): Int = start + scala.util.Random.nextInt(end - start + 1)
} | ksmpartners/ernie | ernie-gatling/src/main/scala/ErnieSimulation.scala | Scala | apache-2.0 | 2,444 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.util
import com.twitter.conversions.time._
import com.twitter.logging.Logger
import com.twitter.util.Await
import com.twitter.zipkin.common._
import com.twitter.zipkin.query.Trace
import com.twitter.zipkin.storage.SpanStore
import java.nio.ByteBuffer
class SpanStoreValidator(
newSpanStore: => SpanStore,
log: Logger = Logger.get("ValidateSpanStore")
) {
val ep = Endpoint(123, 123, "service")
def binaryAnnotation(key: String, value: String) =
BinaryAnnotation(key, ByteBuffer.wrap(value.getBytes), AnnotationType.String, Some(ep))
val spanId = 456
val ann1 = Annotation(1, "cs", Some(ep))
val ann2 = Annotation(2, "sr", None)
val ann3 = Annotation(2, "custom", Some(ep))
val ann4 = Annotation(2, "custom", Some(ep))
val span1 = Span(123, "methodcall", spanId, None, List(ann1, ann3),
List(binaryAnnotation("BAH", "BEH")))
val span2 = Span(123, "methodcall", spanId, None, List(ann2),
List(binaryAnnotation("BAH2", "BEH2")))
val span3 = Span(123, "methodcall", spanId, None, List(ann2, ann3, ann4),
List(binaryAnnotation("BAH2", "BEH2")))
val spanEmptySpanName = Span(123, "", spanId, None, List(ann1, ann2), List())
val spanEmptyServiceName = Span(123, "spanname", spanId, None, List(), List())
val mergedSpan = Span(123, "methodcall", spanId, None,
List(ann1, ann2), List(binaryAnnotation("BAH2", "BEH2")))
def resetAndLoadStore(spans: Seq[Span]): SpanStore = {
val store = newSpanStore
Await.result(store(spans))
store
}
private[this] var tests: Map[String, (() => Unit)] = Map.empty
private[this] def test(name: String)(f: => Unit) {
tests += (name -> f _)
}
def validate {
var passed = true
val spanStoreName = newSpanStore.getClass.getName.split('.').last
tests foreach { case (name, f) =>
println("validating %s: %s".format(spanStoreName, name))
try {
f(); println(" pass")
} catch { case e: Throwable =>
passed = false
log.error(e, "validation failed")
}
}
assert(passed)
}
test("get by trace id") {
val store = resetAndLoadStore(Seq(span1))
val spans = Await.result(store.getSpansByTraceId(span1.traceId))
assert(spans.size == 1)
assert(spans.head == span1)
}
test("get by trace ids") {
val span666 = Span(666, "methodcall2", spanId, None, List(ann2),
List(binaryAnnotation("BAH2", "BEH2")))
val store = resetAndLoadStore(Seq(span1, span666))
val actual1 = Await.result(store.getSpansByTraceIds(Seq(span1.traceId)))
assert(!actual1.isEmpty)
val trace1 = Trace(actual1(0))
assert(!trace1.spans.isEmpty)
assert(trace1.spans(0) == span1)
val actual2 = Await.result(store.getSpansByTraceIds(Seq(span1.traceId, span666.traceId)))
assert(actual2.size == 2)
val trace2 = Trace(actual2(0))
assert(!trace2.spans.isEmpty)
assert(trace2.spans(0) == span1)
val trace3 = Trace(actual2(1))
assert(!trace3.spans.isEmpty)
assert(trace3.spans(0) == span666)
}
test("get by trace ids returns an empty list if nothing is found") {
val store = resetAndLoadStore(Seq())
val spans = Await.result(store.getSpansByTraceIds(Seq(span1.traceId)))
assert(spans.isEmpty)
}
test("alter TTL on a span") {
val store = resetAndLoadStore(Seq(span1))
Await.result(store.setTimeToLive(span1.traceId, 1234.seconds))
assert(Await.result(store.getTimeToLive(span1.traceId)) == 1234.seconds)
}
test("get spans by name") {
val store = resetAndLoadStore(Seq(span1))
assert(Await.result(store.getSpanNames("service")) == Set(span1.name))
}
test("get service names") {
val store = resetAndLoadStore(Seq(span1))
assert(Await.result(store.getAllServiceNames) == span1.serviceNames)
}
// TODO: endTs seems wrong here
test("get trace ids by name") {
val store = resetAndLoadStore(Seq(span1))
assert(Await.result(store.getTraceIdsByName("service", None, 0, 3)).head.traceId == span1.traceId)
assert(Await.result(store.getTraceIdsByName("service", Some("methodcall"), 0, 3)).head.traceId == span1.traceId)
assert(Await.result(store.getTraceIdsByName("badservice", None, 0, 3)).isEmpty)
assert(Await.result(store.getTraceIdsByName("service", Some("badmethod"), 0, 3)).isEmpty)
assert(Await.result(store.getTraceIdsByName("badservice", Some("badmethod"), 0, 3)).isEmpty)
}
//TODO
test("get traces duration") {
println(" - not implemented")
// FakeCassandra doesn't support order and limit (!?)
}
test("get trace ids by annotation") {
val store = resetAndLoadStore(Seq(span1))
// fetch by time based annotation, find trace
val res1 = Await.result(store.getTraceIdsByAnnotation("service", "custom", None, 100, 3))
assert(res1.head.traceId == span1.traceId)
// should not find any traces since the core annotation doesn't exist in index
val res2 = Await.result(store.getTraceIdsByAnnotation("service", "cs", None, 100, 3))
assert(res2.isEmpty)
// should find traces by the key and value annotation
val res3 = Await.result(store.getTraceIdsByAnnotation("service", "BAH", Some(ByteBuffer.wrap("BEH".getBytes)), 100, 3))
assert(res3.head.traceId == span1.traceId)
}
test("wont index empty service names") {
val store = resetAndLoadStore(Seq(spanEmptyServiceName))
assert(Await.result(store.getAllServiceNames).isEmpty)
}
test("wont index empty span names") {
val store = resetAndLoadStore(Seq(spanEmptySpanName))
assert(Await.result(store.getSpanNames(spanEmptySpanName.name)).isEmpty)
}
}
| willCode2Surf/zipkin | zipkin-common/src/main/scala/com/twitter/zipkin/storage/util/SpanStoreValidator.scala | Scala | apache-2.0 | 6,212 |
package aima.core.agent.basic
import aima.core.agent.StatelessAgent
import aima.core.agent.basic.OnlineDFSAgent.IdentifyState
import aima.core.agent.basic.OnlineDFSAgentState.{RESULT, UNBACKTRACKED, UNTRIED}
import aima.core.fp.Eqv
import aima.core.fp.Eqv.Implicits._
import aima.core.search.api.OnlineSearchProblem
/**
* <pre>
* function ONLINE-DFS-AGENT(s′) returns an action
* inputs: s′, a percept that identifies the current state
* persistent: result, a table, indexed by state and action, initially empty
* untried, a table that lists, for each state, the actions not yet tried
* unbacktracked, a table that lists, for each state, the backtracks not yet tried
* s, a, the previous state and action, initially null
*
* if GOAL-TEST(s′) then return stop
* if s′ is a new state (not in untried) then untried[s′] ← ACTIONS(s′)
* if s is not null and s′ ≠ result[s, a] then
* result[s, a] ← s′
* add s to the front of the unbacktracked[s′]
* if untried[s′] is empty then
* if unbacktracked[s′] is empty then return stop
* else a ← an action b such that result[s′, b] = POP(unbacktracked[s′])
* else a ← POP(untried[s′])
* s ← s′
* return a
* </pre>
*
* @author Shawn Garner
*/
final class OnlineDFSAgent[PERCEPT, ACTION, STATE: Eqv](
identifyStateFor: IdentifyState[PERCEPT, STATE],
onlineProblem: OnlineSearchProblem[ACTION, STATE],
stop: ACTION
) extends StatelessAgent[PERCEPT, ACTION, OnlineDFSAgentState[ACTION, STATE]] {
import OnlineDFSAgentState.Implicits._
type RESULT_TYPE = RESULT[ACTION, STATE]
type UNTRIED_TYPE = UNTRIED[ACTION, STATE]
type UNBACKTRACKED_TYPE = UNBACKTRACKED[STATE]
override val agentFunction: AgentFunction = {
case (percept, priorAgentState) =>
val sPrime = identifyStateFor(percept)
if (onlineProblem.isGoalState(sPrime)) {
(stop, priorAgentState.copy(previousAction = Some(stop)))
} else {
val updatedUntried: UNTRIED_TYPE =
priorAgentState.untried.computeIfAbsent(sPrime, _ => onlineProblem.actions(sPrime))
val (updatedResult, updatedUnbacktracked): (RESULT_TYPE, UNBACKTRACKED_TYPE) =
(priorAgentState.previousState, priorAgentState.previousAction) match {
case (Some(_s), Some(_a)) if !priorAgentState.result.get(_s).flatMap(_.get(_a)).contains(sPrime) =>
val resultOrigActionToState: Map[ACTION, STATE] =
priorAgentState.result.getOrElse(_s, Map.empty[ACTION, STATE])
val updatedResultActionToState
: Map[ACTION, STATE] = resultOrigActionToState.put(_a, sPrime) // TODO: could be less verbose with lense
(
priorAgentState.result.put(_s, updatedResultActionToState),
priorAgentState.unbacktracked.transformValue(sPrime, fv => fv.fold(List(_s))(st => _s :: st))
)
case _ =>
(
priorAgentState.result,
priorAgentState.unbacktracked
)
}
val updatedUntriedList: List[ACTION] = updatedUntried.get(sPrime).toList.flatten
val updatedAgentState: OnlineDFSAgentState[ACTION, STATE] = updatedUntriedList match {
case Nil =>
val unbacktrackedList: List[STATE] = updatedUnbacktracked.get(sPrime).toList.flatten
unbacktrackedList match {
case Nil =>
priorAgentState.copy(
previousAction = Some(stop),
previousState = Some(sPrime),
untried = updatedUntried,
result = updatedResult,
unbacktracked = updatedUnbacktracked
)
case popped :: remainingUnbacktracked =>
val action: Option[ACTION] =
updatedResult.getOrElse(sPrime, Map.empty[ACTION, STATE]).toList.collectFirst {
case (action, state) if popped === state => action
}
priorAgentState.copy(
previousAction = action,
previousState = Some(sPrime),
untried = updatedUntried,
result = updatedResult,
unbacktracked = updatedUnbacktracked.updated(sPrime, remainingUnbacktracked)
)
}
case popped :: remainingUntried =>
priorAgentState.copy(
previousAction = Some(popped),
previousState = Some(sPrime),
untried = updatedUntried.updated(sPrime, remainingUntried),
result = updatedResult,
unbacktracked = updatedUnbacktracked
)
}
(updatedAgentState.previousAction.getOrElse(stop), updatedAgentState)
}
}
}
final case class OnlineDFSAgentState[ACTION, STATE](
result: RESULT[ACTION, STATE],
untried: UNTRIED[ACTION, STATE],
unbacktracked: UNBACKTRACKED[STATE],
previousState: Option[STATE], // s
previousAction: Option[ACTION] // a
)
object OnlineDFSAgentState {
def apply[ACTION, STATE] =
new OnlineDFSAgentState[ACTION, STATE](
result = Map.empty,
untried = Map.empty,
unbacktracked = Map.empty,
previousState = None,
previousAction = None
)
type RESULT[ACTION, STATE] = Map[STATE, Map[ACTION, STATE]]
type UNTRIED[ACTION, STATE] = Map[STATE, List[ACTION]]
type UNBACKTRACKED[STATE] = Map[STATE, List[STATE]]
object Implicits {
implicit class MapOps[K, V](m: Map[K, V]) {
def put(k: K, v: V): Map[K, V] =
m.updated(k, v)
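      // Returns this map unchanged if k is already present; otherwise returns a copy with k -> v(k) added
      // (an immutable analogue of java.util.Map#computeIfAbsent).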
def computeIfAbsent(k: K, v: K => V): Map[K, V] = {
if (m.contains(k)) {
m
} else {
put(k, v(k))
}
}
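      // Returns a copy with the value at k replaced by fv applied to the current value (None if k is absent).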
def transformValue(k: K, fv: Option[V] => V): Map[K, V] = {
val oldValue = m.get(k)
val newValue = fv(oldValue)
m.updated(k, newValue)
}
}
}
}
object OnlineDFSAgent {
type IdentifyState[PERCEPT, STATE] = PERCEPT => STATE
}
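// Usage sketch (illustrative only: the maze types, `identify`, `problem`, `stopAction` and `percept`
// are hypothetical, and the exact call shape of `agentFunction` is whatever StatelessAgent defines):
//   val agent = new OnlineDFSAgent[MazePercept, MazeAction, MazeState](identify, problem, stopAction)
//   val initial = OnlineDFSAgentState[MazeAction, MazeState]
//   val (action, nextAgentState) = agent.agentFunction(percept, initial)
//   // act on `action`, then call agentFunction again with the next percept and `nextAgentState`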
| aimacode/aima-scala | core/src/main/scala/aima/core/agent/basic/OnlineDFSAgent.scala | Scala | mit | 6,234 |
package io.udash.web.guide.views.ext.demo.bootstrap
import io.udash.bootstrap.button.UdashButtonOptions
import io.udash.web.guide.demos.AutoDemo
import io.udash.web.guide.styles.partials.GuideStyles
import scalatags.JsDom.all._
object ButtonsDemo extends AutoDemo {
private val (rendered, source) = {
import io.udash._
import io.udash.bootstrap._
import BootstrapStyles._
import io.udash.bootstrap.button.UdashButton
import io.udash.css.CssStyle
import io.udash.css.CssView._
import scalatags.JsDom.all._
import scala.util.Random
val disabledButtons = Property(Set.empty[Int])
def bottomMargin: CssStyle = {
Spacing.margin(
side = Side.Bottom,
size = SpacingSize.Normal
)
}
val buttons = Color.values.map(color =>
UdashButton(
options = UdashButtonOptions(
color.opt,
Size.Small.opt,
),
disabled = disabledButtons.transform(_.contains(color.ordinal))
)(_ => Seq[Modifier](
color.name,
Spacing.margin(size = SpacingSize.ExtraSmall)
))
)
val clicks = SeqProperty[String](Seq.empty)
buttons.foreach(_.listen {
case UdashButton.ButtonClickEvent(source, _) =>
clicks.append(source.render.textContent)
})
val push = UdashButton(
options = UdashButtonOptions(
size = Size.Large.opt,
block = true
)
)("Disable random buttons!")
push.listen {
case UdashButton.ButtonClickEvent(_, _) =>
clicks.set(Seq.empty)
val maxDisabledCount = Random.nextInt(buttons.size + 1)
disabledButtons.set(Seq.fill(maxDisabledCount)(
Random.nextInt(buttons.size)
).toSet)
}
div(
div(bottomMargin)(push),
div(
Display.flex(),
Flex.justifyContent(FlexContentJustification.Center),
bottomMargin
)(buttons),
h4("Clicks: "),
produce(clicks)(seq =>
ul(Card.card, Card.body, Background.color(Color.Light))(seq.map(li(_))).render
)
).render
}.withSourceCode
override protected def demoWithSource(): (Modifier, Iterator[String]) = {
import io.udash.css.CssView._
(rendered.setup(_.applyTags(GuideStyles.frame)), source.linesIterator)
}
}
| UdashFramework/udash-core | guide/guide/.js/src/main/scala/io/udash/web/guide/views/ext/demo/bootstrap/ButtonsDemo.scala | Scala | apache-2.0 | 2,280 |
package synthesis
import org.scalatest._
class APASyntaxTreeTest extends FunSpec with Matchers {
def O(name: String) = OutputVar(name)
def I(name: String) = InputVar(name)
implicit def OutputVarToPACombination(o: OutputVar):APACombination = APACombination(o)
implicit def InputTermConvert(i: ConvertibleToInputTerm):APAInputTerm = i.toInputTerm()
implicit def IntegerToPACombination(k: Int):APAInputCombination = APAInputCombination(k)
implicit def InputToPACombination(k: APAInputTerm):APACombination = APACombination(k)
val x = O("x")
val x1 = O("x1")
val y = O("y")
val y1 = O("y1")
val z = O("z")
val b = I("b")
val x0 = I("x0")
val c = I("c")
val d = I("d")
it("should get the right equations from a general formula") {
val pac = (x >= 0) && ((x >= 1) || ((y+z) <= 0)) && ((x < 1) || ((y-z) <= 0))
pac.getEquations.toList should equal (
List((x >= 0), (x >= 1), (x < 1)) ::
List((x >= 0), (x >= 1), ((y-z) <= 0)) ::
List((x >= 0), ((y+z) <= 0), (x < 1)) ::
List((x >= 0), ((y+z) <= 0), ((y-z) <= 0)) ::
Nil
)
}
it("should be able to extract equalities from a formula") {
val eq1 = (x+y === b).asInstanceOf[APAEqualZero]
val eq2 = (x-y === z+c).asInstanceOf[APAEqualZero]
val ineq0 = (x > z-c)
val ineq1 = x < y
val ineq2 = x > y
val pac = eq1 && eq2 && ineq0 && (ineq1 || ineq2)
val fs = pac.getLazyEquations
fs.eqs should equal (eq1::eq2::Nil)
fs.noneqs should equal (ineq0::Nil)
fs.remaining.head should equal (FormulaSplit(Nil, ineq1::Nil, Stream.empty))
fs.remaining.tail.head should equal (FormulaSplit(Nil, ineq2::Nil, Stream.empty))
fs.remaining.tail.tail should be ('empty)
}
}
| epfl-lara/comfusy | src/test/scala/APASyntaxTreeTest.scala | Scala | bsd-2-clause | 1,734 |
package play.modules.gjson
import org.joda.time.DateTime
import org.specs2.mutable._
import play.api.libs.json._
class JSONPicklerSpec extends Specification {
import JSON.{toJSON, fromJSON}
"pseudo-inverse" in {
val p = Prod1("John", Some(5))
fromJSON[Prod1](toJSON(p)).toEither must beRight(p)
}
"fromJSON" should {
"accepts optional values" in {
fromJSON[Prod1](Json.obj("name" -> "John")).toEither must beRight(Prod1("John", None))
}
"returns error message" in {
val errMsg1 = "type mismatch at `name': expected: play.api.libs.json.JsString, found: null"
val errMsg2 = "type mismatch at `p1.name': expected: play.api.libs.json.JsString, found: null"
val js1 = Json.obj("value" -> 1)
val js2 = Json.obj("name" -> "John", "value" -> 1, "p1" -> Json.obj("value" -> 1))
fromJSON[Prod1](js1).toEither must beLeft(errMsg1)
fromJSON[Prod2](js2).toEither must beLeft(errMsg2)
}
}
case class Prod1(name: String, value: Option[Int])
case class Prod2(name: String, value: Option[Int], p1: Prod1)
}
| kindleit/gkit | play-gjson/src/test/scala/JSONPicklerSpec.scala | Scala | apache-2.0 | 1,078 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.coders.instances
import com.spotify.scio.coders.Coder
import com.twitter.algebird.{BF, Batched, CMS, TopK}
trait AlgebirdCoders {
implicit def cmsCoder[K]: Coder[CMS[K]] = Coder.kryo
implicit def bfCoder[K]: Coder[BF[K]] = Coder.kryo
implicit def topKCoder[K]: Coder[TopK[K]] = Coder.kryo
implicit def batchedCoder[U]: Coder[Batched[U]] = Coder.kryo
}
| regadas/scio | scio-core/src/main/scala/com/spotify/scio/coders/instances/AlgebirdCoders.scala | Scala | apache-2.0 | 987 |
package com.gilesc
package mynab
package service
import com.gilesc.mynab.repository.mysql._
import com.gilesc.mynab.testkit.DatabaseTestCase
import org.scalacheck.Gen
import doobie.util.transactor.Transactor
import cats.effect.IO
class MysqlCreateCategoryRepositorySpec extends DatabaseTestCase {
case class DatabaseConfig(
driver: String,
url: String,
username: String,
password: String
)
val userId = UserId(1)
val config = pureconfig.loadConfigOrThrow[DatabaseConfig]("mynab.database")
def transactor = Transactor.fromDriverManager[IO](
config.driver,
config.url,
config.username,
config.password)
private val groupRepo = new MysqlCategoryGroupRepository[IO](transactor)
private val categoryRepo = new MysqlCategoryRepository[IO](transactor)
behavior of "Category Service With MySQL"
it should "allow me to store a category with new major / minor names" in {
val service = CreateCategoryService.apply[IO](groupRepo, categoryRepo)
val majorName = CategoryName(Gen.alphaStr.sample.get)
val minorName = CategoryName(Gen.alphaStr.sample.get)
val ctx = CreateCategoryContext(userId, majorName, minorName)
val Right(result) = service(ctx).unsafeRunSync
result.group.name should be(majorName)
result.name should be(minorName)
}
it should "allow me to store a category with existing major name, new minor name" in {
val service = CreateCategoryService.apply[IO](groupRepo, categoryRepo)
val majorName = CategoryName(Gen.alphaStr.sample.get)
val firstMinor = CategoryName(Gen.alphaStr.sample.get)
val secondMinor = CategoryName(Gen.alphaStr.sample.get)
val firstCtx = CreateCategoryContext(userId, majorName, firstMinor)
val secondCtx = CreateCategoryContext(userId, majorName, secondMinor)
val Right(firstResult) = service(firstCtx).unsafeRunSync
firstResult.group.name should be(majorName)
firstResult.name should be(firstMinor)
val Right(secondResult) = service(secondCtx).unsafeRunSync
secondResult.group.name should be(majorName)
secondResult.name should be(secondMinor)
}
it should "allow me to store a category with existing major / minor name" in {
val service = CreateCategoryService.apply[IO](groupRepo, categoryRepo)
val majorName = CategoryName(Gen.alphaStr.sample.get)
val minorName = CategoryName(Gen.alphaStr.sample.get)
val ctx = CreateCategoryContext(userId, majorName, minorName)
val Right(first) = service(ctx).unsafeRunSync
first.group.name should be(majorName)
first.name should be(minorName)
val Right(second) = service(ctx).unsafeRunSync
second.group.name should be(majorName)
second.name should be(minorName)
}
it should "give me a proper error when creating and no user is found" in {
val service = CreateCategoryService.apply[IO](groupRepo, categoryRepo)
val majorName = CategoryName(Gen.alphaStr.sample.get)
val minorName = CategoryName(Gen.alphaStr.sample.get)
val ctx = CreateCategoryContext(UserId(Long.MaxValue), majorName, minorName)
val Left(result) = service(ctx).unsafeRunSync
result should be(ServiceError.UnknownUser(UserId(Long.MaxValue)))
}
}
| CraigGiles/mynab | service/src/it/scala/com/gilesc/mynab/service/MysqlCreateCategoryServiceSpec.scala | Scala | mit | 3,202 |
/**
* mqlight-qpid-integration-test
*
* Written in 2014 by Bernard Leach <[email protected]>
*
* To the extent possible under law, the author(s) have dedicated all copyright and related
* and neighboring rights to this software to the public domain worldwide. This software is
* distributed without any warranty.
*
* You should have received a copy of the CC0 Public Domain Dedication along with this software.
* If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
*/
package org.leachbj.test.qpid.engine
import akka.actor.ActorLogging
import akka.camel.{CamelMessage, Consumer}
class ConsumerEndpointActor extends Consumer with ActorLogging {
// disableReplyTo=true is required as this consumer does not reply to the incoming messages
def endpointUri = "amqp:share:share1:topic1?disableReplyTo=true"
def receive = {
case msg: CamelMessage =>
log.debug("message received {}", msg.toString)
}
}
| leachbj/mqlight-qpid-integration-test | camel-amqp/src/main/scala/org/leachbj/test/qpid/engine/ConsumerEndpointActor.scala | Scala | cc0-1.0 | 947 |
package test_expect_failure.missing_direct_deps.internal_deps;
object A {
def foo = {
B.foo
C.foo
}
def main = foo
} | smparkes/rules_scala | test_expect_failure/missing_direct_deps/internal_deps/A.scala | Scala | apache-2.0 | 125 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.reflect
package runtime
private[reflect] trait Gil {
self: SymbolTable =>
// fixme... please...
// there are the following avenues of optimization we discussed with Roland:
// 1) replace PackageScope locks with ConcurrentHashMap, because PackageScope materializers seem to be idempotent
// 2) unlock unpickling completers by verifying that they are idempotent or moving non-idempotent parts
// 3) remove the necessity in global state for isSubType
private lazy val gil = new java.util.concurrent.locks.ReentrantLock
@inline final def gilSynchronized[T](body: => T): T = {
if (isCompilerUniverse) body
else {
try {
gil.lock()
body
} finally {
gil.unlock()
}
}
}
}
| scala/scala | src/reflect/scala/reflect/runtime/Gil.scala | Scala | apache-2.0 | 1,042 |
package org.jetbrains.plugins.scala
package lang
package resolve
import com.intellij.psi.impl.source.resolve.ResolveCache
import org.jetbrains.plugins.scala.lang.psi.api.base.ScStableCodeReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.{ScConstructorPattern, ScInfixPattern, ScInterpolationPattern}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.{ScImportExpr, ScImportSelector}
import org.jetbrains.plugins.scala.lang.psi.types.Compatibility.Expression
import org.jetbrains.plugins.scala.lang.resolve.processor._
class StableCodeReferenceElementResolver(reference: ResolvableStableCodeReferenceElement, shapeResolve: Boolean,
allConstructorResults: Boolean, noConstructorResolve: Boolean)
extends ResolveCache.PolyVariantResolver[ScStableCodeReferenceElement] {
def resolve(ref: ScStableCodeReferenceElement, incomplete: Boolean) = {
val kinds = ref.getKinds(incomplete = false)
val proc = if (ref.isConstructorReference && !noConstructorResolve) {
val constr = ref.getConstructor.get
val typeArgs = constr.typeArgList.map(_.typeArgs).getOrElse(Seq())
val effectiveArgs = constr.arguments.toList.map(_.exprs.map(new Expression(_))) match {
case List() => List(List())
case x => x
}
new ConstructorResolveProcessor(ref, ref.refName, effectiveArgs, typeArgs, kinds, shapeResolve, allConstructorResults)
} else ref.getContext match {
//last ref may import many elements with the same name
case e: ScImportExpr if e.selectorSet == None && !e.singleWildcard =>
new CollectAllForImportProcessor(kinds, ref, reference.refName)
case e: ScImportExpr if e.singleWildcard => new ResolveProcessor(kinds, ref, reference.refName)
case _: ScImportSelector => new CollectAllForImportProcessor(kinds, ref, reference.refName)
case constr: ScInterpolationPattern =>
new ExtractorResolveProcessor(ref, reference.refName, kinds, constr.expectedType)
case constr: ScConstructorPattern =>
new ExtractorResolveProcessor(ref, reference.refName, kinds, constr.expectedType)
case infix: ScInfixPattern => new ExtractorResolveProcessor(ref, reference.refName, kinds, infix.expectedType)
case _ => new ResolveProcessor(kinds, ref, reference.refName)
}
reference.doResolve(ref, proc)
}
}
| triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/resolve/StableCodeReferenceElementResolver.scala | Scala | apache-2.0 | 2,403 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import java.io.File
import SharedHelpers.{createTempDirectory, thisLineNumber}
import enablers.Existence
import Matchers._
class ShouldExistLogicalAndImplicitSpec extends Spec {
trait Thing {
def exist: Boolean
}
val something = new Thing {
val exist = true
}
val nothing = new Thing {
val exist = false
}
implicit def existenceOfThing[T <: Thing]: Existence[T] =
new Existence[T] {
def exists(thing: T): Boolean = thing.exist
}
val fileName = "ShouldExistLogicalAndImplicitSpec.scala"
def doesNotExist(left: Any): String =
FailureMessages("doesNotExist", left)
def exists(left: Any): String =
FailureMessages("exists", left)
def wasEqualTo(left: Any, right: Any): String =
FailureMessages("wasEqualTo", left, right)
def wasNotEqualTo(left: Any, right: Any): String =
FailureMessages("wasNotEqualTo", left, right)
def equaled(left: Any, right: Any): String =
FailureMessages("equaled", left, right)
def didNotEqual(left: Any, right: Any): String =
FailureMessages("didNotEqual", left, right)
def allError(left: Any, message: String, lineNumber: Int): String = {
val messageWithIndex = UnquotedString(" " + FailureMessages("forAssertionsGenTraversableMessageWithStackDepth", 0, UnquotedString(message), UnquotedString(fileName + ":" + lineNumber)))
FailureMessages("allShorthandFailed", messageWithIndex, left)
}
object `The exist syntax when used with File` {
def `should do nothing when the file exists` {
something should (equal (something) and exist)
something should (exist and equal (something))
something should (be_== (something) and exist)
something should (exist and be_== (something))
}
def `should throw TFE with correct stack depth and message when the file does not exist` {
val e1 = intercept[exceptions.TestFailedException] {
nothing should (equal (nothing) and exist)
}
assert(e1.message === Some(equaled(nothing, nothing) + ", but " + doesNotExist(nothing)))
assert(e1.failedCodeFileName === Some(fileName))
assert(e1.failedCodeLineNumber === Some(thisLineNumber - 4))
val e2 = intercept[exceptions.TestFailedException] {
something should (exist and equal (nothing))
}
assert(e2.message === Some(exists(something) + ", but " + didNotEqual(something, nothing)))
assert(e2.failedCodeFileName === Some(fileName))
assert(e2.failedCodeLineNumber === Some(thisLineNumber - 4))
val e3 = intercept[exceptions.TestFailedException] {
nothing should (be_== (nothing) and exist)
}
assert(e3.message === Some(wasEqualTo(nothing, nothing) + ", but " + doesNotExist(nothing)))
assert(e3.failedCodeFileName === Some(fileName))
assert(e3.failedCodeLineNumber === Some(thisLineNumber - 4))
val e4 = intercept[exceptions.TestFailedException] {
something should (exist and be_== (nothing))
}
assert(e4.message === Some(exists(something) + ", but " + wasNotEqualTo(something, nothing)))
assert(e4.failedCodeFileName === Some(fileName))
assert(e4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
def `should do nothing when it is used with not and the file does not exists` {
nothing should (equal (nothing) and not (exist))
nothing should (not (exist) and equal (nothing))
nothing should (be_== (nothing) and not (exist))
nothing should (not (exist) and be_== (nothing))
}
def `should throw TFE with correct stack depth and message when it is used with not and the file exists` {
val e1 = intercept[exceptions.TestFailedException] {
something should (equal (something) and not (exist))
}
assert(e1.message === Some(equaled(something, something) + ", but " + exists(something)))
assert(e1.failedCodeFileName === Some(fileName))
assert(e1.failedCodeLineNumber === Some(thisLineNumber - 4))
val e2 = intercept[exceptions.TestFailedException] {
nothing should (not (exist) and equal (something))
}
assert(e2.message === Some(doesNotExist(nothing) + ", but " + didNotEqual(nothing, something)))
assert(e2.failedCodeFileName === Some(fileName))
assert(e2.failedCodeLineNumber === Some(thisLineNumber - 4))
val e3 = intercept[exceptions.TestFailedException] {
something should (be_== (something) and not (exist))
}
assert(e3.message === Some(wasEqualTo(something, something) + ", but " + exists(something)))
assert(e3.failedCodeFileName === Some(fileName))
assert(e3.failedCodeLineNumber === Some(thisLineNumber - 4))
val e4 = intercept[exceptions.TestFailedException] {
nothing should (not (exist) and be_== (something))
}
assert(e4.message === Some(doesNotExist(nothing) + ", but " + wasNotEqualTo(nothing, something)))
assert(e4.failedCodeFileName === Some(fileName))
assert(e4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
object `The exist syntax when used with all(xs)` {
def `should do nothing when the file exists` {
all(List(something)) should (equal (something) and exist)
all(List(something)) should (exist and equal (something))
all(List(something)) should (be_== (something) and exist)
all(List(something)) should (exist and be_== (something))
}
def `should throw TFE with correct stack depth and message when the file does not exist` {
val left1 = List(nothing)
val e1 = intercept[exceptions.TestFailedException] {
all(left1) should (equal (nothing) and exist)
}
assert(e1.message === Some(allError(left1, equaled(nothing, nothing) + ", but " + doesNotExist(nothing), thisLineNumber - 2)))
assert(e1.failedCodeFileName === Some(fileName))
assert(e1.failedCodeLineNumber === Some(thisLineNumber - 4))
val left2 = List(something)
val e2 = intercept[exceptions.TestFailedException] {
all(left2) should (exist and equal (nothing))
}
assert(e2.message === Some(allError(left2, exists(something) + ", but " + didNotEqual(something, nothing), thisLineNumber - 2)))
assert(e2.failedCodeFileName === Some(fileName))
assert(e2.failedCodeLineNumber === Some(thisLineNumber - 4))
val left3 = List(nothing)
val e3 = intercept[exceptions.TestFailedException] {
all(left3) should (be_== (nothing) and exist)
}
assert(e3.message === Some(allError(left3, wasEqualTo(nothing, nothing) + ", but " + doesNotExist(nothing), thisLineNumber - 2)))
assert(e3.failedCodeFileName === Some(fileName))
assert(e3.failedCodeLineNumber === Some(thisLineNumber - 4))
val left4 = List(something)
val e4 = intercept[exceptions.TestFailedException] {
all(left4) should (exist and be_== (nothing))
}
assert(e4.message === Some(allError(left4, exists(something) + ", but " + wasNotEqualTo(something, nothing), thisLineNumber - 2)))
assert(e4.failedCodeFileName === Some(fileName))
assert(e4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
def `should do nothing when it is used with not and the file does not exists` {
all(List(nothing)) should (equal (nothing) and not (exist))
all(List(nothing)) should (not (exist) and equal (nothing))
all(List(nothing)) should (be_== (nothing) and not (exist))
all(List(nothing)) should (not (exist) and be_== (nothing))
}
def `should throw TFE with correct stack depth and message when it is used with not and the file exists` {
val left1 = List(something)
val e1 = intercept[exceptions.TestFailedException] {
all(left1) should (equal (something) and not (exist))
}
assert(e1.message === Some(allError(left1, equaled(something, something) + ", but " + exists(something), thisLineNumber - 2)))
assert(e1.failedCodeFileName === Some(fileName))
assert(e1.failedCodeLineNumber === Some(thisLineNumber - 4))
val left2 = List(nothing)
val e2 = intercept[exceptions.TestFailedException] {
all(left2) should (not (exist) and equal (something))
}
assert(e2.message === Some(allError(left2, doesNotExist(nothing) + ", but " + didNotEqual(nothing, something), thisLineNumber - 2)))
assert(e2.failedCodeFileName === Some(fileName))
assert(e2.failedCodeLineNumber === Some(thisLineNumber - 4))
val left3 = List(something)
val e3 = intercept[exceptions.TestFailedException] {
all(left3) should (be_== (something) and not (exist))
}
assert(e3.message === Some(allError(left3, wasEqualTo(something, something) + ", but " + exists(something), thisLineNumber - 2)))
assert(e3.failedCodeFileName === Some(fileName))
assert(e3.failedCodeLineNumber === Some(thisLineNumber - 4))
val left4 = List(nothing)
val e4 = intercept[exceptions.TestFailedException] {
all(left4) should (not (exist) and be_== (something))
}
assert(e4.message === Some(allError(left4, doesNotExist(nothing) + ", but " + wasNotEqualTo(nothing, something), thisLineNumber - 2)))
assert(e4.failedCodeFileName === Some(fileName))
assert(e4.failedCodeLineNumber === Some(thisLineNumber - 4))
}
}
} | travisbrown/scalatest | src/test/scala/org/scalatest/ShouldExistLogicalAndImplicitSpec.scala | Scala | apache-2.0 | 10,095 |
package gapt.logic.fol
import gapt.expr.formula.All
import gapt.expr.formula.And
import gapt.expr.formula.Bottom
import gapt.expr.formula.Ex
import gapt.expr.formula.Imp
import gapt.expr.formula.Neg
import gapt.expr.formula.Or
import gapt.expr.formula.Top
import gapt.expr.formula.fol.FOLAtom
import gapt.expr.formula.fol.FOLFormula
import gapt.logic.hol.simplifyPropositional
import gapt.logic.hol.toNNF
import gapt.proofs.FOLClause
import scala.annotation.tailrec
import scala.collection.mutable
object TseitinCNF {
/**
   * Generates a list of clauses in CNF from the formula f using Tseitin's transformation.
* @param f formula which should be transformed
* @return CNF satisfiability-equivalent to f
*/
def apply( f: FOLFormula ): List[FOLClause] = {
val tseitin = new TseitinCNF()
simplifyPropositional( toNNF( f ) ) match {
case And.nAry( conjuncts ) => conjuncts.flatMap( tseitin.apply )
}
}
}
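// Usage sketch (illustrative; assumes the FOL constructors imported above build formulas in the usual way):
//   val f: FOLFormula = And(FOLAtom("p"), Or(FOLAtom("q"), Neg(FOLAtom("r"))))
//   val clauses: List[FOLClause] = TseitinCNF(f)
//   // `clauses` is satisfiability-equivalent to f; fresh atoms x1, x2, ... name its subformulas.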
class TseitinCNF {
  // maps already processed subformulas to their representing atoms
val subformulaMap = mutable.Map[FOLFormula, FOLAtom]()
val hc = "x"
var fsyms = Set[String]()
var auxsyms: List[String] = List()
/**
   * Returns a list of all atom symbols used in f
* @param f formula
* @return List of all atom symbols used in f
*/
def getAtomSymbols( f: FOLFormula ): List[String] = f match {
case FOLAtom( h, args ) => List( h )
case Top() | Bottom() => List()
case Neg( f2 ) => getAtomSymbols( f2 )
case And( f1, f2 ) => getAtomSymbols( f1 ) ::: getAtomSymbols( f2 )
case Or( f1, f2 ) => getAtomSymbols( f1 ) ::: getAtomSymbols( f2 )
case Imp( f1, f2 ) => getAtomSymbols( f1 ) ::: getAtomSymbols( f2 )
case Ex( _, f2 ) => getAtomSymbols( f2 )
case All( _, f2 ) => getAtomSymbols( f2 )
case _ => throw new IllegalArgumentException( "unknown head of formula: " + f.toString )
}
def apply( f: FOLFormula ): List[FOLClause] = {
fsyms = getAtomSymbols( f ) toSet
// processFormula and transform it via Tseitin-Transformation
val pf = processFormula( f )
pf._2 :+ FOLClause( List(), List( pf._1 ) )
}
/**
   * Adds a FOLFormula to the subformula map if it does not already map to an existing atom.
   * The representing atom is returned.
   * In case f is an atom itself, nothing is added to the subformula map and the atom itself is returned.
   * @param f subformula to possibly be added to the subformula map
* @return an atom either representing the subformula or f if f is already an atom
*/
private var auxCounter: Int = 0
@tailrec
private def addIfNotExists( f: FOLFormula ): FOLAtom = f match {
case f @ FOLAtom( h, args ) => f
case _ =>
if ( subformulaMap.isDefinedAt( f ) ) {
subformulaMap( f )
} else {
auxCounter += 1
var auxsym = s"$hc$auxCounter"
if ( fsyms.contains( auxsym ) ) {
addIfNotExists( f )
} else {
auxsyms :+= auxsym
val auxAtom = FOLAtom( auxsym )
subformulaMap( f ) = auxAtom
auxAtom
}
}
}
/**
* Takes a propositional FOLFormula and processes it s.t. every subformula gets
* assigned a freshly introduced Atom which is from there on used instead of the formula
* @param f The formula to be processed.
   * @return a Tuple2, where the first component is the propositional variable representing f and the second
   *         is the list of clauses containing all the equivalences required for the representation of f by the first.
*/
def processFormula( f: FOLFormula ): Tuple2[FOLAtom, List[FOLClause]] = f match {
case f @ FOLAtom( _, _ ) => ( f, List() )
case Top() =>
val x = addIfNotExists( f )
( x, List( FOLClause( List(), List( x ) ) ) )
case Bottom() =>
val x = addIfNotExists( f )
( x, List( FOLClause( List( x ), List() ) ) )
case Neg( f2 ) =>
val pf = processFormula( f2 )
val x = addIfNotExists( f )
val x1 = pf._1
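      // the two clauses below encode x ↔ ¬x1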
val c1 = FOLClause( List( x, x1 ), List() )
val c2 = FOLClause( List(), List( x, x1 ) )
( x, pf._2 ++ List( c1, c2 ) )
case And( f1, f2 ) =>
val pf1 = processFormula( f1 )
val pf2 = processFormula( f2 )
val x = addIfNotExists( f )
val x1 = pf1._1
val x2 = pf2._1
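      // the three clauses below encode x ↔ (x1 ∧ x2)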
val c1 = FOLClause( List( x ), List( x1 ) )
val c2 = FOLClause( List( x ), List( x2 ) )
val c3 = FOLClause( List( x1, x2 ), List( x ) )
( x, pf1._2 ++ pf2._2 ++ List( c1, c2, c3 ) )
case Or( f1, f2 ) =>
val pf1 = processFormula( f1 )
val pf2 = processFormula( f2 )
val x = addIfNotExists( f )
val x1 = pf1._1
val x2 = pf2._1
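      // the three clauses below encode x ↔ (x1 ∨ x2)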
val c1 = FOLClause( List( x1 ), List( x ) )
val c2 = FOLClause( List( x2 ), List( x ) )
val c3 = FOLClause( List( x ), List( x1, x2 ) )
( x, pf1._2 ++ pf2._2 ++ List( c1, c2, c3 ) )
case Imp( f1, f2 ) =>
val pf1 = processFormula( f1 )
val pf2 = processFormula( f2 )
val x = addIfNotExists( f )
val x1 = pf1._1
val x2 = pf2._1
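      // the three clauses below encode x ↔ (x1 → x2)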
val c1 = FOLClause( List(), List( x, x1 ) )
val c2 = FOLClause( List( x2 ), List( x ) )
val c3 = FOLClause( List( x, x1 ), List( x2 ) )
( x, pf1._2 ++ pf2._2 ++ List( c1, c2, c3 ) )
case _ => throw new IllegalArgumentException( "Formula not supported in Tseitin transformation: " + f.toString )
}
}
| gapt/gapt | core/src/main/scala/gapt/logic/fol/TseitinCNF.scala | Scala | gpl-3.0 | 5,402 |
package com.gilt.opm.query
import com.gilt.opm.utils.MongoHelper
import com.mongodb.casbah.commons.MongoDBObject
import MongoHelper.toMongo
/**
* Case class representing the logic to filter a property that is equal to the given value.
*
* @param valueTranslator: see [[com.gilt.opm.query.OpmSearcher]]
*/
case class OpmPropertyEquals(property: String, value: Any, valueTranslator: Option[(String, Any) => Any] = None) extends OpmPropertyQuery {
override def isMatch(obj: Any) = obj == value
override def toMongoDBObject(prefix: String = "", matchInverse: Boolean = false) =
if (matchInverse) MongoDBObject("%s%s".format(prefix, property) -> MongoDBObject("$ne" -> toMongo(value, translate(property))))
else MongoDBObject("%s%s".format(prefix, property) -> toMongo(value, translate(property)))
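  // Sketch of the produced query (illustrative property/value; `toMongo` may further translate the value):
  //   OpmPropertyEquals("state", "published").toMongoDBObject("obj.")
  //     ~> { "obj.state" : "published" }
  //   OpmPropertyEquals("state", "published").toMongoDBObject("obj.", matchInverse = true)
  //     ~> { "obj.state" : { "$ne" : "published" } }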
} | gilt/opm | src/main/scala/com/gilt/opm/query/OpmPropertyEquals.scala | Scala | mit | 813 |
package supertaggedtests.tagged
import org.scalatest.matchers.should.Matchers
import org.scalatest.flatspec.AnyFlatSpec
import shapeless.test.illTyped
import supertagged.postfix._
import supertaggedtests.{userString, userStrings_1_lvl, userStrings_5_lvl}
class PostfixSyntax extends AnyFlatSpec with Matchers {
"Postfix syntax" should "work" in {
val user1 = userString @@ User1
testUser1(user1)
illTyped("""testUser2(user1)""", "type mismatch;.+")
val user2 = user1 !@@ User2
testUser2(user2)
illTyped("""testUser1(user2)""", "type mismatch;.+")
val userList_1_lvl = userStrings_1_lvl @@ User1
val userList_5_lvl = userStrings_5_lvl @@ User1
illTyped("""UserInt(userStrings_1_lvl)""", "could not find implicit value for parameter R.+")
illTyped("""UserInt(userStrings_5_lvl)""", "could not find implicit value for parameter R.+")
val head1 = userList_1_lvl.head
testUser1(head1)
val head5 = userList_5_lvl.head.head.head.head.head
testUser1(head5)
}
} | Rudogma/scala-supertagged | tests/src/test/scala/supertaggedtests/tagged/PostfixSyntax.scala | Scala | mit | 1,032 |
import sbt._
import Keys._
import AndroidKeys.Android
import AndroidNdkKeys._
import java.io.File
/**
* Support for the Android NDK.
*
 * Adds support for compiling C/C++ sources using the NDK.
*
* Adapted from work by Daniel Solano Gómez
*
* @author Daniel Solano Gómez, Martin Kneissl.
*/
object AndroidNdk {
/** The default name for the 'ndk-build' tool. */
val DefaultNdkBuildName = "ndk-build"
/** The default directory name for native sources. */
val DefaultJniDirectoryName = "jni"
/** The default directory name for compiled native objects. */
val DefaultObjDirectoryName = "obj"
/** The list of environment variables to check for the NDK. */
val DefaultEnvs = List("ANDROID_NDK_HOME", "ANDROID_NDK_ROOT")
/** The make environment variable name for the javah generated header directory. */
val DefaultJavahOutputEnv = "SBT_MANAGED_JNI_INCLUDE"
lazy val defaultSettings: Seq[Setting[_]] = inConfig(Android) (Seq (
ndkBuildName := DefaultNdkBuildName,
jniDirectoryName := DefaultJniDirectoryName,
objDirectoryName := DefaultObjDirectoryName,
ndkEnvs := DefaultEnvs,
javahName := "javah",
javahOutputEnv := DefaultJavahOutputEnv,
javahOutputFile := None
))
// ndk-related paths
lazy val pathSettings: Seq[Setting[_]] = inConfig(Android) (Seq (
jniSourcePath <<= (sourceDirectory, jniDirectoryName) (_ / _),
nativeOutputPath <<= (jniSourcePath) (_.getParentFile),
nativeObjectPath <<= (nativeOutputPath, objDirectoryName) (_ / _),
ndkBuildPath <<= (ndkEnvs, ndkBuildName) { (envs, ndkBuildName) =>
val paths = for {
e <- envs
p = System.getenv(e)
if p != null
b = new File(p, ndkBuildName)
if b.canExecute
} yield b
paths.headOption getOrElse (sys.error("Android NDK not found. " +
"You might need to set " + envs.mkString(" or ")))
},
javahPath <<= (javaHome, javahName) apply { (home, name) =>
home map ( h => (h / "bin" / name).absolutePath ) getOrElse name
},
javahOutputDirectory <<= (sourceManaged)(_ / "main" / DefaultJniDirectoryName )
))
private def split(file: File) = {
val parentsBottomToTop = Iterator.iterate(file)(_.getParentFile).takeWhile(_ != null).map(_.getName).toSeq
parentsBottomToTop.reverse
}
private def compose(parent: File, child: File): File = {
if (child.isAbsolute) {
child
} else {
split(child).foldLeft(parent)(new File(_,_))
}
}
private def javahTask(
javahPath: String,
classpath: Seq[File],
classes: Seq[String],
outputDirectory: File,
outputFile: Option[File],
streams: TaskStreams) {
val log = streams.log
if (classes.isEmpty) {
log.debug("No JNI classes, skipping javah")
} else {
outputDirectory.mkdirs()
val classpathArgument = classpath.map(_.getAbsolutePath()).mkString(File.pathSeparator)
val outputArguments = outputFile match {
case Some(file) =>
val outputFile = compose(outputDirectory, file)
// Neither javah nor RichFile.relativeTo will work unless the directories exist.
Option(outputFile.getParentFile) foreach (_.mkdirs())
if (! (outputFile relativeTo outputDirectory).isDefined) {
log.warn("javah output file [" + outputFile + "] is not within javah output directory [" +
outputDirectory + "], continuing anyway")
}
Seq("-o", outputFile.absolutePath)
case None => Seq("-d", outputDirectory.absolutePath)
}
val javahCommandLine = Seq(
javahPath,
"-classpath", classpathArgument) ++
outputArguments ++ classes
log.debug("Running javah: " + (javahCommandLine mkString " "))
val exitCode = Process(javahCommandLine) ! log
if (exitCode != 0) {
sys.error("javah exited with " + exitCode)
}
}
}
private def ndkBuildTask(targets: String*) =
(ndkBuildPath, javahOutputEnv, javahOutputDirectory, nativeOutputPath, streams) map {
(ndkBuildPath, javahOutputEnv, javahOutputDirectory, obj, s) =>
val ndkBuild = ndkBuildPath.absolutePath :: "-C" :: obj.absolutePath ::
(javahOutputEnv + "=" + javahOutputDirectory.absolutePath) :: targets.toList
s.log.debug("Running ndk-build: " + ndkBuild.mkString(" "))
val exitValue = ndkBuild.run(false).exitValue
if(exitValue != 0) sys.error("ndk-build failed with nonzero exit code (" + exitValue + ")")
()
}
lazy val settings: Seq[Setting[_]] = defaultSettings ++ pathSettings ++ inConfig(Android) (Seq (
javah <<= (
(compile in Compile),
javahPath,
(classDirectory in Compile), (internalDependencyClasspath in Compile), (externalDependencyClasspath in Compile),
jniClasses,
javahOutputDirectory, javahOutputFile,
streams) map ((
_, // we only depend on a side effect (built classes) of compile
javahPath,
classDirectory, internalDependencyClasspath, externalDependencyClasspath,
jniClasses,
javahOutputDirectory,
javahOutputFile,
streams) =>
javahTask(
javahPath,
Seq(classDirectory) ++ internalDependencyClasspath.files ++ externalDependencyClasspath.files,
jniClasses,
javahOutputDirectory, javahOutputFile,
streams)
),
ndkBuild <<= ndkBuildTask(),
ndkBuild <<= ndkBuild.dependsOn(javah),
ndkClean <<= ndkBuildTask("clean"),
jniClasses := Seq.empty,
(products in Compile) <<= (products in Compile).dependsOn(ndkBuild),
javahClean <<= (javahOutputDirectory) map IO.delete
)) ++ Seq (
cleanFiles <+= (nativeObjectPath in Android),
clean <<= clean.dependsOn(ndkClean in Android, javahClean in Android)
)
}
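// Usage sketch (illustrative; the surrounding project definition is hypothetical):
//   lazy val app = Project("app", file("."))
//     .settings(AndroidNdk.settings: _*) // adds the javah/ndk-build tasks and hooks them into clean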
| taisukeoe/sbt-android-plugin | src/main/scala/AndroidNdk.scala | Scala | bsd-3-clause | 5,839 |
object O {
def main(argv: Array[String]): Unit = {
new java.awt.Color(0,0,0)
}
}
| mdedetrich/sbt | sbt/src/sbt-test/run/awt/A.scala | Scala | bsd-3-clause | 89 |
/*
* Copyright (C) 2013 The Mango Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* The code of this project is a port of (or wrapper around) the Guava-libraries.
* See http://code.google.com/p/guava-libraries/
*
* @author Markus Schneider
*/
package org.feijoas.mango.common
/** This package contains caching utilities.
*
* <p>The core interface used to represent caches is [[Cache]].
* In-memory caches can be configured and created using
* [[CacheBuilder]], with cache entries being loaded by
* [[CacheLoader]]. Statistics about cache performance are exposed using
* [[CacheStats]].
*
* <p>See [[http://code.google.com/p/guava-libraries/wiki/CachesExplained the Guava User Guide article on caches]].
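 *
 * <p>Minimal usage sketch (illustrative only; the builder and loader signatures are assumed to
 * mirror Guava's, with `expensiveLookup` standing in for a user-supplied function):
 * {{{
 * val cache = CacheBuilder.newBuilder()
 *   .maximumSize(1000)
 *   .build((key: String) => expensiveLookup(key))
 * }}}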
*
*
* @author Markus Schneider
* @since 0.7 (copied from Guava-libraries)
*/
package object cache | feijoas/mango | src/main/scala/org/feijoas/mango/common/cache/package.scala | Scala | apache-2.0 | 1,354 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.util.Random
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Row}
class BucketizerSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
import testImplicits._
test("params") {
ParamsSuite.checkParams(new Bucketizer)
}
test("Bucket continuous features, without -inf,inf") {
// Check a set of valid feature values.
val splits = Array(-0.5, 0.0, 0.5)
val validData = Array(-0.5, -0.3, 0.0, 0.2)
val expectedBuckets = Array(0.0, 0.0, 1.0, 1.0)
val dataFrame: DataFrame = validData.zip(expectedBuckets).toSeq.toDF("feature", "expected")
val bucketizer: Bucketizer = new Bucketizer()
.setInputCol("feature")
.setOutputCol("result")
.setSplits(splits)
bucketizer.transform(dataFrame).select("result", "expected").collect().foreach {
case Row(x: Double, y: Double) =>
assert(x === y,
s"The feature value is not correct after bucketing. Expected $y but found $x")
}
// Check for exceptions when using a set of invalid feature values.
val invalidData1: Array[Double] = Array(-0.9) ++ validData
val invalidData2 = Array(0.51) ++ validData
val badDF1 = invalidData1.zipWithIndex.toSeq.toDF("feature", "idx")
withClue("Invalid feature value -0.9 was not caught as an invalid feature!") {
intercept[SparkException] {
bucketizer.transform(badDF1).collect()
}
}
val badDF2 = invalidData2.zipWithIndex.toSeq.toDF("feature", "idx")
withClue("Invalid feature value 0.51 was not caught as an invalid feature!") {
intercept[SparkException] {
bucketizer.transform(badDF2).collect()
}
}
}
test("Bucket continuous features, with -inf,inf") {
val splits = Array(Double.NegativeInfinity, -0.5, 0.0, 0.5, Double.PositiveInfinity)
val validData = Array(-0.9, -0.5, -0.3, 0.0, 0.2, 0.5, 0.9)
val expectedBuckets = Array(0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0)
val dataFrame: DataFrame = validData.zip(expectedBuckets).toSeq.toDF("feature", "expected")
val bucketizer: Bucketizer = new Bucketizer()
.setInputCol("feature")
.setOutputCol("result")
.setSplits(splits)
bucketizer.transform(dataFrame).select("result", "expected").collect().foreach {
case Row(x: Double, y: Double) =>
assert(x === y,
s"The feature value is not correct after bucketing. Expected $y but found $x")
}
}
test("Bucket continuous features, with NaN data but non-NaN splits") {
val splits = Array(Double.NegativeInfinity, -0.5, 0.0, 0.5, Double.PositiveInfinity)
val validData = Array(-0.9, -0.5, -0.3, 0.0, 0.2, 0.5, 0.9, Double.NaN, Double.NaN, Double.NaN)
val expectedBuckets = Array(0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0, 4.0)
val dataFrame: DataFrame = validData.zip(expectedBuckets).toSeq.toDF("feature", "expected")
val bucketizer: Bucketizer = new Bucketizer()
.setInputCol("feature")
.setOutputCol("result")
.setSplits(splits)
bucketizer.setHandleInvalid("keep")
bucketizer.transform(dataFrame).select("result", "expected").collect().foreach {
case Row(x: Double, y: Double) =>
assert(x === y,
s"The feature value is not correct after bucketing. Expected $y but found $x")
}
bucketizer.setHandleInvalid("skip")
val skipResults: Array[Double] = bucketizer.transform(dataFrame)
.select("result").as[Double].collect()
assert(skipResults.length === 7)
assert(skipResults.forall(_ !== 4.0))
bucketizer.setHandleInvalid("error")
withClue("Bucketizer should throw error when setHandleInvalid=error and given NaN values") {
intercept[SparkException] {
bucketizer.transform(dataFrame).collect()
}
}
}
test("Bucket continuous features, with NaN splits") {
val splits = Array(Double.NegativeInfinity, -0.5, 0.0, 0.5, Double.PositiveInfinity, Double.NaN)
withClue("Invalid NaN split was not caught during Bucketizer initialization") {
intercept[IllegalArgumentException] {
new Bucketizer().setSplits(splits)
}
}
}
test("Binary search correctness on hand-picked examples") {
import BucketizerSuite.checkBinarySearch
// length 3, with -inf
checkBinarySearch(Array(Double.NegativeInfinity, 0.0, 1.0))
// length 4
checkBinarySearch(Array(-1.0, -0.5, 0.0, 1.0))
// length 5
checkBinarySearch(Array(-1.0, -0.5, 0.0, 1.0, 1.5))
// length 3, with inf
checkBinarySearch(Array(0.0, 1.0, Double.PositiveInfinity))
// length 3, with -inf and inf
checkBinarySearch(Array(Double.NegativeInfinity, 1.0, Double.PositiveInfinity))
// length 4, with -inf and inf
checkBinarySearch(Array(Double.NegativeInfinity, 0.0, 1.0, Double.PositiveInfinity))
}
test("Binary search correctness in contrast with linear search, on random data") {
val data = Array.fill(100)(Random.nextDouble())
val splits: Array[Double] = Double.NegativeInfinity +:
Array.fill(10)(Random.nextDouble()).sorted :+ Double.PositiveInfinity
val bsResult = Vectors.dense(data.map(x =>
Bucketizer.binarySearchForBuckets(splits, x, false)))
val lsResult = Vectors.dense(data.map(x => BucketizerSuite.linearSearchForBuckets(splits, x)))
assert(bsResult ~== lsResult absTol 1e-5)
}
test("read/write") {
val t = new Bucketizer()
.setInputCol("myInputCol")
.setOutputCol("myOutputCol")
.setSplits(Array(0.1, 0.8, 0.9))
testDefaultReadWrite(t)
}
}
private object BucketizerSuite extends SparkFunSuite {
/** Brute force search for buckets. Bucket i is defined by the range [split(i), split(i+1)). */
def linearSearchForBuckets(splits: Array[Double], feature: Double): Double = {
require(feature >= splits.head)
var i = 0
val n = splits.length - 1
while (i < n) {
if (feature < splits(i + 1)) return i
i += 1
}
throw new RuntimeException(
s"linearSearchForBuckets failed to find bucket for feature value $feature")
}
/** Check all values in splits, plus values between all splits. */
def checkBinarySearch(splits: Array[Double]): Unit = {
def testFeature(feature: Double, expectedBucket: Double): Unit = {
assert(Bucketizer.binarySearchForBuckets(splits, feature, false) === expectedBucket,
s"Expected feature value $feature to be in bucket $expectedBucket with splits:" +
s" ${splits.mkString(", ")}")
}
var i = 0
val n = splits.length - 1
while (i < n) {
// Split i should fall in bucket i.
testFeature(splits(i), i)
// Value between splits i,i+1 should be in i, which is also true if the (i+1)-th split is inf.
testFeature((splits(i) + splits(i + 1)) / 2, i)
i += 1
}
}
}
| spark0001/spark2.1.1 | mllib/src/test/scala/org/apache/spark/ml/feature/BucketizerSuite.scala | Scala | apache-2.0 | 7,929 |
/*
* Copyright 2016 Miroslav Janíček
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.classdump.luna.test
import org.classdump.luna.{Conversions, LuaFormat, LuaRuntimeException}
import org.scalatest.FunSpec
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.language.implicitConversions
import scala.util.Try
trait Fragment {
private var _code: String = null
def description: String
def code: String = _code
protected def code_=(v: String): Unit = {
require(v != null)
this._code = v.stripMargin
}
}
object Fragment {
def apply(description: String, code: String): Fragment = new DefaultImpl(description, code)
class DefaultImpl(_desc: String, _code: String) extends Fragment {
require(_desc != null)
require(_code != null)
override val description = _desc
override val code = _code.stripMargin
}
}
trait FragmentBundle {
implicit protected val bundle = this
private val fragments = ArrayBuffer.empty[Fragment]
def name: String = this.getClass.getSimpleName
def lookup(name: String): Option[Fragment] = fragments find {
_.description == name
}
def all: Iterable[Fragment] = fragments
protected def fragment(name: String)(body: String): Fragment = {
register(Fragment(name, body))
}
protected def register(fragment: Fragment): Fragment = {
fragments.append(fragment)
fragment
}
}
trait FragmentExpectations {
import FragmentExpectations._
protected val EmptyContext = Env.Empty
protected val BasicContext = Env.Basic
protected val ModuleContext = Env.Module
protected val CoroContext = Env.Coro
protected val MathContext = Env.Math
protected val StringLibContext = Env.Str // must not be called StringContext -- messes up with string interpolation
protected val OsContext = Env.Os
protected val IOContext = Env.IO
protected val TableContext = Env.Tab
protected val DebugContext = Env.Debug
protected val FullContext = Env.Full
protected val NaN = ValueMatch.NaN
private val expectations = mutable.Map.empty[Fragment, mutable.Map[Env, Expect]]
def expectationFor(fragment: Fragment): Option[Map[Env, Expect]] = {
expectations.get(fragment) map {
_.toMap
}
}
// for code structuring purposes only
protected def expect(body: => Unit): Unit = {
body
}
protected def stringStartingWith(prefix: String) = ValueMatch.StringStartingWith(prefix)
protected implicit def fragmentToRichFragment(frag: Fragment): RichFragment = new RichFragment(frag)
private def addExpectation(fragment: Fragment, ctx: Env, expect: Expect): Unit = {
val es = expectations.getOrElseUpdate(fragment, mutable.Map.empty)
es(ctx) = expect
}
protected class RichFragment(fragment: Fragment) {
def in(ctx: Env) = new RichFragment.InContext(fragment, ctx)
}
protected object RichFragment {
class InContext(fragment: Fragment, ctx: Env) {
def succeedsWith(values: Any*) = {
addExpectation(fragment, ctx, Expect.Success(values map toLunaValue))
}
def failsWith(clazz: Class[_ <: Throwable]) = {
addExpectation(fragment, ctx, Expect.Failure.ExceptionFailure(Some(clazz), None))
}
def failsWith(clazz: Class[_ <: Throwable], message: StringMatcher) = {
addExpectation(fragment, ctx, Expect.Failure.ExceptionFailure(Some(clazz), Some(message)))
}
def failsWith(message: StringMatcher) = {
addExpectation(fragment, ctx, Expect.Failure.ExceptionFailure(None, Some(message)))
}
def failsWithLuaError(errorObject: AnyRef) = {
addExpectation(fragment, ctx, Expect.Failure.LuaErrorFailure(errorObject))
}
}
}
implicit def stringToMatcher(s: String): StringMatcher = StringMatcher(StringMatcher.Strict(s) :: Nil)
}
object FragmentExpectations {
private def toLunaValue(v: Any): ValueMatch = {
import ValueMatch._
v match {
case vm: ValueMatch => vm
case null => Eq(null)
case b: Boolean => Eq(java.lang.Boolean.valueOf(b))
case i: Int => Eq(java.lang.Long.valueOf(i))
case l: Long => Eq(java.lang.Long.valueOf(l))
case f: Float => Eq(java.lang.Double.valueOf(f))
case d: Double => Eq(java.lang.Double.valueOf(d))
case s: String => Eq(s)
case c: Class[_] => SubtypeOf(c)
case _ => throw new IllegalArgumentException("illegal value: " + v)
}
}
sealed trait Env
sealed trait Expect {
def tryMatch(actual: Try[Seq[AnyRef]], onFail: () => Unit)(spec: FunSpec): Unit
}
sealed trait ValueMatch {
def matches(o: AnyRef): Boolean
}
case class StringMatcher(parts: List[StringMatcher.Part]) {
def <<(arg: String) = StringMatcher(parts :+ StringMatcher.NonStrict(arg))
def >>(arg: String) = StringMatcher(parts :+ StringMatcher.Strict(arg))
def matches(actual: String): Boolean = StringMatcher.matches(parts, actual)
override def toString = parts.mkString("")
}
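  // Sketch of the matcher DSL (illustrative strings):
  //   "attempt to compare" << "some value" >> "with nil"
  // builds Strict("attempt to compare") :: NonStrict("some value") :: Strict("with nil") :: Nil;
  // the strict parts must occur in that order in the actual message, while `<<` marks an unchecked gap.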
object Env {
case object Empty extends Env
case object Basic extends Env
case object Module extends Env
case object Coro extends Env
case object Math extends Env
case object Str extends Env
case object Os extends Env
case object IO extends Env
case object Tab extends Env
case object Debug extends Env
case object Full extends Env
}
object Expect {
sealed trait Failure extends Expect {
override def tryMatch(actual: Try[Seq[AnyRef]], onFail: () => Unit)(spec: FunSpec) = {
actual match {
case scala.util.Success(vs) =>
onFail()
spec.fail("Expected failure, got success")
case scala.util.Failure(ex) => matchError(ex, onFail)(spec)
}
}
protected def matchError(ex: Throwable, onFail: () => Unit)(spec: FunSpec): Unit
}
case class Success(vms: Seq[ValueMatch]) extends Expect {
override def tryMatch(actual: Try[Seq[AnyRef]], onFail: () => Unit)(spec: FunSpec) = {
actual match {
case scala.util.Success(vs) =>
if (vs.size != vms.size) {
onFail()
spec.fail("result list size does not match: expected " + vms.size + ", got " + vs.size)
}
spec.assertResult(vs.size)(vms.size)
for (((v, vm), i) <- (vs zip vms).zipWithIndex) {
if (!vm.matches(v)) {
onFail()
spec.fail("value #" + i + " does not match: expected " + vm + ", got " + v)
}
}
case scala.util.Failure(ex) =>
onFail()
spec.fail("Expected success, got an exception: " + ex.getMessage, ex)
}
}
}
object Failure {
case class ExceptionFailure(optExpectClass: Option[Class[_ <: Throwable]], optExpectMessage: Option[StringMatcher]) extends Failure {
override protected def matchError(ex: Throwable, onFail: () => Unit)(spec: FunSpec) = {
for (expectClass <- optExpectClass) {
val actualClass = ex.getClass
if (!expectClass.isAssignableFrom(actualClass)) {
onFail()
spec.fail("Expected exception of type " + expectClass.getName + ", got " + actualClass.getName)
}
}
for (messageMatcher <- optExpectMessage) {
val actualMessage = ex.getMessage
if (!messageMatcher.matches(actualMessage)) {
onFail()
            spec.fail("Error message mismatch: expected \"" + messageMatcher + "\", got \"" + actualMessage + "\"")
}
}
}
}
case class LuaErrorFailure(expectErrorObject: AnyRef) extends Failure {
override protected def matchError(ex: Throwable, onFail: () => Unit)(spec: FunSpec) = {
ex match {
case le: LuaRuntimeException =>
val actualErrorObject = Conversions.javaRepresentationOf(le.getErrorObject)
if (expectErrorObject != actualErrorObject) {
onFail()
spec.fail("Error object mismatch: expected [" + expectErrorObject + "], got [" + actualErrorObject + "]")
}
}
}
}
}
}
object StringMatcher {
def matches(parts: List[StringMatcher.Part], actual: String): Boolean = {
def either(parts: List[StringMatcher.Part], idx: Int): Boolean = {
parts.headOption match {
          case Some(h: Strict) => strict(parts, idx)
          case Some(h: NonStrict) => nonStrict(parts, idx)
case None => true
}
}
def strict(parts: List[StringMatcher.Part], idx: Int): Boolean = {
parts match {
case Strict(s) :: tail if actual.indexOf(s, idx) == idx => strict(tail, idx + s.length)
case NonStrict(_) :: tail => nonStrict(tail, idx)
case Nil => true
case _ => false
}
}
def nonStrict(parts: List[StringMatcher.Part], idx: Int): Boolean = {
parts match {
case Strict(s) :: tail =>
val nextIdx = actual.indexOf(s, idx)
(nextIdx >= 0) && (either(tail, nextIdx + s.length()) || nonStrict(parts, idx + 1))
case NonStrict(_) :: tail => nonStrict(tail, idx)
case Nil => true
}
}
either(parts, 0)
}
sealed trait Part
case class Strict(s: String) extends Part {
override def toString = s
}
case class NonStrict(s: String) extends Part {
override def toString = "<<" + s + ">>"
}
}
object ValueMatch {
case class Eq(v: AnyRef) extends ValueMatch {
override def matches(o: AnyRef) = {
if (o == null || v == null) {
o eq v
}
else {
v.getClass == o.getClass && v == o
}
}
}
case class SubtypeOf(c: Class[_]) extends ValueMatch {
override def matches(o: AnyRef) = if (o == null) false else c.isAssignableFrom(o.getClass)
}
case class StringStartingWith(prefix: String) extends ValueMatch {
override def matches(o: AnyRef) = {
o match {
case s: String if s.startsWith(prefix) => true
case _ => false
}
}
}
case object NaN extends ValueMatch {
override def matches(o: AnyRef) = {
o match {
case d: java.lang.Double if d.isNaN => true
case _ => false
}
}
}
}
}
trait OneLiners {
this: FragmentBundle with FragmentExpectations =>
private var prefixes: List[String] = Nil
private var context: FragmentExpectations.Env = null
def about(desc: String)(body: => Unit): Unit = {
val oldPrefixes = prefixes
try {
prefixes = desc :: oldPrefixes
body
}
finally {
prefixes = oldPrefixes
}
}
def in(env: FragmentExpectations.Env)(body: => Unit): Unit = {
val oldContext = context
try {
context = env
body
}
finally {
context = oldContext
}
}
def program(body: String): RichFragment.InContext = {
val name = (LuaFormat.escape(body) :: prefixes).reverse.mkString(": ")
fragment(name)(body) in context
}
protected def thisContext: FragmentExpectations.Env = context
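  // Intended use (illustrative Lua fragment; concrete bundles mix this trait into a FragmentBundle
  // with FragmentExpectations):
  //   about("arithmetic") {
  //     in(BasicContext) {
  //       program("""return 1 + 1""") succeedsWith 2
  //     }
  //   }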
} | kroepke/luna | luna-tests/src/test/scala/org/classdump/luna/test/Fragment.scala | Scala | apache-2.0 | 11,752 |
package com.twitter.scalding.bdd
import org.scalatest.{ Matchers, WordSpec }
import com.twitter.scalding.{ Dsl, RichPipe }
import scala.collection.mutable.Buffer
import cascading.pipe.Pipe
import cascading.tuple.Tuple
import com.twitter.scalding.Dsl._
class SingleSourceSpecTest extends WordSpec with Matchers with BddDsl {
"A test with single source" should {
"accept an operation with a single input rich pipe" in {
Given {
List(("col1_1", "col2_1"), ("col1_2", "col2_2")) withSchema (('col1, 'col2))
} When {
pipe: RichPipe =>
{
pipe.map('col1 -> 'col1_transf) {
col1: String => col1 + "_transf"
}
}
} Then {
buffer: Buffer[(String, String, String)] =>
{
buffer.forall({
case (_, _, transformed) => transformed.endsWith("_transf")
}) shouldBe true
}
}
}
"accept an operation with a single input pipe" in {
Given {
List(("col1_1", "col2_1"), ("col1_2", "col2_2")) withSchema (('col1, 'col2))
} When {
pipe: Pipe =>
{
pipe.map('col1 -> 'col1_transf) {
col1: String => col1 + "_transf"
}
}
} Then {
buffer: Buffer[(String, String, String)] =>
{
buffer.forall({
case (_, _, transformed) => transformed.endsWith("_transf")
}) shouldBe true
}
}
}
"work with output as Tuple" in {
Given {
List(("col1_1", "col2_1"), ("col1_2", "col2_2")) withSchema (('col1, 'col2))
} When {
pipe: RichPipe =>
{
pipe.map('col1 -> 'col1_transf) {
col1: String => col1 + "_transf"
}
}
} Then {
buffer: Buffer[Tuple] =>
{
buffer.forall(tuple => tuple.getString(2).endsWith("_transf")) shouldBe true
}
}
}
"work with input as simple type" in {
Given {
List("col1_1", "col1_2") withSchema ('col1)
} When {
pipe: RichPipe =>
{
pipe.map('col1 -> 'col1_transf) {
col1: String => col1 + "_transf"
}
}
} Then {
buffer: Buffer[Tuple] =>
{
buffer.forall(tuple => tuple.getString(1).endsWith("_transf")) shouldBe true
}
}
}
"work with input as Tuple" in {
Given {
List(new Tuple("col1_1", "col2_1"), new Tuple("col1_2", "col2_2")) withSchema (('col1, 'col2))
} When {
pipe: RichPipe =>
{
pipe.map('col1 -> 'col1_transf) {
col1: String => col1 + "_transf"
}
}
} Then {
buffer: Buffer[Tuple] =>
{
buffer.forall(tuple => tuple.getString(2).endsWith("_transf")) shouldBe true
}
}
}
}
}
| sriramkrishnan/scalding | scalding-core/src/test/scala/com/twitter/scalding/bdd/SingleSourceSpecTest.scala | Scala | apache-2.0 | 2,940 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status.api.v1
import java.lang.{Long => JLong}
import java.util.Date
import scala.xml.{NodeSeq, Text}
import com.fasterxml.jackson.annotation.JsonIgnoreProperties
import com.fasterxml.jackson.core.{JsonGenerator, JsonParser}
import com.fasterxml.jackson.core.`type`.TypeReference
import com.fasterxml.jackson.databind.{DeserializationContext, JsonDeserializer, JsonSerializer, SerializerProvider}
import com.fasterxml.jackson.databind.annotation.{JsonDeserialize, JsonSerialize}
import org.apache.spark.JobExecutionStatus
import org.apache.spark.executor.ExecutorMetrics
import org.apache.spark.metrics.ExecutorMetricType
case class ApplicationInfo private[spark](
id: String,
name: String,
coresGranted: Option[Int],
maxCores: Option[Int],
coresPerExecutor: Option[Int],
memoryPerExecutorMB: Option[Int],
attempts: Seq[ApplicationAttemptInfo])
@JsonIgnoreProperties(
value = Array("startTimeEpoch", "endTimeEpoch", "lastUpdatedEpoch"),
allowGetters = true)
case class ApplicationAttemptInfo private[spark](
attemptId: Option[String],
startTime: Date,
endTime: Date,
lastUpdated: Date,
duration: Long,
sparkUser: String,
completed: Boolean = false,
appSparkVersion: String) {
def getStartTimeEpoch: Long = startTime.getTime
def getEndTimeEpoch: Long = endTime.getTime
def getLastUpdatedEpoch: Long = lastUpdated.getTime
}
class ExecutorStageSummary private[spark](
val taskTime : Long,
val failedTasks : Int,
val succeededTasks : Int,
val killedTasks : Int,
val inputBytes : Long,
val inputRecords : Long,
val outputBytes : Long,
val outputRecords : Long,
val shuffleRead : Long,
val shuffleReadRecords : Long,
val shuffleWrite : Long,
val shuffleWriteRecords : Long,
val memoryBytesSpilled : Long,
val diskBytesSpilled : Long,
val isBlacklistedForStage: Boolean)
class ExecutorSummary private[spark](
val id: String,
val hostPort: String,
val isActive: Boolean,
val rddBlocks: Int,
val memoryUsed: Long,
val diskUsed: Long,
val totalCores: Int,
val maxTasks: Int,
val activeTasks: Int,
val failedTasks: Int,
val completedTasks: Int,
val totalTasks: Int,
val totalDuration: Long,
val totalGCTime: Long,
val totalInputBytes: Long,
val totalShuffleRead: Long,
val totalShuffleWrite: Long,
val isBlacklisted: Boolean,
val maxMemory: Long,
val addTime: Date,
val removeTime: Option[Date],
val removeReason: Option[String],
val executorLogs: Map[String, String],
val memoryMetrics: Option[MemoryMetrics],
val blacklistedInStages: Set[Int],
@JsonSerialize(using = classOf[ExecutorMetricsJsonSerializer])
@JsonDeserialize(using = classOf[ExecutorMetricsJsonDeserializer])
val peakMemoryMetrics: Option[ExecutorMetrics])
class MemoryMetrics private[spark](
val usedOnHeapStorageMemory: Long,
val usedOffHeapStorageMemory: Long,
val totalOnHeapStorageMemory: Long,
val totalOffHeapStorageMemory: Long)
/** deserializer for peakMemoryMetrics: convert map to ExecutorMetrics */
private[spark] class ExecutorMetricsJsonDeserializer
extends JsonDeserializer[Option[ExecutorMetrics]] {
override def deserialize(
jsonParser: JsonParser,
deserializationContext: DeserializationContext): Option[ExecutorMetrics] = {
val metricsMap = jsonParser.readValueAs[Option[Map[String, Long]]](
new TypeReference[Option[Map[String, java.lang.Long]]] {})
metricsMap.map(metrics => new ExecutorMetrics(metrics))
}
}
/** serializer for peakMemoryMetrics: convert ExecutorMetrics to map with metric name as key */
private[spark] class ExecutorMetricsJsonSerializer
extends JsonSerializer[Option[ExecutorMetrics]] {
override def serialize(
metrics: Option[ExecutorMetrics],
jsonGenerator: JsonGenerator,
serializerProvider: SerializerProvider): Unit = {
metrics.foreach { m: ExecutorMetrics =>
val metricsMap = ExecutorMetricType.values.map { metricType =>
metricType.name -> m.getMetricValue(metricType)
}.toMap
jsonGenerator.writeObject(metricsMap)
}
}
}
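/*
 * Serialised form sketch (the metric name below is illustrative; the real keys come from
 * ExecutorMetricType.values): {"JVMHeapMemory": 123456789, ...}
 */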
class JobData private[spark](
val jobId: Int,
val name: String,
val description: Option[String],
val submissionTime: Option[Date],
val completionTime: Option[Date],
val stageIds: Seq[Int],
val jobGroup: Option[String],
val status: JobExecutionStatus,
val numTasks: Int,
val numActiveTasks: Int,
val numCompletedTasks: Int,
val numSkippedTasks: Int,
val numFailedTasks: Int,
val numKilledTasks: Int,
val numCompletedIndices: Int,
val numActiveStages: Int,
val numCompletedStages: Int,
val numSkippedStages: Int,
val numFailedStages: Int,
val killedTasksSummary: Map[String, Int])
class RDDStorageInfo private[spark](
val id: Int,
val name: String,
val numPartitions: Int,
val numCachedPartitions: Int,
val storageLevel: String,
val memoryUsed: Long,
val diskUsed: Long,
val dataDistribution: Option[Seq[RDDDataDistribution]],
val partitions: Option[Seq[RDDPartitionInfo]])
class RDDDataDistribution private[spark](
val address: String,
val memoryUsed: Long,
val memoryRemaining: Long,
val diskUsed: Long,
@JsonDeserialize(contentAs = classOf[JLong])
val onHeapMemoryUsed: Option[Long],
@JsonDeserialize(contentAs = classOf[JLong])
val offHeapMemoryUsed: Option[Long],
@JsonDeserialize(contentAs = classOf[JLong])
val onHeapMemoryRemaining: Option[Long],
@JsonDeserialize(contentAs = classOf[JLong])
val offHeapMemoryRemaining: Option[Long])
class RDDPartitionInfo private[spark](
val blockName: String,
val storageLevel: String,
val memoryUsed: Long,
val diskUsed: Long,
val executors: Seq[String])
class StageData private[spark](
val status: StageStatus,
val stageId: Int,
val attemptId: Int,
val numTasks: Int,
val numActiveTasks: Int,
val numCompleteTasks: Int,
val numFailedTasks: Int,
val numKilledTasks: Int,
val numCompletedIndices: Int,
val executorRunTime: Long,
val executorCpuTime: Long,
val submissionTime: Option[Date],
val firstTaskLaunchedTime: Option[Date],
val completionTime: Option[Date],
val failureReason: Option[String],
val inputBytes: Long,
val inputRecords: Long,
val outputBytes: Long,
val outputRecords: Long,
val shuffleReadBytes: Long,
val shuffleReadRecords: Long,
val shuffleWriteBytes: Long,
val shuffleWriteRecords: Long,
val memoryBytesSpilled: Long,
val diskBytesSpilled: Long,
val name: String,
val description: Option[String],
val details: String,
val schedulingPool: String,
val rddIds: Seq[Int],
val accumulatorUpdates: Seq[AccumulableInfo],
val tasks: Option[Map[Long, TaskData]],
val executorSummary: Option[Map[String, ExecutorStageSummary]],
val killedTasksSummary: Map[String, Int])
class TaskData private[spark](
val taskId: Long,
val index: Int,
val attempt: Int,
val launchTime: Date,
val resultFetchStart: Option[Date],
@JsonDeserialize(contentAs = classOf[JLong])
val duration: Option[Long],
val executorId: String,
val host: String,
val status: String,
val taskLocality: String,
val speculative: Boolean,
val accumulatorUpdates: Seq[AccumulableInfo],
val errorMessage: Option[String] = None,
val taskMetrics: Option[TaskMetrics] = None)
class TaskMetrics private[spark](
val executorDeserializeTime: Long,
val executorDeserializeCpuTime: Long,
val executorRunTime: Long,
val executorCpuTime: Long,
val resultSize: Long,
val jvmGcTime: Long,
val resultSerializationTime: Long,
val memoryBytesSpilled: Long,
val diskBytesSpilled: Long,
val peakExecutionMemory: Long,
val inputMetrics: InputMetrics,
val outputMetrics: OutputMetrics,
val shuffleReadMetrics: ShuffleReadMetrics,
val shuffleWriteMetrics: ShuffleWriteMetrics)
class InputMetrics private[spark](
val bytesRead: Long,
val recordsRead: Long)
class OutputMetrics private[spark](
val bytesWritten: Long,
val recordsWritten: Long)
class ShuffleReadMetrics private[spark](
val remoteBlocksFetched: Long,
val localBlocksFetched: Long,
val fetchWaitTime: Long,
val remoteBytesRead: Long,
val remoteBytesReadToDisk: Long,
val localBytesRead: Long,
val recordsRead: Long)
class ShuffleWriteMetrics private[spark](
val bytesWritten: Long,
val writeTime: Long,
val recordsWritten: Long)
class TaskMetricDistributions private[spark](
val quantiles: IndexedSeq[Double],
val executorDeserializeTime: IndexedSeq[Double],
val executorDeserializeCpuTime: IndexedSeq[Double],
val executorRunTime: IndexedSeq[Double],
val executorCpuTime: IndexedSeq[Double],
val resultSize: IndexedSeq[Double],
val jvmGcTime: IndexedSeq[Double],
val resultSerializationTime: IndexedSeq[Double],
val gettingResultTime: IndexedSeq[Double],
val schedulerDelay: IndexedSeq[Double],
val peakExecutionMemory: IndexedSeq[Double],
val memoryBytesSpilled: IndexedSeq[Double],
val diskBytesSpilled: IndexedSeq[Double],
val inputMetrics: InputMetricDistributions,
val outputMetrics: OutputMetricDistributions,
val shuffleReadMetrics: ShuffleReadMetricDistributions,
val shuffleWriteMetrics: ShuffleWriteMetricDistributions)
class InputMetricDistributions private[spark](
val bytesRead: IndexedSeq[Double],
val recordsRead: IndexedSeq[Double])
class OutputMetricDistributions private[spark](
val bytesWritten: IndexedSeq[Double],
val recordsWritten: IndexedSeq[Double])
class ShuffleReadMetricDistributions private[spark](
val readBytes: IndexedSeq[Double],
val readRecords: IndexedSeq[Double],
val remoteBlocksFetched: IndexedSeq[Double],
val localBlocksFetched: IndexedSeq[Double],
val fetchWaitTime: IndexedSeq[Double],
val remoteBytesRead: IndexedSeq[Double],
val remoteBytesReadToDisk: IndexedSeq[Double],
val totalBlocksFetched: IndexedSeq[Double])
class ShuffleWriteMetricDistributions private[spark](
val writeBytes: IndexedSeq[Double],
val writeRecords: IndexedSeq[Double],
val writeTime: IndexedSeq[Double])
class AccumulableInfo private[spark](
val id: Long,
val name: String,
val update: Option[String],
val value: String)
class VersionInfo private[spark](
val spark: String)
class ApplicationEnvironmentInfo private[spark] (
val runtime: RuntimeInfo,
val sparkProperties: Seq[(String, String)],
val systemProperties: Seq[(String, String)],
val classpathEntries: Seq[(String, String)])
class RuntimeInfo private[spark](
val javaVersion: String,
val javaHome: String,
val scalaVersion: String)
case class StackTrace(elems: Seq[String]) {
override def toString: String = elems.mkString
def html: NodeSeq = {
val withNewLine = elems.foldLeft(NodeSeq.Empty) { (acc, elem) =>
if (acc.isEmpty) {
acc :+ Text(elem)
} else {
acc :+ <br /> :+ Text(elem)
}
}
withNewLine
}
def mkString(start: String, sep: String, end: String): String = {
elems.mkString(start, sep, end)
}
}
case class ThreadStackTrace(
val threadId: Long,
val threadName: String,
val threadState: Thread.State,
val stackTrace: StackTrace,
val blockedByThreadId: Option[Long],
val blockedByLock: String,
val holdingLocks: Seq[String])
| zhouyejoe/spark | core/src/main/scala/org/apache/spark/status/api/v1/api.scala | Scala | apache-2.0 | 12,470 |
package se.culvertsoft.mgen.cpppack.generator.impl.classcpp
import scala.collection.JavaConversions.asScalaBuffer
import se.culvertsoft.mgen.api.model.ClassType
import se.culvertsoft.mgen.api.model.Module
import se.culvertsoft.mgen.compiler.internal.BuiltInGeneratorUtil.endl
import se.culvertsoft.mgen.compiler.internal.BuiltInGeneratorUtil.ln
import se.culvertsoft.mgen.compiler.internal.BuiltInGeneratorUtil.quote
import se.culvertsoft.mgen.compiler.util.SourceCodeBuffer
import se.culvertsoft.mgen.cpppack.generator.impl.Alias.fieldMetaString
object MkMetadataFields {
def apply(t: ClassType)(implicit txtBuffer: SourceCodeBuffer) {
val allFields = t.fieldsInclSuper()
val pfx = s"_${t.shortName}"
// Own type data
ln(s"const std::string& ${t.shortName()}::_type_name() {")
ln(1, s"static const std::string out(${quote(t.fullName)});")
ln(1, "return out;")
ln("}")
endl()
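    // For a (hypothetical) type com.example.Foo, the four ln(...) calls above emit roughly:
    //   const std::string& Foo::_type_name() {
    //     static const std::string out("com.example.Foo");
    //     return out;
    //   }
    // (the exact indentation depends on how ln renders its level argument)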
{
val ids = t.typeHierarchy.map(_.typeId.toString + "LL")
ln(s"const std::vector<long long>& ${t.shortName()}::_type_ids() {")
ln(1, s"static const std::vector<long long> out = mgen::make_vector<long long>() << ${ids.mkString(" << ")};")
ln(1, s"return out;")
ln("}")
endl()
}
{
val ids16bit = t.typeHierarchy.map(_.typeId16Bit.toString)
ln(s"const std::vector<short>& ${t.shortName()}::_type_ids_16bit() {")
ln(1, s"static const std::vector<short> out = mgen::make_vector<short>() << ${ids16bit.mkString(" << ")};")
ln(1, "return out;")
ln("}")
endl()
}
{
val names = t.typeHierarchy.map(x => quote(x.fullName))
ln(s"const std::vector<std::string>& ${t.shortName()}::_type_names() {")
ln(1, s"static const std::vector<std::string> out = mgen::make_vector<std::string>() << ${names.mkString(" << ")};")
ln(1, "return out;")
ln("}")
endl()
}
{
val ids = t.typeHierarchy.map(x => quote(x.typeId16BitBase64))
ln(s"const std::vector<std::string>& ${t.shortName()}::_type_ids_16bit_base64() {")
ln(1, s"static const std::vector<std::string> out = mgen::make_vector<std::string>() << ${ids.mkString(" << ")};")
ln(1, "return out;")
ln("}")
endl()
}
val base64ids = t.typeHierarchy().map(_.typeId16BitBase64())
val base64String = quote(base64ids.mkString(""))
ln(s"const std::string& ${t.shortName()}::_type_ids_16bit_base64_string() {")
ln(1, s"static const std::string out($base64String);")
ln(1, "return out;")
ln("}")
endl()
ln(s"const std::string& ${t.shortName()}::_type_id_16bit_base64() {")
ln(1, s"static const std::string out(${quote(t.typeId16BitBase64)});")
ln(1, "return out;")
ln("}")
endl()
// Field type data
ln(s"const std::vector<mgen::Field>& ${t.shortName()}::_field_metadatas() {")
val metadatas = t.fieldsInclSuper.map(fieldMetaString(_))
val metadatasString = if (metadatas.isEmpty) ";" else s" = mgen::make_vector<mgen::Field>() << ${metadatas.mkString(" << ")};"
ln(1, s"static const std::vector<mgen::Field> out${metadatasString}")
ln(1, "return out;")
ln("}")
endl()
// Fields metadata implementation
for (field <- t.fields()) {
val enumString = field.typ.typeEnum.toString
val tagString = enumString match {
case "ENUM" => "STRING"
case _ => enumString
}
ln(s"const mgen::Field& ${t.shortName()}::${fieldMetaString(field)} {")
val flagsString = if (field.flags.nonEmpty) s", mgen::make_vector<std::string>() << ${field.flags.map(quote).mkString(" << ")}" else ""
ln(1, s"static const mgen::Field out(${field.id}, ${quote(field.name)}$flagsString);")
ln(1, "return out;")
ln("}")
endl()
}
}
} | culvertsoft/mgen | mgen-cppgenerator/src/main/scala/se/culvertsoft/mgen/cpppack/generator/impl/classcpp/MkMetadataFields.scala | Scala | mit | 3,785 |
package scala.meta.tests
package semanticdb
import org.scalameta.logger
import munit.FunSuite
import scala.meta.interactive.InteractiveSemanticdb._
import scala.meta.internal.semanticdb.Print
import scala.tools.nsc.interactive.Global
import scala.util.Properties
import scala.collection.SortedMap
class Source3Suite extends FunSuite {
override def munitIgnore: Boolean = !ScalaVersion.isSupported(
minimal212 = 14,
minimal213 = 6
)
val compiler: Global = newCompiler(scalacOptions = "-Xsource:3" :: Nil)
def check(
original: String,
expected: String,
compat: List[(String, String)] = List.empty
): Unit = {
test(logger.revealWhitespace(original)) {
val options = List("-P:semanticdb:synthetics:on", "-P:semanticdb:text:on")
val document = toTextDocument(compiler, original, options)
val format = scala.meta.metap.Format.Detailed
val syntax = Print.document(format, document)
val expectedCompat = ScalaVersion.getExpected(compat, expected)
assertNoDiff(syntax, expectedCompat)
}
}
val expected =
"""|interactive.scala
|-----------------
|
|Summary:
|Schema => SemanticDB v4
|Uri => interactive.scala
|Text => non-empty
|Language => Scala
|Symbols => 4 entries
|Occurrences => 16 entries
|
|Symbols:
|b/a. => final object a extends AnyRef { +2 decls }
| AnyRef => scala/AnyRef#
|b/a.args. => val method args: List[String]
| List => scala/collection/immutable/List#
| String => scala/Predef.String#
|b/a.func(). => method func(args: String*): Nothing
| args => b/a.func().(args)
| String => scala/Predef.String#
| Nothing => scala/Nothing#
|b/a.func().(args) => param args: String*
| String => scala/Predef.String#
|
|Occurrences:
|[0:8..0:9): b <= b/
|[1:7..1:12): scala => scala/
|[1:13..1:23): concurrent => scala/concurrent/
|[1:24..1:30): Future => scala/concurrent/Future#
|[1:24..1:30): Future => scala/concurrent/Future.
|[2:7..2:8): a <= b/a.
|[3:6..3:10): func <= b/a.func().
|[3:11..3:15): args <= b/a.func().(args)
|[3:17..3:23): String => java/lang/String#
|[3:28..3:31): ??? => scala/Predef.`???`().
|[4:6..4:10): args <= b/a.args.
|[4:13..4:17): List => scala/package.List.
|[4:18..4:23): empty => scala/collection/immutable/List.empty().
|[4:24..4:30): String => scala/Predef.String#
|[5:2..5:6): func => b/a.func().
|[5:7..5:11): args => b/a.args.""".stripMargin
check(
"""package b
|import scala.concurrent.Future as F
|object a {
| def func(args: String*) = ???
| val args = List.empty[String]
| func(args*)
|}
""".stripMargin,
expected,
compat = List(
"2.12.14" -> expected.replace("scala/package.List.", "scala/collection/immutable/List."),
"2.12.15" -> expected.replace("scala/package.List.", "scala/collection/immutable/List.")
)
)
}
| scalameta/scalameta | tests/jvm/src/test/scala/scala/meta/tests/semanticdb/Source3Suite.scala | Scala | bsd-3-clause | 3,112 |
import org.hamcrest.CoreMatchers._
import org.scalatest.FlatSpec
import org.springframework.security.test.web.servlet.request.SecurityMockMvcRequestPostProcessors._
import org.springframework.test.web.servlet.request.MockMvcRequestBuilders._
import org.springframework.test.web.servlet.result.MockMvcResultMatchers._
import test.annotations.SpringIntegrationTest
import test.traits.{MockMvcTest, TestContextManagement}
/**
* Add your spec here.
* You can mock out a whole application including requests, plugins etc.
* For more information, consult the wiki.
*/
@SpringIntegrationTest
class ApplicationSpec extends FlatSpec with TestContextManagement with MockMvcTest {
"Application" should "Send 404 on bad request" in {
mockMvc.perform(get("/boum").`with`(user("any"))).andExpect(status().isNotFound)
}
it should "Open index page" in {
mockMvc.perform(get("/"))
.andExpect(status().isOk).andExpect(content().string(containsString("Application")))
}
}
| giovannicandido/slush-spring-aurelia | templates/server/src/it/scala/ApplicationSpec.scala | Scala | mit | 988 |
package services
import play.api.{Logger, Play}
trait ConfigSupport{
def configKey(key: String, default: String = "N/A"): String
}
object ConfigSupport extends ConfigSupport{
import play.api.Play.current
def configKey(key: String, default: String = "N/A"): String = {
Play.configuration.getString(key).getOrElse({
Logger.error(s"missing config key [$key]; using default [$default]")
default
})
}
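  // e.g. configKey("service.endpoint", "http://localhost") (made-up key and fallback) returns the
  // configured value, or logs an error and returns "http://localhost" when the key is absent.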
} | tsechov/shoehorn | app/services/Config.scala | Scala | apache-2.0 | 431 |
package org.ninjatasks.taskmanagement
import akka.actor.{ActorRef, Cancellable, Actor, ActorLogging}
import scala.concurrent.duration._
import akka.pattern.{ask, pipe, AskTimeoutException}
import scala.concurrent.ExecutionContext.Implicits.global
import org.ninjatasks.utils.ManagementConsts
import scala.language.postfixOps
/**
*
* Created by Gilad Ber on 4/26/2014.
*/
class JobExtractor(val workSource: ActorRef, val delegator: ActorRef) extends Actor with ActorLogging
{
var cancelOption: Option[Cancellable] = None
val config = ManagementConsts.config
val initialDelay = config.getLong("ninja.extractor.initial-delay")
val periodicDelay = config.getLong("ninja.extractor.periodic-delay")
val capacityRequestTimeout = config.getLong("ninja.extractor.request-timeout")
def scheduler = context.system.scheduler
override def preStart() =
{
cancelOption foreach (c => c.cancel())
val cancellable = scheduler.schedule(initialDelay = initialDelay millis, interval = periodicDelay millis)
{
val f = ask(delegator, JobCapacityRequest)(timeout = capacityRequestTimeout millis)
f map
{
case JobCapacity(amount) => JobSetRequest(amount)
case e: AskTimeoutException => throw e
case _ => throw new IllegalArgumentException("Invalid response received to job capacity request")
} pipeTo workSource
}
cancelOption = Some(cancellable)
}
override def receive =
{
case ajm: AggregateJobMessage => delegator ! ajm
}
}
| giladber/ninja-tasks | src/main/scala/org/ninjatasks/taskmanagement/JobExtractor.scala | Scala | apache-2.0 | 1,466 |
package org.nedervold.grammareditor.models.views
import org.scalatest.FlatSpec
import org.nedervold.grammareditor.models.VarModel
class TextAreaViewSpec extends FlatSpec {
behavior of "a TextViewSpec"
it should "require a non-null Model" in {
intercept[IllegalArgumentException] {
new TextAreaView(null)
}
}
it should "require a non-null unparse function" in {
var model = new VarModel(5)
intercept[IllegalArgumentException] {
new TextAreaView(model, null)
}
}
} | nedervold/GrammarEditor | src/test/scala/org/nedervold/grammareditor/models/views/TextAreaViewSpec.scala | Scala | apache-2.0 | 550 |
import com.typesafe.tools.mima.core._
object MiMaFilters {
val Library: Seq[ProblemFilter] = Seq(
// Experimental APIs that can be added in 3.2.0
ProblemFilters.exclude[DirectMissingMethodProblem]("scala.runtime.Tuples.append"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("scala.quoted.Quotes#reflectModule#TypeReprMethods.substituteTypes"),
ProblemFilters.exclude[DirectMissingMethodProblem]("scala.quoted.Quotes#reflectModule#TypeReprMethods.substituteTypes"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("scala.quoted.Quotes#reflectModule#TypeReprMethods.typeArgs"),
ProblemFilters.exclude[DirectMissingMethodProblem]("scala.quoted.Quotes#reflectModule#TypeReprMethods.typeArgs"),
ProblemFilters.exclude[MissingClassProblem]("scala.compiletime.ops.double"),
ProblemFilters.exclude[MissingClassProblem]("scala.compiletime.ops.double$"),
ProblemFilters.exclude[MissingClassProblem]("scala.compiletime.ops.float"),
ProblemFilters.exclude[MissingClassProblem]("scala.compiletime.ops.float$"),
ProblemFilters.exclude[MissingClassProblem]("scala.compiletime.ops.long"),
ProblemFilters.exclude[MissingClassProblem]("scala.compiletime.ops.long$"),
ProblemFilters.exclude[ReversedMissingMethodProblem]("scala.quoted.Quotes#reflectModule#CompilationInfoModule.XmacroSettings"),
ProblemFilters.exclude[DirectMissingMethodProblem]("scala.quoted.Quotes#reflectModule#CompilationInfoModule.XmacroSettings"),
ProblemFilters.exclude[DirectMissingMethodProblem]("scala.deriving.Mirror.fromProductTyped"),
ProblemFilters.exclude[DirectMissingMethodProblem]("scala.deriving.Mirror.fromTuple"),
// Private to the compiler - needed for forward binary compatibility
ProblemFilters.exclude[MissingClassProblem]("scala.annotation.since")
)
}
| dotty-staging/dotty | project/MiMaFilters.scala | Scala | apache-2.0 | 1,819 |
package com.estus.distribution
object rng {
private val N = 624
private val M = 397
private val MatrixA = 0x9908b0dfL
private val UpperMask = 0x80000000L
private val LowerMask = 0x7fffffffL
private val mt = new Array[Long](N)
private var mti = N + 1
private val seed = System.currentTimeMillis
mt(0) = seed
for (i <- 1 until N) mt(i) = (1812433253L * (mt(i - 1) ^ (mt(i - 1) >>> 30)) + i) & 0xffffffffL
// Sets Seed
def setSeed(seed: Long) = {
mt(0) = seed
for (i <- 1 until N) mt(i) = (1812433253L * (mt(i - 1) ^ (mt(i - 1) >>> 30)) + i) & 0xffffffffL
}
// Generates the next random integer in the sequence
def nextInt(): Int = {
var y = 0L
if(mti >= N) {
val mag01 = Array(0L, MatrixA)
var kk = 0
while (kk < N - M) {
y = (mt(kk) & UpperMask) | (mt(kk + 1) & LowerMask)
mt(kk) = mt(kk + M) ^ (y >>> 1) ^ mag01(y.toInt & 0x1)
kk += 1
}
while (kk < N - 1) {
y = (mt(kk) & UpperMask) | (mt(kk + 1) & LowerMask)
mt(kk) = mt(kk + (M - N)) ^ (y >>> 1) ^ mag01(y.toInt & 0x1)
kk += 1
}
y = (mt(N - 1) & UpperMask) | (mt(0) & LowerMask)
mt(N - 1) = mt(M - 1) ^ (y >>> 1) ^ mag01(y.toInt & 0x1)
mti = 0
}
y = mt(mti); mti += 1
y ^= y >>> 11
y ^= (y << 7) & 0x9d2c5680L
y ^= (y << 15) & 0xefc60000L
y ^= (y >>> 18)
y.toInt
}
// Generates a random integer in the interval [0, limit)
def nextInt(limit: Int): Int = {
// Find shift distance
val lim = limit.toLong & 0xffffffffL
var n = -1; var bit = 1L << 32
while (bit > lim) { n += 1; bit >>>= 1 }
// Generate integer, take most significant bits; reject while outside interval
var r = (nextInt().toLong & 0xffffffffL) >>> n
while (r >= lim) { r = (nextInt().toLong & 0xffffffffL) >>> n }
r.toInt
}
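  // Illustrative use (added sketch; results depend on the seed):
  //   rng.setSeed(42L)
  //   val roll = rng.nextInt(6) + 1  // uniform in [1, 6]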
// Generates a random Double in the interval [0, 1)
def nextDouble(): Double = {
val a: Long = (nextInt().toLong & 0xffffffffL) >>> 5
val b: Long = (nextInt().toLong & 0xffffffffL) >>> 6
(a * 67108864.0 + b) / 9007199254740992.0
}
} | EstusDev/Estus | estus-distribution/src/main/scala/rng.scala | Scala | apache-2.0 | 2,113 |
/*
* Copyright (C) 2014 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.task
package object netlogo4 extends netlogo.NetLogoPackage
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.task.netlogo4/src/main/scala/org/openmole/plugin/task/netlogo4/package.scala | Scala | agpl-3.0 | 791 |
package pl.pholda.malpompaaligxilo.dsl.parser.expr
import pl.pholda.malpompaaligxilo.dsl.DslFormExpr
import pl.pholda.malpompaaligxilo.dsl.expr.date._
import pl.pholda.malpompaaligxilo.dsl.parser.UtilParsers
import pl.pholda.malpompaaligxilo.util.Date
import scala.util.parsing.combinator.syntactical.StandardTokenParsers
trait DateExprParser extends StandardTokenParsers with UtilParsers {
lexical.reserved += ("date", "dateDiff", "years", "months", "days",
"compareDates", "today", "dayOfWeek")
lexical.delimiters += ("(", ")", ",", "<", "<=", "=", ">", ">=", "!=")
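  // Inputs this grammar is meant to accept, roughly (added sketch; the nested `expr` forms come
  // from the mixed-in parsers and the field name below is made up):
  //   date(today)
  //   dateDiff(days, someDateField, date(today))
  //   compareDates(someDateField < date(today))
  //   dayOfWeek(date(today))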
def date: PackratParser[DslFormExpr[_]] = dateFromString | dateDiff | dateCompare | dateToday
protected[dsl] def dateToday: Parser[DslFormExpr[Date]] = "date" ~ "(" ~ "today" ~ ")" ^^^ DateToday
protected[dsl] def dateFromString: Parser[DslFormExpr[Date]] = "date" ~> "(" ~> expr <~ ")" ^^ {
DateFromString
}
protected[dsl] def dateDiff: Parser[DslFormExpr[Int]] =
("dateDiff" ~> "(" ~> ("years" | "months" | "days") <~ ",") ~
expr ~ ("," ~> expr <~ ")") ^^ {
case unit ~ from ~ to => DateDiff(unit, from, to)
}
protected[dsl] def dateCompare: PackratParser[DslFormExpr[Boolean]] =
"compareDates" ~> "(" ~> expr ~ ("<" | "<=" | "=" | ">" | ">=" | "!=") ~ expr <~ ")" ^^ {
case a ~ op ~ b => DateCompare(op, a, b)
}
protected[dsl] def dateOfWeek: Parser[DslFormExpr[Int]] =
"dayOfWeek" ~> "(" ~> expr <~ ")" ^^ DayOfWeek
} | pholda/MalpompaAligxilo | dsl/shared/src/main/scala/pl/pholda/malpompaaligxilo/dsl/parser/expr/DateExprParser.scala | Scala | gpl-3.0 | 1,464 |
/*
* Copyright (c) 2014 Paul Bernard
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Spectrum Finance is based in part on:
* QuantLib. http://quantlib.org/
*
*/
package org.quantintel.ql.instruments.bonds
import java.util
import java.util.Collections
import org.quantintel.ql.Settings
import org.quantintel.ql.cashflows.{CashFlow, Leg, SimpleCashFlow}
import org.quantintel.ql.instruments.Instrument
import org.quantintel.ql.time.TimeUnit.DAYS
import org.quantintel.ql.time.{Calendar, Date}
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
/**
* A debt investment in which an investor loans money to an entity (corporate or
 * governmental) that borrows the funds for a defined period of time at a fixed interest
 * rate. Bonds are used by companies, municipalities, states, as well as US and
 * foreign governments to finance a variety of projects and activities.
 * Bonds are commonly referred to as fixed-income securities.
 * Source: Investopedia, 2014
*
*
* @author Paul Bernard
*/
class Bond extends Instrument {
protected var settlementDays: Int = 0
protected var calendar: Calendar = null
protected var issueDate: Date = null
protected var coupons: Leg = null
protected var notionalSchedule : ArrayBuffer[Date] = null
protected var notionals : ArrayBuffer[Double] = null
protected var cashflows: Leg = coupons
protected var redemptions = new Leg()
protected var maturityDate : Date = null
protected var settlementValue: Double = 0.0
def this(settlementDays: Int, calendar: Calendar, iDate: Date, coupons: Leg) {
this
this.settlementDays = settlementDays
this.calendar = calendar
this.cashflows = coupons
    this.issueDate = iDate.clone()
this.notionals = ArrayBuffer[Double]()
this.notionalSchedule = ArrayBuffer[Date]()
this.redemptions = Leg()
if (coupons.nonEmpty){
cashflows.sortWith(_.date <= _.date)
maturityDate = coupons.last.date
//addRedemptionsToCashFlows()
}
val evaluationDate : Date = new Settings().evaluationDate
evaluationDate.addObserver(this)
}
def this(settlementDays: Int, calendar: Calendar){
this(settlementDays, calendar, new Date, new Leg())
}
def this(settlementDays: Int, calendar: Calendar, issueDate: Date){
this(settlementDays, calendar, issueDate, new Leg())
}
def this(settlementDays : Int,
calendar: Calendar ,
faceAmount: Double,
maturityDate: Date,
issueDate: Date,
cashflows: Leg) {
this(settlementDays, calendar, issueDate.clone, cashflows)
this.maturityDate = maturityDate.clone()
this.notionalSchedule = ArrayBuffer[Date]()
this.notionals = ArrayBuffer[Double]()
this.redemptions = Leg()
if(cashflows.nonEmpty){
notionalSchedule.add(new Date())
notionals.add(faceAmount)
notionalSchedule.add(maturityDate.clone)
notionals.add(0.0)
val last : CashFlow = cashflows.last
redemptions.add(last)
cashflows.remove(last)
cashflows.sortWith(_.date <= _.date)
cashflows.add(last)
}
val evaluationDate : Date = new Settings().evaluationDate
evaluationDate.addObserver(this)
}
def this (settlementDays: Int,
calendar: Calendar,
faceAmount: Double,
maturityDate: Date) {
this(settlementDays, calendar, faceAmount, maturityDate, new Date(), new Leg())
}
def this (settlementDays: Int,
calendar: Calendar ,
faceAmount: Double,
maturityDate: Date,
issueDate: Date) {
this(settlementDays, calendar, faceAmount, maturityDate, issueDate, new Leg())
}
def notional : Double = notional(new Date)
def notional(date: Date) : Double = {
var ldate = date
if (date.isNull) ldate = settlementDate
if (ldate > notionalSchedule(notionalSchedule.size - 1)) return 0.0
    var index: Int = Collections.binarySearch[Date](notionalSchedule, ldate)
    if (index < 0) index = (index + 1) * -1
    if (ldate <= notionalSchedule(index)) {
notionals(index-1)
} else {
if (new Settings().isTodaysPayments){
notionals(index-1)
} else {
notionals(index)
}
}
}
def settlementDate : Date = settlementDate(new Date())
def settlementDate (date: Date): Date = {
var d : Date = null
if (date.isNull) d = new Settings().evaluationDate else d = date
val settlement: Date = calendar.advance(d, settlementDays, DAYS)
if (issueDate.isNull) settlement else Date.max(settlement, issueDate.clone)
}
def setSingleRedemption(notional: Double, redemption: Double, date: Date) {
redemptions.clear()
notionalSchedule.add(new Date())
    notionals += notional
notionalSchedule.add(new Date())
    notionals += 0.0
val redemptionCashflow : CashFlow = new SimpleCashFlow (notional * redemption/100.0, date)
cashflows.add(redemptionCashflow)
redemptions.add(redemptionCashflow)
}
def isExpired : Boolean = cashflows.last.hasOccurred(settlementDate)
protected def addRedemptionsToCashFlow(): Unit = {
//addRedemptionsToCashflows(new List[Double]())
}
protected def addRedemptionsToCashflows(redemptions: List[Double]) {
calculateNotionalsFromCashflows()
this.redemptions.clear()
(1 to notionalSchedule.size -1).foreach(
(i: Int) => {
val R : Double = if (i< redemptions.size) redemptions.get(i)
else if (!redemptions.isEmpty) redemptions.get(redemptions.size-1) else
100.00
val amount : Double = (R/100.0)*(this.notionals.get(i-1)-notionals.get(i))
val redemption = new SimpleCashFlow(amount, notionalSchedule.get(i))
this.cashflows.add(redemption)
this.redemptions.add(redemption)
}
)
}
protected def calculateNotionalsFromCashflows(): Unit = {
notionalSchedule clear()
notionals clear()
var lastPaymentDate : Date = Date()
notionalSchedule add Date()
(0 to (cashflows.size - 1)).foreach(
(i: Int) => {
val cfObj : Object = cashflows.get(i)
// TODO:
}
)
}
}
| quantintel/spectrum | financial/src/main/scala/org/quantintel/ql/instruments/bonds/Bond.scala | Scala | apache-2.0 | 6,717 |
package org.apache.spark.storage.pmof
import java.nio.ByteBuffer
import org.apache.spark.internal.Logging
import org.apache.spark.network.pmof.PmofTransferService
import org.apache.spark.SparkEnv
import scala.collection.JavaConverters._
import java.nio.file.{Files, Paths}
import java.util.UUID
import java.lang.management.ManagementFactory
import org.apache.spark.network.buffer.ManagedBuffer
import org.apache.spark.util.configuration.pmof.PmofConf
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global
private[spark] class PersistentMemoryHandler(
val root_dir: String,
val path_list: List[String],
val shuffleId: String,
var poolSize: Long = -1) extends Logging {
// need to use a locked file to get which pmem device should be used.
val pmMetaHandler: PersistentMemoryMetaHandler = new PersistentMemoryMetaHandler(root_dir)
var device: String = pmMetaHandler.getShuffleDevice(shuffleId)
if(device == "") {
    // this shuffleId hasn't been written before, choose a new device
val path_array_list = new java.util.ArrayList[String](path_list.asJava)
device = pmMetaHandler.getUnusedDevice(path_array_list)
val dev = Paths.get(device)
if (Files.isDirectory(dev)) {
// this is fsdax, add a subfile
device += "/shuffle_block_" + UUID.randomUUID().toString()
logInfo("This is a fsdax, filename:" + device)
} else {
logInfo("This is a devdax, name:" + device)
poolSize = 0
}
}
val pmpool = new PersistentMemoryPool(device, poolSize)
var rkey: Long = 0
def getDevice(): String = {
device
}
def updateShuffleMeta(shuffleId: String): Unit = synchronized {
pmMetaHandler.insertRecord(shuffleId, device);
}
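  // The native call below appears to return a flat array laid out as
  // [offset0, size0, offset1, size1, ...]; it is regrouped here into (offset, size) pairs.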
def getPartitionBlockInfo(blockId: String): Array[(Long, Int)] = {
var res_array: Array[Long] = pmpool.getPartitionBlockInfo(blockId)
var i = -2
var blockInfo = Array.ofDim[(Long, Int)]((res_array.length)/2)
blockInfo.map{
x => i += 2;
(res_array(i), res_array(i+1).toInt)
}
}
def getPartitionSize(blockId: String): Long = {
pmpool.getPartitionSize(blockId)
}
def setPartition(numPartitions: Int, blockId: String, byteBuffer: ByteBuffer, size: Int, clean: Boolean): Unit = {
pmpool.setPartition(blockId, byteBuffer, size, clean)
}
def deletePartition(blockId: String): Unit = {
pmpool.deletePartition(blockId)
}
def removeBlock(blockId: String): Long = {
pmpool.removeBlock(blockId)
}
def getPartitionManagedBuffer(blockId: String): ManagedBuffer = {
new PmemManagedBuffer(this, blockId)
}
def close(): Unit = synchronized {
pmpool.close()
pmMetaHandler.remove()
}
def getRootAddr(): Long = {
pmpool.getRootAddr();
}
def log(printout: String) {
logInfo(printout)
}
}
object PersistentMemoryHandler {
private var persistentMemoryHandler: PersistentMemoryHandler = _
private var stopped: Boolean = _
def getPersistentMemoryHandler(pmofConf: PmofConf, root_dir: String, path_arg: List[String], shuffleBlockId: String, pmPoolSize: Long): PersistentMemoryHandler = synchronized {
if (persistentMemoryHandler == null) {
persistentMemoryHandler = new PersistentMemoryHandler(root_dir, path_arg, shuffleBlockId, pmPoolSize)
persistentMemoryHandler.log("Use persistentMemoryHandler Object: " + this)
if (pmofConf.enableRdma) {
val blockManager = SparkEnv.get.blockManager
val eqService = PmofTransferService.getTransferServiceInstance(pmofConf, blockManager).server.getEqService
val offset: Long = persistentMemoryHandler.getRootAddr
val rdmaBuffer = eqService.regRmaBufferByAddress(null, offset, pmofConf.pmemCapacity)
persistentMemoryHandler.rkey = rdmaBuffer.getRKey()
}
val core_set = pmofConf.pmemCoreMap.get(persistentMemoryHandler.getDevice())
core_set match {
case Some(s) => Future {nativeTaskset(s)}
case None => {}
}
stopped = false
}
persistentMemoryHandler
}
def getPersistentMemoryHandler: PersistentMemoryHandler = synchronized {
if (persistentMemoryHandler == null) {
throw new NullPointerException("persistentMemoryHandler")
}
persistentMemoryHandler
}
def stop(): Unit = synchronized {
if (!stopped && persistentMemoryHandler != null) {
persistentMemoryHandler.close()
persistentMemoryHandler = null
stopped = true
}
}
def nativeTaskset(core_set: String): Unit = {
Runtime.getRuntime.exec("taskset -cpa " + core_set + " " + getProcessId())
}
def getProcessId(): Int = {
val runtimeMXBean = ManagementFactory.getRuntimeMXBean()
runtimeMXBean.getName().split("@")(0).toInt
}
}
| Intel-bigdata/OAP | oap-shuffle/RPMem-shuffle/core/src/main/scala/org/apache/spark/storage/pmof/PersistentMemoryHandler.scala | Scala | apache-2.0 | 4,748 |
package com.teambytes.awsleader
import akka.actor.{ActorSystem, PoisonPill}
import akka.contrib.pattern.ClusterSingletonManager
import com.typesafe.config.Config
import org.slf4j.LoggerFactory
import scala.concurrent.ExecutionContext
object AwsLeaderElection {
def startLeaderElection(handler: LeaderActionsHandler)(implicit ec: ExecutionContext): Unit =
new AwsLeaderElection(handler, AkkaConfig.apply())(ec)
def startLeaderElection(handler: LeaderActionsHandler, defaults: Config)(implicit ec: ExecutionContext): Unit =
new AwsLeaderElection(handler, AkkaConfig(defaults))(ec)
}
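// Typical entry point (added sketch): call AwsLeaderElection.startLeaderElection(handler) with an
// application-supplied LeaderActionsHandler and an implicit ExecutionContext in scope; the class
// below then boots the election cluster as a side effect of construction.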
class AwsLeaderElection(handler: LeaderActionsHandler, akkaConfig: AkkaConfig)(implicit ec: ExecutionContext) {
private val logger = LoggerFactory.getLogger(classOf[AwsLeaderElection])
logger.info("Loading leader election system...")
logger.info(s"Seeds: ${akkaConfig.seeds}")
private val clusterSystem = ActorSystem("aws-leader-election-cluster", akkaConfig.config)
clusterSystem.actorOf(
ClusterSingletonManager.props(
singletonProps = LeaderElectionActor.props(handler, akkaConfig.seeds.size),
singletonName = "aws-leader-elector",
terminationMessage = PoisonPill,
role = None
),
name = "singleton"
)
logger.info("Leader election started!")
} | grahamar/aws-leader-election | src/main/scala/com/teambytes/awsleader/AwsLeaderElection.scala | Scala | apache-2.0 | 1,301 |
package moe.pizza.zkapi
import io.circe.JsonObject
/**
* Created by Andi on 31/01/2016.
*/
object RedisQTypes {
case class SolarSystem(
id_str: String,
href: String,
id: Long,
name: String
)
case class Icon(
href: String
)
case class Href(
id_str: String,
href: String,
id: Long,
name: String,
icon: Icon
)
case class Attackers(
alliance: Option[Href],
shipType: Option[Href],
corporation: Option[Href],
character: Option[Href],
damageDone_str: String,
weaponType: Option[Href],
faction: Option[Href],
finalBlow: Boolean,
securityStatus: Double,
damageDone: Long
)
case class Position(
y: Double,
x: Double,
z: Double
)
case class Items(
singleton: Long,
itemType: Href,
items: Option[List[Items]],
quantityDestroyed_str: Option[String],
quantityDropped_str: Option[String],
flag: Long,
flag_str: String,
singleton_str: String,
quantityDestroyed: Option[Long],
quantityDropped: Option[Long]
)
case class Victim(
alliance: Option[Href],
faction: Option[Href],
damageTaken: Long,
items: List[Items],
damageTaken_str: String,
character: Href,
shipType: Href,
corporation: Href,
position: Position
)
case class War(
href: String,
id: Long,
id_str: String
)
case class Killmail(
solarSystem: SolarSystem,
killID: Long,
killTime: String,
attackers: List[Attackers],
attackerCount: Long,
victim: Victim,
killID_str: String,
attackerCount_str: String,
war: War
)
case class Zkb(
locationID: Long,
hash: String,
totalValue: Double,
points: Long,
href: String
)
case class Package(
killID: Long,
killmail: Killmail,
zkb: Zkb
)
case class RedisQResponse(`package`: Option[JsonObject])
}
| xxpizzaxx/pizza-eveapi | src/main/scala/moe/pizza/zkapi/RedisQTypes.scala | Scala | mit | 2,071 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.reactBootstrap
import scala.scalajs.js
import com.glipka.easyReactJS.react._
import ReactBootstrap._
// [ButtonGroup /]
@js.native trait ButtonGroupProps extends HTMLProps[ButtonGroup] with js.Any {
var block : Boolean=js.native
var bsSize : Sizes=js.native
var bsStyle : String=js.native
var justified : Boolean=js.native
var vertical : Boolean=js.native
}
| glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/reactBootstrap/ButtonGroupProps.scala | Scala | apache-2.0 | 1,022 |
/*
*
* Copyright (c) 2016 Sylvain Julmy
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to the
* Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package klughdl.core.backend.dot
import java.io.File
import klughdl.core.backend.Backend
import klughdl.core.model._
import klughdl.core.utils.{DotConverterTools, FileManager}
case class Dot(targetDirectory: String) extends Backend {
private val extInput = "EXTERNAL_INPUT"
private val extOutput = "EXTERNAL_OUTPUT"
override def generate(model: Model): String = {
model.diagrams.map(generate).mkString("\\n\\n\\n")
}
override def generate(diagram: Diagram): String = {
val head =
s"""digraph g {
|graph [rankdir=LR,ranksep=\\"2\\",nodesep=\\"2\\"];
|node [shape=record];
""".stripMargin
val last =
s"""
|}
""".stripMargin
/*
     * Now we generate:
     * - the nodes with their ports
* - the inputs of the parent component which are outputs in the diagram
* - the outputs of the parent component which are inputs in the diagram
*/
val nodes = diagram.components.map { entry =>
entry._2 match {
case KlugHDLComponentBasic(_, _, _) =>
s"""${entry._2.name} [label="{{${inputDotPort(entry._2.ports)}}|${entry._2.name}|{${outputDotPort(entry._2.ports)}}}"];"""
case KlugHDLComponentIO(_, _) =>
s"""
|$extInput [label="{$extInput|{${inputDotPort(entry._2.ports)}}}"];
|$extOutput [label="{{${outputDotPort(entry._2.ports)}}|$extOutput}"];
""".stripMargin
}
}.mkString("\\n")
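    /*
     * For a (hypothetical) non-IO component "A" with one input port "in" and one output port
     * "out", the entry above renders roughly as:
     *   A [label="{{<in>in}|A|{<out>out}}"];
     */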
/*
     * Generate the connections between the nodes;
     * only connections between sibling ("brother") nodes
     * are generated here
*/
val connections = (for {
entry <- diagram.connections
value <- entry._2
if !entry._1._1.isIO && !value._1.isIO
if entry._1._1.component != diagram.parent
if entry._1._1.parent == value._1.parent
if entry._1._1 != value._1
} yield
s"""${entry._1._1.name}:${entry._1._2.dotName} -> ${value._1.name}:${value._2.dotName};""")
.toList
.distinct
.mkString("\\n")
/*
     * Generate the connections from the
* inputs/outputs of the external world
*/
// inputs
val inputs =
(for {
entry <- diagram.connections
value <- entry._2
if entry._1._1.isIO
if !value._1.isIO
} yield
s"""$extInput:${entry._1._2.dotName} -> ${value._1.name}:${value._2.dotName};""")
.toList
.distinct
.mkString("\\n")
// outputs
val outputs = (for {
entry <- diagram.connections
value <- entry._2
if value._1.isIO
if !entry._1._1.isIO
} yield
s"""${entry._1._1.name}:${entry._1._2.dotName} -> $extOutput:${value._2.dotName};""")
.toList
.distinct
.mkString("\\n")
head + nodes + connections + inputs + outputs + last
}
def inputDotPort(ports: Set[Port]): String = {
ports.filter(_.isInput).map(p => s"<${p.dotName}>${p.dotName}").mkString(" | ")
}
def outputDotPort(ports: Set[Port]): String = {
ports.filter(_.isOutput).map(p => s"<${p.dotName}>${p.dotName}").mkString(" | ")
}
def generatePDFDiagram(model: Model): Dot = {
// Generate a diagram for each parent in the graph
model.diagrams.foreach {
generatePDFDiagram
}
this
}
private def generatePDFDiagram(diagram: Diagram): Dot = {
val outputFileName = {
if (diagram.parent == null) s"null.dot"
else s"${diagram.parent.definitionName}.dot"
}
val outputFile = new File(s"$targetDirectory/$outputFileName")
val fileManager: FileManager = FileManager(outputFileName, targetDirectory)
DotConverterTools.generatePdfFile(outputFile.getAbsolutePath)
fileManager.println(generate(diagram))
fileManager.close()
this
}
}
| SnipyJulmy/MSE_1617_PA | KlugHDL/src/main/scala/klughdl/core/backend/dot/Dot.scala | Scala | gpl-2.0 | 4,529 |
package com.campudus.tableaux.helper
object DocUriParser {
type SchemeHostAndPath = (String, String, String)
val DEFAULT_VALUES: SchemeHostAndPath = ("http", "localhost:8181", "")
def parse(absoluteUri: String): SchemeHostAndPath = {
val UriMatcher = "(https?)://([^/]+)/?(.*)/docs.*".r
absoluteUri match {
case UriMatcher(scheme, host, path) => (scheme, host, path)
case _ => DEFAULT_VALUES
}
}
}
| campudus/tableaux | src/main/scala/com/campudus/tableaux/helper/DocUriParser.scala | Scala | apache-2.0 | 435 |
package sword.langbook
import sword.langbook.db.{LinkedStorageManager, Alphabet}
/**
* Root point for all questions.
*
* This trait will be used by the UI to display in a generic way one or another question
*/
trait Question {
def clues: Map[Alphabet, String]
def possibleAnswers: Set[Map[Alphabet, String]]
/**
   * Returns a string that can be used to create this question again.
   * The resulting string does not include the type of question.
*/
protected def encoded: String
/**
* Encodes the question including the type as well.
*/
def encodedQuestion: String = {
this match {
case _: InterAlphabetQuestion => s"${Question.questionTypes.interAlphabet}$encoded"
case _: SynonymQuestion => s"${Question.questionTypes.synonym}$encoded"
case _: TranslationQuestion => s"${Question.questionTypes.translation}$encoded"
}
}
}
object Question {
object questionTypes {
val interAlphabet = 'A'
val synonym = 'S'
val translation = 'T'
}
/**
* Decodes a question.
* @param manager LinkedStorageManager to be used to generate the question again.
   * @param encodedQuestion String returned by [[Question.encodedQuestion]]
* @return an instance of the question
*/
def decode(manager: LinkedStorageManager, encodedQuestion: String): Option[Question] = {
val questionType = encodedQuestion.head
val rest = encodedQuestion.tail
questionType match {
case questionTypes.interAlphabet => InterAlphabetQuestion.decode(manager, rest)
case questionTypes.synonym => SynonymQuestion.decode(manager, rest)
case questionTypes.translation => TranslationQuestion.decode(manager, rest)
case _ => None
}
}
} | carlos-sancho-ramirez/generic-scala-langbook | src/main/scala/sword/langbook/Question.scala | Scala | mit | 1,729 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.nio.ByteBuffer
import kafka.common.{TopicAndPartition, ErrorMapping}
import kafka.api.ApiUtils._
import collection.Set
object ControlledShutdownResponse {
def readFrom(buffer: ByteBuffer): ControlledShutdownResponse = {
val correlationId = buffer.getInt
val errorCode = buffer.getShort
val numEntries = buffer.getInt
var partitionsRemaining = Set[TopicAndPartition]()
for (i<- 0 until numEntries){
val topic = readShortString(buffer)
val partition = buffer.getInt
partitionsRemaining += new TopicAndPartition(topic, partition)
}
new ControlledShutdownResponse(correlationId, errorCode, partitionsRemaining)
}
}
case class ControlledShutdownResponse(val correlationId: Int,
val errorCode: Short = ErrorMapping.NoError,
val partitionsRemaining: Set[TopicAndPartition])
extends RequestOrResponse() {
def sizeInBytes(): Int ={
var size =
4 /* correlation id */ +
2 /* error code */ +
4 /* number of responses */
for (topicAndPartition <- partitionsRemaining) {
size +=
2 + topicAndPartition.topic.length /* topic */ +
4 /* partition */
}
size
}
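  // Worked example (sketch): one remaining partition of a topic named "t1" gives
  // 4 (correlation id) + 2 (error code) + 4 (count) + (2 + 2 + 4) = 18 bytes.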
def writeTo(buffer: ByteBuffer) {
buffer.putInt(correlationId)
buffer.putShort(errorCode)
buffer.putInt(partitionsRemaining.size)
for (topicAndPartition:TopicAndPartition <- partitionsRemaining){
writeShortString(buffer, topicAndPartition.topic)
buffer.putInt(topicAndPartition.partition)
}
}
override def describe(details: Boolean):String = { toString }
} | cran/rkafkajars | java/kafka/api/ControlledShutdownResponse.scala | Scala | apache-2.0 | 2,468 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package system.rest
import org.junit.runner.RunWith
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.junit.JUnitRunner
import com.jayway.restassured.RestAssured
import spray.json._
import DefaultJsonProtocol._
/**
* Basic tests of the download link for Go CLI binaries
*/
@RunWith(classOf[JUnitRunner])
class GoCLINginxTests extends FlatSpec with Matchers with RestUtil {
val DownloadLinkGoCli = "cli/go/download"
val ServiceURL = getServiceURL()
it should s"respond to all files in root directory" in {
val response = RestAssured.given().config(sslconfig).get(s"$ServiceURL/$DownloadLinkGoCli")
response.statusCode should be(200)
val responseString = response.body.asString
responseString should include("""<a href="content.json">content.json</a>""")
val responseJSON = RestAssured.given().config(sslconfig).get(s"$ServiceURL/$DownloadLinkGoCli/content.json")
responseJSON.statusCode should be(200)
val cli = responseJSON.body.asString.parseJson.asJsObject
.fields("cli")
.convertTo[Map[String, Map[String, Map[String, String]]]]
cli.foreach {
case (os, arch) => responseString should include(s"""<a href="$os/">$os/</a>""")
}
}
it should "respond to all operating systems and architectures in HTML index" in {
val responseJSON = RestAssured.given().config(sslconfig).get(s"$ServiceURL/$DownloadLinkGoCli/content.json")
responseJSON.statusCode should be(200)
val cli = responseJSON.body.asString.parseJson.asJsObject
.fields("cli")
.convertTo[Map[String, Map[String, Map[String, String]]]]
cli.foreach {
case (os, arch) =>
val response = RestAssured.given().config(sslconfig).get(s"$ServiceURL/$DownloadLinkGoCli/$os")
response.statusCode should be(200)
val responseString = response.body.asString
arch.foreach {
case (arch, path) =>
if (arch != "default") {
responseString should include(s"""<a href="$arch/">$arch/</a>""")
}
}
}
}
it should "respond to the download paths in content.json" in {
val response = RestAssured.given().config(sslconfig).get(s"$ServiceURL/$DownloadLinkGoCli/content.json")
response.statusCode should be(200)
val cli =
response.body.asString.parseJson.asJsObject.fields("cli").convertTo[Map[String, Map[String, Map[String, String]]]]
cli.values.flatMap(_.values).flatMap(_.values).foreach { path =>
RestAssured.given().config(sslconfig).get(s"$ServiceURL/$DownloadLinkGoCli/$path").statusCode should be(200)
}
}
}
| starpit/openwhisk | tests/src/test/scala/system/rest/GoCLINginxTests.scala | Scala | apache-2.0 | 3,414 |
package org.loudkicks.console
import org.loudkicks._
import org.loudkicks.service._
import scala.util.parsing.input.CharSequenceReader
class PublishParserSpec extends UnitSpec {
"PublishParser" when {
"parsing a valid command line" should {
"return a posted response for that user name and message" in new TestPublishParser {
publish("Alice -> I love the weather today").get should
be(PublishCommand(Alice, Message("I love the weather today"), posts))
}
}
"parsing a invalid command line" should {
"ignore it" in new TestPublishParser {
a [RuntimeException] should be thrownBy publish(new CharSequenceReader("Alice")).get
}
}
}
trait TestPublishParser extends PublishParser {
val posts = PostDistributor(Seq.empty, TestTime())
}
}
| timothygordon32/loudkicks | src/test/scala/org/loudkicks/console/PublishParserSpec.scala | Scala | apache-2.0 | 816 |
package io.findify.s3mock.provider.metadata
import com.amazonaws.services.s3.model.ObjectMetadata
import scala.collection.concurrent.TrieMap
import scala.collection.mutable
class InMemoryMetadataStore extends MetadataStore {
private val bucketMetadata = new TrieMap[String, mutable.Map[String, ObjectMetadata]]
override def put(bucket: String, key: String, meta: ObjectMetadata): Unit = {
val currentBucketMetadata = bucketMetadata.getOrElseUpdate(bucket, new TrieMap[String, ObjectMetadata]())
currentBucketMetadata.put(key, meta)
}
override def get(bucket: String, key: String): Option[ObjectMetadata] = {
bucketMetadata.get(bucket).flatMap(_.get(key))
}
override def delete(bucket: String, key: String): Unit = {
val currentBucketMetadata = bucketMetadata.get(bucket)
currentBucketMetadata.flatMap(_.remove(key))
}
override def remove(bucket: String): Unit = bucketMetadata.remove(bucket)
}
| findify/s3mock | src/main/scala/io/findify/s3mock/provider/metadata/InMemoryMetadataStore.scala | Scala | mit | 938 |
/*
Copyright 2013 Tomas Tauber
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.mathematics
import org.scalacheck.Arbitrary
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import org.scalacheck._
import org.scalacheck.Gen._
import org.scalatest.{ Matchers, WordSpec }
import com.twitter.scalding._
import Matrix2._
import cascading.flow.FlowDef
import com.twitter.algebird.Ring
import com.twitter.scalding.IterableSource
/**
* Unit tests used in development
* (stronger properties are tested in ScalaCheck tests at the end)
*/
class Matrix2OptimizationSpec extends WordSpec with Matchers {
import com.twitter.scalding.Test
implicit val mode: Test = Test(Map())
implicit val fd: FlowDef = new FlowDef
val globM = TypedPipe.from(IterableSource(List((1, 2, 3.0), (2, 2, 4.0))))
implicit val ring: Ring[Double] = Ring.doubleRing
implicit val ord1: Ordering[Int] = Ordering.Int
implicit val ord2: Ordering[(Int, Int)] = Ordering.Tuple2[Int, Int]
def literal(tpipe: TypedPipe[(Int, Int, Double)], sizeHint: SizeHint): MatrixLiteral[Any, Any, Double] = MatrixLiteral(tpipe, sizeHint).asInstanceOf[MatrixLiteral[Any, Any, Double]]
def product(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double]): Product[Any, Any, Any, Double] = Product(left, right, ring)
def sum(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double]): Sum[Any, Any, Double] = Sum(left, right, ring)
/**
* Values used in tests
*/
// ((A1(A2 A3))((A4 A5) A6)
val optimizedPlan = product( // linter:ignore
product(literal(globM, FiniteHint(30, 35)),
product(literal(globM, FiniteHint(35, 15)),
literal(globM, FiniteHint(15, 5)))),
product(
product(literal(globM, FiniteHint(5, 10)),
literal(globM, FiniteHint(10, 20))),
literal(globM, FiniteHint(20, 25))))
val optimizedPlanCost = 1850 // originally 15125.0
// A1(A2(A3(A4(A5 A6))))
val unoptimizedPlan = product(literal(globM, FiniteHint(30, 35)), // linter:ignore
product(literal(globM, FiniteHint(35, 15)),
product(literal(globM, FiniteHint(15, 5)),
product(literal(globM, FiniteHint(5, 10)),
product(literal(globM, FiniteHint(10, 20)), literal(globM, FiniteHint(20, 25)))))))
val simplePlan = product(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 25))) // linter:ignore
val simplePlanCost = 750 //originally 26250
val combinedUnoptimizedPlan = sum(unoptimizedPlan, simplePlan) // linter:ignore
val combinedOptimizedPlan = sum(optimizedPlan, simplePlan) // linter:ignore
val combinedOptimizedPlanCost = optimizedPlanCost + simplePlanCost
  // A1 * (A2 * (A3 * ((A4 + A4) * (A5 * A6))))
val unoptimizedGlobalPlan = product(literal(globM, FiniteHint(30, 35)), // linter:ignore
product(literal(globM, FiniteHint(35, 15)),
product(literal(globM, FiniteHint(15, 5)),
product(sum(literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(5, 10))),
product(literal(globM, FiniteHint(10, 20)), literal(globM, FiniteHint(20, 25)))))))
// ((A1(A2 A3))(((A4 + A4) A5) A6)
val optimizedGlobalPlan = product( // linter:ignore
product(literal(globM, FiniteHint(30, 35)),
product(literal(globM, FiniteHint(35, 15)),
literal(globM, FiniteHint(15, 5)))),
product(
product(sum(literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(5, 10))),
literal(globM, FiniteHint(10, 20))),
literal(globM, FiniteHint(20, 25))))
val productSequence = IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 15)),
literal(globM, FiniteHint(15, 5)), literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(10, 20)),
literal(globM, FiniteHint(20, 25)))
val combinedSequence = List(IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 15)),
literal(globM, FiniteHint(15, 5)), literal(globM, FiniteHint(5, 10)), literal(globM, FiniteHint(10, 20)),
literal(globM, FiniteHint(20, 25))), IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 25))))
val planWithSum = product(literal(globM, FiniteHint(30, 35)), sum(literal(globM, FiniteHint(35, 25)), literal(globM, FiniteHint(35, 25)))) // linter:ignore
val g = literal(globM, FiniteHint(30, 30)) // linter:ignore
val g2 = product(g, g) // linter:ignore
val g4 = product(g2, g2) // linter:ignore
val optimizedGraph8 = product(g4, g4) // linter:ignore
val unoptimizedGraphVectorPlan = (g ^ (5)) * literal(globM, FiniteHint(Long.MaxValue, 1))
val optimizedGraphVectorPlan = product( // linter:ignore
product(
literal(globM, FiniteHint(30, 30)),
literal(globM, FiniteHint(30, 30))),
product(
literal(globM, FiniteHint(30, 30)),
product(
literal(globM, FiniteHint(30, 30)),
product(
literal(globM, FiniteHint(30, 30)),
literal(globM, FiniteHint(Long.MaxValue, 1))))))
"Matrix multiplication chain optimization" should {
"handle a single matrix" in {
val p = IndexedSeq(literal(globM, FiniteHint(30, 35)))
val result = optimizeProductChain(p, Some(ring, MatrixJoiner2.default))
result shouldBe (0, literal(globM, FiniteHint(30, 35)))
}
"handle two matrices" in {
val p = IndexedSeq(literal(globM, FiniteHint(30, 35)), literal(globM, FiniteHint(35, 25)))
val result = optimizeProductChain(p, Some(ring, MatrixJoiner2.default))
(simplePlanCost, simplePlan) shouldBe result
}
"handle an example with 6 matrices" in {
val result = optimizeProductChain(productSequence, Some(ring, MatrixJoiner2.default))
(optimizedPlanCost, optimizedPlan) shouldBe result
}
"not change an optimized plan" in {
(optimizedPlanCost, optimizedPlan) shouldBe optimize(optimizedPlan)
}
"change an unoptimized plan" in {
(optimizedPlanCost, optimizedPlan) shouldBe optimize(unoptimizedPlan)
}
"handle an optimized plan with sum" in {
(combinedOptimizedPlanCost, combinedOptimizedPlan) shouldBe optimize(combinedOptimizedPlan)
}
"handle an unoptimized plan with sum" in {
(combinedOptimizedPlanCost, combinedOptimizedPlan) shouldBe (optimize(combinedUnoptimizedPlan))
}
"not break A*(B+C)" in {
planWithSum shouldBe (optimize(planWithSum)._2)
}
"handle an unoptimized global plan" in {
optimizedGlobalPlan shouldBe (optimize(unoptimizedGlobalPlan)._2)
}
"handle an optimized global plan" in {
optimizedGlobalPlan shouldBe (optimize(optimizedGlobalPlan)._2)
}
"handle a G^5 V plan" in {
optimizedGraphVectorPlan shouldBe (optimize(unoptimizedGraphVectorPlan)._2)
}
"handle an optimized G^5 V plan" in {
optimizedGraphVectorPlan shouldBe (optimize(optimizedGraphVectorPlan)._2)
}
"handle a G^8 plan" in {
optimizedGraph8 shouldBe (optimize(g ^ 8)._2)
}
}
}
object Matrix2Props extends Properties("Matrix2") {
import com.twitter.scalding.Test
implicit val mode: Test = Test(Map())
implicit val fd: FlowDef = new FlowDef
val globM = TypedPipe.from(IterableSource(List((1, 2, 3.0), (2, 2, 4.0))))
implicit val ring: Ring[Double] = Ring.doubleRing
implicit val ord1: Ordering[Int] = Ordering.Int
def literal(tpipe: TypedPipe[(Int, Int, Double)], sizeHint: SizeHint): MatrixLiteral[Any, Any, Double] = MatrixLiteral(tpipe, sizeHint).asInstanceOf[MatrixLiteral[Any, Any, Double]]
def product(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double]): Product[Any, Any, Any, Double] = Product(left, right, ring)
def sum(left: Matrix2[Any, Any, Double], right: Matrix2[Any, Any, Double]): Sum[Any, Any, Double] = Sum(left, right, ring)
/**
* Helper methods used in tests for randomized generations
*/
def genLeaf(dims: (Long, Long)): (MatrixLiteral[Any, Any, Double], Long) = {
val (rows, cols) = dims
val sparGen = Gen.choose(0.0f, 1.0f)
val sparsity = sparGen.sample.get
val rowGen = Gen.choose(1, 1000)
val nextRows = if (rows <= 0) rowGen.sample.get else rows
if (cols <= 0) {
val colGen = Gen.choose(1, 1000)
val nextCols = colGen.sample.get
(literal(globM, SparseHint(sparsity, nextRows, nextCols)), nextCols)
} else {
(literal(globM, SparseHint(sparsity, nextRows, cols)), cols)
}
}
def productChainGen(current: Int, target: Int, prevCol: Long, result: List[MatrixLiteral[Any, Any, Double]]): List[MatrixLiteral[Any, Any, Double]] = {
if (current == target) result
else {
val (randomMatrix, cols) = genLeaf((prevCol, 0)) // linter:ignore
productChainGen(current + 1, target, cols, result ++ List(randomMatrix))
}
}
def randomProduct(p: Int): Matrix2[Any, Any, Double] = {
if (p == 1) genLeaf((0, 0))._1
else {
val full = productChainGen(0, p, 0, Nil).toIndexedSeq
generateRandomPlan(0, full.size - 1, full)
}
}
def genNode(depth: Int): Gen[Matrix2[Any, Any, Double]] = for {
v <- arbitrary[Int]
p <- Gen.choose(1, 10)
left <- genFormula(depth + 1)
right <- genFormula(depth + 1)
} yield if (depth > 5 || v > 0) randomProduct(p) else Sum(left, right, ring)
def genFormula(depth: Int): Gen[Matrix2[Any, Any, Double]] =
if (depth > 5)
genLeaf((0, 0))._1
else
(oneOf(genNode(depth + 1), Gen.const(genLeaf((0, 0))._1)))
implicit def arbT: Arbitrary[Matrix2[Any, Any, Double]] = Arbitrary(genFormula(0))
val genProdSeq = for {
v <- Gen.choose(1, 10)
} yield productChainGen(0, v, 0, Nil).toIndexedSeq
implicit def arbSeq: Arbitrary[IndexedSeq[MatrixLiteral[Any, Any, Double]]] = Arbitrary(genProdSeq)
def generateRandomPlan(i: Int, j: Int, p: IndexedSeq[MatrixLiteral[Any, Any, Double]]): Matrix2[Any, Any, Double] = {
if (i == j) p(i)
else {
val genK = Gen.choose(i, j - 1)
val k = genK.sample.getOrElse(i)
val X = generateRandomPlan(i, k, p) // linter:ignore
val Y = generateRandomPlan(k + 1, j, p) // linter:ignore
Product(X, Y, ring)
}
}
/**
* Function that recursively estimates a cost of a given MatrixFormula / plan.
* This is the used in the tests for checking whether an optimized plan has
* a cost <= a randomized plan.
* The cost estimation of this evaluation should return the same values as the one
* used in building optimized plans -- this is checked in the tests below.
* @return resulting cost
*/
def evaluate(mf: Matrix2[Any, Any, Double]): BigInt = {
/**
* This function strips off the formula into a list of independent product chains
* (i.e. same as matrixFormulaToChains in Prototype, but has Products
* instead of IndexedSeq[Literal])
*/
def toProducts(mf: Matrix2[Any, Any, Double]): (Option[Product[Any, Any, Any, Double]], List[Product[Any, Any, Any, Double]]) = {
mf match {
case element @ MatrixLiteral(_, _) => (None, Nil)
case Sum(left, right, _) => {
val (lastLP, leftR) = toProducts(left)
val (lastRP, rightR) = toProducts(right)
val total = leftR ++ rightR ++ (if (lastLP.isDefined) List(lastLP.get) else Nil) ++
(if (lastRP.isDefined) List(lastRP.get) else Nil)
(None, total)
}
case Product(leftp @ MatrixLiteral(_, _), rightp @ MatrixLiteral(_, _), _, _) => {
(Some(Product(leftp, rightp, ring)), Nil)
}
case Product(left @ Product(_, _, _, _), right @ MatrixLiteral(_, _), _, _) => {
val (lastLP, leftR) = toProducts(left)
if (lastLP.isDefined) (Some(Product(lastLP.get, right, ring)), leftR)
else (None, leftR)
}
case Product(left @ MatrixLiteral(_, _), right @ Product(_, _, _, _), _, _) => {
val (lastRP, rightR) = toProducts(right)
if (lastRP.isDefined) (Some(Product(left, lastRP.get, ring)), rightR)
else (None, rightR)
}
case Product(left, right, _, _) => {
val (lastLP, leftR) = toProducts(left)
val (lastRP, rightR) = toProducts(right)
if (lastLP.isDefined && lastRP.isDefined) {
(Some(Product(lastLP.get, lastRP.get, ring)), leftR ++ rightR)
} else {
val newP = if (lastLP.isDefined) List(lastLP.get) else if (lastRP.isDefined) List(lastRP.get) else Nil
(None, newP ++ leftR ++ rightR)
}
}
case HadamardProduct(_, _, _) => sys.error("Hadamard unexpected here")
}
}
/**
* To create a companion tree which has respective ranges of each product
*/
class LabeledTree(val range: (Int, Int), val left: Option[LabeledTree], val right: Option[LabeledTree]) {
def diff: Int = range._2 - range._1
}
def labelTree(p: Matrix2[Any, Any, Double], start: Int): Option[LabeledTree] = {
p match {
case Product(left @ MatrixLiteral(_, _), right @ MatrixLiteral(_, _), _, _) => {
Some(new LabeledTree((start, start + 1), None, None))
}
case Product(left @ MatrixLiteral(_, _), right @ Product(_, _, _, _), _, _) => {
val labelRight = labelTree(right, start + 1)
Some(new LabeledTree((start, labelRight.get.range._2), None, labelRight))
}
case Product(left @ Product(_, _, _, _), right @ MatrixLiteral(_, _), _, _) => {
val labelLeft = labelTree(left, start)
Some(new LabeledTree((labelLeft.get.range._1, labelLeft.get.range._2 + 1), labelLeft, None))
}
case Product(left, right, _, _) => {
val labelLeft = labelTree(left, start)
val labelRight = labelTree(right, labelLeft.get.range._2 + 1)
Some(new LabeledTree((labelLeft.get.range._1, labelRight.get.range._2), labelLeft, labelRight))
}
case _ => None
}
}
/**
* This function evaluates a product chain in the same way
* as the dynamic programming procedure computes cost
* (optimizeProductChain - computeCosts in Prototype)
*/
def evaluateProduct(p: Matrix2[Any, Any, Double], labels: LabeledTree): Option[(BigInt, Matrix2[Any, Any, Double], Matrix2[Any, Any, Double])] = {
p match {
case Product(left @ MatrixLiteral(_, _), right @ MatrixLiteral(_, _), _, _) => {
// reflects optimize when k==i: p(i).sizeHint * (p(k).sizeHint * p(j).sizeHint)
Some((left.sizeHint * (left.sizeHint * right.sizeHint)).total.get,
left, right)
}
case Product(left @ MatrixLiteral(_, _), right @ Product(_, _, _, _), _, _) => {
val (cost, pLeft, pRight) = evaluateProduct(right, labels.right.get).get // linter:ignore
// reflects optimize when k==i: p(i).sizeHint * (p(k).sizeHint * p(j).sizeHint)
// diff is computed in the labeled tree - it measures "spread" of the tree
// diff corresponds to (k - i) or (j - k - 1) in optimize: (k - i) * computeCosts(p, i, k) + (j - k - 1) * computeCosts(p, k + 1, j)
Some(labels.right.get.diff * cost + (left.sizeHint * (left.sizeHint * pRight.sizeHint)).total.get,
left, pRight)
}
case Product(left @ Product(_, _, _, _), right @ MatrixLiteral(_, _), _, _) => {
val (cost, pLeft, pRight) = evaluateProduct(left, labels.left.get).get // linter:ignore
Some(labels.left.get.diff * cost + (pLeft.sizeHint * (pRight.sizeHint * right.sizeHint)).total.get,
pLeft, right)
}
case Product(left, right, _, _) => {
val (cost1, p1Left, p1Right) = evaluateProduct(left, labels.left.get).get // linter:ignore
val (cost2, p2Left, p2Right) = evaluateProduct(right, labels.right.get).get // linter:ignore
Some(labels.left.get.diff * cost1 + labels.right.get.diff * cost2 + (p1Left.sizeHint * (p1Right.sizeHint * p2Right.sizeHint)).total.get,
p1Left, p2Right)
}
case _ => None
}
}
val (last, productList) = toProducts(mf)
val products = if (last.isDefined) last.get :: productList else productList
products.map(p => evaluateProduct(p, labelTree(p, 0).get).get._1).sum
}
// ScalaCheck properties
/**
* Verifying "evaluate" function - that it does return
* the same overall costs as what is estimated in the optimization procedure
*/
property("evaluate function returns the same cost as optimize") = forAll { (a: Matrix2[Any, Any, Double]) =>
optimize(a)._1 == evaluate(optimize(a)._2)
}
/**
* "Proof": the goal property that estimated costs of optimized plans or product chains
* are less than or equal to costs of randomized equivalent plans or product chains
*/
property("a cost of an optimized chain of matrix products is <= a random one") = forAll { (a: IndexedSeq[MatrixLiteral[Any, Any, Double]]) =>
optimizeProductChain(a, Some(ring, MatrixJoiner2.default))._1 <=
evaluate(generateRandomPlan(0, a.length - 1, a))
}
property("cost of a random plan is <= a random one") = forAll { (a: Matrix2[Any, Any, Double]) =>
optimize(a)._1 <= evaluate(a)
}
/**
* Sanity check
*/
property("optimizing an optimized plan does not change it") = forAll { (a: Matrix2[Any, Any, Double]) =>
optimize(a) == optimize(optimize(a)._2)
}
}
| jzmq/scalding | scalding-core/src/test/scala/com/twitter/scalding/mathematics/Matrix2OptimizationTest.scala | Scala | apache-2.0 | 17,844 |
package com.twitter.finagle.context
import com.twitter.util.Local
/**
* A type of context that is local to the process. The type of Key is
* also unique (generative) to each instance of this context, so that keys
* cannot be used across different instances of this context type.
*/
final class LocalContext private[context] extends Context {
private[this] val local = new Local[Map[Key[_], Any]]
class Key[A]
/**
* A java-friendly key constructor.
*/
def newKey[A]() = new Key[A]
def get[A](key: Key[A]): Option[A] = env.get(key).asInstanceOf[Option[A]]
def let[A, R](key: Key[A], value: A)(fn: => R): R =
letLocal(env.updated(key, value))(fn)
def let[A, B, R](key1: Key[A], value1: A, key2: Key[B], value2: B)(fn: => R): R = {
val next = env.updated(key1, value1).updated(key2, value2)
letLocal(next)(fn)
}
def let[R](pairs: Iterable[KeyValuePair[_]])(fn: => R): R = {
val next = pairs.foldLeft(env) { case (env, KeyValuePair(k, v)) => env.updated(k, v) }
letLocal(next)(fn)
}
def letClear[R](key: Key[_])(fn: => R): R = letLocal(env - key)(fn)
def letClear[R](keys: Iterable[Key[_]])(fn: => R): R = {
val next = keys.foldLeft(env) { (e, k) => e - k }
letLocal(next)(fn)
}
def letClearAll[R](fn: => R): R = local.letClear(fn)
// Exposed for testing
private[context] def env: Map[Key[_], Any] = local() match {
case Some(env) => env
case None => Map.empty
}
// Exposed for testing
private[context] def letLocal[T](env: Map[Key[_], Any])(fn: => T): T =
local.let(env)(fn)
}
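
// Hedged usage sketch, not part of the original Finagle sources: shows how the
// generative Key type above scopes a value to a `let` block. The names
// `LocalContextUsageSketch`, `demoCtx` and `userId` are assumptions for illustration.
private[context] object LocalContextUsageSketch {
  def example(): (Option[Int], Option[Int]) = {
    val demoCtx = new LocalContext
    val userId = demoCtx.newKey[Int]()
    val inside = demoCtx.let(userId, 42)(demoCtx.get(userId)) // Some(42) while the let block runs
    val outside = demoCtx.get(userId) // None once the block has exited
    (inside, outside)
  }
}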
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/context/LocalContext.scala | Scala | apache-2.0 | 1,575 |
package io.iohk.ethereum.network.p2p
import akka.util.ByteString
import io.iohk.ethereum.{Fixtures, ObjectGenerators}
import io.iohk.ethereum.domain.ChainWeight
import io.iohk.ethereum.network.p2p.messages.Capability.Capabilities._
import io.iohk.ethereum.network.p2p.messages.CommonMessages.SignedTransactions
import io.iohk.ethereum.network.p2p.messages._
import io.iohk.ethereum.security.SecureRandomBuilder
import org.bouncycastle.util.encoders.Hex
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
class MessageDecodersSpec extends AnyFlatSpec with Matchers with SecureRandomBuilder {
val decode = EthereumMessageDecoder.fromBytes _
val exampleHash = ByteString(Hex.decode("fccdbfe911f9df0a6cc0107d1240f76dfdd1d301b65fdc3cd2ae62752affbef6"))
val blockHashesFromNumberBytes: Array[Byte] = Hex.decode("c20c28")
val NewBlockHashesPV61bytes: Array[Byte] =
Hex.decode(
"f842a0fccdbfe911f9df0a6cc0107d1240f76dfdd1d301b65fdc3cd2ae62752affbef6a0fccdbfe911f9df0a6cc0107d1240f76dfdd1d301b65fdc3cd2ae62752affbef6"
)
"MessageDecoders" should "decode wire protocol message for all versions of protocol" in {
val helloBytes: Array[Byte] =
Hex.decode(
"f85404866d616e746973c6c5836574683f820d05b840a13f3f0555b5037827c743e40fce29139fcf8c3f2a8f12753872fe906a77ff70f6a7f517be995805ff39ab73af1d53dac1a6c9786eebc5935fc455ac8f41ba67"
)
val hello = WireProtocol.Hello(
p2pVersion = 4,
clientId = "mantis",
capabilities = Seq(Eth63Capability),
listenPort = 3333,
nodeId = ByteString(
Hex.decode(
"a13f3f0555b5037827c743e40fce29139fcf8c3f2a8f12753872fe906a77ff70f6a7f517be995805ff39ab73af1d53dac1a6c9786eebc5935fc455ac8f41ba67"
)
)
)
NetworkMessageDecoder.fromBytes(WireProtocol.Hello.code, helloBytes, ProtocolVersions.PV61) shouldBe hello
NetworkMessageDecoder.fromBytes(WireProtocol.Hello.code, helloBytes, ProtocolVersions.PV62) shouldBe hello
NetworkMessageDecoder.fromBytes(WireProtocol.Hello.code, helloBytes, ProtocolVersions.PV63) shouldBe hello
NetworkMessageDecoder.fromBytes(WireProtocol.Hello.code, helloBytes, ProtocolVersions.PV64) shouldBe hello
}
it should "decode NewBlockHashes message for all supported versions of protocol" in {
val newBlockHashesPV61 = PV61.NewBlockHashes(Seq(exampleHash, exampleHash))
val NewBlockHashesPV62bytes: Array[Byte] =
Hex.decode(
"f846e2a0fccdbfe911f9df0a6cc0107d1240f76dfdd1d301b65fdc3cd2ae62752affbef601e2a0fccdbfe911f9df0a6cc0107d1240f76dfdd1d301b65fdc3cd2ae62752affbef602"
)
val newBlockHashesPV62 = PV62.NewBlockHashes(Seq(PV62.BlockHash(exampleHash, 1), PV62.BlockHash(exampleHash, 2)))
decode(Codes.NewBlockHashesCode, NewBlockHashesPV61bytes, ProtocolVersions.PV61) shouldBe newBlockHashesPV61
decode(Codes.NewBlockHashesCode, NewBlockHashesPV62bytes, ProtocolVersions.PV62) shouldBe newBlockHashesPV62
decode(Codes.NewBlockHashesCode, NewBlockHashesPV62bytes, ProtocolVersions.PV63) shouldBe newBlockHashesPV62
decode(Codes.NewBlockHashesCode, NewBlockHashesPV62bytes, ProtocolVersions.PV64) shouldBe newBlockHashesPV62
}
it should "not decode message from older version of protocol as newer version" in {
assertThrows[RuntimeException] {
decode(Codes.NewBlockHashesCode, NewBlockHashesPV61bytes, ProtocolVersions.PV62)
}
}
it should "decode BlockHashesFromNumber message for all supported versions of protocol" in {
val blockHashesFromNumber = PV61.BlockHashesFromNumber(12, 40)
decode(
Codes.BlockHashesFromNumberCode,
blockHashesFromNumberBytes,
ProtocolVersions.PV61
) shouldBe blockHashesFromNumber
}
it should "decode GetBlockHeaders message for all supported versions of protocol" in {
val getBlockHeaders = PV62.GetBlockHeaders(Left(1), 1, 1, false)
val getBlockHeadersBytes: Array[Byte] = getBlockHeaders.toBytes
assertThrows[RuntimeException] {
decode(Codes.GetBlockHeadersCode, getBlockHeadersBytes, ProtocolVersions.PV61)
}
decode(Codes.GetBlockHeadersCode, getBlockHeadersBytes, ProtocolVersions.PV62) shouldBe getBlockHeaders
decode(Codes.GetBlockHeadersCode, getBlockHeadersBytes, ProtocolVersions.PV63) shouldBe getBlockHeaders
decode(Codes.GetBlockHeadersCode, getBlockHeadersBytes, ProtocolVersions.PV64) shouldBe getBlockHeaders
}
it should "decode BlockHeaders message for all supported versions of protocol" in {
val blockHeaders = PV62.BlockHeaders(ObjectGenerators.seqBlockHeaderGen.sample.get)
val blockHeadersBytes: Array[Byte] = blockHeaders.toBytes
assertThrows[RuntimeException] {
decode(Codes.BlockHeadersCode, blockHeadersBytes, ProtocolVersions.PV61)
}
decode(Codes.BlockHeadersCode, blockHeadersBytes, ProtocolVersions.PV62) shouldBe blockHeaders
decode(Codes.BlockHeadersCode, blockHeadersBytes, ProtocolVersions.PV63) shouldBe blockHeaders
decode(Codes.BlockHeadersCode, blockHeadersBytes, ProtocolVersions.PV64) shouldBe blockHeaders
}
it should "decode GetBlockBodies message for all supported versions of protocol" in {
val getBlockBodies = PV62.GetBlockBodies(Seq(exampleHash))
val getBlockBodiesBytes: Array[Byte] = getBlockBodies.toBytes
assertThrows[RuntimeException] {
decode(Codes.GetBlockBodiesCode, getBlockBodiesBytes, ProtocolVersions.PV61)
}
decode(Codes.GetBlockBodiesCode, getBlockBodiesBytes, ProtocolVersions.PV62) shouldBe getBlockBodies
decode(Codes.GetBlockBodiesCode, getBlockBodiesBytes, ProtocolVersions.PV63) shouldBe getBlockBodies
decode(Codes.GetBlockBodiesCode, getBlockBodiesBytes, ProtocolVersions.PV64) shouldBe getBlockBodies
}
it should "decode BlockBodies message for all supported versions of protocol" in {
val blockBodies = PV62.BlockBodies(Seq(Fixtures.Blocks.Block3125369.body, Fixtures.Blocks.DaoForkBlock.body))
val blockBodiesBytes: Array[Byte] = blockBodies.toBytes
assertThrows[RuntimeException] {
decode(Codes.BlockBodiesCode, blockBodiesBytes, ProtocolVersions.PV61)
}
decode(Codes.BlockBodiesCode, blockBodiesBytes, ProtocolVersions.PV62) shouldBe blockBodies
decode(Codes.BlockBodiesCode, blockBodiesBytes, ProtocolVersions.PV63) shouldBe blockBodies
decode(Codes.BlockBodiesCode, blockBodiesBytes, ProtocolVersions.PV64) shouldBe blockBodies
}
it should "decode GetNodeData message for all supported versions of protocol" in {
val getNodeData = PV63.GetNodeData(Seq(exampleHash))
val getNodeDataBytes: Array[Byte] = getNodeData.toBytes
assertThrows[RuntimeException] {
decode(Codes.GetNodeDataCode, getNodeDataBytes, ProtocolVersions.PV61)
}
assertThrows[RuntimeException] {
decode(Codes.GetNodeDataCode, getNodeDataBytes, ProtocolVersions.PV62)
}
decode(Codes.GetNodeDataCode, getNodeDataBytes, ProtocolVersions.PV63) shouldBe getNodeData
decode(Codes.GetNodeDataCode, getNodeDataBytes, ProtocolVersions.PV64) shouldBe getNodeData
}
it should "decode NodeData message for all supported versions of protocol" in {
val nodeData = PV63.NodeData(Seq(exampleHash))
val nodeDataBytes: Array[Byte] = nodeData.toBytes
assertThrows[RuntimeException] {
decode(Codes.NodeDataCode, nodeDataBytes, ProtocolVersions.PV61)
}
assertThrows[RuntimeException] {
decode(Codes.NodeDataCode, nodeDataBytes, ProtocolVersions.PV62)
}
decode(Codes.NodeDataCode, nodeDataBytes, ProtocolVersions.PV63) shouldBe nodeData
decode(Codes.NodeDataCode, nodeDataBytes, ProtocolVersions.PV64) shouldBe nodeData
}
it should "decode GetReceipts message for all supported versions of protocol" in {
val getReceipts = PV63.GetReceipts(Seq(exampleHash))
val getReceiptsBytes: Array[Byte] = getReceipts.toBytes
assertThrows[RuntimeException] {
decode(Codes.GetReceiptsCode, getReceiptsBytes, ProtocolVersions.PV61)
}
assertThrows[RuntimeException] {
decode(Codes.GetReceiptsCode, getReceiptsBytes, ProtocolVersions.PV62)
}
decode(Codes.GetReceiptsCode, getReceiptsBytes, ProtocolVersions.PV63) shouldBe getReceipts
decode(Codes.GetReceiptsCode, getReceiptsBytes, ProtocolVersions.PV64) shouldBe getReceipts
}
it should "decode Receipts message for all supported versions of protocol" in {
val receipts = PV63.Receipts(ObjectGenerators.receiptsGen(3).sample.get)
val receiptsBytes: Array[Byte] = receipts.toBytes
assertThrows[RuntimeException] {
decode(Codes.ReceiptsCode, receiptsBytes, ProtocolVersions.PV61)
}
assertThrows[RuntimeException] {
decode(Codes.ReceiptsCode, receiptsBytes, ProtocolVersions.PV62)
}
decode(Codes.ReceiptsCode, receiptsBytes, ProtocolVersions.PV63) shouldBe receipts
decode(Codes.ReceiptsCode, receiptsBytes, ProtocolVersions.PV64) shouldBe receipts
}
it should "decode Status message for all supported versions of protocol" in {
val status63 = CommonMessages.Status(ProtocolVersions.PV63, 1, BigInt(100), exampleHash, exampleHash)
val status63Bytes: Array[Byte] = status63.toBytes
val status64 = PV64.Status(ProtocolVersions.PV63, 1, ChainWeight(1, BigInt(100)), exampleHash, exampleHash)
    // not strictly accurate: the Status message differed in PV61, but we do not support that old message format
decode(Codes.StatusCode, status63Bytes, ProtocolVersions.PV61) shouldBe status63
decode(Codes.StatusCode, status63Bytes, ProtocolVersions.PV62) shouldBe status63
decode(Codes.StatusCode, status63Bytes, ProtocolVersions.PV63) shouldBe status63
decode(Codes.StatusCode, status64.toBytes, ProtocolVersions.PV64) shouldBe status64
}
it should "decode NewBlock message for all supported versions of protocol" in {
val newBlock63 = ObjectGenerators.newBlockGen(secureRandom, None).sample.get
val newBlock63Bytes: Array[Byte] = newBlock63.toBytes
val newBlock64 = ObjectGenerators.newBlock64Gen(secureRandom, None).sample.get
decode(Codes.NewBlockCode, newBlock63Bytes, ProtocolVersions.PV61) shouldBe newBlock63
decode(Codes.NewBlockCode, newBlock63Bytes, ProtocolVersions.PV62) shouldBe newBlock63
decode(Codes.NewBlockCode, newBlock63Bytes, ProtocolVersions.PV63) shouldBe newBlock63
decode(Codes.NewBlockCode, newBlock64.toBytes, ProtocolVersions.PV64) shouldBe newBlock64
}
it should "decode SignedTransactions message for all supported versions of protocol" in {
val signedTransactions = SignedTransactions(ObjectGenerators.signedTxSeqGen(3, secureRandom, None).sample.get)
val signedTransactionsBytes: Array[Byte] = signedTransactions.toBytes
decode(Codes.SignedTransactionsCode, signedTransactionsBytes, ProtocolVersions.PV61) shouldBe signedTransactions
decode(Codes.SignedTransactionsCode, signedTransactionsBytes, ProtocolVersions.PV62) shouldBe signedTransactions
decode(Codes.SignedTransactionsCode, signedTransactionsBytes, ProtocolVersions.PV63) shouldBe signedTransactions
decode(Codes.SignedTransactionsCode, signedTransactionsBytes, ProtocolVersions.PV64) shouldBe signedTransactions
}
it should "not decode message not existing in given protocol" in {
assertThrows[RuntimeException] {
decode(Codes.SignedTransactionsCode, blockHashesFromNumberBytes, ProtocolVersions.PV62)
}
}
it should "not decode message of not supported protocol" in {
assertThrows[RuntimeException] {
decode(Codes.NewBlockHashesCode, NewBlockHashesPV61bytes, ProtocolVersions.PV61 - 1)
}
}
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/network/p2p/MessageDecodersSpec.scala | Scala | mit | 11,569 |
package com.wheaties.logical
object And{
def apply[A](x: A, y: A)(implicit con: Conjunction[A]) = con.conjunction(x, y)
}
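
// Illustrative sketch, not part of the original Predicates sources: one possible
// Conjunction instance (here for Boolean) and how And applies it through the implicit
// parameter. The names ConjunctionBooleanSketch and booleanConjunction are assumptions.
object ConjunctionBooleanSketch {
  implicit val booleanConjunction: Conjunction[Boolean] = new Conjunction[Boolean] {
    def conjunction(p: Boolean, q: Boolean): Boolean = p && q
  }

  val example: Boolean = And(true, false) // evaluates to false
}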
trait Conjunction[A]{
def conjunction(p: A, q: A): A
}
| wheaties/Predicates | predicates/src/main/scala/com/wheaties/logical/And.scala | Scala | mit | 182 |
package org.jetbrains.plugins.scala.util
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.util.ShutDownTracker
import org.jetbrains.plugins.scala.extensions.invokeOnDispose
/**
* Ordinary [[ShutDownTracker]] is not enough cause it leads to lambda leaks
* on Scala Plugin unloading (see https://youtrack.jetbrains.com/issue/SCL-16809).<br>
* To avoid such leaks [[UnloadAwareDisposable.scalaPluginDisposable]] is used.
*
* However, during tests run, application is not properly disposed and scalaPluginDisposable is not disposed.<br>
* So in tests we fallback to [[ShutDownTracker]].
* It's fine since plugin unloading is not used in tests and there will be no leaks.
*
* @note
* scalaPluginDisposable is disposed in [[com.intellij.openapi.application.impl.ApplicationImpl#disposeContainer()]]
* which is called (in particular) from [[com.intellij.testFramework.TestApplicationManager]]
* via `disposeApplicationAndCheckForLeaks` method.<br>
* To call this method we would need to inject some test listener which would catch "all tests finished"
* (see [[org.junit.runner.notification.RunListener#testRunFinished(org.junit.runner.Result)]].
* However this is not easy to support for both local test running and running tests from sbt ('''though possible''')
*
* @note
* In IDEA platform there is utility class `_LastInSuiteTest` which properly disposes application.<br>
* But it is manually injected in some test runners. See for example com.intellij.javascript.debugger.DebugTestSuite
* or android tests using com.android.tools.tests.LeakCheckerRule.
* Those tests use manual enumeration of test suites via `@Suite.SuiteClasses`.
*/
object ScalaShutDownTracker {
def registerShutdownTask(runnable: Runnable): Unit =
if (ApplicationManager.getApplication.isUnitTestMode) {
ShutDownTracker.getInstance().registerShutdownTask(runnable)
}
else {
invokeOnDispose(UnloadAwareDisposable.scalaPluginDisposable)(runnable.run())
}
}
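
// Hedged usage sketch, not part of the plugin sources: registers a cleanup task that
// runs either on test-JVM shutdown or on plugin unload, depending on the branch taken
// above. The object and method names below are assumptions for illustration.
object ScalaShutDownTrackerUsageSketch {
  def registerCacheCleanup(cleanUp: () => Unit): Unit =
    ScalaShutDownTracker.registerShutdownTask(new Runnable {
      override def run(): Unit = cleanUp()
    })
}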
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/util/ScalaShutDownTracker.scala | Scala | apache-2.0 | 2,016 |
/* Copyright 2009-2021 EPFL, Lausanne */
package stainless
import inox.DebugSection
abstract class ReportMessage {
def sbtPluginOnly: Boolean
def title: String
def emit(reporter: inox.Reporter): Unit
}
class DefaultReporter(debugSections: Set[DebugSection]) extends inox.DefaultReporter(debugSections) {
var printingProgress = false
override def emit(msg: Message): Unit = synchronized {
if (printingProgress) {
println()
printingProgress = false
}
msg.msg match {
case rm: ReportMessage if rm.sbtPluginOnly => ()
case _ => super.emit(msg)
}
}
override def onCompilerProgress(current: Int, total: Int): Unit = synchronized {
printingProgress = true
print("\\r" + severityToPrefix(INFO) + s" Verified: $current / $total")
}
}
class PlainTextReporter(debugSections: Set[DebugSection]) extends inox.PlainTextReporter(debugSections) {
var printingProgress = false
override def emit(msg: Message): Unit = synchronized {
if (printingProgress) {
println()
printingProgress = false
}
msg.msg match {
case rm: ReportMessage if rm.sbtPluginOnly => ()
case _ => super.emit(msg)
}
}
override def onCompilerProgress(current: Int, total: Int): Unit = synchronized {
printingProgress = true
print(s"\\rVerified: $current / $total")
}
}
| epfl-lara/stainless | core/src/main/scala/stainless/Reporter.scala | Scala | apache-2.0 | 1,350 |
/*
* Copyright (c) 2015 Daniel Higuero.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scala.examples.basics
object User{
/**
* Main.
* @param args The arguments.
*/
def main(args: Array[String]) : Unit = {
// TODO 1) Build all types of users.
}
}
/**
 * Class that demonstrates the use of constructors.
* @param email The email.
* @param age The age.
*/
class User(email: String, age: Int = 20) {
/**
* Whether the user is registered.
*/
var registered : Boolean = false
/**
* Alternate constructor.
* @param email The email.
* @param age The age.
* @param registered Whether the user is registered.
*/
def this(email: String, age: Int, registered: Boolean) = {
this(email, age)
this.registered = false
}
// TODO 2) Modify the User class to use an Option for the age.
// TODO 3) Implement a toString method taking into account the option.
// TODO 4) Add an apply method to the companion object.
}
| dhiguero/scala-exercises | src/main/scala/org/scala/examples/basics/User.scala | Scala | apache-2.0 | 1,507 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.annotation.Since
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.internal.Logging
import org.apache.spark.ml.util.Instrumentation
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.linalg.BLAS.axpy
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.Utils
import org.apache.spark.util.random.XORShiftRandom
/**
* K-means clustering with a k-means++ like initialization mode
* (the k-means|| algorithm by Bahmani et al).
*
* This is an iterative algorithm that will make multiple passes over the data, so any RDDs given
* to it should be cached by the user.
*/
@Since("0.8.0")
class KMeans private (
private var k: Int,
private var maxIterations: Int,
private var initializationMode: String,
private var initializationSteps: Int,
private var epsilon: Double,
private var seed: Long,
private var distanceMeasure: String) extends Serializable with Logging {
@Since("0.8.0")
private def this(k: Int, maxIterations: Int, initializationMode: String, initializationSteps: Int,
epsilon: Double, seed: Long) =
this(k, maxIterations, initializationMode, initializationSteps,
epsilon, seed, DistanceMeasure.EUCLIDEAN)
/**
* Constructs a KMeans instance with default parameters: {k: 2, maxIterations: 20,
* initializationMode: "k-means||", initializationSteps: 2, epsilon: 1e-4, seed: random,
* distanceMeasure: "euclidean"}.
*/
@Since("0.8.0")
def this() = this(2, 20, KMeans.K_MEANS_PARALLEL, 2, 1e-4, Utils.random.nextLong(),
DistanceMeasure.EUCLIDEAN)
/**
* Number of clusters to create (k).
*
* @note It is possible for fewer than k clusters to
* be returned, for example, if there are fewer than k distinct points to cluster.
*/
@Since("1.4.0")
def getK: Int = k
/**
* Set the number of clusters to create (k).
*
* @note It is possible for fewer than k clusters to
* be returned, for example, if there are fewer than k distinct points to cluster. Default: 2.
*/
@Since("0.8.0")
def setK(k: Int): this.type = {
require(k > 0,
s"Number of clusters must be positive but got ${k}")
this.k = k
this
}
/**
* Maximum number of iterations allowed.
*/
@Since("1.4.0")
def getMaxIterations: Int = maxIterations
/**
* Set maximum number of iterations allowed. Default: 20.
*/
@Since("0.8.0")
def setMaxIterations(maxIterations: Int): this.type = {
require(maxIterations >= 0,
s"Maximum of iterations must be nonnegative but got ${maxIterations}")
this.maxIterations = maxIterations
this
}
/**
* The initialization algorithm. This can be either "random" or "k-means||".
*/
@Since("1.4.0")
def getInitializationMode: String = initializationMode
/**
* Set the initialization algorithm. This can be either "random" to choose random points as
* initial cluster centers, or "k-means||" to use a parallel variant of k-means++
* (Bahmani et al., Scalable K-Means++, VLDB 2012). Default: k-means||.
*/
@Since("0.8.0")
def setInitializationMode(initializationMode: String): this.type = {
KMeans.validateInitMode(initializationMode)
this.initializationMode = initializationMode
this
}
/**
* Number of steps for the k-means|| initialization mode
*/
@Since("1.4.0")
def getInitializationSteps: Int = initializationSteps
/**
* Set the number of steps for the k-means|| initialization mode. This is an advanced
* setting -- the default of 2 is almost always enough. Default: 2.
*/
@Since("0.8.0")
def setInitializationSteps(initializationSteps: Int): this.type = {
require(initializationSteps > 0,
s"Number of initialization steps must be positive but got ${initializationSteps}")
this.initializationSteps = initializationSteps
this
}
/**
* The distance threshold within which we've consider centers to have converged.
*/
@Since("1.4.0")
def getEpsilon: Double = epsilon
/**
* Set the distance threshold within which we've consider centers to have converged.
* If all centers move less than this Euclidean distance, we stop iterating one run.
*/
@Since("0.8.0")
def setEpsilon(epsilon: Double): this.type = {
require(epsilon >= 0,
s"Distance threshold must be nonnegative but got ${epsilon}")
this.epsilon = epsilon
this
}
/**
* The random seed for cluster initialization.
*/
@Since("1.4.0")
def getSeed: Long = seed
/**
* Set the random seed for cluster initialization.
*/
@Since("1.4.0")
def setSeed(seed: Long): this.type = {
this.seed = seed
this
}
/**
* The distance suite used by the algorithm.
*/
@Since("2.4.0")
def getDistanceMeasure: String = distanceMeasure
/**
* Set the distance suite used by the algorithm.
*/
@Since("2.4.0")
def setDistanceMeasure(distanceMeasure: String): this.type = {
DistanceMeasure.validateDistanceMeasure(distanceMeasure)
this.distanceMeasure = distanceMeasure
this
}
// Initial cluster centers can be provided as a KMeansModel object rather than using the
// random or k-means|| initializationMode
private var initialModel: Option[KMeansModel] = None
/**
* Set the initial starting point, bypassing the random initialization or k-means||
* The condition model.k == this.k must be met, failure results
* in an IllegalArgumentException.
*/
@Since("1.4.0")
def setInitialModel(model: KMeansModel): this.type = {
require(model.k == k, "mismatched cluster count")
initialModel = Some(model)
this
}
/**
* Train a K-means model on the given set of points; `data` should be cached for high
* performance, because this is an iterative algorithm.
*/
@Since("0.8.0")
def run(data: RDD[Vector]): KMeansModel = {
val instances = data.map(point => (point, 1.0))
val handlePersistence = data.getStorageLevel == StorageLevel.NONE
runWithWeight(instances, handlePersistence, None)
}
private[spark] def runWithWeight(
instances: RDD[(Vector, Double)],
handlePersistence: Boolean,
instr: Option[Instrumentation]): KMeansModel = {
val norms = instances.map { case (v, _) => Vectors.norm(v, 2.0) }
val vectors = instances.zip(norms)
.map { case ((v, w), norm) => new VectorWithNorm(v, norm, w) }
if (handlePersistence) {
vectors.persist(StorageLevel.MEMORY_AND_DISK)
} else {
// Compute squared norms and cache them.
norms.persist(StorageLevel.MEMORY_AND_DISK)
}
val model = runAlgorithmWithWeight(vectors, instr)
if (handlePersistence) { vectors.unpersist() } else { norms.unpersist() }
model
}
/**
* Implementation of K-Means algorithm.
*/
private def runAlgorithmWithWeight(
data: RDD[VectorWithNorm],
instr: Option[Instrumentation]): KMeansModel = {
val sc = data.sparkContext
val initStartTime = System.nanoTime()
val distanceMeasureInstance = DistanceMeasure.decodeFromString(this.distanceMeasure)
val centers = initialModel match {
case Some(kMeansCenters) =>
kMeansCenters.clusterCenters.map(new VectorWithNorm(_))
case None =>
if (initializationMode == KMeans.RANDOM) {
initRandom(data)
} else {
initKMeansParallel(data, distanceMeasureInstance)
}
}
val numFeatures = centers.head.vector.size
val initTimeInSeconds = (System.nanoTime() - initStartTime) / 1e9
logInfo(f"Initialization with $initializationMode took $initTimeInSeconds%.3f seconds.")
var converged = false
var cost = 0.0
var iteration = 0
val iterationStartTime = System.nanoTime()
instr.foreach(_.logNumFeatures(numFeatures))
val shouldDistributed = centers.length * centers.length * numFeatures.toLong > 1000000L
// Execute iterations of Lloyd's algorithm until converged
while (iteration < maxIterations && !converged) {
val bcCenters = sc.broadcast(centers)
val stats = if (shouldDistributed) {
distanceMeasureInstance.computeStatisticsDistributedly(sc, bcCenters)
} else {
distanceMeasureInstance.computeStatistics(centers)
}
val bcStats = sc.broadcast(stats)
val costAccum = sc.doubleAccumulator
// Find the new centers
val collected = data.mapPartitions { points =>
val centers = bcCenters.value
val stats = bcStats.value
val dims = centers.head.vector.size
val sums = Array.fill(centers.length)(Vectors.zeros(dims))
// clusterWeightSum is needed to calculate cluster center
// cluster center =
// sample1 * weight1/clusterWeightSum + sample2 * weight2/clusterWeightSum + ...
val clusterWeightSum = Array.ofDim[Double](centers.length)
points.foreach { point =>
val (bestCenter, cost) = distanceMeasureInstance.findClosest(centers, stats, point)
costAccum.add(cost * point.weight)
distanceMeasureInstance.updateClusterSum(point, sums(bestCenter))
clusterWeightSum(bestCenter) += point.weight
}
Iterator.tabulate(centers.length)(j => (j, (sums(j), clusterWeightSum(j))))
.filter(_._2._2 > 0)
}.reduceByKey { (sumweight1, sumweight2) =>
axpy(1.0, sumweight2._1, sumweight1._1)
(sumweight1._1, sumweight1._2 + sumweight2._2)
}.collectAsMap()
if (iteration == 0) {
instr.foreach(_.logNumExamples(costAccum.count))
instr.foreach(_.logSumOfWeights(collected.values.map(_._2).sum))
}
bcCenters.destroy()
bcStats.destroy()
// Update the cluster centers and costs
converged = true
collected.foreach { case (j, (sum, weightSum)) =>
val newCenter = distanceMeasureInstance.centroid(sum, weightSum)
if (converged &&
!distanceMeasureInstance.isCenterConverged(centers(j), newCenter, epsilon)) {
converged = false
}
centers(j) = newCenter
}
cost = costAccum.value
instr.foreach(_.logNamedValue(s"Cost@iter=$iteration", s"$cost"))
iteration += 1
}
val iterationTimeInSeconds = (System.nanoTime() - iterationStartTime) / 1e9
logInfo(f"Iterations took $iterationTimeInSeconds%.3f seconds.")
if (iteration == maxIterations) {
logInfo(s"KMeans reached the max number of iterations: $maxIterations.")
} else {
logInfo(s"KMeans converged in $iteration iterations.")
}
logInfo(s"The cost is $cost.")
new KMeansModel(centers.map(_.vector), distanceMeasure, cost, iteration)
}
/**
* Initialize a set of cluster centers at random.
*/
private def initRandom(data: RDD[VectorWithNorm]): Array[VectorWithNorm] = {
// Select without replacement; may still produce duplicates if the data has < k distinct
// points, so deduplicate the centroids to match the behavior of k-means|| in the same situation
data.takeSample(false, k, new XORShiftRandom(this.seed).nextInt())
.map(_.vector).distinct.map(new VectorWithNorm(_))
}
/**
* Initialize a set of cluster centers using the k-means|| algorithm by Bahmani et al.
* (Bahmani et al., Scalable K-Means++, VLDB 2012). This is a variant of k-means++ that tries
* to find dissimilar cluster centers by starting with a random center and then doing
* passes where more centers are chosen with probability proportional to their squared distance
* to the current cluster set. It results in a provable approximation to an optimal clustering.
*
* The original paper can be found at http://theory.stanford.edu/~sergei/papers/vldb12-kmpar.pdf.
*/
private[clustering] def initKMeansParallel(data: RDD[VectorWithNorm],
distanceMeasureInstance: DistanceMeasure): Array[VectorWithNorm] = {
// Initialize empty centers and point costs.
var costs = data.map(_ => Double.PositiveInfinity)
// Initialize the first center to a random point.
val seed = new XORShiftRandom(this.seed).nextInt()
val sample = data.takeSample(false, 1, seed)
// Could be empty if data is empty; fail with a better message early:
require(sample.nonEmpty, s"No samples available from $data")
val centers = ArrayBuffer[VectorWithNorm]()
var newCenters = Array(sample.head.toDense)
centers ++= newCenters
// On each step, sample 2 * k points on average with probability proportional
// to their squared distance from the centers. Note that only distances between points
// and new centers are computed in each iteration.
var step = 0
val bcNewCentersList = ArrayBuffer[Broadcast[_]]()
while (step < initializationSteps) {
val bcNewCenters = data.context.broadcast(newCenters)
bcNewCentersList += bcNewCenters
val preCosts = costs
costs = data.zip(preCosts).map { case (point, cost) =>
math.min(distanceMeasureInstance.pointCost(bcNewCenters.value, point), cost)
}.persist(StorageLevel.MEMORY_AND_DISK)
val sumCosts = costs.sum()
bcNewCenters.unpersist()
preCosts.unpersist()
val chosen = data.zip(costs).mapPartitionsWithIndex { (index, pointCosts) =>
val rand = new XORShiftRandom(seed ^ (step << 16) ^ index)
pointCosts.filter { case (_, c) => rand.nextDouble() < 2.0 * c * k / sumCosts }.map(_._1)
}.collect()
newCenters = chosen.map(_.toDense)
centers ++= newCenters
step += 1
}
costs.unpersist()
bcNewCentersList.foreach(_.destroy())
val distinctCenters = centers.map(_.vector).distinct.map(new VectorWithNorm(_)).toArray
if (distinctCenters.length <= k) {
distinctCenters
} else {
// Finally, we might have a set of more than k distinct candidate centers; weight each
// candidate by the number of points in the dataset mapping to it and run a local k-means++
// on the weighted centers to pick k of them
val bcCenters = data.context.broadcast(distinctCenters)
val countMap = data
.map(distanceMeasureInstance.findClosest(bcCenters.value, _)._1)
.countByValue()
bcCenters.destroy()
val myWeights = distinctCenters.indices.map(countMap.getOrElse(_, 0L).toDouble).toArray
LocalKMeans.kMeansPlusPlus(0, distinctCenters, myWeights, k, 30)
}
}
}
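
// Hedged usage sketch, not part of the original Spark sources: shows how the fluent
// setters above are typically chained before calling run(). The object name and the
// parameter values (k = 3, 50 iterations, seed 42) are assumptions for illustration;
// the caller is expected to provide (and ideally cache) the input RDD.
private[clustering] object KMeansUsageSketch {
  def fit(data: RDD[Vector]): KMeansModel =
    new KMeans()
      .setK(3)
      .setMaxIterations(50)
      .setInitializationMode(KMeans.K_MEANS_PARALLEL)
      .setSeed(42L)
      .run(data)
}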
/**
* Top-level methods for calling K-means clustering.
*/
@Since("0.8.0")
object KMeans {
// Initialization mode names
@Since("0.8.0")
val RANDOM = "random"
@Since("0.8.0")
val K_MEANS_PARALLEL = "k-means||"
/**
* Trains a k-means model using the given set of parameters.
*
* @param data Training points as an `RDD` of `Vector` types.
* @param k Number of clusters to create.
* @param maxIterations Maximum number of iterations allowed.
* @param initializationMode The initialization algorithm. This can either be "random" or
* "k-means||". (default: "k-means||")
* @param seed Random seed for cluster initialization. Default is to generate seed based
* on system time.
*/
@Since("2.1.0")
def train(
data: RDD[Vector],
k: Int,
maxIterations: Int,
initializationMode: String,
seed: Long): KMeansModel = {
new KMeans().setK(k)
.setMaxIterations(maxIterations)
.setInitializationMode(initializationMode)
.setSeed(seed)
.run(data)
}
/**
* Trains a k-means model using the given set of parameters.
*
* @param data Training points as an `RDD` of `Vector` types.
* @param k Number of clusters to create.
* @param maxIterations Maximum number of iterations allowed.
* @param initializationMode The initialization algorithm. This can either be "random" or
* "k-means||". (default: "k-means||")
*/
@Since("2.1.0")
def train(
data: RDD[Vector],
k: Int,
maxIterations: Int,
initializationMode: String): KMeansModel = {
new KMeans().setK(k)
.setMaxIterations(maxIterations)
.setInitializationMode(initializationMode)
.run(data)
}
/**
* Trains a k-means model using specified parameters and the default values for unspecified.
*/
@Since("0.8.0")
def train(
data: RDD[Vector],
k: Int,
maxIterations: Int): KMeansModel = {
new KMeans().setK(k)
.setMaxIterations(maxIterations)
.run(data)
}
private[spark] def validateInitMode(initMode: String): Boolean = {
initMode match {
case KMeans.RANDOM => true
case KMeans.K_MEANS_PARALLEL => true
case _ => false
}
}
}
/**
* A vector with its norm for fast distance computation.
*/
private[clustering] class VectorWithNorm(
val vector: Vector,
val norm: Double,
val weight: Double = 1.0) extends Serializable {
def this(vector: Vector) = this(vector, Vectors.norm(vector, 2.0))
def this(array: Array[Double]) = this(Vectors.dense(array))
/** Converts the vector to a dense vector. */
def toDense: VectorWithNorm = new VectorWithNorm(Vectors.dense(vector.toArray), norm, weight)
}
| maropu/spark | mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeans.scala | Scala | apache-2.0 | 18,061 |
package scalaDemo
import java.io._
import java.util.Date
/**
* Created by liush on 17-7-14.
 * Scala call-by-name function parameters.
 * Normally, function parameters are call-by-value: an argument is evaluated before it is passed to the function.
 * Sometimes, however, we want a parameter that is not evaluated until the expression is used inside the called function.
 * For such cases Scala provides call-by-name parameters: the call-by-name mechanism passes a code block to the callee,
 * and each time the callee accesses the parameter, the code block is executed and its value is computed.
*/
object BaseScala {
def main(args: Array[String]):Unit= {
printInt(b=5, a=7)
delayed(time())
printStrings("Hello", "Scala", "Python")
println("Returned Value : " + addInt())
println("高阶函数:"+ apply( layout, 10) )
//Scala匿名函数
var inc = (x:Int) => x+1
//变量inc现在可以使用以通常的方式的函数:
var x = inc(7)-1
println("一个参数匿名函数:"+(inc(7)-1))
//用多个参数定义的函数如下:
var mul = (x: Int, y: Int) => x*y
println("多个参数匿名函数:"+mul(3, 4))
//无参数定义函数如下:
var userDir = () => { System.getProperty("user.dir") }
println("无参数匿名函数:"+ userDir() )
val str1:String = "Hello, "
val str2:String = "Scala!"
//Scala柯里函数
println( "str1 + str2 = " + strcat(str1)(str2) )
//Scala嵌套函数
println( factorial(0) )
println( factorial(1) )
println( factorial(2) )
println( factorial(3) )
val date = new Date
    //Scala partially applied functions
    //If you supply only some of the arguments, you get a partially applied function: some parameters are bound now and the remaining ones are filled in later
    /**
     * The log() method takes two parameters: date and message. We want to invoke it several times with the
     * same value for date but different values for message. We can eliminate the noise of passing the date
     * on every call by partially applying that argument: we first bind a value to the date parameter and put
     * an underscore in place of the second parameter.
     */
val logWithDateBound = log(date, _ : String)
logWithDateBound("message1" )
Thread.sleep(1000)
logWithDateBound("message2" )
Thread.sleep(1000)
logWithDateBound("message3" )
    ///Iterating over a tuple
    //The productIterator() method iterates over all elements of a tuple
    val t = (4,3,2,1)
    t.productIterator.foreach{ i =>println("Value = " + i )}
    /**
     * Scala's Option[T] is a container for zero or one element of a given type: an Option[T] is either
     * Some[T] or the None object, which represents a missing value.
     * The get method of a Scala Map produces Some(value) if a value for the given key exists, or None otherwise.
     * The Option type is used frequently in Scala programs; compare it to using null in Java to indicate
     * the absence of a value.
     */
    val capitals = Map("France" -> "Paris", "Japan" -> "Tokyo")
    println("capitals.get( 'France' ) : " + capitals.get( "France" ))
    println("capitals.get( 'India' ) : " + capitals.get( "India" ))
    //Pattern matching on Scala's Option
    println("show(capitals.get( 'Japan')) : " +show(capitals.get( "Japan")) )
    println("show(capitals.get( 'India')) : " +show(capitals.get( "India")) )
    val a:Option[Int] = Some(5)
    val b:Option[Int] = None
    //getOrElse() accesses the value or falls back to a default
    println("a.getOrElse(0): " + a.getOrElse(0) )
    println("b.getOrElse(10): " + b.getOrElse(10) )
    //isEmpty() checks whether the option is None
    println("a.isEmpty: " + a.isEmpty )
    println("b.isEmpty: " + b.isEmpty )
    //An iterator is not a collection but a way to access the elements of a collection one by one
    //Calling it.next() returns the next element of the iterator
    //it.hasNext reports whether there are more elements
val it = Iterator("a", "number", "of", "words")
while (it.hasNext){
println(it.next())
}
val ita = Iterator(20,40,2,50,69, 90)
val itb = Iterator(20,40,2,50,69, 90)
    //Find the element with the maximum value; the iterator is exhausted once this method returns
    println("Maximum valued element " + ita.max )
    //Find the element with the minimum value; the iterator is exhausted once this method returns
    println("Minimum valued element " + itb.min )
    //Scala pattern matching
    println("Matching an integer value: "+matchTestValue(3))
    println("Matching values of different types: "+matchTest("two"))
    println("Matching values of different types: "+matchTest("test"))
    println("Matching values of different types: "+matchTest(1))
    //Case classes are classes designed for pattern matching with case expressions
val alice = new Person("Alice", 25)
val bob = new Person("Bob", 32)
val charlie = new Person("Charlie", 32)
    //Case classes are classes designed for pattern matching with case expressions
for (person <- List(alice, bob, charlie)) {
person match {
case Person("Alice", 25) => println("Hi Alice!")
case Person("Bob", 32) => println("Hi Bob!")
case Person(name, age) =>
println("Age: " + age + " year, name: " + name + "?")
}
}
    //Scala regular expressions
    //Scala supports regular expressions through the Regex class in the scala.util.matching package
    //We simply create a string and call its r() method: the string is implicitly converted to a RichString, and that method returns a regular expression instance
    val pattern = "Scala".r
    val str = "Scala is Scalable and cool"
    //Find a word in a Scala sentence:
    //To find the first match of the regular expression, just call the findFirstIn() method
    println(pattern findFirstIn str)
    //Scala exception handling
    //In Scala, try/catch catches any exception in a single block and then pattern matches on it with case clauses
try {
val f = new FileReader("input.txt")
} catch {
case ex: FileNotFoundException =>{
println("Missing file exception")
}
case ex: IOException => {
println("IO Exception")
}
}finally {
println("Exiting finally...")
}
    //Scala file I/O
    //An example of writing to a file:
    val writer = new PrintWriter(new File("test.txt" ))
    writer.write("Hello Scala")
    writer.close()
    //Read a line from the console
print("Please enter your input : " )
// val line = Console.readLine()
// println("Thanks, you just typed: " + line)
}
// case class, empty one.
case class Person(name: String, age: Int)
  //It matches against values of different types:
def matchTest(x: Any): Any = x match {
case 1 => "one"
case "two" => 2
case y: Int => "scala.Int"
case _ => "many"
}
  //Match an integer value
def matchTestValue(x: Int): String = x match {
case 1 => "one"
case 2 => "two"
case _ => "many"
}
def time() = {
println("Getting time in nano seconds")
System.nanoTime
}
  //The delayed method declares a call-by-name parameter: the => symbol between the parameter name and its type marks it as call-by-name.
  def delayed( t: => Long ) = {
    println("In delayed method")
    println("Param: " + t)
    //delayed prints the value along with its messages, and finally returns t,
t
}
  //Using named arguments
def printInt( a:Int, b:Int ) = {
println("Value of a : " + a );
println("Value of b : " + b );
}
  //Using variable-length arguments
def printStrings( args:String* ) = {
var i : Int = 0;
for( arg <- args ){
println("Arg value[" + i + "] = " + arg );
i = i + 1;
}
}
  //Function parameters with default values
def addInt( a:Int=5, b:Int=7 ) : Int = {
var sum:Int = 0
sum = a + b
return sum
}
  //Definition of a higher-order function: a function that takes other functions as parameters, or whose result is a function
def apply(f: Int => String, v: Int) = f(v)
def layout[A](x: A) = "[" + x.toString() + "]"
  //Scala anonymous functions
  var inc = (x:Int) => x+1
  //The variable inc can now be used as a function in the usual way:
  var x = inc(7)-1
  //A function defined with several parameters looks like this:
  var mul = (x: Int, y: Int) => x*y
  // println(mul(3, 4))
  //A function defined without parameters looks like this:
  var userDir = () => { System.getProperty("user.dir") }
  // println( userDir )
  //Scala curried functions
  //Currying transforms a function that takes several parameters into a chain of functions, each taking a single parameter
  def strcat(s1: String)(s2: String) = {
    s1 + s2
  }
  //A second way to define a curried function
  //def strcat(s1: String) = (s2: String) => s1 + s2
  //Scala nested functions
  //A factorial calculator that calls a second, inner function
def factorial(i: Int): Int = {
def fact(i: Int, accumulator: Int): Int = {
if (i <= 1)
accumulator
else
fact(i - 1, i * accumulator)
}
fact(i, 1)
}
  //Scala partially applied functions
//
def log(date: Date, message: String) = {
println(date + "----" + message)
}
  //Scala's Option[T] is a container for zero or one element of a given type
def show(x: Option[String]) = x match {
case Some(s) => s
case None => "?"
}
//
}
| tophua/spark1.52 | examples/src/main/scala/scalaDemo/BaseScala.scala | Scala | apache-2.0 | 8,954 |
/*
* Copyright 2007-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package record
import net.liftweb.http.js.{JsExp, JsObj}
import net.liftweb.http.js.JE.{JsArray, JsFalse, JsNull, JsObj, JsTrue, Num, Str}
import net.liftweb.json.JsonAST.{JArray, JBool, JInt, JDouble, JField, JNothing, JNull, JObject, JString, JValue}
object RecordHelpers {
/* For the moment, I couldn't find any other way to bridge JValue and JsExp, so I wrote something simple here */
implicit def jvalueToJsExp(jvalue: JValue): JsExp = {
jvalue match {
case JArray(vs) => JsArray(vs.map(jvalueToJsExp): _*)
case JBool(b) => if (b) JsTrue else JsFalse
case JDouble(d) => Num(d)
case JInt(i) => Num(i)
case JNothing => JsNull
case JNull => JsNull
case JObject(fs) => JsObj(fs.map(f => (f.name, jvalueToJsExp(f.value))): _*)
case JString(s) => Str(s)
}
}
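  // Illustrative note (not part of the original API): the conversion is structural, e.g.
  // jvalueToJsExp(JObject(List(JField("n", JInt(1))))) yields JsObj("n" -> Num(1)),
  // and both JNothing and JNull map to JsNull.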
}
| lzpfmh/framework-2 | persistence/record/src/main/scala/net/liftweb/record/RecordHelpers.scala | Scala | apache-2.0 | 1,483 |
/* Copyright 2017-19, Emmanouil Antonios Platanios. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package org.platanios.tensorflow.api.core
import org.platanios.tensorflow.api._
import org.platanios.tensorflow.api.core.exception.{GraphMismatchException, InvalidArgumentException}
import org.platanios.tensorflow.api.ops.{Op, UntypedOp}
import org.platanios.tensorflow.api.ops.Op.createWith
import org.platanios.tensorflow.api.ops.basic.Basic
import org.platanios.tensorflow.api.ops.math.Math
import org.platanios.tensorflow.api.tensors.Tensor
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
/**
* @author Emmanouil Antonios Platanios
*/
class GraphSpec extends AnyFlatSpec with Matchers {
private[this] def prepareGraph(): (Graph, Array[UntypedOp]) = {
val graph = Graph()
val ops = createWith(graph = graph) {
val c1 = Basic.constant(Tensor(1.0), name = "C_1")
val c2 = Basic.constant(Tensor(2.0), name = "C_2")
val c3 = createWith(nameScope = "Nested") {
Basic.constant(Tensor(3.0), name = "C_3")
}
val c4 = Basic.constant(Tensor(4.0), name = "C_4")
Array(c1.op, c2.op, c3.op, c4.op)
}
(graph, ops)
}
// TODO: Add collections specification.
"'preventFeeding'" must "prevent valid ops from being fetched" in {
val (graph, ops) = prepareGraph()
assert(graph.isFeedable(ops(0).outputsSeq(0)))
assert(graph.isFeedable(ops(1).outputsSeq(0)))
assert(graph.isFeedable(ops(2).outputsSeq(0)))
assert(graph.isFeedable(ops(3).outputsSeq(0)))
graph.preventFeeding(ops(0).outputsSeq(0))
assert(!graph.isFeedable(ops(0).outputsSeq(0)))
assert(graph.isFeedable(ops(1).outputsSeq(0)))
assert(graph.isFeedable(ops(2).outputsSeq(0)))
assert(graph.isFeedable(ops(3).outputsSeq(0)))
graph.preventFeeding(ops(2).outputsSeq(0))
assert(!graph.isFeedable(ops(0).outputsSeq(0)))
assert(graph.isFeedable(ops(1).outputsSeq(0)))
assert(!graph.isFeedable(ops(2).outputsSeq(0)))
assert(graph.isFeedable(ops(3).outputsSeq(0)))
}
it must "throw a 'GraphMismatchException' when provided ops from other graphs" in {
val (graph, ops) = prepareGraph()
createWith(graph = Graph()) {
assert(intercept[GraphMismatchException](graph.isFeedable(Basic.constant(1.0))).getMessage ===
"The provided op output does not belong to this graph.")
assert(intercept[GraphMismatchException](graph.preventFeeding(Basic.constant(1.0))).getMessage ===
"The provided op output does not belong to this graph.")
}
}
"'preventFetching'" must "prevent valid ops from being fetched" in {
val (graph, ops) = prepareGraph()
assert(graph.isFetchable(ops(0)))
assert(graph.isFetchable(ops(1)))
assert(graph.isFetchable(ops(2)))
assert(graph.isFetchable(ops(3)))
graph.preventFetching(ops(0))
assert(!graph.isFetchable(ops(0)))
assert(graph.isFetchable(ops(1)))
assert(graph.isFetchable(ops(2)))
assert(graph.isFetchable(ops(3)))
graph.preventFetching(ops(2))
assert(!graph.isFetchable(ops(0)))
assert(graph.isFetchable(ops(1)))
assert(!graph.isFetchable(ops(2)))
assert(graph.isFetchable(ops(3)))
}
it must "throw a 'GraphMismatchException' when provided ops from other graphs" in {
val (graph, ops) = prepareGraph()
createWith(graph = Graph()) {
assert(intercept[GraphMismatchException](graph.isFetchable(Basic.constant(1.0).op)).getMessage ===
"The provided op does not belong to this graph.")
assert(intercept[GraphMismatchException](graph.preventFetching(Basic.constant(1.0).op)).getMessage ===
"The provided op does not belong to this graph.")
}
}
"'findOp'" must "return an existing op in a graph" in {
val (graph, ops) = prepareGraph()
assert(graph.findOp("C_2").get === ops(1))
}
it must "return 'None' if an op name does not exist in the graph" in {
val (graph, _) = prepareGraph()
assert(graph.findOp("A") === None)
}
"'ops'" must "return all the ops in a graph" in {
val (graph, ops) = prepareGraph()
assert(graph.ops === ops)
}
"'opByName'" must "return an existing op in a graph" in {
val (graph, ops) = prepareGraph()
assert(graph.getOpByName("C_2") === ops(1))
}
it must "throw an 'InvalidArgumentException' exception with an informative message " +
"if an op name does not exist in the graph" in {
val (graph, _) = prepareGraph()
assert(intercept[InvalidArgumentException](graph.getOpByName("A")).getMessage
=== "Name 'A' refers to an op which does not exist in the graph.")
assert(intercept[InvalidArgumentException](graph.getOpByName("A:0")).getMessage
=== "Name 'A:0' appears to refer to an op output, but 'allowOutput' was set to 'false'.")
}
"'outputByName'" must "return an existing op output in a graph" in {
val (graph, ops) = prepareGraph()
assert(graph.getOutputByName("C_2:0") == ops(1).outputsSeq(0))
}
it must "throw an 'InvalidArgumentException' exception with an informative message " +
"if an op output name does not exist in the graph" in {
val (graph, _) = prepareGraph()
assert(intercept[InvalidArgumentException](graph.getOutputByName("A:0:3")).getMessage
=== "Name 'A:0:3' looks a like an op output name, but it is not a valid one. " +
"Op output names must be of the form \\"<op_name>:<output_index>\\".")
assert(intercept[InvalidArgumentException](graph.getOutputByName("A:0")).getMessage
=== "Name 'A:0' refers to an op output which does not exist in the graph. " +
"More specifically, op, 'A', does not exist in the graph.")
assert(intercept[InvalidArgumentException](graph.getOutputByName("C_2:5")).getMessage
=== "Name 'C_2:5' refers to an op output which does not exist in the graph. " +
"More specifically, op, 'C_2', does exist in the graph, but it only has 1 output(s).")
assert(intercept[InvalidArgumentException](graph.getOutputByName("A")).getMessage
=== "Name 'A' looks like an (invalid) op name, and not an op output name. " +
"Op output names must be of the form \\"<op_name>:<output_index>\\".")
assert(intercept[InvalidArgumentException](graph.getOutputByName("C_2")).getMessage
=== "Name 'C_2' appears to refer to an op, but 'allowOp' was set to 'false'.")
}
"'graphElementByName'" must "return an existing element in a graph" in {
val (graph, ops) = prepareGraph()
graph.getByName("C_2").left.foreach(op => assert(op === ops(1)))
graph.getByName("C_2:0").foreach(output => assert(output == ops(1).outputsSeq.head))
}
it must "throw an 'InvalidArgumentException' exception with an informative message " +
"if an element name does not exist in the graph" in {
val (graph, _) = prepareGraph()
assert(intercept[InvalidArgumentException](
graph.getByName("A", allowOp = true, allowOutput = true)).getMessage
=== "Name 'A' refers to an op which does not exist in the graph.")
assert(intercept[InvalidArgumentException](
graph.getByName("A:0:3", allowOp = true, allowOutput = true)).getMessage
=== "Name 'A:0:3' looks a like an op output name, but it is not a valid one. " +
"Op output names must be of the form \\"<op_name>:<output_index>\\".")
assert(intercept[InvalidArgumentException](
graph.getByName("A:0", allowOp = true, allowOutput = true)).getMessage
=== "Name 'A:0' refers to an op output which does not exist in the graph. " +
"More specifically, op, 'A', does not exist in the graph.")
assert(intercept[InvalidArgumentException](
graph.getByName("C_2:5", allowOp = true, allowOutput = true)).getMessage
=== "Name 'C_2:5' refers to an op output which does not exist in the graph. " +
"More specifically, op, 'C_2', does exist in the graph, but it only has 1 output(s).")
assert(intercept[IllegalArgumentException](
graph.getByName("A", allowOp = false, allowOutput = false)).getMessage
=== "'allowOutput' and 'allowOp' cannot both be set to 'false'.")
}
object INPUTS extends Graph.Keys.OutputCollectionKey {
override def name: String = "inputs"
}
object OUTPUTS extends Graph.Keys.OutputCollectionKey {
override def name: String = "outputs"
}
"'Graph.toMetaGraphDef'" must "work when no scope is provided" in {
val graph = Graph()
val session = Session(graph)
Op.createWith(graph) {
// Create a minimal graph with zero variables.
val input = Basic.placeholder[Float](Shape(), name = "Input")
val offset = Basic.constant(42.0f, name = "Offset")
val output = Math.add(input, offset, name = "AddOffset")
// Add input and output tensors to graph collections.
graph.addToCollection(INPUTS)(input.asInstanceOf[Output[Any]])
graph.addToCollection(OUTPUTS)(output.asInstanceOf[Output[Any]])
val outputValue = session.run(Map(input -> Tensor(-10f)), output)
assert(outputValue.scalar == 32)
}
// Generate the 'MetaGraphDef' object.
val metaGraphDef = graph.toMetaGraphDef(collections = Set(INPUTS, OUTPUTS))
assert(metaGraphDef.hasMetaInfoDef)
assert(metaGraphDef.getMetaInfoDef.getTensorflowVersion !== "")
// assert(metaGraphDef.getMetaInfoDef.getTensorflowGitVersion !== "")
session.close()
// Create a clean graph and import the 'MetaGraphDef' object.
val newGraph = Graph()
val newSession = Session(newGraph)
newGraph.importMetaGraphDef(metaGraphDef)
// Re-exports the current graph state for comparison to the original.
val newMetaGraphDef = newGraph.toMetaGraphDef()
// TODO: [PROTO] Utility functions for ProtoBuf comparisons.
// assert(newMetaGraphDef.equals(metaGraphDef))
// Ensure that we can still get a reference to our graph collections.
val newInput = newGraph.getCollection(INPUTS).head.asInstanceOf[Output[Float]]
val newOutput = newGraph.getCollection(OUTPUTS).head.asInstanceOf[Output[Float]]
// Verify that the new graph computes the same result as the original.
val newOutputValue = newSession.run(Map(newInput -> Tensor(-10f)), newOutput)
assert(newOutputValue.scalar == 32.0f)
newSession.close()
}
}
| eaplatanios/tensorflow_scala | modules/api/src/test/scala/org/platanios/tensorflow/api/core/GraphSpec.scala | Scala | apache-2.0 | 10,834 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.integration.spark.testsuite.dataload
import java.math.BigDecimal
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.sql.Row
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterEach
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.metadata.CarbonMetadata
import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonLoadOptionConstants}
import org.apache.carbondata.core.datamap.Segment
import org.apache.carbondata.core.util.CarbonProperties
class TestLoadDataGeneral extends QueryTest with BeforeAndAfterEach {
override def beforeEach {
sql("DROP TABLE IF EXISTS loadtest")
sql(
"""
| CREATE TABLE loadtest(id int, name string, city string, age int)
| STORED AS carbondata
""".stripMargin)
}
private def checkSegmentExists(
segmentId: String,
databaseName: String,
tableName: String): Boolean = {
val carbonTable = CarbonMetadata.getInstance().getCarbonTable(databaseName, tableName)
val partitionPath =
CarbonTablePath.getPartitionDir(carbonTable.getAbsoluteTableIdentifier.getTablePath)
val segment = Segment.getSegment(segmentId, carbonTable.getAbsoluteTableIdentifier.getTablePath)
segment != null
}
test("test data loading CSV file") {
val testData = s"$resourcesPath/sample.csv"
sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
checkAnswer(
sql("SELECT COUNT(*) FROM loadtest"),
Seq(Row(6))
)
}
test("test data loading CSV file without extension name") {
val testData = s"$resourcesPath/sample"
sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
checkAnswer(
sql("SELECT COUNT(*) FROM loadtest"),
Seq(Row(4))
)
}
test("test data loading GZIP compressed CSV file") {
val testData = s"$resourcesPath/sample.csv.gz"
sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
checkAnswer(
sql("SELECT COUNT(*) FROM loadtest"),
Seq(Row(4))
)
}
test("test data loading BZIP2 compressed CSV file") {
val testData = s"$resourcesPath/sample.csv.bz2"
sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
checkAnswer(
sql("SELECT COUNT(*) FROM loadtest"),
Seq(Row(4))
)
}
test("test data loading CSV file with delimiter char \\017") {
val testData = s"$resourcesPath/sample_withDelimiter017.csv"
sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest options ('delimiter'='\\017')")
checkAnswer(
sql("SELECT COUNT(*) FROM loadtest"),
Seq(Row(4))
)
}
test("test data loading with invalid values for mesasures") {
val testData = s"$resourcesPath/invalidMeasures.csv"
sql("drop table if exists invalidMeasures")
sql("CREATE TABLE invalidMeasures (country String, salary double, age decimal(10,2)) STORED AS carbondata")
sql(s"LOAD DATA LOCAL INPATH '$testData' into table invalidMeasures options('Fileheader'='country,salary,age')")
checkAnswer(
sql("SELECT * FROM invalidMeasures"),
Seq(Row("India",null,new BigDecimal("22.44")), Row("Russia",null,null), Row("USA",234.43,null))
)
}
test("test data loading into table whose name has '_'") {
sql("DROP TABLE IF EXISTS load_test")
sql(""" CREATE TABLE load_test(id int, name string, city string, age int)
STORED AS carbondata """)
val testData = s"$resourcesPath/sample.csv"
try {
sql(s"LOAD DATA LOCAL INPATH '$testData' into table load_test")
sql(s"LOAD DATA LOCAL INPATH '$testData' into table load_test")
} catch {
case ex: Exception =>
assert(false)
}
assert(checkSegmentExists("0", "default", "load_test"))
assert(checkSegmentExists("1", "default", "load_test"))
sql("DROP TABLE load_test")
}
test("test load data with decimal type and sort intermediate files as 1") {
sql("drop table if exists carbon_table")
sql("drop table if exists carbonBigDecimalLoad")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT, "1")
.addProperty(CarbonCommonConstants.SORT_SIZE, "1")
.addProperty(CarbonCommonConstants.DATA_LOAD_BATCH_SIZE, "1")
sql("create table if not exists carbonBigDecimalLoad (ID Int, date Timestamp, country String, name String, phonetype String, serialname String, salary decimal(27, 10)) STORED AS carbondata")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/decimalBoundaryDataCarbon.csv' into table carbonBigDecimalLoad")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT,
CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE)
.addProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL)
.addProperty(CarbonCommonConstants.DATA_LOAD_BATCH_SIZE,
CarbonCommonConstants.DATA_LOAD_BATCH_SIZE_DEFAULT)
sql("drop table if exists carbon_table")
}
test("test insert / update with data more than 32000 characters") {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_ENABLE_BAD_RECORD_HANDLING_FOR_INSERT, "true")
val testdata =s"$resourcesPath/32000char.csv"
sql("drop table if exists load32000chardata")
sql("drop table if exists load32000chardata_dup")
sql("CREATE TABLE load32000chardata(dim1 String, dim2 String, mes1 int) STORED AS carbondata")
sql("CREATE TABLE load32000chardata_dup(dim1 String, dim2 String, mes1 int) STORED AS carbondata")
sql(s"LOAD DATA LOCAL INPATH '$testdata' into table load32000chardata OPTIONS('FILEHEADER'='dim1,dim2,mes1')")
intercept[Exception] {
sql("insert into load32000chardata_dup select dim1,concat(load32000chardata.dim2,'aaaa'),mes1 from load32000chardata").show()
}
sql(s"LOAD DATA LOCAL INPATH '$testdata' into table load32000chardata_dup OPTIONS('FILEHEADER'='dim1,dim2,mes1')")
intercept[Exception] {
sql("update load32000chardata_dup set(load32000chardata_dup.dim2)=(select concat(load32000chardata.dim2,'aaaa') from load32000chardata)").show()
}
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_ENABLE_BAD_RECORD_HANDLING_FOR_INSERT, "false")
}
test("test load / insert / update with data more than 32000 bytes - dictionary_exclude") {
val testdata = s"$resourcesPath/unicodechar.csv"
sql("drop table if exists load32000bytes")
sql("create table load32000bytes(name string) STORED AS carbondata")
sql("insert into table load32000bytes select 'aaa'")
assert(intercept[Exception] {
sql(s"load data local inpath '$testdata' into table load32000bytes OPTIONS ('FILEHEADER'='name')")
}.getMessage.contains("DataLoad failure: Dataload failed, String size cannot exceed 32000 bytes"))
val source = scala.io.Source.fromFile(testdata, CarbonCommonConstants.DEFAULT_CHARSET)
val data = source.mkString
intercept[Exception] {
sql(s"insert into load32000bytes values('$data')")
}
intercept[Exception] {
sql(s"update load32000bytes set(name)= ('$data')").show()
}
sql("drop table if exists load32000bytes")
}
test("test if stale folders are deleting on data load") {
sql("drop table if exists stale")
sql("create table stale(a string) STORED AS carbondata")
sql("insert into stale values('k')")
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default", "stale")
val tableStatusFile = CarbonTablePath.getTableStatusFilePath(carbonTable.getTablePath)
FileFactory.getCarbonFile(tableStatusFile).delete()
sql("insert into stale values('k')")
checkAnswer(sql("select * from stale"), Row("k"))
}
test("test data loading with directly writing fact data to hdfs") {
val originStatus = CarbonProperties.getInstance().getProperty(
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH,
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH_DEFAULT)
CarbonProperties.getInstance().addProperty(
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH, "true")
val testData = s"$resourcesPath/sample.csv"
sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
checkAnswer(
sql("SELECT COUNT(*) FROM loadtest"),
Seq(Row(6))
)
CarbonProperties.getInstance().addProperty(
CarbonLoadOptionConstants.ENABLE_CARBON_LOAD_DIRECT_WRITE_TO_STORE_PATH,
originStatus)
}
test("test data loading with page size less than 32000") {
CarbonProperties.getInstance().addProperty(
CarbonCommonConstants.BLOCKLET_SIZE, "16000")
val testData = s"$resourcesPath/sample.csv"
sql(s"LOAD DATA LOCAL INPATH '$testData' into table loadtest")
checkAnswer(
sql("SELECT COUNT(*) FROM loadtest"),
Seq(Row(6))
)
CarbonProperties.getInstance().addProperty(CarbonCommonConstants.BLOCKLET_SIZE,
CarbonCommonConstants.BLOCKLET_SIZE_DEFAULT_VAL)
}
override def afterEach {
sql("DROP TABLE if exists loadtest")
sql("drop table if exists invalidMeasures")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT,
CarbonCommonConstants.SORT_INTERMEDIATE_FILES_LIMIT_DEFAULT_VALUE)
.addProperty(CarbonCommonConstants.SORT_SIZE, CarbonCommonConstants.SORT_SIZE_DEFAULT_VAL)
.addProperty(CarbonCommonConstants.DATA_LOAD_BATCH_SIZE,
CarbonCommonConstants.DATA_LOAD_BATCH_SIZE_DEFAULT)
}
}
| jackylk/incubator-carbondata | integration/spark/src/test/scala/org/apache/carbondata/integration/spark/testsuite/dataload/TestLoadDataGeneral.scala | Scala | apache-2.0 | 10,495 |
package aerospiker
package msgpack
import java.nio.CharBuffer
import java.nio.charset.CharsetEncoder
import java.nio.charset.StandardCharsets.UTF_8
import com.aerospike.client.AerospikeException
import com.aerospike.client.command.ParticleType
import io.circe._
import scala.collection.mutable
/**
* Serialize collection objects using MessagePack format specification:
*
* https://github.com/msgpack/msgpack/blob/master/spec.md
*/
object JsonPacker {
private[this] val encoder: ThreadLocal[CharsetEncoder] = new ThreadLocal[CharsetEncoder] {
override def initialValue(): CharsetEncoder = UTF_8.newEncoder()
}
def apply(): JsonPacker = new JsonPacker
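  // Hedged usage sketch (assumes the io.circe Json builders shown here):
  // JsonPacker().pack(Json.obj("name" -> Json.fromString("foo"))) returns Right(bytes)
  // holding the MessagePack encoding, or Left(AerospikeException) if packing fails.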
def double(x: BigDecimal): Boolean = x.scale != 0
def formatArrayFamilyHeader(size: Int, builder: mutable.ArrayBuilder[Byte]): Unit = {
if (size < 16)
builder += (0x90 | size).toByte
else if (size < 65536) {
builder += 0xdc.toByte
builder += (size >>> 8).toByte
builder += (size >>> 0).toByte
} else {
builder += 0xdd.toByte
builder += (size >>> 24).toByte
builder += (size >>> 16).toByte
builder += (size >>> 8).toByte
builder += (size >>> 0).toByte
}
}
def formatMapFamilyHeader(sz: Int, builder: mutable.ArrayBuilder[Byte]): Unit = {
if (sz < 16)
builder += (0x80 | sz).toByte
else if (sz < 65536) {
builder += 0xde.toByte
builder += (sz >>> 8).toByte
builder += (sz >>> 0).toByte
} else {
builder += 0xdf.toByte
builder += (sz >>> 24).toByte
builder += (sz >>> 16).toByte
builder += (sz >>> 8).toByte
builder += (sz >>> 0).toByte
}
}
def formatNil(builder: mutable.ArrayBuilder[Byte]): Unit = builder += 0xc0.toByte
def formatBoolFamily(v: Boolean, builder: mutable.ArrayBuilder[Byte]): Unit =
builder += (if (v) 0xc3 else 0xc2).toByte
def formatIntFamily(l: Long, builder: mutable.ArrayBuilder[Byte]): Unit =
if (4294967296L <= l) formatLong(0xcf.toByte, l, builder)
else if (65536L <= l) formatInt(0xce.toByte, l.toInt, builder)
else if (256L <= l) formatShort(0xcd.toByte, l.toInt, builder)
else if (128 <= l) formatByte(0xcc.toByte, l.toByte, builder)
else if (0 <= l) formatByte(l.toByte, builder)
else if (l >= -32L) formatByte((0xe0 | (l + 32)).toByte, builder)
else if (l >= Byte.MinValue.toLong) formatByte(0xd0.toByte, l.toInt.toByte, builder)
else if (l >= Short.MinValue.toLong) formatShort(0xd1.toByte, l.toInt, builder)
else if (l >= Int.MinValue.toLong) formatInt(0xd2.toByte, l.toInt, builder)
else formatLong(0xd3.toByte, l, builder)
def formatIntFamily(t: Byte, v: BigInt, builder: mutable.ArrayBuilder[Byte]): Unit = {
builder += t
builder += (v >> 56).toByte
builder += (v >> 48).toByte
builder += (v >> 40).toByte
builder += (v >> 32).toByte
builder += (v >> 24).toByte
builder += (v >> 16).toByte
builder += (v >> 8).toByte
builder += (v >> 0).toByte
}
def formatFloatFamily(v: Double, builder: mutable.ArrayBuilder[Byte]): Unit = {
val x = java.lang.Double.doubleToLongBits(v)
builder += 0xcb.toByte
builder += (x >>> 56).toByte
builder += (x >>> 48).toByte
builder += (x >>> 40).toByte
builder += (x >>> 32).toByte
builder += (x >>> 24).toByte
builder += (x >>> 16).toByte
builder += (x >>> 8).toByte
builder += (x >>> 0).toByte
}
def formatStrFamilyHeader(sz: Int, builder: mutable.ArrayBuilder[Byte]): Unit =
if (sz < 32) {
builder += (0xa0 | sz).toByte
builder += ParticleType.STRING.toByte
} else if (sz < 65536) {
builder += 0xda.toByte
builder += (sz >>> 8).toByte
builder += (sz >>> 0).toByte
builder += ParticleType.STRING.toByte
} else {
builder += 0xdb.toByte
builder += (sz >>> 24).toByte
builder += (sz >>> 16).toByte
builder += (sz >>> 8).toByte
builder += (sz >>> 0).toByte
builder += ParticleType.STRING.toByte
}
def formatStrFamily(v: String, builder: mutable.ArrayBuilder[Byte]): Unit = {
val cb = CharBuffer.wrap(v)
val buf = encoder.get.encode(cb)
val len = buf.remaining() + 1
formatStrFamilyHeader(len, builder)
val arr = Array.ofDim[Byte](len - 1)
buf.get(arr)
builder ++= arr
buf.clear()
cb.clear()
}
def formatByte(v: Byte, builder: mutable.ArrayBuilder[Byte]): Unit = builder += v
def formatByte(t: Byte, v: Byte, builder: mutable.ArrayBuilder[Byte]): Unit = {
builder += t
builder += v
}
def formatShort(t: Byte, v: Int, builder: mutable.ArrayBuilder[Byte]): Unit = {
builder += t
builder += (v >>> 8).toByte
builder += (v >>> 0).toByte
}
def formatInt(t: Byte, v: Int, builder: mutable.ArrayBuilder[Byte]): Unit = {
builder += t
builder += (v >>> 24).toByte
builder += (v >>> 16).toByte
builder += (v >>> 8).toByte
builder += (v >>> 0).toByte
}
def formatLong(t: Byte, v: Long, builder: mutable.ArrayBuilder[Byte]): Unit = {
builder += t
builder += (v >>> 56).toByte
builder += (v >>> 48).toByte
builder += (v >>> 40).toByte
builder += (v >>> 32).toByte
builder += (v >>> 24).toByte
builder += (v >>> 16).toByte
builder += (v >>> 8).toByte
builder += (v >>> 0).toByte
}
}
final class JsonPacker {
import JsonPacker._
def encode[A](a: A)(implicit A: Encoder[A]): Either[AerospikeException, Array[Byte]] = pack(A(a))
def pack(doc: Json): Either[AerospikeException, Array[Byte]] = {
try {
val acc: mutable.ArrayBuilder[Byte] = mutable.ArrayBuilder.make[Byte]
go(doc, acc)
Right(acc.result())
} catch {
case e: AerospikeException => Left(e)
}
}
private[this] def go(json: Json, acc: mutable.ArrayBuilder[Byte]): Unit = json.fold[Unit](
formatNil(acc),
x => formatBoolFamily(x, acc),
x => {
val n = x.toBigDecimal
n match {
case None => ()
case Some(v) if double(v) =>
formatFloatFamily(v.toDouble, acc)
case Some(v) if v.isValidLong =>
formatIntFamily(v.toLong, acc)
case Some(v) if v.signum == -1 =>
formatIntFamily(0x3d.toByte, v.toBigInt(), acc)
case Some(v) =>
formatIntFamily(0xcf.toByte, v.toBigInt(), acc)
}
},
x => formatStrFamily(x, acc),
xs => {
formatArrayFamilyHeader(xs.size, acc)
xs.foreach(go(_, acc))
},
x => {
val xs = x.toList
formatMapFamilyHeader(xs.size, acc)
xs.foreach {
case (key, (v)) =>
formatStrFamily(key, acc)
go(v, acc)
}
}
)
}
| tkrs/aerospiker | msgpack/src/main/scala/aerospiker/msgpack/JsonPacker.scala | Scala | mit | 6,650 |
package com.github.tminglei.slickpg
import java.sql.{Date, Timestamp}
import java.time.{LocalDate, LocalDateTime, OffsetDateTime}
import slick.jdbc.{JdbcType, PositionedResult, PostgresProfile}
import scala.reflect.classTag
// edge type definitions
sealed trait EdgeType
case object `[_,_)` extends EdgeType
case object `(_,_]` extends EdgeType
case object `(_,_)` extends EdgeType
case object `[_,_]` extends EdgeType
case object `empty` extends EdgeType
import PgRangeSupportUtils._
case class Range[T](start: Option[T], end: Option[T], edge: EdgeType) {
def as[A](convert: (T => A)): Range[A] = {
new Range[A](start.map(convert), end.map(convert), edge)
}
override def toString = edge match {
case `[_,_)` => s"[${oToString(start)},${oToString(end)})"
case `(_,_]` => s"(${oToString(start)},${oToString(end)}]"
case `(_,_)` => s"(${oToString(start)},${oToString(end)})"
case `[_,_]` => s"[${oToString(start)},${oToString(end)}]"
case `empty` => Range.empty_str
}
}
object Range {
def emptyRange[T]: Range[T] = Range[T](None, None, `empty`)
val empty_str = "empty"
def apply[T](start: T, end: T, edge: EdgeType = `[_,_)`): Range[T] = Range(Some(start), Some(end), edge)
}
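// Illustrative note (added commentary): the textual forms mirror PostgreSQL range literals,
// e.g. Range(1, 10).toString == "[1,10)" and Range(Some(1), None, `[_,_)`).toString == "[1,)"
// (an unbounded upper end); Range.emptyRange[Int].toString == "empty".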
/**
 * Simple range support: if all you need is reading from / writing to the DB and using PostgreSQL range operators/methods, this should be enough
*/
trait PgRangeSupport extends range.PgRangeExtensions with utils.PgCommonJdbcTypes with PgDate2Support { driver: PostgresProfile =>
import driver.api._
private def toTimestamp(str: String) = Timestamp.valueOf(str)
private def toSQLDate(str: String) = Date.valueOf(str)
trait SimpleRangeCodeGenSupport {
// register types to let `ExModelBuilder` find them
if (driver.isInstanceOf[ExPostgresProfile]) {
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("int4range", classTag[Range[Int]])
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("int8range", classTag[Range[Long]])
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("numrange", classTag[Range[Float]])
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("tsrange", classTag[Range[Timestamp]])
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("daterange", classTag[Range[Date]])
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("tsrange", classTag[Range[LocalDateTime]])
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("tstzrange", classTag[Range[OffsetDateTime]])
driver.asInstanceOf[ExPostgresProfile].bindPgTypeToScala("daterange", classTag[Range[LocalDate]])
}
}
/// alias
trait RangeImplicits extends SimpleRangeImplicits
trait SimpleRangeImplicits extends SimpleRangeCodeGenSupport with Date2DateTimeFormatters {
implicit val simpleIntRangeTypeMapper: JdbcType[Range[Int]] = new GenericJdbcType[Range[Int]]("int4range", mkRangeFn(_.toInt))
implicit val simpleLongRangeTypeMapper: JdbcType[Range[Long]] = new GenericJdbcType[Range[Long]]("int8range", mkRangeFn(_.toLong))
implicit val simpleFloatRangeTypeMapper: JdbcType[Range[Float]] = new GenericJdbcType[Range[Float]]("numrange", mkRangeFn(_.toFloat))
implicit val simpleTimestampRangeTypeMapper: JdbcType[Range[Timestamp]] = new GenericJdbcType[Range[Timestamp]]("tsrange", mkRangeFn(toTimestamp))
implicit val simpleDateRangeTypeMapper: JdbcType[Range[Date]] = new GenericJdbcType[Range[Date]]("daterange", mkRangeFn(toSQLDate))
implicit val simpleLocalDateTimeRangeTypeMapper: JdbcType[Range[LocalDateTime]] = new GenericJdbcType[Range[LocalDateTime]]("tsrange", mkRangeFn(fromDateTimeOrInfinity))
implicit val simpleOffsetDateTimeRangeTypeMapper: JdbcType[Range[OffsetDateTime]] = new GenericJdbcType[Range[OffsetDateTime]]("tstzrange", mkRangeFn(fromOffsetDateTimeOrInfinity))
implicit val simpleLocalDateRangeTypeMapper: JdbcType[Range[LocalDate]] = new GenericJdbcType[Range[LocalDate]]("daterange", mkRangeFn(fromDateOrInfinity))
implicit def simpleRangeColumnExtensionMethods[B0](c: Rep[Range[B0]])(
implicit tm: JdbcType[B0], tm1: JdbcType[Range[B0]]) = {
new RangeColumnExtensionMethods[Range[B0], B0, Range[B0]](c)
}
implicit def simpleRangeOptionColumnExtensionMethods[B0](c: Rep[Option[Range[B0]]])(
implicit tm: JdbcType[B0], tm1: JdbcType[Range[B0]]) = {
new RangeColumnExtensionMethods[Range[B0], B0, Option[Range[B0]]](c)
}
}
trait SimpleRangePlainImplicits extends SimpleRangeCodeGenSupport with Date2DateTimeFormatters {
import utils.PlainSQLUtils._
// to support 'nextArray[T]/nextArrayOption[T]' in PgArraySupport
{
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(_.toInt))(r.nextString()))
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(_.toLong))(r.nextString()))
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(_.toFloat))(r.nextString()))
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(toTimestamp))(r.nextString()))
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(toSQLDate))(r.nextString()))
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(fromDateTimeOrInfinity))(r.nextString()))
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(fromOffsetDateTimeOrInfinity))(r.nextString()))
addNextArrayConverter((r) => utils.SimpleArrayUtils.fromString(mkRangeFn(fromDateOrInfinity))(r.nextString()))
}
implicit class PgRangePositionedResult(r: PositionedResult) {
def nextIntRange() = nextIntRangeOption().orNull
def nextIntRangeOption() = r.nextStringOption().map(mkRangeFn(_.toInt))
def nextLongRange() = nextLongRangeOption().orNull
def nextLongRangeOption() = r.nextStringOption().map(mkRangeFn(_.toLong))
def nextFloatRange() = nextFloatRangeOption().orNull
def nextFloatRangeOption() = r.nextStringOption().map(mkRangeFn(_.toFloat))
def nextTimestampRange() = nextTimestampRangeOption().orNull
def nextTimestampRangeOption() = r.nextStringOption().map(mkRangeFn(toTimestamp))
def nextDateRange() = nextDateRangeOption().orNull
def nextDateRangeOption() = r.nextStringOption().map(mkRangeFn(toSQLDate))
def nextLocalDateTimeRange() = nextLocalDateTimeRangeOption().orNull
def nextLocalDateTimeRangeOption() = r.nextStringOption().map(mkRangeFn(fromDateTimeOrInfinity))
def nextOffsetDateTimeRange() = nextOffsetDateTimeRangeOption().orNull
def nextOffsetDateTimeRangeOption() = r.nextStringOption().map(mkRangeFn(fromOffsetDateTimeOrInfinity))
def nextLocalDateRange() = nextLocalDateRangeOption().orNull
def nextLocalDateRangeOption() = r.nextStringOption().map(mkRangeFn(fromDateOrInfinity))
}
////////////////////////////////////////////////////////////////////
implicit val getIntRange = mkGetResult(_.nextIntRange())
implicit val getIntRangeOption = mkGetResult(_.nextIntRangeOption())
implicit val setIntRange = mkSetParameter[Range[Int]]("int4range")
implicit val setIntRangeOption = mkOptionSetParameter[Range[Int]]("int4range")
implicit val getLongRange = mkGetResult(_.nextLongRange())
implicit val getLongRangeOption = mkGetResult(_.nextLongRangeOption())
implicit val setLongRange = mkSetParameter[Range[Long]]("int8range")
implicit val setLongRangeOption = mkOptionSetParameter[Range[Long]]("int8range")
implicit val getFloatRange = mkGetResult(_.nextFloatRange())
implicit val getFloatRangeOption = mkGetResult(_.nextFloatRangeOption())
implicit val setFloatRange = mkSetParameter[Range[Float]]("numrange")
implicit val setFloatRangeOption = mkOptionSetParameter[Range[Float]]("numrange")
implicit val getTimestampRange = mkGetResult(_.nextTimestampRange())
implicit val getTimestampRangeOption = mkGetResult(_.nextTimestampRangeOption())
implicit val setTimestampRange = mkSetParameter[Range[Timestamp]]("tsrange")
implicit val setTimestampRangeOption = mkOptionSetParameter[Range[Timestamp]]("tsrange")
implicit val getDateRange = mkGetResult(_.nextDateRange())
implicit val getDateRangeOption = mkGetResult(_.nextDateRangeOption())
implicit val setDateRange = mkSetParameter[Range[Date]]("daterange")
implicit val setDateRangeOption = mkOptionSetParameter[Range[Date]]("daterange")
implicit val getLocalDateTimeRange = mkGetResult(_.nextLocalDateTimeRange())
implicit val getLocalDateTimeRangeOption = mkGetResult(_.nextLocalDateTimeRangeOption())
implicit val setLocalDateTimeRange = mkSetParameter[Range[LocalDateTime]]("tsrange")
implicit val setLocalDateTimeRangeOption = mkOptionSetParameter[Range[LocalDateTime]]("tsrange")
implicit val getOffsetDateTimeRange = mkGetResult(_.nextOffsetDateTimeRange())
implicit val getOffsetDateTimeRangeOption = mkGetResult(_.nextOffsetDateTimeRangeOption())
implicit val setOffsetDateTimeRange = mkSetParameter[Range[OffsetDateTime]]("tsrange")
implicit val setOffsetDateTimeRangeOption = mkOptionSetParameter[Range[OffsetDateTime]]("tsrange")
implicit val getLocalDateRange = mkGetResult(_.nextLocalDateRange())
implicit val getLocalDateRangeOption = mkGetResult(_.nextLocalDateRangeOption())
implicit val setLocalDateRange = mkSetParameter[Range[LocalDate]]("daterange")
implicit val setLocalDateRangeOption = mkOptionSetParameter[Range[LocalDate]]("daterange")
}
}
object PgRangeSupportUtils {
// regular expr matchers to range string
val `[_,_)Range` = """\\["?([^,"]*)"?,[ ]*"?([^,"]*)"?\\)""".r // matches: [_,_)
val `(_,_]Range` = """\\("?([^,"]*)"?,[ ]*"?([^,"]*)"?\\]""".r // matches: (_,_]
val `(_,_)Range` = """\\("?([^,"]*)"?,[ ]*"?([^,"]*)"?\\)""".r // matches: (_,_)
val `[_,_]Range` = """\\["?([^,"]*)"?,[ ]*"?([^,"]*)"?\\]""".r // matches: [_,_]
def mkRangeFn[T](convert: (String => T)): (String => Range[T]) = {
def conv[T](str: String, convert: (String => T)): Option[T] =
Option(str).filterNot(_.isEmpty).map(convert)
(str: String) => str match {
case Range.`empty_str` => Range.emptyRange[T]
case `[_,_)Range`(start, end) => Range(conv(start, convert), conv(end, convert), `[_,_)`)
case `(_,_]Range`(start, end) => Range(conv(start, convert), conv(end, convert), `(_,_]`)
case `(_,_)Range`(start, end) => Range(conv(start, convert), conv(end, convert), `(_,_)`)
case `[_,_]Range`(start, end) => Range(conv(start, convert), conv(end, convert), `[_,_]`)
}
}
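  // Illustrative sketch: parsing follows the same literal forms, e.g.
  //   mkRangeFn(_.toInt)("[1,10)") == Range(Some(1), Some(10), `[_,_)`)
  //   mkRangeFn(_.toInt)("empty") == Range.emptyRange[Int]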
def toStringFn[T](toString: (T => String)): (Range[T] => String) =
(r: Range[T]) => r.edge match {
case `empty` => Range.empty_str
case `[_,_)` => s"[${oToString(r.start, toString)},${oToString(r.end, toString)})"
case `(_,_]` => s"(${oToString(r.start, toString)},${oToString(r.end, toString)}]"
case `(_,_)` => s"(${oToString(r.start, toString)},${oToString(r.end, toString)})"
case `[_,_]` => s"[${oToString(r.start, toString)},${oToString(r.end, toString)}]"
}
///
def mkWithLength[T](start: T, length: Double, edge: EdgeType = `[_,_)`) = {
val upper = (start.asInstanceOf[Double] + length).asInstanceOf[T]
new Range[T](Some(start), Some(upper), edge)
}
def mkWithInterval[T <: java.util.Date](start: T, interval: Interval, edge: EdgeType = `[_,_)`) = {
val end = (start +: interval).asInstanceOf[T]
new Range[T](Some(start), Some(end), edge)
}
////// helper methods
private[slickpg] def oToString[T](o: Option[T], toString: (T => String) = (r: T) => r.toString) =
o.map(toString).getOrElse("")
}
| tminglei/slick-pg | src/main/scala/com/github/tminglei/slickpg/PgRangeSupport.scala | Scala | bsd-2-clause | 11,693 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package mytypes.duration
@SerialVersionUID(0L)
final case class Duration(
seconds: _root_.scala.Int = 0,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
) extends scalapb.GeneratedMessage
with scalapb.lenses.Updatable[Duration] {
@transient
private[this] var __serializedSizeMemoized: _root_.scala.Int = 0
private[this] def __computeSerializedSize(): _root_.scala.Int = {
var __size = 0
{
val __value = seconds
if (__value != 0) {
__size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(1, __value)
}
};
__size += unknownFields.serializedSize
__size
}
override def serializedSize: _root_.scala.Int = {
var __size = __serializedSizeMemoized
if (__size == 0) {
__size = __computeSerializedSize() + 1
__serializedSizeMemoized = __size
}
__size - 1
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
{
val __v = seconds
if (__v != 0) {
_output__.writeInt32(1, __v)
}
};
unknownFields.writeTo(_output__)
}
def withSeconds(__v: _root_.scala.Int): Duration = copy(seconds = __v)
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @ _root_.scala.unchecked) match {
case 1 => {
val __t = seconds
if (__t != 0) __t else null
}
}
}
def getField(
__field: _root_.scalapb.descriptors.FieldDescriptor
): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @ _root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PInt(seconds)
}
}
def toProtoString: _root_.scala.Predef.String =
_root_.scalapb.TextFormat.printToUnicodeString(this)
def companion: mytypes.duration.Duration.type = mytypes.duration.Duration
// @@protoc_insertion_point(GeneratedMessage[mytypes.Duration])
}
object Duration extends scalapb.GeneratedMessageCompanion[mytypes.duration.Duration] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[mytypes.duration.Duration] = this
def parseFrom(
`_input__`: _root_.com.google.protobuf.CodedInputStream
): mytypes.duration.Duration = {
var __seconds: _root_.scala.Int = 0
var `_unknownFields__` : _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 8 =>
__seconds = _input__.readInt32()
case tag =>
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder()
}
_unknownFields__.parseField(tag, _input__)
}
}
mytypes.duration.Duration(
seconds = __seconds,
unknownFields =
if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty
else _unknownFields__.result()
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[mytypes.duration.Duration] =
_root_.scalapb.descriptors.Reads {
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(
__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor),
"FieldDescriptor does not match message type."
)
mytypes.duration.Duration(
seconds = __fieldsMap
.get(scalaDescriptor.findFieldByNumber(1).get)
.map(_.as[_root_.scala.Int])
.getOrElse(0)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor =
DurationProto.javaDescriptor.getMessageTypes().get(0)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor =
DurationProto.scalaDescriptor.messages(0)
def messageCompanionForFieldNumber(
__number: _root_.scala.Int
): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number)
lazy val nestedMessagesCompanions
: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] =
Seq.empty
def enumCompanionForFieldNumber(
__fieldNumber: _root_.scala.Int
): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = mytypes.duration.Duration(
seconds = 0
)
implicit class DurationLens[UpperPB](
_l: _root_.scalapb.lenses.Lens[UpperPB, mytypes.duration.Duration]
) extends _root_.scalapb.lenses.ObjectLens[UpperPB, mytypes.duration.Duration](_l) {
def seconds: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] =
field(_.seconds)((c_, f_) => c_.copy(seconds = f_))
}
final val SECONDS_FIELD_NUMBER = 1
def of(
seconds: _root_.scala.Int
): _root_.mytypes.duration.Duration = _root_.mytypes.duration.Duration(
seconds
)
// @@protoc_insertion_point(GeneratedMessageCompanion[mytypes.Duration])
}
| scalapb/ScalaPB | docs/src/main/scala/generated/mytypes/duration/Duration.scala | Scala | apache-2.0 | 5,445 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming
import scala.collection.mutable.ArrayBuffer
import java.io.{ObjectInputStream, IOException, ObjectOutputStream}
import org.apache.spark.Logging
import org.apache.spark.streaming.scheduler.Job
import org.apache.spark.streaming.dstream.{DStream, NetworkInputDStream, InputDStream}
final private[streaming] class DStreamGraph extends Serializable with Logging {
private val inputStreams = new ArrayBuffer[InputDStream[_]]()
private val outputStreams = new ArrayBuffer[DStream[_]]()
var rememberDuration: Duration = null
var checkpointInProgress = false
var zeroTime: Time = null
var startTime: Time = null
var batchDuration: Duration = null
def start(time: Time) {
this.synchronized {
if (zeroTime != null) {
throw new Exception("DStream graph computation already started")
}
zeroTime = time
startTime = time
outputStreams.foreach(_.initialize(zeroTime))
outputStreams.foreach(_.remember(rememberDuration))
outputStreams.foreach(_.validate)
inputStreams.par.foreach(_.start())
}
}
def restart(time: Time) {
this.synchronized { startTime = time }
}
def stop() {
this.synchronized {
inputStreams.par.foreach(_.stop())
}
}
def setContext(ssc: StreamingContext) {
this.synchronized {
outputStreams.foreach(_.setContext(ssc))
}
}
def setBatchDuration(duration: Duration) {
this.synchronized {
if (batchDuration != null) {
throw new Exception("Batch duration already set as " + batchDuration +
". cannot set it again.")
}
batchDuration = duration
}
}
def remember(duration: Duration) {
this.synchronized {
if (rememberDuration != null) {
throw new Exception("Remember duration already set as " + batchDuration +
". cannot set it again.")
}
rememberDuration = duration
}
}
def addInputStream(inputStream: InputDStream[_]) {
this.synchronized {
inputStream.setGraph(this)
inputStreams += inputStream
}
}
def addOutputStream(outputStream: DStream[_]) {
this.synchronized {
outputStream.setGraph(this)
outputStreams += outputStream
}
}
def getInputStreams() = this.synchronized { inputStreams.toArray }
def getOutputStreams() = this.synchronized { outputStreams.toArray }
def getNetworkInputStreams() = this.synchronized {
inputStreams.filter(_.isInstanceOf[NetworkInputDStream[_]])
.map(_.asInstanceOf[NetworkInputDStream[_]])
.toArray
}
def generateJobs(time: Time): Seq[Job] = {
logDebug("Generating jobs for time " + time)
val jobs = this.synchronized {
outputStreams.flatMap(outputStream => outputStream.generateJob(time))
}
logDebug("Generated " + jobs.length + " jobs for time " + time)
jobs
}
def clearMetadata(time: Time) {
logDebug("Clearing metadata for time " + time)
this.synchronized {
outputStreams.foreach(_.clearMetadata(time))
}
logDebug("Cleared old metadata for time " + time)
}
def updateCheckpointData(time: Time) {
logInfo("Updating checkpoint data for time " + time)
this.synchronized {
outputStreams.foreach(_.updateCheckpointData(time))
}
logInfo("Updated checkpoint data for time " + time)
}
def clearCheckpointData(time: Time) {
logInfo("Clearing checkpoint data for time " + time)
this.synchronized {
outputStreams.foreach(_.clearCheckpointData(time))
}
logInfo("Cleared checkpoint data for time " + time)
}
def restoreCheckpointData() {
logInfo("Restoring checkpoint data")
this.synchronized {
outputStreams.foreach(_.restoreCheckpointData())
}
logInfo("Restored checkpoint data")
}
def validate() {
this.synchronized {
assert(batchDuration != null, "Batch duration has not been set")
//assert(batchDuration >= Milliseconds(100), "Batch duration of " + batchDuration +
// " is very low")
assert(getOutputStreams().size > 0, "No output streams registered, so nothing to execute")
}
}
@throws(classOf[IOException])
private def writeObject(oos: ObjectOutputStream) {
logDebug("DStreamGraph.writeObject used")
this.synchronized {
checkpointInProgress = true
logDebug("Enabled checkpoint mode")
oos.defaultWriteObject()
checkpointInProgress = false
logDebug("Disabled checkpoint mode")
}
}
@throws(classOf[IOException])
private def readObject(ois: ObjectInputStream) {
logDebug("DStreamGraph.readObject used")
this.synchronized {
checkpointInProgress = true
ois.defaultReadObject()
checkpointInProgress = false
}
}
}
| sryza/spark | streaming/src/main/scala/org/apache/spark/streaming/DStreamGraph.scala | Scala | apache-2.0 | 5,539 |
/*******************************************************************************
Copyright (c) 2012-2013, S-Core, KAIST.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
***************************************************************************** */
package kr.ac.kaist.jsaf.analysis.typing.domain
import scala.collection.immutable.HashMap
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
/**
* Location Property Set
* Loc : #1, #2, ...
* Set[String] : Property names
*/
case class LPSet(map: Map[Loc,Set[String]]) {
def + (pair: (Loc,String)) = {
LPSet(map.get(pair._1) match {
case Some(s) => map + (pair._1 -> (s + pair._2))
case None => map + (pair._1 -> Set(pair._2))
})
}
def ++ (lpset: LPSet) = {
LPSet(lpset.map.foldLeft(this.map)((m, kv) =>
m.get(kv._1) match {
case Some(s) => m + (kv._1 -> (s ++ kv._2))
case None => m + (kv._1 -> kv._2)
}))
}
def -- (lpset: LPSet) = {
val new_map =
lpset.map.foldLeft(this.map)((m, kv) =>
m.get(kv._1) match {
case Some(s) => m + (kv._1 -> (s -- kv._2))
case None => m
})
LPSet(new_map.foldLeft(new_map)((m, kv) => if (kv._2.isEmpty) m - kv._1 else m))
}
def get(l: Loc) = map.get(l)
def toSet = {
map.foldLeft[Set[(Loc,String)]](Set())((S, kv) => {
S ++ (kv._2.map((n) => (kv._1,n)))
})
}
def isEmpty: Boolean = toSet.isEmpty
def toLSet: LocSet = {
LocSetBot ++ map.keySet
}
def subsetOf(lpset: LPSet) = {
if (this.map.keySet.subsetOf(lpset.map.keySet))
this.map.foldLeft(true)((b, kv) =>
if (b && kv._2.subsetOf(lpset.map(kv._1))) true
else false)
else
false
}
def ppLoc(loc: Loc): String = {
val name = locName(loc)
if (isOldLoc(loc))
"##" + name
else
"#" + name
/*
loc._2 match {
case Recent => "#" + name
case Old => "##" + name
}*/
}
override def toString() = {
toSet.foldLeft("")((s, kv) => s + "{"+ppLoc(kv._1)+", "+kv._2+"}, ")
}
}
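// Illustrative note: LPSet keeps one property-name set per location, e.g. for a hypothetical
// loc1: Loc, LPSet((loc1, "p")) + ((loc1, "q")) has map == Map(loc1 -> Set("p", "q")).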
object LPSet {
def apply(pair: (Loc,String)): LPSet = {
LPSet(HashMap() + (pair._1 -> Set(pair._2)))
}
def apply(set: Set[(Loc,String)]): LPSet = {
LPSet(set.foldLeft[Map[Loc,Set[String]]](HashMap())((m, kv) =>
m.get(kv._1) match {
case Some(s) => m + (kv._1 -> (s + kv._2))
case None => m + (kv._1 -> Set(kv._2))
}))
}
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/domain/LPSet.scala | Scala | bsd-3-clause | 2,521 |
package clasp.scripts
import scala.sys.process.stringToProcess
object Main extends App {
  // Run the deploy action when it is requested as the first command-line argument.
  if (args.nonEmpty && args(0) == "deploy") {
    val command = "echo \\"Funciona\\""
    // Run the shell command and capture its standard output.
    val output: String = command.!!
    println(output)
  }
}
| hamiltont/clasp | src/clasp/scripts/main.scala | Scala | mit | 237 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka010
import java.io.{File, IOException}
import java.lang.{Integer => JInt}
import java.net.InetSocketAddress
import java.util.{Map => JMap, Properties}
import java.util.concurrent.{TimeoutException, TimeUnit}
import scala.annotation.tailrec
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
import kafka.api.Request
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.zk.{AdminZkClient, KafkaZkClient}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.network.ListenerName
import org.apache.kafka.common.serialization.StringSerializer
import org.apache.kafka.common.utils.{Time => KTime}
import org.apache.zookeeper.server.{NIOServerCnxnFactory, ZooKeeperServer}
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.internal.Logging
import org.apache.spark.streaming.Time
import org.apache.spark.util.{ShutdownHookManager, Utils}
/**
* This is a helper class for Kafka test suites. This has the functionality to set up
* and tear down local Kafka servers, and to push data using Kafka producers.
*
* The reason to put Kafka test utility class in src is to test Python related Kafka APIs.
*/
private[kafka010] class KafkaTestUtils extends Logging {
// Zookeeper related configurations
private val zkHost = "127.0.0.1"
private var zkPort: Int = 0
private val zkConnectionTimeout = 60000
private val zkSessionTimeout = 10000
private var zookeeper: EmbeddedZookeeper = _
private var zkClient: KafkaZkClient = _
private var admClient: AdminZkClient = _
// Kafka broker related configurations
private val brokerHost = "127.0.0.1"
private var brokerPort = 0
private var brokerConf: KafkaConfig = _
// Kafka broker server
private var server: KafkaServer = _
// Kafka producer
private var producer: KafkaProducer[String, String] = _
// Flag to test whether the system is correctly started
private var zkReady = false
private var brokerReady = false
private var leakDetector: AnyRef = null
def zkAddress: String = {
assert(zkReady, "Zookeeper not setup yet or already torn down, cannot get zookeeper address")
s"$zkHost:$zkPort"
}
def brokerAddress: String = {
assert(brokerReady, "Kafka not setup yet or already torn down, cannot get broker address")
s"$brokerHost:$brokerPort"
}
def zookeeperClient: KafkaZkClient = {
assert(zkReady, "Zookeeper not setup yet or already torn down, cannot get zookeeper client")
Option(zkClient).getOrElse(
throw new IllegalStateException("Zookeeper client is not yet initialized"))
}
def adminClient: AdminZkClient = {
assert(zkReady, "Zookeeper not setup yet or already torn down, cannot get zookeeper client")
Option(admClient).getOrElse(
throw new IllegalStateException("Admin client is not yet initialized"))
}
// Set up the Embedded Zookeeper server and get the proper Zookeeper port
private def setupEmbeddedZookeeper(): Unit = {
// Zookeeper server startup
zookeeper = new EmbeddedZookeeper(s"$zkHost:$zkPort")
// Get the actual zookeeper binding port
zkPort = zookeeper.actualPort
zkClient = KafkaZkClient(s"$zkHost:$zkPort", isSecure = false, zkSessionTimeout,
zkConnectionTimeout, 1, KTime.SYSTEM)
admClient = new AdminZkClient(zkClient)
zkReady = true
}
// Set up the Embedded Kafka server
private def setupEmbeddedKafkaServer(): Unit = {
assert(zkReady, "Zookeeper should be set up beforehand")
// Kafka broker startup
Utils.startServiceOnPort(brokerPort, port => {
brokerPort = port
brokerConf = new KafkaConfig(brokerConfiguration, doLog = false)
server = new KafkaServer(brokerConf)
server.startup()
brokerPort = server.boundPort(new ListenerName("PLAINTEXT"))
(server, brokerPort)
}, new SparkConf(), "KafkaBroker")
brokerReady = true
}
/** setup the whole embedded servers, including Zookeeper and Kafka brokers */
def setup(): Unit = {
// Set up a KafkaTestUtils leak detector so that we can see where the leak KafkaTestUtils is
// created.
val exception = new SparkException("It was created at: ")
leakDetector = ShutdownHookManager.addShutdownHook { () =>
logError("Found a leak KafkaTestUtils.", exception)
}
setupEmbeddedZookeeper()
setupEmbeddedKafkaServer()
}
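  // Hedged lifecycle sketch (illustrative): a test would typically do
  //   val testUtils = new KafkaTestUtils
  //   testUtils.setup()
  //   testUtils.createTopic("topic", 1)
  //   testUtils.sendMessages("topic", Array("msg1", "msg2"))
  //   testUtils.teardown()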
/** Teardown the whole servers, including Kafka broker and Zookeeper */
def teardown(): Unit = {
if (leakDetector != null) {
ShutdownHookManager.removeShutdownHook(leakDetector)
}
brokerReady = false
zkReady = false
if (producer != null) {
producer.close()
producer = null
}
if (server != null) {
server.shutdown()
server.awaitShutdown()
server = null
}
// On Windows, `logDirs` is left open even after Kafka server above is completely shut down
// in some cases. It leads to test failures on Windows if the directory deletion failure
// throws an exception.
brokerConf.logDirs.foreach { f =>
try {
Utils.deleteRecursively(new File(f))
} catch {
case e: IOException if Utils.isWindows =>
logWarning(e.getMessage)
}
}
if (zkClient != null) {
zkClient.close()
zkClient = null
}
if (zookeeper != null) {
zookeeper.shutdown()
zookeeper = null
}
}
/** Create a Kafka topic and wait until it is propagated to the whole cluster */
def createTopic(topic: String, partitions: Int, config: Properties): Unit = {
adminClient.createTopic(topic, partitions, 1, config)
// wait until metadata is propagated
(0 until partitions).foreach { p =>
waitUntilMetadataIsPropagated(topic, p)
}
}
/** Create a Kafka topic and wait until it is propagated to the whole cluster */
def createTopic(topic: String, partitions: Int): Unit = {
createTopic(topic, partitions, new Properties())
}
/** Create a Kafka topic and wait until it is propagated to the whole cluster */
def createTopic(topic: String): Unit = {
createTopic(topic, 1, new Properties())
}
/** Java-friendly function for sending messages to the Kafka broker */
def sendMessages(topic: String, messageToFreq: JMap[String, JInt]): Unit = {
sendMessages(topic, Map(messageToFreq.asScala.mapValues(_.intValue()).toSeq: _*))
}
/** Send the messages to the Kafka broker */
def sendMessages(topic: String, messageToFreq: Map[String, Int]): Unit = {
val messages = messageToFreq.flatMap { case (s, freq) => Seq.fill(freq)(s) }.toArray
sendMessages(topic, messages)
}
/** Send the array of messages to the Kafka broker */
def sendMessages(topic: String, messages: Array[String]): Unit = {
producer = new KafkaProducer[String, String](producerConfiguration)
messages.foreach { message =>
producer.send(new ProducerRecord[String, String](topic, message))
}
producer.close()
producer = null
}
/** Send the array of (key, value) messages to the Kafka broker */
def sendMessages(topic: String, messages: Array[(String, String)]): Unit = {
producer = new KafkaProducer[String, String](producerConfiguration)
messages.foreach { message =>
producer.send(new ProducerRecord[String, String](topic, message._1, message._2))
}
producer.close()
producer = null
}
val brokerLogDir = Utils.createTempDir().getAbsolutePath
private def brokerConfiguration: Properties = {
val props = new Properties()
props.put("broker.id", "0")
props.put("host.name", "127.0.0.1")
props.put("advertised.host.name", "127.0.0.1")
props.put("port", brokerPort.toString)
props.put("log.dir", brokerLogDir)
props.put("zookeeper.connect", zkAddress)
props.put("zookeeper.connection.timeout.ms", "60000")
props.put("log.flush.interval.messages", "1")
props.put("replica.socket.timeout.ms", "1500")
props.put("delete.topic.enable", "true")
props.put("offsets.topic.num.partitions", "1")
props.put("offsets.topic.replication.factor", "1")
props.put("group.initial.rebalance.delay.ms", "10")
props
}
private def producerConfiguration: Properties = {
val props = new Properties()
props.put("bootstrap.servers", brokerAddress)
props.put("value.serializer", classOf[StringSerializer].getName)
// Key serializer is required.
props.put("key.serializer", classOf[StringSerializer].getName)
// wait for all in-sync replicas to ack sends
props.put("acks", "all")
props
}
// A simplified version of scalatest eventually, rewritten here to avoid adding extra test
// dependency
def eventually[T](timeout: Time, interval: Time)(func: => T): T = {
def makeAttempt(): Either[Throwable, T] = {
try {
Right(func)
} catch {
case e if NonFatal(e) => Left(e)
}
}
val startTimeNs = System.nanoTime()
@tailrec
def tryAgain(attempt: Int): T = {
makeAttempt() match {
case Right(result) => result
case Left(e) =>
val durationMs = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTimeNs)
if (durationMs < timeout.milliseconds) {
Thread.sleep(interval.milliseconds)
} else {
throw new TimeoutException(e.getMessage)
}
tryAgain(attempt + 1)
}
}
tryAgain(1)
}
private def waitUntilMetadataIsPropagated(topic: String, partition: Int): Unit = {
def isPropagated = server.dataPlaneRequestProcessor.metadataCache
.getPartitionInfo(topic, partition) match {
case Some(partitionState) =>
val leader = partitionState.leader
val isr = partitionState.isr
zkClient.getLeaderForPartition(new TopicPartition(topic, partition)).isDefined &&
Request.isValidBrokerId(leader) && !isr.isEmpty
case _ =>
false
}
eventually(Time(10000), Time(100)) {
assert(isPropagated, s"Partition [$topic, $partition] metadata not propagated after timeout")
}
}
private class EmbeddedZookeeper(val zkConnect: String) {
val snapshotDir = Utils.createTempDir()
val logDir = Utils.createTempDir()
val zookeeper = new ZooKeeperServer(snapshotDir, logDir, 500)
val (ip, port) = {
val splits = zkConnect.split(":")
(splits(0), splits(1).toInt)
}
val factory = new NIOServerCnxnFactory()
factory.configure(new InetSocketAddress(ip, port), 16)
factory.startup(zookeeper)
val actualPort = factory.getLocalPort
def shutdown(): Unit = {
factory.shutdown()
// The directories are not closed even if the ZooKeeper server is shut down.
// Please see ZOOKEEPER-1844, which is fixed in 3.4.6+. It leads to test failures
// on Windows if the directory deletion failure throws an exception.
try {
Utils.deleteRecursively(snapshotDir)
} catch {
case e: IOException if Utils.isWindows =>
logWarning(e.getMessage)
}
try {
Utils.deleteRecursively(logDir)
} catch {
case e: IOException if Utils.isWindows =>
logWarning(e.getMessage)
}
}
}
}
| maropu/spark | external/kafka-0-10/src/test/scala/org/apache/spark/streaming/kafka010/KafkaTestUtils.scala | Scala | apache-2.0 | 12,099 |
package fastparsers.tools
/**
* Created by Eric on 10.04.14.
*/
import java.util
import scala.collection.mutable.ArrayBuffer
import scala.util.parsing.input._
/**
* Create a position object for an input
*/
object ToPosition {
trait ToPosition[T] {
def get(offset: Int): Position
}
type IndexedCharSeq[T] = {
def apply(n: Int):Char
def size: Int
def slice(start: Int, end: Int): T
}
/**
* TODO change...
*/
implicit class StringProxy(s: String) {
def apply(n: Int):Char = s.charAt(n)
def size: Int = s.length
def slice(start: Int, end: Int): String = s.substring(start,end)
}
implicit class CharArrayProxy(s: Array[Char]) {
def apply(n: Int):Char = s(n)
def size: Int = s.length
def slice(start: Int, end: Int): Array[Char] = util.Arrays.copyOfRange(s, start, end) // end is exclusive, matching the String proxy above
}
/*
Almost copy pasted from scala.util.parsing.input.OffsetPosition @ https://github.com/scala/scala/blob/v2.10.2/src/library/scala/util/parsing/input/OffsetPosition.scala
*/
class IndexedCharSeqToPosition[T](input: IndexedCharSeq[T]) extends ToPosition[IndexedCharSeq[T]] {
val index = new ArrayBuffer[Int]()
index += 0
def computeIndexTill(pos: Int) {
def compute(from: Int, to: Int) = {
for (i <- from until to)
if (input(i) == '\n')
index += (i + 1)
if (pos >= input.size)
index += input.size
}
if (pos > index.last && index.last < input.size)
compute(index.last + 1,Math.min(pos,input.size))
}
def lineFromOffset(pos: Int) = {
computeIndexTill(pos)
var lo = 0
var hi = index.length - 1
while (lo + 1 < hi) {
val mid = (hi + lo) / 2
if (pos < index(mid)) hi = mid
else lo = mid
}
lo + 1
}
def get(offset: Int) = {
val l = lineFromOffset(offset)
new Position {
override protected def lineContents = input.slice(index(line - 1), index(line)).toString //TODO change that
override def line = l
override def column = offset - index(lineFromOffset(l) - 1) + 1
}
}
}
} | begeric/FastParsers | FastParsers/src/main/scala/fastparsers/tools/ToPosition.scala | Scala | mit | 2,187 |
package no.skytteren.elasticala.index
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import org.elasticsearch.action.ActionListener
import org.elasticsearch.action.admin.indices.flush.FlushRequestBuilder
import org.elasticsearch.action.admin.indices.flush.FlushResponse
import org.elasticsearch.client.{ Client => EsClient }
import no.skytteren.elasticala.Executor
import no.skytteren.elasticala.Index
import no.skytteren.elasticala.Request
import no.skytteren.elasticala.Response
import org.elasticsearch.action.ShardOperationFailedException
case class FlushIndexRequest(index: Index) extends Request
case class FlushIndexResponse(totalShards: Int, successfulShards: Int, failedShards: Int,
shardFailures: List[ShardOperationFailedException]) extends Response{
val flushed = successfulShards == totalShards
}
class FlushIndexExecutor extends Executor[FlushIndexRequest, FlushIndexResponse] {
def execute(req: FlushIndexRequest, client: EsClient)(implicit ec: ExecutionContext): Future[FlushIndexResponse] = {
val promise = Promise[FlushIndexResponse]()
val listener = new ActionListener[FlushResponse]{
override def onResponse(response: FlushResponse): Unit = {
promise.success(FlushIndexResponse(response.getTotalShards, response.getSuccessfulShards,
response.getFailedShards, response.getShardFailures.toList))
}
def onFailure(e: Throwable): Unit = promise.failure(e)
}
client.admin().indices().prepareFlush(req.index.value).execute(listener)
promise.future
}
} | skytteren/elasticala | src/main/scala/no/skytteren/elasticala/index/Flush.scala | Scala | apache-2.0 | 1,594 |
package org.romeo.loveletter.game
import scalaz.State
import Game._
object Deck {
val cards: Seq[Card] = Seq.fill(5)(Guard) ++
Seq.fill(2)(Priest) ++
Seq.fill(2)(Baron) ++
Seq.fill(2)(Handmaid) ++
Seq.fill(2)(Prince) ++
Seq.fill(1)(King) ++
Seq.fill(1)(Countess) ++
Seq.fill(1)(Princess)
def getCardByName(name: String): Option[Card] = {
cards.distinct.find(card => card.name.equalsIgnoreCase(name) || card.value.toString == name)
}
def isCardName(name: String): Boolean = getCardByName(name).isDefined
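// Illustrative lookups (sketch added for clarity, not part of the original file): a card can be
// found either by its case-insensitive name or by its numeric value rendered as a string, e.g.
//   Deck.getCardByName("guard") // Some(Guard)
//   Deck.getCardByName("1")     // Some(Guard)
//   Deck.getCardByName("tower") // None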
}
trait Card {
val value: Int
val name: String
val description: String
val requiresTarget: Boolean
val requiresGuess: Boolean
def doAction(discarder: Player, targetName: Option[String], guess: Option[Card]): State[Game, Either[String, Message]]
override def toString: String =
s"""$value: $name
|$description""".stripMargin
}
object Card {
implicit def orderingByValue[A <: Card]: Ordering[A] =
Ordering.by(a => a.value)
}
case object Guard extends Card {
val value = 1
val name = "Guard"
val description = "Name a non-Guard card and choose another player. If that player has that card, he or she is out of the round."
val requiresTarget: Boolean = true
val requiresGuess: Boolean = true
override def doAction(discarder: Player, targetName: Option[String], guess: Option[Card]): State[Game, Either[String, Message]] = {
if (targetName.isEmpty || guess.isEmpty) {
return Game.isEveryoneElseProtectedOrEliminated.map(if (_) {
Right(s"Everyone is safe, $name discarded with no effect")
} else {
Left(s"A target and guess must be specified")
})
}
if (guess.get == Guard) {
State.state(Left("You can't guess Guard"))
} else {
Game.getPlayer(targetName.get).flatMap(pOption => {
pOption.map(p =>
if (p.isProtected) {
State.state[Game, Either[String, Message]](Left(s"${p.name} is protected"))
} else if (p.isEliminated) {
State.state[Game, Either[String, Message]](Left(s"${p.name} isn't in the match"))
} else if (p.name == discarder.name) {
State.state[Game, Either[String, Message]](Left(s"You can't target yourself with Guard"))
} else if (p.hand.contains(guess.get)) {
Game.eliminatePlayer(p.name, isEliminated = true).map(_ => Right(s"You're right! ${p.name} is out"): Either[String, Message])
} else {
State.state[Game, Either[String, Message]](Right(s"${p.name} does not have a ${guess.get.name}"))
}
).getOrElse(State.state[Game, Either[String, Message]](Left(s"${targetName.get} isn't in the game!")))
})
}
}
}
case object Priest extends Card {
val value = 2
val name = "Priest"
val description = "Look at another player's hand."
val requiresTarget: Boolean = true
val requiresGuess: Boolean = false
override def doAction(discarder: Player, targetName: Option[String], guess: Option[Card] = None): State[Game, Either[String, Message]] = {
if (targetName.isEmpty) {
return Game.isEveryoneElseProtectedOrEliminated.map(if (_) {
Right(s"Everyone is safe, $name discarded with no effect")
} else {
Left(s"A target must be specified")
})
}
Game.getPlayer(targetName.get).flatMap(pOption => {
pOption.map(p =>
if (p.isProtected) {
State.state[Game, Either[String, Message]](Left(s"${p.name} is protected"))
} else if (p.isEliminated) {
State.state[Game, Either[String, Message]](Left(s"${p.name} isn't in the match"))
} else if (p.name == discarder.name) {
State.state[Game, Either[String, Message]](Left(s"You can't target yourself with Priest"))
} else {
State.state[Game, Either[String, Message]](Right(Private(s"${p.name} has a ${p.hand.head}")))
}
).getOrElse(State.state[Game, Either[String, Message]](Left(s"${targetName.get} isn't in the game!")))
})
}
}
case object Baron extends Card {
val value = 3
val name = "Baron"
val description = "You and another player secretly compare hands. The player with the lower value is out of the round."
val requiresTarget: Boolean = true
val requiresGuess: Boolean = false
override def doAction(discarder: Player, targetName: Option[String], guess: Option[Card] = None): State[Game, Either[String, Message]] = {
if (targetName.isEmpty) {
return Game.isEveryoneElseProtectedOrEliminated.map(if (_) {
Right(s"Everyone is safe, $name discarded with no effect")
} else {
Left(s"A target must be specified")
})
}
Game.getPlayer(targetName.get).flatMap(pOption => {
pOption.map(p => {
if (p.isProtected) {
State.state[Game, Either[String, Message]](Left(s"${p.name} is protected"))
} else if (p.isEliminated) {
State.state[Game, Either[String, Message]](Left(s"${p.name} isn't in the match"))
} else if (p.name == discarder.name) {
State.state[Game, Either[String, Message]](Left(s"You can't target yourself with Baron"))
} else {
val playerCard = discarder.hand.diff(Seq(Baron)).head
//discard hasn't been processed yet, so remove the baron for the comparison
val targetCard = p.hand.head
if (targetCard.value > playerCard.value) {
Game.eliminatePlayer(discarder.name, isEliminated = true).map(_ => Right(s"${discarder.name} has been eliminated and discards a ${playerCard.name}"): Either[String, Message])
} else if (targetCard.value < playerCard.value) {
Game.eliminatePlayer(p.name, isEliminated = true).map(_ => Right(s"${p.name} has been eliminated and discards a ${targetCard.name}"): Either[String, Message])
} else {
State.state[Game, Either[String, Message]](Right("It is a tie. No one is eliminated"))
}
}
}).getOrElse(State.state[Game, Either[String, Message]](Left(s"${targetName.get} isn't in the game!")))
})
}
}
case object Handmaid extends Card {
val value = 4
val name = "Handmaid"
val description = "Until your next turn, ignore all effects from other player's cards."
val requiresTarget: Boolean = false
val requiresGuess: Boolean = false
override def doAction(discarder: Player, targetName: Option[String] = None, guess: Option[Card] = None): State[Game, Either[String, Message]] = {
Game.protectPlayer(discarder.name, isProtected = true).map(_ => Right(s"${discarder.name} is protected")) //HAMMAID
}
}
case object Prince extends Card {
val value = 5
val name = "Prince"
val description = "Choose any player (including yourself) to discard his or her hand and draw a new card."
val requiresTarget: Boolean = true
val requiresGuess: Boolean = false
val privateResponse: Boolean = false
override def doAction(discarder: Player, targetName: Option[String], guess: Option[Card] = None): State[Game, Either[String, Message]] = {
if (targetName.isEmpty) {
return State.state[Game, Either[String, Message]](Left(s"A target must be specified"))
}
if (discarder.hand.contains(Countess)) {
return State.state[Game, Either[String, Message]](Left(s"Can't discard $name with Countess"))
}
Game.getPlayer(targetName.get).flatMap(pOption => {
pOption.map(p =>
if (p.isProtected) {
State.state[Game, Either[String, Message]](Left(s"${p.name} is protected"))
} else if (p.isEliminated) {
State.state[Game, Either[String, Message]](Left(s"${p.name} isn't in the match"))
} else {
//if you call this on yourself, you still have a prince in your hand, so remove that
val cardToDiscard = (if (p.hand.length > 1) p.hand.diff(Seq(Prince)) else p.hand).head
def discardThenDraw: State[Game, Either[String, Message]] = for {
discard <- Game.playerDiscard(p.name, cardToDiscard)
_ <- Game.drawFromDeckOrBurnCard(p.name)
} yield Right(s"${p.name} forced to discard a ${discard.head.name}")
def discardPrincess: State[Game, Either[String, Message]] = for {
discard <- Game.playerDiscard(p.name, cardToDiscard)
_ <- Game.eliminatePlayer(p.name, isEliminated = true)
} yield Right(s"${p.name} forced to discard a ${discard.head}. ${p.name} is eliminated")
if (Princess == cardToDiscard) discardPrincess else discardThenDraw
}
).getOrElse(State.state[Game, Either[String, Message]](Left(s"${targetName.get} isn't in the game!")))
})
}
}
case object King extends Card {
val value = 6
val name = "King"
val description = "Trade hands with another player of your choice."
val requiresTarget: Boolean = true
val requiresGuess: Boolean = false
override def doAction(discarder: Player, targetName: Option[String], guess: Option[Card] = None): State[Game, Either[String, Message]] = {
if (targetName.isEmpty) {
return Game.isEveryoneElseProtectedOrEliminated.map {
case true => Right(s"Everyone is safe, $name discarded with no effect")
case false => Left(s"A target must be specified")
}
}
if (discarder.hand.contains(Countess)) {
return State.state(Left(s"Can't discard $name with Countess")): State[Game, Either[String, Message]]
}
Game.getPlayer(targetName.get).flatMap(pOption => {
pOption.map(p => {
if (p.isProtected) {
State.state[Game, Either[String, Message]](Left(s"${p.name} is protected"))
} else if (p.isEliminated) {
State.state[Game, Either[String, Message]](Left(s"${p.name} isn't in the match"))
} else if (p.name == discarder.name) {
State.state[Game, Either[String, Message]](Left(s"You can't target yourself with King"))
} else {
val playerCard = discarder.hand.diff(Seq(King)).head
//discard hasn't been processed yet, so remove the king for the comparison
val targetCard = p.hand.head
for {
_ <- Game.updatePlayer(Some(p.copy(hand = Seq(playerCard))))
_ <- Game.updatePlayer(Some(discarder.copy(hand = Seq(King, targetCard))))
} yield Right(s"${discarder.name} switched hands with ${p.name}"): Either[String, Message]
}
}).getOrElse(State.state[Game, Either[String, Message]](Left(s"${targetName.get} isn't in the game!"): Either[String, Message]))
})
}
}
case object Countess extends Card {
val value = 7
val name = "Countess"
val description = "If you have this card and the King or Prince in your hand, you must discard this card."
val requiresTarget: Boolean = false
val requiresGuess: Boolean = false
override def doAction(discarder: Player, targetName: Option[String] = None, guess: Option[Card] = None): State[Game, Either[String, Message]] = {
State.state(Right("You discarded the Countess"))
}
}
case object Princess extends Card {
val value = 8
val name = "Princess"
val description = "If you discard this card, you are out of the round."
val requiresTarget: Boolean = false
val requiresGuess: Boolean = false
override def doAction(discarder: Player, targetName: Option[String] = None, guess: Option[Card] = None): State[Game, Either[String, Message]] = {
Game.eliminatePlayer(discarder.name, isEliminated = true).map(_ => Right(s"${discarder.name} discarded a $name and is eliminated"): Either[String, Message])
}
}
| tylerjromeo/love-letter-slack-commands | src/main/scala-2.11/org/romeo/loveletter/game/Deck.scala | Scala | mit | 11,519 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import java.util.concurrent.atomic.AtomicReference
import MessageRecorder.RecordedMessageEventFun
import MessageRecorder.ConcurrentMessageFiringFun
import org.scalatest.events.Location
import org.scalatest.Suite.getLineInFile
import org.scalatest.events.Event
import org.scalatest.events.RecordableEvent
import org.scalatest.events.NoteProvided
import org.scalatest.events.AlertProvided
import org.scalatest.events.NotificationEvent
/*
This is used by Suite and test informers created as tests run, which therefore have
their NameInfos defined and populated. These informers are returned by info in FunSuite and Spec,
or passed to test methods that take an Informer in Suite, for example. If called by the
thread that constructed them, which is the thread that was executing the suite and the tests
inside the suite, then that NameInfo should be propagated. However, if a test starts other
threads for a multi-threaded test, and those threads apply the Informer, then the NameInfo
should *not* be propagated, because otherwise it could become very confusing to figure out
what came from where in the report. Threads started by the test could outlast the thread
that was running the test, for example. There will be a thread-name, so they can use that
to figure out who sent what. And threads that call these informers will share a Tracker with
the thread that was running the tests, so they should be ordered close together after
sorting by Ordinal. But that's it. NameInfo only goes out when the thread running a test
or suite applies the Informer.
This in turn means that a reporter may get hit by multiple threads sending InfoProvided
messages. If run with the Runner, that will be OK, because DispatchReporter will be in front
serializing events with its actor. If run() is invoked directly on a suite instance, such as
from the Scala interpreter, then it may not work. I think I may just say that when running
from the interpreter, say with run(), you may get interleaved output. This would only happen
when doing a multi-threaded test that starts threads that calls informer methods, likely a
rare case. Also, in that case I think it is reasonable to say you may get interleaved output
in the interpreter, so if you don't like that, use the Runner.
*/
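// A rough sketch of the rule described above (illustrative only, not part of the original source;
// the suite name and messages are hypothetical):
//
//   class MySpec extends FunSuite {
//     test("example") {
//       info("from the test's own thread")                      // constructing thread: NameInfo propagated
//       new Thread(() => info("from a helper thread")).start()  // other thread: no NameInfo attached
//     }
//   }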
private[scalatest] abstract class ThreadAwareness {
private final val atomic = new AtomicReference[Thread](Thread.currentThread)
def isConstructingThread: Boolean = {
val constructingThread = atomic.get
Thread.currentThread == constructingThread
}
}
/*
private[scalatest] class ConcurrentMessageSender(fire: ConcurrentMessageFiringFun) extends ThreadAwareness {
/*
def apply(message: String) {
if (message == null)
throw new NullPointerException("message was null")
fire(message, None, isConstructingThread, getLineInFile(Thread.currentThread.getStackTrace, 2)) // Fire the info provided event using the passed function
}
*/
def apply(message: String, payload: Option[Any] = None) {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
fire(message, payload, isConstructingThread, getLineInFile(Thread.currentThread.getStackTrace, 2))
}
}
*/
private[scalatest] class ConcurrentInformer(fire: ConcurrentMessageFiringFun) extends ThreadAwareness with Informer {
def apply(message: String, payload: Option[Any] = None) = {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
fire(message, payload, isConstructingThread, getLineInFile(Thread.currentThread.getStackTrace, 2))
}
}
private[scalatest] object ConcurrentInformer {
def apply(fire: (String, Option[Any], Boolean, Option[Location]) => Unit) = new ConcurrentInformer(fire)
}
private[scalatest] class ConcurrentNotifier(fire: ConcurrentMessageFiringFun) extends ThreadAwareness with Notifier {
def apply(message: String, payload: Option[Any] = None) = {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
fire(message, payload, isConstructingThread, getLineInFile(Thread.currentThread.getStackTrace, 2))
}
}
private[scalatest] object ConcurrentNotifier {
def apply(fire: (String, Option[Any], Boolean, Option[Location]) => Unit) = new ConcurrentNotifier(fire)
}
private[scalatest] class ConcurrentAlerter(fire: ConcurrentMessageFiringFun) extends ThreadAwareness with Alerter {
def apply(message: String, payload: Option[Any] = None) = {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
fire(message, payload, isConstructingThread, getLineInFile(Thread.currentThread.getStackTrace, 2))
}
}
private[scalatest] object ConcurrentAlerter {
def apply(fire: (String, Option[Any], Boolean, Option[Location]) => Unit) = new ConcurrentAlerter(fire)
}
private[scalatest] class ConcurrentDocumenter(fire: ConcurrentMessageFiringFun) extends ThreadAwareness with Documenter {
def apply(text: String) = {
if (text == null)
throw new NullPointerException("text was null")
fire(text, None, isConstructingThread, getLineInFile(Thread.currentThread.getStackTrace, 2)) // Fire the info provided event using the passed function
}
}
private[scalatest] object ConcurrentDocumenter {
def apply(fire: (String, Option[Any], Boolean, Option[Location]) => Unit) = new ConcurrentDocumenter(fire)
}
//
// Params of the function are the string message, an optional payload, a boolean indicating this was from the
// current thread, booleans that indicate the message is about a pending or canceled test (in which case it
// would be printed out in yellow), and an optional location.
//
// This kind of informer is only used during the execution of tests, to delay the printing out of info's fired
// during tests until after the test succeeded, failed, or pending gets sent out.
//
private[scalatest] class MessageRecorder(dispatch: Reporter) extends ThreadAwareness {
private var messages: List[(String, Option[Any], RecordedMessageEventFun, Option[Location])] = List.empty
// Should only be called by the thread that constructed this
// ConcurrentInformer, because don't want to worry about synchronization here. Just send stuff from
// other threads whenever they come in. So only call record after first checking isConstructingThread
private def record(message: String, payload: Option[Any], eventFun: RecordedMessageEventFun, location: Option[Location]) {
require(isConstructingThread)
messages ::= (message, payload, eventFun, location)
}
// Returns them in order recorded
private def recordedMessages: List[(String, Option[Any], RecordedMessageEventFun, Option[Location])] = messages.reverse
def apply(message: String, payload: Option[Any], eventFun: RecordedMessageEventFun, location: Option[Location]) {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
if (isConstructingThread)
record(message, payload, eventFun, location)
else
dispatch(eventFun(message, payload, false, false, false, location)) // Fire the info provided event using the passed function
}
def recordedEvents(testWasPending: Boolean, testWasCanceled: Boolean): collection.immutable.IndexedSeq[RecordableEvent] = {
Vector.empty ++ recordedMessages.map { case (message, payload, eventFun, location) =>
eventFun(message, payload, true, testWasPending, testWasCanceled, location)
}
}
}
private[scalatest] class MessageRecordingInformer(recorder: MessageRecorder, eventFun: RecordedMessageEventFun) extends Informer {
def apply(message: String, payload: Option[Any]) {
recorder.apply(message, payload, eventFun, getLineInFile(Thread.currentThread.getStackTrace, 2))
}
}
private[scalatest] object MessageRecordingInformer {
def apply(recorder: MessageRecorder, eventFun: RecordedMessageEventFun) = new MessageRecordingInformer(recorder, eventFun)
}
private[scalatest] class MessageRecordingDocumenter(recorder: MessageRecorder, eventFun: RecordedMessageEventFun) extends Documenter {
def apply(message: String) {
recorder.apply(message, None, eventFun, getLineInFile(Thread.currentThread.getStackTrace, 2))
}
}
private[scalatest] object MessageRecordingDocumenter {
def apply(recorder: MessageRecorder, eventFun: RecordedMessageEventFun) = new MessageRecordingDocumenter(recorder, eventFun)
}
private[scalatest] object MessageRecorder {
// Params of the function are the string message, an optional payload, a boolean indicating this was from the
// current thread, two booleans that indicate the message is about a pending or canceled
// test (in which case it would be printed out in yellow), and an optional location.
type RecordedMessageEventFun = (String, Option[Any], Boolean, Boolean, Boolean, Option[Location]) => RecordableEvent
// Params of the function are the string message, an optional payload, a boolean indicating this was from the current thread,
// and an optional location.
type ConcurrentMessageFiringFun = (String, Option[Any], Boolean, Option[Location]) => Unit
}
// For path traits, need a message recording informer that only later gets
// (theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, theTest: TestLeaf, includeIcon: Boolean. thread: Thread)
private[scalatest] class PathMessageRecordingInformer(eventFun: (String, Option[Any], Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => RecordableEvent) extends ThreadAwareness with Informer {
import scala.collection.mutable.SynchronizedBuffer
import scala.collection.mutable.ArrayBuffer
type Tup = (String, Option[Any], Thread, Boolean)
private val messages = new ArrayBuffer[Tup] with SynchronizedBuffer[Tup]
// Should only be called by the thread that constructed this
// ConcurrentInformer, because don't want to worry about synchronization here. Just send stuff from
// other threads whenever they come in. So only call record after first checking isConstructingThread
// So now do have to worry about concurrency
private def record(message: String, payload: Option[Any]) {
messages += ((message, payload, Thread.currentThread, isConstructingThread))
}
def apply(message: String, payload: Option[Any] = None) {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
record(message, payload) // have to record all because of eager execution of tests in path traits
}
def recordedEvents(testWasPending: Boolean, theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, indentation: Int, includeIcon: Boolean): collection.immutable.IndexedSeq[RecordableEvent] = {
Vector.empty ++ messages.map { case (message, payload, thread, wasConstructingThread) =>
eventFun(message, payload, wasConstructingThread, testWasPending, theSuite, report, tracker, testName, indentation, includeIcon, thread)
}
}
}
private[scalatest] object PathMessageRecordingInformer {
def apply(eventFun: (String, Option[Any], Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => RecordableEvent) = new PathMessageRecordingInformer(eventFun)
}
private[scalatest] class PathMessageRecordingNotifier(eventFun: (String, Option[Any], Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => NoteProvided) extends ThreadAwareness with Notifier {
import scala.collection.mutable.SynchronizedBuffer
import scala.collection.mutable.ArrayBuffer
type Tup = (String, Option[Any], Thread, Boolean)
private val messages = new ArrayBuffer[Tup] with SynchronizedBuffer[Tup]
// Should only be called by the thread that constructed this
// ConcurrentNotifier, because don't want to worry about synchronization here. Just send stuff from
// other threads whenever they come in. So only call record after first checking isConstructingThread
// So now do have to worry about concurrency
private def record(message: String, payload: Option[Any]) {
messages += ((message, payload, Thread.currentThread, isConstructingThread))
}
def apply(message: String, payload: Option[Any] = None) {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
record(message, payload) // have to record all because of eager execution of tests in path traits
}
def recordedEvents(testWasPending: Boolean, theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, indentation: Int, includeIcon: Boolean): collection.immutable.IndexedSeq[NotificationEvent] = {
Vector.empty ++ messages.map { case (message, payload, thread, wasConstructingThread) =>
eventFun(message, payload, wasConstructingThread, testWasPending, theSuite, report, tracker, testName, indentation, includeIcon, thread)
}
}
}
private[scalatest] object PathMessageRecordingNotifier {
def apply(eventFun: (String, Option[Any], Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => NoteProvided) = new PathMessageRecordingNotifier(eventFun)
}
private[scalatest] class PathMessageRecordingAlerter(eventFun: (String, Option[Any], Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => AlertProvided) extends ThreadAwareness with Alerter {
import scala.collection.mutable.SynchronizedBuffer
import scala.collection.mutable.ArrayBuffer
type Tup = (String, Option[Any], Thread, Boolean)
private val messages = new ArrayBuffer[Tup] with SynchronizedBuffer[Tup]
// Should only be called by the thread that constructed this
// ConcurrentAlerter, because don't want to worry about synchronization here. Just send stuff from
// other threads whenever they come in. So only call record after first checking isConstructingThread
// So now do have to worry about concurrency
private def record(message: String, payload: Option[Any]) {
messages += ((message, payload, Thread.currentThread, isConstructingThread))
}
def apply(message: String, payload: Option[Any] = None) {
if (message == null)
throw new NullPointerException
if (payload == null)
throw new NullPointerException
record(message, payload) // have to record all because of eager execution of tests in path traits
}
def recordedEvents(testWasPending: Boolean, theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, indentation: Int, includeIcon: Boolean): collection.immutable.IndexedSeq[NotificationEvent] = {
Vector.empty ++ messages.map { case (message, payload, thread, wasConstructingThread) =>
eventFun(message, payload, wasConstructingThread, testWasPending, theSuite, report, tracker, testName, indentation, includeIcon, thread)
}
}
}
private[scalatest] object PathMessageRecordingAlerter {
def apply(eventFun: (String, Option[Any], Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => AlertProvided) = new PathMessageRecordingAlerter(eventFun)
}
private[scalatest] class PathMessageRecordingDocumenter(eventFun: (String, Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => RecordableEvent) extends ThreadAwareness with Documenter {
import scala.collection.mutable.SynchronizedBuffer
import scala.collection.mutable.ArrayBuffer
type Tup = (String, Thread, Boolean)
private val messages = new ArrayBuffer[Tup] with SynchronizedBuffer[Tup]
// Should only be called by the thread that constructed this
// ConcurrentDocumenter, because don't want to worry about synchronization here. Just send stuff from
// other threads whenever they come in. So only call record after first checking isConstructingThread
// So now do have to worry about concurrency
private def record(message: String) {
messages += ((message, Thread.currentThread, isConstructingThread))
}
def apply(message: String) {
if (message == null)
throw new NullPointerException
record(message) // have to record all because of eager execution of tests in path traits
}
def recordedEvents(testWasPending: Boolean, theSuite: Suite, report: Reporter, tracker: Tracker, testName: String, indentation: Int, includeIcon: Boolean): collection.immutable.IndexedSeq[RecordableEvent] = {
Vector.empty ++ messages.map { case (message, thread, wasConstructingThread) =>
eventFun(message, wasConstructingThread, testWasPending, theSuite, report, tracker, testName, indentation, includeIcon, thread)
}
}
}
private[scalatest] object PathMessageRecordingDocumenter {
def apply(eventFun: (String, Boolean, Boolean, Suite, Reporter, Tracker, String, Int, Boolean, Thread) => RecordableEvent) = new PathMessageRecordingDocumenter(eventFun)
}
| SRGOM/scalatest | scalatest/src/main/scala/org/scalatest/ConcurrentInformer.scala | Scala | apache-2.0 | 17,464 |
package qrhl.toplevel
import hashedcomputation.{Hash, HashTag, Hashable}
import qrhl.{State, SubgoalSelector}
import java.io.PrintWriter
import hashedcomputation.Implicits._
case class FocusCommand(selector: Option[SubgoalSelector], label: String) extends Command {
override protected def act(state: State, output: PrintWriter): State = {
state.focusOrUnfocus(selector, label)
}
override def hash: Hash[FocusCommand.this.type] = HashTag()(Hashable.hash(selector), Hashable.hash(label))
}
| dominique-unruh/qrhl-tool | src/main/scala/qrhl/toplevel/FocusCommand.scala | Scala | mit | 501 |
package org.genericConfig.admin.client.models
import org.genericConfig.admin.client.controllers.websocket.WebSocketListner
import org.genericConfig.admin.client.views.html.HtmlElementIds
import org.genericConfig.admin.client.views.user.{UpdateUserPage, UserPage}
import org.genericConfig.admin.shared.Actions
import org.genericConfig.admin.shared.config.{ConfigDTO, ConfigParamsDTO}
import org.genericConfig.admin.shared.user.{UserDTO, UserParamsDTO, UserUpdateDTO}
import org.scalajs.jquery.jQuery
import play.api.libs.json.Json
/**
* Copyright (C) 2016 Gennadi Heimann [email protected]
*
* Created by Gennadi Heimann 10.04.2020
*/
class User {
def showUser(param: Option[Any]): Unit = {
// new UserPage().drawUserPage(param.get.asInstanceOf[UserDTO])
new UserPage().drawUserWithConfigPage(userDTO = Some(param.get.asInstanceOf[UserDTO]))
}
def showUpdateUserPage(param : Option[Any]) : Unit = {
new UpdateUserPage().drawUpdateUserPage(param.get.asInstanceOf[UserDTO])
}
def updateUsername(param : Option[Any]): Unit = {
val updateUsername = Json.toJson(
UserDTO(
action = Actions.UPDATE_USER,
params = Some(UserParamsDTO(
username = "",
password = "",
update = Some(UserUpdateDTO(
oldUsername = param.get.asInstanceOf[UserDTO].result.get.username.get,
newUsername = jQuery(HtmlElementIds.inputFieldUpdateUsernameJQuery).value().toString,
oldPassword = "",
newPassword = ""
)),
)),
result = None
)
).toString
println("OUT -> " + updateUsername)
WebSocketListner.webSocket.send(updateUsername)
}
def deleteUser(param : Option[Any]): Unit = {
val deleteUsername = Json.toJson(
UserDTO(
action = Actions.DELETE_USER,
params = Some(UserParamsDTO(
username = param.get.asInstanceOf[UserDTO].result.get.username.get,
password = "",
update = None,
)),
result = None
)
).toString
println("OUT -> " + deleteUsername)
WebSocketListner.webSocket.send(deleteUsername)
}
}
| gennadij/admin | client/src/main/scala/org/genericConfig/admin/client/models/User.scala | Scala | apache-2.0 | 2,146 |
package com.avsystem.commons
package rpc
import com.avsystem.commons.meta._
import com.avsystem.commons.serialization.GenCodec
trait RPCFramework {
type RawValue
type Reader[T]
type Writer[T]
case class RawInvocation(@methodName rpcName: String, @multi args: List[RawValue])
object RawInvocation {
implicit def codec(implicit rawValueCodec: GenCodec[RawValue]): GenCodec[RawInvocation] = GenCodec.materialize
}
type RawRPC
val RawRPC: BaseRawRpcCompanion
trait BaseRawRpcCompanion extends RawRpcCompanion[RawRPC]
def read[T: Reader](raw: RawValue): T
def write[T: Writer](value: T): RawValue
implicit def readerBasedAsReal[T: Reader]: AsReal[RawValue, T] = read[T](_)
implicit def writerBasedAsRaw[T: Writer]: AsRaw[RawValue, T] = write[T](_)
type ParamTypeMetadata[T]
type ResultTypeMetadata[T]
type RPCMetadata[RealRPC]
val RPCMetadata: RpcMetadataCompanion[RPCMetadata]
type AsRawRPC[RealRPC] = AsRaw[RawRPC, RealRPC]
object AsRawRPC {
def apply[RealRPC](implicit asRawRPC: AsRawRPC[RealRPC]): AsRawRPC[RealRPC] = asRawRPC
}
/**
* Materializes a factory of implementations of [[RawRPC]] which translate invocations of its raw methods
* to invocations of actual methods on the wrapped real RPC implementation. Method arguments and results are serialized and deserialized
* from/to [[RawValue]] using [[Reader]] and [[Writer]] typeclasses.
*/
def materializeAsRaw[T]: AsRawRPC[T] = macro macros.rpc.RPCFrameworkMacros.asRawImpl[T]
type AsRealRPC[RealRPC] = AsReal[RawRPC, RealRPC]
object AsRealRPC {
@inline def apply[T](implicit asRealRPC: AsRealRPC[T]): AsRealRPC[T] = asRealRPC
}
/**
* Materializes a factory of implementations of `T` which are proxies that implement all abstract methods of `T`
* by forwarding them to `rawRpc`. Method arguments and results are serialized and deserialized
* from/to [[RawValue]] using [[Reader]] and [[Writer]] typeclasses.
*/
def materializeAsReal[T]: AsRealRPC[T] = macro macros.rpc.RPCFrameworkMacros.asRealImpl[T]
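// Hedged usage sketch (not from the original source; `MyRpc`, `realImpl` and `framework` are
// hypothetical names for an RPC trait, its implementation and a concrete RPCFramework instance):
//   val raw: framework.RawRPC = framework.materializeAsRaw[MyRpc].asRaw(realImpl)
//   val proxy: MyRpc = framework.materializeAsReal[MyRpc].asReal(raw)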
type AsRawRealRPC[RealRPC] = AsRawReal[RawRPC, RealRPC]
object AsRawRealRPC {
@inline def apply[RealRPC](implicit AsRawRealRPC: AsRawRealRPC[RealRPC]): AsRawRealRPC[RealRPC] = AsRawRealRPC
}
def materializeAsRawReal[T]: AsRawRealRPC[T] = macro macros.rpc.RPCFrameworkMacros.AsRawRealImpl[T]
trait Signature {
@reifyName def name: String
@multi @rpcParamMetadata def paramMetadata: List[ParamMetadata[_]]
@reifyAnnot
@multi def annotations: List[MetadataAnnotation]
}
case class ParamMetadata[T](
@reifyName name: String,
@reifyAnnot @multi annotations: List[MetadataAnnotation],
@infer typeMetadata: ParamTypeMetadata[T]
) extends TypedMetadata[T]
def materializeMetadata[RealRPC]: RPCMetadata[RealRPC] = macro macros.rpc.RPCFrameworkMacros.metadataImpl[RealRPC]
/**
* Base trait for traits or classes "implementing" [[FullRPCInfo]] in various RPC frameworks.
* Having a separate subtrait/subclass for every framework is beneficial for ScalaJS DCE.
*/
trait BaseFullRPCInfo[RealRPC] {
def asRealRPC: AsRealRPC[RealRPC]
def asRawRPC: AsRawRPC[RealRPC]
def metadata: RPCMetadata[RealRPC]
}
/**
* This type must be defined as trait or class by an [[RPCFramework]] in order to be able
* to use it's [[RPCCompanion]]. The fact that every [[RPCFramework]] may define its own trait or class for
* [[FullRPCInfo]] helps ScalaJS DCE distinguish between instances of [[AsRawRPC]], [[AsRealRPC]] and [[RPCMetadata]]
* for different frameworks and to get rid of unused instances.
*
* @example
* {{{
* object SomeRPCFramework extends RPCFramework {
* abstract class FullRPCInfo[RealRPC] extends BaseFullRPCInfo[RealRPC]
* ...
* }
* }}}
*/
type FullRPCInfo[RealRPC] <: BaseFullRPCInfo[RealRPC]
implicit def materializeFullInfo[T]: FullRPCInfo[T] = macro macros.rpc.RPCFrameworkMacros.fullInfoImpl[T]
/**
* Convenience abstract class for companion objects of RPC interfaces. Makes sure all three RPC type classes
* ([[AsRawRPC]], [[AsRealRPC]] and [[RPCMetadata]]) are macro-materialized for that RPC interface and confines
* macro materialization to the same compilation unit where the RPC interface is defined.
* This is a good practice to avoid incremental compilation problems and duplication of macro-generated code
* in various callsites. In order to be able to use [[RPCCompanion]], the RPC framework must define [[FullRPCInfo]]
* as a trait or class. Additionally, some special wizardry has been employed to make sure that when an RPC interface
* is a part of shared (cross-compiled) code of a ScalaJS application then ScalaJS optimizer can remove unused
* instances of macro generated typeclasses.
*
* @example
* {{{
* object SomeRPCFramework extends StandardRPCFramework { ... }
* trait SomeRPC {
* def doSomething(str: String): Unit
* def callSomething(int: Int): Future[String]
* }
* object SomeRPC extends SomeRPCFramework.RPCCompanion[SomeRPC]
* }}}
*/
abstract class RPCCompanion[RealRPC](implicit fri: FullRPCInfo[RealRPC]) {
final def fullRpcInfo: FullRPCInfo[RealRPC] = fri
// You would think: why the hell are these implicits defined as macros?
// Can't we just simply refer to members of `fullRpcInfo` in a regular method?
// We can, but this prevents ScalaJS optimizer's DCE from distinguishing between `FullRPCInfo` traits/classes
// of different RPC frameworks. This is important in cross-compiled code where any of these three typeclasses
// may be completely unused on the JS side and we want to make sure that DCE gets rid of them.
implicit def asRealRPC: AsRealRPC[RealRPC] = macro macros.rpc.RPCFrameworkMacros.typeClassFromFullInfo
implicit def asRawRPC: AsRawRPC[RealRPC] = macro macros.rpc.RPCFrameworkMacros.typeClassFromFullInfo
implicit def metadata: RPCMetadata[RealRPC] = macro macros.rpc.RPCFrameworkMacros.typeClassFromFullInfo
}
}
| AVSystem/scala-commons | commons-core/src/main/scala/com/avsystem/commons/rpc/RPCFramework.scala | Scala | mit | 6,081 |
package visceljs
import loci.registry.Registry
import org.scalajs.dom
import org.scalajs.dom.{Fetch, HttpMethod, RequestInit}
import rescala.default._
import rescala.extra.Tags._
import rescala.extra.lattices.IdUtil
import rescala.extra.lattices.IdUtil.Id
import scalatags.JsDom.implicits.stringFrag
import scalatags.JsDom.tags.{body, h1, p}
import visceljs.connection.{BookmarkManager, ContentConnectionManager, ServiceWorker}
import visceljs.render.{DetailsPage, ImagePage, OverviewPage, Snippets}
import scala.concurrent.Future
import scala.scalajs.concurrent.JSExecutionContext.Implicits.queue
import scala.scalajs.js
import scala.scalajs.js.typedarray.ArrayBuffer
case class MetaInfo(
version: String,
remoteVersion: Signal[String],
serviceState: Signal[String],
connection: Signal[Int],
reconnecting: Signal[Int]
)
object ViscelJS {
val baseurl = ""
def fetchbuffer(
endpoint: String,
method: HttpMethod = HttpMethod.GET,
body: Option[String] = None
): Future[ArrayBuffer] = {
val ri = js.Dynamic.literal(method = method).asInstanceOf[RequestInit]
body.foreach { content =>
ri.body = content
ri.headers = js.Dictionary("Content-Type" -> "application/json;charset=utf-8")
}
//authentication.foreach{ user =>
// if (js.isUndefined(ri.headers)) ri.headers = js.Dictionary.empty[String]
// ri.headers.asInstanceOf[js.Dictionary[String]]("Authorization") = s"Token ${user.token}"
//}
Fetch.fetch(baseurl + endpoint, ri).toFuture
.flatMap(_.arrayBuffer().toFuture)
}
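// Hedged example call (the endpoint and payload below are made up for illustration):
//   fetchbuffer("/api/contents", HttpMethod.POST, Some("""{"query":"all"}"""))
//     .foreach(buf => println(s"received ${buf.byteLength} bytes"))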
val replicaID: Id = IdUtil.genId()
def main(args: Array[String]): Unit = {
dom.document.body = body("loading data …").render
val swstate = ServiceWorker.register()
val registry = new Registry
val bookmarkManager = new BookmarkManager(registry)
val ccm = new ContentConnectionManager(registry)
ccm.autoreconnect()
val actions = new Actions(ccm, bookmarkManager)
//ccm.remoteVersion.observe{v =>
// if (!(v == "unknown" || v.startsWith("error")) && v != viscel.shared.Version.str) {
// ServiceWorker.unregister().andThen(_ => dom.window.location.reload(true))
// }
//}
val meta = MetaInfo(viscel.shared.Version.str, ccm.remoteVersion, swstate, ccm.connectionStatus, ccm.reconnecting)
val index = new OverviewPage(meta, actions, bookmarkManager.bookmarks, ccm.descriptions)
val front = new DetailsPage(actions)
val view = new ImagePage(actions)
val app =
new ReaderApp(content = ccm.content, descriptions = ccm.descriptions, bookmarks = bookmarkManager.bookmarks)
val bodySig = app.makeBody(index, front, view)
val metaLoading = Snippets.meta(meta)
def loading =
body(
h1("This is basically a loading screen"),
p(
"However, this does not necessarily refresh by itself, try reloading at some point. If that does not help, there may just be nothing here."
),
metaLoading.asModifier
)
val bodyParent = dom.document.body.parentElement
bodyParent.removeChild(dom.document.body)
import rescala.extra.Tags._
bodySig.map {
case Some(body) => body
case None => loading
}.recover { error =>
error.printStackTrace(System.err)
body(h1("An error occurred"), p(error.toString), metaLoading.asModifier)
}.asModifier.applyTo(bodyParent)
}
}
| rmgk/viscel | code/js/src/main/scala/visceljs/ViscelJS.scala | Scala | agpl-3.0 | 3,431 |
package com.twitter.finatra.http.modules
import com.twitter.inject.TwitterModule
object DocRootModule extends TwitterModule {
// Only one of these flags should ever be set to a non-empty string as
// these flags are mutually exclusive. Setting both will result in an error.
val localDocRoot = flag("local.doc.root", "", "File serving directory for local development")
val docRoot = flag("doc.root", "", "File serving directory/namespace for classpath resources")
}
| joecwu/finatra | http/src/main/scala/com/twitter/finatra/http/modules/DocRootModule.scala | Scala | apache-2.0 | 473 |
package com.tuzhucheng.chat4s
import util.Properties
import org.http4s.server.jetty.JettyBuilder
object Chat extends App {
val port = Properties.envOrElse("PORT", "8080").toInt
println("Starting on port " + port)
JettyBuilder.bindHttp(port)
.mountService(ChatService.service, "/chat4s")
.run
.awaitShutdown()
}
| tuzhucheng/chat4s | src/main/scala/com/tuzhucheng/chat4s/Chat.scala | Scala | mit | 331 |
package com.arcusys.valamis.lesson.scorm.service.serializer
import com.arcusys.valamis.lesson.scorm.model.manifest.Activity
import com.arcusys.valamis.util.TreeNode
object OrganizationsGenerator {
import AttributeImplicits._
def toXML(organizations: Seq[TreeNode[Activity]], defaultOrganization: Option[String]) = (
<organizations>
{ for (organization <- organizations) yield serializeSingleOrganization(organization) }
</organizations>
) % ("default" -> defaultOrganization)
def serializeSingleOrganization(organization: TreeNode[Activity]) =
<organization identifier={ organization.item.id } structure="hierarchical">
<title>{ organization.item.title }</title>
{ ActivitiesGenerator.toXML(organization.children) }
<imsss:sequencing>
<imsss:controlMode flow="true"/>
</imsss:sequencing>
</organization>
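// For reference, a single serialized organization is shaped roughly like the following
// (identifier and title are illustrative; the real values come from the Activity):
//   <organization identifier="ORG-1" structure="hierarchical">
//     <title>Course title</title>
//     <!-- item elements produced by ActivitiesGenerator -->
//     <imsss:sequencing><imsss:controlMode flow="true"/></imsss:sequencing>
//   </organization>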
}
| ViLPy/Valamis | valamis-scorm-lesson/src/main/scala/com/arcusys/valamis/lesson/scorm/service/serializer/OrganizationsGenerator.scala | Scala | lgpl-3.0 | 874 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.utils.stats
import java.lang.{Double => jDouble, Float => jFloat, Long => jLong}
import java.util.Date
import com.vividsolutions.jts.geom.Geometry
import org.geotools.feature.simple.SimpleFeatureBuilder
import org.junit.runner.RunWith
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
import org.locationtech.geomesa.utils.text.WKTUtils
import org.opengis.feature.simple.SimpleFeature
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class HistogramTest extends Specification with StatTestHelper {
def createStat[T](attribute: String, bins: Int, min: String, max: String, observe: Boolean): Histogram[T] = {
val s = Stat(sft, s"Histogram($attribute,$bins,'$min','$max')")
if (observe) {
features.foreach { s.observe }
}
s.asInstanceOf[Histogram[T]]
}
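// The descriptor passed to Stat above is a plain string spec; for example the call
//   Stat(sft, "Histogram(intAttr,20,'0','199')")
// builds the same histogram that intStat(20, 0, 199) below requests (bounds taken from these tests).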
def stringStat(bins: Int, min: String, max: String, observe: Boolean = true) =
createStat[String]("strAttr", bins, min, max, observe)
def intStat(bins: Int, min: Int, max: Int, observe: Boolean = true) =
createStat[Integer]("intAttr", bins, min.toString, max.toString, observe)
def longStat(bins: Int, min: Long, max: Long, observe: Boolean = true) =
createStat[jLong]("longAttr", bins, min.toString, max.toString, observe)
def floatStat(bins: Int, min: Float, max: Float, observe: Boolean = true) =
createStat[jFloat]("floatAttr", bins, min.toString, max.toString, observe)
def doubleStat(bins: Int, min: Double, max: Double, observe: Boolean = true) =
createStat[jDouble]("doubleAttr", bins, min.toString, max.toString, observe)
def dateStat(bins: Int, min: String, max: String, observe: Boolean = true) =
createStat[Date]("dtg", bins, min, max, observe)
def geomStat(bins: Int, min: String, max: String, observe: Boolean = true) =
createStat[Geometry]("geom", bins, min, max, observe)
def toDate(string: String) = java.util.Date.from(java.time.LocalDateTime.parse(string, GeoToolsDateFormat).toInstant(java.time.ZoneOffset.UTC))
def toGeom(string: String) = WKTUtils.read(string)
"RangeHistogram stat" should {
"work with strings" >> {
"be empty initially" >> {
val stat = stringStat(20, "abc000", "abc200", observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 20
stat.bounds mustEqual ("abc000", "abc200")
forall(0 until 20)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = stringStat(36, "abc000", "abc099")
stat.isEmpty must beFalse
stat.length mustEqual 36
(0 until 36).map(stat.count).sum mustEqual 100
}
"serialize and deserialize" >> {
val stat = stringStat(20, "abc000", "abc200")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[String]]
unpacked.asInstanceOf[Histogram[String]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[String]].attribute mustEqual stat.attribute
unpacked.asInstanceOf[Histogram[String]].toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = stringStat(20, "abc000", "abc200", observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[String]]
unpacked.asInstanceOf[Histogram[String]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[String]].attribute mustEqual stat.attribute
unpacked.asInstanceOf[Histogram[String]].toJson mustEqual stat.toJson
}
"deserialize as immutable value" >> {
val stat = stringStat(20, "abc000", "abc200")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed, immutable = true)
unpacked must beAnInstanceOf[Histogram[String]]
unpacked.asInstanceOf[Histogram[String]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[String]].attribute mustEqual stat.attribute
unpacked.asInstanceOf[Histogram[String]].toJson mustEqual stat.toJson
unpacked.clear must throwAn[Exception]
unpacked.+=(stat) must throwAn[Exception]
unpacked.observe(features.head) must throwAn[Exception]
unpacked.unobserve(features.head) must throwAn[Exception]
}
"combine two RangeHistograms" >> {
val stat = stringStat(36, "abc000", "abc099")
val stat2 = stringStat(36, "abc100", "abc199", observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 36
(0 until 36).map(stat2.count).sum mustEqual 100
stat += stat2
stat.length mustEqual 36
(0 until 36).map(stat.count).sum mustEqual 200
stat2.length mustEqual 36
(0 until 36).map(stat2.count).sum mustEqual 100
}
"combine two RangeHistograms with empty values" >> {
val stat = stringStat(100, "0", "z", observe = false)
val stat2 = stringStat(100, "alpha", "gamma", observe = false)
stat.bins.add("0")
stat2.bins.add("alpha")
stat2.bins.add("beta")
stat2.bins.add("gamma")
stat2.bins.add("cappa")
stat2 += stat
stat2.bounds mustEqual ("00000", "gamma")
}
"clear" >> {
val stat = stringStat(20, "abc000", "abc200")
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 0)
}
}
"work with integers" >> {
"be empty initially" >> {
val stat = intStat(20, 0, 199, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
forall(0 until 20)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = intStat(20, 0, 199)
stat.isEmpty must beFalse
stat.length mustEqual 20
forall(0 until 10)(stat.count(_) mustEqual 10)
forall(10 until 20)(stat.count(_) mustEqual 0)
}
"correctly remove values" >> {
val stat = intStat(20, 0, 199)
stat.isEmpty must beFalse
stat.length mustEqual 20
forall(0 until 10)(stat.count(_) mustEqual 10)
forall(10 until 20)(stat.count(_) mustEqual 0)
features.take(50).foreach(stat.unobserve)
forall(5 until 10)(stat.count(_) mustEqual 10)
forall((0 until 5) ++ (10 until 20))(stat.count(_) mustEqual 0)
}
"serialize and deserialize" >> {
val stat = intStat(20, 0, 199)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[Integer]]
unpacked.asInstanceOf[Histogram[Integer]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[Integer]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = intStat(20, 0, 199, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[Integer]]
unpacked.asInstanceOf[Histogram[Integer]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[Integer]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = intStat(20, 0, 199)
val stat2 = intStat(20, 0, 199, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 20
forall(0 until 10)(stat2.count(_) mustEqual 0)
forall(10 until 20)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 10)
stat2.length mustEqual 20
forall(0 until 10)(stat2.count(_) mustEqual 0)
forall(10 until 20)(stat2.count(_) mustEqual 10)
}
"combine two RangeHistograms with different bounds" >> {
val stat = intStat(20, 0, 99)
val stat2 = intStat(20, 100, 199, observe = false)
features2.foreach { stat2.observe }
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 5)
stat2.length mustEqual 20
forall(0 until 20)(stat2.count(_) mustEqual 5)
stat += stat2
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
forall(0 until 20)(stat.count(_) mustEqual 10)
}
"combine two RangeHistograms with different lengths" >> {
val stat = intStat(20, 0, 199)
val stat2 = intStat(10, 0, 199, observe = false)
features2.foreach { stat2.observe }
stat.length mustEqual 20
forall(0 until 10)(stat.count(_) mustEqual 10)
forall(10 until 20)(stat.count(_) mustEqual 0)
stat2.length mustEqual 10
forall(0 until 5)(stat2.count(_) mustEqual 0)
forall(5 until 10)(stat2.count(_) mustEqual 20)
stat += stat2
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
forall(0 until 20)(stat.count(_) mustEqual 10)
}
"combine two RangeHistograms with empty values" >> {
val stat = intStat(20, -100, 300)
val stat2 = intStat(20, 50, 249, observe = false)
features2.foreach { stat2.observe }
stat.length mustEqual 20
forall((0 until 5) ++ (10 until 20))(stat.count(_) mustEqual 0)
forall(5 until 10)(stat.count(_) mustEqual 20)
stat2.length mustEqual 20
forall((0 until 5) ++ (15 until 20))(stat2.count(_) mustEqual 0)
forall(5 until 15)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 20
stat.bounds mustEqual (0, 199)
stat.bins.counts mustEqual Array(6, 8, 12, 8, 12, 8, 12, 8, 12, 8, 16, 10, 10, 10, 10, 10, 10, 10, 10, 10)
(0 until stat.length).map(stat.count).sum mustEqual 200
}
"clear" >> {
val stat = intStat(20, 0, 199)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 20
forall(0 until 20)(stat.count(_) mustEqual 0)
}
}
"work with longs" >> {
"be empty initially" >> {
val stat = longStat(10, 0, 99, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 10
stat.bounds mustEqual (0, 99)
forall(0 until 10)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = longStat(10, 0, 99)
stat.isEmpty must beFalse
stat.length mustEqual 10
stat.bounds mustEqual (0, 99)
forall(0 until 10)(stat.count(_) mustEqual 10)
}
"serialize and deserialize" >> {
val stat = longStat(7, 90, 110)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jLong]]
unpacked.asInstanceOf[Histogram[jLong]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jLong]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = longStat(7, 90, 110, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jLong]]
unpacked.asInstanceOf[Histogram[jLong]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jLong]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = longStat(10, 0, 99)
val stat2 = longStat(10, 100, 199, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 10
stat.bounds mustEqual (0, 199)
forall(0 until 10)(stat.count(_) mustEqual 20)
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
}
"clear" >> {
val stat = longStat(7, 90, 110)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 7
forall(0 until 7)(stat.count(_) mustEqual 0)
}
}
"work with floats" >> {
"be empty initially" >> {
val stat = floatStat(7, 90, 110, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 7
stat.bounds mustEqual (90f, 110f)
forall(0 until 7)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = floatStat(10, 0, 100)
stat.isEmpty must beFalse
stat.length mustEqual 10
stat.bounds mustEqual (0f, 100f)
forall(0 until 10)(stat.count(_) mustEqual 10)
}
"serialize and deserialize" >> {
val stat = floatStat(7, 90, 110)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jFloat]]
unpacked.asInstanceOf[Histogram[jFloat]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jFloat]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = floatStat(7, 90, 110, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jFloat]]
unpacked.asInstanceOf[Histogram[jFloat]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jFloat]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = floatStat(10, 0, 100)
val stat2 = floatStat(10, 100, 200, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 10
stat2.bounds mustEqual (100f, 200f)
forall(0 until 10)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 10
stat.count(0) mustEqual 15
forall(1 until 9)(stat.count(_) mustEqual 20)
stat.count(9) mustEqual 25
stat2.length mustEqual 10
stat2.bounds mustEqual (100f, 200f)
forall(0 until 10)(stat2.count(_) mustEqual 10)
}
"clear" >> {
val stat = floatStat(7, 90, 110)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 7
forall(0 until 7)(stat.count(_) mustEqual 0)
}
}
"work with doubles" >> {
"be empty initially" >> {
val stat = doubleStat(7, 90, 110, observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 7
stat.bounds mustEqual (90.0, 110.0)
forall(0 until 7)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = doubleStat(10, 0, 99)
stat.isEmpty must beFalse
stat.length mustEqual 10
stat.bounds mustEqual (0.0, 99.0)
forall(0 until 10)(stat.count(_) mustEqual 10)
}
"serialize and deserialize" >> {
val stat = doubleStat(7, 90, 110)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = doubleStat(7, 90, 110, observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = doubleStat(10, 0, 100)
val stat2 = doubleStat(10, 100, 200, observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 10
stat.bounds mustEqual (0.0, 200.0)
stat.count(0) mustEqual 15
forall(1 until 9)(stat.count(_) mustEqual 20)
stat.count(9) mustEqual 25
(0 until 10).map(stat.count).sum mustEqual 200
stat2.length mustEqual 10
forall(0 until 10)(stat2.count(_) mustEqual 10)
}
"clear" >> {
val stat = doubleStat(7, 90, 110)
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 7
forall(0 until 7)(stat.count(_) mustEqual 0)
}
}
"work with dates" >> {
"be empty initially" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z", observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 24
stat.bounds mustEqual (toDate("2012-01-01T00:00:00.000Z"), toDate("2012-01-03T00:00:00.000Z"))
forall(0 until 24)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
stat.isEmpty must beFalse
stat.length mustEqual 24
stat.bounds mustEqual (toDate("2012-01-01T00:00:00.000Z"), toDate("2012-01-03T00:00:00.000Z"))
forall(0 until 2)(stat.count(_) mustEqual 10)
forall(2 until 12)(stat.count(_) mustEqual 8)
forall(12 until 24)(stat.count(_) mustEqual 0)
}
"serialize and deserialize" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[Date]]
unpacked.asInstanceOf[Histogram[Date]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[Date]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z", observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
val stat2 = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z", observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 24
forall(0 until 12)(stat2.count(_) mustEqual 0)
forall((12 until 14) ++ (16 until 24))(stat2.count(_) mustEqual 8)
forall(15 until 16)(stat2.count(_) mustEqual 10)
stat += stat2
stat.length mustEqual 24
forall((0 until 2) ++ (15 until 16))(stat.count(_) mustEqual 10)
forall((2 until 14) ++ (16 until 24))(stat.count(_) mustEqual 8)
stat2.length mustEqual 24
forall(0 until 12)(stat2.count(_) mustEqual 0)
forall((12 until 14) ++ (16 until 24))(stat2.count(_) mustEqual 8)
forall(15 until 16)(stat2.count(_) mustEqual 10)
}
"combine two RangeHistograms with weekly splits" >> {
// simulates the way date histograms will be gathered as we track stats dynamically
val stat = dateStat(4, "2012-01-01T00:00:00.000Z", "2012-01-28T23:59:59.999Z", observe = false)
val stat2 = dateStat(5, "2012-01-01T00:00:00.000Z", "2012-02-04T23:59:59.999Z", observe = false)
def newSF(dtg: String): SimpleFeature = {
val sf = SimpleFeatureBuilder.build(sft, Array[AnyRef](), "")
sf.setAttribute("dtg", dtg)
sf
}
( 1 to 28).foreach { i => stat.observe(newSF(f"2012-01-$i%02dT12:00:00.000Z")) }
(29 to 31).foreach { i => stat2.observe(newSF(f"2012-01-$i%02dT12:00:00.000Z")) }
( 1 to 4).foreach { i => stat2.observe(newSF(f"2012-02-$i%02dT12:00:00.000Z")) }
stat.length mustEqual 4
forall(0 until 4)(stat.count(_) mustEqual 7)
stat2.length mustEqual 5
forall(0 until 4)(stat2.count(_) mustEqual 0)
stat2.count(4) mustEqual 7
stat += stat2
stat.length mustEqual 5
forall(0 until 5)(stat.count(_) mustEqual 7)
stat2.length mustEqual 5
forall(0 until 4)(stat2.count(_) mustEqual 0)
stat2.count(4) mustEqual 7
}
"clear" >> {
val stat = dateStat(24, "2012-01-01T00:00:00.000Z", "2012-01-03T00:00:00.000Z")
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 24
forall(0 until 24)(stat.count(_) mustEqual 0)
}
}
"work with geometries" >> {
"be empty initially" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)", observe = false)
stat.isEmpty must beTrue
stat.length mustEqual 32
stat.bounds mustEqual (toGeom("POINT(-180 -90)"), toGeom("POINT(180 90)"))
forall(0 until 32)(stat.count(_) mustEqual 0)
}
"correctly bin values" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
stat.isEmpty must beFalse
stat.length mustEqual 32
stat.bounds mustEqual (toGeom("POINT(-180 -90)"), toGeom("POINT(180 90)"))
stat.count(18) mustEqual 45
stat.count(19) mustEqual 44
stat.count(20) mustEqual 9
stat.count(22) mustEqual 1
stat.count(24) mustEqual 1
forall((0 until 18) ++ Seq(21, 23) ++ (25 until 32))(stat.count(_) mustEqual 0)
}
"serialize and deserialize" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"serialize and deserialize empty stats" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)", observe = false)
val packed = StatSerializer(sft).serialize(stat)
val unpacked = StatSerializer(sft).deserialize(packed)
unpacked must beAnInstanceOf[Histogram[jDouble]]
unpacked.asInstanceOf[Histogram[jDouble]].length mustEqual stat.length
unpacked.asInstanceOf[Histogram[jDouble]].attribute mustEqual stat.attribute
unpacked.toJson mustEqual stat.toJson
}
"combine two RangeHistograms" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
val stat2 = geomStat(32, "POINT(-180 -90)", "POINT(180 90)", observe = false)
features2.foreach { stat2.observe }
stat2.length mustEqual 32
stat2.count(25) mustEqual 10
stat2.count(27) mustEqual 20
stat2.count(30) mustEqual 46
stat2.count(31) mustEqual 24
forall((0 until 25) ++ Seq(26, 28, 29))(stat2.count(_) mustEqual 0)
stat += stat2
stat.count(18) mustEqual 45
stat.count(19) mustEqual 44
stat.count(20) mustEqual 9
stat.count(22) mustEqual 1
stat.count(24) mustEqual 1
stat.count(25) mustEqual 10
stat.count(27) mustEqual 20
stat.count(30) mustEqual 46
stat.count(31) mustEqual 24
forall((0 until 18) ++ Seq(21, 23, 26, 28, 29))(stat.count(_) mustEqual 0)
stat2.length mustEqual 32
stat2.count(25) mustEqual 10
stat2.count(27) mustEqual 20
stat2.count(30) mustEqual 46
stat2.count(31) mustEqual 24
forall((0 until 25) ++ Seq(26, 28, 29))(stat2.count(_) mustEqual 0)
}
"merge high-precision points" >> {
val fromBounds =
(WKTUtils.read("POINT (-91.7467224461 40.6750300641)"), WKTUtils.read("POINT (-91.723442566 40.691904323)"))
val toBounds =
(WKTUtils.read("POINT (-91.7467224461 40.6750300641)"), WKTUtils.read("POINT (-91.7186474559 40.6933565934)"))
val from = new BinnedGeometryArray(10000, fromBounds)
val to = new BinnedGeometryArray(10000, toBounds)
(0 until 10000).foreach(i => from.counts(i) = 1)
Histogram.copyInto(to, from) must not(throwAn[Exception])
}
"clear" >> {
val stat = geomStat(32, "POINT(-180 -90)", "POINT(180 90)")
stat.clear()
stat.isEmpty must beTrue
stat.length mustEqual 32
forall(0 until 32)(stat.count(_) mustEqual 0)
}
}
}
}
| MutahirKazmi/geomesa | geomesa-utils/src/test/scala/org/locationtech/geomesa/utils/stats/HistogramTest.scala | Scala | apache-2.0 | 26,350 |
package models
import java.util.{Date}
import play.api.db._
import play.api.Play.current
import anorm._
import anorm.SqlParser._
import scala.language.postfixOps
case class Task(id: Pk[Long], folder: String, project: Long, title: String, done: Boolean, dueDate: Option[Date], assignedTo: Option[String])
object Task {
// -- Parsers
/**
* Parse a Task from a ResultSet
*/
val simple = {
get[Pk[Long]]("task.id") ~
get[String]("task.folder") ~
get[Long]("task.project") ~
get[String]("task.title") ~
get[Boolean]("task.done") ~
get[Option[Date]]("task.due_date") ~
get[Option[String]]("task.assigned_to") map {
case id~folder~project~title~done~dueDate~assignedTo => Task(
id, folder, project, title, done, dueDate, assignedTo
)
}
}
// -- Queries
/**
* Retrieve a Task from the id.
*/
def findById(id: Long): Option[Task] = {
DB.withConnection { implicit connection =>
SQL("select * from task where id = {id}").on(
'id -> id
).as(Task.simple.singleOpt)
}
}
/**
* Retrieve todo tasks for the user.
*/
def findTodoInvolving(user: String): Seq[(Task,Project)] = {
DB.withConnection { implicit connection =>
SQL(
"""
select * from task
join project_member on project_member.project_id = task.project
join project on project.id = project_member.project_id
where task.done = false and project_member.user_email = {email}
"""
).on(
'email -> user
).as(Task.simple ~ Project.simple map {
case task~project => task -> project
} *)
}
}
/**
* Find tasks related to a project
*/
def findByProject(project: Long): Seq[Task] = {
DB.withConnection { implicit connection =>
SQL(
"""
select * from task
where task.project = {project}
"""
).on(
'project -> project
).as(Task.simple *)
}
}
/**
* Delete a task
*/
def delete(id: Long) {
DB.withConnection { implicit connection =>
SQL("delete from task where id = {id}").on(
'id -> id
).executeUpdate()
}
}
/**
   * Delete all tasks in a folder.
*/
def deleteInFolder(projectId: Long, folder: String) {
DB.withConnection { implicit connection =>
SQL("delete from task where project = {project} and folder = {folder}").on(
'project -> projectId, 'folder -> folder
).executeUpdate()
}
}
/**
* Mark a task as done or not
*/
def markAsDone(taskId: Long, done: Boolean) {
DB.withConnection { implicit connection =>
SQL("update task set done = {done} where id = {id}").on(
'id -> taskId,
'done -> done
).executeUpdate()
}
}
/**
* Rename a folder.
*/
def renameFolder(projectId: Long, folder: String, newName: String) {
DB.withConnection { implicit connection =>
SQL("update task set folder = {newName} where folder = {name} and project = {project}").on(
'project -> projectId, 'name -> folder, 'newName -> newName
).executeUpdate()
}
}
/**
* Check if a user is the owner of this task
*/
def isOwner(task: Long, user: String): Boolean = {
DB.withConnection { implicit connection =>
SQL(
"""
select count(task.id) = 1 from task
join project on task.project = project.id
join project_member on project_member.project_id = project.id
where project_member.user_email = {email} and task.id = {task}
"""
).on(
'task -> task,
'email -> user
).as(scalar[Boolean].single)
}
}
/**
* Create a Task.
*/
def create(task: Task): Task = {
DB.withConnection { implicit connection =>
// Get the task id
val id: Long = task.id.getOrElse {
SQL("select next value for task_seq").as(scalar[Long].single)
}
SQL(
"""
insert into task values (
{id}, {title}, {done}, {dueDate}, {assignedTo}, {project}, {folder}
)
"""
).on(
'id -> id,
'folder -> task.folder,
'project -> task.project,
'title -> task.title,
'done -> task.done,
'dueDate -> task.dueDate,
'assignedTo -> task.assignedTo
).executeUpdate()
task.copy(id = Id(id))
}
}
}
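// A minimal usage sketch, not part of the original sample: it assumes the surrounding Play
// application provides the default database connection, and the project id and primary key below
// are illustrative only. It simply shows how the helpers in `Task` compose.
object TaskUsageExample {
  def run() {
    val id = 1000L // hypothetical primary key; normally obtained from task_seq by Task.create
    Task.create(Task(Id(id), "Todo", 1L, "Write the release notes", false, None, Some("[email protected]")))
    Task.markAsDone(id, done = true)
    Task.findByProject(1L).foreach(t => println(t.title + " done=" + t.done))
    Task.delete(id)
  }
}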
| 166yuan/play2sae | samples/zentasks/app/models/Task.scala | Scala | apache-2.0 | 4,456 |
import org.specs2.mutable._
import org.specs2.runner._
import org.junit.runner._
import play.api.test._
import play.api.test.Helpers._
/**
* add your integration spec here.
* An integration test will fire up a whole play application in a real (or headless) browser
@RunWith(classOf[JUnitRunner])
class IntegrationSpec extends Specification {
"Application" should {
"work from within a browser" in new WithBrowser {
browser.goTo("http://localhost:" + port)
browser.pageSource must contain("Your new application is ready.")
}
}
}
*/
| hadesgames/online-pdf-cropper | play/test/IntegrationSpec.scala | Scala | mit | 563 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.{Locale, ServiceConfigurationError, ServiceLoader}
import scala.collection.JavaConverters._
import scala.language.{existentials, implicitConversions}
import scala.util.{Failure, Success, Try}
import org.apache.hadoop.fs.Path
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.analysis.UnresolvedAttribute
import org.apache.spark.sql.catalyst.catalog.{BucketSpec, CatalogStorageFormat, CatalogTable, CatalogUtils}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.datasources.csv.CSVFileFormat
import org.apache.spark.sql.execution.datasources.jdbc.JdbcRelationProvider
import org.apache.spark.sql.execution.datasources.json.JsonFileFormat
import org.apache.spark.sql.execution.datasources.orc.OrcFileFormat
import org.apache.spark.sql.execution.datasources.parquet.ParquetFileFormat
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{RateStreamProvider, TextSocketSourceProvider}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.{CalendarIntervalType, StructField, StructType}
import org.apache.spark.sql.util.SchemaUtils
import org.apache.spark.util.Utils
/**
* The main class responsible for representing a pluggable Data Source in Spark SQL. In addition to
* acting as the canonical set of parameters that can describe a Data Source, this class is used to
* resolve a description to a concrete implementation that can be used in a query plan
* (either batch or streaming) or to write out data using an external library.
*
* From an end user's perspective a DataSource description can be created explicitly using
* [[org.apache.spark.sql.DataFrameReader]] or CREATE TABLE USING DDL. Additionally, this class is
* used when resolving a description from a metastore to a concrete implementation.
*
* Many of the arguments to this class are optional, though depending on the specific API being used
* these optional arguments might be filled in during resolution using either inference or external
* metadata. For example, when reading a partitioned table from a file system, partition columns
* will be inferred from the directory layout even if they are not specified.
*
* @param paths A list of file system paths that hold data. These will be globbed before and
* qualified. This option only works when reading from a [[FileFormat]].
* @param userSpecifiedSchema An optional specification of the schema of the data. When present
* we skip attempting to infer the schema.
* @param partitionColumns A list of column names that the relation is partitioned by. This list is
* generally empty during the read path, unless this DataSource is managed
* by Hive. In these cases, during `resolveRelation`, we will call
* `getOrInferFileFormatSchema` for file based DataSources to infer the
* partitioning. In other cases, if this list is empty, then this table
* is unpartitioned.
* @param bucketSpec An optional specification for bucketing (hash-partitioning) of the data.
* @param catalogTable Optional catalog table reference that can be used to push down operations
* over the datasource to the catalog service.
*/
case class DataSource(
sparkSession: SparkSession,
className: String,
paths: Seq[String] = Nil,
userSpecifiedSchema: Option[StructType] = None,
partitionColumns: Seq[String] = Seq.empty,
bucketSpec: Option[BucketSpec] = None,
options: Map[String, String] = Map.empty,
catalogTable: Option[CatalogTable] = None) extends Logging {
case class SourceInfo(name: String, schema: StructType, partitionColumns: Seq[String])
lazy val providingClass: Class[_] =
DataSource.lookupDataSource(className, sparkSession.sessionState.conf)
lazy val sourceInfo: SourceInfo = sourceSchema()
private val caseInsensitiveOptions = CaseInsensitiveMap(options)
private val equality = sparkSession.sessionState.conf.resolver
bucketSpec.map { bucket =>
SchemaUtils.checkColumnNameDuplication(
bucket.bucketColumnNames, "in the bucket definition", equality)
SchemaUtils.checkColumnNameDuplication(
bucket.sortColumnNames, "in the sort definition", equality)
}
/**
* Get the schema of the given FileFormat, if provided by `userSpecifiedSchema`, or try to infer
   * it. In the read path, only tables managed by Hive provide the partition columns properly when
* initializing this class. All other file based data sources will try to infer the partitioning,
* and then cast the inferred types to user specified dataTypes if the partition columns exist
* inside `userSpecifiedSchema`, otherwise we can hit data corruption bugs like SPARK-18510.
   * This method will try to skip file scanning when `userSpecifiedSchema` and
* `partitionColumns` are provided. Here are some code paths that use this method:
* 1. `spark.read` (no schema): Most amount of work. Infer both schema and partitioning columns
* 2. `spark.read.schema(userSpecifiedSchema)`: Parse partitioning columns, cast them to the
* dataTypes provided in `userSpecifiedSchema` if they exist or fallback to inferred
* dataType if they don't.
* 3. `spark.readStream.schema(userSpecifiedSchema)`: For streaming use cases, users have to
* provide the schema. Here, we also perform partition inference like 2, and try to use
* dataTypes in `userSpecifiedSchema`. All subsequent triggers for this stream will re-use
* this information, therefore calls to this method should be very cheap, i.e. there won't
* be any further inference in any triggers.
*
* @param format the file format object for this DataSource
* @param fileIndex optional [[InMemoryFileIndex]] for getting partition schema and file list
* @return A pair of the data schema (excluding partition columns) and the schema of the partition
* columns.
*/
private def getOrInferFileFormatSchema(
format: FileFormat,
fileIndex: Option[InMemoryFileIndex] = None): (StructType, StructType) = {
    // The operations below are expensive, so try not to do them if we don't need to. For example,
    // in streaming mode we have already inferred and registered partition columns, so we will
    // never have to materialize the lazy val below.
lazy val tempFileIndex = fileIndex.getOrElse {
val globbedPaths =
checkAndGlobPathIfNecessary(checkEmptyGlobPath = false, checkFilesExist = false)
createInMemoryFileIndex(globbedPaths)
}
val partitionSchema = if (partitionColumns.isEmpty) {
// Try to infer partitioning, because no DataSource in the read path provides the partitioning
// columns properly unless it is a Hive DataSource
tempFileIndex.partitionSchema
} else {
      // maintain old behavior before SPARK-18510: if userSpecifiedSchema is empty, use the inferred
      // partitioning
if (userSpecifiedSchema.isEmpty) {
val inferredPartitions = tempFileIndex.partitionSchema
inferredPartitions
} else {
val partitionFields = partitionColumns.map { partitionColumn =>
userSpecifiedSchema.flatMap(_.find(c => equality(c.name, partitionColumn))).orElse {
val inferredPartitions = tempFileIndex.partitionSchema
val inferredOpt = inferredPartitions.find(p => equality(p.name, partitionColumn))
if (inferredOpt.isDefined) {
logDebug(
s"""Type of partition column: $partitionColumn not found in specified schema
|for $format.
|User Specified Schema
|=====================
|${userSpecifiedSchema.orNull}
|
|Falling back to inferred dataType if it exists.
""".stripMargin)
}
inferredOpt
}.getOrElse {
throw new AnalysisException(s"Failed to resolve the schema for $format for " +
s"the partition column: $partitionColumn. It must be specified manually.")
}
}
StructType(partitionFields)
}
}
val dataSchema = userSpecifiedSchema.map { schema =>
StructType(schema.filterNot(f => partitionSchema.exists(p => equality(p.name, f.name))))
}.orElse {
format.inferSchema(
sparkSession,
caseInsensitiveOptions,
tempFileIndex.allFiles())
}.getOrElse {
throw new AnalysisException(
s"Unable to infer schema for $format. It must be specified manually.")
}
    // We just print a warning message if the data schema and partition schema have duplicate
    // columns. This is because we allow users to do so in the previous Spark releases and
// we have the existing tests for the cases (e.g., `ParquetHadoopFsRelationSuite`).
// See SPARK-18108 and SPARK-21144 for related discussions.
try {
SchemaUtils.checkColumnNameDuplication(
(dataSchema ++ partitionSchema).map(_.name),
"in the data schema and the partition schema",
equality)
} catch {
case e: AnalysisException => logWarning(e.getMessage)
}
(dataSchema, partitionSchema)
}
/** Returns the name and schema of the source that can be used to continually read data. */
private def sourceSchema(): SourceInfo = {
providingClass.newInstance() match {
case s: StreamSourceProvider =>
val (name, schema) = s.sourceSchema(
sparkSession.sqlContext, userSpecifiedSchema, className, caseInsensitiveOptions)
SourceInfo(name, schema, Nil)
case format: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
// Check whether the path exists if it is not a glob pattern.
// For glob pattern, we do not check it because the glob pattern might only make sense
// once the streaming job starts and some upstream source starts dropping data.
val hdfsPath = new Path(path)
if (!SparkHadoopUtil.get.isGlobPath(hdfsPath)) {
val fs = hdfsPath.getFileSystem(sparkSession.sessionState.newHadoopConf())
if (!fs.exists(hdfsPath)) {
throw new AnalysisException(s"Path does not exist: $path")
}
}
val isSchemaInferenceEnabled = sparkSession.sessionState.conf.streamingSchemaInference
val isTextSource = providingClass == classOf[text.TextFileFormat]
        // If schema inference is disabled, all sources except text require the schema to be specified
if (!isSchemaInferenceEnabled && !isTextSource && userSpecifiedSchema.isEmpty) {
throw new IllegalArgumentException(
"Schema must be specified when creating a streaming source DataFrame. " +
"If some files already exist in the directory, then depending on the file format " +
"you may be able to create a static DataFrame on that directory with " +
"'spark.read.load(directory)' and infer schema from it.")
}
val (dataSchema, partitionSchema) = getOrInferFileFormatSchema(format)
SourceInfo(
s"FileSource[$path]",
StructType(dataSchema ++ partitionSchema),
partitionSchema.fieldNames)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed reading")
}
}
/** Returns a source that can be used to continually read data. */
def createSource(metadataPath: String): Source = {
providingClass.newInstance() match {
case s: StreamSourceProvider =>
s.createSource(
sparkSession.sqlContext,
metadataPath,
userSpecifiedSchema,
className,
caseInsensitiveOptions)
case format: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
new FileStreamSource(
sparkSession = sparkSession,
path = path,
fileFormatClassName = className,
schema = sourceInfo.schema,
partitionColumns = sourceInfo.partitionColumns,
metadataPath = metadataPath,
options = caseInsensitiveOptions)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed reading")
}
}
/** Returns a sink that can be used to continually write data. */
def createSink(outputMode: OutputMode): Sink = {
providingClass.newInstance() match {
case s: StreamSinkProvider =>
s.createSink(sparkSession.sqlContext, caseInsensitiveOptions, partitionColumns, outputMode)
case fileFormat: FileFormat =>
val path = caseInsensitiveOptions.getOrElse("path", {
throw new IllegalArgumentException("'path' is not specified")
})
if (outputMode != OutputMode.Append) {
throw new AnalysisException(
s"Data source $className does not support $outputMode output mode")
}
new FileStreamSink(sparkSession, path, fileFormat, partitionColumns, caseInsensitiveOptions)
case _ =>
throw new UnsupportedOperationException(
s"Data source $className does not support streamed writing")
}
}
/**
* Create a resolved [[BaseRelation]] that can be used to read data from or write data into this
* [[DataSource]]
*
* @param checkFilesExist Whether to confirm that the files exist when generating the
* non-streaming file based datasource. StructuredStreaming jobs already
* list file existence, and when generating incremental jobs, the batch
* is considered as a non-streaming file based data source. Since we know
* that files already exist, we don't need to check them again.
*/
def resolveRelation(checkFilesExist: Boolean = true): BaseRelation = {
val relation = (providingClass.newInstance(), userSpecifiedSchema) match {
// TODO: Throw when too much is given.
case (dataSource: SchemaRelationProvider, Some(schema)) =>
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions, schema)
case (dataSource: RelationProvider, None) =>
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
case (_: SchemaRelationProvider, None) =>
throw new AnalysisException(s"A schema needs to be specified when using $className.")
case (dataSource: RelationProvider, Some(schema)) =>
val baseRelation =
dataSource.createRelation(sparkSession.sqlContext, caseInsensitiveOptions)
if (baseRelation.schema != schema) {
throw new AnalysisException(s"$className does not allow user-specified schemas.")
}
baseRelation
// We are reading from the results of a streaming query. Load files from the metadata log
// instead of listing them using HDFS APIs.
case (format: FileFormat, _)
if FileStreamSink.hasMetadata(
caseInsensitiveOptions.get("path").toSeq ++ paths,
sparkSession.sessionState.newHadoopConf()) =>
val basePath = new Path((caseInsensitiveOptions.get("path").toSeq ++ paths).head)
val fileCatalog = new MetadataLogFileIndex(sparkSession, basePath, userSpecifiedSchema)
val dataSchema = userSpecifiedSchema.orElse {
format.inferSchema(
sparkSession,
caseInsensitiveOptions,
fileCatalog.allFiles())
}.getOrElse {
throw new AnalysisException(
s"Unable to infer schema for $format at ${fileCatalog.allFiles().mkString(",")}. " +
"It must be specified manually")
}
HadoopFsRelation(
fileCatalog,
partitionSchema = fileCatalog.partitionSchema,
dataSchema = dataSchema,
bucketSpec = None,
format,
caseInsensitiveOptions)(sparkSession)
// This is a non-streaming file based datasource.
case (format: FileFormat, _) =>
val globbedPaths =
checkAndGlobPathIfNecessary(checkEmptyGlobPath = true, checkFilesExist = checkFilesExist)
val useCatalogFileIndex = sparkSession.sqlContext.conf.manageFilesourcePartitions &&
catalogTable.isDefined && catalogTable.get.tracksPartitionsInCatalog &&
catalogTable.get.partitionColumnNames.nonEmpty
val (fileCatalog, dataSchema, partitionSchema) = if (useCatalogFileIndex) {
val defaultTableSize = sparkSession.sessionState.conf.defaultSizeInBytes
val index = new CatalogFileIndex(
sparkSession,
catalogTable.get,
catalogTable.get.stats.map(_.sizeInBytes.toLong).getOrElse(defaultTableSize))
(index, catalogTable.get.dataSchema, catalogTable.get.partitionSchema)
} else {
val index = createInMemoryFileIndex(globbedPaths)
val (resultDataSchema, resultPartitionSchema) =
getOrInferFileFormatSchema(format, Some(index))
(index, resultDataSchema, resultPartitionSchema)
}
HadoopFsRelation(
fileCatalog,
partitionSchema = partitionSchema,
dataSchema = dataSchema.asNullable,
bucketSpec = bucketSpec,
format,
caseInsensitiveOptions)(sparkSession)
case _ =>
throw new AnalysisException(
s"$className is not a valid Spark SQL Data Source.")
}
relation match {
case hs: HadoopFsRelation =>
SchemaUtils.checkColumnNameDuplication(
hs.dataSchema.map(_.name),
"in the data schema",
equality)
SchemaUtils.checkColumnNameDuplication(
hs.partitionSchema.map(_.name),
"in the partition schema",
equality)
DataSourceUtils.verifyReadSchema(hs.fileFormat, hs.dataSchema)
case _ =>
SchemaUtils.checkColumnNameDuplication(
relation.schema.map(_.name),
"in the data schema",
equality)
}
relation
}
/**
* Creates a command node to write the given [[LogicalPlan]] out to the given [[FileFormat]].
* The returned command is unresolved and need to be analyzed.
*/
private def planForWritingFileFormat(
format: FileFormat, mode: SaveMode, data: LogicalPlan): InsertIntoHadoopFsRelationCommand = {
// Don't glob path for the write path. The contracts here are:
// 1. Only one output path can be specified on the write path;
// 2. Output path must be a legal HDFS style file system path;
// 3. It's OK that the output path doesn't exist yet;
val allPaths = paths ++ caseInsensitiveOptions.get("path")
val outputPath = if (allPaths.length == 1) {
val path = new Path(allPaths.head)
val fs = path.getFileSystem(sparkSession.sessionState.newHadoopConf())
path.makeQualified(fs.getUri, fs.getWorkingDirectory)
} else {
throw new IllegalArgumentException("Expected exactly one path to be specified, but " +
s"got: ${allPaths.mkString(", ")}")
}
val caseSensitive = sparkSession.sessionState.conf.caseSensitiveAnalysis
PartitioningUtils.validatePartitionColumn(data.schema, partitionColumns, caseSensitive)
val fileIndex = catalogTable.map(_.identifier).map { tableIdent =>
sparkSession.table(tableIdent).queryExecution.analyzed.collect {
case LogicalRelation(t: HadoopFsRelation, _, _, _) => t.location
}.head
}
// For partitioned relation r, r.schema's column ordering can be different from the column
// ordering of data.logicalPlan (partition columns are all moved after data column). This
// will be adjusted within InsertIntoHadoopFsRelation.
InsertIntoHadoopFsRelationCommand(
outputPath = outputPath,
staticPartitions = Map.empty,
ifPartitionNotExists = false,
partitionColumns = partitionColumns.map(UnresolvedAttribute.quoted),
bucketSpec = bucketSpec,
fileFormat = format,
options = options,
query = data,
mode = mode,
catalogTable = catalogTable,
fileIndex = fileIndex,
outputColumns = data.output)
}
/**
* Writes the given [[LogicalPlan]] out to this [[DataSource]] and returns a [[BaseRelation]] for
* the following reading.
*
* @param mode The save mode for this writing.
* @param data The input query plan that produces the data to be written. Note that this plan
* is analyzed and optimized.
* @param outputColumns The original output columns of the input query plan. The optimizer may not
* preserve the output column's names' case, so we need this parameter
* instead of `data.output`.
* @param physicalPlan The physical plan of the input query plan. We should run the writing
* command with this physical plan instead of creating a new physical plan,
* so that the metrics can be correctly linked to the given physical plan and
* shown in the web UI.
*/
def writeAndRead(
mode: SaveMode,
data: LogicalPlan,
outputColumns: Seq[Attribute],
physicalPlan: SparkPlan): BaseRelation = {
if (outputColumns.map(_.dataType).exists(_.isInstanceOf[CalendarIntervalType])) {
throw new AnalysisException("Cannot save interval data type into external storage.")
}
providingClass.newInstance() match {
case dataSource: CreatableRelationProvider =>
dataSource.createRelation(
sparkSession.sqlContext, mode, caseInsensitiveOptions, Dataset.ofRows(sparkSession, data))
case format: FileFormat =>
val cmd = planForWritingFileFormat(format, mode, data)
val resolvedPartCols = cmd.partitionColumns.map { col =>
// The partition columns created in `planForWritingFileFormat` should always be
// `UnresolvedAttribute` with a single name part.
assert(col.isInstanceOf[UnresolvedAttribute])
val unresolved = col.asInstanceOf[UnresolvedAttribute]
assert(unresolved.nameParts.length == 1)
val name = unresolved.nameParts.head
outputColumns.find(a => equality(a.name, name)).getOrElse {
throw new AnalysisException(
s"Unable to resolve $name given [${data.output.map(_.name).mkString(", ")}]")
}
}
val resolved = cmd.copy(partitionColumns = resolvedPartCols, outputColumns = outputColumns)
resolved.run(sparkSession, physicalPlan)
// Replace the schema with that of the DataFrame we just wrote out to avoid re-inferring
copy(userSpecifiedSchema = Some(outputColumns.toStructType.asNullable)).resolveRelation()
case _ =>
sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
}
}
/**
* Returns a logical plan to write the given [[LogicalPlan]] out to this [[DataSource]].
*/
def planForWriting(mode: SaveMode, data: LogicalPlan): LogicalPlan = {
if (data.schema.map(_.dataType).exists(_.isInstanceOf[CalendarIntervalType])) {
throw new AnalysisException("Cannot save interval data type into external storage.")
}
providingClass.newInstance() match {
case dataSource: CreatableRelationProvider =>
SaveIntoDataSourceCommand(data, dataSource, caseInsensitiveOptions, mode)
case format: FileFormat =>
DataSource.validateSchema(data.schema)
planForWritingFileFormat(format, mode, data)
case _ =>
sys.error(s"${providingClass.getCanonicalName} does not allow create table as select.")
}
}
/** Returns an [[InMemoryFileIndex]] that can be used to get partition schema and file list. */
private def createInMemoryFileIndex(globbedPaths: Seq[Path]): InMemoryFileIndex = {
val fileStatusCache = FileStatusCache.getOrCreate(sparkSession)
new InMemoryFileIndex(
sparkSession, globbedPaths, options, userSpecifiedSchema, fileStatusCache)
}
/**
* Checks and returns files in all the paths.
*/
private def checkAndGlobPathIfNecessary(
checkEmptyGlobPath: Boolean,
checkFilesExist: Boolean): Seq[Path] = {
val allPaths = caseInsensitiveOptions.get("path") ++ paths
val hadoopConf = sparkSession.sessionState.newHadoopConf()
allPaths.flatMap { path =>
val hdfsPath = new Path(path)
val fs = hdfsPath.getFileSystem(hadoopConf)
val qualified = hdfsPath.makeQualified(fs.getUri, fs.getWorkingDirectory)
val globPath = SparkHadoopUtil.get.globPathIfNecessary(fs, qualified)
if (checkEmptyGlobPath && globPath.isEmpty) {
throw new AnalysisException(s"Path does not exist: $qualified")
}
// Sufficient to check head of the globPath seq for non-glob scenario
// Don't need to check once again if files exist in streaming mode
if (checkFilesExist && !fs.exists(globPath.head)) {
throw new AnalysisException(s"Path does not exist: ${globPath.head}")
}
globPath
}.toSeq
}
}
object DataSource extends Logging {
/** A map to maintain backward compatibility in case we move data sources around. */
private val backwardCompatibilityMap: Map[String, String] = {
val jdbc = classOf[JdbcRelationProvider].getCanonicalName
val json = classOf[JsonFileFormat].getCanonicalName
val parquet = classOf[ParquetFileFormat].getCanonicalName
val csv = classOf[CSVFileFormat].getCanonicalName
val libsvm = "org.apache.spark.ml.source.libsvm.LibSVMFileFormat"
val orc = "org.apache.spark.sql.hive.orc.OrcFileFormat"
val nativeOrc = classOf[OrcFileFormat].getCanonicalName
val socket = classOf[TextSocketSourceProvider].getCanonicalName
val rate = classOf[RateStreamProvider].getCanonicalName
val avro = "org.apache.spark.sql.avro.AvroFileFormat"
Map(
"org.apache.spark.sql.jdbc" -> jdbc,
"org.apache.spark.sql.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc.DefaultSource" -> jdbc,
"org.apache.spark.sql.execution.datasources.jdbc" -> jdbc,
"org.apache.spark.sql.json" -> json,
"org.apache.spark.sql.json.DefaultSource" -> json,
"org.apache.spark.sql.execution.datasources.json" -> json,
"org.apache.spark.sql.execution.datasources.json.DefaultSource" -> json,
"org.apache.spark.sql.parquet" -> parquet,
"org.apache.spark.sql.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet" -> parquet,
"org.apache.spark.sql.execution.datasources.parquet.DefaultSource" -> parquet,
"org.apache.spark.sql.hive.orc.DefaultSource" -> orc,
"org.apache.spark.sql.hive.orc" -> orc,
"org.apache.spark.sql.execution.datasources.orc.DefaultSource" -> nativeOrc,
"org.apache.spark.sql.execution.datasources.orc" -> nativeOrc,
"org.apache.spark.ml.source.libsvm.DefaultSource" -> libsvm,
"org.apache.spark.ml.source.libsvm" -> libsvm,
"com.databricks.spark.csv" -> csv,
"com.databricks.spark.avro" -> avro,
"org.apache.spark.sql.execution.streaming.TextSocketSourceProvider" -> socket,
"org.apache.spark.sql.execution.streaming.RateSourceProvider" -> rate
)
}
/**
   * Classes that were removed in Spark 2.0. Used to detect libraries that are incompatible with Spark 2.0.
*/
private val spark2RemovedClasses = Set(
"org.apache.spark.sql.DataFrame",
"org.apache.spark.sql.sources.HadoopFsRelationProvider",
"org.apache.spark.Logging")
/** Given a provider name, look up the data source class definition. */
def lookupDataSource(provider: String, conf: SQLConf): Class[_] = {
val provider1 = backwardCompatibilityMap.getOrElse(provider, provider) match {
case name if name.equalsIgnoreCase("orc") &&
conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "native" =>
classOf[OrcFileFormat].getCanonicalName
case name if name.equalsIgnoreCase("orc") &&
conf.getConf(SQLConf.ORC_IMPLEMENTATION) == "hive" =>
"org.apache.spark.sql.hive.orc.OrcFileFormat"
case name => name
}
val provider2 = s"$provider1.DefaultSource"
val loader = Utils.getContextOrSparkClassLoader
val serviceLoader = ServiceLoader.load(classOf[DataSourceRegister], loader)
try {
serviceLoader.asScala.filter(_.shortName().equalsIgnoreCase(provider1)).toList match {
// the provider format did not match any given registered aliases
case Nil =>
try {
Try(loader.loadClass(provider1)).orElse(Try(loader.loadClass(provider2))) match {
case Success(dataSource) =>
// Found the data source using fully qualified path
dataSource
case Failure(error) =>
if (provider1.startsWith("org.apache.spark.sql.hive.orc")) {
throw new AnalysisException(
"Hive built-in ORC data source must be used with Hive support enabled. " +
"Please use the native ORC data source by setting 'spark.sql.orc.impl' to " +
"'native'")
} else {
throw new ClassNotFoundException(
s"Failed to find data source: $provider1. Please find packages at " +
"http://spark.apache.org/third-party-projects.html",
error)
}
}
} catch {
case e: NoClassDefFoundError => // This one won't be caught by Scala NonFatal
// NoClassDefFoundError's class name uses "/" rather than "." for packages
val className = e.getMessage.replaceAll("/", ".")
if (spark2RemovedClasses.contains(className)) {
throw new ClassNotFoundException(s"$className was removed in Spark 2.0. " +
"Please check if your library is compatible with Spark 2.0", e)
} else {
throw e
}
}
case head :: Nil =>
// there is exactly one registered alias
head.getClass
case sources =>
          // There are multiple registered aliases for the input. If there is a single datasource
// that has "org.apache.spark" package in the prefix, we use it considering it is an
// internal datasource within Spark.
val sourceNames = sources.map(_.getClass.getName)
val internalSources = sources.filter(_.getClass.getName.startsWith("org.apache.spark"))
if (internalSources.size == 1) {
logWarning(s"Multiple sources found for $provider1 (${sourceNames.mkString(", ")}), " +
s"defaulting to the internal datasource (${internalSources.head.getClass.getName}).")
internalSources.head.getClass
} else {
throw new AnalysisException(s"Multiple sources found for $provider1 " +
s"(${sourceNames.mkString(", ")}), please specify the fully qualified class name.")
}
}
} catch {
case e: ServiceConfigurationError if e.getCause.isInstanceOf[NoClassDefFoundError] =>
// NoClassDefFoundError's class name uses "/" rather than "." for packages
val className = e.getCause.getMessage.replaceAll("/", ".")
if (spark2RemovedClasses.contains(className)) {
throw new ClassNotFoundException(s"Detected an incompatible DataSourceRegister. " +
"Please remove the incompatible library from classpath or upgrade it. " +
s"Error: ${e.getMessage}", e)
} else {
throw e
}
}
}
/**
* When creating a data source table, the `path` option has a special meaning: the table location.
* This method extracts the `path` option and treat it as table location to build a
 * [[CatalogStorageFormat]]. Note that the `path` option is removed from the options after this.
*/
def buildStorageFormatFromOptions(options: Map[String, String]): CatalogStorageFormat = {
val path = CaseInsensitiveMap(options).get("path")
val optionsWithoutPath = options.filterKeys(_.toLowerCase(Locale.ROOT) != "path")
CatalogStorageFormat.empty.copy(
locationUri = path.map(CatalogUtils.stringToURI), properties = optionsWithoutPath)
}
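  // For example (illustrative values only): buildStorageFormatFromOptions(
  //   Map("path" -> "/data/t", "compression" -> "snappy"))
  // returns a CatalogStorageFormat whose locationUri is derived from "/data/t" and whose
  // properties contain only "compression" -> "snappy"; the "path" entry itself is stripped.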
/**
* Called before writing into a FileFormat based data source to make sure the
* supplied schema is not empty.
* @param schema
*/
private def validateSchema(schema: StructType): Unit = {
def hasEmptySchema(schema: StructType): Boolean = {
schema.size == 0 || schema.find {
case StructField(_, b: StructType, _, _) => hasEmptySchema(b)
case _ => false
}.isDefined
}
if (hasEmptySchema(schema)) {
throw new AnalysisException(
s"""
|Datasource does not support writing empty or nested empty schemas.
|Please make sure the data schema has at least one or more column(s).
""".stripMargin)
}
}
}
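// A hedged usage sketch, not part of Spark itself: it shows how the entry points above are
// typically exercised. The format name and path are illustrative only; the short name "parquet"
// is resolved through DataSource.lookupDataSource via the DataSourceRegister service loader.
private[datasources] object DataSourceUsageExample {
  def example(spark: SparkSession): BaseRelation = {
    val ds = DataSource(
      sparkSession = spark,
      className = "parquet",                 // short name resolved to ParquetFileFormat
      paths = Seq("/tmp/example/parquet"),   // hypothetical location
      options = Map("mergeSchema" -> "false"))
    ds.resolveRelation(checkFilesExist = false)
  }
}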
| eyalfa/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/DataSource.scala | Scala | apache-2.0 | 34,598 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Tue Jan 5 16:14:38 EST 2010
* @see LICENSE (MIT style license file).
*/
package scalation.scala2d
import scala.math.{atan, cos, Pi, sin}
import scalation.scala2d.Colors._
import scalation.scala2d.QCurve.calcControlPoint
import scalation.scala2d.Shapes.{Dimension, Graphics, Graphics2D}
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `QArrow` class uses Java's `Path2D` class to create a quad curve with an
* arrowhead on the far end. The main curve is defined by points 'p1' and 'p2'
* along with a control point 'pc'. Points 'p3' and 'p4' are the corners of the
* triangular arrowhead.
* @param p1 the starting point for the curve/arc
* @param pc the control point for the curve/arc
* @param p2 the ending point for the curve/arc
* @param len the length of the arrowhead on the curve/arc
*/
case class QArrow (var p1: R2 = R2 (0.0, 0.0),
var pc: R2 = R2 (0.0, 0.0),
var p2: R2 = R2 (0.0, 0.0),
len: Int = 10)
extends java.awt.geom.Path2D.Double //with CurvilinearShape
{
{
val deltaX = p2.x - pc.x
val slope = (p2.y - pc.y) / deltaX // slope of curve at p2
val a1_2 = if (slope == Double.PositiveInfinity) Pi / 2.0 // angle of line pc to p2
else if (slope == Double.NegativeInfinity) 3.0 * Pi / 2.0
else if (deltaX < 0.0) Pi + atan (slope)
else atan (slope)
val a2_3 = a1_2 - 5.0 * Pi / 6.0 // angle of line p2 to p3
val a3_4 = a1_2 + Pi / 2.0 // angle of line p3 to p4
val p3 = R2 (p2.x + len * cos (a2_3), p2.y + len * sin (a2_3))
val p4 = R2 (p3.x + len * cos (a3_4), p3.y + len * sin (a3_4))
moveTo (p1.x, p1.y)
quadTo (pc.x, pc.y, p2.x, p2.y)
lineTo (p3.x, p3.y)
lineTo (p4.x, p4.y)
lineTo (p2.x, p2.y)
} // primary constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Construct a `QArrow` (quad arc) where bend indicates the distance to the
* control point.
* @param p1 the starting point for the curve/arc
* @param p2 the ending point for the curve/arc
* @param bend the bend or curvature (1. => line length)
*/
def this (p1: R2, p2: R2, bend: Double)
{
this (p1, calcControlPoint (p1, p2, bend), p2)
} // constructor
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the x-coordinate of the center of the main line/curve.
*/
def getCenterX (): Double =
{
if (pc.x > 0.0) (p1.x + 2.0 * pc.x + p2.x) / 4.0
else (p1.x + p2.x) / 2.0
} // getCenterX
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Get the y-coordinate of the center of the main line/curve.
*/
def getCenterY (): Double =
{
if (pc.y > 0.0) (p1.y + 2.0 * pc.y + p2.y) / 4.0
else (p1.y + p2.y) / 2.0
} // getCenterY
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set (or reset) the location for the `QArrow` as a line.
* @param _p1 the starting point
* @param _p2 the ending point
*/
def setLine (_p1: R2, _p2: R2)
{
p1 = _p1; p2 = _p2
val pc = calcControlPoint (p1, p2, 0.0) // use 0 for the bend
setLine (p1, pc, p2)
} // setLine
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set (or reset) the location for the `QArrow` as a curve using bend
* to compute the control point.
* @param _p1 the starting point
* @param _p2 the ending point
* @param bend the bend or curvature (1. => line-length)
*/
def setLine (_p1: R2, _p2: R2, bend: Double)
{
p1 = _p1; p2 = _p2
pc = calcControlPoint (p1, p2, bend)
setLine (p1, pc, p2)
} // setLine
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Set (or reset) the location for the `QArrow` as a curve using an explicitly
* given control point.
* @param _p1 the starting point
* @param _pc the control point
* @param _p2 the ending point
*/
def setLine (_p1: R2, _pc: R2, _p2: R2)
{
        p1 = _p1; pc = _pc; p2 = _p2
        reset ()                                              // clear any previously built path segments
val deltaX = p2.x - pc.x
val slope = (p2.y - pc.y) / deltaX // slope of curve at p2
val a1_2 = if (slope == Double.PositiveInfinity) Pi / 2.0 // angle of line pc to p2
else if (slope == Double.NegativeInfinity) 3.0 * Pi / 2.0
else if (deltaX < 0.0) Pi + atan (slope)
else atan (slope)
val a2_3 = a1_2 - 5.0 * Pi / 6.0 // angle of line p2 to p3
val a3_4 = a1_2 + Pi / 2.0 // angle of line p3 to p4
val p3 = R2 (p2.x + len * cos (a2_3), p2.y + len * sin (a2_3))
val p4 = R2 (p3.x + len * cos (a3_4), p3.y + len * sin (a3_4))
moveTo (p1.x, p1.y)
quadTo (pc.x, pc.y, p2.x, p2.y)
lineTo (p3.x, p3.y)
lineTo (p4.x, p4.y)
lineTo (p2.x, p2.y)
} // setLine
} // QArrow class
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `QArrowTest` object is used to test the `QArrow` class.
*/
object QArrowTest extends App
{
private val arc1 = new QArrow (R2 (200, 200), R2 (300, 200), .25)
private val arc2 = new QArrow (R2 (200, 200), R2 (300, 300), .25)
private val arc3 = new QArrow (R2 (200, 200), R2 (200, 300), .25)
private val arc4 = new QArrow ()
private val arc5 = new QArrow (R2 (200, 200), R2 (150, 220), R2 (100, 200))
private val arc6 = new QArrow (R2 (200, 200), R2 (180, 170), R2 (100, 100))
private val arc7 = new QArrow (R2 (200, 200), R2 (220, 150), R2 (200, 100))
private val arc8 = new QArrow (R2 (200, 200), R2 (250, 170), R2 (300, 100))
class Canvas extends Panel
{
setBackground (white)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Paint the components into the canvas (drawing panel).
* @param gr low-resolution graphics environment
*/
override def paintComponent (gr: Graphics)
{
super.paintComponent (gr)
val g2d = gr.asInstanceOf [Graphics2D] // use hi-resolution
g2d.setPaint (red)
g2d.draw (arc1)
g2d.setPaint (orange)
g2d.draw (arc2)
g2d.setPaint (yellow)
g2d.draw (arc3)
g2d.setPaint (yellowgreen)
arc4.setLine (R2 (200, 200), R2 (100, 300), .25)
g2d.draw (arc4)
g2d.setPaint (green)
g2d.draw (arc5)
g2d.setPaint (cyan)
g2d.draw (arc6)
g2d.setPaint (blue)
g2d.draw (arc7)
g2d.setPaint (violet)
g2d.draw (arc8)
} // paintComponent
} // Canvas class
// Put the drawing canvas in the visualization frame
new VizFrame ("QArrowTest", new Canvas (), 600, 600)
} // QArrowTest object
| scalation/fda | scalation_1.3/scalation_mathstat/src/main/scala/scalation/scala2d/QArrow.scala | Scala | mit | 7,521 |
/*
* Copyright (c) 2013 Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see http://www.gnu.org/licenses/agpl.html.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package generated.scala
/* FloatStreamRow wraps a FloatDenseVectorView that represents a row of a FloatStream.
*
* author: Arvind Sujeeth ([email protected])
* created: 3/15/11
*
* Pervasive Parallelism Laboratory (PPL)
* Stanford University
*
*/
class FloatStreamRow(chunkRow: Int, offset: Int, stream: FloatStream, x: Array[Float])
extends FloatDenseVectorView(x, chunkRow * stream.numCols, 1, stream.numCols, true) {
// absolute row index in the stream
val index = offset * stream.chunkSize + chunkRow
}
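// Illustration (not part of the generated source): with stream.chunkSize = 1000, chunkRow = 5 and
// offset = 2, the absolute index is 2 * 1000 + 5 = 2005, and the wrapped view starts at element
// 5 * stream.numCols of the chunk's flat array x.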
| tesendic/Relite | src/generated/scala/FloatStreamRow.scala | Scala | agpl-3.0 | 1,504 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.index
import java.util.Date
import org.geotools.data.{Query, Transaction}
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.TestWithFeatureType
import org.locationtech.geomesa.features.ScalaSimpleFeature
import org.locationtech.geomesa.index.conf.QueryHints._
import org.locationtech.geomesa.index.index.z2.XZ2Index
import org.locationtech.geomesa.index.index.z3.XZ3Index
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.EncodedValues
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class BinLineStringTest extends Specification with TestWithFeatureType {
import org.locationtech.geomesa.utils.geotools.GeoToolsDateFormat
sequential
override val spec = "name:String,track:String,dtgList:List[Date],dtg:Date,*geom:LineString:srid=4326"
val features =
(0 until 10).map { i =>
val sf = new ScalaSimpleFeature(sft, s"$i")
val geom = s"LINESTRING(40 6$i, 40.1 6$i, 40.2 6$i, 40.3 6$i)"
val dates = new java.util.ArrayList[Date]
      (0 until 4).map { mm =>
        val dt = java.time.LocalDateTime.parse(s"2010-05-07T0$i:0$mm:00.000Z", GeoToolsDateFormat)
        java.util.Date.from(dt.toInstant(java.time.ZoneOffset.UTC))
      }.foreach(dates.add)
sf.setAttributes(Array[AnyRef](s"name$i", "track1", dates, s"2010-05-07T0$i:00:00.000Z", geom))
sf
} ++ (10 until 20).map { i =>
val sf = new ScalaSimpleFeature(sft, s"$i")
val geom = s"LINESTRING(40 8${i - 10}, 40.1 8${i - 10}, 40.2 8${i - 10}, 40.3 8${i - 10})"
val dates = new java.util.ArrayList[Date]
      (0 until 4).map { mm =>
        val dt = java.time.LocalDateTime.parse(s"2010-05-07T0${i-10}:0$mm:00.000Z", GeoToolsDateFormat)
        java.util.Date.from(dt.toInstant(java.time.ZoneOffset.UTC))
      }.foreach(dates.add)
sf.setAttributes(Array[AnyRef](s"name$i", "track2", dates, s"2010-05-07T0${i-10}:00:00.000Z", geom))
sf
}
addFeatures(features)
def getQuery(filter: String, dtg: Option[String] = None, label: Option[String] = None): Query = {
val query = new Query(sftName, ECQL.toFilter(filter))
query.getHints.put(BIN_TRACK, "track")
query.getHints.put(BIN_BATCH_SIZE, 100)
dtg.foreach(query.getHints.put(BIN_DTG, _))
label.foreach(query.getHints.put(BIN_LABEL, _))
query
}
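  // Each returned "bin" record is fixed-width: 16 bytes when only track/dtg/lat/lon are encoded,
  // 24 bytes when an 8-byte label is appended (see binSize in runQuery below). The field layout
  // itself is an assumption based on BinaryOutputEncoder's record sizes; the test only relies on
  // the 16/24-byte grouping.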
def runQuery(query: Query): Seq[EncodedValues] = {
import org.locationtech.geomesa.utils.bin.BinaryOutputEncoder.BIN_ATTRIBUTE_INDEX
val binSize = if (query.getHints.containsKey(BIN_LABEL)) 24 else 16
val features = SelfClosingIterator(ds.getFeatureReader(query, Transaction.AUTO_COMMIT))
val bytes = features.map { f =>
val array = f.getAttribute(BIN_ATTRIBUTE_INDEX).asInstanceOf[Array[Byte]]
val copy = Array.ofDim[Byte](array.length)
System.arraycopy(array, 0, copy, 0, array.length)
copy
}
bytes.flatMap(b => b.grouped(binSize).map(BinaryOutputEncoder.decode)).toSeq
}
"BinAggregatingIterator" should {
"return all points of a linestring with z2 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72)"
val query = getQuery(filter)
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ2Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
bins.map(_.dtg) must contain(features(i).getAttribute("dtg").asInstanceOf[Date].getTime).exactly(4.times)
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
"return all points of a linestring with z3 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72) " +
"AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val query = getQuery(filter)
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ3Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
bins.map(_.dtg) must contain(features(i).getAttribute("dtg").asInstanceOf[Date].getTime).exactly(4.times)
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
"return all points of a linestring plus label with z2 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72)"
val query = getQuery(filter, label = Some("name"))
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ2Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
bins.map(_.dtg) must contain(features(i).getAttribute("dtg").asInstanceOf[Date].getTime).exactly(4.times)
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
bins.map(_.label) must contain(BinaryOutputEncoder.convertToLabel(s"name$i")).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
"return all points of a linestring plus label with z3 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72) " +
"AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val query = getQuery(filter, label = Some("name"))
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ3Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
bins.map(_.dtg) must contain(features(i).getAttribute("dtg").asInstanceOf[Date].getTime).exactly(4.times)
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
bins.map(_.label) must contain(BinaryOutputEncoder.convertToLabel(s"name$i")).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
"return all points of a linestring and date list with z2 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72)"
val query = getQuery(filter, dtg = Some("dtgList"))
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ2Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
val baseDate = features(i).getAttribute("dtg").asInstanceOf[Date].getTime
bins.map(_.dtg) must containAllOf(Seq(baseDate, baseDate + 60000, baseDate + 120000, baseDate + 180000))
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
"return all points of a linestring and date list with z3 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72) " +
"AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val query = getQuery(filter, dtg = Some("dtgList"))
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ3Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
val baseDate = features(i).getAttribute("dtg").asInstanceOf[Date].getTime
bins.map(_.dtg) must containAllOf(Seq(baseDate, baseDate + 60000, baseDate + 120000, baseDate + 180000))
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
"return all points of a linestring and date list plus label with z2 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72)"
val query = getQuery(filter, dtg = Some("dtgList"), label = Some("name"))
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ2Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
val baseDate = features(i).getAttribute("dtg").asInstanceOf[Date].getTime
bins.map(_.dtg) must containAllOf(Seq(baseDate, baseDate + 60000, baseDate + 120000, baseDate + 180000))
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
bins.map(_.label) must contain(BinaryOutputEncoder.convertToLabel(s"name$i")).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
"return all points of a linestring and date list plus label with z3 index" >> {
val filter = "bbox(geom, 38, 58, 42, 72) " +
"AND dtg between '2010-05-07T00:00:00.000Z' and '2010-05-08T00:00:00.000Z'"
val query = getQuery(filter, dtg = Some("dtgList"), label = Some("name"))
forall(ds.getQueryPlan(query))(_.filter.index.name mustEqual XZ3Index.name)
val bins = runQuery(query)
bins must haveLength(40)
forall(bins.map(_.trackId))(_ mustEqual "track1".hashCode)
forall(0 until 10) { i =>
val baseDate = features(i).getAttribute("dtg").asInstanceOf[Date].getTime
bins.map(_.dtg) must containAllOf(Seq(baseDate, baseDate + 60000, baseDate + 120000, baseDate + 180000))
bins.map(_.lat) must contain(60.0f + i).exactly(4.times)
bins.map(_.label) must contain(BinaryOutputEncoder.convertToLabel(s"name$i")).exactly(4.times)
}
bins.map(_.lon) must contain(40.0f).exactly(10.times)
bins.map(_.lon) must contain(40.1f).exactly(10.times)
bins.map(_.lon) must contain(40.2f).exactly(10.times)
bins.map(_.lon) must contain(40.3f).exactly(10.times)
}
}
}
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/index/BinLineStringTest.scala | Scala | apache-2.0 | 11,572 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.bg.imp
import akka.NotUsed
import akka.stream.FlowShape
import akka.stream.contrib.PartitionWith
import akka.stream.scaladsl.{Flow, GraphDSL, Merge, Partition}
import cmwell.bg.BGMetrics
import cmwell.common.formats.BGMessage
import cmwell.common._
import cmwell.zstore.ZStore
import com.typesafe.scalalogging.LazyLogging
import scala.concurrent.ExecutionContext
object RefsEnricher extends LazyLogging {
def toSingle(bgm: BGMetrics, irwReadConcurrency: Int, zStore: ZStore)
(implicit ec: ExecutionContext): Flow[BGMessage[Command], BGMessage[SingleCommand], NotUsed] = {
Flow.fromGraph(GraphDSL.create() { implicit b =>
import GraphDSL.Implicits._
// CommandRef goes left, all rest go right
// update metrics for each type of command
val commandsPartitioner = b.add(PartitionWith[BGMessage[Command], BGMessage[CommandRef], BGMessage[Command]] {
case bgm @ BGMessage(_, CommandRef(_)) => Left(bgm.asInstanceOf[BGMessage[CommandRef]])
case bgm => Right(bgm)
})
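      // Commands that were off-loaded to zStore (arriving here only as a CommandRef) are fetched
      // and deserialized back into a concrete Command before being merged with the commands that
      // arrived inline.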
val commandRefsFetcher = Flow[BGMessage[CommandRef]].mapAsync(irwReadConcurrency) {
case bgMessage @ BGMessage(_, CommandRef(ref)) => {
zStore.get(ref).map { payload =>
bgMessage.copy(message = CommandSerializer.decode(payload))
}
}
}
val singleCommandsMerge = b.add(Merge[BGMessage[Command]](2))
commandsPartitioner.out0 ~> commandRefsFetcher ~> singleCommandsMerge.in(0)
commandsPartitioner.out1 ~> singleCommandsMerge.in(1)
      FlowShape(commandsPartitioner.in, singleCommandsMerge.out.map {
bgMessage => {
// cast to SingleCommand while updating metrics
bgMessage.message match {
            case wc: WriteCommand =>
              bgm.writeCommandsCounter += 1
              bgm.infotonCommandWeightHist += wc.infoton.weight
            case oc: OverwriteCommand =>
              bgm.overrideCommandCounter += 1
              bgm.infotonCommandWeightHist += oc.infoton.weight
case _: UpdatePathCommand => bgm.updatePathCommandsCounter += 1
case _: DeletePathCommand => bgm.deletePathCommandsCounter += 1
case _: DeleteAttributesCommand => bgm.deleteAttributesCommandsCounter += 1
case unknown => logger.error(s"unknown command [$unknown]")
}
bgm.commandMeter.mark()
bgMessage.copy(message = bgMessage.message.asInstanceOf[SingleCommand])
}
}.outlet)
})
}
}
| e-orz/CM-Well | server/cmwell-bg/src/main/scala/cmwell/bg/imp/RefsEnricher.scala | Scala | apache-2.0 | 3,228 |
package play.modules.reactivemongo
import java.io.Serializable
final class NamedDatabaseImpl(val value: String)
extends NamedDatabase
with Serializable {
assert(value != null)
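  // hashCode and equals follow the java.lang.annotation.Annotation contract: a member's hash is
  // (127 * memberName.hashCode()) ^ memberValue.hashCode(), and the only member here is "value".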
override def hashCode: Int = (127 * "value".hashCode()) ^ value.hashCode()
override def equals(that: Any): Boolean = that match {
case other: NamedDatabase =>
other.value == this.value
case _ =>
false
}
override def toString: String =
s"@${classOf[NamedDatabase].getName}(value=${value})"
def annotationType = classOf[NamedDatabase]
}
| ReactiveMongo/Play-ReactiveMongo | src/main/scala-2.13/play/modules/reactivemongo/NamedDatabaseImpl.scala | Scala | apache-2.0 | 564 |
package co.s4n.comision.infrastructure
import co.s4n.comision.domain.{ Cliente, Nueva, Comision }
import ddd.Transformer
import org.scalatest.FunSuite
class TransformableTest extends FunSuite {
val c = new Comision(
id = None,
valorComision = 1l,
iva = 1l,
Nueva(),
new Cliente("CC1234567", "Pepito")
)
test("Basic transformation Entity to Record test") {
import co.s4n.comision.domain.ComisionRepository._
val record: (ComisionRecord, ClienteRecord) = Transformer.toRecord(c)
assert("CC1234567" === record._2.id)
}
}
| yujikiriki/functional-domain | src/test/scala/co/s4n/comision/infrastructure/TransformableTest.scala | Scala | unlicense | 563 |
package archiver
import java.io.File
trait Logger {
def debug(msg: String): Unit
def info(msg: String): Unit
}
object Logger {
implicit object ConsoleLogger extends Logger {
def debug(msg: String) = Console.out.println("[DEBUG] %s".format(msg))
def info(msg: String) = Console.out.println("[INFO] %s".format(msg))
}
}
object Archiver {
def apply(packaging: Packaging): Archiver = {
packaging match {
case Packaging.Directory => DirectoryArchiver
case Packaging.Zip => ZipArchiver
case Packaging.TarGz => TarArchiver
      case _ => sys.error(s"Unsupported packaging: $packaging")
}
}
}
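// A hedged usage sketch (the FileMapping construction is illustrative only; the real factory
// lives elsewhere in this project):
//
//   import archiver._
//   // val mapping: FileMapping = ...                              // name -> File entries
//   // Archiver(Packaging.Zip).create(mapping, new File("target/app.zip"))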
trait Archiver {
def create(mapping: FileMapping, output: File)(implicit logger: Logger): File
}
object DirectoryArchiver extends Archiver {
def create(mapping: FileMapping, output: File)(implicit logger: Logger) = {
if (output.exists) {
IO.delete(output)
}
IO.createDirectory(output)
mapping.foreach { case (name, file) =>
val target = output / name
IO.copy(file, target)
}
logger.debug("Copied all files to " + output)
logger.debug("Setting permissions")
mapping.permissions.foreach{ case (name, p) =>
val target = output / name
target.setPermissions(p)
}
logger.debug("Done setting permissions")
output
}
}
| hamnis/scala-archiver | src/main/scala/archiver/Archiver.scala | Scala | apache-2.0 | 1,314 |
package almond.interpreter
import almond.interpreter.api.DisplayData
import almond.interpreter.util.DisplayDataOps.toDisplayDataOps
import almond.protocol.RawJson
final case class Inspection(data: Map[String, RawJson], metadata: Map[String, RawJson] = Map.empty)
object Inspection {
def fromDisplayData(data: DisplayData): Inspection =
Inspection(data.jsonData, data.jsonMetadata)
}
| alexarchambault/jupyter-scala | modules/shared/interpreter/src/main/scala/almond/interpreter/Inspection.scala | Scala | apache-2.0 | 392 |
/*
* Copyright 2009-2017. DigitalGlobe, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and limitations under the License.
*/
package org.mrgeo.mapalgebra.unarymath
import java.awt.image.DataBuffer
import org.mrgeo.mapalgebra.parser.ParserNode
import org.mrgeo.mapalgebra.raster.RasterMapOp
import org.mrgeo.mapalgebra.{MapOp, MapOpRegistrar}
object TanMapOp extends MapOpRegistrar {
override def register:Array[String] = {
Array[String]("tan")
}
def create(raster:RasterMapOp):MapOp =
new TanMapOp(Some(raster))
override def apply(node:ParserNode, variables:String => Option[ParserNode]):MapOp =
new TanMapOp(node, variables)
}
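// Registered under the map algebra name "tan" (see register above); a map algebra expression
// such as tan([elevation]) would resolve to this op. The exact expression syntax is MrGeo's and
// is assumed here for illustration.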
class TanMapOp extends RawUnaryMathMapOp {
private[unarymath] def this(raster:Option[RasterMapOp]) = {
this()
input = raster
}
private[unarymath] def this(node:ParserNode, variables:String => Option[ParserNode]) = {
this()
initialize(node, variables)
}
override private[unarymath] def function(a:Double):Double = Math.tan(a)
override private[unarymath] def datatype():Int = {
DataBuffer.TYPE_FLOAT
}
override private[unarymath] def nodata():Double = {
Float.NaN
}
}
| ngageoint/mrgeo | mrgeo-mapalgebra/mrgeo-mapalgebra-rastermath/src/main/scala/org/mrgeo/mapalgebra/unarymath/TanMapOp.scala | Scala | apache-2.0 | 1,650 |